4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
35 * Author: Nikita Danilov <nikita@clusterfs.com>
36 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
40 #define DEBUG_SUBSYSTEM S_OSD
42 /* prerequisite for linux/xattr.h */
43 #include <linux/types.h>
44 /* prerequisite for linux/xattr.h */
47 #include <linux/swap.h>
48 #include <linux/pagevec.h>
51 * struct OBD_{ALLOC,FREE}*()
53 #include <obd_support.h>
54 #include <libcfs/libcfs.h>
56 #include "osd_internal.h"
59 #include <ldiskfs/ldiskfs_extents.h>
60 #include <ldiskfs/ldiskfs.h>
63 #define SECTOR_SHIFT 9
66 struct kmem_cache *biop_cachep;
68 #ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
69 static void dio_complete_routine(struct bio *bio);
71 static void dio_complete_routine(struct bio *bio, int error);
74 static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
77 struct osd_bio_private *bio_private = NULL;
80 OBD_SLAB_ALLOC_GFP(bio_private, biop_cachep, sizeof(*bio_private),
82 if (bio_private == NULL)
85 bio->bi_end_io = dio_complete_routine;
86 bio->bi_private = bio_private;
87 bio_private->obp_start_page_idx = start_page_idx;
88 bio_private->obp_iobuf = iobuf;
93 static void osd_bio_fini(struct bio *bio)
95 struct osd_bio_private *bio_private;
99 bio_private = bio->bi_private;
101 OBD_SLAB_FREE(bio_private, biop_cachep, sizeof(*bio_private));
104 static inline bool osd_use_page_cache(struct osd_device *d)
106 /* do not use pagecache if write and read caching are disabled */
107 if (d->od_writethrough_cache + d->od_read_cache == 0)
109 /* use pagecache by default */
113 static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
115 int rw, const short line, int pages)
119 LASSERTF(iobuf->dr_elapsed_valid == 0,
120 "iobuf %px, reqs %d, rw %d, line %d\n", iobuf,
121 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
123 LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
125 init_waitqueue_head(&iobuf->dr_wait);
126 atomic_set(&iobuf->dr_numreqs, 0);
127 iobuf->dr_npages = 0;
128 iobuf->dr_lextents = 0;
129 iobuf->dr_pextents = 0;
134 iobuf->dr_elapsed = ktime_set(0, 0);
135 /* must be counted before, so assert */
137 iobuf->dr_init_at = line;
138 iobuf->dr_inode = inode;
140 /* Init dr_start_pg_wblks to 0 for osd_read/write_prep().
141 * osd_write_commit() needs to keep the value assigned in
142 * osd_ldiskfs_map_inode_pages() during retries; before that,
143 * initializing dr_start_pg_wblks to 0 in osd_write_prep() is sufficient.
146 iobuf->dr_start_pg_wblks = 0;
148 blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
149 if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
153 /* start with 1MB for 4K blocks */
155 while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
158 CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
159 (unsigned int)(pages * sizeof(iobuf->dr_lnbs[0])), i, pages);
161 blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
162 iobuf->dr_max_pages = 0;
163 CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
164 (unsigned int)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
166 lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
167 iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
168 if (unlikely(iobuf->dr_blocks == NULL))
171 lu_buf_realloc(&iobuf->dr_lnb_buf,
172 pages * sizeof(iobuf->dr_lnbs[0]));
173 iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
174 if (unlikely(iobuf->dr_lnbs == NULL))
177 iobuf->dr_max_pages = pages;
182 #define osd_init_iobuf(dev, iobuf, inode, rw, pages) \
185 BUILD_BUG_ON(__LINE__ >= (1 << 16)); \
186 __r = __osd_init_iobuf(dev, iobuf, inode, rw, __LINE__, pages); \
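/*
 * Illustrative note (not from the original source): __osd_init_iobuf()
 * records the caller's line number in dr_init_at, declared via the
 * "const short line" parameter, for debugging, so the BUILD_BUG_ON()
 * above proves at compile time that __LINE__ still fits in 16 bits.
 * A minimal sketch of the same pattern, with hypothetical names:
 *
 *	struct dbg_site { unsigned short at; };
 *
 *	#define DBG_SITE_SET(s)				\
 *	do {						\
 *		BUILD_BUG_ON(__LINE__ >= (1 << 16));	\
 *		(s)->at = __LINE__;			\
 *	} while (0)
 */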
190 static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
191 struct niobuf_local *lnb)
193 LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
194 iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
198 void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
200 int rw = iobuf->dr_rw;
202 if (iobuf->dr_elapsed_valid) {
203 struct brw_stats *h = &d->od_brw_stats;
205 iobuf->dr_elapsed_valid = 0;
206 LASSERT(iobuf->dr_dev == d);
207 LASSERT(iobuf->dr_frags > 0);
208 lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_R_DIO_FRAGS+rw],
210 lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_R_IO_TIME+rw],
211 ktime_to_ms(iobuf->dr_elapsed));
217 #ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
218 static void dio_complete_routine(struct bio *bio)
220 int error = blk_status_to_errno(bio->bi_status);
222 static void dio_complete_routine(struct bio *bio, int error)
225 struct osd_bio_private *bio_private = bio->bi_private;
226 struct osd_iobuf *iobuf = bio_private->obp_iobuf;
230 /* CAVEAT EMPTOR: possibly in IRQ context
231 * DO NOT record procfs stats here!!!
233 if (unlikely(iobuf == NULL)) {
234 CERROR("***** bio->bi_private is NULL! Dump the bio contents to the console. Please report this to <https://jira.whamcloud.com/>, and probably have to reboot this node.\n");
235 CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
236 ": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
237 bio->bi_next, (unsigned long)bio->bi_flags,
238 (unsigned int)bio->bi_opf, bio->bi_vcnt, bio_idx(bio),
239 bio_sectors(bio) << 9, bio->bi_end_io,
240 atomic_read(&bio->__bi_cnt),
245 /* the check is outside of the loop for performance reasons -bzzz */
246 if (!bio_data_dir(bio)) {
247 DECLARE_BVEC_ITER_ALL(iter_all);
249 bio_for_each_segment_all(bvl, bio, iter_all) {
250 if (likely(error == 0))
251 SetPageUptodate(bvl_to_page(bvl));
252 LASSERT(PageLocked(bvl_to_page(bvl)));
254 atomic_dec(&iobuf->dr_dev->od_r_in_flight);
256 atomic_dec(&iobuf->dr_dev->od_w_in_flight);
259 /* any real error is good enough -bzzz */
260 if (error != 0 && iobuf->dr_error == 0)
261 iobuf->dr_error = error;
264 * set dr_elapsed before dr_numreqs turns to 0, otherwise
265 * it's possible that service thread will see dr_numreqs
266 * is zero, but dr_elapsed is not set yet, leading to lost
267 * data in this processing and an assertion in a subsequent
270 if (atomic_read(&iobuf->dr_numreqs) == 1) {
271 ktime_t now = ktime_get();
273 iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
274 iobuf->dr_elapsed_valid = 1;
276 if (atomic_dec_and_test(&iobuf->dr_numreqs))
277 wake_up(&iobuf->dr_wait);
279 /* Completed bios used to be chained off iobuf->dr_bios and freed in
280 * filter_clear_dreq(). It was then possible to exhaust the biovec-256
281 * mempool when serious on-disk fragmentation was encountered,
282 * deadlocking the OST. The bios are now released as soon as complete
283 * so the pool cannot be exhausted while IOs are competing. b=10076
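/*
 * Illustrative sketch (not from the original source): the completion
 * path above records dr_elapsed while dr_numreqs is still 1 and only
 * then decrements it, so a waiter that observes dr_numreqs == 0 is
 * guaranteed to see a valid dr_elapsed. The waiter side (see
 * osd_do_bio() below) pairs with it roughly as:
 *
 *	wait_event(iobuf->dr_wait, atomic_read(&iobuf->dr_numreqs) == 0);
 *	if (iobuf->dr_elapsed_valid)
 *		stats_update(iobuf->dr_elapsed);   (hypothetical helper)
 */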
288 static void record_start_io(struct osd_iobuf *iobuf, int size)
290 struct osd_device *osd = iobuf->dr_dev;
291 struct brw_stats *h = &osd->od_brw_stats;
294 atomic_inc(&iobuf->dr_numreqs);
296 if (iobuf->dr_rw == 0) {
297 atomic_inc(&osd->od_r_in_flight);
298 lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_R_RPC_HIST],
299 atomic_read(&osd->od_r_in_flight));
300 lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_R_DISK_IOSIZE],
302 } else if (iobuf->dr_rw == 1) {
303 atomic_inc(&osd->od_w_in_flight);
304 lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_W_RPC_HIST],
305 atomic_read(&osd->od_w_in_flight));
306 lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_W_DISK_IOSIZE],
313 static int osd_submit_bio(struct osd_device *osd,
314 struct osd_iobuf *iobuf,
317 struct request_queue *q;
318 unsigned int bi_size;
324 q = bio_get_queue(bio);
325 bi_size = bio_sectors(bio) << SECTOR_SHIFT;
326 /* Dang! I have to fragment this I/O */
328 "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d)\n",
329 bi_size, bio->bi_vcnt, bio->bi_max_vecs,
331 queue_max_sectors(q),
332 osd_bio_nr_segs(bio),
333 queue_max_segments(q));
335 rc = osd_bio_integrity_handle(osd, bio, iobuf);
339 record_start_io(iobuf, bi_size);
341 #ifdef HAVE_SUBMIT_BIO_2ARGS
342 submit_bio(iobuf->dr_rw ? WRITE : READ, bio);
344 bio->bi_opf |= iobuf->dr_rw;
351 static int can_be_merged(struct bio *bio, sector_t sector)
354 return bio_end_sector(bio) == sector ? 1 : 0;
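/*
 * Illustrative example (not from the original source): can_be_merged()
 * only allows appending a fragment that starts exactly where the bio
 * currently ends. For a bio covering sectors [8, 16):
 *
 *	can_be_merged(bio, 16);	 returns 1: contiguous, frag can be added
 *	can_be_merged(bio, 24);	 returns 0: gap, a new bio is needed
 */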
358 static void osd_mark_page_io_done(struct osd_iobuf *iobuf,
360 sector_t start_blocks,
363 struct niobuf_local **lnbs = iobuf->dr_lnbs;
364 int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
367 i = start_blocks / blocks_per_page;
368 end = (start_blocks + count) / blocks_per_page;
369 for ( ; i < end; i++)
370 lnbs[i]->lnb_flags |= OBD_BRW_DONE;
374 * Linux v5.12-rc1-20-ga8affc03a9b3
375 * block: rename BIO_MAX_PAGES to BIO_MAX_VECS
378 #define BIO_MAX_VECS BIO_MAX_PAGES
381 static int osd_do_bio(struct osd_device *osd, struct inode *inode,
382 struct osd_iobuf *iobuf, sector_t start_blocks,
385 int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
386 struct niobuf_local **lnbs = iobuf->dr_lnbs;
387 int npages = iobuf->dr_npages;
388 sector_t *blocks = iobuf->dr_blocks;
389 struct super_block *sb = inode->i_sb;
390 int sector_bits = sb->s_blocksize_bits - SECTOR_SHIFT;
391 unsigned int blocksize = sb->s_blocksize;
392 struct block_device *bdev = sb->s_bdev;
393 struct bio *bio = NULL;
394 int bio_start_page_idx = 0;
396 unsigned int page_offset;
399 int block_idx, block_idx_end;
400 int page_idx, page_idx_start;
403 bool integrity_enabled;
404 struct blk_plug plug;
405 int blocks_left_page;
409 LASSERT(iobuf->dr_npages == npages);
410 iobuf->dr_start_time = ktime_get();
411 integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);
414 count = npages * blocks_per_page;
415 block_idx_end = start_blocks + count;
417 blk_start_plug(&plug);
419 page_idx_start = start_blocks / blocks_per_page;
420 for (page_idx = page_idx_start, block_idx = start_blocks;
421 block_idx < block_idx_end; page_idx++,
422 block_idx += blocks_left_page) {
423 /* For cases where the filesystem's blocksize is not the
424 * same as PAGE_SIZE (e.g. ARM with PAGE_SIZE=64KB and
425 * blocksize=4KB), there will be multiple blocks to
426 * read/write per page. Also, the start and end block may
427 * not be aligned to the start and end of the page, so the
428 * first page may skip some blocks at the start ("i != 0",
429 * "blocks_left_page" is reduced), and the last page may
430 * skip some blocks at the end (limited by "count").
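/*
 * Worked example (not from the original source), assuming PAGE_SIZE=64KB
 * and blocksize=4KB, so blocks_per_page = 16, with start_blocks = 20 and
 * count = 40 (block_idx_end = 60):
 *
 *	first page:  page_idx = 1, i = 20 % 16 = 4, blocks_left_page = 12
 *	second page: page_idx = 2, i = 0, blocks_left_page = 16
 *	last page:   page_idx = 3, capped to 60 - 48 = 12 blocks
 */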
432 page = lnbs[page_idx]->lnb_page;
433 LASSERT(page_idx < iobuf->dr_npages);
435 i = block_idx % blocks_per_page;
436 blocks_left_page = blocks_per_page - i;
437 if (block_idx + blocks_left_page > block_idx_end)
438 blocks_left_page = block_idx_end - block_idx;
439 page_offset = i * blocksize;
440 for (i = 0; i < blocks_left_page;
441 i += nblocks, page_offset += blocksize * nblocks) {
444 if (blocks[block_idx + i] == 0) { /* hole */
445 LASSERTF(iobuf->dr_rw == 0,
446 "page_idx %u, block_idx %u, i %u,"
447 "start_blocks: %llu, count: %llu, npages: %d\n",
448 page_idx, block_idx, i,
449 (unsigned long long)start_blocks,
450 (unsigned long long)count, npages);
451 memset(kmap(page) + page_offset, 0, blocksize);
456 sector = (sector_t)blocks[block_idx + i] << sector_bits;
458 /* Additional contiguous file blocks? */
459 while (i + nblocks < blocks_left_page &&
460 (sector + (nblocks << sector_bits)) ==
461 ((sector_t)blocks[block_idx + i + nblocks] <<
465 if (bio && can_be_merged(bio, sector) &&
466 bio_add_page(bio, page, blocksize * nblocks,
468 continue; /* added this frag OK */
470 rc = osd_submit_bio(osd, iobuf, bio);
474 bio_start_page_idx = page_idx;
475 /* allocate new bio */
476 bio = cfs_bio_alloc(bdev,
477 min_t(unsigned short, BIO_MAX_VECS,
478 (block_idx_end - block_idx +
479 blocks_left_page - 1)),
480 iobuf->dr_rw ? REQ_OP_WRITE
484 CERROR("Can't allocate bio %u pages\n",
485 block_idx_end - block_idx +
486 blocks_left_page - 1);
490 bio_set_sector(bio, sector);
491 rc = osd_bio_init(bio, iobuf, bio_start_page_idx);
495 rc = bio_add_page(bio, page,
496 blocksize * nblocks, page_offset);
500 rc = osd_submit_bio(osd, iobuf, bio);
504 blk_finish_plug(&plug);
506 /* in order to achieve better IO throughput, we don't wait for write
507 * completion here; instead we proceed with transaction commit in
508 * parallel and wait for IO completion once transaction is stopped
509 * see osd_trans_stop() for more details -bzzz
511 if (iobuf->dr_rw == 0 || CFS_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT)) {
512 wait_event(iobuf->dr_wait,
513 atomic_read(&iobuf->dr_numreqs) == 0);
517 rc = iobuf->dr_error;
521 if (iobuf->dr_rw == 0 || CFS_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT))
522 osd_fini_iobuf(osd, iobuf);
525 if (rc == 0 && iobuf->dr_rw)
526 osd_mark_page_io_done(iobuf, inode,
527 start_blocks, count);
532 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
533 struct niobuf_local *lnb, int maxlnb)
541 int poff = offset & (PAGE_SIZE - 1);
542 int plen = PAGE_SIZE - poff;
544 if (*nrpages >= maxlnb) {
551 lnb->lnb_file_offset = offset;
552 lnb->lnb_page_offset = poff;
554 /* lnb->lnb_flags = rnb->rnb_flags; */
556 lnb->lnb_page = NULL;
558 lnb->lnb_guard_rpc = 0;
559 lnb->lnb_guard_disk = 0;
562 LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
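/*
 * Worked example (not from the original source), assuming PAGE_SIZE=4096:
 * mapping offset = 5000, len = 10000 yields three niobuf_local entries:
 *
 *	lnb[0]: lnb_file_offset = 5000,  lnb_page_offset = 904, plen = 3192
 *	lnb[1]: lnb_file_offset = 8192,  lnb_page_offset = 0,   plen = 4096
 *	lnb[2]: lnb_file_offset = 12288, lnb_page_offset = 0,   plen = 2712
 */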
573 static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
574 loff_t offset, gfp_t gfp_mask, bool cache)
576 struct osd_thread_info *oti = osd_oti_get(env);
577 struct inode *inode = osd_dt_obj(dt)->oo_inode;
578 struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
585 page = find_or_create_page(inode->i_mapping,
586 offset >> PAGE_SHIFT, gfp_mask);
589 LASSERT(!PagePrivate2(page));
590 wait_on_page_writeback(page);
592 lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
598 if (inode->i_mapping->nrpages) {
599 /* consult with pagecache, but do not create new pages */
600 /* this is normally used once */
601 page = find_lock_page(inode->i_mapping, offset >> PAGE_SHIFT);
603 wait_on_page_writeback(page);
608 LASSERT(oti->oti_dio_pages);
609 cur = oti->oti_dio_pages_used;
610 page = oti->oti_dio_pages[cur];
612 if (unlikely(!page)) {
613 LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
614 page = alloc_page(gfp_mask);
617 oti->oti_dio_pages[cur] = page;
618 SetPagePrivate2(page);
622 ClearPageUptodate(page);
623 page->index = offset >> PAGE_SHIFT;
624 oti->oti_dio_pages_used++;
630 * there are the following "locks":
641 * - lock pages, unlock
643 * - lock partial page
649 * Unlock and release pages loaded by osd_bufs_get()
651 * Unlock \a npages pages from \a lnb and drop the refcount on them.
653 * \param env thread execution environment
654 * \param dt dt object undergoing IO (OSD object + methods)
655 * \param lnb array of pages undergoing IO
656 * \param npages number of pages in \a lnb
660 static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
661 struct niobuf_local *lnb, int npages)
663 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
664 struct osd_thread_info *oti = osd_oti_get(env);
665 struct osd_iobuf *iobuf = &oti->oti_iobuf;
666 struct folio_batch fbatch;
669 osd_brw_stats_update(osd, iobuf);
670 ll_folio_batch_init(&fbatch, 0);
672 for (i = 0; i < npages; i++) {
673 struct page *page = lnb[i].lnb_page;
678 /* if the page isn't cached, then reset uptodate
681 if (PagePrivate2(page)) {
682 oti->oti_dio_pages_used--;
684 if (lnb[i].lnb_locked)
686 if (folio_batch_add_page(&fbatch, page) == 0)
687 folio_batch_release(&fbatch);
690 lnb[i].lnb_page = NULL;
693 LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);
695 /* Release any partial folio_batch */
696 folio_batch_release(&fbatch);
702 * Load and lock pages undergoing IO
704 * Pages as described in the \a lnb array are fetched (from disk or cache)
705 * and locked for IO by the caller.
707 * DLM locking protects us from write and truncate competing for same region,
708 * but partial-page truncate can leave dirty pages in the cache for ldiskfs.
709 * It's possible the writeout on such a page is in progress when we access
710 * it. It's also possible that during this writeout we put new (partial) data
711 * into the page, but won't be able to proceed in filter_commitrw_write().
712 * Therefore, just wait for writeout completion as it should be rare enough.
714 * \param env thread execution environment
715 * \param dt dt object undergoing IO (OSD object + methods)
716 * \param pos byte offset of IO start
717 * \param len number of bytes of IO
718 * \param lnb array of extents undergoing IO
719 * \param rw read or write operation, and other flags
720 * \param capa capabilities
722 * \retval pages (zero or more) loaded successfully
723 * \retval -ENOMEM on memory/page allocation error
725 static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
726 loff_t pos, ssize_t len, struct niobuf_local *lnb,
727 int maxlnb, enum dt_bufs_type rw)
729 struct osd_thread_info *oti = osd_oti_get(env);
730 struct osd_object *obj = osd_dt_obj(dt);
731 struct osd_device *osd = osd_obj2dev(obj);
732 int npages, i, iosize, rc = 0;
737 LASSERT(obj->oo_inode);
739 if (unlikely(obj->oo_destroyed))
742 rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
746 write = rw & DT_BUFS_TYPE_WRITE;
748 fsize = lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len;
749 iosize = fsize - lnb[0].lnb_file_offset;
750 fsize = max(fsize, i_size_read(obj->oo_inode));
752 cache = rw & DT_BUFS_TYPE_READAHEAD;
756 cache = osd_use_page_cache(osd);
759 if (!osd->od_writethrough_cache) {
763 if (iosize >= osd->od_writethrough_max_iosize) {
768 if (!osd->od_read_cache) {
772 if (iosize >= osd->od_readcache_max_iosize) {
777 /* don't use cache on large files */
778 if (osd->od_readcache_max_filesize &&
779 fsize > osd->od_readcache_max_filesize)
785 if (!cache && unlikely(!oti->oti_dio_pages)) {
786 OBD_ALLOC_PTR_ARRAY_LARGE(oti->oti_dio_pages,
787 PTLRPC_MAX_BRW_PAGES);
788 if (!oti->oti_dio_pages)
792 /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
793 gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
795 for (i = 0; i < npages; i++, lnb++) {
796 lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
798 if (lnb->lnb_page == NULL)
799 GOTO(cleanup, rc = -ENOMEM);
803 mark_page_accessed(lnb->lnb_page);
807 /* XXX: this version doesn't invalidate cached pages, but uses them */
808 if (!cache && write && obj->oo_inode->i_mapping->nrpages) {
809 /* do not allow data aliasing, invalidate pagecache */
810 /* XXX: can be quite expensive in mixed case */
811 invalidate_mapping_pages(obj->oo_inode->i_mapping,
812 lnb[0].lnb_file_offset >> PAGE_SHIFT,
813 lnb[npages - 1].lnb_file_offset >> PAGE_SHIFT);
821 osd_bufs_put(env, dt, lnb - i, i);
825 #ifdef HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS
826 static int osd_extend_restart_trans(handle_t *handle, int needed,
831 rc = ldiskfs_journal_ensure_credits(handle, needed,
832 ldiskfs_trans_default_revoke_credits(inode->i_sb));
833 /* this means journal has been restarted */
840 static int osd_extend_restart_trans(handle_t *handle, int needed,
845 if (ldiskfs_handle_has_enough_credits(handle, needed))
847 rc = ldiskfs_journal_extend(handle,
848 needed - handle->h_buffer_credits);
852 return ldiskfs_journal_restart(handle, needed);
854 #endif /* HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS */
856 static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
857 struct osd_device *osd, sector_t start_blocks,
858 sector_t count, loff_t *disk_size,
861 /* if the file has grown, take user_size into account */
862 if (user_size && *disk_size > user_size)
863 *disk_size = user_size;
865 spin_lock(&inode->i_lock);
866 if (*disk_size > i_size_read(inode)) {
867 i_size_write(inode, *disk_size);
868 LDISKFS_I(inode)->i_disksize = *disk_size;
869 spin_unlock(&inode->i_lock);
870 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
872 spin_unlock(&inode->i_lock);
876 * We don't do stats here as in read path because
877 * write is async: we'll do this in osd_put_bufs()
879 return osd_do_bio(osd, inode, iobuf, start_blocks, count);
882 static unsigned int osd_extent_bytes(const struct osd_device *o)
884 unsigned int *extent_bytes_ptr =
885 raw_cpu_ptr(o->od_extent_bytes_percpu);
887 if (likely(*extent_bytes_ptr))
888 return *extent_bytes_ptr;
890 /* initialize on first access or CPU hotplug */
891 if (!ldiskfs_has_feature_extents(osd_sb(o)))
892 *extent_bytes_ptr = 1 << osd_sb(o)->s_blocksize_bits;
894 *extent_bytes_ptr = OSD_DEFAULT_EXTENT_BYTES;
896 return *extent_bytes_ptr;
899 #define EXTENT_BYTES_DECAY 64
900 static void osd_decay_extent_bytes(struct osd_device *osd,
901 unsigned int new_bytes)
903 unsigned int old_bytes;
905 if (!ldiskfs_has_feature_extents(osd_sb(osd)))
908 old_bytes = osd_extent_bytes(osd);
909 *raw_cpu_ptr(osd->od_extent_bytes_percpu) =
910 (old_bytes * (EXTENT_BYTES_DECAY - 1) +
911 min(new_bytes, OSD_DEFAULT_EXTENT_BYTES) +
912 EXTENT_BYTES_DECAY - 1) / EXTENT_BYTES_DECAY;
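/*
 * Worked example (not from the original source): with EXTENT_BYTES_DECAY
 * == 64, old_bytes = 1MB and new_bytes = 4KB, one call moves the average
 * only ~1/64 of the way toward the new sample:
 *
 *	(1048576 * 63 + 4096 + 63) / 64 = 1032256
 *
 * so many consecutive small allocations are needed before the estimate
 * drops from 1MB toward 4KB.
 */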
915 static int osd_ldiskfs_map_inode_pages(struct inode *inode,
916 struct osd_iobuf *iobuf,
917 struct osd_device *osd,
921 struct thandle *thandle)
923 int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
924 int rc = 0, i = 0, mapped_index = 0;
925 struct page *fp = NULL;
927 pgoff_t max_page_index;
928 handle_t *handle = NULL;
929 sector_t start_blocks = 0, count = 0;
930 loff_t disk_size = 0;
931 struct niobuf_local **lnbs = iobuf->dr_lnbs;
932 int pages = iobuf->dr_npages;
933 sector_t *blocks = iobuf->dr_blocks;
934 struct niobuf_local *lnb1, *lnb2;
936 bool compressed = false;
939 max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
941 CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
942 inode->i_ino, pages, (*lnbs)->lnb_page->index);
944 if (osd->od_extents_dense)
945 compressed = iobuf->dr_lnbs[0]->lnb_flags & OBD_BRW_COMPRESSED;
948 flags = LDISKFS_GET_BLOCKS_CREATE;
949 handle = ldiskfs_journal_current_handle();
950 LASSERT(handle != NULL);
951 rc = osd_attach_jinode(inode);
954 disk_size = i_size_read(inode);
955 /* if disk_size is already bigger than specified user_size,
958 if (disk_size > user_size)
961 /* pages are sorted already. so, we just have to find
962 * contig. space and process them properly
965 long blen, total = 0, previous_total = 0;
966 struct ldiskfs_map_blocks map = { 0 };
969 if (fp == NULL) { /* start new extent */
970 fp = (*lnbs)->lnb_page;
973 iobuf->dr_lextents++;
976 } else if (fp->index + clen == (*lnbs)->lnb_page->index) {
977 /* continue the extent */
983 if (fp->index + clen >= max_page_index)
984 GOTO(cleanup, rc = -EFBIG);
985 /* process found extent */
986 map.m_lblk = fp->index * blocks_per_page;
987 map.m_len = blen = clen * blocks_per_page;
990 * Skip already written blocks of the start page.
991 * Note that this branch is not taken for 4K PAGE_SIZE,
992 * because dr_start_pg_wblks is always 0 for 4K PAGE_SIZE.
993 * iobuf->dr_start_pg_wblks = (start_blocks + count) %
996 if (iobuf->dr_start_pg_wblks > 0) {
997 total = previous_total = start_blocks =
998 iobuf->dr_start_pg_wblks;
999 map.m_lblk = fp->index * blocks_per_page +
1001 map.m_len = blen - total;
1002 iobuf->dr_start_pg_wblks = 0;
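/*
 * Worked example (not from the original source), assuming PAGE_SIZE=64KB
 * and blocksize=4KB (blocks_per_page = 16): if the previous attempt was
 * interrupted by a transaction restart after 52 blocks, then
 * dr_start_pg_wblks = 52 % 16 = 4, and the retry starts mapping at
 * map.m_lblk = fp->index * 16 + 4, skipping the 4 already-written blocks
 * of the start page.
 */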
1007 * We might restart the transaction for block allocations.
1008 * To preserve data ordered mode, the IO issue, disk
1009 * size update and block allocations need to be within the same
1010 * transaction to ensure consistency.
1012 if (handle && check_credits) {
1013 struct osd_thandle *oh;
1015 LASSERT(thandle != NULL);
1016 oh = container_of(thandle, struct osd_thandle,
1019 * only issue IO if a transaction restart is needed,
1020 * as updating the disk size needs to hold the inode lock; we
1021 * want to avoid that as much as possible.
1023 if (oh->oh_declared_ext <= 0) {
1024 rc = osd_ldiskfs_map_write(inode,
1025 iobuf, osd, start_blocks,
1026 count, &disk_size, user_size);
1029 thandle->th_restart_tran = 1;
1030 iobuf->dr_start_pg_wblks = (start_blocks +
1031 count) % blocks_per_page;
1032 GOTO(cleanup, rc = -EAGAIN);
1035 if (CFS_FAIL_CHECK(OBD_FAIL_OST_RESTART_IO))
1036 oh->oh_declared_ext = 0;
1038 oh->oh_declared_ext--;
1040 #ifdef LDISKFS_GET_BLOCKS_VERY_DENSE
1041 if (osd->od_extents_dense) {
1042 if (CFS_FAIL_CHECK(OBD_FAIL_OSD_MARK_COMPRESSED))
1043 flags |= LDISKFS_GET_BLOCKS_VERY_DENSE;
1045 flags |= LDISKFS_GET_BLOCKS_VERY_DENSE;
1050 rc = ldiskfs_map_blocks(handle, inode, &map, flags);
1051 time = ktime_sub(ktime_get(), time);
1054 struct brw_stats *h = &osd->od_brw_stats;
1057 iobuf->dr_pextents++;
1059 idx = map.m_flags & LDISKFS_MAP_NEW ?
1060 BRW_ALLOC_TIME : BRW_MAP_TIME;
1061 lprocfs_oh_tally_log2_pcpu(&h->bs_hist[idx],
1064 for (; total < blen && c < map.m_len; c++, total++) {
1066 *(blocks + total) = 0;
1070 if ((map.m_flags & LDISKFS_MAP_UNWRITTEN) &&
1072 /* don't try to read allocated, but
1073 * unwritten blocks, instead fill the
1074 * patches with zeros in osd_do_bio() */
1075 *(blocks + total) = 0;
1078 *(blocks + total) = map.m_pblk + c;
1079 /* unmap any possible underlying
1080 * metadata from the block device
1083 if ((map.m_flags & LDISKFS_MAP_NEW) &&
1085 clean_bdev_aliases(inode->i_sb->s_bdev,
1091 if (rc == 0 && create) {
1092 count += (total - previous_total);
1093 mapped_index = (start_blocks + count + blocks_per_page -
1094 1) / blocks_per_page - 1;
1095 lnb1 = iobuf->dr_lnbs[i - clen];
1096 lnb2 = iobuf->dr_lnbs[mapped_index];
1097 size1 = lnb1->lnb_file_offset -
1098 (lnb1->lnb_file_offset % PAGE_SIZE) +
1099 (total << inode->i_blkbits);
1100 size2 = lnb2->lnb_file_offset + lnb2->lnb_len;
1104 if (size1 > disk_size)
1108 if (rc == 0 && total < blen) {
1110 * decay extent bytes if we could not
1111 * allocate the extent in one attempt.
1113 osd_decay_extent_bytes(osd,
1114 (total - previous_total) << inode->i_blkbits);
1115 map.m_lblk = fp->index * blocks_per_page + total;
1116 map.m_len = blen - total;
1117 previous_total = total;
1123 * decay extent bytes if we could allocate
1124 * a good large extent.
1126 if (total - previous_total >=
1127 osd_extent_bytes(osd) >> inode->i_blkbits)
1128 osd_decay_extent_bytes(osd,
1129 (total - previous_total) << inode->i_blkbits);
1130 /* look for next extent */
1132 blocks += blocks_per_page * clen;
1135 if (rc == 0 && create &&
1136 start_blocks < pages * blocks_per_page) {
1137 rc = osd_ldiskfs_map_write(inode, iobuf, osd, start_blocks,
1138 count, &disk_size, user_size);
1139 LASSERT(start_blocks + count == pages * blocks_per_page);
1144 static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
1145 struct niobuf_local *lnb, int npages)
1147 struct osd_thread_info *oti = osd_oti_get(env);
1148 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1149 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1150 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1159 rc = osd_init_iobuf(osd, iobuf, inode, 0, npages);
1160 if (unlikely(rc != 0))
1163 isize = i_size_read(inode);
1164 maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;
1166 start = ktime_get();
1167 for (i = 0; i < npages; i++) {
1170 * till commit, the content of the page is undefined;
1171 * we'll set it uptodate once the bulk is done, otherwise
1172 * subsequent reads can access non-stable data
1174 ClearPageUptodate(lnb[i].lnb_page);
1176 if (lnb[i].lnb_len == PAGE_SIZE)
1179 if (maxidx >= lnb[i].lnb_page->index) {
1180 osd_iobuf_add_page(iobuf, &lnb[i]);
1183 char *p = kmap(lnb[i].lnb_page);
1185 off = lnb[i].lnb_page_offset;
1188 off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
1191 memset(p + off, 0, PAGE_SIZE - off);
1192 kunmap(lnb[i].lnb_page);
1196 timediff = ktime_us_delta(end, start);
1197 lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1199 if (iobuf->dr_npages) {
1200 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1202 if (likely(rc == 0)) {
1203 rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1204 /* do IO stats for preparation reads */
1205 osd_fini_iobuf(osd, iobuf);
1212 #define DECLARE_MM_SEGMENT_T(name) mm_segment_t name
1213 #define access_set_kernel(saved_fs, fei) \
1215 saved_fs = get_fs(); \
1216 set_fs(KERNEL_DS); \
1218 #define access_unset_kernel(saved_fs, fei) set_fs((saved_fs))
1220 #define DECLARE_MM_SEGMENT_T(name)
1221 #define access_set_kernel(saved_fs, fei) \
1222 (fei)->fi_flags |= LDISKFS_FIEMAP_FLAG_MEMCPY
1223 #define access_unset_kernel(saved_fs, fei) \
1224 (fei)->fi_flags &= ~(LDISKFS_FIEMAP_FLAG_MEMCPY)
1225 #endif /* KERNEL_DS */
1227 static int osd_is_mapped(struct dt_object *dt, __u64 offset,
1228 struct ldiskfs_map_blocks *map)
1230 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1232 sector_t block = osd_i_blocks(inode, offset);
1235 if (i_size_read(inode) == 0)
1238 /* Beyond EOF, must not be mapped */
1239 if ((i_size_read(inode) - 1) < offset)
1242 end = map->m_lblk + map->m_len;
1243 if (block >= map->m_lblk && block < end)
1244 return map->m_flags & LDISKFS_MAP_MAPPED;
1246 map->m_lblk = block;
1247 map->m_len = INT_MAX;
1249 mapped = ldiskfs_map_blocks(NULL, inode, map, 0);
1255 return map->m_flags & LDISKFS_MAP_MAPPED;
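/*
 * Illustrative note (not from the original source): the map argument is
 * reused across calls, so consecutive offsets that fall into the same
 * mapped range avoid repeated ldiskfs_map_blocks() lookups:
 *
 *	struct ldiskfs_map_blocks map = { 0 };
 *
 *	osd_is_mapped(dt, off1, &map);	 queries ldiskfs
 *	osd_is_mapped(dt, off2, &map);	 answered from the cached map if
 *					 off2 is in [m_lblk, m_lblk + m_len)
 */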
1258 #define MAX_EXTENTS_PER_WRITE 100
1259 static int osd_declare_write_commit(const struct lu_env *env,
1260 struct dt_object *dt,
1261 struct niobuf_local *lnb, int npages,
1262 struct thandle *handle)
1264 const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1265 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1266 struct osd_thandle *oh;
1267 int extents = 0, new_meta = 0;
1268 int depth, new_blocks = 0;
1270 int dirty_groups = 0;
1273 long long quota_space = 0;
1274 struct ldiskfs_map_blocks map;
1275 enum osd_quota_local_flags local_flags = 0;
1276 enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
1277 unsigned int extent_bytes;
1278 loff_t extent_start = 0;
1279 loff_t extent_end = 0;
1282 LASSERT(handle != NULL);
1283 oh = container_of(handle, struct osd_thandle, ot_super);
1284 LASSERT(oh->ot_handle == NULL);
1287 * We track a decaying average of extent bytes per filesystem;
1288 * most of the time it will be 1MB, but as the filesystem becomes
1289 * heavily fragmented it will be reduced to 4KB at the worst.
1291 extent_bytes = osd_extent_bytes(osd);
1292 LASSERT(extent_bytes >= osd_sb(osd)->s_blocksize);
1294 /* calculate number of extents (probably better to pass nb) */
1295 for (i = 0; i < npages; i++) {
1296 /* ignore quota for the whole request if any page is from
1297 * client cache or written by root.
1299 * XXX we could handle this on per-lnb basis as done by
1302 if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
1303 (lnb[i].lnb_flags & OBD_BRW_SYS_RESOURCE) ||
1304 !(lnb[i].lnb_flags & OBD_BRW_SYNC))
1305 declare_flags |= OSD_QID_FORCE;
1308 * Converting an unwritten extent might need to split extents, which could
1311 if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &map) &&
1312 !(map.m_flags & LDISKFS_MAP_UNWRITTEN)) {
1313 lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1317 if (lnb[i].lnb_flags & OBD_BRW_DONE) {
1318 lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1322 /* count only unmapped changes */
1324 if (lnb[i].lnb_file_offset != extent_end || extent_end == 0) {
1325 if (extent_end != 0)
1326 extents += (extent_end - extent_start +
1327 extent_bytes - 1) / extent_bytes;
1328 extent_start = lnb[i].lnb_file_offset;
1329 extent_end = lnb[i].lnb_file_offset + lnb[i].lnb_len;
1331 extent_end += lnb[i].lnb_len;
1334 quota_space += PAGE_SIZE;
1337 credits++; /* inode */
1339 * overwrite case, no need to modify tree and
1345 extents += (extent_end - extent_start +
1346 extent_bytes - 1) / extent_bytes;
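/*
 * Worked example (not from the original source): with extent_bytes = 1MB
 * and a single unmapped contiguous range [0, 3MB), the loop leaves
 * extent_start = 0 and extent_end = 3MB, so the rounding above yields
 *
 *	extents += (3MB + 1MB - 1) / 1MB = 3
 *
 * i.e. one worst-case extent per decayed-average-sized chunk.
 */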
1348 * with system space usage growing up, the mballoc code won't
1349 * try as hard to scan block groups to find the best-aligned free
1350 * extent. So extent bytes per extent could decay to a
1351 * very small value, and this could make us reserve too many credits.
1352 * We can be more optimistic in the credit reservations: even
1353 * in a case where the filesystem is nearly full, it is extremely
1354 * unlikely that the worst case would ever be hit.
1356 if (extents > MAX_EXTENTS_PER_WRITE)
1357 extents = MAX_EXTENTS_PER_WRITE;
1360 * If we add a single extent, then in the worst case each tree
1361 * level index/leaf needs to be changed if the tree splits.
1362 * If more extents are inserted, they could cause the whole tree
1363 * to split more than once, but this is really rare.
1365 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
1367 * many concurrent threads may grow the tree by the time
1368 * our transaction starts, so consider 2 a minimum depth.
1370 depth = ext_depth(inode);
1371 depth = min(max(depth, 1) + 1, LDISKFS_MAX_EXTENT_DEPTH);
1373 credits += depth * 2 * extents;
1376 credits += depth * 3 * extents;
1377 new_meta = depth * 2 * extents;
1381 * With N contiguous data blocks, we need at most
1382 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
1383 * 2 dindirect blocks, and 1 tindirect block
1385 new_meta = DIV_ROUND_UP(new_blocks,
1386 LDISKFS_ADDR_PER_BLOCK(inode->i_sb)) + 4;
1387 credits += new_meta;
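/*
 * Worked example (not from the original source), assuming a 4KB block
 * size so LDISKFS_ADDR_PER_BLOCK == 1024: for new_blocks = 2050,
 *
 *	new_meta = DIV_ROUND_UP(2050, 1024) + 4 = 3 + 4 = 7
 *
 * i.e. up to three indirect blocks plus the fixed dindirect/tindirect
 * reservation described in the comment above.
 */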
1389 dirty_groups += (extents + new_meta);
1391 oh->oh_declared_ext = extents;
1393 /* quota space for metadata blocks */
1394 quota_space += new_meta * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1396 /* quota space should be reported in 1K blocks */
1397 quota_space = toqb(quota_space);
1399 /* each new block can go in different group (bitmap + gd) */
1401 /* we can't dirty more bitmap blocks than exist */
1402 if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1403 credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1405 credits += dirty_groups;
1407 /* we can't dirty more gd blocks than exist */
1408 if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1409 credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1411 credits += dirty_groups;
1414 "%s: inode #%lu extent_bytes %u extents %d credits %d\n",
1415 osd_ino2name(inode), inode->i_ino, extent_bytes, extents,
1419 osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1421 /* make sure the over quota flags were not set */
1422 lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;
1424 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1425 i_projid_read(inode), quota_space, oh,
1426 osd_dt_obj(dt), &local_flags, declare_flags);
1428 /* we only need to store the overquota flags in the first lnb for
1429 * now; once we support multiple-object BRW, this code needs to be
1432 if (local_flags & QUOTA_FL_OVER_USRQUOTA)
1433 lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
1434 if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
1435 lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
1436 if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
1437 lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
1438 if (local_flags & QUOTA_FL_ROOT_PRJQUOTA)
1439 lnb[0].lnb_flags |= OBD_BRW_ROOT_PRJQUOTA;
1442 rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);
1447 /* Check if a block is allocated or not */
1448 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1449 struct niobuf_local *lnb, int npages,
1450 struct thandle *thandle, __u64 user_size)
1452 struct osd_thread_info *oti = osd_oti_get(env);
1453 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1454 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1455 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1456 int rc = 0, i, check_credits = 0;
1460 rc = osd_init_iobuf(osd, iobuf, inode, 1, npages);
1461 if (unlikely(rc != 0))
1464 dquot_initialize(inode);
1466 for (i = 0; i < npages; i++) {
1467 if (lnb[i].lnb_rc == -ENOSPC &&
1468 (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
1469 /* Allow the write to proceed if overwriting an
1475 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1476 CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1478 LASSERT(lnb[i].lnb_page);
1479 generic_error_remove_page(inode->i_mapping,
1484 if (lnb[i].lnb_flags & OBD_BRW_DONE)
1487 if (!(lnb[i].lnb_flags & OBD_BRW_MAPPED))
1490 LASSERT(PageLocked(lnb[i].lnb_page));
1491 LASSERT(!PageWriteback(lnb[i].lnb_page));
1494 * Since write and truncate are serialized by oo_sem, even
1495 * partial-page truncate should not leave dirty pages in the
1498 LASSERT(!PageDirty(lnb[i].lnb_page));
1500 SetPageUptodate(lnb[i].lnb_page);
1502 osd_iobuf_add_page(iobuf, &lnb[i]);
1505 osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
1507 if (CFS_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1509 } else if (iobuf->dr_npages > 0) {
1510 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd,
1515 /* no pages to write, no transno is needed */
1516 thandle->th_local = 1;
1519 if (rc != 0 && !thandle->th_restart_tran)
1520 osd_fini_iobuf(osd, iobuf);
1522 osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1524 if (unlikely(rc != 0 && !thandle->th_restart_tran)) {
1525 /* if write fails, we should drop pages from the cache */
1526 for (i = 0; i < npages; i++) {
1527 if (lnb[i].lnb_page == NULL)
1529 if (!PagePrivate2(lnb[i].lnb_page)) {
1530 LASSERT(PageLocked(lnb[i].lnb_page));
1531 generic_error_remove_page(inode->i_mapping,
1540 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1541 struct niobuf_local *lnb, int npages)
1543 struct osd_thread_info *oti = osd_oti_get(env);
1544 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1545 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1546 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1547 int rc = 0, i, cache_hits = 0, cache_misses = 0;
1554 rc = osd_init_iobuf(osd, iobuf, inode, 0, npages);
1555 if (unlikely(rc != 0))
1558 isize = i_size_read(inode);
1560 start = ktime_get();
1561 for (i = 0; i < npages; i++) {
1563 if (isize <= lnb[i].lnb_file_offset)
1564 /* If there's no more data, abort early.
1565 * lnb->lnb_rc == 0, so it's easy to detect later.
1569 /* instead of checking whether we go beyond isize, send complete
1570 * pages all the time
1572 lnb[i].lnb_rc = lnb[i].lnb_len;
1574 /* Bypass disk read if fail_loc is set properly */
1575 if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_OST_FAKE_RW))
1576 SetPageUptodate(lnb[i].lnb_page);
1578 if (PageUptodate(lnb[i].lnb_page)) {
1580 unlock_page(lnb[i].lnb_page);
1583 osd_iobuf_add_page(iobuf, &lnb[i]);
1585 /* no need to unlock in osd_bufs_put(); the sooner the page is
1586 * unlocked, the earlier another client can access it.
1587 * notice the real unlock_page() can be called a few lines
1588 * below after osd_do_bio(). lnb is per-thread, so it's
1589 * fine to have PG_locked and lnb_locked inconsistent here
1591 lnb[i].lnb_locked = 0;
1594 timediff = ktime_us_delta(end, start);
1595 lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1597 if (cache_hits != 0)
1598 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1600 if (cache_misses != 0)
1601 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1603 if (cache_hits + cache_misses != 0)
1604 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1605 cache_hits + cache_misses);
1607 if (iobuf->dr_npages) {
1608 rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
1611 rc = osd_do_bio(osd, inode, iobuf, 0, 0);
1613 /* IO stats will be done in osd_bufs_put() */
1615 /* early release to let others read data during the bulk */
1616 for (i = 0; i < iobuf->dr_npages; i++) {
1617 struct page *page = iobuf->dr_lnbs[i]->lnb_page;
1618 LASSERT(PageLocked(page));
1619 if (!PagePrivate2(page))
1628 * XXX: Another layering violation for now.
1630 * We don't want to use ->f_op->read methods, because generic file write
1632 * - serializes on ->i_sem, and
1634 * - does a lot of extra work like balance_dirty_pages(),
1636 * which doesn't work for globally shared files like /last_rcvd.
1638 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1640 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1642 memcpy(buffer, (char *)ei->i_data, buflen);
1647 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1649 struct buffer_head *bh;
1650 unsigned long block;
1656 /* prevent reading after eof */
1657 spin_lock(&inode->i_lock);
1658 if (i_size_read(inode) < *offs + size) {
1659 loff_t diff = i_size_read(inode) - *offs;
1661 spin_unlock(&inode->i_lock);
1664 "size %llu is too short to read @%llu\n",
1665 i_size_read(inode), *offs);
1667 } else if (diff == 0) {
1673 spin_unlock(&inode->i_lock);
1676 blocksize = 1 << inode->i_blkbits;
1679 block = *offs >> inode->i_blkbits;
1680 boffs = *offs & (blocksize - 1);
1681 csize = min(blocksize - boffs, size);
1682 bh = __ldiskfs_bread(NULL, inode, block, 0);
1684 CERROR("%s: can't read %u@%llu on ino %lu: rc = %ld\n",
1685 osd_ino2name(inode), csize, *offs, inode->i_ino,
1691 memcpy(buf, bh->b_data + boffs, csize);
1694 memset(buf, 0, csize);
1704 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1705 struct lu_buf *buf, loff_t *pos)
1707 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1710 /* Read small symlink from inode body as we need to maintain correct
1711 * on-disk symlinks for ldiskfs.
1713 if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
1714 loff_t size = i_size_read(inode);
1716 if (buf->lb_len < size)
1719 if (size < sizeof(LDISKFS_I(inode)->i_data))
1720 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
1722 rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
1724 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1730 static inline int osd_extents_enabled(struct super_block *sb,
1731 struct inode *inode)
1733 if (inode != NULL) {
1734 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1736 } else if (ldiskfs_has_feature_extents(sb)) {
1742 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1743 const loff_t size, const loff_t pos,
1746 int credits, bits, bs, i;
1748 bits = sb->s_blocksize_bits;
1751 /* legacy blockmap: 3 levels * 3 (bitmap,gd,itself)
1752 * we do not expect blockmaps on large files,
1753 * so let's shrink it to 2 levels (4GB files)
1756 /* this is default reservation: 2 levels */
1757 credits = (blocks + 2) * 3;
1759 /* actual offset is unknown, hard to optimize */
1763 /* now check for few specific cases to optimize */
1764 if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1767 /* allocate if not allocated */
1768 if (inode == NULL) {
1769 credits += blocks * 2;
1772 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1773 LASSERT(i < LDISKFS_NDIR_BLOCKS);
1774 if (LDISKFS_I(inode)->i_data[i] == 0)
1777 } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1778 /* single indirect */
1779 credits = blocks * 3;
1780 if (inode == NULL ||
1781 LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1784 /* The indirect block may be modified. */
1791 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1792 const struct lu_buf *buf, loff_t _pos,
1793 struct thandle *handle)
1795 struct osd_object *obj = osd_dt_obj(dt);
1796 struct inode *inode = obj->oo_inode;
1797 struct super_block *sb = osd_sb(osd_obj2dev(obj));
1798 struct osd_thandle *oh;
1799 int rc = 0, est = 0, credits, blocks, allocated = 0;
1805 LASSERT(buf != NULL);
1806 LASSERT(handle != NULL);
1808 oh = container_of(handle, struct osd_thandle, ot_super);
1809 LASSERT(oh->ot_handle == NULL);
1812 bits = sb->s_blocksize_bits;
1815 if (osd_tx_was_declared(env, oh, dt, DTO_WRITE_BASE, _pos))
1819 /* if this is an append, then we
1820 * should expect cross-block record
1827 /* blocks to modify */
1828 blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1829 LASSERT(blocks > 0);
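/*
 * Worked example (not from the original source), assuming bs = 4096
 * (bits = 12): a write of size = 6000 at pos = 1000 spans
 *
 *	blocks = ((1000 + 6000 + 4095) >> 12) - (1000 >> 12) = 2 - 0 = 2
 *
 * i.e. two blocks are modified even though size is less than 2 * bs.
 */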
1831 if (inode != NULL && _pos != -1) {
1832 /* object size in blocks */
1833 est = (i_size_read(inode) + bs - 1) >> bits;
1834 allocated = inode->i_blocks >> (bits - 9);
1835 if (pos + size <= i_size_read(inode) && est <= allocated) {
1836 /* looks like an overwrite, no need to modify tree */
1838 /* no need to modify i_size */
1843 if (osd_extents_enabled(sb, inode)) {
1845 * many concurrent threads may grow tree by the time
1846 * our transaction starts. so, consider 2 is a min depth
1847 * for every level we may need to allocate a new block
1848 * and take some entries from the old one. so, 3 blocks
1849 * to allocate (bitmap, gd, itself) + old block - 4 per
1852 depth = inode != NULL ? ext_depth(inode) : 0;
1853 depth = min(max(depth, 1) + 3, LDISKFS_MAX_EXTENT_DEPTH);
1855 /* if not append, then split may need to modify
1856 * existing blocks moving entries into the new ones
1860 /* blocks to store data: bitmap,gd,itself */
1861 credits += blocks * 3;
1863 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1865 /* if inode is created as part of the transaction,
1866 * then it's counted already by the creation method
1873 osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1875 /* dt_declare_write() is usually called for system objects, such
1876 * as llog or last_rcvd files. We needn't enforce quota on those
1877 * objects, so always set the lqi_space as 0.
1880 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1882 i_projid_read(inode), 0,
1883 oh, obj, NULL, OSD_QID_BLK);
1886 rc = osd_trunc_lock(obj, oh, true);
1891 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1893 /* LU-2634: clear the extent format for fast symlink */
1894 ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1896 /* Copying the NUL byte terminating the link target as well */
1897 memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen + 1);
1898 spin_lock(&inode->i_lock);
1899 LDISKFS_I(inode)->i_disksize = buflen;
1900 i_size_write(inode, buflen);
1901 spin_unlock(&inode->i_lock);
1902 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1907 static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
1908 int bufsize, int write_NUL, loff_t *offs,
1911 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1912 struct buffer_head *bh = NULL;
1913 loff_t offset = *offs;
1914 loff_t new_size = i_size_read(inode);
1915 unsigned long block;
1916 int blocksize = 1 << inode->i_blkbits;
1917 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1921 int dirty_inode = 0;
1922 bool create, sparse, sync = false;
1926 * long symlink write does not count the NUL terminator in
1927 * bufsize; we write it, and the inode's file size does not
1928 * count the NUL terminator either.
1930 ((char *)buf)[bufsize] = '\0';
1934 /* only the first flag-set matters */
1935 dirty_inode = !test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
1938 /* sparse checking is racy, but sparse is a very rare case; leave as is */
1939 sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
1940 ((new_size - 1) >> inode->i_blkbits) + 1);
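/*
 * Worked example (not from the original source), assuming 4KB blocks
 * (i_blkbits = 12): i_blocks counts 512-byte sectors, so
 * i_blocks >> (12 - 9) gives allocated 4KB blocks. For new_size = 1MB
 * the file needs ((1048576 - 1) >> 12) + 1 = 256 blocks; if only, say,
 * 128 are actually allocated, the file is considered sparse and the
 * write takes the sync path below.
 */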
1942 while (bufsize > 0) {
1943 int credits = handle->h_buffer_credits;
1944 unsigned long last_block = (new_size == 0) ? 0 :
1945 (new_size - 1) >> inode->i_blkbits;
1950 block = offset >> inode->i_blkbits;
1951 boffs = offset & (blocksize - 1);
1952 size = min(blocksize - boffs, bufsize);
1953 sync = (block > last_block || new_size == 0 || sparse);
1956 down(&ei->i_append_sem);
1958 bh = __ldiskfs_bread(handle, inode, block, 0);
1960 if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
1962 "%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
1963 osd_ino2name(inode),
1964 offset, block, bufsize, *offs);
1966 if (IS_ERR_OR_NULL(bh)) {
1967 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1968 int flags = LDISKFS_GET_BLOCKS_CREATE;
1970 /* while the file system is being mounted, avoid
1971 * preallocation, otherwise mount can take a long
1972 * time as the mballoc cache is cold.
1973 * XXX: this is a workaround until we have a proper
1975 * XXX: works with extent-based files only */
1976 if (!osd->od_cl_seq)
1977 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
1978 bh = __ldiskfs_bread(handle, inode, block, flags);
1982 up(&ei->i_append_sem);
1987 if (IS_ERR_OR_NULL(bh)) {
1996 "%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
1997 osd_ino2name(inode), offset, block, bufsize,
1998 *offs, credits, handle->h_buffer_credits, err);
2002 err = osd_ldiskfs_journal_get_write_access(handle, inode->i_sb,
2006 CERROR("journal_get_write_access() returned error %d\n",
2010 LASSERTF(boffs + size <= bh->b_size,
2011 "boffs %d size %d bh->b_size %lu\n",
2012 boffs, size, (unsigned long)bh->b_size);
2014 memset(bh->b_data, 0, bh->b_size);
2016 up(&ei->i_append_sem);
2020 memcpy(bh->b_data + boffs, buf, size);
2021 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2025 if (offset + size > new_size)
2026 new_size = offset + size;
2032 up(&ei->i_append_sem);
2039 /* correct in-core and on-disk sizes */
2040 if (new_size > i_size_read(inode)) {
2041 spin_lock(&inode->i_lock);
2042 if (new_size > i_size_read(inode))
2043 i_size_write(inode, new_size);
2044 if (i_size_read(inode) > ei->i_disksize) {
2045 ei->i_disksize = i_size_read(inode);
2048 spin_unlock(&inode->i_lock);
2051 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2058 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
2059 const struct lu_buf *buf, loff_t *pos,
2060 struct thandle *handle)
2062 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2063 struct osd_thandle *oh;
2067 LASSERT(dt_object_exists(dt));
2069 LASSERT(handle != NULL);
2070 LASSERT(inode != NULL);
2071 dquot_initialize(inode);
2073 /* XXX: don't check: one declared chunk can be used many times */
2074 /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
2076 oh = container_of(handle, struct osd_thandle, ot_super);
2077 LASSERT(oh->ot_handle->h_transaction != NULL);
2078 osd_trans_exec_op(env, handle, OSD_OT_WRITE);
2080 /* Write small symlink to inode body as we need to maintain correct
2081 * on-disk symlinks for ldiskfs.
2082 * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
2083 * does not count it in.
2085 is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
2086 if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
2087 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
2089 result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
2090 is_link, pos, oh->ot_handle);
2092 result = buf->lb_len;
2094 osd_trans_exec_check(env, handle, OSD_OT_WRITE);
2099 static int osd_declare_fallocate(const struct lu_env *env,
2100 struct dt_object *dt, __u64 start, __u64 end,
2101 int mode, struct thandle *th)
2103 struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2104 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2105 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2106 long long quota_space = 0;
2107 /* 5 is max tree depth. (inode + 4 index blocks) */
2114 * mode == 0 (which is standard prealloc) and PUNCH are supported.
2115 * The rest of the mode options are not supported yet.
2117 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2118 RETURN(-EOPNOTSUPP);
2120 /* disable fallocate completely */
2121 if (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks < 0)
2122 RETURN(-EOPNOTSUPP);
2127 if ((mode & FALLOC_FL_PUNCH_HOLE) == 0) {
2128 /* quota space for metadata blocks
2129 * approximate metadata estimate should be good enough.
2131 quota_space += PAGE_SIZE;
2132 quota_space += depth * LDISKFS_BLOCK_SIZE(osd_sb(osd));
2134 /* quota space should be reported in 1K blocks */
2135 quota_space = toqb(quota_space) + toqb(end - start) +
2136 LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
2139 * We don't need to reserve credits for whole fallocate here.
2140 * We reserve space only for metadata. Fallocate credits are
2141 * extended as required
2144 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2145 i_projid_read(inode), quota_space, oh,
2146 osd_dt_obj(dt), NULL, OSD_QID_BLK);
2151 * Both hole punch and allocation may need a few transactions
2152 * to complete, so we have to avoid concurrent writes/truncates
2153 * as we can't release object lock from within ldiskfs.
2154 * Notice locking order: transaction start, then lock object
2155 * (don't confuse object lock dt_{read|write}_lock() with the
2158 rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
2163 static int osd_fallocate_preallocate(const struct lu_env *env,
2164 struct dt_object *dt,
2165 __u64 start, __u64 end, int mode,
2168 struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
2169 handle_t *handle = ldiskfs_journal_current_handle();
2170 unsigned int save_credits = oh->ot_credits;
2171 struct osd_object *obj = osd_dt_obj(dt);
2172 struct inode *inode = obj->oo_inode;
2173 struct ldiskfs_map_blocks map;
2174 unsigned int credits;
2175 ldiskfs_lblk_t blen;
2176 ldiskfs_lblk_t boff;
2177 loff_t new_size = 0;
2184 LASSERT(dt_object_exists(dt));
2185 LASSERT(osd_invariant(obj));
2186 LASSERT(inode != NULL);
2188 CDEBUG(D_INODE, "fallocate: inode #%lu: start %llu end %llu mode %d\n",
2189 inode->i_ino, start, end, mode);
2191 dquot_initialize(inode);
2195 boff = osd_i_blocks(inode, start);
2196 blen = osd_i_blocks(inode, ALIGN(end, 1 << inode->i_blkbits)) - boff;
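/*
 * Worked example (not from the original source), assuming 4KB blocks:
 * fallocate over [start = 6000, end = 20000) maps to
 *
 *	boff = 6000 >> 12 = 1
 *	blen = (ALIGN(20000, 4096) >> 12) - 1 = (20480 >> 12) - 1 = 4
 *
 * so blocks 1..4 are allocated, the byte range rounded out to block
 * boundaries.
 */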
2198 /* Create and mark new extents as either zero or unwritten */
2199 flags = (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks ||
2200 !ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) ?
2201 LDISKFS_GET_BLOCKS_CREATE_ZERO :
2202 LDISKFS_GET_BLOCKS_CREATE_UNWRIT_EXT;
2203 #ifdef LDISKFS_GET_BLOCKS_KEEP_SIZE
2204 if (mode & FALLOC_FL_KEEP_SIZE)
2205 flags |= LDISKFS_GET_BLOCKS_KEEP_SIZE;
2209 if (!(mode & FALLOC_FL_KEEP_SIZE) && (end > i_size_read(inode) ||
2210 end > LDISKFS_I(inode)->i_disksize)) {
2212 rc = inode_newsize_ok(inode, new_size);
2217 inode_dio_wait(inode);
2222 /* Don't normalize the request if it can fit in one extent so
2223 * that it doesn't get unnecessarily split into multiple extents.
2225 if (blen <= EXT_UNWRITTEN_MAX_LEN)
2226 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
2229 * credits to insert 1 extent into extent tree.
2231 credits = ldiskfs_chunk_trans_blocks(inode, blen);
2232 depth = ext_depth(inode);
2234 while (rc >= 0 && blen) {
2238 * Recalculate credits when extent tree depth changes.
2240 if (depth != ext_depth(inode)) {
2241 credits = ldiskfs_chunk_trans_blocks(inode, blen);
2242 depth = ext_depth(inode);
2245 /* TODO: quota check */
2246 rc = osd_extend_restart_trans(handle, credits, inode);
2250 rc = ldiskfs_map_blocks(handle, inode, &map, flags);
2253 "inode #%lu: block %u: len %u: ldiskfs_map_blocks returned %d\n",
2254 inode->i_ino, map.m_lblk, map.m_len, rc);
2255 ldiskfs_mark_inode_dirty(handle, inode);
2260 map.m_len = blen = blen - rc;
2261 epos = (loff_t)map.m_lblk << inode->i_blkbits;
2262 inode_set_ctime_current(inode);
2266 if (ldiskfs_update_inode_size(inode, epos) & 0x1)
2267 inode_set_mtime_to_ts(inode,
2268 inode_get_ctime(inode));
2269 #ifdef LDISKFS_EOFBLOCKS_FL
2271 if (epos > inode->i_size)
2272 ldiskfs_set_inode_flag(inode,
2273 LDISKFS_INODE_EOFBLOCKS);
2277 ldiskfs_mark_inode_dirty(handle, inode);
2281 /* extend credits if needed for operations such as attribute set */
2283 rc = osd_extend_restart_trans(handle, save_credits, inode);
2285 inode_unlock(inode);
2290 static int osd_fallocate_punch(const struct lu_env *env, struct dt_object *dt,
2291 __u64 start, __u64 end, int mode,
2294 struct osd_object *obj = osd_dt_obj(dt);
2295 struct inode *inode = obj->oo_inode;
2296 struct osd_access_lock *al;
2297 struct osd_thandle *oh;
2298 int rc = 0, found = 0;
2302 LASSERT(dt_object_exists(dt));
2303 LASSERT(osd_invariant(obj));
2304 LASSERT(inode != NULL);
2306 dquot_initialize(inode);
2309 oh = container_of(th, struct osd_thandle, ot_super);
2310 LASSERT(oh->ot_handle->h_transaction != NULL);
2312 list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2313 if (obj != al->tl_obj)
2315 LASSERT(al->tl_shared == 0);
2317 /* do actual punch in osd_trans_stop() */
2318 al->tl_start = start;
2321 al->tl_punch = true;
2328 static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
2329 __u64 start, __u64 end, int mode, struct thandle *th)
2335 if (mode & FALLOC_FL_PUNCH_HOLE) {
2337 rc = osd_fallocate_punch(env, dt, start, end, mode, th);
2339 /* standard preallocate */
2340 rc = osd_fallocate_preallocate(env, dt, start, end, mode, th);
2345 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
2346 __u64 start, __u64 end, struct thandle *th)
2348 struct osd_thandle *oh;
2349 struct osd_object *obj = osd_dt_obj(dt);
2350 struct inode *inode;
2355 oh = container_of(th, struct osd_thandle, ot_super);
2358 * we don't need to reserve credits for the whole truncate:
2359 * it's not possible, as truncate may need to free too many
2360 * blocks and that won't fit in a single transaction. instead
2361 * we reserve credits to change i_size and put the inode onto
2362 * the orphan list. if needed, truncate will extend or restart
2365 osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
2366 osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
2368 inode = obj->oo_inode;
2371 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
2372 i_projid_read(inode), 0, oh, obj,
2375 /* if object holds encrypted content, we need to make sure we truncate
2376 * on an encryption unit boundary, or subsequent reads will get
2380 if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2381 start & ~LUSTRE_ENCRYPTION_MASK)
2382 start = (start & LUSTRE_ENCRYPTION_MASK) +
2383 LUSTRE_ENCRYPTION_UNIT_SIZE;
2384 ll_truncate_pagecache(inode, start);
2385 rc = osd_trunc_lock(obj, oh, false);
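/*
 * Truncate the object to 'start' bytes. Only the i_size update and the
 * orphan-list insertion happen inside this transaction; freeing the blocks
 * may be too expensive for a single transaction and is therefore deferred
 * to osd_trans_stop() -> osd_process_truncates().
 */
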
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
		     __u64 start, __u64 end, struct thandle *th)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct inode *inode = obj->oo_inode;
	struct osd_access_lock *al;
	struct osd_thandle *oh;
	int rc = 0, found = 0;
	bool grow = false;

	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(inode != NULL);
	dquot_initialize(inode);

	LASSERT(th);
	oh = container_of(th, struct osd_thandle, ot_super);
	LASSERT(oh->ot_handle->h_transaction != NULL);

	/* we used to skip truncate to current size to
	 * optimize truncates on OST. with DoM we can
	 * get attr_set to set a specific size (MDS_REINT)
	 * and then get a truncate RPC which essentially
	 * would be skipped. this is bad.. so, disable
	 * this optimization on MDS until clients stop
	 * sending MDS_REINT (LU-11033) -bzzz
	 */
	if (osd->od_is_ost && i_size_read(inode) == start)
		RETURN(0);

	osd_trans_exec_op(env, th, OSD_OT_PUNCH);

	spin_lock(&inode->i_lock);
	if (i_size_read(inode) < start)
		grow = true;
	i_size_write(inode, start);
	spin_unlock(&inode->i_lock);

	/* optimize grow case */
	if (grow) {
		osd_execute_truncate(obj);
		RETURN(0);
	}

	inode_lock(inode);
	/* add to orphan list to ensure truncate completion
	 * if this transaction succeeds. ldiskfs_truncate()
	 * will take the inode out of the list
	 */
	rc = ldiskfs_orphan_add(oh->ot_handle, inode);
	inode_unlock(inode);
	if (rc != 0)
		RETURN(rc);

	list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
		if (obj != al->tl_obj)
			continue;
		LASSERT(al->tl_shared == 0);
		found = 1;
		/* do actual truncate in osd_trans_stop() */
		al->tl_truncate = 1;
		break;
	}
	LASSERT(found);

	RETURN(rc);
}

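/*
 * FIEMAP support. The checks below mirror the generic kernel fiemap path:
 * clamp the requested range to what the filesystem can address, then call
 * the inode's ->fiemap() method. As a hedged sketch (not part of this
 * file), the equivalent userspace request is the FS_IOC_FIEMAP ioctl from
 * <linux/fiemap.h>:
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   N * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = N;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 */
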
static int fiemap_check_ranges(struct inode *inode,
			       u64 start, u64 len, u64 *new_len)
{
	u64 maxbytes;

	*new_len = len;
	if (len == 0)
		return -EINVAL;

	if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
		maxbytes = inode->i_sb->s_maxbytes;
	else
		maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;

	if (start > maxbytes)
		return -EFBIG;

	/*
	 * Shrink request scope to what the fs can actually handle.
	 */
	if (len > maxbytes || (maxbytes - len) < start)
		*new_len = maxbytes - start;

	return 0;
}

/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS	(UINT_MAX / sizeof(struct fiemap_extent))

static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
			  struct fiemap *fm)
{
	struct fiemap_extent_info fieinfo = {0, };
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	u64 len;
	int rc;
	DECLARE_MM_SEGMENT_T(saved_fs);

	LASSERT(inode);
	if (inode->i_op->fiemap == NULL)
		return -EOPNOTSUPP;

	if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
		return -EINVAL;

	rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
	if (rc)
		return rc;

	fieinfo.fi_flags = fm->fm_flags;
	fieinfo.fi_extents_max = fm->fm_extent_count;
	fieinfo.fi_extents_start = fm->fm_extents;

	if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
		filemap_write_and_wait(inode->i_mapping);

	access_set_kernel(saved_fs, &fieinfo);
	rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
	access_unset_kernel(saved_fs, &fieinfo);
	fm->fm_flags = fieinfo.fi_flags;
	fm->fm_mapped_extents = fieinfo.fi_extents_mapped;

	return rc;
}

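/*
 * Server-side ladvise. LU_LADVISE_DONTNEED simply drops clean pages from
 * the inode's page cache; it is roughly analogous (a hedged comparison,
 * not an exact equivalent) to posix_fadvise(fd, offset, len,
 * POSIX_FADV_DONTNEED) in userspace.
 */
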
static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
		       __u64 start, __u64 end, enum lu_ladvise_type advice)
{
	struct osd_object *obj = osd_dt_obj(dt);
	int rc = 0;

	ENTRY;

	switch (advice) {
	case LU_LADVISE_DONTNEED:
		if (end)
			invalidate_mapping_pages(obj->oo_inode->i_mapping,
						 start >> PAGE_SHIFT,
						 (end - 1) >> PAGE_SHIFT);
		break;
	default:
		rc = -ENOTSUPP;
		break;
	}

	RETURN(rc);
}

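/*
 * Implement SEEK_HOLE/SEEK_DATA on the local ldiskfs inode by building a
 * temporary pseudo file and reusing the backing filesystem's ->llseek().
 * As a hedged illustration (not part of this file), this backs userspace
 * calls of the form:
 *
 *	off_t hole = lseek(fd, offset, SEEK_HOLE);
 *	off_t data = lseek(fd, offset, SEEK_DATA);
 *
 * where -ENXIO from the backend for SEEK_HOLE beyond EOF is remapped to
 * 'offset' and left for LOV to interpret.
 */
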
static loff_t osd_lseek(const struct lu_env *env, struct dt_object *dt,
			loff_t offset, int whence)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *dev = osd_obj2dev(obj);
	struct inode *inode = obj->oo_inode;
	struct file *file;
	loff_t result;

	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(offset >= 0);

	file = alloc_file_pseudo(inode, dev->od_mnt, "/", O_NOATIME,
				 inode->i_fop);
	if (IS_ERR(file))
		RETURN(PTR_ERR(file));

	file->f_mode |= FMODE_64BITHASH;
	result = file->f_op->llseek(file, offset, whence);
	ihold(inode);
	fput(file);

	/*
	 * If 'offset' is beyond the end of the object file then treat it
	 * not as an error but as a valid case for SEEK_HOLE and return
	 * 'offset' as the result.
	 * LOV will decide whether it is beyond the real end of file or not.
	 */
	if (whence == SEEK_HOLE && result == -ENXIO)
		result = offset;

	CDEBUG(D_INFO, "seek %s from %lld: %lld\n", whence == SEEK_HOLE ?
	       "hole" : "data", offset, result);
	RETURN(result);
}

/*
 * in some cases we may need the declare methods for objects being
 * created, e.g. when we create a symlink
 */
const struct dt_body_operations osd_body_ops_new = {
	.dbo_declare_write = osd_declare_write,
};

const struct dt_body_operations osd_body_ops = {
	.dbo_read = osd_read,
	.dbo_declare_write = osd_declare_write,
	.dbo_write = osd_write,
	.dbo_bufs_get = osd_bufs_get,
	.dbo_bufs_put = osd_bufs_put,
	.dbo_write_prep = osd_write_prep,
	.dbo_declare_write_commit = osd_declare_write_commit,
	.dbo_write_commit = osd_write_commit,
	.dbo_read_prep = osd_read_prep,
	.dbo_declare_punch = osd_declare_punch,
	.dbo_punch = osd_punch,
	.dbo_fiemap_get = osd_fiemap_get,
	.dbo_ladvise = osd_ladvise,
	.dbo_declare_fallocate = osd_declare_fallocate,
	.dbo_fallocate = osd_fallocate,
	.dbo_lseek = osd_lseek,
};

/*
 * Get a truncate lock
 *
 * In order to take multi-transaction truncate out of the main transaction
 * we let the caller grab a lock on the object passed. The lock can be
 * shared (for writes) or exclusive (for truncate). It's not allowed to mix
 * truncate and write in the same transaction handle (do not confuse with
 * a big ldiskfs transaction containing lots of handles).
 * The lock must be taken at declaration.
 *
 * \param obj		object to lock
 * \param oh		osd transaction handle
 * \param shared	shared or exclusive
 *
 * \retval 0		lock is granted
 * \retval -ENOMEM	no memory to allocate lock
 */
int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
{
	struct osd_access_lock *al, *tmp;

	LASSERT(obj);
	LASSERT(oh);

	list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
		if (tmp->tl_obj != obj)
			continue;
		LASSERT(tmp->tl_shared == shared);
		/* found same lock */
		return 0;
	}

	OBD_ALLOC_PTR(al);
	if (unlikely(al == NULL))
		return -ENOMEM;
	al->tl_obj = obj;
	al->tl_truncate = false;
	if (shared)
		down_read(&obj->oo_ext_idx_sem);
	else
		down_write(&obj->oo_ext_idx_sem);
	al->tl_shared = shared;
	lu_object_get(&obj->oo_dt.do_lu);

	list_add(&al->tl_list, &oh->ot_trunc_locks);

	return 0;
}

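/*
 * A minimal usage sketch (assumptions drawn from the callers in this
 * file, not a new API): a declare method takes the lock, and the deferred
 * work plus the unlock happen after the transaction has stopped:
 *
 *	rc = osd_trunc_lock(obj, oh, false);	// declare time, exclusive
 *	...
 *	osd_process_truncates(env, &oh->ot_trunc_locks);
 *	osd_trunc_unlock_all(env, &oh->ot_trunc_locks);
 */
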
void osd_trunc_unlock_all(const struct lu_env *env, struct list_head *list)
{
	struct osd_access_lock *al, *tmp;

	list_for_each_entry_safe(al, tmp, list, tl_list) {
		if (al->tl_shared)
			up_read(&al->tl_obj->oo_ext_idx_sem);
		else
			up_write(&al->tl_obj->oo_ext_idx_sem);
		osd_object_put(env, al->tl_obj);
		list_del(&al->tl_list);
		OBD_FREE_PTR(al);
	}
}

/* For a partial-page punch, flush punch range to disk immediately */
static void osd_partial_page_flush_punch(struct osd_device *d,
					 struct inode *inode, loff_t start,
					 loff_t end)
{
	if (osd_use_page_cache(d)) {
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	} else {
		/* Notice we use the "wait" version to ensure I/O is complete */
		filemap_write_and_wait_range(inode->i_mapping, start, end);
		invalidate_mapping_pages(inode->i_mapping, start >> PAGE_SHIFT,
					 end >> PAGE_SHIFT);
	}
}

/*
 * For a partial-page truncate, flush the page to disk immediately to
 * avoid data corruption during direct disk write. b=17397
 */
static void osd_partial_page_flush(struct osd_device *d, struct inode *inode,
				   loff_t offset)
{
	if (!(offset & ~PAGE_MASK))
		return;

	if (osd_use_page_cache(d)) {
		filemap_fdatawrite_range(inode->i_mapping, offset, offset + 1);
	} else {
		/* Notice we use the "wait" version to ensure I/O is complete */
		filemap_write_and_wait_range(inode->i_mapping, offset,
					     offset + 1);
		invalidate_mapping_pages(inode->i_mapping, offset >> PAGE_SHIFT,
					 offset >> PAGE_SHIFT);
	}
}

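/*
 * Note on the alignment check above: (offset & ~PAGE_MASK) is the offset
 * within the page, so it is zero exactly when the new size is page
 * aligned and no partial page needs flushing. E.g. with 4KiB pages,
 * offset 8192 returns early, while offset 8193 flushes the partially
 * truncated page covering bytes 8192..12287.
 */
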
void osd_execute_truncate(struct osd_object *obj)
{
	struct osd_device *d = osd_obj2dev(obj);
	struct inode *inode = obj->oo_inode;
	__u64 size;

	/* simulate crash before (in the middle of) delayed truncate */
	if (CFS_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
		struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
		struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);

		mutex_lock(&sbi->s_orphan_lock);
		list_del_init(&ei->i_orphan);
		mutex_unlock(&sbi->s_orphan_lock);
		return;
	}

	size = i_size_read(inode);
	inode_lock(inode);
	/* if object holds encrypted content, we need to make sure we truncate
	 * on an encryption unit boundary, or block content will get corrupted
	 */
	if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
	    size & ~LUSTRE_ENCRYPTION_MASK)
		inode->i_size = (size & LUSTRE_ENCRYPTION_MASK) +
			LUSTRE_ENCRYPTION_UNIT_SIZE;
	ldiskfs_truncate(inode);
	inode_unlock(inode);
	if (inode->i_size != size) {
		spin_lock(&inode->i_lock);
		i_size_write(inode, size);
		LDISKFS_I(inode)->i_disksize = size;
		spin_unlock(&inode->i_lock);
		osd_dirty_inode(inode, I_DIRTY_DATASYNC);
	}
	osd_partial_page_flush(d, inode, size);
}

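/*
 * Execute a deferred punch recorded by osd_fallocate_punch(). Like
 * osd_lseek(), this builds a temporary pseudo file so the backing
 * filesystem's own ->fallocate() method can be reused outside of any
 * journal handle.
 */
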
static int osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
			     loff_t start, loff_t end, int mode)
{
	struct osd_device *d = osd_obj2dev(obj);
	struct inode *inode = obj->oo_inode;
	struct file *file;
	int rc;

	file = alloc_file_pseudo(inode, d->od_mnt, "/", O_NOATIME,
				 inode->i_fop);
	if (IS_ERR(file))
		RETURN(PTR_ERR(file));

	file->f_mode |= FMODE_64BITHASH;
	rc = file->f_op->fallocate(file, mode, start, end - start);
	ihold(inode);
	fput(file);
	if (rc == 0)
		osd_partial_page_flush_punch(d, inode, start, end - 1);

	return rc;
}

int osd_process_truncates(const struct lu_env *env, struct list_head *list)
{
	struct osd_access_lock *al;
	int rc = 0;

	LASSERT(!journal_current_handle());

	list_for_each_entry(al, list, tl_list) {
		if (al->tl_shared)
			continue;
		if (al->tl_truncate)
			osd_execute_truncate(al->tl_obj);
		else if (al->tl_punch)
			rc = osd_execute_punch(env, al->tl_obj, al->tl_start,
					       al->tl_end, al->tl_mode);
	}

	return rc;
}