 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
#define DEBUG_SUBSYSTEM S_OSD

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 */
#include <obd_support.h>

#include "osd_internal.h"

#include <ldiskfs/ldiskfs_extents.h>
static inline bool osd_use_page_cache(struct osd_device *d)
{
        /* do not use pagecache if write and read caching are disabled */
        if (d->od_writethrough_cache + d->od_read_cache == 0)
                return false;
        /* use pagecache by default */
        return true;
}
static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_elapsed = ktime_set(0, 0);
        /* must be counted before, so assert */
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }

        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned int)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned int)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_lnb_buf,
                       pages * sizeof(iobuf->dr_lnbs[0]));
        iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
        if (unlikely(iobuf->dr_lnbs == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}

#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
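/*
 * Illustrative sketch (not part of the original file, names hypothetical):
 * the growth policy used above, assuming 4KB pages so that 256 pages == 1MB.
 * The real function then resizes dr_pg_buf/dr_bl_buf via lu_buf_realloc()
 * as shown.
 */
static inline int __maybe_unused osd_example_iobuf_pages(int pages)
{
        int i = 256;    /* "start with 1MB for 4K blocks" */

        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;        /* double until the request fits */
        return i;
}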
static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
                               struct niobuf_local *lnb)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
        iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
        iobuf->dr_npages++;
}

void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS+rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
                                      ktime_to_ms(iobuf->dr_elapsed));
        }
}
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
        int error = bio->bi_status;
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        struct bio_vec *bvl;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!!
         */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL! This should never happen. Normally, I would crash here, but instead I will dump the bio contents to the console. Please report this to <https://jira.whamcloud.com/>, along with any interesting messages leading up to this point (like SCSI errors, perhaps). Because bi_private is NULL, I can't wake up the thread that initiated this IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
                       ": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
                       bio->bi_next, (unsigned long)bio->bi_flags,
                       (unsigned int)bio->bi_opf, bio->bi_vcnt, bio_idx(bio),
                       bio_sectors(bio) << 9, bio->bi_end_io,
                       atomic_read(&bio->__bi_cnt),
                       bio->bi_private);
                return;
        }
        /* the check is outside of the loop for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                DECLARE_BVEC_ITER_ALL(iter_all);

                bio_for_each_segment_all(bvl, bio, iter_all) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs turns to 0, otherwise
         * it's possible that service thread will see dr_numreqs
         * is zero, but dr_elapsed is not set yet, leading to lost
         * data in this processing and an assertion in a subsequent
         * call.
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                ktime_t now = ktime_get();

                iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq(). It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST. The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. b=10076
         */
        bio_put(bio);
}
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}

static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        submit_bio(rw ? WRITE : READ, bio);
#else
        bio->bi_opf |= rw;
        submit_bio(bio);
#endif
}

static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}
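/*
 * Example of the merge test above (illustrative numbers): a bio whose
 * payload covers 512-byte sectors [0, 8) has bio_end_sector() == 8, so a
 * fragment starting at sector 8 satisfies can_be_merged() and gets appended
 * with bio_add_page(); any other start sector forces a new bio.
 */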
#if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
/*
 * This function will change the data written, thus it should only be
 * used when checking the data integrity feature
 */
static void bio_integrity_fault_inject(struct bio *bio)
{
        struct bio_vec *bvec;
        DECLARE_BVEC_ITER_ALL(iter_all);
        void *kaddr;
        char *addr;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;

                kaddr = kmap(page);
                addr = kaddr;
                *addr = ~(*addr);
                kunmap(page);
                break;
        }
}

static int bio_dif_compare(__u16 *expected_guard_buf, void *bio_prot_buf,
                           unsigned int sectors, int tuple_size)
{
        __u16 *expected_guard;
        __u16 *bio_guard;
        int i;

        expected_guard = expected_guard_buf;
        for (i = 0; i < sectors; i++) {
                bio_guard = (__u16 *)bio_prot_buf;
                if (*bio_guard != *expected_guard) {
                        CERROR("unexpected guard tags on sector %d expected guard %u, bio guard %u, sectors %u, tuple size %d\n",
                               i, *expected_guard, *bio_guard, sectors,
                               tuple_size);
                        return -EIO;
                }
                expected_guard++;
                bio_prot_buf += tuple_size;
        }
        return 0;
}
static int osd_bio_integrity_compare(struct bio *bio, struct block_device *bdev,
                                     struct osd_iobuf *iobuf, int index)
{
        struct blk_integrity *bi = bdev_get_integrity(bdev);
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct niobuf_local *lnb;
        unsigned short sector_size = blk_integrity_interval(bi);
        void *bio_prot_buf = page_address(bip->bip_vec->bv_page) +
                bip->bip_vec->bv_offset;
        sector_t sector = bio_start_sector(bio);
        unsigned int sectors, total;
        DECLARE_BVEC_ITER_ALL(iter_all);
        __u16 *expected_guard;
        struct bio_vec *bv;
        int rc;

        total = 0;
        bio_for_each_segment_all(bv, bio, iter_all) {
                lnb = iobuf->dr_lnbs[index];
                expected_guard = lnb->lnb_guards;
                sectors = bv->bv_len / sector_size;
                if (lnb->lnb_guard_rpc) {
                        rc = bio_dif_compare(expected_guard, bio_prot_buf,
                                             sectors, bi->tuple_size);
                        if (rc)
                                return rc;
                }

                sector += sectors;
                bio_prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
                LASSERT(total <= bip_size(bio->bi_integrity));
                index++;
        }

        return 0;
}
static int osd_bio_integrity_handle(struct osd_device *osd, struct bio *bio,
                                    struct osd_iobuf *iobuf,
                                    int start_page_idx, bool fault_inject,
                                    bool integrity_enabled)
{
        struct super_block *sb = osd_sb(osd);
        integrity_gen_fn *generate_fn = NULL;
        integrity_vrfy_fn *verify_fn = NULL;
        int rc;

        ENTRY;

        if (!integrity_enabled)
                RETURN(0);

        rc = osd_get_integrity_profile(osd, &generate_fn, &verify_fn);
        if (rc)
                RETURN(rc);

        rc = bio_integrity_prep_fn(bio, generate_fn, verify_fn);
        if (rc)
                RETURN(rc);

        /* Verify and inject fault only when writing */
        if (iobuf->dr_rw == 1) {
                if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_CMP))) {
                        rc = osd_bio_integrity_compare(bio, sb->s_bdev, iobuf,
                                                       start_page_idx);
                        if (rc)
                                RETURN(rc);
                }

                if (unlikely(fault_inject))
                        bio_integrity_fault_inject(bio);
        }

        RETURN(0);
}

#ifdef HAVE_BIO_INTEGRITY_PREP_FN
# ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_integrity_complete_routine(struct bio *bio)
# else
static void dio_integrity_complete_routine(struct bio *bio, int error)
# endif
{
        struct osd_bio_private *bio_private = bio->bi_private;

        bio->bi_private = bio_private->obp_iobuf;
        osd_dio_complete_routine(bio, error);

        OBD_FREE_PTR(bio_private);
}
#endif /* HAVE_BIO_INTEGRITY_PREP_FN */
#else /* !CONFIG_BLK_DEV_INTEGRITY */
#define osd_bio_integrity_handle(osd, bio, iobuf, start_page_idx, \
                                 fault_inject, integrity_enabled) 0
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
                        bool integrity_enabled, int start_page_idx,
                        struct osd_bio_private **pprivate)
{
        ENTRY;

#ifdef HAVE_BIO_INTEGRITY_PREP_FN
        if (integrity_enabled) {
                struct osd_bio_private *bio_private = NULL;

                OBD_ALLOC_GFP(bio_private, sizeof(*bio_private), GFP_NOIO);
                if (bio_private == NULL)
                        RETURN(-ENOMEM);
                bio->bi_end_io = dio_integrity_complete_routine;
                bio->bi_private = bio_private;
                bio_private->obp_start_page_idx = start_page_idx;
                bio_private->obp_iobuf = iobuf;
                *pprivate = bio_private;
        } else
#endif
        {
                bio->bi_end_io = dio_complete_routine;
                bio->bi_private = iobuf;
        }

        RETURN(0);
}
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf, sector_t start_blocks,
                      sector_t count)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        sector_t *blocks = iobuf->dr_blocks;
        struct super_block *sb = inode->i_sb;
        int sector_bits = sb->s_blocksize_bits - 9;
        unsigned int blocksize = sb->s_blocksize;
        struct block_device *bdev = sb->s_bdev;
        struct osd_bio_private *bio_private = NULL;
        struct bio *bio = NULL;
        int bio_start_page_idx;
        struct page *page;
        unsigned int page_offset;
        sector_t sector;
        int nblocks;
        int block_idx, block_idx_end;
        int page_idx, page_idx_start;
        int i;
        int rc = 0;
        bool fault_inject;
        bool integrity_enabled;
        struct blk_plug plug;
        int blocks_left_page;

        ENTRY;

        fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
        LASSERT(iobuf->dr_npages == npages);

        integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = ktime_get();

        if (count == 0)
                count = npages * blocks_per_page;
        block_idx_end = start_blocks + count;

        blk_start_plug(&plug);

        page_idx_start = start_blocks / blocks_per_page;
        for (page_idx = page_idx_start, block_idx = start_blocks;
             block_idx < block_idx_end; page_idx++,
             block_idx += blocks_left_page) {
                page = pages[page_idx];
                LASSERT(page_idx < iobuf->dr_npages);

                i = block_idx % blocks_per_page;
                blocks_left_page = blocks_per_page - i;
                for (page_offset = i * blocksize; i < blocks_left_page;
                     i += nblocks, page_offset += blocksize * nblocks) {
                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) { /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u, "
                                         "start_blocks: %llu, count: %llu, npages: %d\n",
                                         page_idx, block_idx, i,
                                         (unsigned long long)start_blocks,
                                         (unsigned long long)count, npages);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_left_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio && can_be_merged(bio, sector) &&
                            bio_add_page(bio, page, blocksize * nblocks,
                                         page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q = bio_get_queue(bio);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE,
                                       "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       osd_bio_nr_segs(bio),
                                       queue_max_segments(q));
                                rc = osd_bio_integrity_handle(osd, bio,
                                        iobuf, bio_start_page_idx,
                                        fault_inject, integrity_enabled);
                                if (rc) {
                                        bio_put(bio);
                                        goto out;
                                }

                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }

                        bio_start_page_idx = page_idx;
                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (block_idx_end - block_idx +
                                                       blocks_left_page - 1)));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u pages\n",
                                       block_idx_end - block_idx +
                                       blocks_left_page - 1);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio_set_dev(bio, bdev);
                        bio_set_sector(bio, sector);
                        bio->bi_opf = iobuf->dr_rw ? WRITE : READ;
                        rc = osd_bio_init(bio, iobuf, integrity_enabled,
                                          bio_start_page_idx, &bio_private);
                        if (rc) {
                                bio_put(bio);
                                goto out;
                        }

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                rc = osd_bio_integrity_handle(osd, bio, iobuf,
                                              bio_start_page_idx,
                                              fault_inject,
                                              integrity_enabled);
                if (rc) {
                        bio_put(bio);
                        goto out;
                }

                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
        }

out:
        blk_finish_plug(&plug);

        /* in order to achieve better IO throughput, we don't wait for writes
         * completion here. instead we proceed with transaction commit in
         * parallel and wait for IO completion once transaction is stopped
         * see osd_trans_stop() for more details -bzzz
         */
        if (iobuf->dr_rw == 0 || fault_inject) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;

        if (bio_private != NULL)
                OBD_FREE_PTR(bio_private);

        RETURN(rc);
}
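/*
 * Note on the sector arithmetic used in osd_do_bio() (illustrative sketch,
 * not called anywhere): with 4KB filesystem blocks, sector_bits == 12 - 9
 * == 3, so filesystem block N starts at 512-byte sector N << 3.
 */
static inline sector_t __maybe_unused
osd_example_block_to_sector(struct super_block *sb, sector_t block)
{
        return block << (sb->s_blocksize_bits - 9);
}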
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb, int maxlnb)
{
        int rc = 0;

        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (*nrpages >= maxlnb) {
                        rc = -EOVERFLOW;
                        break;
                }

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;
                lnb->lnb_guard_rpc = 0;
                lnb->lnb_guard_disk = 0;
                lnb->lnb_locked = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long)len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(rc);
}
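/*
 * Worked example for the mapping above (illustrative numbers), assuming
 * PAGE_SIZE == 4096: a request of len == 6000 at offset == 5000 yields
 * lnb[0] = { offset 5000, page offset 904, len 3192 } and
 * lnb[1] = { offset 8192, page offset 0, len 2808 }, i.e. the first
 * fragment is clipped at the page boundary and the rest starts aligned.
 */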
static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
                                 loff_t offset, gfp_t gfp_mask, bool cache)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;
        int cur;

        LASSERT(inode);

        if (cache) {
                page = find_or_create_page(inode->i_mapping,
                                           offset >> PAGE_SHIFT, gfp_mask);
                if (likely(page)) {
                        LASSERT(!PagePrivate2(page));
                        wait_on_page_writeback(page);
                } else {
                        lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
                }

                return page;
        }

        if (inode->i_mapping->nrpages) {
                /* consult with pagecache, but do not create new pages */
                /* this is normally used once */
                page = find_lock_page(inode->i_mapping, offset >> PAGE_SHIFT);
                if (page) {
                        wait_on_page_writeback(page);
                        return page;
                }
        }

        LASSERT(oti->oti_dio_pages);
        cur = oti->oti_dio_pages_used;
        page = oti->oti_dio_pages[cur];

        if (unlikely(!page)) {
                LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
                page = alloc_page(gfp_mask);
                if (!page)
                        return NULL;
                oti->oti_dio_pages[cur] = page;
                SetPagePrivate2(page);
                lock_page(page);
        }

        ClearPageUptodate(page);
        page->index = offset >> PAGE_SHIFT;
        oti->oti_dio_pages_used++;

        return page;
}
/*
 * there are the following "locks":
 * - lock pages, unlock
 * - lock partial page
 */
/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env	thread execution environment
 * \param dt	dt object undergoing IO (OSD object + methods)
 * \param lnb	array of pages undergoing IO
 * \param npages number of pages in \a lnb
 *
 * \retval 0	on success
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct pagevec pvec;
        int i;

        ll_pagevec_init(&pvec, 0);

        for (i = 0; i < npages; i++) {
                struct page *page = lnb[i].lnb_page;

                if (page == NULL)
                        continue;

                /* if the page isn't cached, then reset uptodate
                 * to prevent reuse
                 */
                if (PagePrivate2(page)) {
                        oti->oti_dio_pages_used--;
                } else {
                        if (lnb[i].lnb_locked)
                                unlock_page(page);
                        if (pagevec_add(&pvec, page) == 0)
                                pagevec_release(&pvec);
                }

                lnb[i].lnb_page = NULL;
        }

        LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}
/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but partial-page truncate can leave dirty pages in the cache for
 * ldiskfs. It's possible the writeout on such a page is in progress when we
 * access it. It's also possible that during this writeout we put new
 * (partial) data into the page, but won't be able to proceed in
 * filter_commitrw_write(). Therefore, just wait for writeout completion as
 * it should be rare enough.
 *
 * \param env	thread execution environment
 * \param dt	dt object undergoing IO (OSD object + methods)
 * \param pos	byte offset of IO start
 * \param len	number of bytes of IO
 * \param lnb	array of extents undergoing IO
 * \param rw	read or write operation, and other flags
 * \param capa	capabilities
 *
 * \retval pages	(zero or more) loaded successfully
 * \retval -ENOMEM	on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        int maxlnb, enum dt_bufs_type rw)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        int npages, i, iosize, rc = 0;
        bool cache = false, write;
        loff_t fsize;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
        if (rc)
                RETURN(rc);

        write = rw & DT_BUFS_TYPE_WRITE;

        fsize = lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len;
        iosize = fsize - lnb[0].lnb_file_offset;
        fsize = max(fsize, i_size_read(obj->oo_inode));

        cache = rw & DT_BUFS_TYPE_READAHEAD;
        if (cache)
                goto bypass_checks;

        cache = osd_use_page_cache(osd);
        while (cache) {
                if (write) {
                        if (!osd->od_writethrough_cache) {
                                cache = false;
                                break;
                        }
                        if (iosize > osd->od_writethrough_max_iosize) {
                                cache = false;
                                break;
                        }
                } else {
                        if (!osd->od_read_cache) {
                                cache = false;
                                break;
                        }
                        if (iosize > osd->od_readcache_max_iosize) {
                                cache = false;
                                break;
                        }
                }
                /* don't use cache on large files */
                if (osd->od_readcache_max_filesize &&
                    fsize > osd->od_readcache_max_filesize)
                        cache = false;
                break;
        }

bypass_checks:
        if (!cache && unlikely(!oti->oti_dio_pages)) {
                OBD_ALLOC_PTR_ARRAY_LARGE(oti->oti_dio_pages,
                                          PTLRPC_MAX_BRW_PAGES);
                if (!oti->oti_dio_pages)
                        GOTO(cleanup, rc = -ENOMEM);
        }

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
                                             gfp_mask, cache);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                lnb->lnb_locked = 1;
        }

        /* XXX: this version doesn't invalidate cached pages, but uses them */
        if (!cache && write && obj->oo_inode->i_mapping->nrpages) {
                /* do not allow data aliasing, invalidate pagecache */
                /* XXX: can be quite expensive in mixed case */
                invalidate_mapping_pages(obj->oo_inode->i_mapping,
                                lnb[0].lnb_file_offset >> PAGE_SHIFT,
                                lnb[npages - 1].lnb_file_offset >> PAGE_SHIFT);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}
/* Borrow @ext4_chunk_trans_blocks */
static int osd_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
        ldiskfs_group_t groups;
        int gdpblocks;
        int idxblocks;
        int depth;
        int ret;

        depth = ext_depth(inode);
        idxblocks = depth * 2;

        /*
         * Now let's see how many group bitmaps and group descriptors need
         * to account.
         */
        groups = idxblocks + 1;
        gdpblocks = groups;
        if (groups > LDISKFS_SB(inode->i_sb)->s_groups_count)
                groups = LDISKFS_SB(inode->i_sb)->s_groups_count;
        if (gdpblocks > LDISKFS_SB(inode->i_sb)->s_gdb_count)
                gdpblocks = LDISKFS_SB(inode->i_sb)->s_gdb_count;

        /* bitmaps and block group descriptor blocks */
        ret = idxblocks + groups + gdpblocks;

        /* Blocks for super block, inode, quota and xattr blocks */
        ret += LDISKFS_META_TRANS_BLOCKS(inode->i_sb);

        return ret;
}
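/*
 * Worked example (illustrative, assumes enough block groups and group
 * descriptor blocks that neither clamp above applies): for an extent tree
 * of depth 2, idxblocks == 4 and groups == gdpblocks == 5, so the estimate
 * is 4 + 5 + 5 == 14 credits plus LDISKFS_META_TRANS_BLOCKS() for the
 * superblock, inode, quota and xattr blocks.
 */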
static int osd_extend_trans(handle_t *handle, int needed)
{
        if (ldiskfs_handle_has_enough_credits(handle, needed))
                return 0;

        return ldiskfs_journal_extend(handle,
                                      needed - handle->h_buffer_credits);
}

static int osd_extend_restart_trans(handle_t *handle, int needed)
{
        int rc = osd_extend_trans(handle, needed);

        if (rc <= 0)
                return rc;

        return ldiskfs_journal_restart(handle, needed);
}
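/*
 * Usage sketch (hypothetical helper, not in the original file): size the
 * credits for an allocation and let osd_extend_restart_trans() either
 * stretch the running handle in place or restart it when the journal
 * cannot extend it.
 */
static int __maybe_unused osd_example_reserve(handle_t *handle,
                                              struct inode *inode,
                                              int nrblocks)
{
        return osd_extend_restart_trans(handle,
                        osd_chunk_trans_blocks(inode, nrblocks));
}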
static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
                                 struct osd_device *osd, sector_t start_blocks,
                                 sector_t count, loff_t *disk_size,
                                 __u64 user_size)
{
        /* if file has grown, take user_size into account */
        if (user_size && *disk_size > user_size)
                *disk_size = user_size;

        spin_lock(&inode->i_lock);
        if (*disk_size > i_size_read(inode)) {
                i_size_write(inode, *disk_size);
                LDISKFS_I(inode)->i_disksize = *disk_size;
                spin_unlock(&inode->i_lock);
                osd_dirty_inode(inode, I_DIRTY_DATASYNC);
        } else {
                spin_unlock(&inode->i_lock);
        }

        /*
         * We don't do stats here as in the read path because
         * write is async: we'll do this in osd_put_bufs()
         */
        return osd_do_bio(osd, inode, iobuf, start_blocks, count);
}
static int osd_ldiskfs_map_inode_pages(struct inode *inode,
                                       struct osd_iobuf *iobuf,
                                       struct osd_device *osd,
                                       int create, __u64 user_size,
                                       int check_credits)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0, mapped_index = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;
        int credits = 0;
        sector_t start_blocks = 0, count = 0;
        loff_t disk_size = 0;
        struct page **page = iobuf->dr_pages;
        int pages = iobuf->dr_npages;
        sector_t *blocks = iobuf->dr_blocks;
        struct niobuf_local *lnb1, *lnb2;
        loff_t size1, size2;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
                disk_size = i_size_read(inode);
                /* if disk_size is already bigger than specified user_size,
                 * ignore user_size
                 */
                if (disk_size > user_size)
                        user_size = 0;
        }
        /* pages are sorted already. so, we just have to find
         * contiguous space and process them properly
         */
        while (i < pages) {
                long blen, total = 0, previous_total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
cont_map:
                /*
                 * We might restart the transaction for block allocations:
                 * to keep data ordered mode consistent, issuing IO, the disk
                 * size update and the block allocations need to be within the
                 * same transaction.
                 */
                if (handle && check_credits) {
                        /*
                         * credits to insert 1 extent into extent tree.
                         */
                        credits = osd_chunk_trans_blocks(inode, blen);
                        rc = osd_extend_trans(handle, credits);
                        if (rc < 0)
                                GOTO(cleanup, rc);
                        /*
                         * only issue IO if restart transaction needed,
                         * as updating the disk size needs to hold the inode
                         * lock, which we want to avoid as much as possible.
                         */
                        if (rc) {
                                WARN_ON_ONCE(start_blocks == 0);
                                rc = osd_ldiskfs_map_write(inode,
                                        iobuf, osd, start_blocks,
                                        count, &disk_size, user_size);
                                if (rc)
                                        GOTO(cleanup, rc);
                                rc = ldiskfs_journal_restart(handle, credits);
                                if (rc)
                                        GOTO(cleanup, rc);
                                start_blocks += count;
                                /* reset IO block count */
                                count = 0;
                        }
                }

                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;

                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        total++;
                                        break;
                                }
                                *(blocks + total) = map.m_pblk + c;
                                /* unmap any possible underlying
                                 * metadata from the block device
                                 * mapping.
                                 */
                                if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                    create)
                                        clean_bdev_aliases(inode->i_sb->s_bdev,
                                                           map.m_pblk + c, 1);
                        }
                        rc = 0;
                }

                if (rc == 0 && create) {
                        count += (total - previous_total);
                        mapped_index = (count + blocks_per_page -
                                        1) / blocks_per_page - 1;
                        lnb1 = iobuf->dr_lnbs[i - clen];
                        lnb2 = iobuf->dr_lnbs[mapped_index];
                        size1 = lnb1->lnb_file_offset -
                                (lnb1->lnb_file_offset % PAGE_SIZE) +
                                (total << inode->i_blkbits);
                        size2 = lnb2->lnb_file_offset + lnb2->lnb_len;

                        if (size1 > disk_size)
                                disk_size = size1;
                        if (size2 > disk_size)
                                disk_size = size2;
                }

                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        previous_total = total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);
                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        if (rc == 0 && create &&
            start_blocks < pages * blocks_per_page) {
                rc = osd_ldiskfs_map_write(inode, iobuf, osd, start_blocks,
                                           count, &disk_size, user_size);
                LASSERT(start_blocks + count == pages * blocks_per_page);
        }
        return rc;
}
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start, end;
        s64 timediff;
        unsigned long isize;
        __s64 maxidx;
        int i, rc = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                /*
                 * till commit the content of the page is undefined
                 * we'll set it uptodate once bulk is done. otherwise
                 * subsequent reads can access non-stable data
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, &lnb[i]);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
                                                 0, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf, 0, 0);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}
struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};

static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct osd_fextent *cached_extent)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        sector_t block = offset >> inode->i_blkbits;
        sector_t start;
        struct fiemap_extent_info fei = { 0 };
        struct fiemap_extent fe = { 0 };
        mm_segment_t saved_fs;
        int rc;

        if (block >= cached_extent->start && block < cached_extent->end)
                return cached_extent->mapped;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
                return 0;

        fei.fi_extents_max = 1;
        fei.fi_extents_start = &fe;

        saved_fs = get_fs();
        set_fs(KERNEL_DS);
        rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET - offset);
        set_fs(saved_fs);
        if (rc != 0)
                return 0;

        start = fe.fe_logical >> inode->i_blkbits;

        if (start > block) {
                cached_extent->start = block;
                cached_extent->end = start;
                cached_extent->mapped = 0;
        } else {
                cached_extent->start = start;
                cached_extent->end = (fe.fe_logical + fe.fe_length) >>
                                     inode->i_blkbits;
                cached_extent->mapped = 1;
        }

        return cached_extent->mapped;
}
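/*
 * Example of the caching behaviour above (illustrative numbers): once a
 * FIEMAP call reports an extent covering blocks [100, 200), subsequent
 * osd_is_mapped() calls for any block in that range are answered from
 * cached_extent without issuing another ->fiemap() call.
 */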
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle *oh;
        int extents = 0;
        int depth;
        int i;
        int newblocks = 0;
        int rc = 0;
        int credits = 0;
        long long quota_space = 0;
        struct osd_fextent mapped = { 0 }, extent = { 0 };
        enum osd_quota_local_flags local_flags = 0;
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;

        LASSERT(handle != NULL);
        oh = container_of(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on per-lnb basis as done by
                 * grant.
                 */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;

                if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &mapped)) {
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                        continue;
                }

                /* count only unmapped changes */
                newblocks++;
                if (lnb[i].lnb_file_offset != extent.end || extent.end == 0) {
                        extents++;
                        extent.end = lnb[i].lnb_file_offset + lnb[i].lnb_len;
                } else {
                        extent.end += lnb[i].lnb_len;
                }

                quota_space += PAGE_SIZE;
        }

        credits++; /* inode */
        /*
         * overwrite case, no need to modify tree and
         * allocate blocks.
         */
        if (!newblocks)
                goto out_declare;
        /*
         * each extent can go into new leaf causing a split
         * 5 is max tree depth: inode + 4 index blocks
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow tree by the time
                 * our transaction starts. so, consider 2 is a min depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits += depth * extents;
        }

        /*
         * try a bit more extents to avoid restart
         * as much as possible in normal case.
         */
        if (npages > 1 && extents)
                extents <<= 1;

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);

        /* each new block can go in a different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (extents > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += extents;

        /* we can't dirty more gd blocks than exist */
        if (extents > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += extents;

out_declare:
        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), &local_flags, declare_flags);

        /* we need only to store the overquota flags in the first lnb for
         * now, once we support multiple objects BRW, this code needs to be
         * revised.
         */
        if (local_flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
        if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;

        if (rc == 0)
                rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);

        RETURN(rc);
}
/* Check if a block is allocated or not */
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle, __u64 user_size)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        int rc = 0, i, check_credits = 0;
        struct osd_thandle *oh = container_of(thandle,
                                              struct osd_thandle, ot_super);
        unsigned int save_credits = oh->ot_credits;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        dquot_initialize(inode);

        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block
                         */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                if (!(lnb[i].lnb_flags & OBD_BRW_MAPPED))
                        check_credits = 1;

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, &lnb[i]);
        }

        osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd,
                                                 1, user_size,
                                                 check_credits);
                /*
                 * Write might restart transaction, extend credits
                 * if needed for operations such as attribute set.
                 */
                if (rc == 0) {
                        handle_t *handle = ldiskfs_journal_current_handle();

                        LASSERT(handle != NULL);
                        rc = osd_extend_restart_trans(handle, save_credits);
                }
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (rc)
                osd_fini_iobuf(osd, iobuf);

        osd_trans_exec_check(env, thandle, OSD_OT_WRITE);

        if (unlikely(rc != 0)) {
                /* if write fails, we should drop pages from the cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].lnb_page == NULL)
                                continue;
                        if (!PagePrivate2(lnb[i].lnb_page)) {
                                LASSERT(PageLocked(lnb[i].lnb_page));
                                generic_error_remove_page(inode->i_mapping,
                                                          lnb[i].lnb_page);
                        }
                }
        }

        RETURN(rc);
}
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                         struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        int rc = 0, i, cache_hits = 0, cache_misses = 0;
        ktime_t start, end;
        s64 timediff;
        loff_t isize;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);

        start = ktime_get();
        for (i = 0; i < npages; i++) {

                if (isize <= lnb[i].lnb_file_offset)
                        /* If there's no more data, abort early.
                         * lnb->lnb_rc == 0, so it's easy to detect later.
                         */
                        break;

                /* instead of checking whether we go beyond isize, send
                 * complete pages all the time
                 */
                lnb[i].lnb_rc = lnb[i].lnb_len;

                /* Bypass disk read if fail_loc is set properly */
                if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
                        SetPageUptodate(lnb[i].lnb_page);

                if (PageUptodate(lnb[i].lnb_page)) {
                        cache_hits++;
                        unlock_page(lnb[i].lnb_page);
                } else {
                        cache_misses++;
                        osd_iobuf_add_page(iobuf, &lnb[i]);
                }
                /* no need to unlock in osd_bufs_put(), the sooner page is
                 * unlocked, the earlier another client can access it.
                 * notice real unlock_page() can be called a few lines
                 * below after osd_do_bio(). lnb is per-thread, so it's
                 * fine to have PG_locked and lnb_locked inconsistent here
                 */
                lnb[i].lnb_locked = 0;
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

        if (cache_hits != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
                                    cache_hits);
        if (cache_misses != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
                                    cache_misses);
        if (cache_hits + cache_misses != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
                                    cache_hits + cache_misses);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
                                                 0, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf, 0, 0);
                        /* IO stats will be done in osd_bufs_put() */

                        /* early release to let others read data during the bulk */
                        for (i = 0; i < iobuf->dr_npages; i++) {
                                LASSERT(PageLocked(iobuf->dr_pages[i]));
                                if (!PagePrivate2(iobuf->dr_pages[i]))
                                        unlock_page(iobuf->dr_pages[i]);
                        }
                }
        }

        RETURN(rc);
}
/*
 * XXX: Another layering violation for now.
 *
 * We don't want to use ->f_op->read methods, because generic file write
 *
 *  - serializes on ->i_sem, and
 *
 *  - does a lot of extra work like balance_dirty_pages(),
 *
 * which doesn't work for globally shared files like /last_rcvd.
 */
static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);

        memcpy(buffer, (char *)ei->i_data, buflen);

        return buflen;
}

int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        struct buffer_head *bh;
        unsigned long block;
        int osize;
        int blocksize;
        int csize;
        int boffs;

        /* prevent reading after eof */
        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < *offs + size) {
                loff_t diff = i_size_read(inode) - *offs;

                spin_unlock(&inode->i_lock);
                if (diff < 0) {
                        CDEBUG(D_OTHER,
                               "size %llu is too short to read @%llu\n",
                               i_size_read(inode), *offs);
                        return -EBADR;
                } else if (diff == 0) {
                        return 0;
                } else {
                        size = diff;
                }
        } else {
                spin_unlock(&inode->i_lock);
        }

        blocksize = 1 << inode->i_blkbits;
        osize = size;
        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = __ldiskfs_bread(NULL, inode, block, 0);
                if (IS_ERR(bh)) {
                        CERROR("%s: can't read %u@%llu on ino %lu: rc = %ld\n",
                               osd_ino2name(inode), csize, *offs, inode->i_ino,
                               PTR_ERR(bh));
                        return PTR_ERR(bh);
                }

                if (bh != NULL) {
                        memcpy(buf, bh->b_data + boffs, csize);
                        brelse(bh);
                } else {
                        memset(buf, 0, csize);
                }

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return osize;
}
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
                        struct lu_buf *buf, loff_t *pos)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        int rc;

        /* Read small symlink from inode body as we need to maintain correct
         * on-disk symlinks for ldiskfs.
         */
        if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
                loff_t size = i_size_read(inode);

                if (buf->lb_len < size)
                        return -EOVERFLOW;

                if (size < sizeof(LDISKFS_I(inode)->i_data))
                        rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
                else
                        rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
        } else {
                rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
        }

        return rc;
}

static inline int osd_extents_enabled(struct super_block *sb,
                                      struct inode *inode)
{
        if (inode != NULL) {
                if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
                        return 1;
        } else if (ldiskfs_has_feature_extents(sb)) {
                return 1;
        }
        return 0;
}
int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
                           const loff_t size, const loff_t pos,
                           const int blocks)
{
        int credits, bits, bs, i;

        bits = sb->s_blocksize_bits;
        bs = 1 << bits;

        /* legacy blockmap: 3 levels * 3 (bitmap,gd,itself)
         * we do not expect blockmaps on large files,
         * so let's shrink it to 2 levels (4GB files)
         */

        /* this is default reservation: 2 levels */
        credits = (blocks + 2) * 3;

        /* actual offset is unknown, hard to optimize */
        if (pos == -1)
                return credits;

        /* now check for a few specific cases to optimize */
        if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
                /* no indirects */
                credits = blocks;
                /* allocate if not allocated */
                if (inode == NULL) {
                        credits += blocks * 2;
                        return credits;
                }
                for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
                        LASSERT(i < LDISKFS_NDIR_BLOCKS);
                        if (LDISKFS_I(inode)->i_data[i] == 0)
                                credits += 2;
                }
        } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
                /* single indirect */
                credits = blocks * 3;
                if (inode == NULL ||
                    LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
                        credits += 3;
                else
                        /* The indirect block may be modified. */
                        credits += 1;
        }

        return credits;
}
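/*
 * Worked example for the branches above (illustrative): writing 2 blocks
 * entirely within the direct-block area of a not-yet-created inode
 * (inode == NULL) costs 2 + 2 * 2 == 6 credits; the same write through a
 * missing single-indirect block costs 2 * 3 + 3 == 9 credits.
 */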
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
                                 const struct lu_buf *buf, loff_t _pos,
                                 struct thandle *handle)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct inode *inode = obj->oo_inode;
        struct super_block *sb = osd_sb(osd_obj2dev(obj));
        struct osd_thandle *oh;
        int rc = 0, est = 0, credits, blocks, allocated = 0;
        int bits, bs;
        int depth, size;
        loff_t pos;

        LASSERT(buf != NULL);
        LASSERT(handle != NULL);

        oh = container_of(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        size = buf->lb_len;
        bits = sb->s_blocksize_bits;
        bs = 1 << bits;

        if (_pos == -1) {
                /* if this is an append, then we
                 * should expect cross-block records
                 */
                pos = 0;
        } else {
                pos = _pos;
        }

        /* blocks to modify */
        blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
        LASSERT(blocks > 0);

        if (inode != NULL && _pos != -1) {
                /* object size in blocks */
                est = (i_size_read(inode) + bs - 1) >> bits;
                allocated = inode->i_blocks >> (bits - 9);
                if (pos + size <= i_size_read(inode) && est <= allocated) {
                        /* looks like an overwrite, no need to modify tree */
                        credits = blocks;
                        /* no need to modify i_size */
                        goto out;
                }
        }

        if (osd_extents_enabled(sb, inode)) {
                /*
                 * many concurrent threads may grow tree by the time
                 * our transaction starts. so, consider 2 is a min depth.
                 * for every level we may need to allocate a new block
                 * and take some entries from the old one. so, 3 blocks
                 * to allocate (bitmap, gd, itself) + old block - 4 per
                 * level.
                 */
                depth = inode != NULL ? ext_depth(inode) : 0;
                depth = max(depth, 1) + 1;
                credits = depth;
                /* if not append, then split may need to modify
                 * existing blocks moving entries into the new ones
                 */
                if (_pos != -1)
                        credits += depth;
                /* blocks to store data: bitmap,gd,itself */
                credits += blocks * 3;
        } else {
                credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
        }
        /* if inode is created as part of the transaction,
         * then it's counted already by the creation method
         */
        if (inode != NULL)
                credits++;

out:

        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* dt_declare_write() is usually called for system objects, such
         * as llog or last_rcvd files. We needn't enforce quota on those
         * objects, so always set the lqi_space as 0.
         */
        if (inode != NULL)
                rc = osd_declare_inode_qid(env, i_uid_read(inode),
                                           i_gid_read(inode),
                                           i_projid_read(inode), 0,
                                           oh, obj, NULL, OSD_QID_BLK);

        if (rc == 0)
                rc = osd_trunc_lock(obj, oh, true);

        RETURN(rc);
}
static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
{
        /* LU-2634: clear the extent format for fast symlink */
        ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);

        memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
        spin_lock(&inode->i_lock);
        LDISKFS_I(inode)->i_disksize = buflen;
        i_size_write(inode, buflen);
        spin_unlock(&inode->i_lock);
        osd_dirty_inode(inode, I_DIRTY_DATASYNC);

        return 0;
}
static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
                                    int bufsize, int write_NUL, loff_t *offs,
                                    handle_t *handle)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct buffer_head *bh = NULL;
        loff_t offset = *offs;
        loff_t new_size = i_size_read(inode);
        unsigned long block;
        int blocksize = 1 << inode->i_blkbits;
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        int err = 0;
        int size;
        int boffs;
        int dirty_inode = 0;
        bool create, sparse, sync = false;

        if (write_NUL) {
                /*
                 * long symlink write does not count the NUL terminator in
                 * bufsize, we write it, and the inode's file size does not
                 * count the NUL terminator either.
                 */
                ((char *)buf)[bufsize] = '\0';
                ++bufsize;
        }

        dirty_inode = test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
                                       &ei->i_flags);

        /* sparse checking is racy, but sparse is a very rare case, leave as is */
        sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
                  ((new_size - 1) >> inode->i_blkbits) + 1);

        while (bufsize > 0) {
                int credits = handle->h_buffer_credits;
                unsigned long last_block = (new_size == 0) ? 0 :
                                           (new_size - 1) >> inode->i_blkbits;

                if (bh)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                sync = (block > last_block || new_size == 0 || sparse);

                if (sync)
                        down(&ei->i_append_sem);

                bh = __ldiskfs_bread(handle, inode, block, 0);

                if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
                        CWARN("%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
                              osd_ino2name(inode),
                              offset, block, bufsize, *offs);

                if (IS_ERR_OR_NULL(bh)) {
                        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
                        int flags = LDISKFS_GET_BLOCKS_CREATE;

                        /* while the file system is being mounted, avoid
                         * preallocation otherwise mount can take a long
                         * time as mballoc cache is cold.
                         * XXX: this is a workaround until we have a proper
                         *      fix for precreation.
                         * XXX: works with extent-based files only */
                        if (!osd->od_cl_seq)
                                flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
                        bh = __ldiskfs_bread(handle, inode, block, flags);
                        create = true;
                } else {
                        if (sync) {
                                up(&ei->i_append_sem);
                                sync = false;
                        }
                        create = false;
                }

                if (IS_ERR_OR_NULL(bh)) {
                        if (bh == NULL) {
                                err = -EIO;
                        } else {
                                err = PTR_ERR(bh);
                                bh = NULL;
                        }

                        CERROR("%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
                               inode->i_sb->s_id, offset, block, bufsize, *offs,
                               credits, handle->h_buffer_credits, err);
                        break;
                }

                err = ldiskfs_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        break;
                }
                LASSERTF(boffs + size <= bh->b_size,
                         "boffs %d size %d bh->b_size %lu\n",
                         boffs, size, (unsigned long)bh->b_size);
                if (create) {
                        memset(bh->b_data, 0, bh->b_size);
                        if (sync) {
                                up(&ei->i_append_sem);
                                sync = false;
                        }
                }
                memcpy(bh->b_data + boffs, buf, size);
                err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
                if (err)
                        break;

                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                buf += size;
                bufsize -= size;
        }
        if (sync)
                up(&ei->i_append_sem);

        if (bh)
                brelse(bh);

        if (write_NUL)
                --new_size;
        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                spin_lock(&inode->i_lock);
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > ei->i_disksize) {
                        ei->i_disksize = i_size_read(inode);
                        dirty_inode = 1;
                }
                spin_unlock(&inode->i_lock);
        }
        if (dirty_inode)
                osd_dirty_inode(inode, I_DIRTY_DATASYNC);

        if (err == 0)
                *offs = offset;
        return err;
}
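/*
 * Example of the write_NUL convention above (illustrative): storing the
 * 5-byte symlink target "a/b/c" passes bufsize == 5 with write_NUL == 1,
 * so 6 bytes including the terminator are written to the block, while
 * new_size is decremented afterwards and i_size remains 5.
 */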
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                         const struct lu_buf *buf, loff_t *pos,
                         struct thandle *handle)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle *oh;
        ssize_t result;
        int is_link;

        LASSERT(dt_object_exists(dt));

        LASSERT(handle != NULL);
        LASSERT(inode != NULL);
        dquot_initialize(inode);

        /* XXX: don't check: one declared chunk can be used many times */
        /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */

        oh = container_of(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle->h_transaction != NULL);
        osd_trans_exec_op(env, handle, OSD_OT_WRITE);

        /* Write small symlink to inode body as we need to maintain correct
         * on-disk symlinks for ldiskfs.
         * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
         * does not count it in.
         */
        is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
        if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
                result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
        else
                result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
                                                  is_link, pos, oh->ot_handle);
        if (result == 0)
                result = buf->lb_len;

        osd_trans_exec_check(env, handle, OSD_OT_WRITE);

        RETURN(result);
}
static int osd_declare_fallocate(const struct lu_env *env,
                                 struct dt_object *dt, __u64 start, __u64 end,
                                 int mode, struct thandle *th)
{
        struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        long long quota_space = 0;
        /* 5 is max tree depth. (inode + 4 index blocks) */
        int depth = 5;
        int rc;

        ENTRY;

        /*
         * Only mode == 0 (which is standard prealloc) is supported now.
         * The rest of the mode options are not supported yet.
         */
        if (mode & ~FALLOC_FL_KEEP_SIZE)
                RETURN(-EOPNOTSUPP);

        LASSERT(th);
        LASSERT(inode);

        /* quota space for metadata blocks;
         * an approximate metadata estimate should be good enough.
         */
        quota_space += PAGE_SIZE;
        quota_space += depth * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space) + toqb(end - start) +
                      LDISKFS_META_TRANS_BLOCKS(inode->i_sb);

        /* We don't need to reserve credits for the whole fallocate here.
         * We reserve space only for metadata. Fallocate credits are
         * extended as required
         */
        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), NULL, OSD_QID_BLK);
        RETURN(rc);
}
static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
                         __u64 start, __u64 end, int mode, struct thandle *th)
{
        struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
        handle_t *handle = ldiskfs_journal_current_handle();
        unsigned int save_credits = oh->ot_credits;
        struct osd_object *obj = osd_dt_obj(dt);
        struct inode *inode = obj->oo_inode;
        struct ldiskfs_map_blocks map;
        unsigned int credits;
        ldiskfs_lblk_t blen;
        ldiskfs_lblk_t boff;
        loff_t new_size = 0;
        int depth = 0;
        int flags;
        int rc = 0;

        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));
        LASSERT(inode != NULL);

        CDEBUG(D_INODE, "fallocate: inode #%lu: start %llu end %llu mode %d\n",
               inode->i_ino, start, end, mode);

        dquot_initialize(inode);

        LASSERT(th);

        boff = start >> inode->i_blkbits;
        blen = (ALIGN(end, 1 << inode->i_blkbits) >> inode->i_blkbits) - boff;

        flags = LDISKFS_GET_BLOCKS_CREATE;
        if (mode & FALLOC_FL_KEEP_SIZE)
                flags |= LDISKFS_GET_BLOCKS_KEEP_SIZE;

        inode_lock(inode);

        /*
         * We only support preallocation for extent-based files.
         */
        if (!(ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)))
                GOTO(out, rc = -EOPNOTSUPP);

        if (!(mode & FALLOC_FL_KEEP_SIZE) && (end > i_size_read(inode) ||
            end > LDISKFS_I(inode)->i_disksize)) {
                new_size = end;
                rc = inode_newsize_ok(inode, new_size);
                if (rc)
                        GOTO(out, rc);
        }

        inode_dio_wait(inode);

        map.m_lblk = boff;
        map.m_len = blen;

        /* Don't normalize the request if it can fit in one extent so
         * that it doesn't get unnecessarily split into multiple extents.
         */
        if (blen <= EXT_UNWRITTEN_MAX_LEN)
                flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;

        /*
         * credits to insert 1 extent into extent tree.
         */
        credits = osd_chunk_trans_blocks(inode, blen);
        depth = ext_depth(inode);

        while (rc >= 0 && blen) {
                loff_t epos;

                /*
                 * Recalculate credits when extent tree depth changes.
                 */
                if (depth != ext_depth(inode)) {
                        credits = osd_chunk_trans_blocks(inode, blen);
                        depth = ext_depth(inode);
                }

                /* TODO: quota check */
                rc = osd_extend_restart_trans(handle, credits);
                if (rc)
                        break;

                rc = ldiskfs_map_blocks(handle, inode, &map, flags);
                if (rc <= 0) {
                        CDEBUG(D_INODE,
                               "inode #%lu: block %u: len %u: ldiskfs_map_blocks returned %d\n",
                               inode->i_ino, map.m_lblk, map.m_len, rc);
                        ldiskfs_mark_inode_dirty(handle, inode);
                        break;
                }

                map.m_lblk += rc;
                map.m_len = blen = blen - rc;
                epos = (loff_t)map.m_lblk << inode->i_blkbits;
                inode->i_ctime = current_time(inode);
                if (new_size) {
                        if (epos > end)
                                epos = end;
                        if (ldiskfs_update_inode_size(inode, epos) & 0x1)
                                inode->i_mtime = inode->i_ctime;
                } else {
                        if (epos > inode->i_size)
                                ldiskfs_set_inode_flag(inode,
                                                       LDISKFS_INODE_EOFBLOCKS);
                }

                ldiskfs_mark_inode_dirty(handle, inode);
        }

out:
        inode_unlock(inode);

        /* extend credits if needed for operations such as attribute set */
        if (rc >= 0)
                rc = osd_extend_restart_trans(handle, save_credits);

        RETURN(rc);
}
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
                             __u64 start, __u64 end, struct thandle *th)
{
        struct osd_thandle *oh;
        struct inode *inode;
        int rc;

        ENTRY;

        LASSERT(th);
        oh = container_of(th, struct osd_thandle, ot_super);

        /*
         * we don't need to reserve credits for the whole truncate;
         * it's not possible as truncate may need to free too many
         * blocks and that won't fit a single transaction. instead
         * we reserve credits to change i_size and put inode onto
         * orphan list. if needed truncate will extend or restart
         * the transaction
         */
        osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
                             osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);

        inode = osd_dt_obj(dt)->oo_inode;
        LASSERT(inode);

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), 0, oh, osd_dt_obj(dt),
                                   NULL, OSD_QID_BLK);

        if (rc == 0)
                rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);

        RETURN(rc);
}
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
                     __u64 start, __u64 end, struct thandle *th)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        struct inode *inode = obj->oo_inode;
        struct osd_access_lock *al;
        struct osd_thandle *oh;
        int rc = 0, found = 0;
        bool grow = false;

        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));
        LASSERT(inode != NULL);
        dquot_initialize(inode);

        LASSERT(th);
        oh = container_of(th, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle->h_transaction != NULL);

        /* we used to skip truncates to the current size as an optimization
         * on the OST. with DoM we can get an attr_set that sets a specific
         * size (MDS_REINT) followed by a truncate RPC that would essentially
         * be skipped. this is bad.. so, disable this optimization on the MDS
         * until the client stops sending MDS_REINT (LU-11033) -bzzz
         */
        if (osd->od_is_ost && i_size_read(inode) == start)
                RETURN(0);

        osd_trans_exec_op(env, th, OSD_OT_PUNCH);

        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < start)
                grow = true;
        i_size_write(inode, start);
        spin_unlock(&inode->i_lock);
        /* if object holds encrypted content, we need to make sure we truncate
         * on an encryption unit boundary, or subsequent reads will get
         * corrupted content
         */
        if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
            start & ~LUSTRE_ENCRYPTION_MASK)
                start = (start & LUSTRE_ENCRYPTION_MASK) +
                        LUSTRE_ENCRYPTION_UNIT_SIZE;
        ll_truncate_pagecache(inode, start);

        /* optimize grow case */
        if (grow) {
                osd_execute_truncate(obj);
                GOTO(out, rc);
        }

        inode_lock(inode);
        /* add to orphan list to ensure truncate completion
         * if this transaction succeeds. ldiskfs_truncate()
         * will take the inode out of the list
         */
        rc = ldiskfs_orphan_add(oh->ot_handle, inode);
        inode_unlock(inode);
        if (rc != 0)
                GOTO(out, rc);

        list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
                if (obj != al->tl_obj)
                        continue;
                LASSERT(al->tl_shared == 0);
                found = 1;
                /* do actual truncate in osd_trans_stop() */
                al->tl_truncate = 1;
                break;
        }
        LASSERT(found);

out:
        RETURN(rc);
}
static int fiemap_check_ranges(struct inode *inode,
                               u64 start, u64 len, u64 *new_len)
{
        u64 maxbytes;

        *new_len = len;

        if (len == 0)
                return -EINVAL;

        if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
                maxbytes = inode->i_sb->s_maxbytes;
        else
                maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;

        if (start > maxbytes)
                return -EFBIG;

        /*
         * Shrink request scope to what the fs can actually handle.
         */
        if (len > maxbytes || (maxbytes - len) < start)
                *new_len = maxbytes - start;

        return 0;
}

/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS	(UINT_MAX / sizeof(struct fiemap_extent))
static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
                          struct fiemap *fm)
{
        struct fiemap_extent_info fieinfo = {0, };
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        u64 len;
        int rc;
        mm_segment_t cur_fs;

        LASSERT(inode);
        if (inode->i_op->fiemap == NULL)
                return -EOPNOTSUPP;

        if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
                return -EINVAL;

        rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
        if (rc)
                return rc;

        fieinfo.fi_flags = fm->fm_flags;
        fieinfo.fi_extents_max = fm->fm_extent_count;
        fieinfo.fi_extents_start = fm->fm_extents;

        if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
                filemap_write_and_wait(inode->i_mapping);

        /* Save the previous address limit */
        cur_fs = get_fs();
        /* Set the address limit of the kernel */
        set_fs(KERNEL_DS);

        rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
        fm->fm_flags = fieinfo.fi_flags;
        fm->fm_mapped_extents = fieinfo.fi_extents_mapped;

        /* Restore the previous address limit */
        set_fs(cur_fs);

        return rc;
}
static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
                       __u64 start, __u64 end, enum lu_ladvise_type advice)
{
        struct osd_object *obj = osd_dt_obj(dt);
        int rc = 0;

        ENTRY;

        switch (advice) {
        case LU_LADVISE_DONTNEED:
                if (end)
                        invalidate_mapping_pages(obj->oo_inode->i_mapping,
                                                 start >> PAGE_SHIFT,
                                                 (end - 1) >> PAGE_SHIFT);
                break;
        default:
                rc = -ENOTSUPP;
                break;
        }

        RETURN(rc);
}
static loff_t osd_lseek(const struct lu_env *env, struct dt_object *dt,
                        loff_t offset, int whence)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct inode *inode = obj->oo_inode;
        struct file *file;
        loff_t result;

        ENTRY;

        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));
        LASSERT(inode);
        LASSERT(offset >= 0);

        file = osd_quasi_file(env, inode);
        result = file->f_op->llseek(file, offset, whence);

        /*
         * If 'offset' is beyond the end of the object file then treat it
         * not as an error but as a valid case for SEEK_HOLE and return
         * 'offset' as the result. LOV will decide if it is beyond the real
         * end of file or not.
         */
        if (whence == SEEK_HOLE && result == -ENXIO)
                result = offset;

        CDEBUG(D_INFO, "seek %s from %lld: %lld\n", whence == SEEK_HOLE ?
               "hole" : "data", offset, result);

        RETURN(result);
}
/*
 * in some cases we may need declare methods for objects being created,
 * e.g. when we create a symlink
 */
const struct dt_body_operations osd_body_ops_new = {
        .dbo_declare_write = osd_declare_write,
};

const struct dt_body_operations osd_body_ops = {
        .dbo_read = osd_read,
        .dbo_declare_write = osd_declare_write,
        .dbo_write = osd_write,
        .dbo_bufs_get = osd_bufs_get,
        .dbo_bufs_put = osd_bufs_put,
        .dbo_write_prep = osd_write_prep,
        .dbo_declare_write_commit = osd_declare_write_commit,
        .dbo_write_commit = osd_write_commit,
        .dbo_read_prep = osd_read_prep,
        .dbo_declare_punch = osd_declare_punch,
        .dbo_punch = osd_punch,
        .dbo_fiemap_get = osd_fiemap_get,
        .dbo_ladvise = osd_ladvise,
        .dbo_declare_fallocate = osd_declare_fallocate,
        .dbo_fallocate = osd_fallocate,
        .dbo_lseek = osd_lseek,
};
/**
 * Get a truncate lock
 *
 * In order to take multi-transaction truncate out of the main transaction
 * we let the caller grab a lock on the object passed. The lock can be
 * shared (for writes) or exclusive (for truncate). It's not allowed to mix
 * truncate and write in the same transaction handle (do not confuse with
 * big ldiskfs transaction containing lots of handles).
 * The lock must be taken at declaration.
 *
 * \param obj		object to lock
 * \param oh		osd transaction handle
 * \param shared	shared or exclusive
 *
 * \retval 0		lock is granted
 * \retval -ENOMEM	no memory to allocate lock
 */
int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
{
        struct osd_access_lock *al, *tmp;

        LASSERT(obj);
        LASSERT(oh);

        list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
                if (tmp->tl_obj != obj)
                        continue;
                LASSERT(tmp->tl_shared == shared);
                /* found same lock */
                return 0;
        }

        OBD_ALLOC_PTR(al);
        if (unlikely(al == NULL))
                return -ENOMEM;
        al->tl_obj = obj;
        al->tl_truncate = false;
        if (shared)
                down_read(&obj->oo_ext_idx_sem);
        else
                down_write(&obj->oo_ext_idx_sem);
        al->tl_shared = shared;
        lu_object_get(&obj->oo_dt.do_lu);

        list_add(&al->tl_list, &oh->ot_trunc_locks);

        return 0;
}
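/*
 * Usage sketch (see the declare paths above): writers take the lock shared
 * and truncate takes it exclusive, always at declaration time, e.g.
 * osd_trunc_lock(obj, oh, true) in osd_declare_write_commit() and
 * osd_trunc_lock(obj, oh, false) in osd_declare_punch().
 */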
void osd_trunc_unlock_all(const struct lu_env *env, struct list_head *list)
{
        struct osd_access_lock *al, *tmp;

        list_for_each_entry_safe(al, tmp, list, tl_list) {
                if (al->tl_shared)
                        up_read(&al->tl_obj->oo_ext_idx_sem);
                else
                        up_write(&al->tl_obj->oo_ext_idx_sem);
                osd_object_put(env, al->tl_obj);
                list_del(&al->tl_list);
                OBD_FREE_PTR(al);
        }
}
void osd_execute_truncate(struct osd_object *obj)
{
        struct osd_device *d = osd_obj2dev(obj);
        struct inode *inode = obj->oo_inode;
        __u64 size;

        /* simulate crash before (or in the middle of) delayed truncate */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
                struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
                struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);

                mutex_lock(&sbi->s_orphan_lock);
                list_del_init(&ei->i_orphan);
                mutex_unlock(&sbi->s_orphan_lock);
                return;
        }

        size = i_size_read(inode);
        inode_lock(inode);
        /* if object holds encrypted content, we need to make sure we truncate
         * on an encryption unit boundary, or block content will get corrupted
         */
        if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
            size & ~LUSTRE_ENCRYPTION_MASK)
                inode->i_size = (size & LUSTRE_ENCRYPTION_MASK) +
                                LUSTRE_ENCRYPTION_UNIT_SIZE;
        ldiskfs_truncate(inode);
        inode_unlock(inode);
        if (inode->i_size != size) {
                spin_lock(&inode->i_lock);
                i_size_write(inode, size);
                LDISKFS_I(inode)->i_disksize = size;
                spin_unlock(&inode->i_lock);
                osd_dirty_inode(inode, I_DIRTY_DATASYNC);
        }

        /*
         * For a partial-page truncate, flush the page to disk immediately to
         * avoid data corruption during direct disk write. b=17397
         */
        if ((size & ~PAGE_MASK) == 0)
                return;
        if (osd_use_page_cache(d)) {
                filemap_fdatawrite_range(inode->i_mapping, size, size + 1);
        } else {
                /* Notice we use "wait" version to ensure I/O is complete */
                filemap_write_and_wait_range(inode->i_mapping, size, size + 1);
                invalidate_mapping_pages(inode->i_mapping, size >> PAGE_SHIFT,
                                         size >> PAGE_SHIFT);
        }
}
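/*
 * Example of the boundary rounding above (assuming an encryption unit of
 * LUSTRE_ENCRYPTION_UNIT_SIZE == 4096): truncating an encrypted object to
 * 5000 first runs ldiskfs_truncate() at 8192, the next unit boundary, and
 * then i_size is written back to 5000 under i_lock.
 */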
void osd_process_truncates(struct list_head *list)
{
        struct osd_access_lock *al;

        LASSERT(journal_current_handle() == NULL);

        list_for_each_entry(al, list, tl_list) {
                if (al->tl_shared)
                        continue;
                if (!al->tl_truncate)
                        continue;
                osd_execute_truncate(al->tl_obj);
        }
}