/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 */
/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 */
#include <obd_support.h>

#include "osd_internal.h"

#include <ldiskfs/ldiskfs_extents.h>
static inline bool osd_use_page_cache(struct osd_device *d)
{
        /* do not use pagecache if both write and read caching are disabled */
        if (d->od_writethrough_cache + d->od_read_cache == 0)
                return false;
        /* use pagecache by default */
        return true;
}
static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                            int rw, int line, int pages)
{
        int blocks, i;

        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
                 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_elapsed = ktime_set(0, 0);
        /* must be counted before, so assert */
        iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;

        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
                return 0;
        }
        /* start with 1MB for 4K blocks */
        i = 256;
        while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
                i <<= 1;

        CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
               (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
        pages = i;
        blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
        iobuf->dr_max_pages = 0;
        CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
               (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
        lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
        iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
        if (unlikely(iobuf->dr_blocks == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
        iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
        if (unlikely(iobuf->dr_pages == NULL))
                return -ENOMEM;

        lu_buf_realloc(&iobuf->dr_lnb_buf,
                       pages * sizeof(iobuf->dr_lnbs[0]));
        iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
        if (unlikely(iobuf->dr_lnbs == NULL))
                return -ENOMEM;

        iobuf->dr_max_pages = pages;

        return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
        __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
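/*
 * Sizing example (illustrative): with PAGE_SIZE = 4096 and a 1KB
 * ldiskfs blocksize (s_blocksize_bits = 10), each page maps to
 * PAGE_SIZE >> 10 = 4 blocks, so a full 256-page (1MB) bulk needs
 * room for 1024 entries in dr_blocks.
 */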
static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
                               struct niobuf_local *lnb)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
        iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
        iobuf->dr_npages++;
}
void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
        int rw = iobuf->dr_rw;

        if (iobuf->dr_elapsed_valid) {
                iobuf->dr_elapsed_valid = 0;
                LASSERT(iobuf->dr_dev == d);
                LASSERT(iobuf->dr_frags > 0);
                lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS + rw],
                                 iobuf->dr_frags);
                lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME + rw],
                                      ktime_to_ms(iobuf->dr_elapsed));
        }
}
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
# ifdef HAVE_BI_STATUS
        int error = bio->bi_status;
# else
        int error = bio->bi_error;
# endif
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
        struct osd_iobuf *iobuf = bio->bi_private;
        struct bio_vec *bvl;
        struct bvec_iter_all iter;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL! This should never "
                       "happen. Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console. Please "
                       "report this to <https://jira.whamcloud.com/>, along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps). Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, "
                       "bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, "
                       "bi_cnt: %d, bi_private: %p\n", bio->bi_next,
                       (unsigned long)bio->bi_flags,
                       bio->bi_vcnt, bio_idx(bio),
                       bio_sectors(bio) << 9, bio->bi_end_io,
#ifdef HAVE_BI_CNT
                       atomic_read(&bio->bi_cnt),
#else
                       atomic_read(&bio->__bi_cnt),
#endif
                       bio->bi_private);
                return;
        }
        /* the check is outside of the loop for performance reasons -bzzz */
        if (!bio_data_dir(bio)) {
                bio_for_each_segment_all(bvl, bio, iter) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl_to_page(bvl));
                        LASSERT(PageLocked(bvl_to_page(bvl)));
                }
                atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
                atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /*
         * set dr_elapsed before dr_numreqs turns to 0, otherwise
         * it's possible that service thread will see dr_numreqs
         * is zero, but dr_elapsed is not set yet, leading to lost
         * data in this processing and an assertion in a subsequent
         * call to osd_fini_iobuf()
         */
        if (atomic_read(&iobuf->dr_numreqs) == 1) {
                ktime_t now = ktime_get();

                iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
                iobuf->dr_elapsed_valid = 1;
        }
        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);
        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq(). It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST. The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
}
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
        struct osd_device *osd = iobuf->dr_dev;
        struct obd_histogram *h = osd->od_brw_stats.hist;

        iobuf->dr_frags++;
        atomic_inc(&iobuf->dr_numreqs);

        if (iobuf->dr_rw == 0) {
                atomic_inc(&osd->od_r_in_flight);
                lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
                                 atomic_read(&osd->od_r_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
        } else if (iobuf->dr_rw == 1) {
                atomic_inc(&osd->od_w_in_flight);
                lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
                                 atomic_read(&osd->od_w_in_flight));
                lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
        } else {
                LBUG();
        }
}
static void osd_submit_bio(int rw, struct bio *bio)
{
        LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
        if (rw == 0)
                submit_bio(READ, bio);
        else
                submit_bio(WRITE, bio);
#else
        submit_bio(bio);
#endif
}
static int can_be_merged(struct bio *bio, sector_t sector)
{
        if (bio == NULL)
                return 0;

        return bio_end_sector(bio) == sector ? 1 : 0;
}
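/*
 * Merging rule (illustrative): a bio currently covering sectors
 * [s, s + n) can absorb the next fragment only if that fragment starts
 * exactly at sector s + n, i.e. at bio_end_sector(bio); any gap or
 * overlap forces osd_do_bio() to start a new bio.
 */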
#if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
/*
 * This function will change the data written, thus it should only be
 * used when checking the data integrity feature
 */
static void bio_integrity_fault_inject(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                char *addr = kmap(page);

                /* corrupt the first byte so guard verification fails */
                *addr = ~(*addr);
                kunmap(page);
                break;
        }
}
static int bio_dif_compare(__u16 *expected_guard_buf, void *bio_prot_buf,
                           unsigned int sectors, int tuple_size)
{
        __u16 *expected_guard;
        __u16 *bio_guard;
        int i;

        expected_guard = expected_guard_buf;
        for (i = 0; i < sectors; i++) {
                bio_guard = (__u16 *)bio_prot_buf;
                if (*bio_guard != *expected_guard) {
                        CERROR("unexpected guard tags on sector %d "
                               "expected guard %u, bio guard "
                               "%u, sectors %u, tuple size %d\n",
                               i, *expected_guard, *bio_guard, sectors,
                               tuple_size);
                        return -EIO;
                }
                expected_guard++;
                bio_prot_buf += tuple_size;
        }
        return 0;
}
static int osd_bio_integrity_compare(struct bio *bio, struct block_device *bdev,
                                     struct osd_iobuf *iobuf, int index)
{
        struct blk_integrity *bi = bdev_get_integrity(bdev);
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct niobuf_local *lnb;
        unsigned short sector_size = blk_integrity_interval(bi);
        void *bio_prot_buf = page_address(bip->bip_vec->bv_page) +
                bip->bip_vec->bv_offset;
        struct bio_vec *bv;
        sector_t sector = bio_start_sector(bio);
        unsigned int i, sectors, total;
        __u16 *expected_guard;
        int rc;

        total = 0;
        bio_for_each_segment_all(bv, bio, i) {
                lnb = iobuf->dr_lnbs[index];
                expected_guard = lnb->lnb_guards;
                sectors = bv->bv_len / sector_size;
                if (lnb->lnb_guard_rpc) {
                        rc = bio_dif_compare(expected_guard, bio_prot_buf,
                                             sectors, bi->tuple_size);
                        if (rc)
                                return rc;
                }

                sector += sectors;
                bio_prot_buf += sectors * bi->tuple_size;
                total += sectors * bi->tuple_size;
                LASSERT(total <= bip_size(bio->bi_integrity));
                index++;
        }
        return 0;
}
static int osd_bio_integrity_handle(struct osd_device *osd, struct bio *bio,
                                    struct osd_iobuf *iobuf,
                                    int start_page_idx, bool fault_inject,
                                    bool integrity_enabled)
{
        struct super_block *sb = osd_sb(osd);
        int rc;
#ifdef HAVE_BIO_INTEGRITY_PREP_FN
        integrity_gen_fn *generate_fn = NULL;
        integrity_vrfy_fn *verify_fn = NULL;
#endif

        if (!integrity_enabled)
                return 0;

#ifdef HAVE_BIO_INTEGRITY_PREP_FN
        rc = osd_get_integrity_profile(osd, &generate_fn, &verify_fn);
        if (rc)
                return rc;

        rc = bio_integrity_prep_fn(bio, generate_fn, verify_fn);
#else
        rc = bio_integrity_prep(bio);
#endif
        if (rc)
                return rc;

        /* Verify and inject fault only when writing */
        if (iobuf->dr_rw == 1) {
                if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_CMP))) {
                        rc = osd_bio_integrity_compare(bio, sb->s_bdev, iobuf,
                                                       start_page_idx);
                        if (rc)
                                return rc;
                }

                if (unlikely(fault_inject))
                        bio_integrity_fault_inject(bio);
        }

        return 0;
}
#ifdef HAVE_BIO_INTEGRITY_PREP_FN
# ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_integrity_complete_routine(struct bio *bio)
# else
static void dio_integrity_complete_routine(struct bio *bio, int error)
# endif
{
        struct osd_bio_private *bio_private = bio->bi_private;

        bio->bi_private = bio_private->obp_iobuf;
# ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
        dio_complete_routine(bio);
# else
        dio_complete_routine(bio, error);
# endif

        OBD_FREE_PTR(bio_private);
}
#endif /* HAVE_BIO_INTEGRITY_PREP_FN */
#else /* !CONFIG_BLK_DEV_INTEGRITY */
#define osd_bio_integrity_handle(osd, bio, iobuf, start_page_idx, \
                                 fault_inject, integrity_enabled) 0
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
                        bool integrity_enabled, int start_page_idx,
                        struct osd_bio_private **pprivate)
{
#ifdef HAVE_BIO_INTEGRITY_PREP_FN
        struct osd_bio_private *bio_private;

        if (integrity_enabled) {
                OBD_ALLOC_GFP(bio_private, sizeof(*bio_private), GFP_NOIO);
                if (bio_private == NULL)
                        return -ENOMEM;
                bio->bi_end_io = dio_integrity_complete_routine;
                bio->bi_private = bio_private;
                bio_private->obp_start_page_idx = start_page_idx;
                bio_private->obp_iobuf = iobuf;
                *pprivate = bio_private;
        } else {
                bio->bi_end_io = dio_complete_routine;
                bio->bi_private = iobuf;
        }
#else
        bio->bi_end_io = dio_complete_routine;
        bio->bi_private = iobuf;
#endif
        return 0;
}
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        sector_t *blocks = iobuf->dr_blocks;
        int total_blocks = npages * blocks_per_page;
        struct super_block *sb = inode->i_sb;
        int sector_bits = sb->s_blocksize_bits - 9;
        unsigned int blocksize = sb->s_blocksize;
        struct block_device *bdev = sb->s_bdev;
        struct osd_bio_private *bio_private = NULL;
        struct bio *bio = NULL;
        int bio_start_page_idx;
        struct page *page;
        unsigned int page_offset;
        sector_t sector;
        int nblocks;
        int block_idx;
        int page_idx;
        int i;
        int rc = 0;
        bool fault_inject;
        bool integrity_enabled;
        struct blk_plug plug;
        ENTRY;

        fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
        LASSERT(iobuf->dr_npages == npages);

        integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);

        osd_brw_stats_update(osd, iobuf);
        iobuf->dr_start_time = ktime_get();

        blk_start_plug(&plug);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {
                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {
                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) { /* hole */
                                LASSERTF(iobuf->dr_rw == 0,
                                         "page_idx %u, block_idx %u, i %u\n",
                                         page_idx, block_idx, i);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */
                        if (bio != NULL) {
                                struct request_queue *q = bio_get_queue(bio);
                                unsigned int bi_size = bio_sectors(bio) << 9;

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE,
                                       "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
                                       bio_sectors(bio),
                                       queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       0, queue_max_hw_segments(q));
                                rc = osd_bio_integrity_handle(osd, bio,
                                        iobuf, bio_start_page_idx,
                                        fault_inject, integrity_enabled);
                                if (rc) {
                                        bio_put(bio);
                                        goto out;
                                }

                                record_start_io(iobuf, bi_size);
                                osd_submit_bio(iobuf->dr_rw, bio);
                        }
                        bio_start_page_idx = page_idx;
                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio_set_dev(bio, bdev);
                        bio_set_sector(bio, sector);
#ifdef HAVE_BI_RW
                        bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
#else
                        bio->bi_opf = (iobuf->dr_rw == 0) ? READ : WRITE;
#endif
                        rc = osd_bio_init(bio, iobuf, integrity_enabled,
                                          bio_start_page_idx, &bio_private);
                        if (rc) {
                                bio_put(bio);
                                goto out;
                        }

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                rc = osd_bio_integrity_handle(osd, bio, iobuf,
                                              bio_start_page_idx,
                                              fault_inject,
                                              integrity_enabled);
                if (rc)
                        goto out;

                record_start_io(iobuf, bio_sectors(bio) << 9);
                osd_submit_bio(iobuf->dr_rw, bio);
                rc = 0;
        }
out:
        blk_finish_plug(&plug);

        /* in order to achieve better IO throughput, we don't wait for write
         * completion here. instead we proceed with transaction commit in
         * parallel and wait for IO completion once the transaction is stopped.
         * see osd_trans_stop() for more details -bzzz */
        if (iobuf->dr_rw == 0 || fault_inject) {
                wait_event(iobuf->dr_wait,
                           atomic_read(&iobuf->dr_numreqs) == 0);
                osd_fini_iobuf(osd, iobuf);
        }

        if (rc == 0)
                rc = iobuf->dr_error;

        if (bio_private)
                OBD_FREE_PTR(bio_private);

        RETURN(rc);
}
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
                                   struct niobuf_local *lnb, int maxlnb)
{
        int rc = 0;
        ENTRY;

        *nrpages = 0;

        while (len > 0) {
                int poff = offset & (PAGE_SIZE - 1);
                int plen = PAGE_SIZE - poff;

                if (*nrpages >= maxlnb) {
                        rc = -EOVERFLOW;
                        break;
                }

                if (plen > len)
                        plen = len;
                lnb->lnb_file_offset = offset;
                lnb->lnb_page_offset = poff;
                lnb->lnb_len = plen;
                /* lnb->lnb_flags = rnb->rnb_flags; */
                lnb->lnb_flags = 0;
                lnb->lnb_page = NULL;
                lnb->lnb_rc = 0;
                lnb->lnb_guard_rpc = 0;
                lnb->lnb_guard_disk = 0;

                LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
                         (long long)len);
                offset += plen;
                len -= plen;
                lnb++;
                (*nrpages)++;
        }

        RETURN(rc);
}
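/*
 * Worked example (illustrative): with 4KB pages, a request of
 * len = 6144 bytes at offset = 6144 (poff = 2048) produces two lnbs:
 * [6144, +2048) finishing the first page and [8192, +4096) covering
 * the whole second page.
 */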
static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
                                 loff_t offset, gfp_t gfp_mask)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        struct page *page;
        int cur = oti->oti_dio_pages_used;

        LASSERT(inode);

        if (osd_use_page_cache(d)) {
                page = find_or_create_page(inode->i_mapping,
                                           offset >> PAGE_SHIFT,
                                           gfp_mask);
                if (likely(page))
                        LASSERT(!test_bit(PG_private_2, &page->flags));
                else
                        lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
        } else {
                LASSERT(oti->oti_dio_pages);

                if (unlikely(!oti->oti_dio_pages[cur])) {
                        LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
                        page = alloc_page(gfp_mask);
                        if (!page)
                                return NULL;
                        oti->oti_dio_pages[cur] = page;
                }

                page = oti->oti_dio_pages[cur];
                LASSERT(!test_bit(PG_private_2, &page->flags));
                set_bit(PG_private_2, &page->flags);
                oti->oti_dio_pages_used++;

                LASSERT(!PageLocked(page));
                lock_page(page);

                LASSERT(!page->mapping);
                LASSERT(!PageWriteback(page));
                ClearPageUptodate(page);

                page->index = offset >> PAGE_SHIFT;
        }

        return page;
}
/*
 * there are the following "locks":
 * - lock pages, unlock
 * - lock partial page
 */

/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param lnb           array of pages undergoing IO
 * \param npages        number of pages in \a lnb
 *
 * \retval 0            on success
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct pagevec pvec;
        int i;

        ll_pagevec_init(&pvec, 0);

        for (i = 0; i < npages; i++) {
                struct page *page = lnb[i].lnb_page;

                if (page == NULL)
                        continue;

                /* if the page isn't cached, then reset uptodate
                 * to prevent reuse */
                if (test_bit(PG_private_2, &page->flags)) {
                        clear_bit(PG_private_2, &page->flags);
                        ClearPageUptodate(page);
                        if (lnb[i].lnb_locked)
                                unlock_page(page);
                        oti->oti_dio_pages_used--;
                } else {
                        if (lnb[i].lnb_locked)
                                unlock_page(page);
                        if (pagevec_add(&pvec, page) == 0)
                                pagevec_release(&pvec);
                }
                dt_object_put(env, dt);

                lnb[i].lnb_page = NULL;
        }

        LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);

        /* Release any partial pagevec */
        pagevec_release(&pvec);

        RETURN(0);
}
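/*
 * Note: PG_private_2 serves here as a marker for pages allocated
 * privately for direct IO (bypassing the pagecache), letting
 * osd_bufs_put() distinguish them from find_or_create_page() pages and
 * return them to the per-thread oti_dio_pages pool instead of the
 * pagevec.
 */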
/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for the same
 * region, but partial-page truncate can leave dirty pages in the cache for
 * ldiskfs. It's possible the writeout on such a page is in progress when we
 * access it. It's also possible that during this writeout we put new
 * (partial) data into the page, but won't be able to proceed in
 * filter_commitrw_write(). Therefore, just wait for writeout completion as
 * it should be rare enough.
 *
 * \param env           thread execution environment
 * \param dt            dt object undergoing IO (OSD object + methods)
 * \param pos           byte offset of IO start
 * \param len           number of bytes of IO
 * \param lnb           array of extents undergoing IO
 * \param rw            read or write operation, and other flags
 * \param capa          capabilities
 *
 * \retval pages        (zero or more) loaded successfully
 * \retval -ENOMEM      on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
                        loff_t pos, ssize_t len, struct niobuf_local *lnb,
                        int maxlnb, enum dt_bufs_type rw)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_object *obj = osd_dt_obj(dt);
        int npages, i, rc = 0;
        gfp_t gfp_mask;

        LASSERT(obj->oo_inode);

        if (!osd_use_page_cache(osd_obj2dev(obj))) {
                if (unlikely(!oti->oti_dio_pages)) {
                        OBD_ALLOC(oti->oti_dio_pages,
                                  sizeof(struct page *) * PTLRPC_MAX_BRW_PAGES);
                        if (!oti->oti_dio_pages)
                                return -ENOMEM;
                }
        }

        rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
        if (rc)
                RETURN(rc);

        /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
        gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
                                             GFP_HIGHUSER;
        for (i = 0; i < npages; i++, lnb++) {
                lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
                                             gfp_mask);
                if (lnb->lnb_page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);

                lnb->lnb_locked = 1;
                wait_on_page_writeback(lnb->lnb_page);
                BUG_ON(PageWriteback(lnb->lnb_page));

                lu_object_get(&dt->do_lu);
        }

        RETURN(i);

cleanup:
        if (i > 0)
                osd_bufs_put(env, dt, lnb - i, i);
        return rc;
}
#ifndef HAVE_LDISKFS_MAP_BLOCKS

#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif

struct bpointers {
        sector_t *blocks;
        unsigned long start;
        int num;
        int init_num;
        int create;
};
static long ldiskfs_ext_find_goal(struct inode *inode,
                                  struct ldiskfs_ext_path *path,
                                  unsigned long block, int *aflags)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ldiskfs_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ldiskfs_ext_pblock(ex) +
                               (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a starting block from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}
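/*
 * Goal arithmetic (illustrative): with 32768 blocks per group the
 * colouring term carves the group into 16 slots of 2048 blocks, so a
 * thread with pid % 16 == 3 starts allocating 6144 blocks past
 * bg_start.
 */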
static unsigned long new_blocks(handle_t *handle, struct inode *inode,
                                struct ldiskfs_ext_path *path,
                                unsigned long block, unsigned long *count,
                                int *err)
{
        struct ldiskfs_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbour allocated blocks */
        ar.lleft = block;
        *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = LDISKFS_MB_HINT_DATA;
        pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
static int ldiskfs_ext_new_extent_cb(struct inode *inode,
                                     struct ldiskfs_ext_path *path,
                                     struct ldiskfs_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                     struct ldiskfs_extent *ex,
#endif
                                     void *cbdata)
{
        struct bpointers *bp = cbdata;
        struct ldiskfs_extent nex;
        unsigned long pblock = 0;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
        if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
#else
        if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
#endif
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                               i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }

                return EXT_CONTINUE;
        }
        tgen = LDISKFS_I(inode)->i_ext_generation;
        count = ldiskfs_ext_calc_credits_for_insert(inode, path);

        handle = osd_journal_start(inode, LDISKFS_HT_MISC,
                                   count + LDISKFS_ALLOC_NEEDED + 1);
        if (IS_ERR(handle)) {
                return PTR_ERR(handle);
        }

        if (tgen != LDISKFS_I(inode)->i_ext_generation) {
                /* the tree has changed, so the path can be invalid at the
                 * moment */
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback func is
         * not protected by i_data_sem as a whole, so we patch it to store the
         * generation in the path and now verify that the tree hasn't changed */
        down_write((&LDISKFS_I(inode)->i_data_sem));

        /* validate extent, making sure the extent tree has not changed */
        if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
                /* cex is invalid, try again */
                up_write(&LDISKFS_I(inode)->i_data_sem);
                ldiskfs_journal_stop(handle);
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        BUG_ON(count > cex->ec_len);
        /* insert new extent */
        nex.ee_block = cpu_to_le32(cex->ec_block);
        ldiskfs_ext_store_pblock(&nex, pblock);
        nex.ee_len = cpu_to_le16(count);
        err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
        if (err) {
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
                 * but otherwise we'd need to call it on every free() */
                ldiskfs_discard_preallocations(inode);
#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
                ldiskfs_free_blocks(handle, inode, NULL,
                                    ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#else
                ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
                                    le16_to_cpu(nex.ee_len), 0);
#endif
                goto out;
        }

        /*
         * Putting the len of the actual extent we just inserted, we are
         * asking ldiskfs_ext_walk_space() to continue scanning after that
         * block
         */
        cex->ec_len = le16_to_cpu(nex.ee_len);
        cex->ec_start = ldiskfs_ext_pblock(&nex);
        BUG_ON(le16_to_cpu(nex.ee_len) == 0);
        BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);

out:
        up_write((&LDISKFS_I(inode)->i_data_sem));
        ldiskfs_journal_stop(handle);
1078 CERROR("hmm. why do we find this extent?\n");
1079 CERROR("initial space: %lu:%u\n",
1080 bp->start, bp->init_num);
1081 #ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
1082 CERROR("current extent: %u/%u/%llu %d\n",
1083 cex->ec_block, cex->ec_len,
1084 (unsigned long long)cex->ec_start,
1087 CERROR("current extent: %u/%u/%llu\n",
1088 cex->ec_block, cex->ec_len,
1089 (unsigned long long)cex->ec_start);
1093 if (cex->ec_block < bp->start)
1094 i = bp->start - cex->ec_block;
1095 if (i >= cex->ec_len)
1096 CERROR("nothing to do?! i = %d, e_num = %u\n",
1098 for (; i < cex->ec_len && bp->num; i++) {
1099 *(bp->blocks) = cex->ec_start + i;
1101 /* unmap any possible underlying metadata from
1102 * the block device mapping. bug 6998. */
1103 #ifndef HAVE_CLEAN_BDEV_ALIASES
1104 unmap_underlying_metadata(inode->i_sb->s_bdev,
1107 clean_bdev_aliases(inode->i_sb->s_bdev,
static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
                                   int clen, sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        struct bpointers bp;
        int err;

        if (index + clen >= inode->i_sb->s_maxbytes >> PAGE_SHIFT)
                return -EFBIG;

        bp.blocks = blocks;
        bp.start = index * blocks_per_page;
        bp.init_num = bp.num = clen * blocks_per_page;
        bp.create = create;

        CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
               bp.start, bp.start + bp.num - 1, (unsigned)inode->i_ino);

        err = ldiskfs_ext_walk_space(inode, bp.start, bp.num,
                                     ldiskfs_ext_new_extent_cb, &bp);
        ldiskfs_ext_invalidate_cache(inode);

        return err;
}
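/*
 * Index arithmetic (illustrative): with 4KB pages and 1KB blocks
 * (blocks_per_page = 4), a run of clen = 3 pages starting at page
 * index = 2 asks ldiskfs_ext_walk_space() for bp.num = 12 blocks
 * beginning at logical block bp.start = 8.
 */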
static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
                                          struct page **page, int pages,
                                          sector_t *blocks, int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        pgoff_t bitmap_max_page_index;
        sector_t *b;
        int rc = 0, i;

        bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
                                PAGE_SHIFT;
        for (i = 0, b = blocks; i < pages; i++, page++) {
                if ((*page)->index + 1 >= bitmap_max_page_index) {
                        rc = -EFBIG;
                        break;
                }
                rc = ldiskfs_map_inode_page(inode, *page, b, create);
                if (rc) {
                        CERROR("ino %lu, blk %llu create %d: rc %d\n",
                               inode->i_ino,
                               (unsigned long long)*b, create, rc);
                        break;
                }
                b += blocks_per_page;
        }
        return rc;
}
static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
                                           struct page **page,
                                           int pages, sector_t *blocks,
                                           int create)
{
        int rc = 0, i = 0, clen = 0;
        struct page *fp = NULL;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        /* pages are sorted already. so, we just have to find
         * contiguous ranges and process them properly */
        while (i < pages) {
                if (fp == NULL) {
                        /* start new extent */
                        fp = *page++;
                        clen = 1;
                        i++;
                        continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        i++;
                        continue;
                }

                /* process found extent */
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
                if (rc)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
        }

        if (fp)
                rc = osd_ldiskfs_map_nblocks(inode, fp->index, clen,
                                             blocks, create);
cleanup:
        return rc;
}
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int rc;

        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
                                                     blocks, create);
                return rc;
        }
        rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);

        return rc;
}
#else /* HAVE_LDISKFS_MAP_BLOCKS */
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
                                       int pages, sector_t *blocks,
                                       int create)
{
        int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, i = 0;
        struct page *fp = NULL;
        int clen = 0;
        pgoff_t max_page_index;
        handle_t *handle = NULL;

        max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

        CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
               inode->i_ino, pages, (*page)->index);

        if (create) {
                create = LDISKFS_GET_BLOCKS_CREATE;
                handle = ldiskfs_journal_current_handle();
                LASSERT(handle != NULL);
                rc = osd_attach_jinode(inode);
                if (rc)
                        return rc;
        }
        /* pages are sorted already. so, we just have to find
         * contiguous ranges and process them properly */
        while (i < pages) {
                long blen, total = 0;
                struct ldiskfs_map_blocks map = { 0 };

                if (fp == NULL) { /* start new extent */
                        fp = *page++;
                        clen = 1;
                        if (++i != pages)
                                continue;
                } else if (fp->index + clen == (*page)->index) {
                        /* continue the extent */
                        page++;
                        clen++;
                        if (++i != pages)
                                continue;
                }
                if (fp->index + clen >= max_page_index)
                        GOTO(cleanup, rc = -EFBIG);
                /* process found extent */
                map.m_lblk = fp->index * blocks_per_page;
                map.m_len = blen = clen * blocks_per_page;
cont_map:
                rc = ldiskfs_map_blocks(handle, inode, &map, create);
                if (rc >= 0) {
                        int c = 0;

                        for (; total < blen && c < map.m_len; c++, total++) {
                                if (rc == 0) {
                                        *(blocks + total) = 0;
                                        continue;
                                }
                                *(blocks + total) = map.m_pblk + c;
                                /* unmap any possible underlying
                                 * metadata from the block device
                                 * mapping. bug 6998. */
                                if ((map.m_flags & LDISKFS_MAP_NEW) &&
                                    create)
#ifndef HAVE_CLEAN_BDEV_ALIASES
                                        unmap_underlying_metadata(
                                                inode->i_sb->s_bdev,
                                                map.m_pblk + c);
#else
                                        clean_bdev_aliases(
                                                inode->i_sb->s_bdev,
                                                map.m_pblk + c, 1);
#endif
                        }
                        rc = 0;
                }
                if (rc == 0 && total < blen) {
                        map.m_lblk = fp->index * blocks_per_page + total;
                        map.m_len = blen - total;
                        goto cont_map;
                }
                if (rc != 0)
                        GOTO(cleanup, rc);

                /* look for next extent */
                fp = NULL;
                blocks += blocks_per_page * clen;
        }
cleanup:
        return rc;
}
#endif /* HAVE_LDISKFS_MAP_BLOCKS */
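/*
 * Mapping example (illustrative): for a 2-page extent with
 * blocks_per_page = 4, a fully mapped run starting at physical block
 * 1000 fills blocks[] with 1000..1007, while a hole on the read path
 * stores 0 so that osd_do_bio() zero-fills those page fragments
 * instead of reading them.
 */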
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        ktime_t start, end;
        s64 timediff;
        loff_t isize;
        __s64 maxidx;
        int i, rc = 0, cache = 0;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {
                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                /*
                 * till commit the content of the page is undefined
                 * we'll set it uptodate once bulk is done. otherwise
                 * subsequent reads can access non-stable data
                 */
                ClearPageUptodate(lnb[i].lnb_page);

                if (lnb[i].lnb_len == PAGE_SIZE)
                        continue;

                if (maxidx >= lnb[i].lnb_page->index) {
                        osd_iobuf_add_page(iobuf, &lnb[i]);
                } else {
                        long off;
                        char *p = kmap(lnb[i].lnb_page);

                        off = lnb[i].lnb_page_offset;
                        if (off)
                                memset(p, 0, off);
                        off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
                              ~PAGE_MASK;
                        if (off)
                                memset(p + off, 0, PAGE_SIZE - off);
                        kunmap(lnb[i].lnb_page);
                }
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
                        osd_fini_iobuf(osd, iobuf);
                }
        }
        RETURN(rc);
}
struct osd_fextent {
        sector_t        start;
        sector_t        end;
        unsigned int    mapped:1;
};
/* Check if a block is allocated or not */
static int osd_is_mapped(struct dt_object *dt, __u64 offset,
                         struct osd_fextent *cached_extent)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        sector_t block = offset >> inode->i_blkbits;
        sector_t start;
        struct fiemap_extent_info fei = { 0 };
        struct fiemap_extent fe = { 0 };
        mm_segment_t saved_fs;
        int rc;

        if (block >= cached_extent->start && block < cached_extent->end)
                return cached_extent->mapped;

        if (i_size_read(inode) == 0)
                return 0;

        /* Beyond EOF, must not be mapped */
        if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
                return 0;

        fei.fi_extents_max = 1;
        fei.fi_extents_start = &fe;

        saved_fs = get_fs();
        set_fs(KERNEL_DS);
        rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET - offset);
        set_fs(saved_fs);
        if (rc != 0)
                return 0;

        start = fe.fe_logical >> inode->i_blkbits;

        if (start > block) {
                cached_extent->start = block;
                cached_extent->end = start;
                cached_extent->mapped = 0;
        } else {
                cached_extent->start = start;
                cached_extent->end = (fe.fe_logical + fe.fe_length) >>
                                     inode->i_blkbits;
                cached_extent->mapped = 1;
        }

        return cached_extent->mapped;
}
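/*
 * Caching note: when fiemap returns an extent starting past the block
 * of interest, the gap [block, start) is remembered as unmapped;
 * otherwise the returned extent itself is cached as mapped. Either way
 * neighbouring pages of the same RPC are answered without another
 * fiemap call.
 */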
static int osd_declare_write_commit(const struct lu_env *env,
                                    struct dt_object *dt,
                                    struct niobuf_local *lnb, int npages,
                                    struct thandle *handle)
{
        const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle *oh;
        int extents = 1;
        int depth;
        int i;
        int newblocks;
        int rc = 0;
        int credits = 0;
        long long quota_space = 0;
        struct osd_fextent extent = { 0 };
        enum osd_quota_local_flags local_flags = 0;
        enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
        ENTRY;

        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        newblocks = npages;
        /* calculate number of extents (probably better to pass nb) */
        for (i = 0; i < npages; i++) {
                if (i && lnb[i].lnb_file_offset !=
                    lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
                        extents++;

                if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
                        lnb[i].lnb_flags |= OBD_BRW_MAPPED;
                else
                        quota_space += PAGE_SIZE;

                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
                 *
                 * XXX once we drop the 1.8 client support, the checking
                 * for whether a page is from cache can be simplified as:
                 * !(lnb[i].flags & OBD_BRW_SYNC)
                 *
                 * XXX we could handle this on a per-lnb basis as done by
                 * grant. */
                if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
                    (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        declare_flags |= OSD_QID_FORCE;
        }
        /*
         * each extent can go into a new leaf causing a split;
         * 5 is max tree depth: inode + 4 index blocks;
         * with blockmaps, depth is 3 at most
         */
        if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts. so, consider 2 as a min depth
                 */
                depth = ext_depth(inode);
                depth = max(depth, 1) + 1;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * 2 * extents;
        } else {
                depth = 3;
                newblocks += depth;
                credits++; /* inode */
                credits += depth * extents;
        }

        /* quota space for metadata blocks */
        quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));

        /* quota space should be reported in 1K blocks */
        quota_space = toqb(quota_space);
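/*
 * Quota arithmetic (illustrative): a single 8-page (32KB) unmapped
 * extent with depth = 2 and a 4KB blocksize charges
 * 32768 + 2 * 1 * 4096 = 40960 bytes, which toqb() converts to 40
 * one-kilobyte quota blocks.
 */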
        /* each new block can go in a different group (bitmap + gd) */

        /* we can't dirty more bitmap blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
        else
                credits += newblocks;

        /* we can't dirty more gd blocks than exist */
        if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
                credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
        else
                credits += newblocks;
        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* make sure the over quota flags were not set */
        lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), quota_space, oh,
                                   osd_dt_obj(dt), &local_flags, declare_flags);

        /* we need only to store the overquota flags in the first lnb for
         * now; once we support multiple-object BRW, this code needs to be
         * revised. */
        if (local_flags & QUOTA_FL_OVER_USRQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
        if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
        if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
                lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;

        if (rc == 0)
                rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);

        RETURN(rc);
}
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                            struct niobuf_local *lnb, int npages,
                            struct thandle *thandle)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        loff_t isize;
        int rc = 0, i;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 1, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);
        ll_vfs_dq_init(inode);
        for (i = 0; i < npages; i++) {
                if (lnb[i].lnb_rc == -ENOSPC &&
                    (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
                        /* Allow the write to proceed if overwriting an
                         * existing block */
                        lnb[i].lnb_rc = 0;
                }

                if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
                               lnb[i].lnb_rc);
                        LASSERT(lnb[i].lnb_page);
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                        continue;
                }

                LASSERT(PageLocked(lnb[i].lnb_page));
                LASSERT(!PageWriteback(lnb[i].lnb_page));

                if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
                        isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;

                /*
                 * Since write and truncate are serialized by oo_sem, even
                 * partial-page truncate should not leave dirty pages in the
                 * page cache.
                 */
                LASSERT(!PageDirty(lnb[i].lnb_page));

                SetPageUptodate(lnb[i].lnb_page);

                osd_iobuf_add_page(iobuf, &lnb[i]);
        }
        osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 1);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
        }

        if (likely(rc == 0)) {
                spin_lock(&inode->i_lock);
                if (isize > i_size_read(inode)) {
                        i_size_write(inode, isize);
                        LDISKFS_I(inode)->i_disksize = isize;
                        spin_unlock(&inode->i_lock);
                        ll_dirty_inode(inode, I_DIRTY_DATASYNC);
                } else {
                        spin_unlock(&inode->i_lock);
                }

                rc = osd_do_bio(osd, inode, iobuf);
                /* we don't do stats here as in the read path because
                 * write is async: we'll do this in osd_put_bufs() */
        } else {
                osd_fini_iobuf(osd, iobuf);
        }

        osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
        if (unlikely(rc != 0)) {
                /* if write fails, we should drop pages from the cache */
                for (i = 0; i < npages; i++) {
                        if (lnb[i].lnb_page == NULL)
                                continue;
                        LASSERT(PageLocked(lnb[i].lnb_page));
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);
                }
        }

        RETURN(rc);
}
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                         struct niobuf_local *lnb, int npages)
{
        struct osd_thread_info *oti = osd_oti_get(env);
        struct osd_iobuf *iobuf = &oti->oti_iobuf;
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
        loff_t isize;
        ktime_t start, end;
        s64 timediff;

        LASSERT(inode);

        rc = osd_init_iobuf(osd, iobuf, 0, npages);
        if (unlikely(rc != 0))
                RETURN(rc);

        isize = i_size_read(inode);

        if (osd->od_read_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;

        start = ktime_get();
        for (i = 0; i < npages; i++) {
                if (isize <= lnb[i].lnb_file_offset)
                        /* If there's no more data, abort early.
                         * lnb->lnb_rc == 0, so it's easy to detect later. */
                        break;

                if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
                        lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
                else
                        lnb[i].lnb_rc = lnb[i].lnb_len;

                /* Bypass disk read if fail_loc is set properly */
                if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
                        SetPageUptodate(lnb[i].lnb_page);

                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
                                                  lnb[i].lnb_page);

                if (PageUptodate(lnb[i].lnb_page)) {
                        cache_hits++;
                        unlock_page(lnb[i].lnb_page);
                } else {
                        cache_misses++;
                        osd_iobuf_add_page(iobuf, &lnb[i]);
                }
                /* no need to unlock in osd_bufs_put(): the sooner the page is
                 * unlocked, the earlier another client can access it.
                 * notice the real unlock_page() can be called a few lines
                 * below after osd_do_bio(). lnb is per-thread, so it's
                 * fine to have PG_locked and lnb_locked inconsistent here */
                lnb[i].lnb_locked = 0;
        }
        end = ktime_get();
        timediff = ktime_us_delta(end, start);
        lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
        if (cache_hits != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
                                    cache_hits);
        if (cache_misses != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
                                    cache_misses);
        if (cache_hits + cache_misses != 0)
                lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
                                    cache_hits + cache_misses);

        if (iobuf->dr_npages) {
                rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
                                                 iobuf->dr_npages,
                                                 iobuf->dr_blocks, 0);
                rc = osd_do_bio(osd, inode, iobuf);

                /* IO stats will be done in osd_bufs_put() */

                /* early release to let others read data during the bulk */
                for (i = 0; i < iobuf->dr_npages; i++) {
                        LASSERT(PageLocked(iobuf->dr_pages[i]));
                        unlock_page(iobuf->dr_pages[i]);
                }
        }

        RETURN(rc);
}
/*
 * XXX: Another layering violation for now.
 *
 * We don't want to use ->f_op->read methods, because generic file write
 *
 *         - serializes on ->i_sem, and
 *
 *         - does a lot of extra work like balance_dirty_pages(),
 *
 * which doesn't work for globally shared files like /last_rcvd.
 */
static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
{
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);

        memcpy(buffer, (char *)ei->i_data, buflen);
        return buflen;
}
int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
        struct buffer_head *bh;
        unsigned long block;
        int osize;
        int blocksize;
        int csize;
        int boffs;

        /* prevent reading after EOF */
        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < *offs + size) {
                loff_t diff = i_size_read(inode) - *offs;

                spin_unlock(&inode->i_lock);
                if (diff < 0) {
                        CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
                               i_size_read(inode), *offs);
                        return -EBADR;
                } else if (diff == 0) {
                        return 0;
                } else {
                        size = diff;
                }
        } else {
                spin_unlock(&inode->i_lock);
        }

        blocksize = 1 << inode->i_blkbits;
        osize = size;
        while (size > 0) {
                block = *offs >> inode->i_blkbits;
                boffs = *offs & (blocksize - 1);
                csize = min(blocksize - boffs, size);
                bh = __ldiskfs_bread(NULL, inode, block, 0);
                if (IS_ERR(bh)) {
                        CERROR("%s: can't read %u@%llu on ino %lu: "
                               "rc = %ld\n", osd_ino2name(inode),
                               csize, *offs, inode->i_ino,
                               PTR_ERR(bh));
                        return PTR_ERR(bh);
                }

                if (bh != NULL) {
                        memcpy(buf, bh->b_data + boffs, csize);
                        brelse(bh);
                } else {
                        memset(buf, 0, csize);
                }

                *offs += csize;
                buf += csize;
                size -= csize;
        }
        return osize;
}
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
                        struct lu_buf *buf, loff_t *pos)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        int rc;

        /* Read small symlink from inode body as we need to maintain correct
         * on-disk symlinks for ldiskfs.
         */
        if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
                loff_t size = i_size_read(inode);

                if (buf->lb_len < size)
                        return -EOVERFLOW;

                if (size < sizeof(LDISKFS_I(inode)->i_data))
                        rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
                else
                        rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
        } else {
                rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
        }

        return rc;
}
static inline int osd_extents_enabled(struct super_block *sb,
                                      struct inode *inode)
{
        if (inode != NULL) {
                if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
                        return 1;
        } else if (ldiskfs_has_feature_extents(sb)) {
                return 1;
        }
        return 0;
}
int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
                           const loff_t size, const loff_t pos,
                           const int blocks)
{
        int credits, bits, bs, i;

        bits = sb->s_blocksize_bits;
        bs = 1 << bits;

        /* legacy blockmap: 3 levels * 3 (bitmap, gd, itself)
         * we do not expect blockmaps on large files,
         * so let's shrink it to 2 levels (4GB files) */

        /* this is default reservation: 2 levels */
        credits = (blocks + 2) * 3;

        /* actual offset is unknown, hard to optimize */
        if (pos == -1)
                return credits;

        /* now check a few specific cases to optimize */
        if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
                /* no indirects */
                credits = blocks;
                /* allocate if not allocated */
                if (inode == NULL) {
                        credits += blocks * 2;
                        return credits;
                }
                for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
                        LASSERT(i < LDISKFS_NDIR_BLOCKS);
                        if (LDISKFS_I(inode)->i_data[i] == 0)
                                credits += 2;
                }
        } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
                /* single indirect */
                credits = blocks * 3;
                if (inode == NULL ||
                    LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
                        credits += 3;
                else
                        /* The indirect block may be modified. */
                        credits += 1;
        }

        return credits;
}
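/*
 * Credit arithmetic (illustrative): allocating blocks = 4 new blocks
 * entirely within the LDISKFS_NDIR_BLOCKS direct area of a
 * not-yet-created inode costs 4 + 4 * 2 = 12 credits, versus the
 * default reservation of (4 + 2) * 3 = 18 when the offset is unknown.
 */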
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
                                 const struct lu_buf *buf, loff_t _pos,
                                 struct thandle *handle)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct inode *inode = obj->oo_inode;
        struct super_block *sb = osd_sb(osd_obj2dev(obj));
        struct osd_thandle *oh;
        int rc = 0, est = 0, credits, blocks, allocated = 0;
        int bits, bs;
        int depth, size;
        loff_t pos;
        ENTRY;

        LASSERT(buf != NULL);
        LASSERT(handle != NULL);

        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);

        size = buf->lb_len;
        bits = sb->s_blocksize_bits;
        bs = 1 << bits;

        if (_pos == -1) {
                /* if this is an append, then we
                 * should expect a cross-block record */
                pos = 0;
        } else {
                pos = _pos;
        }

        /* blocks to modify */
        blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
        LASSERT(blocks > 0);

        if (inode != NULL && _pos != -1) {
                /* object size in blocks */
                est = (i_size_read(inode) + bs - 1) >> bits;
                allocated = inode->i_blocks >> (bits - 9);
                if (pos + size <= i_size_read(inode) && est <= allocated) {
                        /* looks like an overwrite, no need to modify tree */
                        credits = blocks;
                        /* no need to modify i_size */
                        goto out;
                }
        }
        if (osd_extents_enabled(sb, inode)) {
                /*
                 * many concurrent threads may grow the tree by the time
                 * our transaction starts. so, consider 2 as a min depth.
                 * for every level we may need to allocate a new block
                 * and take some entries from the old one. so, 3 blocks
                 * to allocate (bitmap, gd, itself) + old block - 4 per
                 * level.
                 */
                depth = inode != NULL ? ext_depth(inode) : 0;
                depth = max(depth, 1) + 1;
                credits = depth;
                /* if not append, then split may need to modify
                 * existing blocks moving entries into the new ones */
                if (_pos != -1)
                        credits += depth;
                /* blocks to store data: bitmap, gd, itself */
                credits += blocks * 3;
        } else {
                credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
        }
        /* if inode is created as part of the transaction,
         * then it's counted already by the creation method */
        if (inode != NULL)
                credits++;

out:
        osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

        /* dt_declare_write() is usually called for system objects, such
         * as llog or last_rcvd files. We needn't enforce quota on those
         * objects, so always set the lqi_space to 0. */
        if (inode != NULL)
                rc = osd_declare_inode_qid(env, i_uid_read(inode),
                                           i_gid_read(inode),
                                           i_projid_read(inode), 0,
                                           oh, obj, NULL, OSD_QID_BLK);

        if (rc == 0)
                rc = osd_trunc_lock(obj, oh, true);

        RETURN(rc);
}
static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
{
        /* LU-2634: clear the extent format for fast symlink */
        ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);

        memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
        spin_lock(&inode->i_lock);
        LDISKFS_I(inode)->i_disksize = buflen;
        i_size_write(inode, buflen);
        spin_unlock(&inode->i_lock);
        ll_dirty_inode(inode, I_DIRTY_DATASYNC);

        return 0;
}
static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
                                    int bufsize, int write_NUL, loff_t *offs,
                                    handle_t *handle)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct buffer_head *bh = NULL;
        loff_t offset = *offs;
        loff_t new_size = i_size_read(inode);
        unsigned long block;
        int blocksize = 1 << inode->i_blkbits;
        struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
        int err = 0;
        int size;
        int boffs;
        int dirty_inode = 0;
        bool create, sparse, sync = false;

        if (write_NUL) {
                /*
                 * long symlink write does not count the NUL terminator in
                 * bufsize, we write it, and the inode's file size does not
                 * count the NUL terminator either
                 */
                ((char *)buf)[bufsize] = '\0';
                ++bufsize;
        }

        dirty_inode = test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
                                       &ei->i_flags);

        /* sparse checking is racy, but sparse is a very rare case,
         * leave as is */
        sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
                  ((new_size - 1) >> inode->i_blkbits) + 1);
        while (bufsize > 0) {
                int credits = handle->h_buffer_credits;
                unsigned long last_block = (new_size == 0) ? 0 :
                                           (new_size - 1) >> inode->i_blkbits;

                if (bh)
                        brelse(bh);

                block = offset >> inode->i_blkbits;
                boffs = offset & (blocksize - 1);
                size = min(blocksize - boffs, bufsize);
                sync = (block > last_block || new_size == 0 || sparse);

                if (sync)
                        down(&ei->i_append_sem);

                bh = __ldiskfs_bread(handle, inode, block, 0);

                if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
                        CWARN("%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
                              inode->i_sb->s_id,
                              offset, block, bufsize, *offs);
                if (IS_ERR_OR_NULL(bh)) {
                        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
                        int flags = LDISKFS_GET_BLOCKS_CREATE;

                        /* while the file system is being mounted, avoid
                         * preallocation otherwise mount can take a long
                         * time as mballoc cache is cold.
                         * XXX: this is a workaround until we have a proper
                         *      fix in mballoc
                         * XXX: works with extent-based files only */
                        if (!osd->od_cl_seq)
                                flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
                        bh = __ldiskfs_bread(handle, inode, block, flags);
                        create = true;
                } else {
                        if (sync)
                                up(&ei->i_append_sem);
                        create = false;
                }

                if (IS_ERR_OR_NULL(bh)) {
                        if (bh == NULL) {
                                err = -EIO;
                        } else {
                                err = PTR_ERR(bh);
                                bh = NULL;
                        }

                        CERROR("%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
                               inode->i_sb->s_id, offset, block, bufsize,
                               *offs, credits, handle->h_buffer_credits, err);
                        break;
                }
                err = ldiskfs_journal_get_write_access(handle, bh);
                if (err) {
                        CERROR("journal_get_write_access() returned error %d\n",
                               err);
                        break;
                }
                LASSERTF(boffs + size <= bh->b_size,
                         "boffs %d size %d bh->b_size %lu\n",
                         boffs, size, (unsigned long)bh->b_size);
                if (create) {
                        memset(bh->b_data, 0, bh->b_size);
                        if (sync)
                                up(&ei->i_append_sem);
                }
                memcpy(bh->b_data + boffs, buf, size);
                err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
                if (err)
                        break;

                if (offset + size > new_size)
                        new_size = offset + size;
                offset += size;
                bufsize -= size;
                buf += size;
        }

        if (sync)
                up(&ei->i_append_sem);
        if (bh)
                brelse(bh);

        if (write_NUL)
                --new_size;
        /* correct in-core and on-disk sizes */
        if (new_size > i_size_read(inode)) {
                spin_lock(&inode->i_lock);
                if (new_size > i_size_read(inode))
                        i_size_write(inode, new_size);
                if (i_size_read(inode) > ei->i_disksize) {
                        ei->i_disksize = i_size_read(inode);
                        dirty_inode = 1;
                }
                spin_unlock(&inode->i_lock);
        }
        if (dirty_inode)
                ll_dirty_inode(inode, I_DIRTY_DATASYNC);

        if (err == 0)
                *offs = offset;
        return err;
}
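/*
 * Design note (summarizing the loop above): writes that may allocate a
 * block beyond the last allocated one (block > last_block, a
 * zero-sized file, or a sparse file) are serialized under
 * ei->i_append_sem, so a freshly created buffer can be zero-filled
 * before it becomes visible to concurrent readers of the same range.
 */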
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                         const struct lu_buf *buf, loff_t *pos,
                         struct thandle *handle)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        struct osd_thandle *oh;
        ssize_t result;
        int is_link;

        LASSERT(dt_object_exists(dt));
        LASSERT(handle != NULL);
        LASSERT(inode != NULL);
        ll_vfs_dq_init(inode);

        /* XXX: don't check: one declared chunk can be used many times */
        /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */

        oh = container_of(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle->h_transaction != NULL);
        osd_trans_exec_op(env, handle, OSD_OT_WRITE);

        /* Write small symlink to inode body as we need to maintain correct
         * on-disk symlinks for ldiskfs.
         * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
         * does not count it in.
         */
        is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
        if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
                result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
        else
                result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
                                                  is_link, pos, oh->ot_handle);
        if (result == 0)
                result = buf->lb_len;

        osd_trans_exec_check(env, handle, OSD_OT_WRITE);

        RETURN(result);
}
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
                             __u64 start, __u64 end, struct thandle *th)
{
        struct osd_thandle *oh;
        struct inode *inode;
        int rc;
        ENTRY;

        LASSERT(th);
        oh = container_of(th, struct osd_thandle, ot_super);

        /*
         * we don't need to reserve credits for the whole truncate:
         * it's not possible as truncate may need to free too many
         * blocks and that won't fit a single transaction. instead
         * we reserve credits to change i_size and put inode onto
         * orphan list. if needed, truncate will extend or restart
         * the transaction
         */
        osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
                             osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);

        inode = osd_dt_obj(dt)->oo_inode;
        LASSERT(inode);

        rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
                                   i_projid_read(inode), 0, oh, osd_dt_obj(dt),
                                   NULL, OSD_QID_BLK);

        if (rc == 0)
                rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);

        RETURN(rc);
}
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
                     __u64 start, __u64 end, struct thandle *th)
{
        struct osd_object *obj = osd_dt_obj(dt);
        struct osd_device *osd = osd_obj2dev(obj);
        struct inode *inode = obj->oo_inode;
        struct osd_access_lock *al;
        struct osd_thandle *oh;
        int rc = 0, found = 0;
        bool grow = false;
        ENTRY;

        LASSERT(end == OBD_OBJECT_EOF);
        LASSERT(dt_object_exists(dt));
        LASSERT(osd_invariant(obj));
        LASSERT(inode != NULL);
        ll_vfs_dq_init(inode);

        LASSERT(th);
        oh = container_of(th, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle->h_transaction != NULL);

        /* we used to skip truncate to current size to
         * optimize truncates on OST. with DoM we can
         * get attr_set to set a specific size (MDS_REINT)
         * and then get a truncate RPC which essentially
         * would be skipped. this is bad.. so, disable
         * this optimization on MDS till the client stops
         * sending MDS_REINT (LU-11033) -bzzz */
        if (osd->od_is_ost && i_size_read(inode) == start)
                RETURN(0);

        osd_trans_exec_op(env, th, OSD_OT_PUNCH);

        spin_lock(&inode->i_lock);
        if (i_size_read(inode) < start)
                grow = true;
        i_size_write(inode, start);
        spin_unlock(&inode->i_lock);
        ll_truncate_pagecache(inode, start);

        /* optimize grow case */
        if (grow) {
                osd_execute_truncate(obj);
                GOTO(out, rc);
        }

        /* add to orphan list to ensure truncate completion
         * if this transaction succeeds. ldiskfs_truncate()
         * will take the inode out of the list */
        rc = ldiskfs_orphan_add(oh->ot_handle, inode);
        if (rc != 0)
                GOTO(out, rc);

        list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
                if (obj != al->tl_obj)
                        continue;
                LASSERT(al->tl_shared == 0);
                found = 1;
                /* do actual truncate in osd_trans_stop() */
                al->tl_truncate = 1;
                break;
        }
        LASSERT(found);

out:
        RETURN(rc);
}
static int fiemap_check_ranges(struct inode *inode,
                               u64 start, u64 len, u64 *new_len)
{
        loff_t maxbytes;

        *new_len = len;

        if (len == 0)
                return -EINVAL;

        if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
                maxbytes = inode->i_sb->s_maxbytes;
        else
                maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;

        if (start > maxbytes)
                return -EFBIG;

        /*
         * Shrink request scope to what the fs can actually handle.
         */
        if (len > maxbytes || (maxbytes - len) < start)
                *new_len = maxbytes - start;

        return 0;
}
/* So that the fiemap access checks can't overflow on 32-bit machines. */
#define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))

static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
                          struct fiemap *fm)
{
        struct fiemap_extent_info fieinfo = {0, };
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        u64 len;
        int rc;

        LASSERT(inode);
        if (inode->i_op->fiemap == NULL)
                RETURN(-EOPNOTSUPP);

        if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
                RETURN(-EINVAL);

        rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
        if (rc)
                RETURN(rc);

        fieinfo.fi_flags = fm->fm_flags;
        fieinfo.fi_extents_max = fm->fm_extent_count;
        fieinfo.fi_extents_start = fm->fm_extents;

        if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
                filemap_write_and_wait(inode->i_mapping);

        rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
        fm->fm_flags = fieinfo.fi_flags;
        fm->fm_mapped_extents = fieinfo.fi_extents_mapped;

        RETURN(rc);
}
static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
                       __u64 start, __u64 end, enum lu_ladvise_type advice)
{
        struct inode *inode = osd_dt_obj(dt)->oo_inode;
        int rc = 0;
        ENTRY;

        switch (advice) {
        case LU_LADVISE_DONTNEED:
                if (end)
                        invalidate_mapping_pages(inode->i_mapping,
                                                 start >> PAGE_SHIFT,
                                                 (end - 1) >> PAGE_SHIFT);
                break;
        default:
                rc = -ENOTSUPP;
                break;
        }

        RETURN(rc);
}
/*
 * in some cases we may need declare methods for objects being created,
 * e.g. when we create a symlink
 */
const struct dt_body_operations osd_body_ops_new = {
        .dbo_declare_write = osd_declare_write,
};

const struct dt_body_operations osd_body_ops = {
        .dbo_read                 = osd_read,
        .dbo_declare_write        = osd_declare_write,
        .dbo_write                = osd_write,
        .dbo_bufs_get             = osd_bufs_get,
        .dbo_bufs_put             = osd_bufs_put,
        .dbo_write_prep           = osd_write_prep,
        .dbo_declare_write_commit = osd_declare_write_commit,
        .dbo_write_commit         = osd_write_commit,
        .dbo_read_prep            = osd_read_prep,
        .dbo_declare_punch        = osd_declare_punch,
        .dbo_punch                = osd_punch,
        .dbo_fiemap_get           = osd_fiemap_get,
        .dbo_ladvise              = osd_ladvise,
};
/**
 * Get a truncate lock
 *
 * In order to take multi-transaction truncate out of the main transaction,
 * we let the caller grab a lock on the object passed. The lock can be
 * shared (for writes) or exclusive (for truncate). It's not allowed to mix
 * truncate and write in the same transaction handle (do not confuse with a
 * big ldiskfs transaction containing lots of handles).
 * The lock must be taken at declaration.
 *
 * \param obj           object to lock
 * \param oh            osd thandle
 * \param shared        shared or exclusive
 *
 * \retval 0            lock is granted
 * \retval -ENOMEM      no memory to allocate lock
 */
int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
{
        struct osd_access_lock *al, *tmp;
        list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
                if (tmp->tl_obj != obj)
                        continue;
                LASSERT(tmp->tl_shared == shared);
                /* found same lock */
                return 0;
        }

        OBD_ALLOC_PTR(al);
        if (unlikely(al == NULL))
                return -ENOMEM;
        al->tl_obj = obj;
        al->tl_truncate = false;
        if (shared)
                down_read(&obj->oo_ext_idx_sem);
        else
                down_write(&obj->oo_ext_idx_sem);
        al->tl_shared = shared;

        list_add(&al->tl_list, &oh->ot_trunc_locks);

        return 0;
}
void osd_trunc_unlock_all(struct list_head *list)
{
        struct osd_access_lock *al, *tmp;

        list_for_each_entry_safe(al, tmp, list, tl_list) {
                if (al->tl_shared)
                        up_read(&al->tl_obj->oo_ext_idx_sem);
                else
                        up_write(&al->tl_obj->oo_ext_idx_sem);
                list_del(&al->tl_list);
                OBD_FREE_PTR(al);
        }
}
void osd_execute_truncate(struct osd_object *obj)
{
        struct osd_device *d = osd_obj2dev(obj);
        struct inode *inode = obj->oo_inode;
        __u64 size;

        /* simulate a crash before (or in the middle of) delayed truncate */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
                struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
                struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);

                mutex_lock(&sbi->s_orphan_lock);
                list_del_init(&ei->i_orphan);
                mutex_unlock(&sbi->s_orphan_lock);
                return;
        }
#ifdef HAVE_INODEOPS_TRUNCATE
        if (inode->i_op->truncate)
                inode->i_op->truncate(inode);
        else
#endif
                ldiskfs_truncate(inode);

        /*
         * For a partial-page truncate, flush the page to disk immediately to
         * avoid data corruption during direct disk write. b=17397
         */
        size = i_size_read(inode);
        if ((size & ~PAGE_MASK) == 0)
                return;
        if (osd_use_page_cache(d)) {
                filemap_fdatawrite_range(inode->i_mapping, size, size + 1);
        } else {
                /* Note we use the "wait" version to ensure I/O is complete */
                filemap_write_and_wait_range(inode->i_mapping, size, size + 1);
                invalidate_mapping_pages(inode->i_mapping, size >> PAGE_SHIFT,
                                         size >> PAGE_SHIFT);
        }
}
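/*
 * Example (illustrative): with 4KB pages, truncating to size 0x1800
 * leaves low bits set (0x1800 & ~PAGE_MASK = 0x800), so the partial
 * last page is flushed; truncating to 0x2000 ends exactly on a page
 * boundary and returns early.
 */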
void osd_process_truncates(struct list_head *list)
{
        struct osd_access_lock *al;

        LASSERT(journal_current_handle() == NULL);

        list_for_each_entry(al, list, tl_list) {
                if (al->tl_shared)
                        continue;
                if (!al->tl_truncate)
                        continue;
                osd_execute_truncate(al->tl_obj);
        }
}