4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
36 * Author: Nikita Danilov <nikita@clusterfs.com>
37 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
41 /* prerequisite for linux/xattr.h */
42 #include <linux/types.h>
43 /* prerequisite for linux/xattr.h */
46 #include <linux/pagevec.h>
49 * struct OBD_{ALLOC,FREE}*()
52 #include <obd_support.h>
54 #include "osd_internal.h"
57 #include <ldiskfs/ldiskfs_extents.h>
59 static inline bool osd_use_page_cache(struct osd_device *d)
61 /* do not use pagecache if write and read caching are disabled */
62 if (d->od_writethrough_cache + d->od_read_cache == 0)
64 /* use pagecache by default */
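/*
 * Set up the per-thread osd_iobuf for an upcoming bulk IO: reset the
 * request counter and wait queue, and (re)allocate the page, block and
 * lnb arrays when the existing buffers are too small for "pages".
 */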
68 static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
69 int rw, int line, int pages)
73 LASSERTF(iobuf->dr_elapsed_valid == 0,
74 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
75 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
77 LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
79 init_waitqueue_head(&iobuf->dr_wait);
80 atomic_set(&iobuf->dr_numreqs, 0);
85 iobuf->dr_elapsed = ktime_set(0, 0);
86 /* must be counted before, so assert */
88 iobuf->dr_init_at = line;
90 blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
91 if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
92 LASSERT(iobuf->dr_pg_buf.lb_len >=
93 pages * sizeof(iobuf->dr_pages[0]));
97 /* start with 1MB for 4K blocks */
99 while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
102 CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
103 (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
105 blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
106 iobuf->dr_max_pages = 0;
107 CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
108 (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
110 lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
111 iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
112 if (unlikely(iobuf->dr_blocks == NULL))
115 lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
116 iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
117 if (unlikely(iobuf->dr_pages == NULL))
120 lu_buf_realloc(&iobuf->dr_lnb_buf,
121 pages * sizeof(iobuf->dr_lnbs[0]));
122 iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
123 if (unlikely(iobuf->dr_lnbs == NULL))
126 iobuf->dr_max_pages = pages;
130 #define osd_init_iobuf(dev, iobuf, rw, pages) \
131 __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
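/* Append one local niobuf and its page to the iobuf gathered above. */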
133 static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
134 struct niobuf_local *lnb)
136 LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
137 iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
138 iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
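/*
 * Finalize an iobuf once all its bios have completed: feed the fragment
 * count and elapsed time into the per-device BRW statistics.
 */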
142 void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
144 int rw = iobuf->dr_rw;
146 if (iobuf->dr_elapsed_valid) {
147 iobuf->dr_elapsed_valid = 0;
148 LASSERT(iobuf->dr_dev == d);
149 LASSERT(iobuf->dr_frags > 0);
150 lprocfs_oh_tally(&d->od_brw_stats.
151 hist[BRW_R_DIO_FRAGS+rw],
153 lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
154 ktime_to_ms(iobuf->dr_elapsed));
158 #ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
159 static void dio_complete_routine(struct bio *bio)
161 int error = bio->bi_status;
163 static void dio_complete_routine(struct bio *bio, int error)
166 struct osd_iobuf *iobuf = bio->bi_private;
169 /* CAVEAT EMPTOR: possibly in IRQ context
170 * DO NOT record procfs stats here!!! */
172 if (unlikely(iobuf == NULL)) {
173 CERROR("***** bio->bi_private is NULL! This should never "
174 "happen. Normally, I would crash here, but instead I "
175 "will dump the bio contents to the console. Please "
176 "report this to <https://jira.whamcloud.com/> , along "
177 "with any interesting messages leading up to this point "
178 "(like SCSI errors, perhaps). Because bi_private is "
179 "NULL, I can't wake up the thread that initiated this "
180 "IO - you will probably have to reboot this node.\n");
181 CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
182 ": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
183 bio->bi_next, (unsigned long)bio->bi_flags,
184 (unsigned int)bio->bi_opf, bio->bi_vcnt, bio_idx(bio),
185 bio_sectors(bio) << 9, bio->bi_end_io,
186 atomic_read(&bio->__bi_cnt),
191 /* the check is outside of the loop for performance reasons -bzzz */
192 if (!bio_data_dir(bio)) {
193 DECLARE_BVEC_ITER_ALL(iter_all);
195 bio_for_each_segment_all(bvl, bio, iter_all) {
196 if (likely(error == 0))
197 SetPageUptodate(bvl_to_page(bvl));
198 LASSERT(PageLocked(bvl_to_page(bvl)));
200 atomic_dec(&iobuf->dr_dev->od_r_in_flight);
202 atomic_dec(&iobuf->dr_dev->od_w_in_flight);
205 /* any real error is good enough -bzzz */
206 if (error != 0 && iobuf->dr_error == 0)
207 iobuf->dr_error = error;
210 * set dr_elapsed before dr_numreqs turns to 0, otherwise
211 * it's possible that the service thread will see dr_numreqs
212 * is zero, but dr_elapsed is not set yet, leading to lost
213 * data in this processing and an assertion in a subsequent
214 * callback.
216 if (atomic_read(&iobuf->dr_numreqs) == 1) {
217 ktime_t now = ktime_get();
219 iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
220 iobuf->dr_elapsed_valid = 1;
222 if (atomic_dec_and_test(&iobuf->dr_numreqs))
223 wake_up(&iobuf->dr_wait);
225 /* Completed bios used to be chained off iobuf->dr_bios and freed in
226 * filter_clear_dreq(). It was then possible to exhaust the biovec-256
227 * mempool when serious on-disk fragmentation was encountered,
228 * deadlocking the OST. The bios are now released as soon as complete
229 * so the pool cannot be exhausted while IOs are competing. bug 10076 */
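/*
 * Account a bio that is about to be submitted: bump the in-flight
 * counter and the RPC/disk-IO-size histograms for the IO direction
 * (dr_rw == 0 means read, 1 means write).
 */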
233 static void record_start_io(struct osd_iobuf *iobuf, int size)
235 struct osd_device *osd = iobuf->dr_dev;
236 struct obd_histogram *h = osd->od_brw_stats.hist;
239 atomic_inc(&iobuf->dr_numreqs);
241 if (iobuf->dr_rw == 0) {
242 atomic_inc(&osd->od_r_in_flight);
243 lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
244 atomic_read(&osd->od_r_in_flight));
245 lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
246 } else if (iobuf->dr_rw == 1) {
247 atomic_inc(&osd->od_w_in_flight);
248 lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
249 atomic_read(&osd->od_w_in_flight));
250 lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
256 static void osd_submit_bio(int rw, struct bio *bio)
258 LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
259 #ifdef HAVE_SUBMIT_BIO_2ARGS
260 submit_bio(rw ? WRITE : READ, bio);
267 static int can_be_merged(struct bio *bio, sector_t sector)
272 return bio_end_sector(bio) == sector ? 1 : 0;
275 #if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
277 * This function will change the data written, thus it should only be
278 * used when checking the data integrity feature
280 static void bio_integrity_fault_inject(struct bio *bio)
282 struct bio_vec *bvec;
283 DECLARE_BVEC_ITER_ALL(iter_all);
287 bio_for_each_segment_all(bvec, bio, iter_all) {
288 struct page *page = bvec->bv_page;
298 static int bio_dif_compare(__u16 *expected_guard_buf, void *bio_prot_buf,
299 unsigned int sectors, int tuple_size)
301 __u16 *expected_guard;
305 expected_guard = expected_guard_buf;
306 for (i = 0; i < sectors; i++) {
307 bio_guard = (__u16 *)bio_prot_buf;
308 if (*bio_guard != *expected_guard) {
309 CERROR("unexpected guard tags on sector %d "
310 "expected guard %u, bio guard "
311 "%u, sectors %u, tuple size %d\n",
312 i, *expected_guard, *bio_guard, sectors,
317 bio_prot_buf += tuple_size;
322 static int osd_bio_integrity_compare(struct bio *bio, struct block_device *bdev,
323 struct osd_iobuf *iobuf, int index)
325 struct blk_integrity *bi = bdev_get_integrity(bdev);
326 struct bio_integrity_payload *bip = bio->bi_integrity;
327 struct niobuf_local *lnb;
328 unsigned short sector_size = blk_integrity_interval(bi);
329 void *bio_prot_buf = page_address(bip->bip_vec->bv_page) +
330 bip->bip_vec->bv_offset;
332 sector_t sector = bio_start_sector(bio);
333 unsigned int sectors, total = 0;
334 DECLARE_BVEC_ITER_ALL(iter_all);
335 __u16 *expected_guard;
339 bio_for_each_segment_all(bv, bio, iter_all) {
340 lnb = iobuf->dr_lnbs[index];
341 expected_guard = lnb->lnb_guards;
342 sectors = bv->bv_len / sector_size;
343 if (lnb->lnb_guard_rpc) {
344 rc = bio_dif_compare(expected_guard, bio_prot_buf,
345 sectors, bi->tuple_size);
351 bio_prot_buf += sectors * bi->tuple_size;
352 total += sectors * bi->tuple_size;
353 LASSERT(total <= bip_size(bio->bi_integrity));
359 static int osd_bio_integrity_handle(struct osd_device *osd, struct bio *bio,
360 struct osd_iobuf *iobuf,
361 int start_page_idx, bool fault_inject,
362 bool integrity_enabled)
364 struct super_block *sb = osd_sb(osd);
365 integrity_gen_fn *generate_fn = NULL;
366 integrity_vrfy_fn *verify_fn = NULL;
371 if (!integrity_enabled)
374 rc = osd_get_integrity_profile(osd, &generate_fn, &verify_fn);
378 rc = bio_integrity_prep_fn(bio, generate_fn, verify_fn);
382 /* Verify and inject fault only when writing */
383 if (iobuf->dr_rw == 1) {
384 if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_CMP))) {
385 rc = osd_bio_integrity_compare(bio, sb->s_bdev, iobuf,
391 if (unlikely(fault_inject))
392 bio_integrity_fault_inject(bio);
398 #ifdef HAVE_BIO_INTEGRITY_PREP_FN
399 # ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
400 static void dio_integrity_complete_routine(struct bio *bio)
402 static void dio_integrity_complete_routine(struct bio *bio, int error)
405 struct osd_bio_private *bio_private = bio->bi_private;
407 bio->bi_private = bio_private->obp_iobuf;
408 osd_dio_complete_routine(bio, error);
410 OBD_FREE_PTR(bio_private);
412 #endif /* HAVE_BIO_INTEGRITY_PREP_FN */
413 #else /* !CONFIG_BLK_DEV_INTEGRITY */
414 #define osd_bio_integrity_handle(osd, bio, iobuf, start_page_idx, \
415 fault_inject, integrity_enabled) 0
416 #endif /* CONFIG_BLK_DEV_INTEGRITY */
418 static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
419 bool integrity_enabled, int start_page_idx,
420 struct osd_bio_private **pprivate)
426 #ifdef HAVE_BIO_INTEGRITY_PREP_FN
427 if (integrity_enabled) {
428 struct osd_bio_private *bio_private = NULL;
430 OBD_ALLOC_GFP(bio_private, sizeof(*bio_private), GFP_NOIO);
431 if (bio_private == NULL)
433 bio->bi_end_io = dio_integrity_complete_routine;
434 bio->bi_private = bio_private;
435 bio_private->obp_start_page_idx = start_page_idx;
436 bio_private->obp_iobuf = iobuf;
437 *pprivate = bio_private;
441 bio->bi_end_io = dio_complete_routine;
442 bio->bi_private = iobuf;
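/*
 * Build and submit bios for the iobuf: walk the page/block arrays,
 * zero-fill holes on read, merge physically contiguous fragments into
 * as few bios as possible, and submit each bio once it can grow no
 * further. Write completion is not waited for here (see below).
 */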
448 static int osd_do_bio(struct osd_device *osd, struct inode *inode,
449 struct osd_iobuf *iobuf)
451 int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
452 struct page **pages = iobuf->dr_pages;
453 int npages = iobuf->dr_npages;
454 sector_t *blocks = iobuf->dr_blocks;
455 int total_blocks = npages * blocks_per_page;
456 struct super_block *sb = inode->i_sb;
457 int sector_bits = sb->s_blocksize_bits - 9;
458 unsigned int blocksize = sb->s_blocksize;
459 struct block_device *bdev = sb->s_bdev;
460 struct osd_bio_private *bio_private = NULL;
461 struct bio *bio = NULL;
462 int bio_start_page_idx;
464 unsigned int page_offset;
472 bool integrity_enabled;
473 struct blk_plug plug;
476 fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
477 LASSERT(iobuf->dr_npages == npages);
479 integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);
481 osd_brw_stats_update(osd, iobuf);
482 iobuf->dr_start_time = ktime_get();
484 blk_start_plug(&plug);
485 for (page_idx = 0, block_idx = 0;
487 page_idx++, block_idx += blocks_per_page) {
489 page = pages[page_idx];
490 LASSERT(block_idx + blocks_per_page <= total_blocks);
492 for (i = 0, page_offset = 0;
494 i += nblocks, page_offset += blocksize * nblocks) {
498 if (blocks[block_idx + i] == 0) { /* hole */
499 LASSERTF(iobuf->dr_rw == 0,
500 "page_idx %u, block_idx %u, i %u\n",
501 page_idx, block_idx, i);
502 memset(kmap(page) + page_offset, 0, blocksize);
507 sector = (sector_t)blocks[block_idx + i] << sector_bits;
509 /* Additional contiguous file blocks? */
510 while (i + nblocks < blocks_per_page &&
511 (sector + (nblocks << sector_bits)) ==
512 ((sector_t)blocks[block_idx + i + nblocks] <<
517 can_be_merged(bio, sector) &&
518 bio_add_page(bio, page,
519 blocksize * nblocks, page_offset) != 0)
520 continue; /* added this frag OK */
523 struct request_queue *q = bio_get_queue(bio);
524 unsigned int bi_size = bio_sectors(bio) << 9;
526 /* Dang! I have to fragment this I/O */
528 "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d)\n",
529 bi_size, bio->bi_vcnt, bio->bi_max_vecs,
531 queue_max_sectors(q),
532 osd_bio_nr_segs(bio),
533 queue_max_segments(q));
534 rc = osd_bio_integrity_handle(osd, bio,
535 iobuf, bio_start_page_idx,
536 fault_inject, integrity_enabled);
542 record_start_io(iobuf, bi_size);
543 osd_submit_bio(iobuf->dr_rw, bio);
546 bio_start_page_idx = page_idx;
547 /* allocate new bio */
548 bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
549 (npages - page_idx) *
552 CERROR("Can't allocate bio %u*%u = %u pages\n",
553 (npages - page_idx), blocks_per_page,
554 (npages - page_idx) * blocks_per_page);
559 bio_set_dev(bio, bdev);
560 bio_set_sector(bio, sector);
561 bio->bi_opf = iobuf->dr_rw ? WRITE : READ;
562 rc = osd_bio_init(bio, iobuf, integrity_enabled,
563 bio_start_page_idx, &bio_private);
569 rc = bio_add_page(bio, page,
570 blocksize * nblocks, page_offset);
576 rc = osd_bio_integrity_handle(osd, bio, iobuf,
585 record_start_io(iobuf, bio_sectors(bio) << 9);
586 osd_submit_bio(iobuf->dr_rw, bio);
591 blk_finish_plug(&plug);
593 /* in order to achieve better IO throughput, we don't wait for write
594 * completion here. instead we proceed with the transaction commit in
595 * parallel and wait for IO completion once the transaction is stopped.
596 * see osd_trans_stop() for more details -bzzz */
597 if (iobuf->dr_rw == 0 || fault_inject) {
598 wait_event(iobuf->dr_wait,
599 atomic_read(&iobuf->dr_numreqs) == 0);
600 osd_fini_iobuf(osd, iobuf);
604 rc = iobuf->dr_error;
607 OBD_FREE_PTR(bio_private);
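/*
 * Split a remote IO described by (offset, len) into per-page
 * niobuf_local entries, clipping the first and last entries to the
 * page boundaries.
 */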
613 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
614 struct niobuf_local *lnb, int maxlnb)
622 int poff = offset & (PAGE_SIZE - 1);
623 int plen = PAGE_SIZE - poff;
625 if (*nrpages >= maxlnb) {
632 lnb->lnb_file_offset = offset;
633 lnb->lnb_page_offset = poff;
635 /* lnb->lnb_flags = rnb->rnb_flags; */
637 lnb->lnb_page = NULL;
639 lnb->lnb_guard_rpc = 0;
640 lnb->lnb_guard_disk = 0;
643 LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
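/*
 * Pick the backing page for one niobuf entry: from the inode's
 * pagecache when caching is used, otherwise from the per-thread array
 * of private pages reserved for cacheless IO.
 */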
654 static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
655 loff_t offset, gfp_t gfp_mask, bool cache)
657 struct osd_thread_info *oti = osd_oti_get(env);
658 struct inode *inode = osd_dt_obj(dt)->oo_inode;
659 struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
666 page = find_or_create_page(inode->i_mapping,
667 offset >> PAGE_SHIFT, gfp_mask);
670 LASSERT(!PagePrivate2(page));
671 wait_on_page_writeback(page);
673 lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
679 if (inode->i_mapping->nrpages) {
680 /* consult with pagecache, but do not create new pages */
681 /* this is normally used once */
682 page = find_lock_page(inode->i_mapping, offset >> PAGE_SHIFT);
684 wait_on_page_writeback(page);
689 LASSERT(oti->oti_dio_pages);
690 cur = oti->oti_dio_pages_used;
691 page = oti->oti_dio_pages[cur];
693 if (unlikely(!page)) {
694 LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
695 page = alloc_page(gfp_mask);
698 oti->oti_dio_pages[cur] = page;
699 SetPagePrivate2(page);
703 ClearPageUptodate(page);
704 page->index = offset >> PAGE_SHIFT;
705 oti->oti_dio_pages_used++;
711 * the following "locks" are used:
722 * - lock pages, unlock
724 * - lock partial page
730 * Unlock and release pages loaded by osd_bufs_get()
732 * Unlock \a npages pages from \a lnb and drop the refcount on them.
734 * \param env thread execution environment
735 * \param dt dt object undergoing IO (OSD object + methods)
736 * \param lnb array of pages undergoing IO
737 * \param npages number of pages in \a lnb
741 static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
742 struct niobuf_local *lnb, int npages)
744 struct osd_thread_info *oti = osd_oti_get(env);
748 ll_pagevec_init(&pvec, 0);
750 for (i = 0; i < npages; i++) {
751 struct page *page = lnb[i].lnb_page;
756 /* if the page isn't cached, then reset uptodate
757 * to prevent reuse */
758 if (PagePrivate2(page)) {
759 oti->oti_dio_pages_used--;
761 if (lnb[i].lnb_locked)
763 if (pagevec_add(&pvec, page) == 0)
764 pagevec_release(&pvec);
767 lnb[i].lnb_page = NULL;
770 LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);
772 /* Release any partial pagevec */
773 pagevec_release(&pvec);
779 * Load and lock pages undergoing IO
781 * Pages as described in the \a lnb array are fetched (from disk or cache)
782 * and locked for IO by the caller.
784 * DLM locking protects us from write and truncate competing for same region,
785 * but partial-page truncate can leave dirty pages in the cache for ldiskfs.
786 * It's possible the writeout on such a page is in progress when we access
787 * it. It's also possible that during this writeout we put new (partial) data
788 * into the page, but won't be able to proceed in filter_commitrw_write().
789 * Therefore, just wait for writeout completion as it should be rare enough.
791 * \param env thread execution environment
792 * \param dt dt object undergoing IO (OSD object + methods)
793 * \param pos byte offset of IO start
794 * \param len number of bytes of IO
795 * \param lnb array of extents undergoing IO
796 * \param rw read or write operation, and other flags
797 * \param capa capabilities
799 * \retval pages (zero or more) loaded successfully
800 * \retval -ENOMEM on memory/page allocation error
802 static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
803 loff_t pos, ssize_t len, struct niobuf_local *lnb,
804 int maxlnb, enum dt_bufs_type rw)
806 struct osd_thread_info *oti = osd_oti_get(env);
807 struct osd_object *obj = osd_dt_obj(dt);
808 struct osd_device *osd = osd_obj2dev(obj);
809 int npages, i, iosize, rc = 0;
814 LASSERT(obj->oo_inode);
816 rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
820 write = rw & DT_BUFS_TYPE_WRITE;
822 fsize = lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len;
823 iosize = fsize - lnb[0].lnb_file_offset;
824 fsize = max(fsize, i_size_read(obj->oo_inode));
826 cache = rw & DT_BUFS_TYPE_READAHEAD;
830 cache = osd_use_page_cache(osd);
833 if (!osd->od_writethrough_cache) {
837 if (iosize > osd->od_writethrough_max_iosize) {
842 if (!osd->od_read_cache) {
846 if (iosize > osd->od_readcache_max_iosize) {
851 /* don't use cache on large files */
852 if (osd->od_readcache_max_filesize &&
853 fsize > osd->od_readcache_max_filesize)
859 if (!cache && unlikely(!oti->oti_dio_pages)) {
860 OBD_ALLOC_PTR_ARRAY(oti->oti_dio_pages, PTLRPC_MAX_BRW_PAGES);
861 if (!oti->oti_dio_pages)
865 /* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
866 gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
868 for (i = 0; i < npages; i++, lnb++) {
869 lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
871 if (lnb->lnb_page == NULL)
872 GOTO(cleanup, rc = -ENOMEM);
878 /* XXX: this version doesn't invalidate cached pages, but uses them */
879 if (!cache && write && obj->oo_inode->i_mapping->nrpages) {
880 /* do not allow data aliasing, invalidate pagecache */
881 /* XXX: can be quite expensive in mixed case */
882 invalidate_mapping_pages(obj->oo_inode->i_mapping,
883 lnb[0].lnb_file_offset >> PAGE_SHIFT,
884 lnb[npages - 1].lnb_file_offset >> PAGE_SHIFT);
892 osd_bufs_put(env, dt, lnb - i, i);
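/*
 * Map the iobuf's (already sorted) pages to disk blocks; with "create"
 * set, new blocks are allocated under the running journal handle. A
 * block number of 0 in the result denotes a hole.
 */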
896 static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
897 int pages, sector_t *blocks,
900 int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
902 struct page *fp = NULL;
904 pgoff_t max_page_index;
905 handle_t *handle = NULL;
907 max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
909 CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
910 inode->i_ino, pages, (*page)->index);
913 create = LDISKFS_GET_BLOCKS_CREATE;
914 handle = ldiskfs_journal_current_handle();
915 LASSERT(handle != NULL);
916 rc = osd_attach_jinode(inode);
920 /* the pages are already sorted, so we just have to find
921 * contiguous extents and process them properly */
923 long blen, total = 0;
924 struct ldiskfs_map_blocks map = { 0 };
926 if (fp == NULL) { /* start new extent */
931 } else if (fp->index + clen == (*page)->index) {
932 /* continue the extent */
938 if (fp->index + clen >= max_page_index)
939 GOTO(cleanup, rc = -EFBIG);
940 /* process found extent */
941 map.m_lblk = fp->index * blocks_per_page;
942 map.m_len = blen = clen * blocks_per_page;
944 rc = ldiskfs_map_blocks(handle, inode, &map, create);
947 for (; total < blen && c < map.m_len; c++, total++) {
949 *(blocks + total) = 0;
953 *(blocks + total) = map.m_pblk + c;
954 /* unmap any possible underlying
955 * metadata from the block device
956 * mapping. bug 6998. */
957 if ((map.m_flags & LDISKFS_MAP_NEW) &&
966 if (rc == 0 && total < blen) {
967 map.m_lblk = fp->index * blocks_per_page + total;
968 map.m_len = blen - total;
974 /* look for next extent */
976 blocks += blocks_per_page * clen;
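/*
 * Prepare a bulk write: pages that overlap data within i_size are
 * queued for a preparation read, while the unwritten parts of partial
 * pages beyond it are simply zeroed.
 */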
982 static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
983 struct niobuf_local *lnb, int npages)
985 struct osd_thread_info *oti = osd_oti_get(env);
986 struct osd_iobuf *iobuf = &oti->oti_iobuf;
987 struct inode *inode = osd_dt_obj(dt)->oo_inode;
988 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
997 rc = osd_init_iobuf(osd, iobuf, 0, npages);
998 if (unlikely(rc != 0))
1001 isize = i_size_read(inode);
1002 maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;
1004 start = ktime_get();
1005 for (i = 0; i < npages; i++) {
1008 * till commit, the content of the page is undefined;
1009 * we'll set it uptodate once the bulk is done. otherwise
1010 * subsequent reads can access non-stable data
1012 ClearPageUptodate(lnb[i].lnb_page);
1014 if (lnb[i].lnb_len == PAGE_SIZE)
1017 if (maxidx >= lnb[i].lnb_page->index) {
1018 osd_iobuf_add_page(iobuf, &lnb[i]);
1021 char *p = kmap(lnb[i].lnb_page);
1023 off = lnb[i].lnb_page_offset;
1026 off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
1029 memset(p + off, 0, PAGE_SIZE - off);
1030 kunmap(lnb[i].lnb_page);
1034 timediff = ktime_us_delta(end, start);
1035 lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1037 if (iobuf->dr_npages) {
1038 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1040 iobuf->dr_blocks, 0);
1041 if (likely(rc == 0)) {
1042 rc = osd_do_bio(osd, inode, iobuf);
1043 /* do IO stats for preparation reads */
1044 osd_fini_iobuf(osd, iobuf);
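/* Cache of the most recently mapped file extent, letting osd_is_mapped()
 * answer repeated queries without calling ->fiemap every time. */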
1050 struct osd_fextent {
1053 unsigned int mapped:1;
1056 static int osd_is_mapped(struct dt_object *dt, __u64 offset,
1057 struct osd_fextent *cached_extent)
1059 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1060 sector_t block = offset >> inode->i_blkbits;
1062 struct fiemap_extent_info fei = { 0 };
1063 struct fiemap_extent fe = { 0 };
1064 mm_segment_t saved_fs;
1067 if (block >= cached_extent->start && block < cached_extent->end)
1068 return cached_extent->mapped;
1070 if (i_size_read(inode) == 0)
1073 /* Beyond EOF, must not be mapped */
1074 if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
1077 fei.fi_extents_max = 1;
1078 fei.fi_extents_start = &fe;
1080 saved_fs = get_fs();
1082 rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
1087 start = fe.fe_logical >> inode->i_blkbits;
1089 if (start > block) {
1090 cached_extent->start = block;
1091 cached_extent->end = start;
1092 cached_extent->mapped = 0;
1094 cached_extent->start = start;
1095 cached_extent->end = (fe.fe_logical + fe.fe_length) >>
1097 cached_extent->mapped = 1;
1100 return cached_extent->mapped;
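/*
 * Reserve journal credits and quota for a bulk write. As a rough worked
 * example, assuming the extent count starts at 1: a contiguous 1MB write
 * in 4KB blocks is a single extent, so with ext_depth(inode) == 1 the
 * extent-tree part reserves 1 (inode) + (1 + 1) * 2 * 1 = 5 credits;
 * the 256 new blocks then add bitmap/gd credits, capped below by the
 * group and gdb counts.
 */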
1103 static int osd_declare_write_commit(const struct lu_env *env,
1104 struct dt_object *dt,
1105 struct niobuf_local *lnb, int npages,
1106 struct thandle *handle)
1108 const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1109 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1110 struct osd_thandle *oh;
1117 long long quota_space = 0;
1118 struct osd_fextent extent = { 0 };
1119 enum osd_quota_local_flags local_flags = 0;
1120 enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
1123 LASSERT(handle != NULL);
1124 oh = container_of(handle, struct osd_thandle, ot_super);
1125 LASSERT(oh->ot_handle == NULL);
1129 /* calculate number of extents (probably better to pass nb) */
1130 for (i = 0; i < npages; i++) {
1131 if (i && lnb[i].lnb_file_offset !=
1132 lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
1135 if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
1136 lnb[i].lnb_flags |= OBD_BRW_MAPPED;
1138 quota_space += PAGE_SIZE;
1140 /* ignore quota for the whole request if any page is from
1141 * client cache or written by root.
1143 * XXX once we drop the 1.8 client support, the checking
1144 * for whether page is from cache can be simplified as:
1145 * !(lnb[i].flags & OBD_BRW_SYNC)
1147 * XXX we could handle this on a per-lnb basis as done by
1149 if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
1150 (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
1152 declare_flags |= OSD_QID_FORCE;
1156 * each extent can go into new leaf causing a split
1157 * 5 is max tree depth: inode + 4 index blocks
1158 * with blockmaps, depth is 3 at most
1160 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
1162 * many concurrent threads may grow the tree by the time
1163 * our transaction starts, so consider 2 as a min depth
1165 depth = ext_depth(inode);
1166 depth = max(depth, 1) + 1;
1168 credits++; /* inode */
1169 credits += depth * 2 * extents;
1173 credits++; /* inode */
1174 credits += depth * extents;
1177 /* quota space for metadata blocks */
1178 quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1180 /* quota space should be reported in 1K blocks */
1181 quota_space = toqb(quota_space);
1183 /* each new block can go in different group (bitmap + gd) */
1185 /* we can't dirty more bitmap blocks than exist */
1186 if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1187 credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1189 credits += newblocks;
1191 /* we can't dirty more gd blocks than exist */
1192 if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1193 credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1195 credits += newblocks;
1197 osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1199 /* make sure the over quota flags were not set */
1200 lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;
1202 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1203 i_projid_read(inode), quota_space, oh,
1204 osd_dt_obj(dt), &local_flags, declare_flags);
1206 /* we only need to store the overquota flags in the first lnb for
1207 * now; once we support multiple-object BRW, this code needs to be
1208 * revised. */
1209 if (local_flags & QUOTA_FL_OVER_USRQUOTA)
1210 lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
1211 if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
1212 lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
1213 if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
1214 lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
1217 rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);
1222 /* Commit a bulk write: map pages to (newly allocated) blocks and submit the IO */
1223 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1224 struct niobuf_local *lnb, int npages,
1225 struct thandle *thandle, __u64 user_size)
1227 struct osd_thread_info *oti = osd_oti_get(env);
1228 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1229 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1230 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1236 rc = osd_init_iobuf(osd, iobuf, 1, npages);
1237 if (unlikely(rc != 0))
1240 disk_size = i_size_read(inode);
1241 /* if disk_size is already bigger than specified user_size,
1244 if (disk_size > user_size)
1246 dquot_initialize(inode);
1248 for (i = 0; i < npages; i++) {
1249 if (lnb[i].lnb_rc == -ENOSPC &&
1250 (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
1251 /* Allow the write to proceed if overwriting an
1256 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1257 CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1259 LASSERT(lnb[i].lnb_page);
1260 generic_error_remove_page(inode->i_mapping,
1265 LASSERT(PageLocked(lnb[i].lnb_page));
1266 LASSERT(!PageWriteback(lnb[i].lnb_page));
1268 if (lnb[i].lnb_file_offset + lnb[i].lnb_len > disk_size)
1269 disk_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
1272 * Since write and truncate are serialized by oo_sem, even
1273 * partial-page truncate should not leave dirty pages in the
1276 LASSERT(!PageDirty(lnb[i].lnb_page));
1278 SetPageUptodate(lnb[i].lnb_page);
1280 osd_iobuf_add_page(iobuf, &lnb[i]);
1282 /* if file has grown, take user_size into account */
1283 if (user_size && disk_size > user_size)
1284 disk_size = user_size;
1286 osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
1288 if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1290 } else if (iobuf->dr_npages > 0) {
1291 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1293 iobuf->dr_blocks, 1);
1295 /* no pages to write, no transno is needed */
1296 thandle->th_local = 1;
1299 if (likely(rc == 0)) {
1300 spin_lock(&inode->i_lock);
1301 if (disk_size > i_size_read(inode)) {
1302 i_size_write(inode, disk_size);
1303 LDISKFS_I(inode)->i_disksize = disk_size;
1304 spin_unlock(&inode->i_lock);
1305 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1307 spin_unlock(&inode->i_lock);
1310 rc = osd_do_bio(osd, inode, iobuf);
1311 /* we don't do stats here as in the read path because
1312 * write is async: we'll do this in osd_bufs_put() */
1314 osd_fini_iobuf(osd, iobuf);
1317 osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
1319 if (unlikely(rc != 0)) {
1320 /* if write fails, we should drop pages from the cache */
1321 for (i = 0; i < npages; i++) {
1322 if (lnb[i].lnb_page == NULL)
1324 if (!PagePrivate2(lnb[i].lnb_page)) {
1325 LASSERT(PageLocked(lnb[i].lnb_page));
1326 generic_error_remove_page(inode->i_mapping,
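/*
 * Prepare a bulk read: pages already uptodate in the cache are counted
 * as hits and unlocked right away; the others are queued in the iobuf
 * for one batched block mapping and bio submission.
 */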
1335 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1336 struct niobuf_local *lnb, int npages)
1338 struct osd_thread_info *oti = osd_oti_get(env);
1339 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1340 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1341 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1342 int rc = 0, i, cache_hits = 0, cache_misses = 0;
1349 rc = osd_init_iobuf(osd, iobuf, 0, npages);
1350 if (unlikely(rc != 0))
1353 isize = i_size_read(inode);
1355 start = ktime_get();
1356 for (i = 0; i < npages; i++) {
1358 if (isize <= lnb[i].lnb_file_offset)
1359 /* If there's no more data, abort early.
1360 * lnb->lnb_rc == 0, so it's easy to detect later. */
1363 /* instead of checking if we go beyond isize, send complete
1364 * pages all the time
1366 lnb[i].lnb_rc = lnb[i].lnb_len;
1368 /* Bypass disk read if fail_loc is set properly */
1369 if (OBD_FAIL_CHECK(OBD_FAIL_OST_FAKE_RW))
1370 SetPageUptodate(lnb[i].lnb_page);
1372 if (PageUptodate(lnb[i].lnb_page)) {
1374 unlock_page(lnb[i].lnb_page);
1377 osd_iobuf_add_page(iobuf, &lnb[i]);
1379 /* no need to unlock in osd_bufs_put(), the sooner page is
1380 * unlocked, the earlier another client can access it.
1381 * notice the real unlock_page() can be called a few lines
1382 * below after osd_do_bio(). lnb is per-thread, so it's
1383 * fine to have PG_locked and lnb_locked inconsistent here */
1384 lnb[i].lnb_locked = 0;
1387 timediff = ktime_us_delta(end, start);
1388 lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1390 if (cache_hits != 0)
1391 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1393 if (cache_misses != 0)
1394 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1396 if (cache_hits + cache_misses != 0)
1397 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1398 cache_hits + cache_misses);
1400 if (iobuf->dr_npages) {
1401 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1403 iobuf->dr_blocks, 0);
1404 rc = osd_do_bio(osd, inode, iobuf);
1406 /* IO stats will be done in osd_bufs_put() */
1408 /* early release to let others read data during the bulk */
1409 for (i = 0; i < iobuf->dr_npages; i++) {
1410 LASSERT(PageLocked(iobuf->dr_pages[i]));
1411 if (!PagePrivate2(iobuf->dr_pages[i]))
1412 unlock_page(iobuf->dr_pages[i]);
1420 * XXX: Another layering violation for now.
1422 * We don't want to use ->f_op->read methods, because generic file write
1424 * - serializes on ->i_sem, and
1426 * - does a lot of extra work like balance_dirty_pages(),
1428 * which doesn't work for globally shared files like /last_rcvd.
1430 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1432 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1434 memcpy(buffer, (char *)ei->i_data, buflen);
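/*
 * Read inode data through buffer heads, bypassing the pagecache (see
 * the layering note above). The read is clipped at i_size and holes
 * are returned as zeroes.
 */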
1439 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1441 struct buffer_head *bh;
1442 unsigned long block;
1448 /* prevent reading after eof */
1449 spin_lock(&inode->i_lock);
1450 if (i_size_read(inode) < *offs + size) {
1451 loff_t diff = i_size_read(inode) - *offs;
1452 spin_unlock(&inode->i_lock);
1455 "size %llu is too short to read @%llu\n",
1456 i_size_read(inode), *offs);
1458 } else if (diff == 0) {
1464 spin_unlock(&inode->i_lock);
1467 blocksize = 1 << inode->i_blkbits;
1470 block = *offs >> inode->i_blkbits;
1471 boffs = *offs & (blocksize - 1);
1472 csize = min(blocksize - boffs, size);
1473 bh = __ldiskfs_bread(NULL, inode, block, 0);
1475 CERROR("%s: can't read %u@%llu on ino %lu: "
1476 "rc = %ld\n", osd_ino2name(inode),
1477 csize, *offs, inode->i_ino,
1483 memcpy(buf, bh->b_data + boffs, csize);
1486 memset(buf, 0, csize);
1496 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1497 struct lu_buf *buf, loff_t *pos)
1499 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1502 /* Read small symlink from inode body as we need to maintain correct
1503 * on-disk symlinks for ldiskfs.
1505 if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
1506 loff_t size = i_size_read(inode);
1508 if (buf->lb_len < size)
1511 if (size < sizeof(LDISKFS_I(inode)->i_data))
1512 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
1514 rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
1516 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1522 static inline int osd_extents_enabled(struct super_block *sb,
1523 struct inode *inode)
1525 if (inode != NULL) {
1526 if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
1528 } else if (ldiskfs_has_feature_extents(sb)) {
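/*
 * Credit estimate for block-mapped (non-extent) files: start from a
 * conservative two-level reservation and shrink it for the direct and
 * single-indirect layouts recognized below.
 */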
1534 int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
1535 const loff_t size, const loff_t pos,
1538 int credits, bits, bs, i;
1540 bits = sb->s_blocksize_bits;
1543 /* legacy blockmap: 3 levels * 3 (bitmap,gd,itself)
1544 * we do not expect blockmaps on large files,
1545 * so let's shrink it to 2 levels (4GB files) */
1547 /* this is default reservation: 2 levels */
1548 credits = (blocks + 2) * 3;
1550 /* actual offset is unknown, hard to optimize */
1554 /* now check for few specific cases to optimize */
1555 if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1558 /* allocate if not allocated */
1559 if (inode == NULL) {
1560 credits += blocks * 2;
1563 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1564 LASSERT(i < LDISKFS_NDIR_BLOCKS);
1565 if (LDISKFS_I(inode)->i_data[i] == 0)
1568 } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1569 /* single indirect */
1570 credits = blocks * 3;
1571 if (inode == NULL ||
1572 LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
1575 /* The indirect block may be modified. */
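/*
 * Declare a small internal write (llog, last_rcvd, symlink): compute
 * journal credits from the block count and layout, then declare quota
 * with zero space as explained below.
 */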
1582 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1583 const struct lu_buf *buf, loff_t _pos,
1584 struct thandle *handle)
1586 struct osd_object *obj = osd_dt_obj(dt);
1587 struct inode *inode = obj->oo_inode;
1588 struct super_block *sb = osd_sb(osd_obj2dev(obj));
1589 struct osd_thandle *oh;
1590 int rc = 0, est = 0, credits, blocks, allocated = 0;
1596 LASSERT(buf != NULL);
1597 LASSERT(handle != NULL);
1599 oh = container_of(handle, struct osd_thandle, ot_super);
1600 LASSERT(oh->ot_handle == NULL);
1603 bits = sb->s_blocksize_bits;
1607 /* if this is an append, then we
1608 * should expect a cross-block record */
1614 /* blocks to modify */
1615 blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1616 LASSERT(blocks > 0);
1618 if (inode != NULL && _pos != -1) {
1619 /* object size in blocks */
1620 est = (i_size_read(inode) + bs - 1) >> bits;
1621 allocated = inode->i_blocks >> (bits - 9);
1622 if (pos + size <= i_size_read(inode) && est <= allocated) {
1623 /* looks like an overwrite, no need to modify tree */
1625 /* no need to modify i_size */
1630 if (osd_extents_enabled(sb, inode)) {
1632 * many concurrent threads may grow the tree by the time
1633 * our transaction starts, so consider 2 as a min depth.
1634 * for every level we may need to allocate a new block
1635 * and take some entries from the old one. so, 3 blocks
1636 * to allocate (bitmap, gd, itself) + old block - 4 per
1639 depth = inode != NULL ? ext_depth(inode) : 0;
1640 depth = max(depth, 1) + 1;
1642 /* if not append, then split may need to modify
1643 * existing blocks moving entries into the new ones */
1646 /* blocks to store data: bitmap,gd,itself */
1647 credits += blocks * 3;
1649 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1651 /* if inode is created as part of the transaction,
1652 * then it's counted already by the creation method */
1658 osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1660 /* dt_declare_write() is usually called for system objects, such
1661 * as llog or last_rcvd files. We needn't enforce quota on those
1662 * objects, so always set the lqi_space as 0. */
1664 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1666 i_projid_read(inode), 0,
1667 oh, obj, NULL, OSD_QID_BLK);
1670 rc = osd_trunc_lock(obj, oh, true);
1675 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1677 /* LU-2634: clear the extent format for fast symlink */
1678 ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1680 memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1681 spin_lock(&inode->i_lock);
1682 LDISKFS_I(inode)->i_disksize = buflen;
1683 i_size_write(inode, buflen);
1684 spin_unlock(&inode->i_lock);
1685 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
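/*
 * Journaled record write: copy the buffer through buffer heads block by
 * block under the running handle, taking i_append_sem when appending
 * past the last block, and extend i_size/i_disksize if the record grows
 * the file.
 */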
1690 static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
1691 int bufsize, int write_NUL, loff_t *offs,
1694 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1695 struct buffer_head *bh = NULL;
1696 loff_t offset = *offs;
1697 loff_t new_size = i_size_read(inode);
1698 unsigned long block;
1699 int blocksize = 1 << inode->i_blkbits;
1700 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1704 int dirty_inode = 0;
1705 bool create, sparse, sync = false;
1709 * long symlink write does not count the NUL terminator in
1710 * bufsize, we write it, and the inode's file size does not
1711 * count the NUL terminator as well.
1713 ((char *)buf)[bufsize] = '\0';
1717 dirty_inode = test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA, &ei->i_flags);
1719 /* sparse checking is racy, but sparse is a very rare case, so leave it as is */
1720 sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
1721 ((new_size - 1) >> inode->i_blkbits) + 1);
1723 while (bufsize > 0) {
1724 int credits = handle->h_buffer_credits;
1725 unsigned long last_block = (new_size == 0) ? 0 :
1726 (new_size - 1) >> inode->i_blkbits;
1731 block = offset >> inode->i_blkbits;
1732 boffs = offset & (blocksize - 1);
1733 size = min(blocksize - boffs, bufsize);
1734 sync = (block > last_block || new_size == 0 || sparse);
1737 down(&ei->i_append_sem);
1739 bh = __ldiskfs_bread(handle, inode, block, 0);
1741 if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
1742 CWARN("%s: adding bh without locking off %llu (block %lu, "
1743 "size %d, offs %llu)\n", inode->i_sb->s_id,
1744 offset, block, bufsize, *offs);
1746 if (IS_ERR_OR_NULL(bh)) {
1747 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1748 int flags = LDISKFS_GET_BLOCKS_CREATE;
1750 /* while the file system is being mounted, avoid
1751 * preallocation, otherwise mount can take a long
1752 * time as the mballoc cache is cold.
1753 * XXX: this is a workaround until we have a proper
1755 * XXX: works with extent-based files only */
1756 if (!osd->od_cl_seq)
1757 flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
1758 bh = __ldiskfs_bread(handle, inode, block, flags);
1762 up(&ei->i_append_sem);
1767 if (IS_ERR_OR_NULL(bh)) {
1775 CERROR("%s: error reading offset %llu (block %lu, "
1776 "size %d, offs %llu), credits %d/%d: rc = %d\n",
1777 inode->i_sb->s_id, offset, block, bufsize, *offs,
1778 credits, handle->h_buffer_credits, err);
1782 err = ldiskfs_journal_get_write_access(handle, bh);
1784 CERROR("journal_get_write_access() returned error %d\n",
1788 LASSERTF(boffs + size <= bh->b_size,
1789 "boffs %d size %d bh->b_size %lu\n",
1790 boffs, size, (unsigned long)bh->b_size);
1792 memset(bh->b_data, 0, bh->b_size);
1794 up(&ei->i_append_sem);
1798 memcpy(bh->b_data + boffs, buf, size);
1799 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
1803 if (offset + size > new_size)
1804 new_size = offset + size;
1810 up(&ei->i_append_sem);
1817 /* correct in-core and on-disk sizes */
1818 if (new_size > i_size_read(inode)) {
1819 spin_lock(&inode->i_lock);
1820 if (new_size > i_size_read(inode))
1821 i_size_write(inode, new_size);
1822 if (i_size_read(inode) > ei->i_disksize) {
1823 ei->i_disksize = i_size_read(inode);
1826 spin_unlock(&inode->i_lock);
1829 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
1836 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1837 const struct lu_buf *buf, loff_t *pos,
1838 struct thandle *handle)
1840 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1841 struct osd_thandle *oh;
1845 LASSERT(dt_object_exists(dt));
1847 LASSERT(handle != NULL);
1848 LASSERT(inode != NULL);
1849 dquot_initialize(inode);
1851 /* XXX: don't check: one declared chunk can be used many times */
1852 /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1854 oh = container_of(handle, struct osd_thandle, ot_super);
1855 LASSERT(oh->ot_handle->h_transaction != NULL);
1856 osd_trans_exec_op(env, handle, OSD_OT_WRITE);
1858 /* Write small symlink to inode body as we need to maintain correct
1859 * on-disk symlinks for ldiskfs.
1860 * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
1861 * does not count it in.
1863 is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1864 if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1865 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1867 result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
1868 is_link, pos, oh->ot_handle);
1870 result = buf->lb_len;
1872 osd_trans_exec_check(env, handle, OSD_OT_WRITE);
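/* Declare a fallocate: reserve DTO_WRITE_BLOCK journal credits and
 * declare the inode's quota IDs (with zero extra space) for the change. */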
1877 static int osd_declare_fallocate(const struct lu_env *env,
1878 struct dt_object *dt, struct thandle *th)
1880 struct osd_thandle *oh;
1881 struct inode *inode;
1886 oh = container_of(th, struct osd_thandle, ot_super);
1888 osd_trans_declare_op(env, oh, OSD_OT_PREALLOC,
1889 osd_dto_credits_noquota[DTO_WRITE_BLOCK]);
1890 inode = osd_dt_obj(dt)->oo_inode;
1893 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1894 i_projid_read(inode), 0, oh, osd_dt_obj(dt),
1899 static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
1900 __u64 start, __u64 end, int mode, struct thandle *th)
1902 struct osd_object *obj = osd_dt_obj(dt);
1903 struct inode *inode = obj->oo_inode;
1905 struct osd_thread_info *info = osd_oti_get(env);
1906 struct dentry *dentry = &info->oti_obj_dentry;
1907 struct file *file = &info->oti_file;
1911 * Only mode == 0 (which is standard prealloc) is supported now.
1912 * The rest of the mode options are not supported yet.
1914 if (mode & ~FALLOC_FL_KEEP_SIZE)
1915 RETURN(-EOPNOTSUPP);
1917 LASSERT(dt_object_exists(dt));
1918 LASSERT(osd_invariant(obj));
1919 LASSERT(inode != NULL);
1920 dquot_initialize(inode);
1924 osd_trans_exec_op(env, th, OSD_OT_PREALLOC);
1927 * Because f_op->fallocate() does not have an inode arg
1929 dentry->d_inode = inode;
1930 dentry->d_sb = inode->i_sb;
1931 file->f_path.dentry = dentry;
1932 file->f_mapping = inode->i_mapping;
1933 file->f_op = inode->i_fop;
1934 file->f_inode = inode;
1935 rc = file->f_op->fallocate(file, mode, start, end - start);
1940 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1941 __u64 start, __u64 end, struct thandle *th)
1943 struct osd_thandle *oh;
1944 struct inode *inode;
1949 oh = container_of(th, struct osd_thandle, ot_super);
1952 * we don't need to reserve credits for the whole truncate:
1953 * that's not possible, as truncate may need to free too many
1954 * blocks and that won't fit a single transaction. instead
1955 * we reserve credits to change i_size and put the inode onto
1956 * the orphan list. if needed, truncate will extend or restart
1957 * the transaction. */
1959 osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1960 osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1962 inode = osd_dt_obj(dt)->oo_inode;
1965 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1966 i_projid_read(inode), 0, oh, osd_dt_obj(dt),
1970 rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
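/*
 * Punch/truncate entry point: update i_size under i_lock, round the
 * pagecache truncation point up to an encryption unit boundary when the
 * content is encrypted, add the inode to the orphan list, and flag the
 * truncate lock so the actual ldiskfs_truncate() runs in osd_trans_stop().
 */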
1975 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1976 __u64 start, __u64 end, struct thandle *th)
1978 struct osd_object *obj = osd_dt_obj(dt);
1979 struct osd_device *osd = osd_obj2dev(obj);
1980 struct inode *inode = obj->oo_inode;
1981 struct osd_access_lock *al;
1982 struct osd_thandle *oh;
1983 int rc = 0, found = 0;
1987 LASSERT(dt_object_exists(dt));
1988 LASSERT(osd_invariant(obj));
1989 LASSERT(inode != NULL);
1990 dquot_initialize(inode);
1993 oh = container_of(th, struct osd_thandle, ot_super);
1994 LASSERT(oh->ot_handle->h_transaction != NULL);
1996 /* we used to skip truncate to the current size to
1997 * optimize truncates on OST. with DoM we can
1998 * get attr_set to set a specific size (MDS_REINT)
1999 * and then get a truncate RPC which would essentially
2000 * be skipped. this is bad, so disable
2001 * this optimization on MDS till the client stops
2002 * sending MDS_REINT (LU-11033) -bzzz */
2003 if (osd->od_is_ost && i_size_read(inode) == start)
2006 osd_trans_exec_op(env, th, OSD_OT_PUNCH);
2008 spin_lock(&inode->i_lock);
2009 if (i_size_read(inode) < start)
2011 i_size_write(inode, start);
2012 spin_unlock(&inode->i_lock);
2013 /* if object holds encrypted content, we need to make sure we truncate
2014 * on an encryption unit boundary, or subsequent reads will get
2017 if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2018 start & ~LUSTRE_ENCRYPTION_MASK)
2019 start = (start & LUSTRE_ENCRYPTION_MASK) +
2020 LUSTRE_ENCRYPTION_UNIT_SIZE;
2021 ll_truncate_pagecache(inode, start);
2023 /* optimize grow case */
2025 osd_execute_truncate(obj);
2030 /* add to orphan list to ensure truncate completion
2031 * if this transaction succeeds. ldiskfs_truncate()
2032 * will take the inode out of the list */
2033 rc = ldiskfs_orphan_add(oh->ot_handle, inode);
2034 inode_unlock(inode);
2038 list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
2039 if (obj != al->tl_obj)
2041 LASSERT(al->tl_shared == 0);
2043 /* do actual truncate in osd_trans_stop() */
2044 al->tl_truncate = 1;
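/* Clamp a fiemap request to the range this filesystem can actually map. */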
2053 static int fiemap_check_ranges(struct inode *inode,
2054 u64 start, u64 len, u64 *new_len)
2063 if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
2064 maxbytes = inode->i_sb->s_maxbytes;
2066 maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
2068 if (start > maxbytes)
2072 * Shrink request scope to what the fs can actually handle.
2074 if (len > maxbytes || (maxbytes - len) < start)
2075 *new_len = maxbytes - start;
2080 /* So that the fiemap access checks can't overflow on 32 bit machines. */
2081 #define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))
2083 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
2086 struct fiemap_extent_info fieinfo = {0, };
2087 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2090 mm_segment_t cur_fs;
2093 if (inode->i_op->fiemap == NULL)
2096 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
2099 rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
2103 fieinfo.fi_flags = fm->fm_flags;
2104 fieinfo.fi_extents_max = fm->fm_extent_count;
2105 fieinfo.fi_extents_start = fm->fm_extents;
2107 if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
2108 filemap_write_and_wait(inode->i_mapping);
2110 /* Save the previous address limit */
2112 /* Set the address limit of the kernel */
2115 rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
2116 fm->fm_flags = fieinfo.fi_flags;
2117 fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
2119 /* Restore the previous address limit */
2125 static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
2126 __u64 start, __u64 end, enum lu_ladvise_type advice)
2128 struct osd_object *obj = osd_dt_obj(dt);
2133 case LU_LADVISE_DONTNEED:
2135 invalidate_mapping_pages(obj->oo_inode->i_mapping,
2136 start >> PAGE_SHIFT,
2137 (end - 1) >> PAGE_SHIFT);
2148 * in some cases we may need declare methods for objects being created,
2149 * e.g., when we create a symlink
2151 const struct dt_body_operations osd_body_ops_new = {
2152 .dbo_declare_write = osd_declare_write,
2155 const struct dt_body_operations osd_body_ops = {
2156 .dbo_read = osd_read,
2157 .dbo_declare_write = osd_declare_write,
2158 .dbo_write = osd_write,
2159 .dbo_bufs_get = osd_bufs_get,
2160 .dbo_bufs_put = osd_bufs_put,
2161 .dbo_write_prep = osd_write_prep,
2162 .dbo_declare_write_commit = osd_declare_write_commit,
2163 .dbo_write_commit = osd_write_commit,
2164 .dbo_read_prep = osd_read_prep,
2165 .dbo_declare_punch = osd_declare_punch,
2166 .dbo_punch = osd_punch,
2167 .dbo_fiemap_get = osd_fiemap_get,
2168 .dbo_ladvise = osd_ladvise,
2169 .dbo_declare_fallocate = osd_declare_fallocate,
2170 .dbo_fallocate = osd_fallocate,
2174 * Get a truncate lock
2176 * In order to take a multi-transaction truncate out of the main transaction
2177 * we let the caller grab a lock on the object passed. The lock can be shared
2178 * (for writes) or exclusive (for truncate). It's not allowed to mix truncate
2179 * and write in the same transaction handle (do not confuse with a big ldiskfs
2180 * transaction containing lots of handles).
2181 * The lock must be taken at declaration.
2183 * \param obj object to lock
2185 * \param shared shared or exclusive
2187 * \retval 0 lock is granted
2188 * \retval -ENOMEM no memory to allocate lock
2190 int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
2192 struct osd_access_lock *al, *tmp;
2197 list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
2198 if (tmp->tl_obj != obj)
2200 LASSERT(tmp->tl_shared == shared);
2201 /* found same lock */
2206 if (unlikely(al == NULL))
2209 al->tl_truncate = false;
2211 down_read(&obj->oo_ext_idx_sem);
2213 down_write(&obj->oo_ext_idx_sem);
2214 al->tl_shared = shared;
2215 lu_object_get(&obj->oo_dt.do_lu);
2217 list_add(&al->tl_list, &oh->ot_trunc_locks);
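/* Release all truncate locks attached to a handle and drop the object
 * references taken in osd_trunc_lock(). */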
2222 void osd_trunc_unlock_all(const struct lu_env *env, struct list_head *list)
2224 struct osd_access_lock *al, *tmp;
2225 list_for_each_entry_safe(al, tmp, list, tl_list) {
2227 up_read(&al->tl_obj->oo_ext_idx_sem);
2229 up_write(&al->tl_obj->oo_ext_idx_sem);
2230 osd_object_put(env, al->tl_obj);
2231 list_del(&al->tl_list);
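/*
 * Execute a deferred truncate outside the main transaction: run
 * ldiskfs_truncate(), restore i_size if the encryption rounding grew
 * it, and flush or invalidate a partial last page to avoid corruption
 * (b=17397).
 */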
2236 void osd_execute_truncate(struct osd_object *obj)
2238 struct osd_device *d = osd_obj2dev(obj);
2239 struct inode *inode = obj->oo_inode;
2242 /* simulate a crash before (or in the middle of) a delayed truncate */
2243 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
2244 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2245 struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);
2247 mutex_lock(&sbi->s_orphan_lock);
2248 list_del_init(&ei->i_orphan);
2249 mutex_unlock(&sbi->s_orphan_lock);
2253 size = i_size_read(inode);
2255 /* if object holds encrypted content, we need to make sure we truncate
2256 * on an encryption unit boundary, or block content will get corrupted
2258 if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
2259 size & ~LUSTRE_ENCRYPTION_MASK)
2260 inode->i_size = (size & LUSTRE_ENCRYPTION_MASK) +
2261 LUSTRE_ENCRYPTION_UNIT_SIZE;
2262 ldiskfs_truncate(inode);
2263 inode_unlock(inode);
2264 if (inode->i_size != size) {
2265 spin_lock(&inode->i_lock);
2266 i_size_write(inode, size);
2267 LDISKFS_I(inode)->i_disksize = size;
2268 spin_unlock(&inode->i_lock);
2269 osd_dirty_inode(inode, I_DIRTY_DATASYNC);
2273 * For a partial-page truncate, flush the page to disk immediately to
2274 * avoid data corruption during direct disk write. b=17397
2276 if ((size & ~PAGE_MASK) == 0)
2278 if (osd_use_page_cache(d)) {
2279 filemap_fdatawrite_range(inode->i_mapping, size, size + 1);
2281 /* Notice we use the "wait" version to ensure I/O is complete */
2282 filemap_write_and_wait_range(inode->i_mapping, size, size + 1);
2283 invalidate_mapping_pages(inode->i_mapping, size >> PAGE_SHIFT,
2284 size >> PAGE_SHIFT);
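/* Run every truncate flagged on the handle's lock list; must be called
 * with no journal handle held (asserted below). */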
2288 void osd_process_truncates(struct list_head *list)
2290 struct osd_access_lock *al;
2292 LASSERT(journal_current_handle() == NULL);
2294 list_for_each_entry(al, list, tl_list) {
2297 if (!al->tl_truncate)
2299 osd_execute_truncate(al->tl_obj);