/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSD

/* prerequisite for linux/xattr.h */
#include <linux/types.h>
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/pagevec.h>

/*
 * struct OBD_{ALLOC,FREE}*()
 */
#include <obd_support.h>
#include <libcfs/libcfs.h>

#include "osd_internal.h"

/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>
#include <ldiskfs/ldiskfs.h>
static inline bool osd_use_page_cache(struct osd_device *d)
{
	/* do not use pagecache if write and read caching are disabled */
	if (d->od_writethrough_cache + d->od_read_cache == 0)
		return false;
	/* use pagecache by default */
	return true;
}
static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
			    int rw, int line, int pages)
{
	int blocks, i;

	LASSERTF(iobuf->dr_elapsed_valid == 0,
		 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
		 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
		 iobuf->dr_init_at);
	LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);

	init_waitqueue_head(&iobuf->dr_wait);
	atomic_set(&iobuf->dr_numreqs, 0);
	iobuf->dr_npages = 0;
	iobuf->dr_error = 0;
	iobuf->dr_dev = d;
	iobuf->dr_frags = 0;
	iobuf->dr_elapsed = ktime_set(0, 0);
	/* must be counted before, so assert */
	iobuf->dr_rw = rw;
	iobuf->dr_init_at = line;

	blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
	if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
		LASSERT(iobuf->dr_pg_buf.lb_len >=
			pages * sizeof(iobuf->dr_pages[0]));
		return 0;
	}

	/* start with 1MB for 4K blocks */
	i = 256;
	while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
		i <<= 1;

	CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
	       (unsigned int)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
	pages = i;
	blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
	iobuf->dr_max_pages = 0;
	CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
	       (unsigned int)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);

	lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
	iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
	if (unlikely(iobuf->dr_blocks == NULL))
		return -ENOMEM;

	lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
	iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
	if (unlikely(iobuf->dr_pages == NULL))
		return -ENOMEM;

	lu_buf_realloc(&iobuf->dr_lnb_buf,
		       pages * sizeof(iobuf->dr_lnbs[0]));
	iobuf->dr_lnbs = iobuf->dr_lnb_buf.lb_buf;
	if (unlikely(iobuf->dr_lnbs == NULL))
		return -ENOMEM;

	iobuf->dr_max_pages = pages;

	return 0;
}
#define osd_init_iobuf(dev, iobuf, rw, pages) \
	__osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
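/*
 * Worked example (illustrative, not part of the original code): the
 * reallocation above rounds the page count up to a power of two, starting
 * at 256 pages (1MB of 4KB pages), so a sequence of BRWs with growing page
 * counts triggers at most a logarithmic number of reallocations:
 *
 *	pages = 300:  i = 256 -> 512, dr_max_pages becomes 512
 *	pages = 500:  fits within 512, early return, no realloc
 */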
static void osd_iobuf_add_page(struct osd_iobuf *iobuf,
			       struct niobuf_local *lnb)
{
	LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
	iobuf->dr_pages[iobuf->dr_npages] = lnb->lnb_page;
	iobuf->dr_lnbs[iobuf->dr_npages] = lnb;
	iobuf->dr_npages++;
}
void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
{
	int rw = iobuf->dr_rw;

	if (iobuf->dr_elapsed_valid) {
		struct brw_stats *h = &d->od_brw_stats;

		iobuf->dr_elapsed_valid = 0;
		LASSERT(iobuf->dr_dev == d);
		LASSERT(iobuf->dr_frags > 0);
		lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_R_DIO_FRAGS+rw],
				      iobuf->dr_frags);
		lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_R_IO_TIME+rw],
					   ktime_to_ms(iobuf->dr_elapsed));
	}
}
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
#else
static void dio_complete_routine(struct bio *bio, int error)
{
#endif
	struct osd_iobuf *iobuf = bio->bi_private;
	struct bio_vec *bvl;

	/* CAVEAT EMPTOR: possibly in IRQ context
	 * DO NOT record procfs stats here!!!
	 */
	if (unlikely(iobuf == NULL)) {
		CERROR("***** bio->bi_private is NULL! Dumping bio contents to the console. Please report this to <https://jira.whamcloud.com/>; this node will probably need to be rebooted.\n");
		CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
		       ": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
		       bio->bi_next, (unsigned long)bio->bi_flags,
		       (unsigned int)bio->bi_opf, bio->bi_vcnt, bio_idx(bio),
		       bio_sectors(bio) << 9, bio->bi_end_io,
		       atomic_read(&bio->__bi_cnt),
		       bio->bi_private);
		return;
	}

	/* the check is outside of the cycle for performance reason -bzzz */
	if (!bio_data_dir(bio)) {
		DECLARE_BVEC_ITER_ALL(iter_all);

		bio_for_each_segment_all(bvl, bio, iter_all) {
			if (likely(error == 0))
				SetPageUptodate(bvl_to_page(bvl));
			LASSERT(PageLocked(bvl_to_page(bvl)));
		}
		atomic_dec(&iobuf->dr_dev->od_r_in_flight);
	} else {
		atomic_dec(&iobuf->dr_dev->od_w_in_flight);
	}

	/* any real error is good enough -bzzz */
	if (error != 0 && iobuf->dr_error == 0)
		iobuf->dr_error = error;

	/*
	 * set dr_elapsed before dr_numreqs turns to 0, otherwise
	 * it's possible that service thread will see dr_numreqs
	 * is zero, but dr_elapsed is not set yet, leading to lost
	 * data in this processing and an assertion in a subsequent
	 * call.
	 */
	if (atomic_read(&iobuf->dr_numreqs) == 1) {
		ktime_t now = ktime_get();

		iobuf->dr_elapsed = ktime_sub(now, iobuf->dr_start_time);
		iobuf->dr_elapsed_valid = 1;
	}
	if (atomic_dec_and_test(&iobuf->dr_numreqs))
		wake_up(&iobuf->dr_wait);

	/* Completed bios used to be chained off iobuf->dr_bios and freed in
	 * filter_clear_dreq(). It was then possible to exhaust the biovec-256
	 * mempool when serious on-disk fragmentation was encountered,
	 * deadlocking the OST. The bios are now released as soon as complete
	 * so the pool cannot be exhausted while IOs are competing. b=10076
	 */
	bio_put(bio);
}

/* compat dispatch for the one-arg/two-arg bi_end_io prototypes above */
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
#define osd_dio_complete_routine(bio, error) dio_complete_routine(bio)
#else
#define osd_dio_complete_routine(bio, error) dio_complete_routine(bio, error)
#endif
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
	struct osd_device *osd = iobuf->dr_dev;
	struct brw_stats *h = &osd->od_brw_stats;

	iobuf->dr_frags++;
	atomic_inc(&iobuf->dr_numreqs);

	if (iobuf->dr_rw == 0) {
		atomic_inc(&osd->od_r_in_flight);
		lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_R_RPC_HIST],
				      atomic_read(&osd->od_r_in_flight));
		lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_R_DISK_IOSIZE],
					   size);
	} else if (iobuf->dr_rw == 1) {
		atomic_inc(&osd->od_w_in_flight);
		lprocfs_oh_tally_pcpu(&h->bs_hist[BRW_W_RPC_HIST],
				      atomic_read(&osd->od_w_in_flight));
		lprocfs_oh_tally_log2_pcpu(&h->bs_hist[BRW_W_DISK_IOSIZE],
					   size);
	} else {
		LBUG();
	}
}
static void osd_submit_bio(int rw, struct bio *bio)
{
	LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
#ifdef HAVE_SUBMIT_BIO_2ARGS
	submit_bio(rw ? WRITE : READ, bio);
#else
	bio->bi_opf |= rw;
	submit_bio(bio);
#endif
}
static int can_be_merged(struct bio *bio, sector_t sector)
{
	if (bio == NULL)
		return 0;

	return bio_end_sector(bio) == sector ? 1 : 0;
}
#if IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)
#ifdef HAVE_BIO_INTEGRITY_PREP_FN
# ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_integrity_complete_routine(struct bio *bio)
# else
static void dio_integrity_complete_routine(struct bio *bio, int error)
# endif
{
	struct osd_bio_private *bio_private = bio->bi_private;

	bio->bi_private = bio_private->obp_iobuf;
	osd_dio_complete_routine(bio, error);

	OBD_FREE_PTR(bio_private);
}
/*
 * This function will change the data written, thus it should only be
 * used when checking the data integrity feature
 */
static void bio_integrity_fault_inject(struct bio *bio)
{
	struct bio_vec *bvec;
	DECLARE_BVEC_ITER_ALL(iter_all);
	void *kaddr;
	char *addr;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* flip one byte so the guard tag no longer matches */
		kaddr = kmap(page);
		addr = kaddr;
		*addr = ~(*addr);
		kunmap(page);
		break;
	}
}
static int bio_dif_compare(__u16 *expected_guard_buf, void *bio_prot_buf,
			   unsigned int sectors, int tuple_size)
{
	__be16 *expected_guard;
	__be16 *bio_guard;
	int i;

	expected_guard = (__be16 *)expected_guard_buf;
	for (i = 0; i < sectors; i++) {
		bio_guard = (__be16 *)bio_prot_buf;
		if (*bio_guard != *expected_guard) {
			CERROR("unexpected guard tags on sector %d expected guard %u, bio guard %u, sectors %u, tuple size %d\n",
			       i, *expected_guard, *bio_guard, sectors,
			       tuple_size);
			return -EIO;
		}
		expected_guard++;
		bio_prot_buf += tuple_size;
	}
	return 0;
}
static int osd_bio_integrity_compare(struct bio *bio, struct block_device *bdev,
				     struct osd_iobuf *iobuf, int index)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct niobuf_local *lnb = NULL;
	unsigned short sector_size = blk_integrity_interval(bi);
	void *bio_prot_buf = page_address(bip->bip_vec->bv_page) +
		bip->bip_vec->bv_offset;
	struct bio_vec *bv;
	sector_t sector = bio_start_sector(bio);
	unsigned int i, sectors, total;
	DECLARE_BVEC_ITER_ALL(iter_all);
	__be16 *expected_guard;
	int rc;

	total = 0;
	bio_for_each_segment_all(bv, bio, iter_all) {
		for (i = index; i < iobuf->dr_npages; i++) {
			if (iobuf->dr_pages[i] == bv->bv_page) {
				lnb = iobuf->dr_lnbs[i];
				break;
			}
		}
		if (lnb == NULL)
			continue;

		expected_guard = lnb->lnb_guards;
		sectors = bv->bv_len / sector_size;
		if (lnb->lnb_guard_rpc) {
			rc = bio_dif_compare(expected_guard, bio_prot_buf,
					     sectors, bi->tuple_size);
			if (rc)
				return rc;
		}

		sector += sectors;
		bio_prot_buf += sectors * bi->tuple_size;
		total += sectors * bi->tuple_size;
		LASSERT(total <= bip_size(bio->bi_integrity));
		lnb = NULL;
	}

	return 0;
}
#endif /* HAVE_BIO_INTEGRITY_PREP_FN */

static int osd_bio_integrity_handle(struct osd_device *osd, struct bio *bio,
				    struct osd_iobuf *iobuf,
				    int start_page_idx, bool fault_inject,
				    bool integrity_enabled)
{
#ifdef HAVE_BIO_INTEGRITY_PREP_FN
	struct super_block *sb = osd_sb(osd);
	integrity_gen_fn *generate_fn = NULL;
	integrity_vrfy_fn *verify_fn = NULL;
	int rc;

	ENTRY;

	if (!integrity_enabled)
		RETURN(0);

	rc = osd_get_integrity_profile(osd, &generate_fn, &verify_fn);
	if (rc)
		RETURN(rc);

# ifdef HAVE_BIO_INTEGRITY_PREP_FN_RETURNS_BOOL
	if (!bio_integrity_prep_fn(bio, generate_fn, verify_fn))
		RETURN(blk_status_to_errno(bio->bi_status));
# else
	rc = bio_integrity_prep_fn(bio, generate_fn, verify_fn);
	if (rc)
		RETURN(rc);
# endif

	/* Verify and inject fault only when writing */
	if (iobuf->dr_rw == 1) {
		if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_CMP))) {
			rc = osd_bio_integrity_compare(bio, sb->s_bdev, iobuf,
						       start_page_idx);
			if (rc)
				RETURN(rc);
		}

		if (unlikely(fault_inject))
			bio_integrity_fault_inject(bio);
	}
#endif /* HAVE_BIO_INTEGRITY_PREP_FN */

	RETURN(0);
}
#else /* !CONFIG_BLK_DEV_INTEGRITY */
#define osd_bio_integrity_handle(osd, bio, iobuf, start_page_idx, \
				 fault_inject, integrity_enabled) 0
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static int osd_bio_init(struct bio *bio, struct osd_iobuf *iobuf,
			bool integrity_enabled, int start_page_idx,
			struct osd_bio_private **pprivate)
{
	ENTRY;

	*pprivate = NULL;

#ifdef HAVE_BIO_INTEGRITY_PREP_FN
	if (integrity_enabled) {
		struct osd_bio_private *bio_private = NULL;

		OBD_ALLOC_GFP(bio_private, sizeof(*bio_private), GFP_NOIO);
		if (bio_private == NULL)
			RETURN(-ENOMEM);
		bio->bi_end_io = dio_integrity_complete_routine;
		bio->bi_private = bio_private;
		bio_private->obp_start_page_idx = start_page_idx;
		bio_private->obp_iobuf = iobuf;
		*pprivate = bio_private;
	} else
#endif
	{
		bio->bi_end_io = dio_complete_routine;
		bio->bi_private = iobuf;
	}

	RETURN(0);
}
static void osd_mark_page_io_done(struct osd_iobuf *iobuf,
				  struct inode *inode,
				  sector_t start_blocks,
				  sector_t count)
{
	struct niobuf_local *lnb;
	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
	pgoff_t pg_start, pg_end;

	pg_start = start_blocks / blocks_per_page;
	if (start_blocks % blocks_per_page)
		pg_start++;
	if (count >= blocks_per_page)
		pg_end = (start_blocks + count -
			  blocks_per_page) / blocks_per_page;
	else
		return; /* nothing to mark */
	for ( ; pg_start <= pg_end; pg_start++) {
		lnb = iobuf->dr_lnbs[pg_start];
		lnb->lnb_flags |= OBD_BRW_DONE;
	}
}
/*
 * Linux v5.12-rc1-20-ga8affc03a9b3
 *   block: rename BIO_MAX_PAGES to BIO_MAX_VECS
 */
#ifndef BIO_MAX_VECS
#define BIO_MAX_VECS	BIO_MAX_PAGES
#endif
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
		      struct osd_iobuf *iobuf, sector_t start_blocks,
		      sector_t count)
{
	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
	struct page **pages = iobuf->dr_pages;
	int npages = iobuf->dr_npages;
	sector_t *blocks = iobuf->dr_blocks;
	struct super_block *sb = inode->i_sb;
	int sector_bits = sb->s_blocksize_bits - 9;
	unsigned int blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	struct osd_bio_private *bio_private = NULL;
	struct bio *bio = NULL;
	int bio_start_page_idx;
	struct page *page;
	unsigned int page_offset;
	sector_t sector;
	int nblocks;
	int block_idx, block_idx_end;
	int page_idx, page_idx_start;
	int i;
	int rc = 0;
	bool fault_inject;
	bool integrity_enabled;
	struct blk_plug plug;
	int blocks_left_page;

	ENTRY;

	fault_inject = CFS_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
	LASSERT(iobuf->dr_npages == npages);

	integrity_enabled = bdev_integrity_enabled(bdev, iobuf->dr_rw);

	osd_brw_stats_update(osd, iobuf);
	iobuf->dr_start_time = ktime_get();

	if (!count)
		count = npages * blocks_per_page;
	block_idx_end = start_blocks + count;

	blk_start_plug(&plug);

	page_idx_start = start_blocks / blocks_per_page;
	for (page_idx = page_idx_start, block_idx = start_blocks;
	     block_idx < block_idx_end; page_idx++,
	     block_idx += blocks_left_page) {
		/* For cases where the filesystem's blocksize is not the
		 * same as PAGE_SIZE (e.g. ARM with PAGE_SIZE=64KB and
		 * blocksize=4KB), there will be multiple blocks to
		 * read/write per page. Also, the start and end block may
		 * not be aligned to the start and end of the page, so the
		 * first page may skip some blocks at the start ("i != 0",
		 * "blocks_left_page" is reduced), and the last page may
		 * skip some blocks at the end (limited by "count").
		 */
		page = pages[page_idx];
		LASSERT(page_idx < iobuf->dr_npages);

		i = block_idx % blocks_per_page;
		blocks_left_page = blocks_per_page - i;
		if (block_idx + blocks_left_page > block_idx_end)
			blocks_left_page = block_idx_end - block_idx;
		page_offset = i * blocksize;
		for (i = 0; i < blocks_left_page;
		     i += nblocks, page_offset += blocksize * nblocks) {
			nblocks = 1;

			if (blocks[block_idx + i] == 0) { /* hole */
				LASSERTF(iobuf->dr_rw == 0,
					 "page_idx %u, block_idx %u, i %u, "
					 "start_blocks: %llu, count: %llu, npages: %d\n",
					 page_idx, block_idx, i,
					 (unsigned long long)start_blocks,
					 (unsigned long long)count, npages);
				memset(kmap(page) + page_offset, 0, blocksize);
				kunmap(page);
				continue;
			}

			sector = (sector_t)blocks[block_idx + i] << sector_bits;

			/* Additional contiguous file blocks? */
			while (i + nblocks < blocks_left_page &&
			       (sector + (nblocks << sector_bits)) ==
			       ((sector_t)blocks[block_idx + i + nblocks] <<
				sector_bits))
				nblocks++;

			if (bio && can_be_merged(bio, sector) &&
			    bio_add_page(bio, page, blocksize * nblocks,
					 page_offset) != 0)
				continue;	/* added this frag OK */

			if (bio != NULL) {
				struct request_queue *q = bio_get_queue(bio);
				unsigned int bi_size = bio_sectors(bio) << 9;

				/* Dang! I have to fragment this I/O */
				CDEBUG(D_INODE,
				       "bio++ sz %d vcnt %d(%d) sectors %d(%d) psg %d(%d)\n",
				       bi_size, bio->bi_vcnt, bio->bi_max_vecs,
				       bio_sectors(bio),
				       queue_max_sectors(q),
				       osd_bio_nr_segs(bio),
				       queue_max_segments(q));
				rc = osd_bio_integrity_handle(osd, bio,
					iobuf, bio_start_page_idx,
					fault_inject, integrity_enabled);
				if (rc) {
					bio_put(bio);
					goto out;
				}

				record_start_io(iobuf, bi_size);
				osd_submit_bio(iobuf->dr_rw, bio);
			}

			bio_start_page_idx = page_idx;
			/* allocate new bio */
			bio = cfs_bio_alloc(bdev,
					    min_t(unsigned short, BIO_MAX_VECS,
						  (block_idx_end - block_idx +
						   blocks_left_page - 1)),
					    iobuf->dr_rw ? REQ_OP_WRITE
							 : REQ_OP_READ,
					    GFP_NOIO);
			if (bio == NULL) {
				CERROR("Can't allocate bio %u pages\n",
				       block_idx_end - block_idx +
				       blocks_left_page - 1);
				rc = -ENOMEM;
				goto out;
			}
			bio_set_sector(bio, sector);
			rc = osd_bio_init(bio, iobuf, integrity_enabled,
					  bio_start_page_idx, &bio_private);
			if (rc) {
				bio_put(bio);
				goto out;
			}

			rc = bio_add_page(bio, page,
					  blocksize * nblocks, page_offset);
			LASSERT(rc != 0);
		}
	}

	if (bio != NULL) {
		rc = osd_bio_integrity_handle(osd, bio, iobuf,
					      bio_start_page_idx,
					      fault_inject,
					      integrity_enabled);
		if (rc) {
			bio_put(bio);
			goto out;
		}

		record_start_io(iobuf, bio_sectors(bio) << 9);
		osd_submit_bio(iobuf->dr_rw, bio);
	}

out:
	blk_finish_plug(&plug);

	/* in order to achieve better IO throughput, we don't wait for writes
	 * completion here. instead we proceed with transaction commit in
	 * parallel and wait for IO completion once transaction is stopped
	 * see osd_trans_stop() for more details -bzzz
	 */
	if (iobuf->dr_rw == 0 || fault_inject) {
		wait_event(iobuf->dr_wait,
			   atomic_read(&iobuf->dr_numreqs) == 0);
		osd_fini_iobuf(osd, iobuf);
	}

	if (rc == 0)
		rc = iobuf->dr_error;

	if (bio_private)
		OBD_FREE_PTR(bio_private);

	if (rc == 0 && iobuf->dr_rw)
		osd_mark_page_io_done(iobuf, inode,
				      start_blocks, count);

	RETURN(rc);
}
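/*
 * Illustrative helper (not in the original source; the name is made up for
 * this example): the loop above converts filesystem block numbers to
 * 512-byte device sectors with sector_bits = s_blocksize_bits - 9. With a
 * 4KB blocksize (s_blocksize_bits = 12), sector_bits = 3, so filesystem
 * block 10 begins at device sector 80.
 */
static inline sector_t osd_example_block_to_sector(sector_t fs_block,
						   unsigned int blocksize_bits)
{
	/* shift from filesystem-block units into 512-byte sector units */
	return fs_block << (blocksize_bits - 9);
}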
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
				   struct niobuf_local *lnb, int maxlnb)
{
	int rc = 0;

	ENTRY;

	*nrpages = 0;

	while (len > 0) {
		int poff = offset & (PAGE_SIZE - 1);
		int plen = PAGE_SIZE - poff;

		if (*nrpages >= maxlnb) {
			rc = -EOVERFLOW;
			break;
		}

		if (plen > len)
			plen = len;
		lnb->lnb_file_offset = offset;
		lnb->lnb_page_offset = poff;
		lnb->lnb_len = plen;
		/* lnb->lnb_flags = rnb->rnb_flags; */
		lnb->lnb_flags = 0;
		lnb->lnb_page = NULL;
		lnb->lnb_rc = 0;
		lnb->lnb_guard_rpc = 0;
		lnb->lnb_guard_disk = 0;
		lnb->lnb_locked = 0;

		LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
			 (long long)len);
		offset += plen;
		len -= plen;
		lnb++;
		(*nrpages)++;
	}

	RETURN(rc);
}
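/*
 * Worked example (illustrative, not part of the original code): with
 * PAGE_SIZE = 4096, a request of len = 10000 bytes at offset = 5000 is
 * split by the loop above into three local niobufs:
 *
 *	lnb[0]: lnb_page_offset = 904, lnb_len = 3192  (tail of page 1)
 *	lnb[1]: lnb_page_offset = 0,   lnb_len = 4096  (whole page 2)
 *	lnb[2]: lnb_page_offset = 0,   lnb_len = 2712  (head of page 3)
 */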
static struct page *osd_get_page(const struct lu_env *env, struct dt_object *dt,
				 loff_t offset, gfp_t gfp_mask, bool cache)
{
	struct osd_thread_info *oti = osd_oti_get(env);
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
	struct page *page;
	int cur;

	LASSERT(inode);

	if (cache) {
		page = find_or_create_page(inode->i_mapping,
					   offset >> PAGE_SHIFT, gfp_mask);
		if (likely(page)) {
			LASSERT(!PagePrivate2(page));
			wait_on_page_writeback(page);
		} else {
			lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
		}

		return page;
	}

	if (inode->i_mapping->nrpages) {
		/* consult with pagecache, but do not create new pages */
		/* this is normally used once */
		page = find_lock_page(inode->i_mapping, offset >> PAGE_SHIFT);
		if (page) {
			wait_on_page_writeback(page);
			return page;
		}
	}

	LASSERT(oti->oti_dio_pages);
	cur = oti->oti_dio_pages_used;
	page = oti->oti_dio_pages[cur];

	if (unlikely(!page)) {
		LASSERT(cur < PTLRPC_MAX_BRW_PAGES);
		page = alloc_page(gfp_mask);
		if (!page)
			return NULL;
		oti->oti_dio_pages[cur] = page;
		SetPagePrivate2(page);
		lock_page(page);
	}

	ClearPageUptodate(page);
	page->index = offset >> PAGE_SHIFT;
	oti->oti_dio_pages_used++;

	return page;
}
/*
 * The following "locks" are involved in the IO path (journal handle, page
 * locks, i_mutex); write and truncate take them in different orders, e.g.
 * truncate will:
 * - lock pages, unlock
 * - lock partial page
 */
/**
 * Unlock and release pages loaded by osd_bufs_get()
 *
 * Unlock \a npages pages from \a lnb and drop the refcount on them.
 *
 * \param env		thread execution environment
 * \param dt		dt object undergoing IO (OSD object + methods)
 * \param lnb		array of pages undergoing IO
 * \param npages	number of pages in \a lnb
 *
 * \retval 0		on success
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
			struct niobuf_local *lnb, int npages)
{
	struct osd_thread_info *oti = osd_oti_get(env);
	struct pagevec pvec;
	int i;

	ll_pagevec_init(&pvec, 0);

	for (i = 0; i < npages; i++) {
		struct page *page = lnb[i].lnb_page;

		if (page == NULL)
			continue;

		/* if the page isn't cached, then reset uptodate
		 * to prevent reuse
		 */
		if (PagePrivate2(page)) {
			oti->oti_dio_pages_used--;
		} else {
			if (lnb[i].lnb_locked)
				unlock_page(page);
			if (pagevec_add(&pvec, page) == 0)
				pagevec_release(&pvec);
		}

		lnb[i].lnb_page = NULL;
	}

	LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);

	/* Release any partial pagevec */
	pagevec_release(&pvec);

	RETURN(0);
}
/**
 * Load and lock pages undergoing IO
 *
 * Pages as described in the \a lnb array are fetched (from disk or cache)
 * and locked for IO by the caller.
 *
 * DLM locking protects us from write and truncate competing for same region,
 * but partial-page truncate can leave dirty pages in the cache for ldiskfs.
 * It's possible the writeout on such a page is in progress when we access
 * it. It's also possible that during this writeout we put new (partial) data
 * into the page, but won't be able to proceed in filter_commitrw_write().
 * Therefore, just wait for writeout completion as it should be rare enough.
 *
 * \param env		thread execution environment
 * \param dt		dt object undergoing IO (OSD object + methods)
 * \param pos		byte offset of IO start
 * \param len		number of bytes of IO
 * \param lnb		array of extents undergoing IO
 * \param rw		read or write operation, and other flags
 * \param capa		capabilities
 *
 * \retval pages	(zero or more) loaded successfully
 * \retval -ENOMEM	on memory/page allocation error
 */
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
			loff_t pos, ssize_t len, struct niobuf_local *lnb,
			int maxlnb, enum dt_bufs_type rw)
{
	struct osd_thread_info *oti = osd_oti_get(env);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	int npages, i, iosize, rc = 0;
	bool cache, write;
	loff_t fsize;
	gfp_t gfp_mask;

	LASSERT(obj->oo_inode);

	if (unlikely(obj->oo_destroyed))
		RETURN(-ENOENT);

	rc = osd_map_remote_to_local(pos, len, &npages, lnb, maxlnb);
	if (rc)
		RETURN(rc);

	write = rw & DT_BUFS_TYPE_WRITE;

	fsize = lnb[npages - 1].lnb_file_offset + lnb[npages - 1].lnb_len;
	iosize = fsize - lnb[0].lnb_file_offset;
	fsize = max(fsize, i_size_read(obj->oo_inode));

	cache = rw & DT_BUFS_TYPE_READAHEAD;
	if (cache)
		goto bypass_checks;

	cache = osd_use_page_cache(osd);
	while (cache) {
		if (write) {
			if (!osd->od_writethrough_cache) {
				cache = false;
				break;
			}
			if (iosize > osd->od_writethrough_max_iosize) {
				cache = false;
				break;
			}
		} else {
			if (!osd->od_read_cache) {
				cache = false;
				break;
			}
			if (iosize > osd->od_readcache_max_iosize) {
				cache = false;
				break;
			}
		}
		/* don't use cache on large files */
		if (osd->od_readcache_max_filesize &&
		    fsize > osd->od_readcache_max_filesize)
			cache = false;
		break;
	}

bypass_checks:
	if (!cache && unlikely(!oti->oti_dio_pages)) {
		OBD_ALLOC_PTR_ARRAY_LARGE(oti->oti_dio_pages,
					  PTLRPC_MAX_BRW_PAGES);
		if (!oti->oti_dio_pages)
			GOTO(cleanup, rc = -ENOMEM);
	}

	/* this could also try less hard for DT_BUFS_TYPE_READAHEAD pages */
	gfp_mask = rw & DT_BUFS_TYPE_LOCAL ? (GFP_NOFS | __GFP_HIGHMEM) :
					     GFP_HIGHUSER;
	for (i = 0; i < npages; i++, lnb++) {
		lnb->lnb_page = osd_get_page(env, dt, lnb->lnb_file_offset,
					     gfp_mask, cache);
		if (lnb->lnb_page == NULL)
			GOTO(cleanup, rc = -ENOMEM);

		lnb->lnb_locked = 1;
		if (cache)
			mark_page_accessed(lnb->lnb_page);
	}

#if 0
	/* XXX: this version doesn't invalidate cached pages, but use them */
	if (!cache && write && obj->oo_inode->i_mapping->nrpages) {
		/* do not allow data aliasing, invalidate pagecache */
		/* XXX: can be quite expensive in mixed case */
		invalidate_mapping_pages(obj->oo_inode->i_mapping,
				lnb[0].lnb_file_offset >> PAGE_SHIFT,
				lnb[npages - 1].lnb_file_offset >> PAGE_SHIFT);
	}
#endif

	RETURN(i);

cleanup:
	if (i > 0)
		osd_bufs_put(env, dt, lnb - i, i);
	return rc;
}
/* Borrow @ext4_chunk_trans_blocks */
static int osd_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	ldiskfs_group_t groups;
	int gdpblocks;
	int idxblocks;
	int depth;
	int ret;

	depth = ext_depth(inode);
	idxblocks = depth * 2;

	/*
	 * Now let's see how many group bitmaps and group descriptors need
	 * to account.
	 */
	groups = idxblocks + 1;
	gdpblocks = groups;
	if (groups > LDISKFS_SB(inode->i_sb)->s_groups_count)
		groups = LDISKFS_SB(inode->i_sb)->s_groups_count;
	if (gdpblocks > LDISKFS_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = LDISKFS_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret = idxblocks + groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += LDISKFS_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
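/*
 * Worked example (illustrative, not part of the original code): for an
 * extent tree of depth 2, idxblocks = 4, so groups = gdpblocks = 5 (each
 * clamped to s_groups_count/s_gdb_count on small filesystems), giving
 * 4 + 5 + 5 + LDISKFS_META_TRANS_BLOCKS(sb) journal credits per chunk.
 */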
#ifdef HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS
static int osd_extend_restart_trans(handle_t *handle, int needed,
				    struct inode *inode)
{
	int rc;

	rc = ldiskfs_journal_ensure_credits(handle, needed,
		ldiskfs_trans_default_revoke_credits(inode->i_sb));
	/* this means journal has been restarted */
	if (rc > 0)
		rc = 0;

	return rc;
}
#else
static int osd_extend_restart_trans(handle_t *handle, int needed,
				    struct inode *inode)
{
	int rc;

	if (ldiskfs_handle_has_enough_credits(handle, needed))
		return 0;
	rc = ldiskfs_journal_extend(handle,
				    needed - handle->h_buffer_credits);
	if (rc <= 0)
		return rc;

	return ldiskfs_journal_restart(handle, needed);
}
#endif /* HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS */
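/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * code): extend the running handle before dirtying more metadata. On
 * kernels with ldiskfs_journal_ensure_credits() the journal may have been
 * restarted inside the call, so the caller must not assume atomicity with
 * previously journaled updates:
 *
 *	rc = osd_extend_restart_trans(handle, needed_credits, inode);
 *	if (rc == 0)
 *		rc = ldiskfs_mark_inode_dirty(handle, inode);
 */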
static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
				 struct osd_device *osd, sector_t start_blocks,
				 sector_t count, loff_t *disk_size,
				 __u64 user_size)
{
	/* if file has grown, take user_size into account */
	if (user_size && *disk_size > user_size)
		*disk_size = user_size;

	spin_lock(&inode->i_lock);
	if (*disk_size > i_size_read(inode)) {
		i_size_write(inode, *disk_size);
		LDISKFS_I(inode)->i_disksize = *disk_size;
		spin_unlock(&inode->i_lock);
		osd_dirty_inode(inode, I_DIRTY_DATASYNC);
	} else {
		spin_unlock(&inode->i_lock);
	}

	/*
	 * We don't do stats here as in read path because
	 * write is async: we'll do this in osd_put_bufs()
	 */
	return osd_do_bio(osd, inode, iobuf, start_blocks, count);
}
static unsigned int osd_extent_bytes(const struct osd_device *o)
{
	unsigned int *extent_bytes_ptr =
			raw_cpu_ptr(o->od_extent_bytes_percpu);

	if (likely(*extent_bytes_ptr))
		return *extent_bytes_ptr;

	/* initialize on first access or CPU hotplug */
	if (!ldiskfs_has_feature_extents(osd_sb(o)))
		*extent_bytes_ptr = 1 << osd_sb(o)->s_blocksize_bits;
	else
		*extent_bytes_ptr = OSD_DEFAULT_EXTENT_BYTES;

	return *extent_bytes_ptr;
}
#define EXTENT_BYTES_DECAY 64
static void osd_decay_extent_bytes(struct osd_device *osd,
				   unsigned int new_bytes)
{
	unsigned int old_bytes;

	if (!ldiskfs_has_feature_extents(osd_sb(osd)))
		return;

	old_bytes = osd_extent_bytes(osd);
	*raw_cpu_ptr(osd->od_extent_bytes_percpu) =
		(old_bytes * (EXTENT_BYTES_DECAY - 1) +
		 min(new_bytes, OSD_DEFAULT_EXTENT_BYTES) +
		 EXTENT_BYTES_DECAY - 1) / EXTENT_BYTES_DECAY;
}
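/*
 * Worked example (illustrative, not part of the original code): with
 * EXTENT_BYTES_DECAY = 64, a per-CPU average of 1MB and a new 4KB sample
 * decay to
 *
 *	(1048576 * 63 + 4096 + 63) / 64 = 1032256 bytes
 *
 * i.e. the average moves roughly 1/64 of the way toward each new sample,
 * so dozens of consecutive small allocations are needed to halve it.
 */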
static int osd_ldiskfs_map_inode_pages(struct inode *inode,
				       struct osd_iobuf *iobuf,
				       struct osd_device *osd,
				       int create, __u64 user_size,
				       int check_credits,
				       struct thandle *thandle)
{
	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
	int blocksize = 1 << inode->i_blkbits;
	int rc = 0, i = 0, mapped_index = 0;
	struct page *fp = NULL;
	int clen = 0;
	pgoff_t max_page_index;
	handle_t *handle = NULL;
	sector_t start_blocks = 0, count = 0;
	loff_t disk_size = 0;
	struct page **page = iobuf->dr_pages;
	int pages = iobuf->dr_npages;
	sector_t *blocks = iobuf->dr_blocks;
	struct niobuf_local *lnb1, *lnb2;
	loff_t size1, size2;

	max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;

	CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
	       inode->i_ino, pages, (*page)->index);

	if (create) {
		create = LDISKFS_GET_BLOCKS_CREATE;
		handle = ldiskfs_journal_current_handle();
		LASSERT(handle != NULL);
		rc = osd_attach_jinode(inode);
		if (rc)
			return rc;
		disk_size = i_size_read(inode);
		/* if disk_size is already bigger than specified user_size,
		 * ignore user_size
		 */
		if (disk_size > user_size)
			user_size = 0;
	}
	/* pages are sorted already. so, we just have to find
	 * contig. space and process them properly
	 */
	while (i < pages) {
		long blen, total = 0, previous_total = 0;
		struct ldiskfs_map_blocks map = { 0 };
		ktime_t time;

		if (fp == NULL) { /* start new extent */
			fp = *page++;
			clen = 1;
			if (++i != pages)
				continue;
		} else if (fp->index + clen == (*page)->index) {
			/* continue the extent */
			page++;
			clen++;
			if (++i != pages)
				continue;
		}
		if (fp->index + clen >= max_page_index)
			GOTO(cleanup, rc = -EFBIG);
		/* process found extent */
		map.m_lblk = fp->index * blocks_per_page;
		map.m_len = blen = clen * blocks_per_page;

		/*
		 * For PAGE_SIZE > blocksize block allocation mapping, the
		 * first ldiskfs_map_blocks() looks up already-mapped blocks,
		 * records them in iobuf->dr_blocks and fixes up m_lblk and
		 * m_len for the still-unallocated blocks to be created and
		 * mapped by the second ldiskfs_map_blocks().
		 *
		 * m_lblk must point at the first un-allocated block: if it
		 * points at an already-allocated block when create = 1,
		 * ldiskfs_map_blocks() just returns that existing mapping
		 * without allocating any of the requested new blocks for
		 * the extent. For the PAGE_SIZE = blocksize case, if m_lblk
		 * points at an already-allocated block it will point at an
		 * un-allocated block in the next restarted transaction,
		 * because already-mapped blocks/pages are filtered out of
		 * the next restarted transaction via the OBD_BRW_DONE flag
		 * in osd_declare_write_commit().
		 */
		if (create && PAGE_SIZE > blocksize) {
			/* With flags=0 just for already mapped blocks lookup */
			rc = ldiskfs_map_blocks(handle, inode, &map, 0);
			if (rc > 0 && map.m_flags & LDISKFS_MAP_MAPPED) {
				for (; total < blen && total < map.m_len;
				     total++)
					*(blocks + total) = map.m_pblk + total;

				/* The extent is already fully mapped */
				if (total == blen) {
					rc = 0;
					goto ext_already_mapped;
				}
			}
			/*
			 * Fixup or reset m_lblk and m_len for un-mapped blocks.
			 * The second ldiskfs_map_blocks() will create and map
			 * them.
			 */
			map.m_lblk = fp->index * blocks_per_page + total;
			map.m_len = blen - total;
		}

cont_map:
		/*
		 * We might restart the transaction for block allocations. To
		 * preserve data-ordered mode semantics, the IO submission,
		 * the disk size update and the block allocations must all
		 * stay within the same transaction for consistency.
		 */
		if (handle && check_credits) {
			struct osd_thandle *oh;

			LASSERT(thandle != NULL);
			oh = container_of(thandle, struct osd_thandle,
					  ot_super);
			/*
			 * only issue IO if restart transaction needed,
			 * as update disk size need hold inode lock, we
			 * want to avoid that as much as possible.
			 */
			if (oh->oh_declared_ext <= 0) {
				rc = osd_ldiskfs_map_write(inode,
					iobuf, osd, start_blocks,
					count, &disk_size, user_size);
				if (rc)
					GOTO(cleanup, rc);
				thandle->th_restart_tran = 1;
				GOTO(cleanup, rc = -EAGAIN);
			}

			if (CFS_FAIL_CHECK(OBD_FAIL_OST_RESTART_IO))
				oh->oh_declared_ext = 0;
			else
				oh->oh_declared_ext--;
		}

		time = ktime_get();
		rc = ldiskfs_map_blocks(handle, inode, &map, create);
		time = ktime_sub(ktime_get(), time);

		if (rc >= 0) {
			struct brw_stats *h = &osd->od_brw_stats;
			int idx;
			int c = 0;

			idx = map.m_flags & LDISKFS_MAP_NEW ?
				BRW_ALLOC_TIME : BRW_MAP_TIME;
			lprocfs_oh_tally_log2_pcpu(&h->bs_hist[idx],
						   ktime_to_ms(time));

			for (; total < blen && c < map.m_len; c++, total++) {
				if (rc == 0) {
					*(blocks + total) = 0;
					continue;
				}
				if ((map.m_flags & LDISKFS_MAP_UNWRITTEN) &&
				    !create) {
					/* don't try to read allocated, but
					 * unwritten blocks, instead fill the
					 * patches with zeros in osd_do_bio()
					 */
					*(blocks + total) = 0;
					continue;
				}
				*(blocks + total) = map.m_pblk + c;
				/* unmap any possible underlying
				 * metadata from the block device
				 * mapping. b=6998.
				 */
				if ((map.m_flags & LDISKFS_MAP_NEW) &&
				    create)
					clean_bdev_aliases(inode->i_sb->s_bdev,
							   map.m_pblk + c, 1);
			}
			rc = 0;
		}

ext_already_mapped:
		if (rc == 0 && create) {
			count += (total - previous_total);
			mapped_index = (count + blocks_per_page -
					1) / blocks_per_page - 1;
			lnb1 = iobuf->dr_lnbs[i - clen];
			lnb2 = iobuf->dr_lnbs[mapped_index];
			size1 = lnb1->lnb_file_offset -
				(lnb1->lnb_file_offset % PAGE_SIZE) +
				(total << inode->i_blkbits);
			size2 = lnb2->lnb_file_offset + lnb2->lnb_len;

			if (size1 > size2)
				size1 = size2;
			if (size1 > disk_size)
				disk_size = size1;
		}

		if (rc == 0 && total < blen) {
			/*
			 * decay extent bytes if we could not
			 * allocate the extent in one pass.
			 */
			osd_decay_extent_bytes(osd,
				(total - previous_total) << inode->i_blkbits);
			map.m_lblk = fp->index * blocks_per_page + total;
			map.m_len = blen - total;
			previous_total = total;
			goto cont_map;
		}
		if (rc != 0)
			GOTO(cleanup, rc);
		/*
		 * decay extent bytes if we allocated
		 * a good large extent.
		 */
		if (total - previous_total >=
		    osd_extent_bytes(osd) >> inode->i_blkbits)
			osd_decay_extent_bytes(osd,
				(total - previous_total) << inode->i_blkbits);
		/* look for next extent */
		fp = NULL;
		blocks += blocks_per_page * clen;
	}
cleanup:
	if (rc == 0 && create &&
	    start_blocks < pages * blocks_per_page) {
		rc = osd_ldiskfs_map_write(inode, iobuf, osd, start_blocks,
					   count, &disk_size, user_size);
		LASSERT(start_blocks + count == pages * blocks_per_page);
	}
	return rc;
}
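/*
 * Worked example of the two-pass mapping above (illustrative, not part of
 * the original code): with blocksize = 4KB and PAGE_SIZE = 64KB there are
 * 16 blocks per page. If a one-page extent already has its first 10 blocks
 * mapped, the first pass (flags = 0) records blocks 0..9 in
 * iobuf->dr_blocks and leaves total = 10; the second call then runs with
 * m_lblk advanced by 10 and m_len = 6, so only the truly unallocated tail
 * is created.
 */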
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
			  struct niobuf_local *lnb, int npages)
{
	struct osd_thread_info *oti = osd_oti_get(env);
	struct osd_iobuf *iobuf = &oti->oti_iobuf;
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
	ktime_t start, end;
	s64 timediff;
	ssize_t isize;
	__s64 maxidx;
	int i, rc = 0;

	LASSERT(inode);

	rc = osd_init_iobuf(osd, iobuf, 0, npages);
	if (unlikely(rc != 0))
		RETURN(rc);

	isize = i_size_read(inode);
	maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;

	start = ktime_get();
	for (i = 0; i < npages; i++) {
		/*
		 * till commit the content of the page is undefined
		 * we'll set it uptodate once bulk is done. otherwise
		 * subsequent reads can access non-stable data
		 */
		ClearPageUptodate(lnb[i].lnb_page);

		if (lnb[i].lnb_len == PAGE_SIZE)
			continue;

		if (maxidx >= lnb[i].lnb_page->index) {
			osd_iobuf_add_page(iobuf, &lnb[i]);
		} else {
			long off;
			char *p = kmap(lnb[i].lnb_page);

			off = lnb[i].lnb_page_offset;
			if (off)
				memset(p, 0, off);
			off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
			      ~PAGE_MASK;
			if (off)
				memset(p + off, 0, PAGE_SIZE - off);
			kunmap(lnb[i].lnb_page);
		}
	}
	end = ktime_get();
	timediff = ktime_us_delta(end, start);
	lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

	if (iobuf->dr_npages) {
		rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
						 0, 0, NULL);
		if (likely(rc == 0)) {
			rc = osd_do_bio(osd, inode, iobuf, 0, 0);
			/* do IO stats for preparation reads */
			osd_fini_iobuf(osd, iobuf);
		}
	}
	RETURN(rc);
}
struct osd_fextent {
	sector_t	start;
	sector_t	end;
	__u32		flags;
	unsigned int	mapped:1;
};
#ifdef KERNEL_DS
#define DECLARE_MM_SEGMENT_T(name)		mm_segment_t name
#define access_set_kernel(saved_fs, fei)				\
do {									\
	saved_fs = get_fs();						\
	set_fs(KERNEL_DS);						\
} while (0)
#define access_unset_kernel(saved_fs, fei)	set_fs((saved_fs))
#else
#define DECLARE_MM_SEGMENT_T(name)
#define access_set_kernel(saved_fs, fei)				\
	(fei)->fi_flags |= LDISKFS_FIEMAP_FLAG_MEMCPY
#define access_unset_kernel(saved_fs, fei)				\
	(fei)->fi_flags &= ~(LDISKFS_FIEMAP_FLAG_MEMCPY)
#endif /* KERNEL_DS */
static int osd_is_mapped(struct dt_object *dt, __u64 offset,
			 struct osd_fextent *cached_extent)
{
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	sector_t block = offset >> inode->i_blkbits;
	sector_t start;
	struct fiemap_extent_info fei = { 0 };
	struct fiemap_extent fe = { 0 };
	int rc;
	DECLARE_MM_SEGMENT_T(saved_fs);

	if (block >= cached_extent->start && block < cached_extent->end)
		return cached_extent->mapped;

	if (i_size_read(inode) == 0)
		return 0;

	/* Beyond EOF, must not be mapped */
	if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
		return 0;

	fei.fi_extents_max = 1;
	fei.fi_extents_start = &fe;
	access_set_kernel(saved_fs, &fei);
	rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
	access_unset_kernel(saved_fs, &fei);
	if (rc != 0)
		return 0;

	start = fe.fe_logical >> inode->i_blkbits;
	cached_extent->flags = fe.fe_flags;
	if (fei.fi_extents_mapped == 0) {
		/* a special case - no extent found at this offset and forward.
		 * we can consider this as a hole to EOF. it's safe to cache
		 * as other threads can not allocate/punch blocks this thread
		 * is working on (LDLM).
		 */
		cached_extent->start = block;
		cached_extent->end = i_size_read(inode) >> inode->i_blkbits;
		cached_extent->mapped = 0;
		return 0;
	}

	if (start > block) {
		cached_extent->start = block;
		cached_extent->end = start;
		cached_extent->mapped = 0;
	} else {
		cached_extent->start = start;
		cached_extent->end = (fe.fe_logical + fe.fe_length) >>
				     inode->i_blkbits;
		cached_extent->mapped = 1;
	}

	return cached_extent->mapped;
}
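/*
 * Worked example (illustrative, not part of the original code): with 4KB
 * blocks, osd_is_mapped(dt, 1MB, &ce) probes block 256. If FIEMAP reports
 * an extent covering logical 0..2MB, the cache becomes {start = 0,
 * end = 512, mapped = 1} and later queries inside that block range return
 * without another FIEMAP call; a query at 3MB misses and re-probes.
 */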
#define MAX_EXTENTS_PER_WRITE 100
static int osd_declare_write_commit(const struct lu_env *env,
				    struct dt_object *dt,
				    struct niobuf_local *lnb, int npages,
				    struct thandle *handle)
{
	const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	struct osd_thandle *oh;
	int extents = 0, new_meta = 0;
	int depth, new_blocks = 0;
	int i;
	int dirty_groups = 0;
	int rc = 0;
	int credits = 0;
	long long quota_space = 0;
	struct osd_fextent mapped = { 0 }, extent = { 0 };
	enum osd_quota_local_flags local_flags = 0;
	enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
	unsigned int extent_bytes;
	ENTRY;

	LASSERT(handle != NULL);
	oh = container_of(handle, struct osd_thandle, ot_super);
	LASSERT(oh->ot_handle == NULL);

	/*
	 * We track a decaying average of extent bytes per filesystem;
	 * most of the time it will be 1MB, but on a heavily-fragmented
	 * filesystem it can decay down to 4KB at the worst.
	 */
	extent_bytes = osd_extent_bytes(osd);
	LASSERT(extent_bytes >= osd_sb(osd)->s_blocksize);

	/* calculate number of extents (probably better to pass nb) */
	for (i = 0; i < npages; i++) {
		/* ignore quota for the whole request if any page is from
		 * client cache or written by root.
		 *
		 * XXX we could handle this on per-lnb basis as done by
		 * grant.
		 */
		if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
		    (lnb[i].lnb_flags & OBD_BRW_SYS_RESOURCE) ||
		    !(lnb[i].lnb_flags & OBD_BRW_SYNC))
			declare_flags |= OSD_QID_FORCE;

		/*
		 * Converting an unwritten extent may require splitting
		 * extents, so it cannot be skipped.
		 */
		if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &mapped) &&
		    !(mapped.flags & FIEMAP_EXTENT_UNWRITTEN)) {
			lnb[i].lnb_flags |= OBD_BRW_MAPPED;
			continue;
		}

		if (lnb[i].lnb_flags & OBD_BRW_DONE) {
			lnb[i].lnb_flags |= OBD_BRW_MAPPED;
			continue;
		}

		/* count only unmapped changes */
		new_blocks++;
		if (lnb[i].lnb_file_offset != extent.end || extent.end == 0) {
			if (extent.end != 0)
				extents += (extent.end - extent.start +
					    extent_bytes - 1) / extent_bytes;
			extent.start = lnb[i].lnb_file_offset;
			extent.end = lnb[i].lnb_file_offset + lnb[i].lnb_len;
		} else {
			extent.end += lnb[i].lnb_len;
		}

		quota_space += PAGE_SIZE;
	}

	credits++; /* inode */
	/*
	 * overwrite case, no need to modify tree and
	 * allocate blocks.
	 */
	if (!extent.end)
		goto out_declare;

	extents += (extent.end - extent.start +
		    extent_bytes - 1) / extent_bytes;
	/*
	 * As the filesystem fills up, mballoc can no longer scan the block
	 * groups for well-aligned free extents, so the decayed
	 * bytes-per-extent value can become very small and make us reserve
	 * far too many credits. We can afford to be more optimistic in the
	 * credit reservation: even on a nearly full filesystem it is
	 * extremely unlikely that the worst case is ever hit.
	 */
	if (extents > MAX_EXTENTS_PER_WRITE)
		extents = MAX_EXTENTS_PER_WRITE;

	/*
	 * If we add a single extent, then in the worst case each tree
	 * level index/leaf needs to be changed in case of a tree split.
	 * If more extents are inserted, they could cause the whole tree
	 * to split more than once, but this is really rare.
	 */
	if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
		/*
		 * many concurrent threads may grow tree by the time
		 * our transaction starts. so, consider 2 is a min depth.
		 */
		depth = ext_depth(inode);
		depth = min(max(depth, 1) + 1, LDISKFS_MAX_EXTENT_DEPTH);
		if (extents <= 1) {
			credits += depth * 2 * extents;
			new_meta = depth;
		} else {
			credits += depth * 3 * extents;
			new_meta = depth * 2 * extents;
		}
	} else {
		/*
		 * With N contiguous data blocks, we need at most
		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
		 * 2 dindirect blocks, and 1 tindirect block
		 */
		new_meta = DIV_ROUND_UP(new_blocks,
				LDISKFS_ADDR_PER_BLOCK(inode->i_sb)) + 4;
		credits += new_meta;
	}
	dirty_groups += (extents + new_meta);

	oh->oh_declared_ext = extents;

	/* quota space for metadata blocks */
	quota_space += new_meta * LDISKFS_BLOCK_SIZE(osd_sb(osd));

	/* quota space should be reported in 1K blocks */
	quota_space = toqb(quota_space);

	/* each new block can go in different group (bitmap + gd) */

	/* we can't dirty more bitmap blocks than exist */
	if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_groups_count)
		credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
	else
		credits += dirty_groups;

	/* we can't dirty more gd blocks than exist */
	if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
		credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
	else
		credits += dirty_groups;

	CDEBUG(D_INODE,
	       "%s: inode #%lu extent_bytes %u extents %d credits %d\n",
	       osd_ino2name(inode), inode->i_ino, extent_bytes, extents,
	       credits);

out_declare:
	osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

	/* make sure the over quota flags were not set */
	lnb[0].lnb_flags &= ~OBD_BRW_OVER_ALLQUOTA;

	rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
				   i_projid_read(inode), quota_space, oh,
				   osd_dt_obj(dt), &local_flags, declare_flags);

	/* we need only to store the overquota flags in the first lnb for
	 * now, once we support multiple objects BRW, this code needs be
	 * revised.
	 */
	if (local_flags & QUOTA_FL_OVER_USRQUOTA)
		lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
	if (local_flags & QUOTA_FL_OVER_GRPQUOTA)
		lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
	if (local_flags & QUOTA_FL_OVER_PRJQUOTA)
		lnb[0].lnb_flags |= OBD_BRW_OVER_PRJQUOTA;
	if (local_flags & QUOTA_FL_ROOT_PRJQUOTA)
		lnb[0].lnb_flags |= OBD_BRW_ROOT_PRJQUOTA;

	if (rc == 0)
		rc = osd_trunc_lock(osd_dt_obj(dt), oh, true);

	RETURN(rc);
}
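/*
 * Worked example (illustrative, not part of the original code): a 4MB
 * contiguous unmapped write with a decayed average of extent_bytes = 1MB
 * is declared as (4MB + 1MB - 1) / 1MB = 4 extents; on an extent-based
 * file of ext_depth() = 1 (clamped to depth = 2 above), that contributes
 * depth * 3 * extents = 2 * 3 * 4 = 24 credits, plus bitmap and group
 * descriptor blocks.
 */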
static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
			    struct niobuf_local *lnb, int npages,
			    struct thandle *thandle, __u64 user_size)
{
	struct osd_thread_info *oti = osd_oti_get(env);
	struct osd_iobuf *iobuf = &oti->oti_iobuf;
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
	int rc = 0, i, check_credits = 0;

	LASSERT(inode);

	rc = osd_init_iobuf(osd, iobuf, 1, npages);
	if (unlikely(rc != 0))
		RETURN(rc);

	dquot_initialize(inode);

	for (i = 0; i < npages; i++) {
		if (lnb[i].lnb_rc == -ENOSPC &&
		    (lnb[i].lnb_flags & OBD_BRW_MAPPED)) {
			/* Allow the write to proceed if overwriting an
			 * existing block
			 */
			lnb[i].lnb_rc = 0;
		}

		if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
			CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
			       lnb[i].lnb_rc);
			LASSERT(lnb[i].lnb_page);
			generic_error_remove_page(inode->i_mapping,
						  lnb[i].lnb_page);
			continue;
		}

		if (lnb[i].lnb_flags & OBD_BRW_DONE)
			continue;

		if (!(lnb[i].lnb_flags & OBD_BRW_MAPPED))
			check_credits = 1;

		LASSERT(PageLocked(lnb[i].lnb_page));
		LASSERT(!PageWriteback(lnb[i].lnb_page));

		/*
		 * Since write and truncate are serialized by oo_sem, even
		 * partial-page truncate should not leave dirty pages in the
		 * page cache.
		 */
		LASSERT(!PageDirty(lnb[i].lnb_page));

		SetPageUptodate(lnb[i].lnb_page);

		osd_iobuf_add_page(iobuf, &lnb[i]);
	}

	osd_trans_exec_op(env, thandle, OSD_OT_WRITE);

	if (CFS_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
		rc = -ENOSPC;
	} else if (iobuf->dr_npages > 0) {
		rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd,
						 1, user_size,
						 check_credits,
						 thandle);
	} else {
		/* no pages to write, no transno is needed */
		thandle->th_local = 1;
	}

	if (rc != 0 && !thandle->th_restart_tran)
		osd_fini_iobuf(osd, iobuf);

	osd_trans_exec_check(env, thandle, OSD_OT_WRITE);

	if (unlikely(rc != 0 && !thandle->th_restart_tran)) {
		/* if write fails, we should drop pages from the cache */
		for (i = 0; i < npages; i++) {
			if (lnb[i].lnb_page == NULL)
				continue;
			if (!PagePrivate2(lnb[i].lnb_page)) {
				LASSERT(PageLocked(lnb[i].lnb_page));
				generic_error_remove_page(inode->i_mapping,
							  lnb[i].lnb_page);
			}
		}
	}

	RETURN(rc);
}
static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
			 struct niobuf_local *lnb, int npages)
{
	struct osd_thread_info *oti = osd_oti_get(env);
	struct osd_iobuf *iobuf = &oti->oti_iobuf;
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
	int rc = 0, i, cache_hits = 0, cache_misses = 0;
	ktime_t start, end;
	s64 timediff;
	loff_t isize;

	LASSERT(inode);

	rc = osd_init_iobuf(osd, iobuf, 0, npages);
	if (unlikely(rc != 0))
		RETURN(rc);

	isize = i_size_read(inode);

	start = ktime_get();
	for (i = 0; i < npages; i++) {
		if (isize <= lnb[i].lnb_file_offset)
			/* If there's no more data, abort early.
			 * lnb->lnb_rc == 0, so it's easy to detect later.
			 */
			break;

		/* instead of looking if we go beyond isize, send complete
		 * pages all the time
		 */
		lnb[i].lnb_rc = lnb[i].lnb_len;

		/* Bypass disk read if fail_loc is set properly */
		if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_OST_FAKE_RW))
			SetPageUptodate(lnb[i].lnb_page);

		if (PageUptodate(lnb[i].lnb_page)) {
			cache_hits++;
			unlock_page(lnb[i].lnb_page);
		} else {
			cache_misses++;
			osd_iobuf_add_page(iobuf, &lnb[i]);
		}
		/* no need to unlock in osd_bufs_put(), the sooner page is
		 * unlocked, the earlier another client can access it.
		 * notice real unlock_page() can be called few lines
		 * below after osd_do_bio(). lnb is per-thread, so it's
		 * fine to have PG_locked and lnb_locked inconsistent here
		 */
		lnb[i].lnb_locked = 0;
	}
	end = ktime_get();
	timediff = ktime_us_delta(end, start);
	lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);

	if (cache_hits != 0)
		lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
				    cache_hits);
	if (cache_misses != 0)
		lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
				    cache_misses);
	if (cache_hits + cache_misses != 0)
		lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
				    cache_hits + cache_misses);

	if (iobuf->dr_npages) {
		rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
						 0, 0, NULL);
		if (!rc)
			rc = osd_do_bio(osd, inode, iobuf, 0, 0);

		/* IO stats will be done in osd_bufs_put() */

		/* early release to let others read data during the bulk */
		for (i = 0; i < iobuf->dr_npages; i++) {
			LASSERT(PageLocked(iobuf->dr_pages[i]));
			if (!PagePrivate2(iobuf->dr_pages[i]))
				unlock_page(iobuf->dr_pages[i]);
		}
	}

	RETURN(rc);
}
/*
 * XXX: Another layering violation for now.
 *
 * We don't want to use ->f_op->read methods, because generic file write
 *
 *    - serializes on ->i_sem, and
 *
 *    - does a lot of extra work like balance_dirty_pages(),
 *
 * which doesn't work for globally shared files like /last_rcvd.
 */
static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
{
	struct ldiskfs_inode_info *ei = LDISKFS_I(inode);

	memcpy(buffer, (char *)ei->i_data, buflen);
	return buflen;
}
int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
{
	struct buffer_head *bh;
	unsigned long block;
	int osize;
	int blocksize;
	int csize;
	int boffs;

	/* prevent reading after eof */
	spin_lock(&inode->i_lock);
	if (i_size_read(inode) < *offs + size) {
		loff_t diff = i_size_read(inode) - *offs;

		spin_unlock(&inode->i_lock);
		if (diff < 0) {
			CDEBUG(D_OTHER,
			       "size %llu is too short to read @%llu\n",
			       i_size_read(inode), *offs);
			return -EBADR;
		} else if (diff == 0) {
			return 0;
		} else {
			size = diff;
		}
	} else {
		spin_unlock(&inode->i_lock);
	}

	blocksize = 1 << inode->i_blkbits;
	osize = size;
	while (size > 0) {
		block = *offs >> inode->i_blkbits;
		boffs = *offs & (blocksize - 1);
		csize = min(blocksize - boffs, size);
		bh = __ldiskfs_bread(NULL, inode, block, 0);
		if (IS_ERR(bh)) {
			CERROR("%s: can't read %u@%llu on ino %lu: rc = %ld\n",
			       osd_ino2name(inode), csize, *offs, inode->i_ino,
			       PTR_ERR(bh));
			return PTR_ERR(bh);
		}

		if (bh != NULL) {
			memcpy(buf, bh->b_data + boffs, csize);
			brelse(bh);
		} else {
			/* an unmapped block reads back as zeroes */
			memset(buf, 0, csize);
		}

		*offs += csize;
		buf += csize;
		size -= csize;
	}
	return osize;
}
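/*
 * Worked example (illustrative, not part of the original code): reading
 * size = 6000 bytes at *offs = 5000 with a 4KB blocksize takes two
 * iterations of the loop above: block 1 (boffs = 904, csize = 3192), then
 * block 2 (boffs = 0, csize = 2808).
 */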
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
			struct lu_buf *buf, loff_t *pos)
{
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	int rc;

	/* Read small symlink from inode body as we need to maintain correct
	 * on-disk symlinks for ldiskfs.
	 */
	if (S_ISLNK(dt->do_lu.lo_header->loh_attr)) {
		loff_t size = i_size_read(inode);

		if (buf->lb_len < size)
			return -EOVERFLOW;

		if (size < sizeof(LDISKFS_I(inode)->i_data))
			rc = osd_ldiskfs_readlink(inode, buf->lb_buf, size);
		else
			rc = osd_ldiskfs_read(inode, buf->lb_buf, size, pos);
	} else {
		rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
	}

	return rc;
}
static inline int osd_extents_enabled(struct super_block *sb,
				      struct inode *inode)
{
	if (inode != NULL) {
		if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
			return 1;
	} else if (ldiskfs_has_feature_extents(sb)) {
		return 1;
	}
	return 0;
}
int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
			   const loff_t size, const loff_t pos,
			   const int blocks)
{
	int credits, bits, bs, i;

	bits = sb->s_blocksize_bits;
	bs = 1 << bits;

	/* legacy blockmap: 3 levels * 3 (bitmap,gd,itself)
	 * we do not expect blockmaps on the large files,
	 * so let's shrink it to 2 levels (4GB files)
	 */

	/* this is default reservation: 2 levels */
	credits = (blocks + 2) * 3;

	/* actual offset is unknown, hard to optimize */
	if (pos == -1)
		return credits;

	/* now check for few specific cases to optimize */
	if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
		/* no indirects */
		credits = blocks;
		/* allocate if not allocated */
		if (inode == NULL) {
			credits += blocks * 2;
			return credits;
		}
		for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
			LASSERT(i < LDISKFS_NDIR_BLOCKS);
			if (LDISKFS_I(inode)->i_data[i] == 0)
				credits += 2;
		}
	} else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
		/* single indirect */
		credits = blocks * 3;
		if (inode == NULL ||
		    LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
			credits += 3;
		else
			/* The indirect block may be modified. */
			credits += 1;
	}

	return credits;
}
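/*
 * Worked example (illustrative, not part of the original code): writing
 * size = 8192 at pos = 0 with bs = 4096 touches blocks = 2 direct blocks,
 * so credits = blocks = 2; if the inode is not yet allocated
 * (inode == NULL), another blocks * 2 = 4 credits are added for bitmap
 * and group-descriptor updates, for 6 in total.
 */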
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
				 const struct lu_buf *buf, loff_t _pos,
				 struct thandle *handle)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct inode *inode = obj->oo_inode;
	struct super_block *sb = osd_sb(osd_obj2dev(obj));
	struct osd_thandle *oh;
	int rc = 0, est = 0, credits, blocks, allocated = 0;
	int bits, bs;
	int depth, size;
	loff_t pos;
	ENTRY;

	LASSERT(buf != NULL);
	LASSERT(handle != NULL);

	oh = container_of(handle, struct osd_thandle, ot_super);
	LASSERT(oh->ot_handle == NULL);

	size = buf->lb_len;
	bits = sb->s_blocksize_bits;
	bs = 1 << bits;

	if (osd_tx_was_declared(env, oh, dt, DTO_WRITE_BASE, _pos))
		RETURN(0);

	if (_pos == -1) {
		/* if this is an append, then we
		 * should expect cross-block record
		 */
		pos = 0;
	} else {
		pos = _pos;
	}

	/* blocks to modify */
	blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
	LASSERT(blocks > 0);

	if (inode != NULL && _pos != -1) {
		/* object size in blocks */
		est = (i_size_read(inode) + bs - 1) >> bits;
		allocated = inode->i_blocks >> (bits - 9);
		if (pos + size <= i_size_read(inode) && est <= allocated) {
			/* looks like an overwrite, no need to modify tree */
			credits = blocks;
			/* no need to modify i_size */
			goto out;
		}
	}

	if (osd_extents_enabled(sb, inode)) {
		/*
		 * many concurrent threads may grow tree by the time
		 * our transaction starts. so, consider 2 is a min depth
		 * for every level we may need to allocate a new block
		 * and take some entries from the old one. so, 3 blocks
		 * to allocate (bitmap, gd, itself) + old block - 4 per
		 * level.
		 */
		depth = inode != NULL ? ext_depth(inode) : 0;
		depth = min(max(depth, 1) + 3, LDISKFS_MAX_EXTENT_DEPTH);
		credits = depth;
		/* if not append, then split may need to modify
		 * existing blocks moving entries into the new ones
		 */
		if (_pos != -1)
			credits += depth;
		/* blocks to store data: bitmap,gd,itself */
		credits += blocks * 3;
	} else {
		credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
	}
	/* if inode is created as part of the transaction,
	 * then it's counted already by the creation method
	 */
	if (inode != NULL)
		credits++;

out:
	osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);

	/* dt_declare_write() is usually called for system objects, such
	 * as llog or last_rcvd files. We needn't enforce quota on those
	 * objects, so always set the lqi_space as 0.
	 */
	if (inode != NULL)
		rc = osd_declare_inode_qid(env, i_uid_read(inode),
					   i_gid_read(inode),
					   i_projid_read(inode), 0,
					   oh, obj, NULL, OSD_QID_BLK);

	if (rc == 0)
		rc = osd_trunc_lock(obj, oh, true);

	RETURN(rc);
}
static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
{
	/* LU-2634: clear the extent format for fast symlink */
	ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);

	/* Copying the NUL byte terminating the link target as well */
	memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen + 1);
	spin_lock(&inode->i_lock);
	LDISKFS_I(inode)->i_disksize = buflen;
	i_size_write(inode, buflen);
	spin_unlock(&inode->i_lock);
	osd_dirty_inode(inode, I_DIRTY_DATASYNC);
	return 0;
}
static int osd_ldiskfs_write_record(struct dt_object *dt, void *buf,
				    int bufsize, int write_NUL, loff_t *offs,
				    handle_t *handle)
{
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	struct buffer_head *bh = NULL;
	loff_t offset = *offs;
	loff_t new_size = i_size_read(inode);
	unsigned long block;
	int blocksize = 1 << inode->i_blkbits;
	struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
	int err = 0;
	int size;
	int boffs;
	int dirty_inode = 0;
	bool create, sparse, sync = false;

	if (write_NUL) {
		/*
		 * long symlink write does not count the NUL terminator in
		 * bufsize, we write it, and the inode's file size does not
		 * count the NUL terminator as well.
		 */
		((char *)buf)[bufsize] = '\0';
		++bufsize;
	}

	/* only the first flag-set matters */
	dirty_inode = !test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
					&ei->i_flags);

	/* sparse checking is racy, but sparse is very rare case, leave as is */
	sparse = (new_size > 0 && (inode->i_blocks >> (inode->i_blkbits - 9)) <
		  ((new_size - 1) >> inode->i_blkbits) + 1);

	while (bufsize > 0) {
		int credits = handle->h_buffer_credits;
		unsigned long last_block = (new_size == 0) ? 0 :
					   (new_size - 1) >> inode->i_blkbits;

		if (bh)
			brelse(bh);

		block = offset >> inode->i_blkbits;
		boffs = offset & (blocksize - 1);
		size = min(blocksize - boffs, bufsize);
		sync = (block > last_block || new_size == 0 || sparse);

		if (sync)
			down(&ei->i_append_sem);

		bh = __ldiskfs_bread(handle, inode, block, 0);

		if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
			CWARN("%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
			      osd_ino2name(inode),
			      offset, block, bufsize, *offs);

		if (IS_ERR_OR_NULL(bh)) {
			struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
			int flags = LDISKFS_GET_BLOCKS_CREATE;

			/* while the file system is being mounted, avoid
			 * preallocation otherwise mount can take a long
			 * time as mballoc cache is cold.
			 * XXX: this is a workaround until we have a proper
			 *      fix in mballoc
			 * XXX: works with extent-based files only
			 */
			if (!osd->od_cl_seq)
				flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
			bh = __ldiskfs_bread(handle, inode, block, flags);
			create = true;
		} else {
			if (sync) {
				up(&ei->i_append_sem);
				sync = false;
			}
			create = false;
		}

		if (IS_ERR_OR_NULL(bh)) {
			if (bh == NULL) {
				err = -EIO;
			} else {
				err = PTR_ERR(bh);
				bh = NULL;
			}

			CERROR("%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
			       osd_ino2name(inode), offset, block, bufsize,
			       *offs, credits, handle->h_buffer_credits, err);
			break;
		}

		err = osd_ldiskfs_journal_get_write_access(handle, inode->i_sb,
							   bh,
							   LDISKFS_JTR_NONE);
		if (err) {
			CERROR("journal_get_write_access() returned error %d\n",
			       err);
			break;
		}
		LASSERTF(boffs + size <= bh->b_size,
			 "boffs %d size %d bh->b_size %lu\n",
			 boffs, size, (unsigned long)bh->b_size);
		if (create) {
			memset(bh->b_data, 0, bh->b_size);
			if (sync) {
				up(&ei->i_append_sem);
				sync = false;
			}
		}
		memcpy(bh->b_data + boffs, buf, size);
		err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			break;

		if (offset + size > new_size)
			new_size = offset + size;
		offset += size;
		bufsize -= size;
		buf += size;
	}
	if (sync)
		up(&ei->i_append_sem);

	if (bh)
		brelse(bh);

	if (write_NUL)
		--new_size;
	/* correct in-core and on-disk sizes */
	if (new_size > i_size_read(inode)) {
		spin_lock(&inode->i_lock);
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (i_size_read(inode) > ei->i_disksize) {
			ei->i_disksize = i_size_read(inode);
			dirty_inode = 1;
		}
		spin_unlock(&inode->i_lock);
	}
	if (dirty_inode)
		osd_dirty_inode(inode, I_DIRTY_DATASYNC);

	if (err == 0)
		*offs = offset;
	return err;
}
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
			 const struct lu_buf *buf, loff_t *pos,
			 struct thandle *handle)
{
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	struct osd_thandle *oh;
	ssize_t result;
	int is_link;

	LASSERT(dt_object_exists(dt));

	LASSERT(handle != NULL);
	LASSERT(inode != NULL);
	dquot_initialize(inode);

	/* XXX: don't check: one declared chunk can be used many times */
	/* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */

	oh = container_of(handle, struct osd_thandle, ot_super);
	LASSERT(oh->ot_handle->h_transaction != NULL);
	osd_trans_exec_op(env, handle, OSD_OT_WRITE);

	/* Write small symlink to inode body as we need to maintain correct
	 * on-disk symlinks for ldiskfs.
	 * Note: the buf->lb_buf contains a NUL terminator while buf->lb_len
	 * does not count it in.
	 */
	is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
	if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
		result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
	else
		result = osd_ldiskfs_write_record(dt, buf->lb_buf, buf->lb_len,
						  is_link, pos, oh->ot_handle);
	if (result == 0)
		result = buf->lb_len;

	osd_trans_exec_check(env, handle, OSD_OT_WRITE);

	RETURN(result);
}
static int osd_declare_fallocate(const struct lu_env *env,
				 struct dt_object *dt, __u64 start, __u64 end,
				 int mode, struct thandle *th)
{
	struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
	struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	long long quota_space = 0;
	/* 5 is max tree depth. (inode + 4 index blocks) */
	int depth = 5;
	int rc = 0;

	ENTRY;

	/*
	 * mode == 0 (which is standard prealloc) and PUNCH are supported.
	 * The rest of the mode options are not supported yet.
	 */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		RETURN(-EOPNOTSUPP);

	/* disable fallocate completely */
	if (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks < 0)
		RETURN(-EOPNOTSUPP);

	LASSERT(th);
	LASSERT(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		rc = osd_declare_inode_qid(env, i_uid_read(inode),
					   i_gid_read(inode),
					   i_projid_read(inode), 0, oh,
					   osd_dt_obj(dt), NULL, OSD_QID_BLK);
		if (rc == 0)
			rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
		RETURN(rc);
	}

	/* quota space for metadata blocks;
	 * an approximate metadata estimate should be good enough.
	 */
	quota_space += PAGE_SIZE;
	quota_space += depth * LDISKFS_BLOCK_SIZE(osd_sb(osd));

	/* quota space should be reported in 1K blocks */
	quota_space = toqb(quota_space) + toqb(end - start) +
		      LDISKFS_META_TRANS_BLOCKS(inode->i_sb);

	/* We don't need to reserve credits for the whole fallocate here.
	 * We reserve space only for metadata; fallocate credits are
	 * extended as required.
	 */
	rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
				   i_projid_read(inode), quota_space, oh,
				   osd_dt_obj(dt), NULL, OSD_QID_BLK);
	RETURN(rc);
}
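
/*
 * Worked example for the reservation above (illustrative numbers only,
 * not from the original source): with 4KiB pages and blocks, the
 * metadata estimate is PAGE_SIZE + 5 * 4096 = 24576 bytes, i.e.
 * toqb(24576) = 24 one-KiB blocks; preallocating 1MiB of data adds
 * toqb(1048576) = 1024 blocks, plus LDISKFS_META_TRANS_BLOCKS() for
 * journalled metadata. The exact totals depend on block size and
 * filesystem geometry.
 */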
static int osd_fallocate_preallocate(const struct lu_env *env,
				     struct dt_object *dt,
				     __u64 start, __u64 end, int mode,
				     struct thandle *th)
{
	struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
	handle_t *handle = ldiskfs_journal_current_handle();
	unsigned int save_credits = oh->ot_credits;
	struct osd_object *obj = osd_dt_obj(dt);
	struct inode *inode = obj->oo_inode;
	struct ldiskfs_map_blocks map;
	unsigned int credits;
	ldiskfs_lblk_t blen;
	ldiskfs_lblk_t boff;
	loff_t new_size = 0;
	loff_t epos;
	int depth = 0;
	int flags;
	int rc = 0;

	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(inode != NULL);

	CDEBUG(D_INODE, "fallocate: inode #%lu: start %llu end %llu mode %d\n",
	       inode->i_ino, start, end, mode);

	dquot_initialize(inode);

	LASSERT(th);

	boff = start >> inode->i_blkbits;
	blen = (ALIGN(end, 1 << inode->i_blkbits) >> inode->i_blkbits) - boff;

	/* Create and mark new extents as either zero or unwritten */
	flags = (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks ||
		 !ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) ?
		LDISKFS_GET_BLOCKS_CREATE_ZERO :
		LDISKFS_GET_BLOCKS_CREATE_UNWRIT_EXT;
#ifdef LDISKFS_GET_BLOCKS_KEEP_SIZE
	if (mode & FALLOC_FL_KEEP_SIZE)
		flags |= LDISKFS_GET_BLOCKS_KEEP_SIZE;
#endif

	inode_lock(inode);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (end > i_size_read(inode) ||
	    end > LDISKFS_I(inode)->i_disksize)) {
		new_size = end;
		rc = inode_newsize_ok(inode, new_size);
		if (rc)
			GOTO(out, rc);
	}

	inode_dio_wait(inode);

	map.m_lblk = boff;
	map.m_len = blen;

	/* Don't normalize the request if it can fit in one extent so
	 * that it doesn't get unnecessarily split into multiple extents.
	 */
	if (blen <= EXT_UNWRITTEN_MAX_LEN)
		flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;

	/*
	 * credits to insert 1 extent into the extent tree.
	 */
	credits = osd_chunk_trans_blocks(inode, blen);
	depth = ext_depth(inode);

	while (rc >= 0 && blen) {
		/*
		 * Recalculate credits when the extent tree depth changes.
		 */
		if (depth != ext_depth(inode)) {
			credits = osd_chunk_trans_blocks(inode, blen);
			depth = ext_depth(inode);
		}

		/* TODO: quota check */
		rc = osd_extend_restart_trans(handle, credits, inode);
		if (rc)
			break;

		rc = ldiskfs_map_blocks(handle, inode, &map, flags);
		if (rc <= 0) {
			CDEBUG(D_INODE,
			       "inode #%lu: block %u: len %u: ldiskfs_map_blocks returned %d\n",
			       inode->i_ino, map.m_lblk, map.m_len, rc);
			ldiskfs_mark_inode_dirty(handle, inode);
			break;
		}

		map.m_lblk += rc;
		map.m_len = blen = blen - rc;
		epos = (loff_t)map.m_lblk << inode->i_blkbits;
		inode->i_ctime = current_time(inode);
		if (new_size) {
			if (epos > end)
				epos = end;
			if (ldiskfs_update_inode_size(inode, epos) & 0x1)
				inode->i_mtime = inode->i_ctime;
#ifdef LDISKFS_EOFBLOCKS_FL
		} else {
			if (epos > inode->i_size)
				ldiskfs_set_inode_flag(inode,
						       LDISKFS_INODE_EOFBLOCKS);
#endif
		}

		ldiskfs_mark_inode_dirty(handle, inode);
	}

out:
	/* extend credits if needed for operations such as attribute set */
	if (rc >= 0)
		rc = osd_extend_restart_trans(handle, save_credits, inode);

	inode_unlock(inode);

	RETURN(rc);
}
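
/*
 * Note (explanatory comment, not from the original source): the loop
 * above allocates at most one extent per ldiskfs_map_blocks() call,
 * advancing map.m_lblk/m_len by the number of blocks actually mapped.
 * The handle is extended or restarted before each insertion so every
 * extent has enough journal credits, and the credit estimate is
 * recomputed whenever the extent tree gains a level.
 */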
static int osd_fallocate_punch(const struct lu_env *env, struct dt_object *dt,
			       __u64 start, __u64 end, int mode,
			       struct thandle *th)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct inode *inode = obj->oo_inode;
	struct osd_access_lock *al;
	struct osd_thandle *oh;
	int rc = 0, found = 0;

	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(inode != NULL);

	dquot_initialize(inode);

	LASSERT(th);
	oh = container_of(th, struct osd_thandle, ot_super);
	LASSERT(oh->ot_handle->h_transaction != NULL);

	list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
		if (obj != al->tl_obj)
			continue;
		LASSERT(al->tl_shared == 0);
		found = 1;
		/* do actual punch in osd_trans_stop() */
		al->tl_start = start;
		al->tl_end = end;
		al->tl_mode = mode;
		al->tl_punch = true;
		break;
	}
	LASSERT(found);

	RETURN(rc);
}
static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
			 __u64 start, __u64 end, int mode, struct thandle *th)
{
	int rc;

	ENTRY;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		/* punch a hole */
		rc = osd_fallocate_punch(env, dt, start, end, mode, th);
	} else {
		/* standard preallocate */
		rc = osd_fallocate_preallocate(env, dt, start, end, mode, th);
	}

	RETURN(rc);
}
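
#if 0
/*
 * Usage sketch (illustrative only, not part of this file): preallocating
 * the first 1MiB of an object with mode == 0, i.e. standard
 * preallocation as described in osd_declare_fallocate() above. A real
 * caller obtains "th" from dt_trans_create()/dt_trans_start(); error
 * handling is elided.
 */
static int example_prealloc(const struct lu_env *env, struct dt_object *dt,
			    struct thandle *th)
{
	const struct dt_body_operations *ops = dt->do_body_ops;
	int rc;

	/* declare first so quota/credits are reserved ... */
	rc = ops->dbo_declare_fallocate(env, dt, 0, 1 << 20, 0, th);
	if (rc == 0)
		/* ... then execute within the started transaction */
		rc = ops->dbo_fallocate(env, dt, 0, 1 << 20, 0, th);
	return rc;
}
#endif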
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
			     __u64 start, __u64 end, struct thandle *th)
{
	struct osd_thandle *oh;
	struct osd_object *obj = osd_dt_obj(dt);
	struct inode *inode;
	int rc;

	ENTRY;

	LASSERT(th);
	oh = container_of(th, struct osd_thandle, ot_super);

	/*
	 * we don't need to reserve credits for the whole truncate:
	 * that's not possible, as truncate may need to free too many
	 * blocks to fit into a single transaction. instead we reserve
	 * credits to change i_size and put the inode onto the orphan
	 * list; if needed, truncate will extend or restart the
	 * transaction.
	 */
	osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
			     osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);

	inode = obj->oo_inode;
	LASSERT(inode);

	rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
				   i_projid_read(inode), 0, oh, obj,
				   NULL, OSD_QID_BLK);
	if (rc)
		RETURN(rc);

	/* if the object holds encrypted content, we need to make sure we
	 * truncate on an encryption unit boundary, or subsequent reads will
	 * get corrupted content
	 */
	if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
	    start & ~LUSTRE_ENCRYPTION_MASK)
		start = (start & LUSTRE_ENCRYPTION_MASK) +
			LUSTRE_ENCRYPTION_UNIT_SIZE;
	ll_truncate_pagecache(inode, start);

	rc = osd_trunc_lock(obj, oh, false);

	RETURN(rc);
}
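
/*
 * Example of the rounding above (illustrative, assuming a 4KiB
 * LUSTRE_ENCRYPTION_UNIT_SIZE): truncating to start = 5000 is not on a
 * unit boundary, so start becomes (5000 & ~4095) + 4096 = 8192; the
 * straddled encryption unit is kept whole and subsequent reads still
 * decrypt correctly.
 */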
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
		     __u64 start, __u64 end, struct thandle *th)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct inode *inode = obj->oo_inode;
	struct osd_access_lock *al;
	struct osd_thandle *oh;
	int rc = 0, found = 0;
	bool grow = false;

	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(inode != NULL);
	dquot_initialize(inode);

	LASSERT(th);
	oh = container_of(th, struct osd_thandle, ot_super);
	LASSERT(oh->ot_handle->h_transaction != NULL);

	/* we used to skip truncate to the current size to
	 * optimize truncates on OST. with DoM we can
	 * get attr_set to set a specific size (MDS_REINT)
	 * and then get a truncate RPC which essentially
	 * would be skipped. this is bad.. so, disable
	 * this optimization on MDS until the client stops
	 * sending MDS_REINT (LU-11033) -bzzz
	 */
	if (osd->od_is_ost && i_size_read(inode) == start)
		RETURN(0);

	osd_trans_exec_op(env, th, OSD_OT_PUNCH);

	spin_lock(&inode->i_lock);
	if (i_size_read(inode) < start)
		grow = true;
	i_size_write(inode, start);
	spin_unlock(&inode->i_lock);

	/* optimize grow case */
	if (grow) {
		osd_execute_truncate(obj);
		RETURN(0);
	}

	inode_lock(inode);
	/* add to the orphan list to ensure truncate completion
	 * if this transaction succeeds. ldiskfs_truncate()
	 * will take the inode out of the list
	 */
	rc = ldiskfs_orphan_add(oh->ot_handle, inode);
	inode_unlock(inode);
	if (rc != 0)
		RETURN(rc);

	list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
		if (obj != al->tl_obj)
			continue;
		LASSERT(al->tl_shared == 0);
		found = 1;
		/* do actual truncate in osd_trans_stop() */
		al->tl_truncate = 1;
		break;
	}
	LASSERT(found);

	RETURN(0);
}
static int fiemap_check_ranges(struct inode *inode,
			       u64 start, u64 len, u64 *new_len)
{
	loff_t maxbytes;

	*new_len = len;

	if (len == 0)
		return -EINVAL;

	if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
		maxbytes = inode->i_sb->s_maxbytes;
	else
		maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;

	if (start > maxbytes)
		return -EFBIG;

	/*
	 * Shrink request scope to what the fs can actually handle.
	 */
	if (len > maxbytes || (maxbytes - len) < start)
		*new_len = maxbytes - start;

	return 0;
}
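
/*
 * Worked example (illustrative numbers only, not from the original
 * source): on an extent-mapped inode with s_maxbytes = M, a request
 * with start = M - 4096 and len = 1MiB satisfies
 * (maxbytes - len) < start, so *new_len is shrunk to M - start = 4096;
 * a request with start > M fails outright instead of walking past what
 * the filesystem can address.
 */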
/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS	(UINT_MAX / sizeof(struct fiemap_extent))
static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
			  struct fiemap *fm)
{
	struct fiemap_extent_info fieinfo = {0, };
	struct inode *inode = osd_dt_obj(dt)->oo_inode;
	u64 len;
	int rc;
	DECLARE_MM_SEGMENT_T(saved_fs);

	LASSERT(inode);
	if (inode->i_op->fiemap == NULL)
		return -EOPNOTSUPP;

	if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
		return -EINVAL;

	rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
	if (rc)
		return rc;

	fieinfo.fi_flags = fm->fm_flags;
	fieinfo.fi_extents_max = fm->fm_extent_count;
	fieinfo.fi_extents_start = fm->fm_extents;

	if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
		filemap_write_and_wait(inode->i_mapping);

	access_set_kernel(saved_fs, &fieinfo);
	rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
	access_unset_kernel(saved_fs, &fieinfo);
	fm->fm_flags = fieinfo.fi_flags;
	fm->fm_mapped_extents = fieinfo.fi_extents_mapped;

	return rc;
}
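
#if 0
/*
 * Usage sketch (illustrative only, not part of this file): mapping the
 * first 16 extents of an object through dbo_fiemap_get. The fiemap
 * buffer must be sized for the inline extent array; "env" and "dt" are
 * assumed to come from the caller.
 */
static int example_fiemap(const struct lu_env *env, struct dt_object *dt)
{
	size_t size = sizeof(struct fiemap) +
		      16 * sizeof(struct fiemap_extent);
	struct fiemap *fm;
	int rc;

	OBD_ALLOC(fm, size);
	if (fm == NULL)
		return -ENOMEM;
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* whole object */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = 16;
	rc = dt->do_body_ops->dbo_fiemap_get(env, dt, fm);
	OBD_FREE(fm, size);
	return rc;
}
#endif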
static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
		       __u64 start, __u64 end, enum lu_ladvise_type advice)
{
	struct osd_object *obj = osd_dt_obj(dt);
	int rc = 0;

	ENTRY;

	switch (advice) {
	case LU_LADVISE_DONTNEED:
		if (end)
			invalidate_mapping_pages(obj->oo_inode->i_mapping,
						 start >> PAGE_SHIFT,
						 (end - 1) >> PAGE_SHIFT);
		break;
	default:
		rc = -ENOTSUPP;
		break;
	}

	RETURN(rc);
}
static loff_t osd_lseek(const struct lu_env *env, struct dt_object *dt,
			loff_t offset, int whence)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *dev = osd_obj2dev(obj);
	struct inode *inode = obj->oo_inode;
	struct file *file;
	loff_t result;

	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(inode);
	LASSERT(offset >= 0);

	file = alloc_file_pseudo(inode, dev->od_mnt, "/", O_NOATIME,
				 inode->i_fop);
	if (IS_ERR(file))
		RETURN(PTR_ERR(file));

	file->f_mode |= FMODE_64BITHASH;
	result = file->f_op->llseek(file, offset, whence);
	ihold(inode);
	fput(file);

	/*
	 * If 'offset' is beyond the end of the object file then treat it
	 * not as an error but as a valid case for SEEK_HOLE and return
	 * 'offset' as the result. LOV will decide if it is beyond the real
	 * end of file or not.
	 */
	if (whence == SEEK_HOLE && result == -ENXIO)
		result = offset;

	CDEBUG(D_INFO, "seek %s from %lld: %lld\n", whence == SEEK_HOLE ?
	       "hole" : "data", offset, result);
	RETURN(result);
}
/*
 * in some cases we may need declare methods for objects being created,
 * e.g., when we create a symlink
 */
const struct dt_body_operations osd_body_ops_new = {
	.dbo_declare_write = osd_declare_write,
};

const struct dt_body_operations osd_body_ops = {
	.dbo_read = osd_read,
	.dbo_declare_write = osd_declare_write,
	.dbo_write = osd_write,
	.dbo_bufs_get = osd_bufs_get,
	.dbo_bufs_put = osd_bufs_put,
	.dbo_write_prep = osd_write_prep,
	.dbo_declare_write_commit = osd_declare_write_commit,
	.dbo_write_commit = osd_write_commit,
	.dbo_read_prep = osd_read_prep,
	.dbo_declare_punch = osd_declare_punch,
	.dbo_punch = osd_punch,
	.dbo_fiemap_get = osd_fiemap_get,
	.dbo_ladvise = osd_ladvise,
	.dbo_declare_fallocate = osd_declare_fallocate,
	.dbo_fallocate = osd_fallocate,
	.dbo_lseek = osd_lseek,
};
/*
 * Get a truncate lock
 *
 * In order to take multi-transaction truncate out of the main transaction we
 * let the caller grab a lock on the object passed. The lock can be shared
 * (for writes) or exclusive (for truncate). It's not allowed to mix truncate
 * and write in the same transaction handle (do not confuse with a big ldiskfs
 * transaction containing lots of handles).
 * The lock must be taken at declaration.
 *
 * \param obj	object to lock
 * \param oh	transaction handle
 * \param shared	shared or exclusive
 *
 * \retval 0	lock is granted
 * \retval -ENOMEM	no memory to allocate the lock
 */
int osd_trunc_lock(struct osd_object *obj, struct osd_thandle *oh, bool shared)
{
	struct osd_access_lock *al, *tmp;

	LASSERT(obj);
	LASSERT(oh);

	list_for_each_entry(tmp, &oh->ot_trunc_locks, tl_list) {
		if (tmp->tl_obj != obj)
			continue;
		LASSERT(tmp->tl_shared == shared);
		/* found the same lock */
		return 0;
	}

	OBD_ALLOC_PTR(al);
	if (unlikely(al == NULL))
		return -ENOMEM;
	al->tl_obj = obj;
	al->tl_truncate = false;
	if (shared)
		down_read(&obj->oo_ext_idx_sem);
	else
		down_write(&obj->oo_ext_idx_sem);
	al->tl_shared = shared;
	lu_object_get(&obj->oo_dt.do_lu);

	list_add(&al->tl_list, &oh->ot_trunc_locks);

	return 0;
}
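
#if 0
/*
 * Usage sketch (illustrative only, not part of this file): truncate
 * locks are taken at declaration time, shared for writes and exclusive
 * for truncate/punch; the heavy work then runs from osd_trans_stop()
 * after the main transaction has been closed.
 */
static int example_declare_lock(struct osd_object *obj,
				struct osd_thandle *oh, bool is_truncate)
{
	/* shared for writes, exclusive for truncate */
	return osd_trunc_lock(obj, oh, !is_truncate);
}
#endif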
void osd_trunc_unlock_all(const struct lu_env *env, struct list_head *list)
{
	struct osd_access_lock *al, *tmp;

	list_for_each_entry_safe(al, tmp, list, tl_list) {
		if (al->tl_shared)
			up_read(&al->tl_obj->oo_ext_idx_sem);
		else
			up_write(&al->tl_obj->oo_ext_idx_sem);
		osd_object_put(env, al->tl_obj);
		list_del(&al->tl_list);
		OBD_FREE_PTR(al);
	}
}
/* For a partial-page punch, flush the punch range to disk immediately */
static void osd_partial_page_flush_punch(struct osd_device *d,
					 struct inode *inode, loff_t start,
					 loff_t end)
{
	if (osd_use_page_cache(d)) {
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	} else {
		/* Notice we use the "wait" version to ensure I/O is complete */
		filemap_write_and_wait_range(inode->i_mapping, start, end);
		invalidate_mapping_pages(inode->i_mapping,
					 start >> PAGE_SHIFT,
					 end >> PAGE_SHIFT);
	}
}
/*
 * For a partial-page truncate, flush the page to disk immediately to
 * avoid data corruption during direct disk write.  b=17397
 */
static void osd_partial_page_flush(struct osd_device *d, struct inode *inode,
				   loff_t offset)
{
	if (!(offset & ~PAGE_MASK))
		return;

	if (osd_use_page_cache(d)) {
		filemap_fdatawrite_range(inode->i_mapping, offset, offset + 1);
	} else {
		/* Notice we use the "wait" version to ensure I/O is complete */
		filemap_write_and_wait_range(inode->i_mapping, offset,
					     offset + 1);
		invalidate_mapping_pages(inode->i_mapping,
					 offset >> PAGE_SHIFT,
					 offset >> PAGE_SHIFT);
	}
}
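
/*
 * Example of the guard above (illustrative, assuming 4KiB pages):
 * offset & ~PAGE_MASK is the intra-page remainder, so a truncate to
 * 8192 (page aligned) returns early, while a truncate to 5000 leaves
 * remainder 904 and the page straddling the new EOF is flushed before
 * a later direct write can overlap it.
 */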
void osd_execute_truncate(struct osd_object *obj)
{
	struct osd_device *d = osd_obj2dev(obj);
	struct inode *inode = obj->oo_inode;
	loff_t size;

	/* simulate a crash before (or in the middle of) delayed truncate */
	if (CFS_FAIL_CHECK(OBD_FAIL_OSD_FAIL_AT_TRUNCATE)) {
		struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
		struct ldiskfs_sb_info *sbi = LDISKFS_SB(inode->i_sb);

		mutex_lock(&sbi->s_orphan_lock);
		list_del_init(&ei->i_orphan);
		mutex_unlock(&sbi->s_orphan_lock);
		return;
	}

	size = i_size_read(inode);
	inode_lock(inode);
	/* if the object holds encrypted content, we need to make sure we
	 * truncate on an encryption unit boundary, or block content will
	 * get corrupted
	 */
	if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL &&
	    size & ~LUSTRE_ENCRYPTION_MASK)
		inode->i_size = (size & LUSTRE_ENCRYPTION_MASK) +
				LUSTRE_ENCRYPTION_UNIT_SIZE;
	ldiskfs_truncate(inode);
	inode_unlock(inode);
	if (inode->i_size != size) {
		spin_lock(&inode->i_lock);
		i_size_write(inode, size);
		LDISKFS_I(inode)->i_disksize = size;
		spin_unlock(&inode->i_lock);
		osd_dirty_inode(inode, I_DIRTY_DATASYNC);
	}
	osd_partial_page_flush(d, inode, size);
}
static int osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
			     loff_t start, loff_t end, int mode)
{
	struct osd_device *d = osd_obj2dev(obj);
	struct inode *inode = obj->oo_inode;
	struct file *file;
	int rc;

	ENTRY;

	file = alloc_file_pseudo(inode, d->od_mnt, "/", O_NOATIME,
				 inode->i_fop);
	if (IS_ERR(file))
		RETURN(PTR_ERR(file));

	file->f_mode |= FMODE_64BITHASH;
	rc = file->f_op->fallocate(file, mode, start, end - start);
	ihold(inode);
	fput(file);
	if (rc == 0)
		osd_partial_page_flush_punch(d, inode, start, end - 1);

	RETURN(rc);
}
int osd_process_truncates(const struct lu_env *env, struct list_head *list)
{
	struct osd_access_lock *al;
	int rc = 0;

	LASSERT(!journal_current_handle());

	list_for_each_entry(al, list, tl_list) {
		if (al->tl_shared)
			continue;
		if (al->tl_truncate)
			osd_execute_truncate(al->tl_obj);
		else if (al->tl_punch)
			rc = osd_execute_punch(env, al->tl_obj, al->tl_start,
					       al->tl_end, al->tl_mode);
	}

	return rc;
}
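
/*
 * Note (explanatory comment, not from the original source): this
 * function runs deliberately outside any journal handle (see the
 * LASSERT above); ldiskfs_truncate() and the fallocate-based punch
 * start and extend their own transactions, so freeing an arbitrary
 * number of blocks never has to fit into the credits of the main OSD
 * transaction.
 */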