4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
40 * Author: Nikita Danilov <nikita@clusterfs.com>
41 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
45 /* LUSTRE_VERSION_CODE */
46 #include <lustre_ver.h>
47 /* prerequisite for linux/xattr.h */
48 #include <linux/types.h>
49 /* prerequisite for linux/xattr.h */
53 * struct OBD_{ALLOC,FREE}*()
56 #include <obd_support.h>
58 #include "osd_internal.h"
61 #include <ldiskfs/ldiskfs_extents.h>
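/*
 * Prepare the per-thread iobuf for a new bulk IO: reset the request
 * counters and, if the preallocated dr_pages/dr_blocks arrays are too
 * small for this request, reallocate them via lu_buf_realloc().
 */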
63 static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
64 int rw, int line, int pages)
68 LASSERTF(iobuf->dr_elapsed_valid == 0,
69 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
70 atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
72 LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
74 init_waitqueue_head(&iobuf->dr_wait);
75 atomic_set(&iobuf->dr_numreqs, 0);
80 iobuf->dr_elapsed = 0;
81 /* must be counted before, so assert */
83 iobuf->dr_init_at = line;
85 blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
86 if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
87 LASSERT(iobuf->dr_pg_buf.lb_len >=
88 pages * sizeof(iobuf->dr_pages[0]));
92 /* start with 1MB for 4K blocks */
94 while (i <= PTLRPC_MAX_BRW_PAGES && i < pages)
97 CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
98 (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
100 blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
101 iobuf->dr_max_pages = 0;
102 CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
103 (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
105 lu_buf_realloc(&iobuf->dr_bl_buf, blocks * sizeof(iobuf->dr_blocks[0]));
106 iobuf->dr_blocks = iobuf->dr_bl_buf.lb_buf;
107 if (unlikely(iobuf->dr_blocks == NULL))
110 lu_buf_realloc(&iobuf->dr_pg_buf, pages * sizeof(iobuf->dr_pages[0]));
111 iobuf->dr_pages = iobuf->dr_pg_buf.lb_buf;
112 if (unlikely(iobuf->dr_pages == NULL))
115 iobuf->dr_max_pages = pages;
119 #define osd_init_iobuf(dev, iobuf, rw, pages) \
120 __osd_init_iobuf(dev, iobuf, rw, __LINE__, pages)
122 static void osd_iobuf_add_page(struct osd_iobuf *iobuf, struct page *page)
124 LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
125 iobuf->dr_pages[iobuf->dr_npages++] = page;
128 void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
130 int rw = iobuf->dr_rw;
132 if (iobuf->dr_elapsed_valid) {
133 iobuf->dr_elapsed_valid = 0;
134 LASSERT(iobuf->dr_dev == d);
135 LASSERT(iobuf->dr_frags > 0);
136 lprocfs_oh_tally(&d->od_brw_stats.
137 hist[BRW_R_DIO_FRAGS+rw],
139 lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
144 #ifndef REQ_WRITE /* pre-2.6.35 */
145 #define __REQ_WRITE BIO_RW
148 static void dio_complete_routine(struct bio *bio, int error)
150 struct osd_iobuf *iobuf = bio->bi_private;
154 /* CAVEAT EMPTOR: possibly in IRQ context
155 * DO NOT record procfs stats here!!! */
157 if (unlikely(iobuf == NULL)) {
158 CERROR("***** bio->bi_private is NULL! This should never "
159 "happen. Normally, I would crash here, but instead I "
160 "will dump the bio contents to the console. Please "
161 "report this to <https://jira.hpdd.intel.com/> , along "
162 "with any interesting messages leading up to this point "
163 "(like SCSI errors, perhaps). Because bi_private is "
164 "NULL, I can't wake up the thread that initiated this "
165 "IO - you will probably have to reboot this node.\n");
166 CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
167 "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
168 "bi_private: %p\n", bio->bi_next, bio->bi_flags,
169 bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
170 bio->bi_end_io, atomic_read(&bio->bi_cnt),
175 /* the check is outside of the loop for performance reasons -bzzz */
176 if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
177 bio_for_each_segment(bvl, bio, i) {
178 if (likely(error == 0))
179 SetPageUptodate(bvl->bv_page);
180 LASSERT(PageLocked(bvl->bv_page));
182 atomic_dec(&iobuf->dr_dev->od_r_in_flight);
184 atomic_dec(&iobuf->dr_dev->od_w_in_flight);
187 /* any real error is good enough -bzzz */
188 if (error != 0 && iobuf->dr_error == 0)
189 iobuf->dr_error = error;
192 * set dr_elapsed before dr_numreqs turns to 0, otherwise
193 * it's possible that the service thread will see dr_numreqs
194 * is zero but dr_elapsed is not set yet, leading to lost
195 * data in this processing and an assertion in a subsequent
198 if (atomic_read(&iobuf->dr_numreqs) == 1) {
199 iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
200 iobuf->dr_elapsed_valid = 1;
202 if (atomic_dec_and_test(&iobuf->dr_numreqs))
203 wake_up(&iobuf->dr_wait);
205 /* Completed bios used to be chained off iobuf->dr_bios and freed in
206 * filter_clear_dreq(). It was then possible to exhaust the biovec-256
207 * mempool when serious on-disk fragmentation was encountered,
208 * deadlocking the OST. The bios are now released as soon as they
209 * complete, so the pool cannot be exhausted while IOs are in progress. bug 10076 */
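/*
 * Account a bio we are about to submit: bump dr_numreqs and the
 * per-device in-flight counter, and tally the RPC and disk IO size
 * histograms in od_brw_stats.
 */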
213 static void record_start_io(struct osd_iobuf *iobuf, int size)
215 struct osd_device *osd = iobuf->dr_dev;
216 struct obd_histogram *h = osd->od_brw_stats.hist;
219 atomic_inc(&iobuf->dr_numreqs);
221 if (iobuf->dr_rw == 0) {
222 atomic_inc(&osd->od_r_in_flight);
223 lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
224 atomic_read(&osd->od_r_in_flight));
225 lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
226 } else if (iobuf->dr_rw == 1) {
227 atomic_inc(&osd->od_w_in_flight);
228 lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
229 atomic_read(&osd->od_w_in_flight));
230 lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
236 static void osd_submit_bio(int rw, struct bio *bio)
238 LASSERTF(rw == 0 || rw == 1, "%x\n", rw);
240 submit_bio(READ, bio);
242 submit_bio(WRITE, bio);
245 static int can_be_merged(struct bio *bio, sector_t sector)
252 size = bio->bi_size >> 9;
253 return bio->bi_sector + size == sector ? 1 : 0;
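/*
 * Build and submit bios for all pages in the iobuf: physically
 * contiguous blocks are merged into as few bios as possible, holes are
 * zero-filled (reads only), and each bio is submitted as soon as it
 * cannot grow further. Reads are waited for here; write completion is
 * handled later, see osd_trans_stop().
 */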
256 static int osd_do_bio(struct osd_device *osd, struct inode *inode,
257 struct osd_iobuf *iobuf)
259 int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
260 struct page **pages = iobuf->dr_pages;
261 int npages = iobuf->dr_npages;
262 unsigned long *blocks = iobuf->dr_blocks;
263 int total_blocks = npages * blocks_per_page;
264 int sector_bits = inode->i_sb->s_blocksize_bits - 9;
265 unsigned int blocksize = inode->i_sb->s_blocksize;
266 struct bio *bio = NULL;
268 unsigned int page_offset;
277 LASSERT(iobuf->dr_npages == npages);
279 osd_brw_stats_update(osd, iobuf);
280 iobuf->dr_start_time = cfs_time_current();
282 for (page_idx = 0, block_idx = 0;
284 page_idx++, block_idx += blocks_per_page) {
286 page = pages[page_idx];
287 LASSERT(block_idx + blocks_per_page <= total_blocks);
289 for (i = 0, page_offset = 0;
291 i += nblocks, page_offset += blocksize * nblocks) {
295 if (blocks[block_idx + i] == 0) { /* hole */
296 LASSERTF(iobuf->dr_rw == 0,
297 "page_idx %u, block_idx %u, i %u\n",
298 page_idx, block_idx, i);
299 memset(kmap(page) + page_offset, 0, blocksize);
304 sector = (sector_t)blocks[block_idx + i] << sector_bits;
306 /* Additional contiguous file blocks? */
307 while (i + nblocks < blocks_per_page &&
308 (sector + (nblocks << sector_bits)) ==
309 ((sector_t)blocks[block_idx + i + nblocks] <<
314 can_be_merged(bio, sector) &&
315 bio_add_page(bio, page,
316 blocksize * nblocks, page_offset) != 0)
317 continue; /* added this frag OK */
320 struct request_queue *q =
321 bdev_get_queue(bio->bi_bdev);
323 /* Dang! I have to fragment this I/O */
324 CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
325 "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
327 bio->bi_vcnt, bio->bi_max_vecs,
328 bio->bi_size >> 9, queue_max_sectors(q),
329 bio_phys_segments(q, bio),
330 queue_max_phys_segments(q),
331 0, queue_max_hw_segments(q));
333 record_start_io(iobuf, bio->bi_size);
334 osd_submit_bio(iobuf->dr_rw, bio);
337 /* allocate new bio */
338 bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
339 (npages - page_idx) *
342 CERROR("Can't allocate bio %u*%u = %u pages\n",
343 (npages - page_idx), blocks_per_page,
344 (npages - page_idx) * blocks_per_page);
349 bio->bi_bdev = inode->i_sb->s_bdev;
350 bio->bi_sector = sector;
351 bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
352 bio->bi_end_io = dio_complete_routine;
353 bio->bi_private = iobuf;
355 rc = bio_add_page(bio, page,
356 blocksize * nblocks, page_offset);
362 record_start_io(iobuf, bio->bi_size);
363 osd_submit_bio(iobuf->dr_rw, bio);
368 /* in order to achieve better IO throughput, we don't wait for write
369 * completion here. instead we proceed with the transaction commit in
370 * parallel and wait for IO completion once the transaction is stopped;
371 * see osd_trans_stop() for more details -bzzz */
372 if (iobuf->dr_rw == 0) {
373 wait_event(iobuf->dr_wait,
374 atomic_read(&iobuf->dr_numreqs) == 0);
375 osd_fini_iobuf(osd, iobuf);
379 rc = iobuf->dr_error;
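/*
 * Split the byte range [offset, offset + len) into page-sized chunks and
 * fill one niobuf_local per chunk; the number of entries used is
 * returned through *nrpages.
 */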
383 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
384 struct niobuf_local *lnb)
391 int poff = offset & (PAGE_CACHE_SIZE - 1);
392 int plen = PAGE_CACHE_SIZE - poff;
396 lnb->lnb_file_offset = offset;
397 lnb->lnb_page_offset = poff;
399 /* lnb->lnb_flags = rnb->rnb_flags; */
401 lnb->lnb_page = NULL;
404 LASSERTF(plen <= len, "plen %u, len %lld\n", plen,
415 struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
417 struct inode *inode = osd_dt_obj(dt)->oo_inode;
418 struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
423 page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
424 GFP_NOFS | __GFP_HIGHMEM);
425 if (unlikely(page == NULL))
426 lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
432 * there are the following "locks":
449 int osd_bufs_get(const struct lu_env *env, struct dt_object *d, loff_t pos,
450 ssize_t len, struct niobuf_local *lnb, int rw,
451 struct lustre_capa *capa)
453 struct osd_object *obj = osd_dt_obj(d);
454 int npages, i, rc = 0;
456 LASSERT(obj->oo_inode);
458 osd_map_remote_to_local(pos, len, &npages, lnb);
460 for (i = 0; i < npages; i++, lnb++) {
461 lnb->lnb_page = osd_get_page(d, lnb->lnb_file_offset, rw);
462 if (lnb->lnb_page == NULL)
463 GOTO(cleanup, rc = -ENOMEM);
465 /* DLM locking protects us from write and truncate competing
466 * for the same region, but truncate can leave a dirty page in the
467 * cache. it's possible the writeout on such a page is in
468 * progress when we access it. it's also possible that during
469 * this writeout we put new (partial) data, but then won't
470 * be able to proceed in filter_commitrw_write(). thus let's
471 * just wait for writeout completion; it should be rare enough.
473 wait_on_page_writeback(lnb->lnb_page);
474 BUG_ON(PageWriteback(lnb->lnb_page));
476 lu_object_get(&d->do_lu);
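/*
 * Release the pages obtained by osd_bufs_get(): unlock each page, drop
 * the page cache reference and the per-page reference on the object.
 */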
484 static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
485 struct niobuf_local *lnb, int npages)
489 for (i = 0; i < npages; i++) {
490 if (lnb[i].lnb_page == NULL)
492 LASSERT(PageLocked(lnb[i].lnb_page));
493 unlock_page(lnb[i].lnb_page);
494 page_cache_release(lnb[i].lnb_page);
495 lu_object_put(env, &dt->do_lu);
496 lnb[i].lnb_page = NULL;
502 #ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
503 #define ldiskfs_ext_pblock(ex) ext_pblock((ex))
507 unsigned long *blocks;
514 static long ldiskfs_ext_find_goal(struct inode *inode,
515 struct ldiskfs_ext_path *path,
516 unsigned long block, int *aflags)
518 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
519 unsigned long bg_start;
520 unsigned long colour;
524 struct ldiskfs_extent *ex;
525 depth = path->p_depth;
527 /* try to predict block placement */
528 if ((ex = path[depth].p_ext))
529 return ldiskfs_ext_pblock(ex) +
530 (block - le32_to_cpu(ex->ee_block));
532 /* it looks like the index is empty;
533 * try to find a goal starting from the index block itself */
534 if (path[depth].p_bh)
535 return path[depth].p_bh->b_blocknr;
538 /* OK. use inode's group */
539 bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
540 le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
541 colour = (current->pid % 16) *
542 (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
543 return bg_start + colour + block;
546 static unsigned long new_blocks(handle_t *handle, struct inode *inode,
547 struct ldiskfs_ext_path *path,
548 unsigned long block, unsigned long *count,
551 struct ldiskfs_allocation_request ar;
552 unsigned long pblock;
555 /* find neighbour allocated blocks */
557 *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
561 *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
565 /* allocate new block */
566 ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
570 ar.flags = LDISKFS_MB_HINT_DATA;
571 pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
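/*
 * Callback for ldiskfs_ext_walk_space(): for a lookup (bp->create == 0)
 * it only copies the existing mapping into bp->blocks, while for a write
 * it starts a journal handle, allocates new blocks and inserts a fresh
 * extent, retrying if the extent tree generation changed in the meantime.
 */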
576 static int ldiskfs_ext_new_extent_cb(struct inode *inode,
577 struct ldiskfs_ext_path *path,
578 struct ldiskfs_ext_cache *cex,
579 #ifdef HAVE_EXT_PREPARE_CB_EXTENT
580 struct ldiskfs_extent *ex,
584 struct bpointers *bp = cbdata;
585 struct ldiskfs_extent nex;
586 unsigned long pblock;
592 #ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
593 if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
595 if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
601 if (bp->create == 0) {
603 if (cex->ec_block < bp->start)
604 i = bp->start - cex->ec_block;
605 if (i >= cex->ec_len)
606 CERROR("nothing to do?! i = %d, e_num = %u\n",
608 for (; i < cex->ec_len && bp->num; i++) {
618 tgen = LDISKFS_I(inode)->i_ext_generation;
619 count = ldiskfs_ext_calc_credits_for_insert(inode, path);
621 handle = ldiskfs_journal_start(inode, count + LDISKFS_ALLOC_NEEDED + 1);
622 if (IS_ERR(handle)) {
623 return PTR_ERR(handle);
626 if (tgen != LDISKFS_I(inode)->i_ext_generation) {
627 /* the tree has changed, so the path can be invalid at the moment */
628 ldiskfs_journal_stop(handle);
632 /* In the 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback func is not
633 * protected by i_data_sem as a whole, so we patch it to store the
634 * generation in the path and verify here that the tree hasn't changed */
635 down_write((&LDISKFS_I(inode)->i_data_sem));
637 /* validate the extent: make sure the extent tree has not changed */
638 if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
639 /* cex is invalid, try again */
640 up_write(&LDISKFS_I(inode)->i_data_sem);
641 ldiskfs_journal_stop(handle);
646 pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
649 BUG_ON(count > cex->ec_len);
651 /* insert new extent */
652 nex.ee_block = cpu_to_le32(cex->ec_block);
653 ldiskfs_ext_store_pblock(&nex, pblock);
654 nex.ee_len = cpu_to_le16(count);
655 err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
657 /* free data blocks we just allocated */
658 /* not a good idea to call discard here directly,
659 * but otherwise we'd need to call it on every free() */
660 ldiskfs_discard_preallocations(inode);
661 #ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
662 ldiskfs_free_blocks(handle, inode, NULL,
663 ldiskfs_ext_pblock(&nex),
664 le16_to_cpu(nex.ee_len), 0);
666 ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
667 le16_to_cpu(nex.ee_len), 0);
673 * By putting the length of the actual extent we just inserted,
674 * we are asking ldiskfs_ext_walk_space() to continue
675 * scanning after that block
677 cex->ec_len = le16_to_cpu(nex.ee_len);
678 cex->ec_start = ldiskfs_ext_pblock(&nex);
679 BUG_ON(le16_to_cpu(nex.ee_len) == 0);
680 BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
683 up_write((&LDISKFS_I(inode)->i_data_sem));
684 ldiskfs_journal_stop(handle);
689 CERROR("hmm. why do we find this extent?\n");
690 CERROR("initial space: %lu:%u\n",
691 bp->start, bp->init_num);
692 #ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
693 CERROR("current extent: %u/%u/%llu %d\n",
694 cex->ec_block, cex->ec_len,
695 (unsigned long long)cex->ec_start,
698 CERROR("current extent: %u/%u/%llu\n",
699 cex->ec_block, cex->ec_len,
700 (unsigned long long)cex->ec_start);
704 if (cex->ec_block < bp->start)
705 i = bp->start - cex->ec_block;
706 if (i >= cex->ec_len)
707 CERROR("nothing to do?! i = %d, e_num = %u\n",
709 for (; i < cex->ec_len && bp->num; i++) {
710 *(bp->blocks) = cex->ec_start + i;
711 #ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
712 if (cex->ec_type != LDISKFS_EXT_CACHE_EXTENT) {
714 if ((cex->ec_len == 0) || (cex->ec_start == 0)) {
716 /* unmap any possible underlying metadata from
717 * the block device mapping. bug 6998. */
718 unmap_underlying_metadata(inode->i_sb->s_bdev,
729 int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long block,
730 unsigned long num, unsigned long *blocks,
736 CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
737 block, block + num - 1, (unsigned) inode->i_ino);
741 bp.init_num = bp.num = num;
744 err = ldiskfs_ext_walk_space(inode, block, num,
745 ldiskfs_ext_new_extent_cb, &bp);
746 ldiskfs_ext_invalidate_cache(inode);
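/*
 * Map sorted pages of an extent-based inode: group runs of pages with
 * contiguous file offsets and map each run through
 * osd_ldiskfs_map_nblocks().
 */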
751 int osd_ldiskfs_map_ext_inode_pages(struct inode *inode, struct page **page,
752 int pages, unsigned long *blocks,
755 int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
757 struct page *fp = NULL;
760 CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
761 inode->i_ino, pages, (*page)->index);
763 /* pages are sorted already, so we just have to find
764 * contiguous ranges and process them properly */
767 /* start new extent */
772 } else if (fp->index + clen == (*page)->index) {
773 /* continue the extent */
780 /* process found extent */
781 rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
782 clen * blocks_per_page, blocks,
787 /* look for next extent */
789 blocks += blocks_per_page * clen;
793 rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
794 clen * blocks_per_page, blocks,
800 int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page,
801 int pages, unsigned long *blocks,
804 int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
808 for (i = 0, b = blocks; i < pages; i++, page++) {
809 rc = ldiskfs_map_inode_page(inode, *page, b, create);
811 CERROR("ino %lu, blk %lu create %d: rc %d\n",
812 inode->i_ino, *b, create, rc);
816 b += blocks_per_page;
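/*
 * Dispatch the mapping by inode format: use the extent path for
 * extent-based inodes and the legacy block-map path otherwise.
 */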
821 static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
822 int pages, unsigned long *blocks,
827 if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) {
828 rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
832 rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);
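/*
 * Prepare pages for a bulk write: pages that are only partially covered
 * by the write and lie within the current file size are read in first,
 * while the uncovered parts of pages beyond EOF are simply zero-filled.
 */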
837 static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
838 struct niobuf_local *lnb, int npages)
840 struct osd_thread_info *oti = osd_oti_get(env);
841 struct osd_iobuf *iobuf = &oti->oti_iobuf;
842 struct inode *inode = osd_dt_obj(dt)->oo_inode;
843 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
844 struct timeval start;
846 unsigned long timediff;
855 rc = osd_init_iobuf(osd, iobuf, 0, npages);
856 if (unlikely(rc != 0))
859 isize = i_size_read(inode);
860 maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1;
862 if (osd->od_writethrough_cache)
864 if (isize > osd->od_readcache_max_filesize)
867 do_gettimeofday(&start);
868 for (i = 0; i < npages; i++) {
871 generic_error_remove_page(inode->i_mapping,
875 * until commit the content of the page is undefined;
876 * we'll set it uptodate once the bulk is done. otherwise
877 * subsequent reads can access non-stable data
879 ClearPageUptodate(lnb[i].lnb_page);
881 if (lnb[i].lnb_len == PAGE_CACHE_SIZE)
884 if (maxidx >= lnb[i].lnb_page->index) {
885 osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
888 char *p = kmap(lnb[i].lnb_page);
890 off = lnb[i].lnb_page_offset;
893 off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
896 memset(p + off, 0, PAGE_CACHE_SIZE - off);
897 kunmap(lnb[i].lnb_page);
900 do_gettimeofday(&end);
901 timediff = cfs_timeval_sub(&end, &start, NULL);
902 lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
904 if (iobuf->dr_npages) {
905 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
907 iobuf->dr_blocks, 0);
908 if (likely(rc == 0)) {
909 rc = osd_do_bio(osd, inode, iobuf);
910 /* do IO stats for preparation reads */
911 osd_fini_iobuf(osd, iobuf);
917 /* Check if a block is allocated or not */
918 static int osd_is_mapped(struct inode *inode, obd_size offset)
920 sector_t (*fs_bmap)(struct address_space *, sector_t);
922 fs_bmap = inode->i_mapping->a_ops->bmap;
924 /* We can't know if we are overwriting or not */
925 if (unlikely(fs_bmap == NULL))
928 if (i_size_read(inode) == 0)
931 /* Beyond EOF, must not be mapped */
932 if (((i_size_read(inode) - 1) >> inode->i_blkbits) <
933 (offset >> inode->i_blkbits))
936 if (fs_bmap(inode->i_mapping, offset >> inode->i_blkbits) == 0)
942 static int osd_declare_write_commit(const struct lu_env *env,
943 struct dt_object *dt,
944 struct niobuf_local *lnb, int npages,
945 struct thandle *handle)
947 const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
948 struct inode *inode = osd_dt_obj(dt)->oo_inode;
949 struct osd_thandle *oh;
956 bool ignore_quota = false;
957 long long quota_space = 0;
960 LASSERT(handle != NULL);
961 oh = container_of0(handle, struct osd_thandle, ot_super);
962 LASSERT(oh->ot_handle == NULL);
966 /* calculate number of extents (probably better to pass nb) */
967 for (i = 0; i < npages; i++) {
968 if (i && lnb[i].lnb_file_offset !=
969 lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
972 if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
973 quota_space += PAGE_CACHE_SIZE;
975 /* ignore quota for the whole request if any page is from
976 * client cache or written by root.
978 * XXX once we drop the 1.8 client support, the check
979 * for whether a page is from the cache can be simplified as:
980 * !(lnb[i].flags & OBD_BRW_SYNC)
982 * XXX we could handle this on a per-lnb basis as done by
984 if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
985 (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
991 * each extent can go into a new leaf, causing a split;
992 * 5 is the max tree depth: inode + 4 index blocks;
993 * with blockmaps, depth is 3 at most
995 if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) {
997 * many concurrent threads may grow the tree by the time
998 * our transaction starts, so consider 2 the minimum depth
1000 depth = ext_depth(inode);
1001 depth = max(depth, 1) + 1;
1003 oh->ot_credits++; /* inode */
1004 oh->ot_credits += depth * 2 * extents;
1008 oh->ot_credits++; /* inode */
1009 oh->ot_credits += depth * extents;
1012 /* quota space for metadata blocks */
1013 quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));
1015 /* quota space should be reported in 1K blocks */
1016 quota_space = toqb(quota_space);
1018 /* each new block can go into a different group (bitmap + gd) */
1020 /* we can't dirty more bitmap blocks than exist */
1021 if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
1022 oh->ot_credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
1024 oh->ot_credits += newblocks;
1026 /* we can't dirty more gd blocks than exist */
1027 if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
1028 oh->ot_credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
1030 oh->ot_credits += newblocks;
1032 /* make sure the over quota flags were not set */
1033 lnb[0].lnb_flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);
1035 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1036 quota_space, oh, true, true, &flags,
1039 /* we need only to store the overquota flags in the first lnb for
1040 * now; once we support multiple-object BRW, this code needs to be
1042 if (flags & QUOTA_FL_OVER_USRQUOTA)
1043 lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA;
1044 if (flags & QUOTA_FL_OVER_GRPQUOTA)
1045 lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA;
1050 /* Commit the prepared bulk write: map pages to disk blocks, update i_size and submit IO */
1051 static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
1052 struct niobuf_local *lnb, int npages,
1053 struct thandle *thandle)
1055 struct osd_thread_info *oti = osd_oti_get(env);
1056 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1057 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1058 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1064 rc = osd_init_iobuf(osd, iobuf, 1, npages);
1065 if (unlikely(rc != 0))
1068 isize = i_size_read(inode);
1069 ll_vfs_dq_init(inode);
1071 for (i = 0; i < npages; i++) {
1072 if (lnb[i].lnb_rc == -ENOSPC &&
1073 osd_is_mapped(inode, lnb[i].lnb_file_offset)) {
1074 /* Allow the write to proceed if overwriting an existing block */
1079 if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */
1080 CDEBUG(D_INODE, "Skipping [%d] == %d\n", i,
1082 LASSERT(lnb[i].lnb_page);
1083 generic_error_remove_page(inode->i_mapping,
1088 LASSERT(PageLocked(lnb[i].lnb_page));
1089 LASSERT(!PageWriteback(lnb[i].lnb_page));
1091 if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize)
1092 isize = lnb[i].lnb_file_offset + lnb[i].lnb_len;
1095 * Since write and truncate are serialized by oo_sem, even
1096 * partial-page truncate should not leave dirty pages in the page cache */
1099 LASSERT(!PageDirty(lnb[i].lnb_page));
1101 SetPageUptodate(lnb[i].lnb_page);
1103 osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1106 if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
1108 } else if (iobuf->dr_npages > 0) {
1109 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1111 iobuf->dr_blocks, 1);
1113 /* no pages to write, no transno is needed */
1114 thandle->th_local = 1;
1117 if (likely(rc == 0)) {
1118 if (isize > i_size_read(inode)) {
1119 i_size_write(inode, isize);
1120 LDISKFS_I(inode)->i_disksize = isize;
1121 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1124 rc = osd_do_bio(osd, inode, iobuf);
1125 /* we don't do stats here as in the read path, because
1126 * the write is async: we'll do this in osd_put_bufs() */
1128 osd_fini_iobuf(osd, iobuf);
1131 if (unlikely(rc != 0)) {
1132 /* if write fails, we should drop pages from the cache */
1133 for (i = 0; i < npages; i++) {
1134 if (lnb[i].lnb_page == NULL)
1136 LASSERT(PageLocked(lnb[i].lnb_page));
1137 generic_error_remove_page(inode->i_mapping,
1145 static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
1146 struct niobuf_local *lnb, int npages)
1148 struct osd_thread_info *oti = osd_oti_get(env);
1149 struct osd_iobuf *iobuf = &oti->oti_iobuf;
1150 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1151 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1152 struct timeval start, end;
1153 unsigned long timediff;
1154 int rc = 0, i, m = 0, cache = 0, cache_hits = 0, cache_misses = 0;
1158 rc = osd_init_iobuf(osd, iobuf, 0, npages);
1159 if (unlikely(rc != 0))
1162 if (osd->od_read_cache)
1164 if (i_size_read(inode) > osd->od_readcache_max_filesize)
1167 do_gettimeofday(&start);
1168 for (i = 0; i < npages; i++) {
1170 if (i_size_read(inode) <= lnb[i].lnb_file_offset)
1171 /* If there's no more data, abort early.
1172 * lnb->lnb_rc == 0, so it's easy to detect later. */
1175 if (i_size_read(inode) <
1176 lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
1177 lnb[i].lnb_rc = i_size_read(inode) -
1178 lnb[i].lnb_file_offset;
1180 lnb[i].lnb_rc = lnb[i].lnb_len;
1181 m += lnb[i].lnb_len;
1183 if (PageUptodate(lnb[i].lnb_page)) {
1187 osd_iobuf_add_page(iobuf, lnb[i].lnb_page);
1191 generic_error_remove_page(inode->i_mapping,
1194 do_gettimeofday(&end);
1195 timediff = cfs_timeval_sub(&end, &start, NULL);
1196 lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
1198 if (cache_hits != 0)
1199 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT,
1201 if (cache_misses != 0)
1202 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS,
1204 if (cache_hits + cache_misses != 0)
1205 lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS,
1206 cache_hits + cache_misses);
1208 if (iobuf->dr_npages) {
1209 rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
1211 iobuf->dr_blocks, 0);
1212 rc = osd_do_bio(osd, inode, iobuf);
1214 /* IO stats will be done in osd_bufs_put() */
1221 * XXX: Another layering violation for now.
1223 * We don't want to use ->f_op->read methods, because generic file write
1225 * - serializes on ->i_sem, and
1227 * - does a lot of extra work like balance_dirty_pages(),
1229 * which doesn't work for globally shared files like /last_rcvd.
1231 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
1233 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
1235 memcpy(buffer, (char *)ei->i_data, buflen);
1240 int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
1242 struct buffer_head *bh;
1243 unsigned long block;
1250 /* prevent reading after eof */
1251 spin_lock(&inode->i_lock);
1252 if (i_size_read(inode) < *offs + size) {
1253 loff_t diff = i_size_read(inode) - *offs;
1254 spin_unlock(&inode->i_lock);
1256 CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
1257 i_size_read(inode), *offs);
1259 } else if (diff == 0) {
1265 spin_unlock(&inode->i_lock);
1268 blocksize = 1 << inode->i_blkbits;
1271 block = *offs >> inode->i_blkbits;
1272 boffs = *offs & (blocksize - 1);
1273 csize = min(blocksize - boffs, size);
1274 bh = ldiskfs_bread(NULL, inode, block, 0, &err);
1276 CERROR("%s: can't read %u@%llu on ino %lu: rc = %d\n",
1277 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
1278 csize, *offs, inode->i_ino, err);
1282 memcpy(buf, bh->b_data + boffs, csize);
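/*
 * dbo_read handler: a small symlink is read directly from the inode
 * body, anything else goes through osd_ldiskfs_read().
 */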
1292 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1293 struct lu_buf *buf, loff_t *pos,
1294 struct lustre_capa *capa)
1296 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1299 if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
1302 /* Read small symlink from inode body as we need to maintain correct
1303 * on-disk symlinks for ldiskfs.
1305 if (S_ISLNK(dt->do_lu.lo_header->loh_attr) &&
1306 (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1307 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
1309 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
1314 static inline int osd_extents_enabled(struct super_block *sb,
1315 struct inode *inode)
1317 if (inode != NULL) {
1318 if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
1320 } else if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
1321 LDISKFS_FEATURE_INCOMPAT_EXTENTS))
1326 static inline int osd_calc_bkmap_credits(struct super_block *sb,
1327 struct inode *inode,
1332 int credits, bits, bs, i;
1334 bits = sb->s_blocksize_bits;
1337 /* legacy blockmap: 3 levels * 3 (bitmap,gd,itself)
1338 * we do not expect blockmaps on large files,
1339 * so let's shrink it to 2 levels (4GB files) */
1341 /* this is default reservation: 2 levels */
1342 credits = (blocks + 2) * 3;
1344 /* actual offset is unknown, hard to optimize */
1348 /* now check for a few specific cases to optimize */
1349 if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
1352 /* allocate if not allocated */
1353 if (inode == NULL) {
1354 credits += blocks * 2;
1357 for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
1358 LASSERT(i < LDISKFS_NDIR_BLOCKS);
1359 if (LDISKFS_I(inode)->i_data[i] == 0)
1362 } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
1363 /* single indirect */
1364 credits = blocks * 3;
1365 /* the indirect block has probably been allocated already */
1366 if (!inode || LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK])
1373 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
1374 const struct lu_buf *buf, loff_t _pos,
1375 struct thandle *handle)
1377 struct osd_object *obj = osd_dt_obj(dt);
1378 struct inode *inode = obj->oo_inode;
1379 struct super_block *sb = osd_sb(osd_obj2dev(obj));
1380 struct osd_thandle *oh;
1381 int rc = 0, est = 0, credits, blocks, allocated = 0;
1387 LASSERT(buf != NULL);
1388 LASSERT(handle != NULL);
1390 oh = container_of0(handle, struct osd_thandle, ot_super);
1391 LASSERT(oh->ot_handle == NULL);
1394 bits = sb->s_blocksize_bits;
1398 /* if this is an append, then we
1399 * should expect a cross-block record */
1405 /* blocks to modify */
1406 blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
1407 LASSERT(blocks > 0);
1409 if (inode != NULL && _pos != -1) {
1410 /* object size in blocks */
1411 est = (i_size_read(inode) + bs - 1) >> bits;
1412 allocated = inode->i_blocks >> (bits - 9);
1413 if (pos + size <= i_size_read(inode) && est <= allocated) {
1414 /* looks like an overwrite, no need to modify tree */
1416 /* no need to modify i_size */
1421 if (osd_extents_enabled(sb, inode)) {
1423 * many concurrent threads may grow the tree by the time
1424 * our transaction starts, so consider 2 the minimum depth.
1425 * for every level we may need to allocate a new block
1426 * and take some entries from the old one, so that's 3 blocks
1427 * to allocate (bitmap, gd, itself) + the old block - 4 per
1430 depth = inode != NULL ? ext_depth(inode) : 0;
1431 depth = max(depth, 1) + 1;
1433 /* if not an append, then a split may need to modify
1434 * existing blocks, moving entries into the new ones */
1437 /* blocks to store data: bitmap,gd,itself */
1438 credits += blocks * 3;
1440 credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
1442 /* if the inode is created as part of the transaction,
1443 * then it's already counted by the creation method */
1449 osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
1451 /* dt_declare_write() is usually called for system objects, such
1452 * as llog or last_rcvd files. We needn't enforce quota on those
1453 * objects, so always set lqi_space to 0. */
1455 rc = osd_declare_inode_qid(env, i_uid_read(inode),
1456 i_gid_read(inode), 0, oh, true,
1461 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
1463 /* LU-2634: clear the extent format for fast symlink */
1464 ldiskfs_clear_inode_flag(inode, LDISKFS_INODE_EXTENTS);
1466 memcpy((char *)&LDISKFS_I(inode)->i_data, (char *)buffer, buflen);
1467 LDISKFS_I(inode)->i_disksize = buflen;
1468 i_size_write(inode, buflen);
1469 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
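/*
 * Write a record into the inode block by block under the running journal
 * handle (ldiskfs_bread + journal write access + memcpy), extending
 * i_size/i_disksize if the write goes past the current end of file.
 */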
1474 int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
1475 int write_NUL, loff_t *offs, handle_t *handle)
1477 struct buffer_head *bh = NULL;
1478 loff_t offset = *offs;
1479 loff_t new_size = i_size_read(inode);
1480 unsigned long block;
1481 int blocksize = 1 << inode->i_blkbits;
1485 int dirty_inode = 0;
1489 * a long symlink write does not count the NUL terminator in
1490 * bufsize; we write it, and the inode's file size does not
1491 * count the NUL terminator either.
1493 ((char *)buf)[bufsize] = '\0';
1496 while (bufsize > 0) {
1500 block = offset >> inode->i_blkbits;
1501 boffs = offset & (blocksize - 1);
1502 size = min(blocksize - boffs, bufsize);
1503 bh = ldiskfs_bread(handle, inode, block, 1, &err);
1505 CERROR("%s: error reading offset %llu (block %lu): "
1507 inode->i_sb->s_id, offset, block, err);
1511 err = ldiskfs_journal_get_write_access(handle, bh);
1513 CERROR("journal_get_write_access() returned error %d\n",
1517 LASSERTF(boffs + size <= bh->b_size,
1518 "boffs %d size %d bh->b_size %lu\n",
1519 boffs, size, (unsigned long)bh->b_size);
1520 memcpy(bh->b_data + boffs, buf, size);
1521 err = ldiskfs_journal_dirty_metadata(handle, bh);
1525 if (offset + size > new_size)
1526 new_size = offset + size;
1536 /* correct in-core and on-disk sizes */
1537 if (new_size > i_size_read(inode)) {
1538 spin_lock(&inode->i_lock);
1539 if (new_size > i_size_read(inode))
1540 i_size_write(inode, new_size);
1541 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
1542 LDISKFS_I(inode)->i_disksize = i_size_read(inode);
1545 spin_unlock(&inode->i_lock);
1547 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
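/*
 * dbo_write handler: small symlinks are stored in the inode body via
 * osd_ldiskfs_writelink(), other records are written with
 * osd_ldiskfs_write_record() under the already started transaction.
 */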
1555 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1556 const struct lu_buf *buf, loff_t *pos,
1557 struct thandle *handle, struct lustre_capa *capa,
1560 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1561 struct osd_thandle *oh;
1565 LASSERT(dt_object_exists(dt));
1567 if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_WRITE))
1570 LASSERT(handle != NULL);
1571 LASSERT(inode != NULL);
1572 ll_vfs_dq_init(inode);
1574 /* XXX: don't check: one declared chunk can be used many times */
1575 /* osd_trans_exec_op(env, handle, OSD_OT_WRITE); */
1577 oh = container_of(handle, struct osd_thandle, ot_super);
1578 LASSERT(oh->ot_handle->h_transaction != NULL);
1579 /* Write small symlink to inode body as we need to maintain correct
1580 * on-disk symlinks for ldiskfs.
1581 * Note: buf->lb_buf contains a NUL terminator while buf->lb_len
1582 * does not count it.
1584 is_link = S_ISLNK(dt->do_lu.lo_header->loh_attr);
1585 if (is_link && (buf->lb_len < sizeof(LDISKFS_I(inode)->i_data)))
1586 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
1588 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
1589 buf->lb_len, is_link, pos,
1592 result = buf->lb_len;
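/*
 * Reserve only the credits needed to update i_size and put the inode on
 * the orphan list; the truncate itself extends or restarts the handle as
 * needed (see osd_punch()).
 */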
1596 static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
1597 __u64 start, __u64 end, struct thandle *th)
1599 struct osd_thandle *oh;
1600 struct inode *inode;
1605 oh = container_of(th, struct osd_thandle, ot_super);
1608 * we don't need to reserve credits for the whole truncate:
1609 * it's not possible, as a truncate may need to free too many
1610 * blocks and that won't fit in a single transaction. instead
1611 * we reserve credits to change i_size and put the inode onto the
1612 * orphan list. if needed, truncate will extend or restart
1615 osd_trans_declare_op(env, oh, OSD_OT_PUNCH,
1616 osd_dto_credits_noquota[DTO_ATTR_SET_BASE] + 3);
1618 inode = osd_dt_obj(dt)->oo_inode;
1621 rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
1622 0, oh, true, true, NULL, false);
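/*
 * Truncate the object to @start: update i_size, drop the page cache
 * beyond the new size, run the filesystem truncate and, if the
 * transaction changed underneath, restart the journal handle with the
 * originally declared credits.
 */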
1626 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
1627 __u64 start, __u64 end, struct thandle *th,
1628 struct lustre_capa *capa)
1630 struct osd_thandle *oh;
1631 struct osd_object *obj = osd_dt_obj(dt);
1632 struct inode *inode = obj->oo_inode;
1635 int rc = 0, rc2 = 0;
1638 LASSERT(end == OBD_OBJECT_EOF);
1639 LASSERT(dt_object_exists(dt));
1640 LASSERT(osd_invariant(obj));
1641 LASSERT(inode != NULL);
1642 ll_vfs_dq_init(inode);
1645 oh = container_of(th, struct osd_thandle, ot_super);
1646 LASSERT(oh->ot_handle->h_transaction != NULL);
1648 osd_trans_exec_op(env, th, OSD_OT_PUNCH);
1650 tid = oh->ot_handle->h_transaction->t_tid;
1652 i_size_write(inode, start);
1653 ll_truncate_pagecache(inode, start);
1654 #ifdef HAVE_INODEOPS_TRUNCATE
1655 if (inode->i_op->truncate) {
1656 inode->i_op->truncate(inode);
1659 ldiskfs_truncate(inode);
1662 * For a partial-page truncate, flush the page to disk immediately to
1663 * avoid data corruption during direct disk write. b=17397
1665 if ((start & ~CFS_PAGE_MASK) != 0)
1666 rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
1668 h = journal_current_handle();
1670 LASSERT(h == oh->ot_handle);
1672 if (tid != h->h_transaction->t_tid) {
1673 int credits = oh->ot_credits;
1675 * the transaction has changed during truncate;
1676 * we need to restart the handle with our credits
1678 if (h->h_buffer_credits < credits) {
1679 if (ldiskfs_journal_extend(h, credits))
1680 rc2 = ldiskfs_journal_restart(h, credits);
1684 RETURN(rc == 0 ? rc2 : rc);
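/*
 * Fill in the fiemap for the object by building a temporary file/dentry
 * around the inode and calling the filesystem's unlocked_ioctl() with
 * FSFILT_IOC_FIEMAP.
 */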
1687 static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
1688 struct ll_user_fiemap *fm)
1690 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1691 struct osd_thread_info *info = osd_oti_get(env);
1692 struct dentry *dentry = &info->oti_obj_dentry;
1693 struct file *file = &info->oti_file;
1694 mm_segment_t saved_fs;
1698 dentry->d_inode = inode;
1699 dentry->d_sb = inode->i_sb;
1700 file->f_dentry = dentry;
1701 file->f_mapping = inode->i_mapping;
1702 file->f_op = inode->i_fop;
1703 set_file_inode(file, inode);
1705 saved_fs = get_fs();
1707 /* ldiskfs_ioctl does not have an inode argument */
1708 if (inode->i_fop->unlocked_ioctl)
1709 rc = inode->i_fop->unlocked_ioctl(file, FSFILT_IOC_FIEMAP,
1718 * in some cases we may need declare methods for objects being created,
1719 * e.g., when we create a symlink
1721 const struct dt_body_operations osd_body_ops_new = {
1722 .dbo_declare_write = osd_declare_write,
1725 const struct dt_body_operations osd_body_ops = {
1726 .dbo_read = osd_read,
1727 .dbo_declare_write = osd_declare_write,
1728 .dbo_write = osd_write,
1729 .dbo_bufs_get = osd_bufs_get,
1730 .dbo_bufs_put = osd_bufs_put,
1731 .dbo_write_prep = osd_write_prep,
1732 .dbo_declare_write_commit = osd_declare_write_commit,
1733 .dbo_write_commit = osd_write_commit,
1734 .dbo_read_prep = osd_read_prep,
1735 .dbo_declare_punch = osd_declare_punch,
1736 .dbo_punch = osd_punch,
1737 .dbo_fiemap_get = osd_fiemap_get,