X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Fosd-ldiskfs%2Fosd_io.c;h=1e4e9154592c3381dc0fb08926be286533e75b91;hb=14d162c5438de959d0ea01fb1b40a7c5dfa764d1;hp=79c4675749de9463dd07b18aec706a4719b15d5c;hpb=98060d83459ba10409f295898f0ec917f938b4d3;p=fs%2Flustre-release.git

diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c
index 79c4675..1e4e915 100644
--- a/lustre/osd-ldiskfs/osd_io.c
+++ b/lustre/osd-ldiskfs/osd_io.c
@@ -60,37 +60,6 @@
 /* ext_depth() */
 #include <ldiskfs/ldiskfs_extents.h>
 
-#ifndef HAVE_PAGE_CONSTANT
-#define mapping_cap_page_constant_write(mapping) 0
-#define SetPageConstant(page) do {} while (0)
-#define ClearPageConstant(page) do {} while (0)
-#endif
-
-#ifndef HAS_GENERIC_ERROR_REMOVE_PAGE
-int generic_error_remove_page(struct address_space *mapping, struct page *page)
-{
-        if (mapping == NULL)
-                return -EINVAL;
-
-        if (mapping != page->mapping)
-                return -EIO;
-        /*
-         * Only punch for normal data pages for now.
-         * Handling other types like directories would need more auditing.
-         */
-        if (!S_ISREG(mapping->host->i_mode))
-                return -EIO;
-
-        if (page_mapped(page)) {
-                unmap_mapping_range(mapping,
-                                    (loff_t)page->index << PAGE_CACHE_SHIFT,
-                                    PAGE_CACHE_SIZE, 0);
-        }
-        truncate_complete_page(mapping, page);
-        return 0;
-}
-#endif
-
 static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                             int rw, int line, int pages)
 {
@@ -98,22 +67,22 @@ static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
 
        LASSERTF(iobuf->dr_elapsed_valid == 0,
                 "iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
-                cfs_atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
+                atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
                 iobuf->dr_init_at);
        LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
 
-       cfs_waitq_init(&iobuf->dr_wait);
-       cfs_atomic_set(&iobuf->dr_numreqs, 0);
-       iobuf->dr_npages = 0;
-       iobuf->dr_error = 0;
-       iobuf->dr_dev = d;
-       iobuf->dr_frags = 0;
-       iobuf->dr_elapsed = 0;
-       /* must be counted before, so assert */
-       iobuf->dr_rw = rw;
+       init_waitqueue_head(&iobuf->dr_wait);
+       atomic_set(&iobuf->dr_numreqs, 0);
+       iobuf->dr_npages = 0;
+       iobuf->dr_error = 0;
+       iobuf->dr_dev = d;
+       iobuf->dr_frags = 0;
+       iobuf->dr_elapsed = 0;
+       /* must be counted before, so assert */
+       iobuf->dr_rw = rw;
        iobuf->dr_init_at = line;
 
-       blocks = pages * (CFS_PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
+       blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
        if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
                LASSERT(iobuf->dr_pg_buf.lb_len >=
                        pages * sizeof(iobuf->dr_pages[0]));
@@ -128,7 +97,7 @@ static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf,
                CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
                       (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
                pages = i;
-               blocks = pages * (CFS_PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
+               blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
                iobuf->dr_max_pages = 0;
                CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
                       (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
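The iobuf sizing above converts a page count into filesystem-block slots with `PAGE_CACHE_SIZE >> s_blocksize_bits`. A minimal standalone sketch of the same arithmetic, assuming 4 KB pages (the helper name is hypothetical, not a Lustre function):

    #include <stdio.h>

    /* hypothetical helper mirroring __osd_init_iobuf's sizing logic */
    static unsigned blocks_for_pages(unsigned pages, unsigned page_size,
                                     unsigned blocksize_bits)
    {
        /* one page holds (page_size >> blocksize_bits) filesystem blocks */
        return pages * (page_size >> blocksize_bits);
    }

    int main(void)
    {
        /* 256 pages of 4096 bytes over a 1 KB-block filesystem: 1024 slots */
        printf("%u\n", blocks_for_pages(256, 4096, 10));
        /* the same pages over a 4 KB-block filesystem: 256 slots */
        printf("%u\n", blocks_for_pages(256, 4096, 12));
        return 0;
    }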
@@ -176,13 +145,7 @@ void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf)
 #define __REQ_WRITE BIO_RW
 #endif
 
-#ifdef HAVE_BIO_ENDIO_2ARG
-#define DIO_RETURN(a)
 static void dio_complete_routine(struct bio *bio, int error)
-#else
-#define DIO_RETURN(a) return(a)
-static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
-#endif
 {
        struct osd_iobuf *iobuf = bio->bi_private;
        struct bio_vec *bvl;
@@ -191,23 +154,23 @@ static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */
 
-       if (unlikely(iobuf == NULL)) {
-               CERROR("***** bio->bi_private is NULL! This should never "
-                      "happen. Normally, I would crash here, but instead I "
-                      "will dump the bio contents to the console. Please "
-                      "report this to , along "
-                      "with any interesting messages leading up to this point "
-                      "(like SCSI errors, perhaps). Because bi_private is "
-                      "NULL, I can't wake up the thread that initiated this "
-                      "IO - you will probably have to reboot this node.\n");
-               CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
-                      "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
-                      "bi_private: %p\n", bio->bi_next, bio->bi_flags,
-                      bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
-                      bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
-                      bio->bi_private);
-               DIO_RETURN(0);
-       }
+       if (unlikely(iobuf == NULL)) {
+               CERROR("***** bio->bi_private is NULL! This should never "
+                      "happen. Normally, I would crash here, but instead I "
+                      "will dump the bio contents to the console. Please "
+                      "report this to , along "
+                      "with any interesting messages leading up to this point "
+                      "(like SCSI errors, perhaps). Because bi_private is "
+                      "NULL, I can't wake up the thread that initiated this "
+                      "IO - you will probably have to reboot this node.\n");
+               CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
+                      "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
+                      "bi_private: %p\n", bio->bi_next, bio->bi_flags,
+                      bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
+                      bio->bi_end_io, atomic_read(&bio->bi_cnt),
+                      bio->bi_private);
+               return;
+       }
 
        /* the check is outside of the cycle for performance reason -bzzz */
        if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
@@ -215,24 +178,15 @@ static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
                        if (likely(error == 0))
                                SetPageUptodate(bvl->bv_page);
                        LASSERT(PageLocked(bvl->bv_page));
-                       ClearPageConstant(bvl->bv_page);
                }
-               cfs_atomic_dec(&iobuf->dr_dev->od_r_in_flight);
+               atomic_dec(&iobuf->dr_dev->od_r_in_flight);
        } else {
-               struct page *p = iobuf->dr_pages[0];
-               if (p->mapping) {
-                       if (mapping_cap_page_constant_write(p->mapping)) {
-                               bio_for_each_segment(bvl, bio, i) {
-                                       ClearPageConstant(bvl->bv_page);
-                               }
-                       }
-               }
-               cfs_atomic_dec(&iobuf->dr_dev->od_w_in_flight);
+               atomic_dec(&iobuf->dr_dev->od_w_in_flight);
        }
 
-       /* any real error is good enough -bzzz */
-       if (error != 0 && iobuf->dr_error == 0)
-               iobuf->dr_error = error;
+       /* any real error is good enough -bzzz */
+       if (error != 0 && iobuf->dr_error == 0)
+               iobuf->dr_error = error;
 
        /*
         * set dr_elapsed before dr_numreqs turns to 0, otherwise
@@ -241,43 +195,42 @@ static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
         * data in this processing and an assertion in a subsequent
         * call to OSD.
         */
-       if (cfs_atomic_read(&iobuf->dr_numreqs) == 1) {
+       if (atomic_read(&iobuf->dr_numreqs) == 1) {
                iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
                iobuf->dr_elapsed_valid = 1;
        }
-       if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
-               cfs_waitq_signal(&iobuf->dr_wait);
-
-       /* Completed bios used to be chained off iobuf->dr_bios and freed in
-        * filter_clear_dreq(). It was then possible to exhaust the biovec-256
-        * mempool when serious on-disk fragmentation was encountered,
-        * deadlocking the OST. The bios are now released as soon as complete
-        * so the pool cannot be exhausted while IOs are competing. bug 10076 */
-       bio_put(bio);
-       DIO_RETURN(0);
+       if (atomic_dec_and_test(&iobuf->dr_numreqs))
+               wake_up(&iobuf->dr_wait);
+
+       /* Completed bios used to be chained off iobuf->dr_bios and freed in
+        * filter_clear_dreq(). It was then possible to exhaust the biovec-256
+        * mempool when serious on-disk fragmentation was encountered,
+        * deadlocking the OST. The bios are now released as soon as complete
+        * so the pool cannot be exhausted while IOs are competing. bug 10076 */
+       bio_put(bio);
 }
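The completion path above follows a standard counting pattern: every submitted bio bumps dr_numreqs, and the completion that drops the count to zero wakes the waiter; dr_elapsed must be published while the counter is still nonzero, so the waiter never wakes to stale timing data. A userspace sketch of the same discipline using C11 atomics and pthreads (names are illustrative, not Lustre APIs):

    #include <stdatomic.h>
    #include <pthread.h>

    struct iobuf {
        atomic_int      numreqs;   /* in-flight requests */
        pthread_mutex_t lock;
        pthread_cond_t  done;      /* signalled by the last completion */
    };

    /* completion side: called once per finished request */
    static void complete_one(struct iobuf *iobuf)
    {
        /* publish per-IO results *before* the decrement that may wake
         * the waiter, mirroring the dr_elapsed/dr_numreqs ordering */
        if (atomic_fetch_sub(&iobuf->numreqs, 1) == 1) {
            pthread_mutex_lock(&iobuf->lock);
            pthread_cond_signal(&iobuf->done);
            pthread_mutex_unlock(&iobuf->lock);
        }
    }

    /* submitter side: wait until every request has completed */
    static void wait_all(struct iobuf *iobuf)
    {
        pthread_mutex_lock(&iobuf->lock);
        while (atomic_load(&iobuf->numreqs) != 0)
            pthread_cond_wait(&iobuf->done, &iobuf->lock);
        pthread_mutex_unlock(&iobuf->lock);
    }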
 
 static void record_start_io(struct osd_iobuf *iobuf, int size)
 {
-       struct osd_device *osd = iobuf->dr_dev;
-       struct obd_histogram *h = osd->od_brw_stats.hist;
-
-       iobuf->dr_frags++;
-       cfs_atomic_inc(&iobuf->dr_numreqs);
-
-       if (iobuf->dr_rw == 0) {
-               cfs_atomic_inc(&osd->od_r_in_flight);
-               lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
-                                cfs_atomic_read(&osd->od_r_in_flight));
-               lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
-       } else if (iobuf->dr_rw == 1) {
-               cfs_atomic_inc(&osd->od_w_in_flight);
-               lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
-                                cfs_atomic_read(&osd->od_w_in_flight));
-               lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
-       } else {
-               LBUG();
-       }
+       struct osd_device *osd = iobuf->dr_dev;
+       struct obd_histogram *h = osd->od_brw_stats.hist;
+
+       iobuf->dr_frags++;
+       atomic_inc(&iobuf->dr_numreqs);
+
+       if (iobuf->dr_rw == 0) {
+               atomic_inc(&osd->od_r_in_flight);
+               lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
+                                atomic_read(&osd->od_r_in_flight));
+               lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
+       } else if (iobuf->dr_rw == 1) {
+               atomic_inc(&osd->od_w_in_flight);
+               lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
+                                atomic_read(&osd->od_w_in_flight));
+               lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
+       } else {
+               LBUG();
+       }
 }
 
 static void osd_submit_bio(int rw, struct bio *bio)
@@ -303,7 +256,7 @@ static int can_be_merged(struct bio *bio, sector_t sector)
 static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                      struct osd_iobuf *iobuf)
 {
-       int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
+       int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        unsigned long *blocks = iobuf->dr_blocks;
@@ -357,15 +310,6 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                                            sector_bits))
                                nblocks++;
 
-                       /* I only set the page to be constant only if it
-                        * is mapped to a contiguous underlying disk block(s).
-                        * It will then make sure the corresponding device
-                        * cache of raid5 will be overwritten by this page.
-                        * - jay */
-                       if (iobuf->dr_rw && (nblocks == blocks_per_page) &&
-                           mapping_cap_page_constant_write(inode->i_mapping))
-                               SetPageConstant(page);
-
                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
@@ -384,8 +328,7 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                               bio->bi_size >> 9, queue_max_sectors(q),
                               bio_phys_segments(q, bio),
                               queue_max_phys_segments(q),
-                              bio_hw_segments(q, bio),
-                              queue_max_hw_segments(q));
+                              0, queue_max_hw_segments(q));
 
                        record_start_io(iobuf, bio->bi_size);
                        osd_submit_bio(iobuf->dr_rw, bio);
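osd_do_bio keeps appending pages to the current bio while the next chunk is physically contiguous (can_be_merged) and bio_add_page accepts it; otherwise it submits the bio and starts a new one. A compact sketch of that decision with the disk geometry reduced to plain integers (all names hypothetical):

    /* one request covering [start, start + nr_sectors) on disk */
    struct req {
        unsigned long start;       /* first sector */
        unsigned long nr_sectors;  /* length in sectors */
    };

    /* mirror of can_be_merged(): the next chunk must begin exactly
     * where the request currently ends */
    static int can_merge(const struct req *req, unsigned long sector)
    {
        return req->start + req->nr_sectors == sector;
    }

    /* grow the request if contiguous; returns 0 when the caller must
     * submit the current request and start a new one */
    static int add_chunk(struct req *req, unsigned long sector,
                         unsigned long nr_sectors)
    {
        if (!can_merge(req, sector))
            return 0;
        req->nr_sectors += nr_sectors;
        return 1;
    }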
@@ -421,19 +364,20 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode,
                rc = 0;
        }
 
- out:
-       /* in order to achieve better IO throughput, we don't wait for writes
-        * completion here. instead we proceed with transaction commit in
-        * parallel and wait for IO completion once transaction is stopped
-        * see osd_trans_stop() for more details -bzzz */
-       if (iobuf->dr_rw == 0) {
-               cfs_wait_event(iobuf->dr_wait,
-                              cfs_atomic_read(&iobuf->dr_numreqs) == 0);
-       }
+out:
+       /* in order to achieve better IO throughput, we don't wait for writes
+        * completion here. instead we proceed with transaction commit in
+        * parallel and wait for IO completion once transaction is stopped
+        * see osd_trans_stop() for more details -bzzz */
+       if (iobuf->dr_rw == 0) {
+               wait_event(iobuf->dr_wait,
+                          atomic_read(&iobuf->dr_numreqs) == 0);
+               osd_fini_iobuf(osd, iobuf);
+       }
 
-       if (rc == 0)
-               rc = iobuf->dr_error;
-       RETURN(rc);
+       if (rc == 0)
+               rc = iobuf->dr_error;
+       RETURN(rc);
 }
 
 static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
@@ -444,8 +388,8 @@ static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
        *nrpages = 0;
 
        while (len > 0) {
-               int poff = offset & (CFS_PAGE_SIZE - 1);
-               int plen = CFS_PAGE_SIZE - poff;
+               int poff = offset & (PAGE_CACHE_SIZE - 1);
+               int plen = PAGE_CACHE_SIZE - poff;
 
                if (plen > len)
                        plen = len;
@@ -476,7 +420,7 @@ struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
 
        LASSERT(inode);
 
-       page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
+       page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
                                   GFP_NOFS | __GFP_HIGHMEM);
        if (unlikely(page == NULL))
                lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
@@ -514,12 +458,6 @@ int osd_bufs_get(const struct lu_env *env, struct dt_object *d, loff_t pos,
        osd_map_remote_to_local(pos, len, &npages, lnb);
 
        for (i = 0; i < npages; i++, lnb++) {
-
-               /* We still set up for ungranted pages so that granted pages
-                * can be written to disk as they were promised, and portals
-                * needs to keep the pages all aligned properly. */
-               lnb->dentry = (void *) obj;
-
                lnb->page = osd_get_page(d, lnb->lnb_file_offset, rw);
                if (lnb->page == NULL)
                        GOTO(cleanup, rc = -ENOMEM);
@@ -546,15 +484,8 @@ cleanup:
 static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
                        struct niobuf_local *lnb, int npages)
 {
-       struct osd_thread_info *oti = osd_oti_get(env);
-       struct osd_iobuf *iobuf = &oti->oti_iobuf;
-       struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
        int i;
 
-       /* to do IO stats, notice we do this here because
-        * osd_do_bio() doesn't wait for write to complete */
-       osd_fini_iobuf(d, iobuf);
-
        for (i = 0; i < npages; i++) {
                if (lnb[i].page == NULL)
                        continue;
@@ -567,6 +498,345 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
        RETURN(0);
 }
 
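osd_map_remote_to_local walks an (offset, len) byte range and emits one niobuf per page, so only the first fragment can start mid-page and only the last can be short. The same loop in isolation (PAGE_SIZE stands in for PAGE_CACHE_SIZE; the struct and names are illustrative):

    #define PAGE_SIZE 4096

    struct frag {
        unsigned long long file_offset; /* absolute byte offset */
        unsigned int       page_offset; /* offset within the page */
        unsigned int       len;         /* bytes covered in this page */
    };

    /* split [offset, offset + len) into per-page fragments; the caller
     * provides enough slots in `out`; returns the fragment count */
    static int map_range(unsigned long long offset, long len,
                         struct frag *out)
    {
        int n = 0;

        while (len > 0) {
            int poff = offset & (PAGE_SIZE - 1);
            int plen = PAGE_SIZE - poff;

            if (plen > len)
                plen = len;
            out[n].file_offset = offset;
            out[n].page_offset = poff;
            out[n].len = plen;
            n++;
            offset += plen;
            len -= plen;
        }
        return n;
    }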
+#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
+#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
+#endif
+
+struct bpointers {
+       unsigned long *blocks;
+       unsigned long start;
+       int num;
+       int init_num;
+       int create;
+};
+
+static long ldiskfs_ext_find_goal(struct inode *inode,
+                                  struct ldiskfs_ext_path *path,
+                                  unsigned long block, int *aflags)
+{
+       struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
+       unsigned long bg_start;
+       unsigned long colour;
+       int depth;
+
+       if (path) {
+               struct ldiskfs_extent *ex;
+               depth = path->p_depth;
+
+               /* try to predict block placement */
+               if ((ex = path[depth].p_ext))
+                       return ldiskfs_ext_pblock(ex) +
+                               (block - le32_to_cpu(ex->ee_block));
+
+               /* it looks index is empty
+                * try to find starting from index itself */
+               if (path[depth].p_bh)
+                       return path[depth].p_bh->b_blocknr;
+       }
+
+       /* OK. use inode's group */
+       bg_start = (ei->i_block_group * LDISKFS_BLOCKS_PER_GROUP(inode->i_sb)) +
+               le32_to_cpu(LDISKFS_SB(inode->i_sb)->s_es->s_first_data_block);
+       colour = (current->pid % 16) *
+               (LDISKFS_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+       return bg_start + colour + block;
+}
+
+static unsigned long new_blocks(handle_t *handle, struct inode *inode,
+                                struct ldiskfs_ext_path *path,
+                                unsigned long block, unsigned long *count,
+                                int *err)
+{
+       struct ldiskfs_allocation_request ar;
+       unsigned long pblock;
+       int aflags;
+
+       /* find neighbour allocated blocks */
+       ar.lleft = block;
+       *err = ldiskfs_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
+       if (*err)
+               return 0;
+       ar.lright = block;
+       *err = ldiskfs_ext_search_right(inode, path, &ar.lright, &ar.pright);
+       if (*err)
+               return 0;
+
+       /* allocate new block */
+       ar.goal = ldiskfs_ext_find_goal(inode, path, block, &aflags);
+       ar.inode = inode;
+       ar.logical = block;
+       ar.len = *count;
+       ar.flags = LDISKFS_MB_HINT_DATA;
+       pblock = ldiskfs_mb_new_blocks(handle, &ar, err);
+       *count = ar.len;
+       return pblock;
+}
+
+static int ldiskfs_ext_new_extent_cb(struct inode *inode,
+                                     struct ldiskfs_ext_path *path,
+                                     struct ldiskfs_ext_cache *cex,
+#ifdef HAVE_EXT_PREPARE_CB_EXTENT
+                                     struct ldiskfs_extent *ex,
+#endif
+                                     void *cbdata)
+{
+       struct bpointers *bp = cbdata;
+       struct ldiskfs_extent nex;
+       unsigned long pblock;
+       unsigned long tgen;
+       int err, i;
+       unsigned long count;
+       handle_t *handle;
+
+#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
+       if (cex->ec_type == LDISKFS_EXT_CACHE_EXTENT) {
+#else
+       if ((cex->ec_len != 0) && (cex->ec_start != 0)) {
+#endif
+               err = EXT_CONTINUE;
+               goto map;
+       }
+
+       if (bp->create == 0) {
+               i = 0;
+               if (cex->ec_block < bp->start)
+                       i = bp->start - cex->ec_block;
+               if (i >= cex->ec_len)
+                       CERROR("nothing to do?! i = %d, e_num = %u\n",
+                              i, cex->ec_len);
+               for (; i < cex->ec_len && bp->num; i++) {
+                       *(bp->blocks) = 0;
+                       bp->blocks++;
+                       bp->num--;
+                       bp->start++;
+               }
+
+               return EXT_CONTINUE;
+       }
+
+       tgen = LDISKFS_I(inode)->i_ext_generation;
+       count = ldiskfs_ext_calc_credits_for_insert(inode, path);
+
+       handle = ldiskfs_journal_start(inode, count + LDISKFS_ALLOC_NEEDED + 1);
+       if (IS_ERR(handle)) {
+               return PTR_ERR(handle);
+       }
+
+       if (tgen != LDISKFS_I(inode)->i_ext_generation) {
+               /* the tree has changed. so path can be invalid at moment */
+               ldiskfs_journal_stop(handle);
+               return EXT_REPEAT;
+       }
+
+       /* In 2.6.32 kernel, ldiskfs_ext_walk_space()'s callback func is not
+        * protected by i_data_sem as whole. so we patch it to store
+        * generation to path and now verify the tree hasn't changed */
+       down_write((&LDISKFS_I(inode)->i_data_sem));
+
+       /* validate extent, make sure the extent tree does not changed */
+       if (LDISKFS_I(inode)->i_ext_generation != path[0].p_generation) {
+               /* cex is invalid, try again */
+               up_write(&LDISKFS_I(inode)->i_data_sem);
+               ldiskfs_journal_stop(handle);
+               return EXT_REPEAT;
+       }
+
+       count = cex->ec_len;
+       pblock = new_blocks(handle, inode, path, cex->ec_block, &count, &err);
+       if (!pblock)
+               goto out;
+       BUG_ON(count > cex->ec_len);
+
+       /* insert new extent */
+       nex.ee_block = cpu_to_le32(cex->ec_block);
+       ldiskfs_ext_store_pblock(&nex, pblock);
+       nex.ee_len = cpu_to_le16(count);
+       err = ldiskfs_ext_insert_extent(handle, inode, path, &nex, 0);
+       if (err) {
+               /* free data blocks we just allocated */
+               /* not a good idea to call discard here directly,
+                * but otherwise we'd need to call it every free() */
+               ldiskfs_discard_preallocations(inode);
+#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
+               ldiskfs_free_blocks(handle, inode, NULL,
+                                   ldiskfs_ext_pblock(&nex),
+                                   le16_to_cpu(nex.ee_len), 0);
+#else
+               ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex),
+                                   le16_to_cpu(nex.ee_len), 0);
+#endif
+               goto out;
+       }
+
+       /*
+        * Putting len of the actual extent we just inserted,
+        * we are asking ldiskfs_ext_walk_space() to continue
+        * scaning after that block
+        */
+       cex->ec_len = le16_to_cpu(nex.ee_len);
+       cex->ec_start = ldiskfs_ext_pblock(&nex);
+       BUG_ON(le16_to_cpu(nex.ee_len) == 0);
+       BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
+
+out:
+       up_write((&LDISKFS_I(inode)->i_data_sem));
+       ldiskfs_journal_stop(handle);
+map:
+       if (err >= 0) {
+               /* map blocks */
+               if (bp->num == 0) {
+                       CERROR("hmm. why do we find this extent?\n");
+                       CERROR("initial space: %lu:%u\n",
+                              bp->start, bp->init_num);
+#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
+                       CERROR("current extent: %u/%u/%llu %d\n",
+                              cex->ec_block, cex->ec_len,
+                              (unsigned long long)cex->ec_start,
+                              cex->ec_type);
+#else
+                       CERROR("current extent: %u/%u/%llu\n",
+                              cex->ec_block, cex->ec_len,
+                              (unsigned long long)cex->ec_start);
+#endif
+               }
+               i = 0;
+               if (cex->ec_block < bp->start)
+                       i = bp->start - cex->ec_block;
+               if (i >= cex->ec_len)
+                       CERROR("nothing to do?! i = %d, e_num = %u\n",
+                              i, cex->ec_len);
+               for (; i < cex->ec_len && bp->num; i++) {
+                       *(bp->blocks) = cex->ec_start + i;
+#ifdef LDISKFS_EXT_CACHE_EXTENT /* until kernel 2.6.37 */
+                       if (cex->ec_type != LDISKFS_EXT_CACHE_EXTENT) {
+#else
+                       if ((cex->ec_len == 0) || (cex->ec_start == 0)) {
+#endif
+                               /* unmap any possible underlying metadata from
+                                * the block device mapping. bug 6998. */
+                               unmap_underlying_metadata(inode->i_sb->s_bdev,
+                                                         *(bp->blocks));
+                       }
+                       bp->blocks++;
+                       bp->num--;
+                       bp->start++;
+               }
+       }
+       return err;
+}
+
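The callback above is an optimistic-concurrency loop: the extent-tree generation is sampled before the journal handle is opened, re-checked under i_data_sem, and the whole walk is restarted with EXT_REPEAT whenever another thread changed the tree in between. Stripped of the filesystem specifics, the control flow looks like this sketch (a pthreads toy, not ldiskfs API; the unlocked generation sample mirrors the tgen read above):

    #include <pthread.h>

    struct tree {
        pthread_mutex_t lock;
        unsigned long   generation;   /* bumped on every modification */
    };

    enum cb_result { CB_CONTINUE, CB_REPEAT };

    /* one optimistic attempt: sample the generation unlocked, then
     * re-check it under the lock before modifying anything */
    static enum cb_result try_modify(struct tree *t)
    {
        unsigned long tgen = t->generation;   /* unlocked sample */

        /* (a journal handle would be opened here, before the lock) */
        pthread_mutex_lock(&t->lock);
        if (t->generation != tgen) {
            /* tree changed while unlocked: caller restarts the walk */
            pthread_mutex_unlock(&t->lock);
            return CB_REPEAT;
        }
        t->generation++;                      /* the actual modification */
        pthread_mutex_unlock(&t->lock);
        return CB_CONTINUE;
    }

    /* caller side: repeat until the attempt wins the race */
    static void modify(struct tree *t)
    {
        while (try_modify(t) == CB_REPEAT)
            ;
    }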
+int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long block,
+                            unsigned long num, unsigned long *blocks,
+                            int create)
+{
+       struct bpointers bp;
+       int err;
+
+       CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
+              block, block + num - 1, (unsigned) inode->i_ino);
+
+       bp.blocks = blocks;
+       bp.start = block;
+       bp.init_num = bp.num = num;
+       bp.create = create;
+
+       err = ldiskfs_ext_walk_space(inode, block, num,
+                                    ldiskfs_ext_new_extent_cb, &bp);
+       ldiskfs_ext_invalidate_cache(inode);
+
+       return err;
+}
+
+int osd_ldiskfs_map_ext_inode_pages(struct inode *inode, struct page **page,
+                                    int pages, unsigned long *blocks,
+                                    int create)
+{
+       int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+       int rc = 0, i = 0;
+       struct page *fp = NULL;
+       int clen = 0;
+
+       CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
+              inode->i_ino, pages, (*page)->index);
+
+       /* pages are sorted already. so, we just have to find
+        * contig. space and process them properly */
+       while (i < pages) {
+               if (fp == NULL) {
+                       /* start new extent */
+                       fp = *page++;
+                       clen = 1;
+                       i++;
+                       continue;
+               } else if (fp->index + clen == (*page)->index) {
+                       /* continue the extent */
+                       page++;
+                       clen++;
+                       i++;
+                       continue;
+               }
+
+               /* process found extent */
+               rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
+                                            clen * blocks_per_page, blocks,
+                                            create);
+               if (rc)
+                       GOTO(cleanup, rc);
+
+               /* look for next extent */
+               fp = NULL;
+               blocks += blocks_per_page * clen;
+       }
+
+       if (fp)
+               rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
+                                            clen * blocks_per_page, blocks,
+                                            create);
+cleanup:
+       return rc;
+}
+
+int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page,
+                                   int pages, unsigned long *blocks,
+                                   int create)
+{
+       int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+       unsigned long *b;
+       int rc = 0, i;
+
+       for (i = 0, b = blocks; i < pages; i++, page++) {
+               rc = ldiskfs_map_inode_page(inode, *page, b, create);
+               if (rc) {
+                       CERROR("ino %lu, blk %lu create %d: rc %d\n",
+                              inode->i_ino, *b, create, rc);
+                       break;
+               }
+
+               b += blocks_per_page;
+       }
+       return rc;
+}
+
+static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
+                                       int pages, unsigned long *blocks,
+                                       int create, struct mutex *optional_mutex)
+{
+       int rc;
+
+       if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
+               rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
+                                                    blocks, create);
+               return rc;
+       }
+       if (optional_mutex != NULL)
+               mutex_lock(optional_mutex);
+       rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create);
+       if (optional_mutex != NULL)
+               mutex_unlock(optional_mutex);
+
+       return rc;
+}
+
 static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                          struct niobuf_local *lnb, int npages)
 {
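osd_ldiskfs_map_ext_inode_pages batches the sorted page array into runs of consecutive page indices, so each run costs only one block-mapping call. The clustering loop on its own, over plain index values (hypothetical names):

    /* invoke one mapping call per run of consecutive indices;
     * `idx` is sorted, as the page array in the OSD is */
    static void map_runs(const unsigned long *idx, int n,
                         void (*map_run)(unsigned long first, int count))
    {
        int i = 0, clen = 0;
        unsigned long first = 0;

        while (i < n) {
            if (clen == 0) {
                first = idx[i];          /* start a new run */
                clen = 1;
            } else if (first + clen == idx[i]) {
                clen++;                  /* extend the current run */
            } else {
                map_run(first, clen);    /* flush the completed run */
                clen = 0;
                continue;                /* re-examine idx[i] */
            }
            i++;
        }
        if (clen)
            map_run(first, clen);        /* flush the tail run */
    }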
@@ -590,15 +860,15 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                RETURN(rc);
 
        isize = i_size_read(inode);
-       maxidx = ((isize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;
+       maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1;
        if (osd->od_writethrough_cache)
                cache = 1;
        if (isize > osd->od_readcache_max_filesize)
                cache = 0;
 
-       cfs_gettimeofday(&start);
-       for (i = 0; i < npages; i++) {
+       do_gettimeofday(&start);
+       for (i = 0; i < npages; i++) {
 
                if (cache == 0)
                        generic_error_remove_page(inode->i_mapping,
@@ -611,7 +881,7 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                 */
                ClearPageUptodate(lnb[i].page);
 
-               if (lnb[i].len == CFS_PAGE_SIZE)
+               if (lnb[i].len == PAGE_CACHE_SIZE)
                        continue;
 
                if (maxidx >= lnb[i].page->index) {
@@ -626,19 +896,19 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
                        off = (lnb[i].lnb_page_offset + lnb[i].len) &
                              ~CFS_PAGE_MASK;
                        if (off)
-                               memset(p + off, 0, CFS_PAGE_SIZE - off);
+                               memset(p + off, 0, PAGE_CACHE_SIZE - off);
                        kunmap(lnb[i].page);
-               }
-       }
-       cfs_gettimeofday(&end);
-       timediff = cfs_timeval_sub(&end, &start, NULL);
-       lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
+               }
+       }
+       do_gettimeofday(&end);
+       timediff = cfs_timeval_sub(&end, &start, NULL);
+       lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
 
        if (iobuf->dr_npages) {
-               rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
-                                                      iobuf->dr_npages,
-                                                      iobuf->dr_blocks,
-                                                      0, NULL);
+               rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
+                                                iobuf->dr_npages,
+                                                iobuf->dr_blocks,
+                                                0, NULL);
                if (likely(rc == 0)) {
                        rc = osd_do_bio(osd, inode, iobuf);
                        /* do IO stats for preparation reads */
@@ -704,7 +974,7 @@ static int osd_declare_write_commit(const struct lu_env *env,
                        extents++;
 
                if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
-                       quota_space += CFS_PAGE_SIZE;
+                       quota_space += PAGE_CACHE_SIZE;
 
                /* ignore quota for the whole request if any page is from
                 * client cache or written by root.
@@ -839,10 +1109,10 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
        if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else if (iobuf->dr_npages > 0) {
-               rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
-                                                      iobuf->dr_npages,
-                                                      iobuf->dr_blocks,
-                                                      1, NULL);
+               rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
+                                                iobuf->dr_npages,
+                                                iobuf->dr_blocks,
+                                                1, NULL);
        } else {
                /* no pages to write, no transno is needed */
                thandle->th_local = 1;
@@ -858,7 +1128,9 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                rc = osd_do_bio(osd, inode, iobuf);
                /* we don't do stats here as in read path because
                 * write is async: we'll do this in osd_put_bufs() */
-       }
+       } else {
+               osd_fini_iobuf(osd, iobuf);
+       }
 
        if (unlikely(rc != 0)) {
                /* if write fails, we should drop pages from the cache */
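osd_write_prep zeroes the tail of any partial page that lies past the on-disk EOF, so stale memory never reaches the media; pages inside EOF are read back instead. The tail-fill arithmetic extracted into a sketch (PAGE_SIZE stands in for PAGE_CACHE_SIZE, PAGE_MASK mirrors CFS_PAGE_MASK):

    #include <string.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* zero everything in `page` after the fragment [page_offset,
     * page_offset + len), mirroring the memset() in osd_write_prep() */
    static void zero_tail(char *page, unsigned page_offset, unsigned len)
    {
        unsigned long off = (page_offset + len) & ~PAGE_MASK;

        /* off == 0 means the fragment ends on a page boundary,
         * so there is no tail to clear */
        if (off)
            memset(page + off, 0, PAGE_SIZE - off);
    }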
@@ -895,8 +1167,8 @@ static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
        if (i_size_read(inode) > osd->od_readcache_max_filesize)
                cache = 0;
 
-       cfs_gettimeofday(&start);
-       for (i = 0; i < npages; i++) {
+       do_gettimeofday(&start);
+       for (i = 0; i < npages; i++) {
 
                if (i_size_read(inode) <= lnb[i].lnb_file_offset)
                        /* If there's no more data, abort early.
@@ -919,18 +1191,18 @@ static int osd_read_prep(const struct lu_env *env, struct dt_object *dt,
                                            LPROC_OSD_CACHE_MISS, 1);
                        osd_iobuf_add_page(iobuf, lnb[i].page);
                }
-               if (cache == 0)
-                       generic_error_remove_page(inode->i_mapping,lnb[i].page);
-       }
-       cfs_gettimeofday(&end);
-       timediff = cfs_timeval_sub(&end, &start, NULL);
-       lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
+               if (cache == 0)
+                       generic_error_remove_page(inode->i_mapping,lnb[i].page);
+       }
+       do_gettimeofday(&end);
+       timediff = cfs_timeval_sub(&end, &start, NULL);
+       lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
 
        if (iobuf->dr_npages) {
-               rc = osd->od_fsops->fs_map_inode_pages(inode, iobuf->dr_pages,
-                                                      iobuf->dr_npages,
-                                                      iobuf->dr_blocks,
-                                                      0, NULL);
+               rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
+                                                iobuf->dr_npages,
+                                                iobuf->dr_blocks,
+                                                0, NULL);
                rc = osd_do_bio(osd, inode, iobuf);
 
                /* IO stats will be done in osd_bufs_put() */
@@ -1033,36 +1305,149 @@ static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
        return rc;
 }
 
+static inline int osd_extents_enabled(struct super_block *sb,
+                                      struct inode *inode)
+{
+       if (inode != NULL) {
+               if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
+                       return 1;
+       } else if (test_opt(sb, EXTENTS)) {
+               return 1;
+       }
+       return 0;
+}
+
+static inline int osd_calc_bkmap_credits(struct super_block *sb,
+                                         struct inode *inode,
+                                         const loff_t size,
+                                         const loff_t pos,
+                                         const int blocks)
+{
+       int credits, bits, bs, i;
+
+       bits = sb->s_blocksize_bits;
+       bs = 1 << bits;
+
+       /* legacy blockmap: 3 levels * 3 (bitmap,gd,itself)
+        * we do not expect blockmaps on the large files,
+        * so let's shrink it to 2 levels (4GB files) */
+
+       /* this is default reservation: 2 levels */
+       credits = (blocks + 2) * 3;
+
+       /* actual offset is unknown, hard to optimize */
+       if (pos == -1)
+               return credits;
+
+       /* now check for few specific cases to optimize */
+       if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
+               /* no indirects */
+               credits = blocks;
+               /* allocate if not allocated */
+               if (inode == NULL) {
+                       credits += blocks * 2;
+                       return credits;
+               }
+               for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
+                       LASSERT(i < LDISKFS_NDIR_BLOCKS);
+                       if (LDISKFS_I(inode)->i_data[i] == 0)
+                               credits += 2;
+               }
+       } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
+               /* single indirect */
+               credits = blocks * 3;
+               /* probably indirect block has been allocated already */
+               if (!inode || LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK])
+                       credits += 3;
+       }
+
+       return credits;
+}
+
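As a concrete reading of osd_calc_bkmap_credits: with 4 KB blocks (bits = 12), a 16 KB write at offset 0 covers blocks 0-3, all within the direct slots (LDISKFS_NDIR_BLOCKS is 12, as on ext4), so the base is credits = 4; if the inode exists and none of i_data[0..3] is allocated yet, each block adds 2 more (bitmap plus group descriptor), giving 12. The same write landing in the single-indirect range starts from blocks * 3 = 12 plus 3 for the indirect chain, giving 15. A simplified mirror of the estimate outside the kernel (constants copied from the code above, helper name hypothetical):

    #define NDIR_BLOCKS 12   /* direct pointers, as LDISKFS_NDIR_BLOCKS */

    /* simplified mirror of osd_calc_bkmap_credits() for an existing
     * inode; `allocated` flags which direct slots already hold a block */
    static int bkmap_credits(long long pos, long long size, int bits,
                             const int *allocated /* NDIR_BLOCKS entries */)
    {
        long long bs = 1LL << bits;
        int blocks = (int)(((pos + size + bs - 1) >> bits) - (pos >> bits));
        int credits, i;

        if (pos + size <= NDIR_BLOCKS * bs) {
            credits = blocks;                 /* no indirects involved */
            for (i = (int)(pos >> bits);
                 i < (int)(pos >> bits) + blocks; i++)
                if (!allocated[i])
                    credits += 2;             /* bitmap + group descriptor */
        } else if (pos + size <= (NDIR_BLOCKS + 1024) * bs) {
            credits = blocks * 3 + 3;         /* single-indirect chain */
        } else {
            credits = (blocks + 2) * 3;       /* default: 2-level blockmap */
        }
        return credits;
    }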
 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
-                                const loff_t size, loff_t pos,
-                                struct thandle *handle)
+                                const struct lu_buf *buf, loff_t _pos,
+                                struct thandle *handle)
 {
-       struct osd_thandle *oh;
-       int credits;
-       struct inode *inode;
-       int rc;
+       struct osd_object  *obj = osd_dt_obj(dt);
+       struct inode       *inode = obj->oo_inode;
+       struct super_block *sb = osd_sb(osd_obj2dev(obj));
+       struct osd_thandle *oh;
+       int rc = 0, est = 0, credits, blocks, allocated = 0;
+       int bits, bs;
+       int depth, size;
+       loff_t pos;
        ENTRY;
 
+       LASSERT(buf != NULL);
        LASSERT(handle != NULL);
 
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_handle == NULL);
 
-       credits = osd_dto_credits_noquota[DTO_WRITE_BLOCK];
+       size = buf->lb_len;
+       bits = sb->s_blocksize_bits;
+       bs = 1 << bits;
 
-       osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
+       if (_pos == -1) {
+               /* if this is an append, then we
+                * should expect cross-block record */
+               pos = 0;
+       } else {
+               pos = _pos;
+       }
 
-       inode = osd_dt_obj(dt)->oo_inode;
+       /* blocks to modify */
+       blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
+       LASSERT(blocks > 0);
+
+       if (inode != NULL && _pos != -1) {
+               /* object size in blocks */
+               est = (i_size_read(inode) + bs - 1) >> bits;
+               allocated = inode->i_blocks >> (bits - 9);
+               if (pos + size <= i_size_read(inode) && est <= allocated) {
+                       /* looks like an overwrite, no need to modify tree */
+                       credits = blocks;
+                       /* no need to modify i_size */
+                       goto out;
+               }
+       }
 
-       /* we may declare write to non-exist llog */
-       if (inode == NULL)
-               RETURN(0);
+       if (osd_extents_enabled(sb, inode)) {
+               /*
+                * many concurrent threads may grow tree by the time
+                * our transaction starts. so, consider 2 is a min depth
+                * for every level we may need to allocate a new block
+                * and take some entries from the old one. so, 3 blocks
+                * to allocate (bitmap, gd, itself) + old block - 4 per
+                * level.
+                */
+               depth = inode != NULL ? ext_depth(inode) : 0;
+               depth = max(depth, 1) + 1;
+               credits = depth;
+               /* if not append, then split may need to modify
+                * existing blocks moving entries into the new ones */
+               if (_pos == -1)
+                       credits += depth;
+               /* blocks to store data: bitmap,gd,itself */
+               credits += blocks * 3;
+       } else {
+               credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
+       }
+       /* if inode is created as part of the transaction,
+        * then it's counted already by the creation method */
+       if (inode != NULL)
+               credits++;
+
+out:
+
+       osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
 
        /* dt_declare_write() is usually called for system objects, such
         * as llog or last_rcvd files. We needn't enforce quota on those
         * objects, so always set the lqi_space as 0. */
-       rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
-                                  true, true, NULL, false);
+       if (inode != NULL)
+               rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid,
+                                          0, oh, true, true, NULL, false);
        RETURN(rc);
 }
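For extent-mapped objects the declaration above scales with tree depth instead: at least two levels are assumed since concurrent writers may grow the tree after the estimate, the append case (_pos == -1) charges the split cost a second time, and each data block reserves bitmap, group descriptor, and the block itself. Restated as a tiny helper (a paraphrase of that branch, not a Lustre function):

    /* paraphrase of the extent branch of osd_declare_write():
     * depth-based tree credits plus 3 credits per data block */
    static int extent_write_credits(int tree_depth, int blocks, int is_append)
    {
        int depth = (tree_depth > 1 ? tree_depth : 1) + 1; /* min 2 levels */
        int credits = depth;

        if (is_append)
            credits += depth;       /* a split may touch old blocks too */
        return credits + blocks * 3; /* bitmap + gd + block, per block */
    }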
@@ -1232,38 +1617,45 @@ static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
 }
 
 static int osd_punch(const struct lu_env *env, struct dt_object *dt,
-                    __u64 start, __u64 end, struct thandle *th,
-                    struct lustre_capa *capa)
+                    __u64 start, __u64 end, struct thandle *th,
+                    struct lustre_capa *capa)
 {
-       struct osd_thandle *oh;
-       struct osd_object *obj = osd_dt_obj(dt);
-       struct inode *inode = obj->oo_inode;
-       handle_t *h;
-       tid_t tid;
-       int rc, rc2 = 0;
-       ENTRY;
+       struct osd_thandle *oh;
+       struct osd_object  *obj = osd_dt_obj(dt);
+       struct inode       *inode = obj->oo_inode;
+       handle_t           *h;
+       tid_t               tid;
+       int                 rc = 0, rc2 = 0;
+       ENTRY;
 
-       LASSERT(end == OBD_OBJECT_EOF);
-       LASSERT(dt_object_exists(dt));
-       LASSERT(osd_invariant(obj));
+       LASSERT(end == OBD_OBJECT_EOF);
+       LASSERT(dt_object_exists(dt));
+       LASSERT(osd_invariant(obj));
        LASSERT(inode != NULL);
        ll_vfs_dq_init(inode);
 
-       LASSERT(th);
-       oh = container_of(th, struct osd_thandle, ot_super);
-       LASSERT(oh->ot_handle->h_transaction != NULL);
+       LASSERT(th);
+       oh = container_of(th, struct osd_thandle, ot_super);
+       LASSERT(oh->ot_handle->h_transaction != NULL);
 
        osd_trans_exec_op(env, th, OSD_OT_PUNCH);
 
-       tid = oh->ot_handle->h_transaction->t_tid;
+       tid = oh->ot_handle->h_transaction->t_tid;
 
-       rc = vmtruncate(inode, start);
+       i_size_write(inode, start);
+       ll_truncate_pagecache(inode, start);
+#ifdef HAVE_INODEOPS_TRUNCATE
+       if (inode->i_op->truncate) {
+               inode->i_op->truncate(inode);
+       } else
+#endif
+               ldiskfs_truncate(inode);
 
-       /*
-        * For a partial-page truncate, flush the page to disk immediately to
-        * avoid data corruption during direct disk write. b=17397
-        */
-       if (rc == 0 && (start & ~CFS_PAGE_MASK) != 0)
+       /*
+        * For a partial-page truncate, flush the page to disk immediately to
+        * avoid data corruption during direct disk write. b=17397
+        */
+       if ((start & ~CFS_PAGE_MASK) != 0)
                rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
 
        h = journal_current_handle();
@@ -1301,6 +1693,7 @@ static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
        file->f_dentry = dentry;
        file->f_mapping = inode->i_mapping;
        file->f_op = inode->i_fop;
+       set_file_inode(file, inode);
 
        saved_fs = get_fs();
        set_fs(get_ds());
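The osd_punch rewrite replaces vmtruncate(), which newer kernels no longer export, with an open-coded sequence: publish the new size, drop cached pages beyond it, then let the filesystem free the blocks; a partial-page truncate is additionally flushed so a later direct write cannot resurrect stale bytes (b=17397). The ordering in outline, with compilable stubs standing in for the kernel calls used above:

    #define PAGE_SIZE 4096ULL

    struct object { unsigned long long size; };

    /* stubs standing in for i_size_write / ll_truncate_pagecache /
     * ldiskfs_truncate / filemap_fdatawrite_range */
    static void set_size(struct object *o, unsigned long long n)
    { o->size = n; }
    static void drop_page_cache(struct object *o, unsigned long long n)
    { (void)o; (void)n; }
    static void free_disk_blocks(struct object *o, unsigned long long n)
    { (void)o; (void)n; }
    static int flush_range(struct object *o, unsigned long long a,
                           unsigned long long b)
    { (void)o; (void)a; (void)b; return 0; }

    /* ordering of the vmtruncate() replacement in osd_punch() */
    static int punch_outline(struct object *obj, unsigned long long new_size)
    {
        set_size(obj, new_size);         /* 1: publish the new i_size  */
        drop_page_cache(obj, new_size);  /* 2: invalidate cached pages */
        free_disk_blocks(obj, new_size); /* 3: release on-disk blocks  */

        /* partial-page truncate: flush the boundary page now so a
         * following direct write cannot see stale data (b=17397) */
        if (new_size % PAGE_SIZE != 0)
            return flush_range(obj, new_size, new_size + 1);
        return 0;
    }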