X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fosd-ldiskfs%2Fosd_io.c;h=97655955e5af4a2a9b8524035f7dd52ae7064f57;hp=eeae0250bcf247342fdda9659c85460b271d8138;hb=020941416419ab282f3d9b694014b2059d299d51;hpb=be41e2ce0d71a707da703e6f8e82d397be839d23 diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c index eeae025..9765595 100644 --- a/lustre/osd-ldiskfs/osd_io.c +++ b/lustre/osd-ldiskfs/osd_io.c @@ -147,9 +147,14 @@ void osd_fini_iobuf(struct osd_device *d, struct osd_iobuf *iobuf) static void dio_complete_routine(struct bio *bio, int error) { - struct osd_iobuf *iobuf = bio->bi_private; - struct bio_vec *bvl; - int i; + struct osd_iobuf *iobuf = bio->bi_private; +#ifdef HAVE_BVEC_ITER + struct bvec_iter iter; + struct bio_vec bvl; +#else + int iter; + struct bio_vec *bvl; +#endif /* CAVEAT EMPTOR: possibly in IRQ context * DO NOT record procfs stats here!!! */ @@ -158,7 +163,7 @@ static void dio_complete_routine(struct bio *bio, int error) CERROR("***** bio->bi_private is NULL! This should never " "happen. Normally, I would crash here, but instead I " "will dump the bio contents to the console. Please " - "report this to , along " + "report this to , along " "with any interesting messages leading up to this point " "(like SCSI errors, perhaps). Because bi_private is " "NULL, I can't wake up the thread that initiated this " @@ -166,23 +171,23 @@ static void dio_complete_routine(struct bio *bio, int error) CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, " "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, " "bi_private: %p\n", bio->bi_next, bio->bi_flags, - bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size, - bio->bi_end_io, atomic_read(&bio->bi_cnt), - bio->bi_private); + bio->bi_rw, bio->bi_vcnt, bio_idx(bio), + bio_sectors(bio) << 9, bio->bi_end_io, + atomic_read(&bio->bi_cnt), bio->bi_private); return; } - /* the check is outside of the cycle for performance reason -bzzz */ + /* the check is outside of the cycle for performance reason -bzzz */ if (!test_bit(__REQ_WRITE, &bio->bi_rw)) { - bio_for_each_segment(bvl, bio, i) { - if (likely(error == 0)) - SetPageUptodate(bvl->bv_page); - LASSERT(PageLocked(bvl->bv_page)); - } + bio_for_each_segment(bvl, bio, iter) { + if (likely(error == 0)) + SetPageUptodate(bvec_iter_page(&bvl, iter)); + LASSERT(PageLocked(bvec_iter_page(&bvl, iter))); + } atomic_dec(&iobuf->dr_dev->od_r_in_flight); - } else { + } else { atomic_dec(&iobuf->dr_dev->od_w_in_flight); - } + } /* any real error is good enough -bzzz */ if (error != 0 && iobuf->dr_error == 0) @@ -244,13 +249,10 @@ static void osd_submit_bio(int rw, struct bio *bio) static int can_be_merged(struct bio *bio, sector_t sector) { - unsigned int size; - - if (!bio) - return 0; + if (bio == NULL) + return 0; - size = bio->bi_size >> 9; - return bio->bi_sector + size == sector ? 1 : 0; + return bio_end_sector(bio) == sector ? 1 : 0; } static int osd_do_bio(struct osd_device *osd, struct inode *inode, @@ -316,23 +318,23 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode, blocksize * nblocks, page_offset) != 0) continue; /* added this frag OK */ - if (bio != NULL) { - struct request_queue *q = - bdev_get_queue(bio->bi_bdev); - - /* Dang! 
I have to fragment this I/O */ - CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) " - "sectors %d(%d) psg %d(%d) hsg %d(%d)\n", - bio->bi_size, - bio->bi_vcnt, bio->bi_max_vecs, - bio->bi_size >> 9, queue_max_sectors(q), + if (bio != NULL) { + struct request_queue *q = + bdev_get_queue(bio->bi_bdev); + unsigned int bi_size = bio_sectors(bio) << 9; + + /* Dang! I have to fragment this I/O */ + CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) " + "sectors %d(%d) psg %d(%d) hsg %d(%d)\n", + bi_size, bio->bi_vcnt, bio->bi_max_vecs, + bio_sectors(bio), + queue_max_sectors(q), bio_phys_segments(q, bio), queue_max_phys_segments(q), 0, queue_max_hw_segments(q)); - - record_start_io(iobuf, bio->bi_size); - osd_submit_bio(iobuf->dr_rw, bio); - } + record_start_io(iobuf, bi_size); + osd_submit_bio(iobuf->dr_rw, bio); + } /* allocate new bio */ bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES, @@ -346,23 +348,23 @@ static int osd_do_bio(struct osd_device *osd, struct inode *inode, goto out; } - bio->bi_bdev = inode->i_sb->s_bdev; - bio->bi_sector = sector; + bio->bi_bdev = inode->i_sb->s_bdev; + bio_set_sector(bio, sector); bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE; - bio->bi_end_io = dio_complete_routine; - bio->bi_private = iobuf; + bio->bi_end_io = dio_complete_routine; + bio->bi_private = iobuf; - rc = bio_add_page(bio, page, - blocksize * nblocks, page_offset); - LASSERT(rc != 0); - } - } + rc = bio_add_page(bio, page, + blocksize * nblocks, page_offset); + LASSERT(rc != 0); + } + } - if (bio != NULL) { - record_start_io(iobuf, bio->bi_size); - osd_submit_bio(iobuf->dr_rw, bio); - rc = 0; - } + if (bio != NULL) { + record_start_io(iobuf, bio_sectors(bio) << 9); + osd_submit_bio(iobuf->dr_rw, bio); + rc = 0; + } out: /* in order to achieve better IO throughput, we don't wait for writes @@ -372,6 +374,7 @@ out: if (iobuf->dr_rw == 0) { wait_event(iobuf->dr_wait, atomic_read(&iobuf->dr_numreqs) == 0); + osd_fini_iobuf(osd, iobuf); } if (rc == 0) @@ -394,11 +397,11 @@ static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages, plen = len; lnb->lnb_file_offset = offset; lnb->lnb_page_offset = poff; - lnb->len = plen; - /* lb->flags = rnb->flags; */ - lnb->flags = 0; - lnb->page = NULL; - lnb->rc = 0; + lnb->lnb_len = plen; + /* lnb->lnb_flags = rnb->rnb_flags; */ + lnb->lnb_flags = 0; + lnb->lnb_page = NULL; + lnb->lnb_rc = 0; LASSERTF(plen <= len, "plen %u, len %lld\n", plen, (long long) len); @@ -457,26 +460,20 @@ int osd_bufs_get(const struct lu_env *env, struct dt_object *d, loff_t pos, osd_map_remote_to_local(pos, len, &npages, lnb); for (i = 0; i < npages; i++, lnb++) { - - /* We still set up for ungranted pages so that granted pages - * can be written to disk as they were promised, and portals - * needs to keep the pages all aligned properly. */ - lnb->dentry = (void *) obj; - - lnb->page = osd_get_page(d, lnb->lnb_file_offset, rw); - if (lnb->page == NULL) - GOTO(cleanup, rc = -ENOMEM); - - /* DLM locking protects us from write and truncate competing - * for same region, but truncate can leave dirty page in the - * cache. it's possible the writeout on a such a page is in - * progress when we access it. it's also possible that during - * this writeout we put new (partial) data, but then won't - * be able to proceed in filter_commitrw_write(). thus let's - * just wait for writeout completion, should be rare enough. 
- * -bzzz */ - wait_on_page_writeback(lnb->page); - BUG_ON(PageWriteback(lnb->page)); + lnb->lnb_page = osd_get_page(d, lnb->lnb_file_offset, rw); + if (lnb->lnb_page == NULL) + GOTO(cleanup, rc = -ENOMEM); + + /* DLM locking protects us from write and truncate competing + * for same region, but truncate can leave dirty page in the + * cache. it's possible the writeout on a such a page is in + * progress when we access it. it's also possible that during + * this writeout we put new (partial) data, but then won't + * be able to proceed in filter_commitrw_write(). thus let's + * just wait for writeout completion, should be rare enough. + * -bzzz */ + wait_on_page_writeback(lnb->lnb_page); + BUG_ON(PageWriteback(lnb->lnb_page)); lu_object_get(&d->do_lu); } @@ -487,29 +484,25 @@ cleanup: } static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt, - struct niobuf_local *lnb, int npages) + struct niobuf_local *lnb, int npages) { - struct osd_thread_info *oti = osd_oti_get(env); - struct osd_iobuf *iobuf = &oti->oti_iobuf; - struct osd_device *d = osd_obj2dev(osd_dt_obj(dt)); - int i; + int i; - /* to do IO stats, notice we do this here because - * osd_do_bio() doesn't wait for write to complete */ - osd_fini_iobuf(d, iobuf); + for (i = 0; i < npages; i++) { + if (lnb[i].lnb_page == NULL) + continue; + LASSERT(PageLocked(lnb[i].lnb_page)); + unlock_page(lnb[i].lnb_page); + page_cache_release(lnb[i].lnb_page); + lu_object_put(env, &dt->do_lu); + lnb[i].lnb_page = NULL; + } - for (i = 0; i < npages; i++) { - if (lnb[i].page == NULL) - continue; - LASSERT(PageLocked(lnb[i].page)); - unlock_page(lnb[i].page); - page_cache_release(lnb[i].page); - lu_object_put(env, &dt->do_lu); - lnb[i].page = NULL; - } - RETURN(0); + RETURN(0); } +#ifndef HAVE_LDISKFS_MAP_BLOCKS + #ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */ #define ldiskfs_ext_pblock(ex) ext_pblock((ex)) #endif @@ -629,7 +622,8 @@ static int ldiskfs_ext_new_extent_cb(struct inode *inode, tgen = LDISKFS_I(inode)->i_ext_generation; count = ldiskfs_ext_calc_credits_for_insert(inode, path); - handle = ldiskfs_journal_start(inode, count + LDISKFS_ALLOC_NEEDED + 1); + handle = osd_journal_start(inode, LDISKFS_HT_MISC, + count + LDISKFS_ALLOC_NEEDED + 1); if (IS_ERR(handle)) { return PTR_ERR(handle); } @@ -670,11 +664,12 @@ static int ldiskfs_ext_new_extent_cb(struct inode *inode, * but otherwise we'd need to call it every free() */ ldiskfs_discard_preallocations(inode); #ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */ - ldiskfs_free_blocks(handle, inode, NULL, ldiskfs_ext_pblock(&nex), - cpu_to_le16(nex.ee_len), 0); + ldiskfs_free_blocks(handle, inode, NULL, + ldiskfs_ext_pblock(&nex), + le16_to_cpu(nex.ee_len), 0); #else ldiskfs_free_blocks(handle, inode, ldiskfs_ext_pblock(&nex), - cpu_to_le16(nex.ee_len), 0); + le16_to_cpu(nex.ee_len), 0); #endif goto out; } @@ -758,6 +753,26 @@ int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long block, return err; } +int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page, + int pages, unsigned long *blocks, + int create) +{ + int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; + unsigned long *b; + int rc = 0, i; + + for (i = 0, b = blocks; i < pages; i++, page++) { + rc = ldiskfs_map_inode_page(inode, *page, b, create); + if (rc) { + CERROR("ino %lu, blk %lu create %d: rc %d\n", + inode->i_ino, *b, create, rc); + break; + } + b += blocks_per_page; + } + return rc; +} + int 
osd_ldiskfs_map_ext_inode_pages(struct inode *inode, struct page **page, int pages, unsigned long *blocks, int create) @@ -807,30 +822,9 @@ cleanup: return rc; } -int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page, - int pages, unsigned long *blocks, - int create) -{ - int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; - unsigned long *b; - int rc = 0, i; - - for (i = 0, b = blocks; i < pages; i++, page++) { - rc = ldiskfs_map_inode_page(inode, *page, b, create); - if (rc) { - CERROR("ino %lu, blk %lu create %d: rc %d\n", - inode->i_ino, *b, create, rc); - break; - } - - b += blocks_per_page; - } - return rc; -} - static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page, int pages, unsigned long *blocks, - int create, struct mutex *optional_mutex) + int create) { int rc; @@ -839,14 +833,89 @@ static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page, blocks, create); return rc; } - if (optional_mutex != NULL) - mutex_lock(optional_mutex); rc = osd_ldiskfs_map_bm_inode_pages(inode, page, pages, blocks, create); - if (optional_mutex != NULL) - mutex_unlock(optional_mutex); return rc; } +#else +static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page, + int pages, unsigned long *blocks, + int create) +{ + int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; + int rc = 0, i = 0; + struct page *fp = NULL; + int clen = 0; + + CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n", + inode->i_ino, pages, (*page)->index); + + /* pages are sorted already. so, we just have to find + * contig. space and process them properly */ + while (i < pages) { + long blen, total = 0; + handle_t *handle = NULL; + struct ldiskfs_map_blocks map = { 0 }; + + if (fp == NULL) { /* start new extent */ + fp = *page++; + clen = 1; + if (++i != pages) + continue; + } else if (fp->index + clen == (*page)->index) { + /* continue the extent */ + page++; + clen++; + if (++i != pages) + continue; + } + /* process found extent */ + map.m_lblk = fp->index * blocks_per_page; + map.m_len = blen = clen * blocks_per_page; + if (create) { + create = LDISKFS_GET_BLOCKS_CREATE; + handle = ldiskfs_journal_current_handle(); + LASSERT(handle != NULL); + } +cont_map: + rc = ldiskfs_map_blocks(handle, inode, &map, create); + if (rc >= 0) { + int c = 0; + for (; total < blen && c < map.m_len; c++, total++) { + if (rc == 0) { + *(blocks + total) = 0; + total++; + break; + } else { + *(blocks + total) = map.m_pblk + c; + /* unmap any possible underlying + * metadata from the block device + * mapping. bug 6998. 
*/ + if ((map.m_flags & LDISKFS_MAP_NEW) && + create) + unmap_underlying_metadata( + inode->i_sb->s_bdev, + map.m_pblk + c); + } + } + rc = 0; + } + if (rc == 0 && total < blen) { + map.m_lblk = fp->index * blocks_per_page + total; + map.m_len = blen - total; + goto cont_map; + } + if (rc != 0) + GOTO(cleanup, rc); + + /* look for next extent */ + fp = NULL; + blocks += blocks_per_page * clen; + } +cleanup: + return rc; +} +#endif /* HAVE_LDISKFS_MAP_BLOCKS */ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, struct niobuf_local *lnb, int npages) @@ -881,34 +950,34 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, do_gettimeofday(&start); for (i = 0; i < npages; i++) { - if (cache == 0) - generic_error_remove_page(inode->i_mapping, - lnb[i].page); + if (cache == 0) + generic_error_remove_page(inode->i_mapping, + lnb[i].lnb_page); - /* - * till commit the content of the page is undefined - * we'll set it uptodate once bulk is done. otherwise - * subsequent reads can access non-stable data - */ - ClearPageUptodate(lnb[i].page); + /* + * till commit the content of the page is undefined + * we'll set it uptodate once bulk is done. otherwise + * subsequent reads can access non-stable data + */ + ClearPageUptodate(lnb[i].lnb_page); - if (lnb[i].len == PAGE_CACHE_SIZE) - continue; + if (lnb[i].lnb_len == PAGE_CACHE_SIZE) + continue; - if (maxidx >= lnb[i].page->index) { - osd_iobuf_add_page(iobuf, lnb[i].page); - } else { - long off; - char *p = kmap(lnb[i].page); + if (maxidx >= lnb[i].lnb_page->index) { + osd_iobuf_add_page(iobuf, lnb[i].lnb_page); + } else { + long off; + char *p = kmap(lnb[i].lnb_page); off = lnb[i].lnb_page_offset; if (off) memset(p, 0, off); - off = (lnb[i].lnb_page_offset + lnb[i].len) & + off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) & ~CFS_PAGE_MASK; - if (off) + if (off) memset(p + off, 0, PAGE_CACHE_SIZE - off); - kunmap(lnb[i].page); + kunmap(lnb[i].lnb_page); } } do_gettimeofday(&end); @@ -918,8 +987,7 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, if (iobuf->dr_npages) { rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages, iobuf->dr_npages, - iobuf->dr_blocks, - 0, NULL); + iobuf->dr_blocks, 0); if (likely(rc == 0)) { rc = osd_do_bio(osd, inode, iobuf); /* do IO stats for preparation reads */ @@ -981,7 +1049,7 @@ static int osd_declare_write_commit(const struct lu_env *env, /* calculate number of extents (probably better to pass nb) */ for (i = 0; i < npages; i++) { if (i && lnb[i].lnb_file_offset != - lnb[i - 1].lnb_file_offset + lnb[i - 1].len) + lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len) extents++; if (!osd_is_mapped(inode, lnb[i].lnb_file_offset)) @@ -996,8 +1064,8 @@ static int osd_declare_write_commit(const struct lu_env *env, * * XXX we could handle this on per-lnb basis as done by * grant. 
*/ - if ((lnb[i].flags & OBD_BRW_NOQUOTA) || - (lnb[i].flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) == + if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) || + (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) == OBD_BRW_FROM_GRANT) ignore_quota = true; } @@ -1045,19 +1113,19 @@ static int osd_declare_write_commit(const struct lu_env *env, oh->ot_credits += newblocks; /* make sure the over quota flags were not set */ - lnb[0].flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA); + lnb[0].lnb_flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA); - rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, - quota_space, oh, true, true, &flags, - ignore_quota); + rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode), + quota_space, oh, osd_dt_obj(dt), true, + &flags, ignore_quota); /* we need only to store the overquota flags in the first lnb for * now, once we support multiple objects BRW, this code needs be * revised. */ if (flags & QUOTA_FL_OVER_USRQUOTA) - lnb[0].flags |= OBD_BRW_OVER_USRQUOTA; + lnb[0].lnb_flags |= OBD_BRW_OVER_USRQUOTA; if (flags & QUOTA_FL_OVER_GRPQUOTA) - lnb[0].flags |= OBD_BRW_OVER_GRPQUOTA; + lnb[0].lnb_flags |= OBD_BRW_OVER_GRPQUOTA; RETURN(rc); } @@ -1084,37 +1152,38 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt, ll_vfs_dq_init(inode); for (i = 0; i < npages; i++) { - if (lnb[i].rc == -ENOSPC && + if (lnb[i].lnb_rc == -ENOSPC && osd_is_mapped(inode, lnb[i].lnb_file_offset)) { - /* Allow the write to proceed if overwriting an - * existing block */ - lnb[i].rc = 0; - } + /* Allow the write to proceed if overwriting an + * existing block */ + lnb[i].lnb_rc = 0; + } - if (lnb[i].rc) { /* ENOSPC, network RPC error, etc. */ - CDEBUG(D_INODE, "Skipping [%d] == %d\n", i, - lnb[i].rc); - LASSERT(lnb[i].page); - generic_error_remove_page(inode->i_mapping,lnb[i].page); - continue; - } + if (lnb[i].lnb_rc) { /* ENOSPC, network RPC error, etc. */ + CDEBUG(D_INODE, "Skipping [%d] == %d\n", i, + lnb[i].lnb_rc); + LASSERT(lnb[i].lnb_page); + generic_error_remove_page(inode->i_mapping, + lnb[i].lnb_page); + continue; + } - LASSERT(PageLocked(lnb[i].page)); - LASSERT(!PageWriteback(lnb[i].page)); + LASSERT(PageLocked(lnb[i].lnb_page)); + LASSERT(!PageWriteback(lnb[i].lnb_page)); - if (lnb[i].lnb_file_offset + lnb[i].len > isize) - isize = lnb[i].lnb_file_offset + lnb[i].len; + if (lnb[i].lnb_file_offset + lnb[i].lnb_len > isize) + isize = lnb[i].lnb_file_offset + lnb[i].lnb_len; - /* - * Since write and truncate are serialized by oo_sem, even - * partial-page truncate should not leave dirty pages in the - * page cache. - */ - LASSERT(!PageDirty(lnb[i].page)); + /* + * Since write and truncate are serialized by oo_sem, even + * partial-page truncate should not leave dirty pages in the + * page cache. 
+ */ + LASSERT(!PageDirty(lnb[i].lnb_page)); - SetPageUptodate(lnb[i].page); + SetPageUptodate(lnb[i].lnb_page); - osd_iobuf_add_page(iobuf, lnb[i].page); + osd_iobuf_add_page(iobuf, lnb[i].lnb_page); } if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) { @@ -1122,8 +1191,7 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt, } else if (iobuf->dr_npages > 0) { rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages, iobuf->dr_npages, - iobuf->dr_blocks, - 1, NULL); + iobuf->dr_blocks, 1); } else { /* no pages to write, no transno is needed */ thandle->th_local = 1; @@ -1139,19 +1207,22 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt, rc = osd_do_bio(osd, inode, iobuf); /* we don't do stats here as in read path because * write is async: we'll do this in osd_put_bufs() */ - } + } else { + osd_fini_iobuf(osd, iobuf); + } if (unlikely(rc != 0)) { /* if write fails, we should drop pages from the cache */ for (i = 0; i < npages; i++) { - if (lnb[i].page == NULL) - continue; - LASSERT(PageLocked(lnb[i].page)); - generic_error_remove_page(inode->i_mapping,lnb[i].page); - } - } + if (lnb[i].lnb_page == NULL) + continue; + LASSERT(PageLocked(lnb[i].lnb_page)); + generic_error_remove_page(inode->i_mapping, + lnb[i].lnb_page); + } + } - RETURN(rc); + RETURN(rc); } static int osd_read_prep(const struct lu_env *env, struct dt_object *dt, @@ -1163,7 +1234,7 @@ static int osd_read_prep(const struct lu_env *env, struct dt_object *dt, struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt)); struct timeval start, end; unsigned long timediff; - int rc = 0, i, m = 0, cache = 0; + int rc = 0, i, m = 0, cache = 0, cache_hits = 0, cache_misses = 0; LASSERT(inode); @@ -1180,38 +1251,47 @@ static int osd_read_prep(const struct lu_env *env, struct dt_object *dt, for (i = 0; i < npages; i++) { if (i_size_read(inode) <= lnb[i].lnb_file_offset) - /* If there's no more data, abort early. - * lnb->rc == 0, so it's easy to detect later. */ - break; + /* If there's no more data, abort early. + * lnb->lnb_rc == 0, so it's easy to detect later. 
*/ + break; + + if (i_size_read(inode) < + lnb[i].lnb_file_offset + lnb[i].lnb_len - 1) + lnb[i].lnb_rc = i_size_read(inode) - + lnb[i].lnb_file_offset; + else + lnb[i].lnb_rc = lnb[i].lnb_len; + m += lnb[i].lnb_len; + + if (PageUptodate(lnb[i].lnb_page)) { + cache_hits++; + } else { + cache_misses++; + osd_iobuf_add_page(iobuf, lnb[i].lnb_page); + } - if (i_size_read(inode) < - lnb[i].lnb_file_offset + lnb[i].len - 1) - lnb[i].rc = i_size_read(inode) - lnb[i].lnb_file_offset; - else - lnb[i].rc = lnb[i].len; - m += lnb[i].len; - - lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS, 1); - if (PageUptodate(lnb[i].page)) { - lprocfs_counter_add(osd->od_stats, - LPROC_OSD_CACHE_HIT, 1); - } else { - lprocfs_counter_add(osd->od_stats, - LPROC_OSD_CACHE_MISS, 1); - osd_iobuf_add_page(iobuf, lnb[i].page); - } if (cache == 0) - generic_error_remove_page(inode->i_mapping,lnb[i].page); + generic_error_remove_page(inode->i_mapping, + lnb[i].lnb_page); } do_gettimeofday(&end); timediff = cfs_timeval_sub(&end, &start, NULL); lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff); + if (cache_hits != 0) + lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_HIT, + cache_hits); + if (cache_misses != 0) + lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_MISS, + cache_misses); + if (cache_hits + cache_misses != 0) + lprocfs_counter_add(osd->od_stats, LPROC_OSD_CACHE_ACCESS, + cache_hits + cache_misses); + if (iobuf->dr_npages) { rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages, iobuf->dr_npages, - iobuf->dr_blocks, - 0, NULL); + iobuf->dr_blocks, 0); rc = osd_do_bio(osd, inode, iobuf); /* IO stats will be done in osd_bufs_put() */ @@ -1314,36 +1394,150 @@ static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt, return rc; } +static inline int osd_extents_enabled(struct super_block *sb, + struct inode *inode) +{ + if (inode != NULL) { + if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) + return 1; + } else if (test_opt(sb, EXTENTS)) { + return 1; + } + return 0; +} + +static inline int osd_calc_bkmap_credits(struct super_block *sb, + struct inode *inode, + const loff_t size, + const loff_t pos, + const int blocks) +{ + int credits, bits, bs, i; + + bits = sb->s_blocksize_bits; + bs = 1 << bits; + + /* legacy blockmap: 3 levels * 3 (bitmap,gd,itself) + * we do not expect blockmaps on the large files, + * so let's shrink it to 2 levels (4GB files) */ + + /* this is default reservation: 2 levels */ + credits = (blocks + 2) * 3; + + /* actual offset is unknown, hard to optimize */ + if (pos == -1) + return credits; + + /* now check for few specific cases to optimize */ + if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) { + /* no indirects */ + credits = blocks; + /* allocate if not allocated */ + if (inode == NULL) { + credits += blocks * 2; + return credits; + } + for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) { + LASSERT(i < LDISKFS_NDIR_BLOCKS); + if (LDISKFS_I(inode)->i_data[i] == 0) + credits += 2; + } + } else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) { + /* single indirect */ + credits = blocks * 3; + /* probably indirect block has been allocated already */ + if (!inode || LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK]) + credits += 3; + } + + return credits; +} + static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt, - const loff_t size, loff_t pos, - struct thandle *handle) + const struct lu_buf *buf, loff_t _pos, + struct thandle *handle) { - struct osd_thandle *oh; - int credits; - struct inode *inode; - int rc; + struct 
osd_object *obj = osd_dt_obj(dt); + struct inode *inode = obj->oo_inode; + struct super_block *sb = osd_sb(osd_obj2dev(obj)); + struct osd_thandle *oh; + int rc = 0, est = 0, credits, blocks, allocated = 0; + int bits, bs; + int depth, size; + loff_t pos; ENTRY; + LASSERT(buf != NULL); LASSERT(handle != NULL); oh = container_of0(handle, struct osd_thandle, ot_super); LASSERT(oh->ot_handle == NULL); - credits = osd_dto_credits_noquota[DTO_WRITE_BLOCK]; + size = buf->lb_len; + bits = sb->s_blocksize_bits; + bs = 1 << bits; - osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits); + if (_pos == -1) { + /* if this is an append, then we + * should expect cross-block record */ + pos = 0; + } else { + pos = _pos; + } - inode = osd_dt_obj(dt)->oo_inode; + /* blocks to modify */ + blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits); + LASSERT(blocks > 0); + + if (inode != NULL && _pos != -1) { + /* object size in blocks */ + est = (i_size_read(inode) + bs - 1) >> bits; + allocated = inode->i_blocks >> (bits - 9); + if (pos + size <= i_size_read(inode) && est <= allocated) { + /* looks like an overwrite, no need to modify tree */ + credits = blocks; + /* no need to modify i_size */ + goto out; + } + } + + if (osd_extents_enabled(sb, inode)) { + /* + * many concurrent threads may grow tree by the time + * our transaction starts. so, consider 2 is a min depth + * for every level we may need to allocate a new block + * and take some entries from the old one. so, 3 blocks + * to allocate (bitmap, gd, itself) + old block - 4 per + * level. + */ + depth = inode != NULL ? ext_depth(inode) : 0; + depth = max(depth, 1) + 1; + credits = depth; + /* if not append, then split may need to modify + * existing blocks moving entries into the new ones */ + if (_pos == -1) + credits += depth; + /* blocks to store data: bitmap,gd,itself */ + credits += blocks * 3; + } else { + credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks); + } + /* if inode is created as part of the transaction, + * then it's counted already by the creation method */ + if (inode != NULL) + credits++; + +out: - /* we may declare write to non-exist llog */ - if (inode == NULL) - RETURN(0); + osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits); /* dt_declare_write() is usually called for system objects, such * as llog or last_rcvd files. We needn't enforce quota on those * objects, so always set the lqi_space as 0. 
*/ - rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh, - true, true, NULL, false); + if (inode != NULL) + rc = osd_declare_inode_qid(env, i_uid_read(inode), + i_gid_read(inode), 0, oh, obj, true, + NULL, false); RETURN(rc); } @@ -1403,9 +1597,9 @@ int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize, err); break; } - LASSERTF(boffs + size <= bh->b_size, - "boffs %d size %d bh->b_size %lu", - boffs, size, (unsigned long)bh->b_size); + LASSERTF(boffs + size <= bh->b_size, + "boffs %d size %d bh->b_size %lu\n", + boffs, size, (unsigned long)bh->b_size); memcpy(bh->b_data + boffs, buf, size); err = ldiskfs_journal_dirty_metadata(handle, bh); if (err) @@ -1507,54 +1701,50 @@ static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt, inode = osd_dt_obj(dt)->oo_inode; LASSERT(inode); - rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh, - true, true, NULL, false); + rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode), + 0, oh, osd_dt_obj(dt), true, NULL, false); RETURN(rc); } static int osd_punch(const struct lu_env *env, struct dt_object *dt, - __u64 start, __u64 end, struct thandle *th, - struct lustre_capa *capa) + __u64 start, __u64 end, struct thandle *th, + struct lustre_capa *capa) { - struct osd_thandle *oh; - struct osd_object *obj = osd_dt_obj(dt); - struct inode *inode = obj->oo_inode; - handle_t *h; - tid_t tid; + struct osd_thandle *oh; + struct osd_object *obj = osd_dt_obj(dt); + struct inode *inode = obj->oo_inode; + handle_t *h; + tid_t tid; int rc = 0, rc2 = 0; - ENTRY; + ENTRY; - LASSERT(end == OBD_OBJECT_EOF); - LASSERT(dt_object_exists(dt)); - LASSERT(osd_invariant(obj)); + LASSERT(end == OBD_OBJECT_EOF); + LASSERT(dt_object_exists(dt)); + LASSERT(osd_invariant(obj)); LASSERT(inode != NULL); ll_vfs_dq_init(inode); - LASSERT(th); - oh = container_of(th, struct osd_thandle, ot_super); - LASSERT(oh->ot_handle->h_transaction != NULL); + LASSERT(th); + oh = container_of(th, struct osd_thandle, ot_super); + LASSERT(oh->ot_handle->h_transaction != NULL); osd_trans_exec_op(env, th, OSD_OT_PUNCH); - tid = oh->ot_handle->h_transaction->t_tid; + tid = oh->ot_handle->h_transaction->t_tid; i_size_write(inode, start); ll_truncate_pagecache(inode, start); #ifdef HAVE_INODEOPS_TRUNCATE - if (inode->i_op->truncate) + if (inode->i_op->truncate) { inode->i_op->truncate(inode); -#else - if (!(inode->i_state & (I_NEW|I_FREEING))) - mutex_lock(&inode->i_mutex); - ldiskfs_truncate(inode); - if (!(inode->i_state & (I_NEW|I_FREEING))) - mutex_unlock(&inode->i_mutex); + } else #endif + ldiskfs_truncate(inode); - /* - * For a partial-page truncate, flush the page to disk immediately to - * avoid data corruption during direct disk write. b=17397 - */ + /* + * For a partial-page truncate, flush the page to disk immediately to + * avoid data corruption during direct disk write. b=17397 + */ if ((start & ~CFS_PAGE_MASK) != 0) rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
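
The bulk of this patch is compatibility work for the kernel 3.14 immutable-biovec rework, which dropped bio->bi_idx and bio->bi_size in favour of struct bvec_iter and made bio_for_each_segment() yield a struct bio_vec by value instead of by pointer. The dio_complete_routine() hunk above switches on a HAVE_BVEC_ITER guard for exactly this. Below is a minimal sketch of the idiom, assuming HAVE_BVEC_ITER comes from a configure-time probe; the helper name is hypothetical, BUG_ON stands in for Lustre's LASSERT, and bv_page is read directly rather than through the bvec_iter_page() compat wrapper the patch itself uses:

#include <linux/bio.h>
#include <linux/mm.h>	/* PageLocked(), SetPageUptodate() */

/* Sketch mirroring the read-completion loop in dio_complete_routine(). */
static void mark_bio_pages_uptodate(struct bio *bio, int error)
{
#ifdef HAVE_BVEC_ITER
	struct bvec_iter iter;
	struct bio_vec bvl;	/* segments come back by value on >= 3.14 */

	bio_for_each_segment(bvl, bio, iter) {
		if (likely(error == 0))
			SetPageUptodate(bvl.bv_page);
		BUG_ON(!PageLocked(bvl.bv_page));
	}
#else
	struct bio_vec *bvl;	/* by pointer on older kernels */
	int i;

	bio_for_each_segment(bvl, bio, i) {
		if (likely(error == 0))
			SetPageUptodate(bvl->bv_page);
		BUG_ON(!PageLocked(bvl->bv_page));
	}
#endif
}

The same boundary explains the bio_idx()/bio_sectors()/bio_end_sector()/bio_set_sector() substitutions elsewhere in the patch: every direct touch of bi_idx, bi_size, or bi_sector is routed through an accessor that compiles on both sides of the 3.14 API change.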
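
To make the new osd_declare_write() credit estimate concrete, here is a worked example under its extent branch (illustrative numbers, not taken from the patch): a 64KiB non-append write at pos 0 with a 4096-byte blocksize touches blocks = ((0 + 65536 + 4095) >> 12) - (0 >> 12) = 16 blocks. For an existing extent-mapped inode with ext_depth() == 1, depth = max(1, 1) + 1 = 2, so the declaration reserves credits = 2 (tree growth) + 16 * 3 (bitmap, group descriptor, and the block itself, per data block) + 1 (the inode update) = 51. If the same range already lies within i_size and is fully allocated, the overwrite shortcut drops this to credits = blocks = 16, which is why est and allocated are computed before either branch is walked.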