static void dio_complete_routine(struct bio *bio, int error)
{
- struct osd_iobuf *iobuf = bio->bi_private;
- struct bio_vec *bvl;
- int i;
+ struct osd_iobuf *iobuf = bio->bi_private;
+#ifdef HAVE_BVEC_ITER
+ struct bvec_iter iter;
+ struct bio_vec bvl;
+#else
+ int iter;
+ struct bio_vec *bvl;
+#endif
/* CAVEAT EMPTOR: possibly in IRQ context
* DO NOT record procfs stats here!!! */
CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
"bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
"bi_private: %p\n", bio->bi_next, bio->bi_flags,
- bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- bio->bi_end_io, atomic_read(&bio->bi_cnt),
- bio->bi_private);
+ bio->bi_rw, bio->bi_vcnt, bio_idx(bio),
+ bio_sectors(bio) << 9, bio->bi_end_io,
+ atomic_read(&bio->bi_cnt), bio->bi_private);
return;
}
- /* the check is outside of the cycle for performance reason -bzzz */
+ /* the check is outside of the cycle for performance reason -bzzz */
if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
- bio_for_each_segment(bvl, bio, i) {
- if (likely(error == 0))
- SetPageUptodate(bvl->bv_page);
- LASSERT(PageLocked(bvl->bv_page));
- }
+ bio_for_each_segment(bvl, bio, iter) {
+ if (likely(error == 0))
+ SetPageUptodate(bvec_iter_page(&bvl, iter));
+ LASSERT(PageLocked(bvec_iter_page(&bvl, iter)));
+ }
atomic_dec(&iobuf->dr_dev->od_r_in_flight);
- } else {
+ } else {
atomic_dec(&iobuf->dr_dev->od_w_in_flight);
- }
+ }
/* any real error is good enough -bzzz */
if (error != 0 && iobuf->dr_error == 0)
+/* Return 1 iff @sector immediately follows the last sector covered by
+ * @bio (i.e. the next fragment can be appended to this bio rather than
+ * starting a new one); 0 otherwise, including when @bio is NULL. */
static int can_be_merged(struct bio *bio, sector_t sector)
{
-	unsigned int size;
-
-	if (!bio)
-		return 0;
+	if (bio == NULL)
+		return 0;
-	size = bio->bi_size >> 9;
-	return bio->bi_sector + size == sector ? 1 : 0;
+	/* bio_end_sector() is bi_sector + bio_sectors(bio) — the same value
+	 * as the open-coded arithmetic being removed above, but valid on
+	 * both pre- and post-bvec_iter kernels. */
+	return bio_end_sector(bio) == sector ? 1 : 0;
}
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
blocksize * nblocks, page_offset) != 0)
continue; /* added this frag OK */
- if (bio != NULL) {
- struct request_queue *q =
- bdev_get_queue(bio->bi_bdev);
-
- /* Dang! I have to fragment this I/O */
- CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
- "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
- bio->bi_size,
- bio->bi_vcnt, bio->bi_max_vecs,
- bio->bi_size >> 9, queue_max_sectors(q),
+ if (bio != NULL) {
+ struct request_queue *q =
+ bdev_get_queue(bio->bi_bdev);
+ unsigned int bi_size = bio_sectors(bio) << 9;
+
+ /* Dang! I have to fragment this I/O */
+ CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
+ "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
+ bi_size, bio->bi_vcnt, bio->bi_max_vecs,
+ bio_sectors(bio),
+ queue_max_sectors(q),
bio_phys_segments(q, bio),
queue_max_phys_segments(q),
0, queue_max_hw_segments(q));
-
- record_start_io(iobuf, bio->bi_size);
- osd_submit_bio(iobuf->dr_rw, bio);
- }
+ record_start_io(iobuf, bi_size);
+ osd_submit_bio(iobuf->dr_rw, bio);
+ }
/* allocate new bio */
bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
goto out;
}
- bio->bi_bdev = inode->i_sb->s_bdev;
- bio->bi_sector = sector;
+ bio->bi_bdev = inode->i_sb->s_bdev;
+ bio_set_sector(bio, sector);
bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
- bio->bi_end_io = dio_complete_routine;
- bio->bi_private = iobuf;
+ bio->bi_end_io = dio_complete_routine;
+ bio->bi_private = iobuf;
- rc = bio_add_page(bio, page,
- blocksize * nblocks, page_offset);
- LASSERT(rc != 0);
- }
- }
+ rc = bio_add_page(bio, page,
+ blocksize * nblocks, page_offset);
+ LASSERT(rc != 0);
+ }
+ }
- if (bio != NULL) {
- record_start_io(iobuf, bio->bi_size);
- osd_submit_bio(iobuf->dr_rw, bio);
- rc = 0;
- }
+ if (bio != NULL) {
+ record_start_io(iobuf, bio_sectors(bio) << 9);
+ osd_submit_bio(iobuf->dr_rw, bio);
+ rc = 0;
+ }
out:
/* in order to achieve better IO throughput, we don't wait for writes
RETURN(0);
}
+#ifndef HAVE_LDISKFS_MAP_BLOCKS
+
#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif
tgen = LDISKFS_I(inode)->i_ext_generation;
count = ldiskfs_ext_calc_credits_for_insert(inode, path);
- handle = ldiskfs_journal_start(inode, count + LDISKFS_ALLOC_NEEDED + 1);
+ handle = osd_journal_start(inode, LDISKFS_HT_MISC,
+ count + LDISKFS_ALLOC_NEEDED + 1);
if (IS_ERR(handle)) {
return PTR_ERR(handle);
}
return err;
}
+/*
+ * Map @pages to disk blocks through the block-mapped (non-extent) inode
+ * path: one ldiskfs_map_inode_page() call per page, each presumably
+ * filling blocks_per_page consecutive entries of @blocks — confirm
+ * against ldiskfs_map_inode_page().  @create requests allocation of
+ * missing blocks.  Returns 0 on success, or the first per-page error;
+ * on error the entries for the remaining pages are left untouched.
+ */
+int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page,
+				   int pages, unsigned long *blocks,
+				   int create)
+{
+	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+	unsigned long *b;
+	int rc = 0, i;
+
+	for (i = 0, b = blocks; i < pages; i++, page++) {
+		rc = ldiskfs_map_inode_page(inode, *page, b, create);
+		if (rc) {
+			CERROR("ino %lu, blk %lu create %d: rc %d\n",
+			       inode->i_ino, *b, create, rc);
+			break;
+		}
+		/* advance the output cursor past this page's entries */
+		b += blocks_per_page;
+	}
+	return rc;
+}
+
int osd_ldiskfs_map_ext_inode_pages(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int create)
return rc;
}
-int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create)
-{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
- unsigned long *b;
- int rc = 0, i;
-
- for (i = 0, b = blocks; i < pages; i++, page++) {
- rc = ldiskfs_map_inode_page(inode, *page, b, create);
- if (rc) {
- CERROR("ino %lu, blk %lu create %d: rc %d\n",
- inode->i_ino, *b, create, rc);
- break;
- }
-
- b += blocks_per_page;
- }
- return rc;
-}
-
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int create)
return rc;
}
+#else
+/*
+ * HAVE_LDISKFS_MAP_BLOCKS variant: map the blocks backing
+ * @pages[0..pages-1] into @blocks by coalescing runs of pages with
+ * contiguous page->index and issuing one ldiskfs_map_blocks() call per
+ * run (retrying via cont_map when a run is only partially mapped).
+ * @pages must arrive sorted by index (asserted only by the comment
+ * below, not checked).  When @create is non-zero the caller must be
+ * inside a journal handle — LASSERTed via
+ * ldiskfs_journal_current_handle().  Returns 0 on success or the
+ * negative error from ldiskfs_map_blocks().
+ */
+static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
+				       int pages, unsigned long *blocks,
+				       int create)
+{
+	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+	int rc = 0, i = 0;
+	struct page *fp = NULL;
+	int clen = 0;
+
+	CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
+	       inode->i_ino, pages, (*page)->index);
+
+	/* pages are sorted already. so, we just have to find
+	 * contig. space and process them properly */
+	while (i < pages) {
+		long blen, total = 0;
+		handle_t *handle = NULL;
+		struct ldiskfs_map_blocks map = { 0 };
+
+		if (fp == NULL) { /* start new extent */
+			fp = *page++;
+			clen = 1;
+			if (++i != pages)
+				continue;
+		} else if (fp->index + clen == (*page)->index) {
+			/* continue the extent */
+			page++;
+			clen++;
+			if (++i != pages)
+				continue;
+		}
+		/* process found extent; note: when the extent broke because
+		 * of a non-contiguous index, i and *page were NOT advanced,
+		 * so the next iteration restarts from the current page */
+		map.m_lblk = fp->index * blocks_per_page;
+		map.m_len = blen = clen * blocks_per_page;
+		if (create) {
+			/* create is deliberately overwritten with the flag
+			 * value; it stays non-zero, so later iterations still
+			 * take this branch */
+			create = LDISKFS_GET_BLOCKS_CREATE;
+			handle = ldiskfs_journal_current_handle();
+			LASSERT(handle != NULL);
+		}
+cont_map:
+		/* NOTE(review): rc > 0 is presumably the number of blocks
+		 * mapped and rc == 0 a hole, matching ext4_map_blocks() —
+		 * confirm against the ldiskfs/ext4 version in use */
+		rc = ldiskfs_map_blocks(handle, inode, &map, create);
+		if (rc >= 0) {
+			int c = 0;
+			for (; total < blen && c < map.m_len; c++, total++) {
+				if (rc == 0) {
+					/* hole: record a single zero block,
+					 * then fall through to cont_map to
+					 * remap the remainder of the run */
+					*(blocks + total) = 0;
+					total++;
+					break;
+				} else {
+					*(blocks + total) = map.m_pblk + c;
+					/* unmap any possible underlying
+					 * metadata from the block device
+					 * mapping. bug 6998. */
+					if ((map.m_flags & LDISKFS_MAP_NEW) &&
+					    create)
+						unmap_underlying_metadata(
+							inode->i_sb->s_bdev,
+							map.m_pblk + c);
+				}
+			}
+			rc = 0;
+		}
+		/* partially mapped run: continue mapping from the first
+		 * block not yet filled in */
+		if (rc == 0 && total < blen) {
+			map.m_lblk = fp->index * blocks_per_page + total;
+			map.m_len = blen - total;
+			goto cont_map;
+		}
+		if (rc != 0)
+			GOTO(cleanup, rc);
+
+		/* look for next extent */
+		fp = NULL;
+		blocks += blocks_per_page * clen;
+	}
+cleanup:
+	return rc;
+}
+#endif /* HAVE_LDISKFS_MAP_BLOCKS */
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
struct niobuf_local *lnb, int npages)
lnb[0].lnb_flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- quota_space, oh, true, true, &flags,
- ignore_quota);
+ quota_space, oh, osd_dt_obj(dt), true,
+ &flags, ignore_quota);
/* we need only to store the overquota flags in the first lnb for
* now, once we support multiple objects BRW, this code needs be
* objects, so always set the lqi_space as 0. */
if (inode != NULL)
rc = osd_declare_inode_qid(env, i_uid_read(inode),
- i_gid_read(inode), 0, oh, true,
- true, NULL, false);
+ i_gid_read(inode), 0, oh, obj, true,
+ NULL, false);
RETURN(rc);
}
LASSERT(inode);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- 0, oh, true, true, NULL, false);
+ 0, oh, osd_dt_obj(dt), true, NULL, false);
RETURN(rc);
}