In the 3.14 kernel code base several data fields in
struct bio were moved into a new structure called
bvec_iter. This patch updates osd-ldiskfs to handle
this api change.
Change-Id: I849a1d62462c58a79766c176060b27c621627646
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Reviewed-on: http://review.whamcloud.com/10995
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Bob Glossman <bob.glossman@intel.com>
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
]) # LC_HAVE_HLIST_FOR_EACH_3ARG
#
+# LC_HAVE_BIO_END_SECTOR
+#
+# 3.9 introduces bio_end_sector macro
+# f73a1c7d117d07a96d89475066188a2b79e53c48
+#
+AC_DEFUN([LC_HAVE_BIO_END_SECTOR], [
+LB_CHECK_COMPILE([if 'bio_end_sector' is defined],
+bio_end_sector, [
+ #include <linux/bio.h>
+],[
+ struct bio bio;
+
+ bio_end_sector(&bio);
+],[
+ AC_DEFINE(HAVE_BIO_END_SECTOR, 1,
+ [bio_end_sector is defined])
+])
+]) # LC_HAVE_BIO_END_SECTOR
+
+#
# LC_HAVE_ONLY_PROCFS_SEQ
#
# 3.10+ only supports procfs seq_files handling
# 3.9
LC_HAVE_HLIST_FOR_EACH_3ARG
+LC_HAVE_BIO_END_SECTOR
# 3.10
LC_HAVE_ONLY_PROCFS_SEQ
# define blkdev_get_by_dev(dev, mode, holder) open_by_devnum(dev, mode)
#endif
+#ifdef HAVE_BVEC_ITER
+#define bio_idx(bio) (bio->bi_iter.bi_idx)
+#define bio_set_sector(bio, sector) (bio->bi_iter.bi_sector = sector)
+#else
+#define bio_idx(bio) (bio->bi_idx)
+#define bio_set_sector(bio, sector) (bio->bi_sector = sector)
+#define bio_sectors(bio) ((bio)->bi_size >> 9)
+#ifndef HAVE_BIO_END_SECTOR
+#define bio_end_sector(bio) (bio->bi_sector + bio_sectors(bio))
+#endif
+#define bvec_iter_page(bvec, iter) ((*bvec)->bv_page)
+#endif
+
#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
#define blk_queue_max_segments(rq, seg) \
do { blk_queue_max_phys_segments(rq, seg); \
static void dio_complete_routine(struct bio *bio, int error)
{
- struct osd_iobuf *iobuf = bio->bi_private;
- struct bio_vec *bvl;
- int i;
+ struct osd_iobuf *iobuf = bio->bi_private;
+#ifdef HAVE_BVEC_ITER
+ struct bvec_iter iter;
+ struct bio_vec bvl;
+#else
+ int iter;
+ struct bio_vec *bvl;
+#endif
/* CAVEAT EMPTOR: possibly in IRQ context
 * DO NOT record procfs stats here!!! */
CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
"bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
"bi_private: %p\n", bio->bi_next, bio->bi_flags,
- bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- bio->bi_end_io, atomic_read(&bio->bi_cnt),
- bio->bi_private);
+ bio->bi_rw, bio->bi_vcnt, bio_idx(bio),
+ bio_sectors(bio) << 9, bio->bi_end_io,
+ atomic_read(&bio->bi_cnt), bio->bi_private);
- /* the check is outside of the cycle for performance reason -bzzz */
+ /* the check is outside of the cycle for performance reason -bzzz */
if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
- bio_for_each_segment(bvl, bio, i) {
- if (likely(error == 0))
- SetPageUptodate(bvl->bv_page);
- LASSERT(PageLocked(bvl->bv_page));
- }
+ bio_for_each_segment(bvl, bio, iter) {
+ if (likely(error == 0))
+ SetPageUptodate(bvec_iter_page(&bvl, iter));
+ LASSERT(PageLocked(bvec_iter_page(&bvl, iter)));
+ }
atomic_dec(&iobuf->dr_dev->od_r_in_flight);
atomic_dec(&iobuf->dr_dev->od_w_in_flight);
/* any real error is good enough -bzzz */
if (error != 0 && iobuf->dr_error == 0)
static int can_be_merged(struct bio *bio, sector_t sector)
{
- unsigned int size;
-
- if (!bio)
- return 0;
+ if (bio == NULL)
+ return 0;
- size = bio->bi_size >> 9;
- return bio->bi_sector + size == sector ? 1 : 0;
+ return bio_end_sector(bio) == sector ? 1 : 0;
}
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
blocksize * nblocks, page_offset) != 0)
continue; /* added this frag OK */
- if (bio != NULL) {
- struct request_queue *q =
- bdev_get_queue(bio->bi_bdev);
-
- /* Dang! I have to fragment this I/O */
- CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
- "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
- bio->bi_size,
- bio->bi_vcnt, bio->bi_max_vecs,
- bio->bi_size >> 9, queue_max_sectors(q),
+ if (bio != NULL) {
+ struct request_queue *q =
+ bdev_get_queue(bio->bi_bdev);
+ unsigned int bi_size = bio_sectors(bio) << 9;
+
+ /* Dang! I have to fragment this I/O */
+ CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
+ "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
+ bi_size, bio->bi_vcnt, bio->bi_max_vecs,
+ bio_sectors(bio),
+ queue_max_sectors(q),
bio_phys_segments(q, bio),
queue_max_phys_segments(q),
0, queue_max_hw_segments(q));
-
- record_start_io(iobuf, bio->bi_size);
- osd_submit_bio(iobuf->dr_rw, bio);
- }
+ record_start_io(iobuf, bi_size);
+ osd_submit_bio(iobuf->dr_rw, bio);
+ }
/* allocate new bio */
bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
- bio->bi_bdev = inode->i_sb->s_bdev;
- bio->bi_sector = sector;
+ bio->bi_bdev = inode->i_sb->s_bdev;
+ bio_set_sector(bio, sector);
bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
- bio->bi_end_io = dio_complete_routine;
- bio->bi_private = iobuf;
+ bio->bi_end_io = dio_complete_routine;
+ bio->bi_private = iobuf;
- rc = bio_add_page(bio, page,
- blocksize * nblocks, page_offset);
- LASSERT(rc != 0);
- }
- }
+ rc = bio_add_page(bio, page,
+ blocksize * nblocks, page_offset);
+ LASSERT(rc != 0);
+ }
+ }
- if (bio != NULL) {
- record_start_io(iobuf, bio->bi_size);
- osd_submit_bio(iobuf->dr_rw, bio);
- rc = 0;
- }
+ if (bio != NULL) {
+ record_start_io(iobuf, bio_sectors(bio) << 9);
+ osd_submit_bio(iobuf->dr_rw, bio);
+ rc = 0;
+ }
out:
/* in order to achieve better IO throughput, we don't wait for writes