#endif
/*
 * Compat: bio_for_each_segment_all() first appeared in kernel 3.9.
 * On kernels that lack it but do have the bvec_iter API
 * (HAVE_BVEC_ITER), walk bi_io_vec directly with a struct bio_vec
 * pointer and an integer index; on older kernels fall back to
 * bio_for_each_segment(), which already uses that iterator shape.
 * Either way callers iterate with (struct bio_vec *bv, int it).
 */
#ifndef bio_for_each_segment_all /* since kernel version 3.9 */
#ifdef HAVE_BVEC_ITER
#define bio_for_each_segment_all(bv, bio, it) \
	for (it = 0, bv = (bio)->bi_io_vec; it < (bio)->bi_vcnt; it++, bv++)
#else
#define bio_for_each_segment_all(bv, bio, it) bio_for_each_segment(bv, bio, it)
#endif
#endif
#endif /* _LUSTRE_COMPAT_H */
struct cl_object *obj = ll_i2info(inode)->lli_clob;
pgoff_t offset;
int ret;
-#ifdef HAVE_BVEC_ITER
- struct bvec_iter iter;
- struct bio_vec bvec;
-#else
int iter;
struct bio_vec *bvec;
-#endif
int rw;
size_t page_count = 0;
struct bio *bio;
#ifdef HAVE_BVEC_ITER
offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment_all(bvec, bio, iter) {
- BUG_ON(bvec.bv_offset != 0);
- BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
-
- pages[page_count] = bvec.bv_page;
- offsets[page_count] = offset;
- page_count++;
- offset += bvec.bv_len;
#else
offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
+#endif
bio_for_each_segment_all(bvec, bio, iter) {
BUG_ON(bvec->bv_offset != 0);
BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
offsets[page_count] = offset;
page_count++;
offset += bvec->bv_len;
-#endif
}
LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
}