* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
static void dio_complete_routine(struct bio *bio, int error)
{
- struct osd_iobuf *iobuf = bio->bi_private;
- struct bio_vec *bvl;
- int i;
+ struct osd_iobuf *iobuf = bio->bi_private;
+#ifdef HAVE_BVEC_ITER
+ struct bvec_iter iter;
+ struct bio_vec bvl;
+#else
+ int iter;
+ struct bio_vec *bvl;
+#endif
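+	/* on 3.14+ kernels (HAVE_BVEC_ITER) bio_for_each_segment() walks a
+	 * struct bvec_iter and yields each bio_vec by value; older kernels
+	 * iterate by integer index and yield a bio_vec pointer */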
/* CAVEAT EMPTOR: possibly in IRQ context
* DO NOT record procfs stats here!!! */
CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
"bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
"bi_private: %p\n", bio->bi_next, bio->bi_flags,
- bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- bio->bi_end_io, atomic_read(&bio->bi_cnt),
- bio->bi_private);
+ bio->bi_rw, bio->bi_vcnt, bio_idx(bio),
+ bio_sectors(bio) << 9, bio->bi_end_io,
+ atomic_read(&bio->bi_cnt), bio->bi_private);
return;
}
- /* the check is outside of the cycle for performance reason -bzzz */
+	/* the check is outside of the cycle for performance reasons -bzzz */
if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
- bio_for_each_segment(bvl, bio, i) {
- if (likely(error == 0))
- SetPageUptodate(bvl->bv_page);
- LASSERT(PageLocked(bvl->bv_page));
- }
+ bio_for_each_segment(bvl, bio, iter) {
+ if (likely(error == 0))
+ SetPageUptodate(bvec_iter_page(&bvl, iter));
+ LASSERT(PageLocked(bvec_iter_page(&bvl, iter)));
+ }
atomic_dec(&iobuf->dr_dev->od_r_in_flight);
- } else {
+ } else {
atomic_dec(&iobuf->dr_dev->od_w_in_flight);
- }
+ }
/* any real error is good enough -bzzz */
if (error != 0 && iobuf->dr_error == 0)
static int can_be_merged(struct bio *bio, sector_t sector)
{
- unsigned int size;
-
- if (!bio)
- return 0;
+ if (bio == NULL)
+ return 0;
- size = bio->bi_size >> 9;
- return bio->bi_sector + size == sector ? 1 : 0;
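+	/* mergeable only if the new chunk starts exactly at the current
+	 * end of the bio */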
+ return bio_end_sector(bio) == sector ? 1 : 0;
}
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
blocksize * nblocks, page_offset) != 0)
continue; /* added this frag OK */
- if (bio != NULL) {
- struct request_queue *q =
- bdev_get_queue(bio->bi_bdev);
-
- /* Dang! I have to fragment this I/O */
- CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
- "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
- bio->bi_size,
- bio->bi_vcnt, bio->bi_max_vecs,
- bio->bi_size >> 9, queue_max_sectors(q),
+ if (bio != NULL) {
+ struct request_queue *q =
+ bdev_get_queue(bio->bi_bdev);
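+			/* bi_size moved into bio->bi_iter on 3.14+ kernels,
+			 * so derive the byte count portably from the sector
+			 * count */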
+ unsigned int bi_size = bio_sectors(bio) << 9;
+
+ /* Dang! I have to fragment this I/O */
+ CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
+ "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
+ bi_size, bio->bi_vcnt, bio->bi_max_vecs,
+ bio_sectors(bio),
+ queue_max_sectors(q),
bio_phys_segments(q, bio),
queue_max_phys_segments(q),
0, queue_max_hw_segments(q));
-
- record_start_io(iobuf, bio->bi_size);
- osd_submit_bio(iobuf->dr_rw, bio);
- }
+ record_start_io(iobuf, bi_size);
+ osd_submit_bio(iobuf->dr_rw, bio);
+ }
/* allocate new bio */
bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
goto out;
}
- bio->bi_bdev = inode->i_sb->s_bdev;
- bio->bi_sector = sector;
+ bio->bi_bdev = inode->i_sb->s_bdev;
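+		/* bio_set_sector() is a compat wrapper for the start sector
+		 * (bi_iter.bi_sector on 3.14+ kernels, bi_sector before) */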
+ bio_set_sector(bio, sector);
bio->bi_rw = (iobuf->dr_rw == 0) ? READ : WRITE;
- bio->bi_end_io = dio_complete_routine;
- bio->bi_private = iobuf;
+ bio->bi_end_io = dio_complete_routine;
+ bio->bi_private = iobuf;
- rc = bio_add_page(bio, page,
- blocksize * nblocks, page_offset);
- LASSERT(rc != 0);
- }
- }
+ rc = bio_add_page(bio, page,
+ blocksize * nblocks, page_offset);
+ LASSERT(rc != 0);
+ }
+ }
- if (bio != NULL) {
- record_start_io(iobuf, bio->bi_size);
- osd_submit_bio(iobuf->dr_rw, bio);
- rc = 0;
- }
+ if (bio != NULL) {
+ record_start_io(iobuf, bio_sectors(bio) << 9);
+ osd_submit_bio(iobuf->dr_rw, bio);
+ rc = 0;
+ }
out:
/* in order to achieve better IO throughput, we don't wait for writes
RETURN(0);
}
-struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
+static struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw)
{
struct inode *inode = osd_dt_obj(dt)->oo_inode;
struct osd_device *d = osd_obj2dev(osd_dt_obj(dt));
* journal_start
* i_mutex
* page lock
-
- * osd write path
- * lock page(s)
- * journal_start
- * truncate_sem
-
+ *
+ * osd write path:
+ * - lock page(s)
+ * - journal_start
+ * - truncate_sem
+ *
* ext4 vmtruncate:
- * lock pages, unlock
- * journal_start
- * lock partial page
- * i_data_sem
-
-*/
-int osd_bufs_get(const struct lu_env *env, struct dt_object *d, loff_t pos,
- ssize_t len, struct niobuf_local *lnb, int rw,
- struct lustre_capa *capa)
-{
- struct osd_object *obj = osd_dt_obj(d);
- int npages, i, rc = 0;
-
- LASSERT(obj->oo_inode);
-
- osd_map_remote_to_local(pos, len, &npages, lnb);
-
- for (i = 0; i < npages; i++, lnb++) {
- lnb->lnb_page = osd_get_page(d, lnb->lnb_file_offset, rw);
- if (lnb->lnb_page == NULL)
- GOTO(cleanup, rc = -ENOMEM);
-
- /* DLM locking protects us from write and truncate competing
- * for same region, but truncate can leave dirty page in the
- * cache. it's possible the writeout on a such a page is in
- * progress when we access it. it's also possible that during
- * this writeout we put new (partial) data, but then won't
- * be able to proceed in filter_commitrw_write(). thus let's
- * just wait for writeout completion, should be rare enough.
- * -bzzz */
- wait_on_page_writeback(lnb->lnb_page);
- BUG_ON(PageWriteback(lnb->lnb_page));
-
- lu_object_get(&d->do_lu);
- }
- rc = i;
-
-cleanup:
- RETURN(rc);
-}
+ * - lock pages, unlock
+ * - journal_start
+ * - lock partial page
+ * - i_data_sem
+ *
+ */
+/**
+ * Unlock and release pages loaded by osd_bufs_get()
+ *
+ * Unlock \a npages pages from \a lnb and drop the refcount on them.
+ *
+ * \param env thread execution environment
+ * \param dt dt object undergoing IO (OSD object + methods)
+ * \param lnb array of pages undergoing IO
+ * \param npages number of pages in \a lnb
+ *
+ * \retval 0 always
+ */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
struct niobuf_local *lnb, int npages)
{
RETURN(0);
}
+/**
+ * Load and lock pages undergoing IO
+ *
+ * Pages as described in the \a lnb array are fetched (from disk or cache)
+ * and locked for IO by the caller.
+ *
+ * DLM locking protects us from write and truncate competing for the same
+ * region, but a partial-page truncate can leave dirty pages in the cache
+ * for ldiskfs. It's possible that writeout of such a page is in progress
+ * when we access it. It's also possible that we put new (partial) data
+ * into the page during this writeout, but then won't be able to proceed
+ * in filter_commitrw_write(). Therefore, just wait for writeout
+ * completion; it should be rare enough.
+ *
+ * \param env thread execution environment
+ * \param dt dt object undergoing IO (OSD object + methods)
+ * \param pos byte offset of IO start
+ * \param len number of bytes of IO
+ * \param lnb array of extents undergoing IO
+ * \param rw read or write operation?
+ * \param capa capabilities
+ *
+ * \retval pages (zero or more) loaded successfully
+ * \retval -ENOMEM on memory/page allocation error
+ */
+static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
+ loff_t pos, ssize_t len, struct niobuf_local *lnb,
+ int rw)
+{
+ struct osd_object *obj = osd_dt_obj(dt);
+ int npages, i, rc = 0;
+
+ LASSERT(obj->oo_inode);
+
+ osd_map_remote_to_local(pos, len, &npages, lnb);
+
+ for (i = 0; i < npages; i++, lnb++) {
+ lnb->lnb_page = osd_get_page(dt, lnb->lnb_file_offset, rw);
+ if (lnb->lnb_page == NULL)
+ GOTO(cleanup, rc = -ENOMEM);
+
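+		/* as noted in the comment above, truncate can leave this
+		 * page under writeout; wait for it to complete before
+		 * touching the page */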
+ wait_on_page_writeback(lnb->lnb_page);
+ BUG_ON(PageWriteback(lnb->lnb_page));
+
+ lu_object_get(&dt->do_lu);
+ }
+
+ RETURN(i);
+
+cleanup:
+ if (i > 0)
+ osd_bufs_put(env, dt, lnb - i, i);
+ return rc;
+}
+
+#ifndef HAVE_LDISKFS_MAP_BLOCKS
+
#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
#define ldiskfs_ext_pblock(ex) ext_pblock((ex))
#endif
tgen = LDISKFS_I(inode)->i_ext_generation;
count = ldiskfs_ext_calc_credits_for_insert(inode, path);
- handle = ldiskfs_journal_start(inode, count + LDISKFS_ALLOC_NEEDED + 1);
+ handle = osd_journal_start(inode, LDISKFS_HT_MISC,
+ count + LDISKFS_ALLOC_NEEDED + 1);
if (IS_ERR(handle)) {
return PTR_ERR(handle);
}
return err;
}
-int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long block,
- unsigned long num, unsigned long *blocks,
- int create)
+static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long block,
+ unsigned long num, unsigned long *blocks,
+ int create)
{
struct bpointers bp;
int err;
return err;
}
-int osd_ldiskfs_map_ext_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create)
+static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode,
+ struct page **page, int pages,
+ unsigned long *blocks, int create)
+{
+ int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ pgoff_t bitmap_max_page_index;
+ unsigned long *b;
+ int rc = 0, i;
+
+ bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
+ PAGE_SHIFT;
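+	/* block-mapped (non-extent) files cannot address data beyond
+	 * s_bitmap_maxbytes, so refuse pages past that limit with -EFBIG */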
+ for (i = 0, b = blocks; i < pages; i++, page++) {
+ if ((*page)->index + 1 >= bitmap_max_page_index) {
+ rc = -EFBIG;
+ break;
+ }
+ rc = ldiskfs_map_inode_page(inode, *page, b, create);
+ if (rc) {
+ CERROR("ino %lu, blk %lu create %d: rc %d\n",
+ inode->i_ino, *b, create, rc);
+ break;
+ }
+ b += blocks_per_page;
+ }
+ return rc;
+}
+
+static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode,
+ struct page **page,
+ int pages, unsigned long *blocks,
+ int create)
{
int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
int rc = 0, i = 0;
struct page *fp = NULL;
int clen = 0;
+ pgoff_t extent_max_page_index;
+
+ extent_max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
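+	/* extent-mapped files are limited by s_maxbytes instead */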
CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
inode->i_ino, pages, (*page)->index);
continue;
}
+ if (fp->index + clen >= extent_max_page_index)
+ GOTO(cleanup, rc = -EFBIG);
+
/* process found extent */
rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
clen * blocks_per_page, blocks,
return rc;
}
-int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page,
- int pages, unsigned long *blocks,
- int create)
-{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
- unsigned long *b;
- int rc = 0, i;
-
- for (i = 0, b = blocks; i < pages; i++, page++) {
- rc = ldiskfs_map_inode_page(inode, *page, b, create);
- if (rc) {
- CERROR("ino %lu, blk %lu create %d: rc %d\n",
- inode->i_ino, *b, create, rc);
- break;
- }
-
- b += blocks_per_page;
- }
- return rc;
-}
-
static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int create)
{
int rc;
- if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) {
+ if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
rc = osd_ldiskfs_map_ext_inode_pages(inode, page, pages,
blocks, create);
return rc;
return rc;
}
+#else
+static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
+ int pages, unsigned long *blocks,
+ int create)
+{
+ int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ int rc = 0, i = 0;
+ struct page *fp = NULL;
+ int clen = 0;
+ pgoff_t max_page_index;
+
+ max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
+
+ CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
+ inode->i_ino, pages, (*page)->index);
+
+ /* pages are sorted already, so we just have to find
+ * contiguous spans and process them properly */
+ while (i < pages) {
+ long blen, total = 0;
+ handle_t *handle = NULL;
+ struct ldiskfs_map_blocks map = { 0 };
+
+ if (fp == NULL) { /* start new extent */
+ fp = *page++;
+ clen = 1;
+ if (++i != pages)
+ continue;
+ } else if (fp->index + clen == (*page)->index) {
+ /* continue the extent */
+ page++;
+ clen++;
+ if (++i != pages)
+ continue;
+ }
+ if (fp->index + clen >= max_page_index)
+ GOTO(cleanup, rc = -EFBIG);
+ /* process found extent */
+ map.m_lblk = fp->index * blocks_per_page;
+ map.m_len = blen = clen * blocks_per_page;
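+		/* writes must allocate blocks within the already-running
+		 * transaction, so reuse the current journal handle */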
+ if (create) {
+ create = LDISKFS_GET_BLOCKS_CREATE;
+ handle = ldiskfs_journal_current_handle();
+ LASSERT(handle != NULL);
+ }
+cont_map:
+ rc = ldiskfs_map_blocks(handle, inode, &map, create);
+ if (rc >= 0) {
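+			/* ldiskfs_map_blocks() returns the number of mapped
+			 * blocks; rc == 0 means a hole, which is recorded
+			 * one block at a time */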
+ int c = 0;
+ for (; total < blen && c < map.m_len; c++, total++) {
+ if (rc == 0) {
+ *(blocks + total) = 0;
+ total++;
+ break;
+ } else {
+ *(blocks + total) = map.m_pblk + c;
+ /* unmap any possible underlying
+ * metadata from the block device
+ * mapping. bug 6998. */
+ if ((map.m_flags & LDISKFS_MAP_NEW) &&
+ create)
+ unmap_underlying_metadata(
+ inode->i_sb->s_bdev,
+ map.m_pblk + c);
+ }
+ }
+ rc = 0;
+ }
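+		/* fewer blocks than requested may have been mapped;
+		 * restart from the first unmapped block */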
+ if (rc == 0 && total < blen) {
+ map.m_lblk = fp->index * blocks_per_page + total;
+ map.m_len = blen - total;
+ goto cont_map;
+ }
+ if (rc != 0)
+ GOTO(cleanup, rc);
+
+ /* look for next extent */
+ fp = NULL;
+ blocks += blocks_per_page * clen;
+ }
+cleanup:
+ return rc;
+}
+#endif /* HAVE_LDISKFS_MAP_BLOCKS */
static int osd_write_prep(const struct lu_env *env, struct dt_object *dt,
struct niobuf_local *lnb, int npages)
if (off)
memset(p, 0, off);
off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
- ~CFS_PAGE_MASK;
+ ~PAGE_MASK;
if (off)
memset(p + off, 0, PAGE_CACHE_SIZE - off);
kunmap(lnb[i].lnb_page);
}
/* Check if a block is allocated or not */
-static int osd_is_mapped(struct inode *inode, obd_size offset)
+static int osd_is_mapped(struct inode *inode, u64 offset)
{
sector_t (*fs_bmap)(struct address_space *, sector_t);
* 5 is max tree depth: inode + 4 index blocks
* with blockmaps, depth is 3 at most
*/
- if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) {
+ if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
/*
* many concurrent threads may grow tree by the time
* our transaction starts. so, consider 2 is a min depth
lnb[0].lnb_flags &= ~(OBD_BRW_OVER_USRQUOTA | OBD_BRW_OVER_GRPQUOTA);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- quota_space, oh, true, true, &flags,
- ignore_quota);
+ quota_space, oh, osd_dt_obj(dt), true,
+ &flags, ignore_quota);
/* we need only to store the overquota flags in the first lnb for
* now, once we support multiple objects BRW, this code needs be
struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
struct timeval start, end;
unsigned long timediff;
- int rc = 0, i, m = 0, cache = 0, cache_hits = 0, cache_misses = 0;
+ int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
+ loff_t isize;
LASSERT(inode);
if (unlikely(rc != 0))
RETURN(rc);
+ isize = i_size_read(inode);
+
if (osd->od_read_cache)
cache = 1;
- if (i_size_read(inode) > osd->od_readcache_max_filesize)
+ if (isize > osd->od_readcache_max_filesize)
cache = 0;
do_gettimeofday(&start);
for (i = 0; i < npages; i++) {
- if (i_size_read(inode) <= lnb[i].lnb_file_offset)
+ if (isize <= lnb[i].lnb_file_offset)
/* If there's no more data, abort early.
* lnb->lnb_rc == 0, so it's easy to detect later. */
break;
- if (i_size_read(inode) <
- lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
- lnb[i].lnb_rc = i_size_read(inode) -
- lnb[i].lnb_file_offset;
+ if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
+ lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
else
lnb[i].lnb_rc = lnb[i].lnb_len;
- m += lnb[i].lnb_len;
if (PageUptodate(lnb[i].lnb_page)) {
cache_hits++;
boffs = *offs & (blocksize - 1);
csize = min(blocksize - boffs, size);
bh = ldiskfs_bread(NULL, inode, block, 0, &err);
- if (!bh) {
- CERROR("%s: can't read %u@%llu on ino %lu: rc = %d\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
- csize, *offs, inode->i_ino, err);
- return err;
- }
+ if (err != 0) {
+ CERROR("%s: can't read %u@%llu on ino %lu: rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ csize, *offs, inode->i_ino, err);
+ if (bh != NULL)
+ brelse(bh);
+ return err;
+ }
- memcpy(buf, bh->b_data + boffs, csize);
- brelse(bh);
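+		/* a NULL bh without an error means a hole: return zeroes */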
+ if (bh != NULL) {
+ memcpy(buf, bh->b_data + boffs, csize);
+ brelse(bh);
+ } else {
+ memset(buf, 0, csize);
+ }
*offs += csize;
buf += csize;
}
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
- struct lu_buf *buf, loff_t *pos,
- struct lustre_capa *capa)
+ struct lu_buf *buf, loff_t *pos)
{
struct inode *inode = osd_dt_obj(dt)->oo_inode;
int rc;
- if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
- RETURN(-EACCES);
-
/* Read small symlink from inode body as we need to maintain correct
* on-disk symlinks for ldiskfs.
*/
struct inode *inode)
{
if (inode != NULL) {
- if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
+ if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
return 1;
} else if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
- LDISKFS_FEATURE_INCOMPAT_EXTENTS))
+ LDISKFS_FEATURE_INCOMPAT_EXTENTS)) {
return 1;
+ }
return 0;
}
* objects, so always set the lqi_space as 0. */
if (inode != NULL)
rc = osd_declare_inode_qid(env, i_uid_read(inode),
- i_gid_read(inode), 0, oh, true,
- true, NULL, false);
+ i_gid_read(inode), 0, oh, obj, true,
+ NULL, false);
RETURN(rc);
}
"boffs %d size %d bh->b_size %lu\n",
boffs, size, (unsigned long)bh->b_size);
memcpy(bh->b_data + boffs, buf, size);
- err = ldiskfs_journal_dirty_metadata(handle, bh);
+ err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
if (err)
break;
}
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
- const struct lu_buf *buf, loff_t *pos,
- struct thandle *handle, struct lustre_capa *capa,
- int ignore_quota)
+ const struct lu_buf *buf, loff_t *pos,
+ struct thandle *handle, int ignore_quota)
{
struct inode *inode = osd_dt_obj(dt)->oo_inode;
struct osd_thandle *oh;
LASSERT(dt_object_exists(dt));
- if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_WRITE))
- return -EACCES;
-
LASSERT(handle != NULL);
LASSERT(inode != NULL);
ll_vfs_dq_init(inode);
LASSERT(inode);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- 0, oh, true, true, NULL, false);
+ 0, oh, osd_dt_obj(dt), true, NULL, false);
RETURN(rc);
}
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
- __u64 start, __u64 end, struct thandle *th,
- struct lustre_capa *capa)
+ __u64 start, __u64 end, struct thandle *th)
{
struct osd_thandle *oh;
struct osd_object *obj = osd_dt_obj(dt);
* For a partial-page truncate, flush the page to disk immediately to
* avoid data corruption during direct disk write. b=17397
*/
- if ((start & ~CFS_PAGE_MASK) != 0)
+ if ((start & ~PAGE_MASK) != 0)
rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
h = journal_current_handle();
RETURN(rc == 0 ? rc2 : rc);
}
+static int fiemap_check_ranges(struct inode *inode,
+ u64 start, u64 len, u64 *new_len)
+{
+ loff_t maxbytes;
+
+ *new_len = len;
+
+ if (len == 0)
+ return -EINVAL;
+
+ if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
+ maxbytes = inode->i_sb->s_maxbytes;
+ else
+ maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
+
+ if (start > maxbytes)
+ return -EFBIG;
+
+ /*
+ * Shrink request scope to what the fs can actually handle.
+ */
+ if (len > maxbytes || (maxbytes - len) < start)
+ *new_len = maxbytes - start;
+
+ return 0;
+}
+
+/* So that the fiemap access checks can't overflow on 32 bit machines. */
+#define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))
+
static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
struct ll_user_fiemap *fm)
{
- struct inode *inode = osd_dt_obj(dt)->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- struct dentry *dentry = &info->oti_obj_dentry;
- struct file *file = &info->oti_file;
- mm_segment_t saved_fs;
- int rc;
+ struct fiemap_extent_info fieinfo = {0, };
+ struct inode *inode = osd_dt_obj(dt)->oo_inode;
+ u64 len;
+ int rc;
- LASSERT(inode);
- dentry->d_inode = inode;
- dentry->d_sb = inode->i_sb;
- file->f_dentry = dentry;
- file->f_mapping = inode->i_mapping;
- file->f_op = inode->i_fop;
- set_file_inode(file, inode);
-
- saved_fs = get_fs();
- set_fs(get_ds());
- /* ldiskfs_ioctl does not have a inode argument */
- if (inode->i_fop->unlocked_ioctl)
- rc = inode->i_fop->unlocked_ioctl(file, FSFILT_IOC_FIEMAP,
- (long)fm);
- else
- rc = -ENOTTY;
- set_fs(saved_fs);
- return rc;
+
+ LASSERT(inode);
+ if (inode->i_op->fiemap == NULL)
+ return -EOPNOTSUPP;
+
+ if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
+ return -EINVAL;
+
+ rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
+ if (rc)
+ return rc;
+
+ fieinfo.fi_flags = fm->fm_flags;
+ fieinfo.fi_extents_max = fm->fm_extent_count;
+ fieinfo.fi_extents_start = fm->fm_extents;
+
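+	/* FIEMAP_FLAG_SYNC requests that dirty data be flushed first so
+	 * the reported extents match what is on disk */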
+ if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
+ filemap_write_and_wait(inode->i_mapping);
+
+ rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
+ fm->fm_flags = fieinfo.fi_flags;
+ fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
+
+ return rc;
}
/*