* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* the check is outside of the cycle for performance reason -bzzz */
if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
- bio_for_each_segment(bvl, bio, iter) {
+ bio_for_each_segment_all(bvl, bio, iter) {
if (likely(error == 0))
- SetPageUptodate(bvec_iter_page(&bvl, iter));
- LASSERT(PageLocked(bvec_iter_page(&bvl, iter)));
+ SetPageUptodate(bvl_to_page(bvl));
+ LASSERT(PageLocked(bvl_to_page(bvl)));
}
atomic_dec(&iobuf->dr_dev->od_r_in_flight);
} else {
RETURN(rc);
}
-/* Check if a block is allocated or not */
-static int osd_is_mapped(struct inode *inode, u64 offset)
+struct osd_fextent {
+ sector_t start;
+ sector_t end;
+ unsigned int mapped:1;
+};
+
+static int osd_is_mapped(struct dt_object *dt, __u64 offset,
+ struct osd_fextent *cached_extent)
{
- sector_t (*fs_bmap)(struct address_space *, sector_t);
+ struct inode *inode = osd_dt_obj(dt)->oo_inode;
+ sector_t block = offset >> inode->i_blkbits;
+ sector_t start;
+ struct fiemap_extent_info fei = { 0 };
+ struct fiemap_extent fe = { 0 };
+ mm_segment_t saved_fs;
+ int rc;
- fs_bmap = inode->i_mapping->a_ops->bmap;
-
- /* We can't know if we are overwriting or not */
- if (unlikely(fs_bmap == NULL))
- return 0;
+ if (block >= cached_extent->start && block < cached_extent->end)
+ return cached_extent->mapped;
if (i_size_read(inode) == 0)
return 0;
/* Beyond EOF, must not be mapped */
- if (((i_size_read(inode) - 1) >> inode->i_blkbits) <
- (offset >> inode->i_blkbits))
+ if (((i_size_read(inode) - 1) >> inode->i_blkbits) < block)
return 0;
- if (fs_bmap(inode->i_mapping, offset >> inode->i_blkbits) == 0)
+ fei.fi_extents_max = 1;
+ fei.fi_extents_start = &fe;
+
+ saved_fs = get_fs();
+ set_fs(get_ds());
+ rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
+ set_fs(saved_fs);
+ if (rc != 0)
return 0;
- return 1;
+ start = fe.fe_logical >> inode->i_blkbits;
+
+ if (start > block) {
+ cached_extent->start = block;
+ cached_extent->end = start;
+ cached_extent->mapped = 0;
+ } else {
+ cached_extent->start = start;
+ cached_extent->end = (fe.fe_logical + fe.fe_length) >>
+ inode->i_blkbits;
+ cached_extent->mapped = 1;
+ }
+
+ return cached_extent->mapped;
}
static int osd_declare_write_commit(const struct lu_env *env,
int credits = 0;
bool ignore_quota = false;
long long quota_space = 0;
+ struct osd_fextent extent = { 0 };
ENTRY;
LASSERT(handle != NULL);
lnb[i - 1].lnb_file_offset + lnb[i - 1].lnb_len)
extents++;
- if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
+ if (!osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
quota_space += PAGE_CACHE_SIZE;
/* ignore quota for the whole request if any page is from
struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
loff_t isize;
int rc = 0, i;
+ struct osd_fextent extent = { 0 };
LASSERT(inode);
for (i = 0; i < npages; i++) {
if (lnb[i].lnb_rc == -ENOSPC &&
- osd_is_mapped(inode, lnb[i].lnb_file_offset)) {
+ osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent)) {
/* Allow the write to proceed if overwriting an
* existing block */
lnb[i].lnb_rc = 0;
* lnb->lnb_rc == 0, so it's easy to detect later. */
break;
- if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
+ if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len)
lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
else
lnb[i].lnb_rc = lnb[i].lnb_len;
return 0;
}
-static inline int osd_calc_bkmap_credits(struct super_block *sb,
- struct inode *inode,
- const loff_t size,
- const loff_t pos,
- const int blocks)
+int osd_calc_bkmap_credits(struct super_block *sb, struct inode *inode,
+ const loff_t size, const loff_t pos,
+ const int blocks)
{
int credits, bits, bs, i;
} else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
/* single indirect */
credits = blocks * 3;
- /* probably indirect block has been allocated already */
- if (!inode || LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK])
+ if (inode == NULL ||
+ LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
credits += 3;
+ else
+ /* The indirect block may be modified. */
+ credits += 1;
}
return credits;
return rc;
}
+static int osd_ladvise(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, enum lu_ladvise_type advice)
+{
+ int rc;
+ ENTRY;
+
+ switch (advice) {
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ RETURN(rc);
+}
+
/*
 * In some cases we may need to declare methods for objects being created,
 * e.g., when we create a symlink.
};
const struct dt_body_operations osd_body_ops = {
- .dbo_read = osd_read,
- .dbo_declare_write = osd_declare_write,
- .dbo_write = osd_write,
- .dbo_bufs_get = osd_bufs_get,
- .dbo_bufs_put = osd_bufs_put,
- .dbo_write_prep = osd_write_prep,
- .dbo_declare_write_commit = osd_declare_write_commit,
- .dbo_write_commit = osd_write_commit,
- .dbo_read_prep = osd_read_prep,
- .dbo_declare_punch = osd_declare_punch,
- .dbo_punch = osd_punch,
- .dbo_fiemap_get = osd_fiemap_get,
+ .dbo_read = osd_read,
+ .dbo_declare_write = osd_declare_write,
+ .dbo_write = osd_write,
+ .dbo_bufs_get = osd_bufs_get,
+ .dbo_bufs_put = osd_bufs_put,
+ .dbo_write_prep = osd_write_prep,
+ .dbo_declare_write_commit = osd_declare_write_commit,
+ .dbo_write_commit = osd_write_commit,
+ .dbo_read_prep = osd_read_prep,
+ .dbo_declare_punch = osd_declare_punch,
+ .dbo_punch = osd_punch,
+ .dbo_fiemap_get = osd_fiemap_get,
+ .dbo_ladvise = osd_ladvise,
};