* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
static int osd_bufs_get(const struct lu_env *env, struct dt_object *dt,
loff_t pos, ssize_t len, struct niobuf_local *lnb,
- int rw, struct lustre_capa *capa)
+ int rw)
{
struct osd_object *obj = osd_dt_obj(dt);
int npages, i, rc = 0;
unsigned long *blocks, int create)
{
int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ pgoff_t bitmap_max_page_index;
unsigned long *b;
int rc = 0, i;
+ bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
+ PAGE_SHIFT;
for (i = 0, b = blocks; i < pages; i++, page++) {
+ if ((*page)->index + 1 >= bitmap_max_page_index) {
+ rc = -EFBIG;
+ break;
+ }
rc = ldiskfs_map_inode_page(inode, *page, b, create);
if (rc) {
CERROR("ino %lu, blk %lu create %d: rc %d\n",
int rc = 0, i = 0;
struct page *fp = NULL;
int clen = 0;
+ pgoff_t extent_max_page_index;
+
+ extent_max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
inode->i_ino, pages, (*page)->index);
continue;
}
+ if (fp->index + i >= extent_max_page_index)
+ GOTO(cleanup, rc = -EFBIG);
+
/* process found extent */
rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
clen * blocks_per_page, blocks,
int rc = 0, i = 0;
struct page *fp = NULL;
int clen = 0;
+ pgoff_t max_page_index;
+
+ max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
inode->i_ino, pages, (*page)->index);
if (++i != pages)
continue;
}
+ if (fp->index + i >= max_page_index)
+ GOTO(cleanup, rc = -EFBIG);
/* process found extent */
map.m_lblk = fp->index * blocks_per_page;
map.m_len = blen = clen * blocks_per_page;
if (off)
memset(p, 0, off);
off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
- ~CFS_PAGE_MASK;
+ ~PAGE_MASK;
if (off)
memset(p + off, 0, PAGE_CACHE_SIZE - off);
kunmap(lnb[i].lnb_page);
}
/* Check if a block is allocated or not */
-static int osd_is_mapped(struct inode *inode, obd_size offset)
+static int osd_is_mapped(struct inode *inode, u64 offset)
{
sector_t (*fs_bmap)(struct address_space *, sector_t);
struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
struct timeval start, end;
unsigned long timediff;
- int rc = 0, i, m = 0, cache = 0, cache_hits = 0, cache_misses = 0;
+ int rc = 0, i, cache = 0, cache_hits = 0, cache_misses = 0;
+ loff_t isize;
LASSERT(inode);
if (unlikely(rc != 0))
RETURN(rc);
+ isize = i_size_read(inode);
+
if (osd->od_read_cache)
cache = 1;
- if (i_size_read(inode) > osd->od_readcache_max_filesize)
+ if (isize > osd->od_readcache_max_filesize)
cache = 0;
do_gettimeofday(&start);
for (i = 0; i < npages; i++) {
- if (i_size_read(inode) <= lnb[i].lnb_file_offset)
+ if (isize <= lnb[i].lnb_file_offset)
/* If there's no more data, abort early.
* lnb->lnb_rc == 0, so it's easy to detect later. */
break;
- if (i_size_read(inode) <
- lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
- lnb[i].lnb_rc = i_size_read(inode) -
- lnb[i].lnb_file_offset;
+ if (isize < lnb[i].lnb_file_offset + lnb[i].lnb_len - 1)
+ lnb[i].lnb_rc = isize - lnb[i].lnb_file_offset;
else
lnb[i].lnb_rc = lnb[i].lnb_len;
- m += lnb[i].lnb_len;
if (PageUptodate(lnb[i].lnb_page)) {
cache_hits++;
}
static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
- struct lu_buf *buf, loff_t *pos,
- struct lustre_capa *capa)
+ struct lu_buf *buf, loff_t *pos)
{
struct inode *inode = osd_dt_obj(dt)->oo_inode;
int rc;
- if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
- RETURN(-EACCES);
-
/* Read small symlink from inode body as we need to maintain correct
* on-disk symlinks for ldiskfs.
*/
if (inode != NULL) {
if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
return 1;
- } else if (test_opt(sb, EXTENTS)) {
+ } else if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
+ LDISKFS_FEATURE_INCOMPAT_EXTENTS)) {
return 1;
}
return 0;
"boffs %d size %d bh->b_size %lu\n",
boffs, size, (unsigned long)bh->b_size);
memcpy(bh->b_data + boffs, buf, size);
- err = ldiskfs_journal_dirty_metadata(handle, bh);
+ err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
if (err)
break;
}
static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
- const struct lu_buf *buf, loff_t *pos,
- struct thandle *handle, struct lustre_capa *capa,
- int ignore_quota)
+ const struct lu_buf *buf, loff_t *pos,
+ struct thandle *handle, int ignore_quota)
{
struct inode *inode = osd_dt_obj(dt)->oo_inode;
struct osd_thandle *oh;
LASSERT(dt_object_exists(dt));
- if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_WRITE))
- return -EACCES;
-
LASSERT(handle != NULL);
LASSERT(inode != NULL);
ll_vfs_dq_init(inode);
}
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
- __u64 start, __u64 end, struct thandle *th,
- struct lustre_capa *capa)
+ __u64 start, __u64 end, struct thandle *th)
{
struct osd_thandle *oh;
struct osd_object *obj = osd_dt_obj(dt);
* For a partial-page truncate, flush the page to disk immediately to
* avoid data corruption during direct disk write. b=17397
*/
- if ((start & ~CFS_PAGE_MASK) != 0)
+ if ((start & ~PAGE_MASK) != 0)
rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
h = journal_current_handle();
RETURN(rc == 0 ? rc2 : rc);
}
+/*
+ * Validate and clamp a fiemap request against the largest file size
+ * this ldiskfs inode can actually address.
+ *
+ * Extent-mapped inodes can reach the superblock's s_maxbytes, while
+ * (indirect) block-mapped inodes are bounded by s_bitmap_maxbytes.
+ *
+ * \param[in]  inode	inode the fiemap request targets
+ * \param[in]  start	byte offset where the mapping request begins
+ * \param[in]  len	requested mapping length in bytes
+ * \param[out] new_len	length, possibly shrunk to the addressable range
+ *
+ * \retval 0		on success (*new_len holds the usable length)
+ * \retval -EINVAL	for a zero-length request
+ * \retval -EFBIG	when \a start lies beyond the fs limit
+ */
+static int fiemap_check_ranges(struct inode *inode,
+ u64 start, u64 len, u64 *new_len)
+{
+ loff_t maxbytes;
+
+ *new_len = len;
+
+ if (len == 0)
+ return -EINVAL;
+
+ /* The reachable range depends on how the inode maps its blocks. */
+ if (ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS))
+ maxbytes = inode->i_sb->s_maxbytes;
+ else
+ maxbytes = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes;
+
+ if (start > maxbytes)
+ return -EFBIG;
+
+ /*
+ * Shrink request scope to what the fs can actually handle.
+ */
+ if (len > maxbytes || (maxbytes - len) < start)
+ *new_len = maxbytes - start;
+
+ return 0;
+}
+
+/* So that the fiemap access checks can't overflow on 32 bit machines. */
+#define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))
+
static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
struct ll_user_fiemap *fm)
{
- struct inode *inode = osd_dt_obj(dt)->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- struct dentry *dentry = &info->oti_obj_dentry;
- struct file *file = &info->oti_file;
- mm_segment_t saved_fs;
- int rc;
+ struct fiemap_extent_info fieinfo = {0, };
+ struct inode *inode = osd_dt_obj(dt)->oo_inode;
+ u64 len;
+ int rc;
+
+ LASSERT(inode);
+ /* Call the inode's own ->fiemap method directly instead of faking a
+ * struct file and going through the ioctl path; not every fs type
+ * implements fiemap. */
+ if (inode->i_op->fiemap == NULL)
+ return -EOPNOTSUPP;
+
+ /* Cap the extent count so the access-size arithmetic cannot overflow
+ * on 32-bit machines (see FIEMAP_MAX_EXTENTS). */
+ if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS)
+ return -EINVAL;
+
+ /* Reject a start offset past the fs limit and clamp the requested
+ * length to what this inode can address. */
+ rc = fiemap_check_ranges(inode, fm->fm_start, fm->fm_length, &len);
+ if (rc)
+ return rc;
+
+ fieinfo.fi_flags = fm->fm_flags;
+ fieinfo.fi_extents_max = fm->fm_extent_count;
+ fieinfo.fi_extents_start = fm->fm_extents;
+
+ /* Caller asked for the on-disk state: flush dirty pagecache first. */
+ if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
+ filemap_write_and_wait(inode->i_mapping);
+
+ rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
+ /* Propagate the flags and mapped-extent count the fs filled in. */
+ fm->fm_flags = fieinfo.fi_flags;
+ fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
+
+ return rc;
- LASSERT(inode);
- dentry->d_inode = inode;
- dentry->d_sb = inode->i_sb;
- file->f_dentry = dentry;
- file->f_mapping = inode->i_mapping;
- file->f_op = inode->i_fop;
- set_file_inode(file, inode);
-
- saved_fs = get_fs();
- set_fs(get_ds());
- /* ldiskfs_ioctl does not have a inode argument */
- if (inode->i_fop->unlocked_ioctl)
- rc = inode->i_fop->unlocked_ioctl(file, FSFILT_IOC_FIEMAP,
- (long)fm);
- else
- rc = -ENOTTY;
- set_fs(saved_fs);
- return rc;
}
/*