for (page_idx = page_idx_start, block_idx = start_blocks;
block_idx < block_idx_end; page_idx++,
block_idx += blocks_left_page) {
+ /* For cases where the filesystem's blocksize is not the
+ * same as PAGE_SIZE (e.g. ARM with PAGE_SIZE=64KB and
+ * blocksize=4KB), there will be multiple blocks to
+ * read/write per page. Also, the start and end block may
+ * not be aligned to the start and end of the page, so the
+ * first page may skip some blocks at the start ("i != 0",
+ * "blocks_left_page" is reduced), and the last page may
+ * skip some blocks at the end (limited by "count").
+ */
page = pages[page_idx];
LASSERT(page_idx < iobuf->dr_npages);
i = block_idx % blocks_per_page;
blocks_left_page = blocks_per_page - i;
- for (page_offset = i * blocksize; i < blocks_left_page;
+ if (block_idx + blocks_left_page > block_idx_end)
+ blocks_left_page = block_idx_end - block_idx;
+ page_offset = i * blocksize;
+ for (i = 0; i < blocks_left_page;
i += nblocks, page_offset += blocksize * nblocks) {
nblocks = 1;
* heavily-fragmented, it will be reduced to 4K at the worst.
*/
extent_bytes = osd_extent_bytes(osd);
- LASSERT(extent_bytes >= (1 << osd_sb(osd)->s_blocksize));
+ LASSERT(extent_bytes >= osd_sb(osd)->s_blocksize);
/* calculate number of extents (probably better to pass nb) */
for (i = 0; i < npages; i++) {
loff_t offset, int whence)
{
struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *dev = osd_obj2dev(obj);
struct inode *inode = obj->oo_inode;
struct file *file;
loff_t result;
ENTRY;
-
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
LASSERT(inode);
LASSERT(offset >= 0);
- file = osd_quasi_file(env, inode);
- result = file->f_op->llseek(file, offset, whence);
+ file = alloc_file_pseudo(inode, dev->od_mnt, "/", O_NOATIME,
+ inode->i_fop);
+ if (IS_ERR(file))
+ RETURN(PTR_ERR(file));
+ file->f_mode |= FMODE_64BITHASH;
+ result = file->f_op->llseek(file, offset, whence);
+ ihold(inode);
+ fput(file);
/*
* If 'offset' is beyond end of object file then treat it as not error
* but valid case for SEEK_HOLE and return 'offset' as result.
osd_partial_page_flush(d, inode, size);
}
-void osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
- loff_t start, loff_t end, int mode)
+/* Punch (fallocate) the byte range [start, end) of @obj's backing inode,
+ * then flush any pages the punch only partially covers.  Changed from
+ * void to int so callers can propagate fallocate failures.
+ */
+static int osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
+ loff_t start, loff_t end, int mode)
{
struct osd_device *d = osd_obj2dev(obj);
struct inode *inode = obj->oo_inode;
- struct file *file = osd_quasi_file(env, inode);
+ struct file *file;
+ int rc;
- file->f_op->fallocate(file, mode, start, end - start);
- osd_partial_page_flush_punch(d, inode, start, end - 1);
+ /* Build a throw-away pseudo file over the inode so the filesystem's
+ * ->fallocate() can be called through the normal file-operations path.
+ */
+ file = alloc_file_pseudo(inode, d->od_mnt, "/", O_NOATIME,
+ inode->i_fop);
+ if (IS_ERR(file))
+ RETURN(PTR_ERR(file));
+
+ file->f_mode |= FMODE_64BITHASH;
+ rc = file->f_op->fallocate(file, mode, start, end - start);
+ /* fput() drops one inode reference when releasing the pseudo file;
+ * take an extra hold first so the OSD object's own reference on the
+ * inode stays intact.  NOTE(review): this assumes alloc_file_pseudo()
+ * did not take its own inode reference — confirm against the kernel
+ * version in use.
+ */
+ ihold(inode);
+ fput(file);
+ if (rc == 0)
+ osd_partial_page_flush_punch(d, inode, start, end - 1);
+ return rc;
}
-void osd_process_truncates(const struct lu_env *env, struct list_head *list)
+/* Walk the queued access locks on @list and execute each pending
+ * truncate or punch.  Changed from void to int so a punch failure can
+ * be reported to the caller.
+ */
+int osd_process_truncates(const struct lu_env *env, struct list_head *list)
{
struct osd_access_lock *al;
+ int rc = 0;
+ /* Must not run inside an open journal handle: the truncate/punch
+ * operations below start their own transactions.
+ */
- LASSERT(journal_current_handle() == NULL);
+ LASSERT(!journal_current_handle());
list_for_each_entry(al, list, tl_list) {
if (al->tl_shared)
if (al->tl_truncate)
osd_execute_truncate(al->tl_obj);
else if (al->tl_punch)
- osd_execute_punch(env, al->tl_obj, al->tl_start,
- al->tl_end, al->tl_mode);
+ /* NOTE(review): rc is overwritten on every punched entry,
+ * so only the status of the LAST punch is returned and
+ * earlier failures are silently lost — confirm callers
+ * only need best-effort reporting here.
+ */
+ rc = osd_execute_punch(env, al->tl_obj, al->tl_start,
+ al->tl_end, al->tl_mode);
}
+
+ return rc;
}