struct address_space *mapping = inode->i_mapping;
struct page *vmpage;
struct niobuf_remote *rnb;
+ struct mdt_body *body;
char *data;
unsigned long index, start;
struct niobuf_local lnb;
if (rnb->rnb_offset % PAGE_SIZE)
RETURN_EXIT;
- /* Server returns whole file or just file tail if it fills in
- * reply buffer, in both cases total size should be inode size.
+ /* Server returns whole file or just file tail if it fills in reply
+ * buffer, in both cases total size should be equal to the file size.
*/
- if (rnb->rnb_offset + rnb->rnb_len < i_size_read(inode)) {
- CERROR("%s: server returns off/len %llu/%u < i_size %llu\n",
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size) {
+ CERROR("%s: server returns off/len %llu/%u but size %llu\n",
ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
- rnb->rnb_len, i_size_read(inode));
+ rnb->rnb_len, body->mbo_dom_size);
RETURN_EXIT;
}
- CDEBUG(D_INFO, "Get data along with open at %llu len %i, i_size %llu\n",
- rnb->rnb_offset, rnb->rnb_len, i_size_read(inode));
+ CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
+ rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);
data = (char *)rnb + sizeof(*rnb);
if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
return false;
}
-static void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot)
+void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot)
{
struct inode *inode = file_inode(file);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
io->ci_lockreq = CILR_MANDATORY;
}
io->ci_noatime = file_is_noatime(file);
+ io->ci_async_readahead = false;
/* FLR: only use non-delay I/O for read as there is only one
 * available mirror for write. */
__u16 refcheck;
bool cached;
+ if (!iov_iter_count(to))
+ return 0;
+
/**
* Currently when PCC read failed, we do not fall back to the
* normal read path, just return the error.
ENTRY;
+ if (!iov_iter_count(from))
+ GOTO(out, rc_normal = 0);
+
/**
* When PCC write failed, we usually do not fall back to the normal
* write path, just return the error. But there is a special case when
if (result)
RETURN(result);
+ if (!iov_count)
+ RETURN(0);
+
# ifdef HAVE_IOV_ITER_INIT_DIRECTION
iov_iter_init(&to, READ, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
struct iovec iov = { .iov_base = buf, .iov_len = count };
struct kiocb kiocb;
ssize_t result;
+
ENTRY;
+ if (!count)
+ RETURN(0);
+
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
if (result)
RETURN(result);
+ if (!iov_count)
+ RETURN(0);
+
# ifdef HAVE_IOV_ITER_INIT_DIRECTION
iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
ENTRY;
+ if (!count)
+ RETURN(0);
+
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
rc = ll_heat_set(inode, flags);
RETURN(rc);
}
- case LL_IOC_PCC_DETACH:
+ case LL_IOC_PCC_DETACH: {
+ struct lu_pcc_detach *detach;
+
+ OBD_ALLOC_PTR(detach);
+ if (detach == NULL)
+ RETURN(-ENOMEM);
+
+ if (copy_from_user(detach,
+ (const struct lu_pcc_detach __user *)arg,
+ sizeof(*detach)))
+ GOTO(out_detach_free, rc = -EFAULT);
+
if (!S_ISREG(inode->i_mode))
- RETURN(-EINVAL);
+ GOTO(out_detach_free, rc = -EINVAL);
if (!inode_owner_or_capable(inode))
- RETURN(-EPERM);
+ GOTO(out_detach_free, rc = -EPERM);
- RETURN(pcc_ioctl_detach(inode));
+ rc = pcc_ioctl_detach(inode, detach->pccd_opt);
+out_detach_free:
+ OBD_FREE_PTR(detach);
+ RETURN(rc);
+ }
case LL_IOC_PCC_STATE: {
struct lu_pcc_state __user *ustate =
(struct lu_pcc_state __user *)arg;
* file_dentry() as is done otherwise.
*/
-#ifdef HAVE_FILE_FSYNC_4ARGS
int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file_dentry(file);
-#elif defined(HAVE_FILE_FSYNC_2ARGS)
-int ll_fsync(struct file *file, int datasync)
-{
- struct dentry *dentry = file_dentry(file);
- loff_t start = 0;
- loff_t end = LLONG_MAX;
-#else
-int ll_fsync(struct file *file, struct dentry *dentry, int datasync)
-{
- loff_t start = 0;
- loff_t end = LLONG_MAX;
-#endif
struct inode *inode = dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
struct ptlrpc_request *req;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), start %lld, end %lld, "
+ "datasync %d\n",
+ PFID(ll_inode2fid(inode)), inode, start, end, datasync);
+
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
-#ifdef HAVE_FILE_FSYNC_4ARGS
- rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- inode_lock(inode);
-#else
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
- rc = filemap_fdatawait(inode->i_mapping);
-#endif
+ rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ inode_lock(inode);
/* catch async errors that were recorded back when async writeback
* failed for pages in this mapping. */
fd->fd_write_failed = false;
}
-#ifdef HAVE_FILE_FSYNC_4ARGS
inode_unlock(inode);
-#endif
RETURN(rc);
}
if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
OBD_CONNECT2_DIR_MIGRATE)) {
if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
- ll_i2info(child_inode)->lli_lsm_md) {
+ ll_dir_striped(child_inode)) {
CERROR("%s: MDT doesn't support stripe directory "
"migration!\n", ll_i2sbi(parent)->ll_fsname);
GOTO(out_iput, rc = -EOPNOTSUPP);
/*
 * ->flock() stub installed when the client is mounted without flock
 * support: always fails with -ENOSYS.  The patched version below warns
 * once per open file handle (tracked via LL_FILE_FLOCK_WARNING in
 * fd_flags) that '-o [local]flock' is required to enable flock;
 * CDEBUG_LIMIT additionally rate-limits the message client-wide.
 */
static int
ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
{
- ENTRY;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ ENTRY;
- RETURN(-ENOSYS);
+ /*
+ * In order to avoid flood of warning messages, only print one message
+ * for one file. And the entire message rate on the client is limited
+ * by CDEBUG_LIMIT too.
+ */
+ if (!(fd->fd_flags & LL_FILE_FLOCK_WARNING)) {
+ fd->fd_flags |= LL_FILE_FLOCK_WARNING;
+ CDEBUG_LIMIT(D_TTY | D_CONSOLE,
+ "flock disabled, mount with '-o [local]flock' to enable\r\n");
+ }
+ RETURN(-ENOSYS);
}
/**
ldlm_lockname[mode]);
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
- for (i = 0; i <= MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
+ for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
policy.l_inodebits.bits = *bits & (1 << i);
if (policy.l_inodebits.bits == 0)
continue;
/* If it is striped directory, and there is bad stripe
* Let's revalidate the dentry again, instead of returning
* error */
- if (S_ISDIR(inode->i_mode) &&
- ll_i2info(inode)->lli_lsm_md != NULL)
+ if (ll_dir_striped(inode))
return 0;
/* This path cannot be hit for regular files unless in
LASSERT(lli->lli_lsm_md != NULL);
- /* foreign dir is not striped dir */
- if (lli->lli_lsm_md->lsm_md_magic == LMV_MAGIC_FOREIGN)
+ if (!lmv_dir_striped(lli->lli_lsm_md))
RETURN(0);
down_read(&lli->lli_lsm_sem);
}
} else {
/* If object isn't regular a file then don't validate size. */
- if (S_ISDIR(inode->i_mode) &&
- lli->lli_lsm_md != NULL) {
+ if (ll_dir_striped(inode)) {
rc = ll_merge_md_attr(inode);
if (rc < 0)
RETURN(rc);
/* mostly layout lock is caching on the local side, so try to
* match it before grabbing layout lock mutex. */
mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW);
+ LCK_CR | LCK_CW | LCK_PR |
+ LCK_PW | LCK_EX);
if (mode != 0) { /* hit cached lock */
rc = ll_layout_lock_set(&lockh, mode, inode);
if (rc == -EAGAIN)