op_data->op_data = lmm;
op_data->op_data_size = lmmsize;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LLITE_OPEN_DELAY, cfs_fail_val);
+
rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
&ll_md_blocking_ast, 0);
kfree(name);
* of kernel will deal with that later.
*/
ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
- if (bits & MDS_INODELOCK_LOOKUP)
+ if (bits & MDS_INODELOCK_LOOKUP) {
d_lustre_revalidate(de);
+ ll_update_dir_depth(parent->d_inode, de->d_inode);
+ }
+
/* if DoM bit returned along with LAYOUT bit then there
* can be read-on-open data returned.
*/
ptlrpc_req_finished(req);
ll_intent_drop_lock(itp);
- /* We did open by fid, but by the time we got to the server,
- * the object disappeared. If this is a create, we cannot really
- * tell the userspace that the file it was trying to create
- * does not exist. Instead let's return -ESTALE, and the VFS will
- * retry the create with LOOKUP_REVAL that we are going to catch
- * in ll_revalidate_dentry() and use lookup then.
+ /* We did open by fid, but by the time we got to the server, the object
+ * disappeared. This is possible if the object was unlinked, but it's
+ * also possible if the object was unlinked by a rename. In the case
+ * of an object renamed over our existing one, we can't fail this open.
+ * O_CREAT also goes through this path if we had an existing dentry,
+ * and it's obviously wrong to return ENOENT for O_CREAT.
+ *
+ * Instead let's return -ESTALE, and the VFS will retry the open with
+ * LOOKUP_REVAL, which we catch in ll_revalidate_dentry and fail to
+ * revalidate, causing a lookup. This causes extra lookups in the case
+ * where we had a dentry in cache but the file is being unlinked and we
+ * lose the race with unlink, but this should be very rare.
*/
- if (rc == -ENOENT && itp->it_op & IT_CREAT)
+ if (rc == -ENOENT)
rc = -ESTALE;
RETURN(rc);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_file_data *fd = file->private_data;
struct range_lock range;
+ bool range_locked = false;
struct cl_io *io;
ssize_t result = 0;
int rc = 0;
+ int rc2 = 0;
unsigned int retried = 0, dio_lock = 0;
bool is_aio = false;
+ bool is_parallel_dio = false;
struct cl_dio_aio *ci_aio = NULL;
size_t per_bytes;
bool partial_io = false;
if (file->f_flags & O_DIRECT) {
if (!is_sync_kiocb(args->u.normal.via_iocb))
is_aio = true;
- ci_aio = cl_aio_alloc(args->u.normal.via_iocb);
+
+ /* the kernel does not support AIO on pipes, and parallel DIO
+ * uses part of the AIO path, so we must not do parallel dio
+ * to pipes
+ */
+ is_parallel_dio = !iov_iter_is_pipe(args->u.normal.via_iter) &&
+ !is_aio;
+
+ if (!ll_sbi_has_parallel_dio(sbi))
+ is_parallel_dio = false;
+
+ ci_aio = cl_aio_alloc(args->u.normal.via_iocb,
+ ll_i2info(inode)->lli_clob);
if (!ci_aio)
GOTO(out, rc = -ENOMEM);
}
io->ci_aio = ci_aio;
io->ci_dio_lock = dio_lock;
io->ci_ndelay_tried = retried;
+ io->ci_parallel_dio = is_parallel_dio;
if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
- bool range_locked = false;
-
if (file->f_flags & O_APPEND)
range_lock_init(&range, 0, LUSTRE_EOF);
else
rc = cl_io_loop(env, io);
ll_cl_remove(file, env);
- if (range_locked) {
+ if (range_locked && !is_parallel_dio) {
CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
RL_PARA(&range));
range_unlock(&lli->lli_write_tree, &range);
+ range_locked = false;
}
} else {
/* cl_io_rw_init() handled IO */
rc = io->ci_result;
}
+	/* N.B.: parallel DIO may be disabled during I/O submission;
+ * if that occurs, async RPCs are resolved before we get here, and this
+ * wait call completes immediately.
+ */
+ if (is_parallel_dio) {
+ struct cl_sync_io *anchor = &io->ci_aio->cda_sync;
+
+ /* for dio, EIOCBQUEUED is an implementation detail,
+ * and we don't return it to userspace
+ */
+ if (rc == -EIOCBQUEUED)
+ rc = 0;
+
+ rc2 = cl_sync_io_wait_recycle(env, anchor, 0, 0);
+ if (rc2 < 0)
+ rc = rc2;
+
+ if (range_locked) {
+ range_unlock(&lli->lli_write_tree, &range);
+ range_locked = false;
+ }
+ }
+
/*
* In order to move forward AIO, ci_nob was increased,
* but that doesn't mean io have been finished, it just
*/
if (io->ci_nob > 0) {
if (!is_aio) {
- result += io->ci_nob;
- *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ if (rc2 == 0) {
+ result += io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ } else if (rc2) {
+ result = 0;
+ }
}
count -= io->ci_nob;
cl_sync_io_note(env, &io->ci_aio->cda_sync,
rc == -EIOCBQUEUED ? 0 : rc);
if (!is_aio) {
- cl_aio_free(io->ci_aio);
+ cl_aio_free(env, io->ci_aio);
io->ci_aio = NULL;
}
}
RETURN(0);
}
-int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+/* Check whether the requested project-quota change (@xflags, @projid) is
+ * permitted on @inode for the calling user namespace.
+ *
+ * \retval 0	   change allowed
+ * \retval -EINVAL change forbidden: invalid projid, projid change from a
+ *		   non-init user namespace, or a PROJINHERIT xflag that
+ *		   does not match the inode's current inherit state
+ */
+int ll_ioctl_check_project(struct inode *inode, __u32 xflags,
+			   __u32 projid)
 {
	/*
	 * Project Quota ID state is only allowed to change from within the init
	 * namespace. Enforce that restriction only if we are trying to change
	 * the quota ID state. Everything else is allowed in user namespaces.
	 */
-	if (current_user_ns() == &init_user_ns)
+	if (current_user_ns() == &init_user_ns) {
+		/*
+		 * Caller is allowed to change the project ID. if it is being
+		 * changed, make sure that the new value is valid.
+		 */
+		if (ll_i2info(inode)->lli_projid != projid &&
+		    !projid_valid(make_kprojid(&init_user_ns, projid)))
+			return -EINVAL;
+
		return 0;
+	}
+	/* In a non-init namespace the project ID itself may not change... */
-	if (ll_i2info(inode)->lli_projid != fa->fsx_projid)
+	if (ll_i2info(inode)->lli_projid != projid)
		return -EINVAL;
+	/* ...and the PROJINHERIT xflag must match the inode's flag. */
	if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
-		if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+		if (!(xflags & FS_XFLAG_PROJINHERIT))
			return -EINVAL;
	} else {
-		if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+		if (xflags & FS_XFLAG_PROJINHERIT)
			return -EINVAL;
	}
	return 0;
 }
-int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
- unsigned long arg)
+static int ll_set_project(struct inode *inode, __u32 xflags, __u32 projid)
{
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
- struct fsxattr fsxattr;
struct cl_object *obj;
unsigned int inode_flags;
int rc = 0;
- if (copy_from_user(&fsxattr,
- (const struct fsxattr __user *)arg,
- sizeof(fsxattr)))
- RETURN(-EFAULT);
-
- rc = ll_ioctl_check_project(inode, &fsxattr);
+ rc = ll_ioctl_check_project(inode, xflags, projid);
if (rc)
RETURN(rc);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- inode_flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
+ inode_flags = ll_xflags_to_inode_flags(xflags);
op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags);
- if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
+ if (xflags & FS_XFLAG_PROJINHERIT)
op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
- op_data->op_projid = fsxattr.fsx_projid;
+ op_data->op_projid = projid;
op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
ptlrpc_req_finished(req);
ll_update_inode_flags(inode, op_data->op_attr_flags);
/* Avoid OST RPC if this is only ioctl setting project inherit flag */
- if (fsxattr.fsx_xflags == 0 ||
- fsxattr.fsx_xflags == FS_XFLAG_PROJINHERIT)
+ if (xflags == 0 || xflags == FS_XFLAG_PROJINHERIT)
GOTO(out_fsxattr, rc);
obj = ll_i2info(inode)->lli_clob;
if (obj) {
struct iattr attr = { 0 };
- rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS,
- fsxattr.fsx_xflags);
+ rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS, xflags);
}
out_fsxattr:
RETURN(rc);
}
+/* Handler for the FS_IOC_FSSETXATTR ioctl: copy a struct fsxattr from
+ * userspace and apply its project xflags/ID through ll_set_project().
+ *
+ * \param inode	inode whose project state is updated
+ * \param cmd	ioctl command number (not referenced in this handler)
+ * \param arg	userspace pointer to a struct fsxattr
+ *
+ * \retval 0 on success, -EFAULT if the userspace copy faults, otherwise
+ *	   the return value of ll_set_project()
+ */
+int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
+			unsigned long arg)
+{
+	struct fsxattr fsxattr;
+
+	ENTRY;
+
+	if (copy_from_user(&fsxattr,
+			   (const struct fsxattr __user *)arg,
+			   sizeof(fsxattr)))
+		RETURN(-EFAULT);
+
+	RETURN(ll_set_project(inode, fsxattr.fsx_xflags,
+			      fsxattr.fsx_projid));
+}
+
+/* Handler for the LL_IOC_PROJECT ioctl: get or set project quota state
+ * (xflags + project ID), either on @file's own inode or, if
+ * lu_project.project_name is non-empty, on that named child of the
+ * directory @file refers to.
+ *
+ * \param file	file/directory the ioctl was issued on
+ * \param cmd	ioctl command number (not referenced in this handler)
+ * \param arg	userspace pointer to a struct lu_project
+ *
+ * \retval 0 on success, negative errno on failure
+ */
+int ll_ioctl_project(struct file *file, unsigned int cmd,
+		     unsigned long arg)
+{
+	struct lu_project lu_project;
+	struct dentry *dentry = file_dentry(file);
+	struct inode *inode = file_inode(file);
+	struct dentry *child_dentry = NULL;
+	int rc = 0, name_len;
+
+	ENTRY;
+
+	if (copy_from_user(&lu_project,
+			   (const struct lu_project __user *)arg,
+			   sizeof(lu_project)))
+		RETURN(-EFAULT);
+
+	/* Scan the whole embedded name buffer (the struct was copied in by
+	 * value above) rather than stopping at NAME_MAX, so an over-long or
+	 * unterminated name is detected and rejected.  With the previous
+	 * strnlen(name, NAME_MAX) bound the "> NAME_MAX" branch could never
+	 * trigger and such names were silently truncated.
+	 */
+	name_len = strnlen(lu_project.project_name,
+			   sizeof(lu_project.project_name));
+	if (name_len > NAME_MAX) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* apply child dentry if name is valid */
+	if (name_len > 0) {
+		/* serialize the lookup against concurrent directory
+		 * modification via the parent inode lock
+		 */
+		inode_lock(inode);
+		child_dentry = lookup_one_len(lu_project.project_name,
+					      dentry, name_len);
+		inode_unlock(inode);
+		if (IS_ERR(child_dentry)) {
+			rc = PTR_ERR(child_dentry);
+			goto out;
+		}
+		/* operate on the child's inode from here on */
+		inode = child_dentry->d_inode;
+		if (!inode) {
+			rc = -ENOENT;
+			goto out;
+		}
+	}
+
+	switch (lu_project.project_type) {
+	case LU_PROJECT_SET:
+		rc = ll_set_project(inode, lu_project.project_xflags,
+				    lu_project.project_id);
+		break;
+	case LU_PROJECT_GET:
+		lu_project.project_xflags =
+				ll_inode_flags_to_xflags(inode->i_flags);
+		if (test_bit(LLIF_PROJECT_INHERIT,
+			     &ll_i2info(inode)->lli_flags))
+			lu_project.project_xflags |= FS_XFLAG_PROJINHERIT;
+		lu_project.project_id = ll_i2info(inode)->lli_projid;
+		if (copy_to_user((struct lu_project __user *)arg,
+				 &lu_project, sizeof(lu_project))) {
+			rc = -EFAULT;
+			goto out;
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+out:
+	if (!IS_ERR_OR_NULL(child_dentry))
+		dput(child_dentry);
+	RETURN(rc);
+}
+
static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
unsigned long arg)
{
RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
case FS_IOC_FSSETXATTR:
RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
+ case LL_IOC_PROJECT:
+ RETURN(ll_ioctl_project(file, cmd, arg));
case BLKSSZGET:
RETURN(put_user(PAGE_SIZE, (int __user *)arg));
case LL_IOC_HEAT_GET: {
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- inode_lock(inode);
/* catch async errors that were recorded back when async writeback
* failed for pages in this mapping. */
fd->fd_write_failed = false;
}
- inode_unlock(inode);
-
if (!rc)
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
ktime_us_delta(ktime_get(), kstart));
PFID(ll_inode2fid(child_inode)));
GOTO(out_iput, rc = -ENOKEY);
}
+ if (unlikely(!llcrypt_policy_has_filename_enc(child_inode))) {
+ CDEBUG(D_SEC,
+ "cannot migrate old format encrypted "DFID", please move to new enc dir first\n",
+ PFID(ll_inode2fid(child_inode)));
+ GOTO(out_iput, rc = -EUCLEAN);
+ }
}
op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
lock = ldlm_handle2lock(lockh);
LASSERT(lock != NULL);
- LASSERT(ldlm_has_layout(lock));
+
+ if (!ldlm_has_layout(lock))
+ GOTO(out, rc = -EAGAIN);
LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
PFID(&lli->lli_fid), inode);