*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/llite/file.c
*
#include <linux/user_namespace.h>
#include <linux/uidgid.h>
#include <linux/falloc.h>
+#include <linux/ktime.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <uapi/linux/llcrypt.h>
/* Usually the lease is not released when the
* application crashes, so we need to release it here. */
rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
- CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
- PFID(&lli->lli_fid), rc, lease_broken);
+ CDEBUG_LIMIT(rc ? D_ERROR : D_INODE,
+ "Clean up lease "DFID" %d/%d\n",
+ PFID(&lli->lli_fid), rc, lease_broken);
fd->fd_lease_och = NULL;
}
lli->lli_async_rc = 0;
}
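+ /* remember when this file was last closed, for the open heuristics in ll_track_file_opens() */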
+ lli->lli_close_fd_time = ktime_get();
+
rc = ll_md_close(inode, file);
if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
op_data->op_data = lmm;
op_data->op_data_size = lmmsize;
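+ /* fault injection: optionally delay the intent open when OBD_FAIL_LLITE_OPEN_DELAY is set */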
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LLITE_OPEN_DELAY, cfs_fail_val);
+
rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
&ll_md_blocking_ast, 0);
kfree(name);
GOTO(out, rc);
}
- rc = ll_prep_inode(&de->d_inode, req, NULL, itp);
+ rc = ll_prep_inode(&de->d_inode, &req->rq_pill, NULL, itp);
if (!rc && itp->it_lock_mode) {
__u64 bits = 0;
* of kernel will deal with that later.
*/
ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
- if (bits & MDS_INODELOCK_LOOKUP)
+ if (bits & MDS_INODELOCK_LOOKUP) {
d_lustre_revalidate(de);
+ ll_update_dir_depth(parent->d_inode, de->d_inode);
+ }
+
/* if the DoM bit is returned along with the LAYOUT bit then there
* can be read-on-open data returned.
*/
ptlrpc_req_finished(req);
ll_intent_drop_lock(itp);
- /* We did open by fid, but by the time we got to the server,
- * the object disappeared. If this is a create, we cannot really
- * tell the userspace that the file it was trying to create
- * does not exist. Instead let's return -ESTALE, and the VFS will
- * retry the create with LOOKUP_REVAL that we are going to catch
- * in ll_revalidate_dentry() and use lookup then.
+ /* We did open by fid, but by the time we got to the server, the object
+ * disappeared. This is possible if the object was unlinked, but it's
+ * also possible if the object was unlinked by a rename. In the case
+ * of an object renamed over our existing one, we can't fail this open.
+ * O_CREAT also goes through this path if we had an existing dentry,
+ * and it's obviously wrong to return ENOENT for O_CREAT.
+ *
+ * Instead let's return -ESTALE, and the VFS will retry the open with
+ * LOOKUP_REVAL, which we catch in ll_revalidate_dentry and fail to
+ * revalidate, causing a lookup. This causes extra lookups in the case
+ * where we had a dentry in cache but the file is being unlinked and we
+ * lose the race with unlink, but this should be very rare.
*/
- if (rc == -ENOENT && itp->it_op & IT_CREAT)
+ if (rc == -ENOENT)
rc = -ESTALE;
RETURN(rc);
RETURN(0);
}
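+/* Track per-inode open statistics: the interval between the last close and
+ * this open, and the number of opens seen within the ll_oc_max_ms window.
+ * These feed the open lock heuristic below that decides when to request
+ * MDS_OPEN_LOCK on open.
+ */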
+void ll_track_file_opens(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+
+ /* do not skew results with delays from never-opened inodes */
+ if (ktime_to_ns(lli->lli_close_fd_time))
+ ll_stats_ops_tally(sbi, LPROC_LL_INODE_OPCLTM,
+ ktime_us_delta(ktime_get(), lli->lli_close_fd_time));
+
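+ /* reset the open count if the previous close is older than ll_oc_max_ms */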
+ if (ktime_after(ktime_get(),
+ ktime_add_ms(lli->lli_close_fd_time,
+ sbi->ll_oc_max_ms))) {
+ lli->lli_open_fd_count = 1;
+ lli->lli_close_fd_time = ns_to_ktime(0);
+ } else {
+ lli->lli_open_fd_count++;
+ }
+
+ ll_stats_ops_tally(sbi, LPROC_LL_INODE_OCOUNT,
+ lli->lli_open_fd_count);
+}
+
/* Open a file, and (for the very first open) create objects on the OSTs at
* this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
* creation or open until ll_lov_setstripe() ioctl is called.
if (S_ISDIR(inode->i_mode))
ll_authorize_statahead(inode, fd);
+ ll_track_file_opens(inode);
if (is_root_inode(inode)) {
file->private_data = fd;
RETURN(0);
LASSERT(*och_usecount == 0);
if (!it->it_disposition) {
struct dentry *dentry = file_dentry(file);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_dentry_data *ldd;
/* We cannot just request lock handle now, new ELC code
* handle to be returned from LOOKUP|OPEN request,
* for example if the target entry was a symlink.
*
- * Only fetch MDS_OPEN_LOCK if this is in NFS path,
- * marked by a bit set in ll_iget_for_nfs. Clear the
- * bit so that it's not confusing later callers.
+ * In the NFS path we know there's pathological behavior,
+ * so we always enable open lock caching when coming
+ * from there. It's detected by a flag set in
+ * ll_iget_for_nfs.
*
- * NB; when ldd is NULL, it must have come via normal
- * lookup path only, since ll_iget_for_nfs always calls
- * ll_d_init().
+ * After reaching a certain number of opens of this inode
+ * we always ask for an open lock on it, to handle
+ * bad userspace actors that open and close files
+ * in a loop for no good reason.
+ */
+
ldd = ll_d2d(dentry);
- if (ldd && ldd->lld_nfs_dentry) {
+ if (filename_is_volatile(dentry->d_name.name,
+ dentry->d_name.len,
+ NULL)) {
+ /* There really is nothing to do here, but an
+ * explicit branch makes this more readable, I think.
+ * We do not want an open lock for volatile
+ * files under any circumstances.
+ */
+ } else if (ldd && ldd->lld_nfs_dentry) {
+ /* NFS path. This also happens to catch
+ * files opened by file handle.
+ */
+ it->it_flags |= MDS_OPEN_LOCK;
+ /* clear the flag for future lookups */
ldd->lld_nfs_dentry = 0;
- if (!filename_is_volatile(dentry->d_name.name,
- dentry->d_name.len,
- NULL))
+ } else if (sbi->ll_oc_thrsh_count > 0) {
+ /* Take MDS_OPEN_LOCK with many opens */
+ if (lli->lli_open_fd_count >=
+ sbi->ll_oc_thrsh_count)
+ it->it_flags |= MDS_OPEN_LOCK;
+
+ /* If this open comes soon after the last close */
+ else if (ktime_before(ktime_get(),
+ ktime_add_ms(lli->lli_close_fd_time,
+ sbi->ll_oc_thrsh_ms)))
it->it_flags |= MDS_OPEN_LOCK;
}
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_file_data *fd = file->private_data;
struct range_lock range;
+ bool range_locked = false;
struct cl_io *io;
ssize_t result = 0;
int rc = 0;
+ int rc2 = 0;
unsigned int retried = 0, dio_lock = 0;
bool is_aio = false;
+ bool is_parallel_dio = false;
struct cl_dio_aio *ci_aio = NULL;
size_t per_bytes;
bool partial_io = false;
if (file->f_flags & O_DIRECT) {
if (!is_sync_kiocb(args->u.normal.via_iocb))
is_aio = true;
- ci_aio = cl_aio_alloc(args->u.normal.via_iocb);
+
+ /* the kernel does not support AIO on pipes, and parallel DIO
+ * uses part of the AIO path, so we must not do parallel dio
+ * to pipes
+ */
+ is_parallel_dio = !iov_iter_is_pipe(args->u.normal.via_iter) &&
+ !is_aio;
+
+ if (!ll_sbi_has_parallel_dio(sbi))
+ is_parallel_dio = false;
+
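+ /* the aio struct carries the cl_sync_io anchor that parallel DIO waits on below */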
+ ci_aio = cl_aio_alloc(args->u.normal.via_iocb,
+ ll_i2info(inode)->lli_clob);
if (!ci_aio)
GOTO(out, rc = -ENOMEM);
}
io->ci_aio = ci_aio;
io->ci_dio_lock = dio_lock;
io->ci_ndelay_tried = retried;
+ io->ci_parallel_dio = is_parallel_dio;
if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
- bool range_locked = false;
-
if (file->f_flags & O_APPEND)
range_lock_init(&range, 0, LUSTRE_EOF);
else
rc = cl_io_loop(env, io);
ll_cl_remove(file, env);
- if (range_locked) {
+ if (range_locked && !is_parallel_dio) {
CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
RL_PARA(&range));
range_unlock(&lli->lli_write_tree, &range);
+ range_locked = false;
}
} else {
/* cl_io_rw_init() handled IO */
rc = io->ci_result;
}
+ /* NB: parallel DIO may be disabled during I/O submission;
+ * if that occurs, async RPCs are resolved before we get here, and this
+ * wait call completes immediately.
+ */
+ if (is_parallel_dio) {
+ struct cl_sync_io *anchor = &io->ci_aio->cda_sync;
+
+ /* for dio, EIOCBQUEUED is an implementation detail,
+ * and we don't return it to userspace
+ */
+ if (rc == -EIOCBQUEUED)
+ rc = 0;
+
+ rc2 = cl_sync_io_wait_recycle(env, anchor, 0, 0);
+ if (rc2 < 0)
+ rc = rc2;
+
+ if (range_locked) {
+ range_unlock(&lli->lli_write_tree, &range);
+ range_locked = false;
+ }
+ }
+
/*
* In order to move AIO forward, ci_nob was increased,
* but that doesn't mean the io has finished, it just
*/
if (io->ci_nob > 0) {
if (!is_aio) {
- result += io->ci_nob;
- *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ if (rc2 == 0) {
+ result += io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ } else if (rc2) {
+ result = 0;
+ }
}
count -= io->ci_nob;
cl_sync_io_note(env, &io->ci_aio->cda_sync,
rc == -EIOCBQUEUED ? 0 : rc);
if (!is_aio) {
- cl_aio_free(io->ci_aio);
+ cl_aio_free(env, io->ci_aio);
io->ci_aio = NULL;
}
}
descr->cld_mode = cl_mode;
/* CEF_MUST is used because we do not want to convert a
* lockahead request to a lockless lock */
- descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND |
- CEF_NONBLOCK;
+ descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND;
if (ladvise->lla_peradvice_flags & LF_ASYNC)
descr->cld_enq_flags |= CEF_SPECULATIVE;
RETURN(0);
}
-int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+int ll_ioctl_check_project(struct inode *inode, __u32 xflags,
+ __u32 projid)
{
/*
* Project Quota ID state is only allowed to change from within the init
* namespace. Enforce that restriction only if we are trying to change
* the quota ID state. Everything else is allowed in user namespaces.
*/
- if (current_user_ns() == &init_user_ns)
+ if (current_user_ns() == &init_user_ns) {
+ /*
+ * The caller is allowed to change the project ID. If it is being
+ * changed, make sure that the new value is valid.
+ */
+ if (ll_i2info(inode)->lli_projid != projid &&
+ !projid_valid(make_kprojid(&init_user_ns, projid)))
+ return -EINVAL;
+
return 0;
+ }
- if (ll_i2info(inode)->lli_projid != fa->fsx_projid)
+ if (ll_i2info(inode)->lli_projid != projid)
return -EINVAL;
if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
- if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+ if (!(xflags & FS_XFLAG_PROJINHERIT))
return -EINVAL;
} else {
- if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ if (xflags & FS_XFLAG_PROJINHERIT)
return -EINVAL;
}
return 0;
}
-int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
- unsigned long arg)
+static int ll_set_project(struct inode *inode, __u32 xflags, __u32 projid)
{
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
- struct fsxattr fsxattr;
struct cl_object *obj;
unsigned int inode_flags;
int rc = 0;
- if (copy_from_user(&fsxattr,
- (const struct fsxattr __user *)arg,
- sizeof(fsxattr)))
- RETURN(-EFAULT);
-
- rc = ll_ioctl_check_project(inode, &fsxattr);
+ rc = ll_ioctl_check_project(inode, xflags, projid);
if (rc)
RETURN(rc);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- inode_flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
+ inode_flags = ll_xflags_to_inode_flags(xflags);
op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags);
- if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
+ if (xflags & FS_XFLAG_PROJINHERIT)
op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
- op_data->op_projid = fsxattr.fsx_projid;
+ op_data->op_projid = projid;
op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
ptlrpc_req_finished(req);
ll_update_inode_flags(inode, op_data->op_attr_flags);
/* Avoid OST RPC if this is only ioctl setting project inherit flag */
- if (fsxattr.fsx_xflags == 0 ||
- fsxattr.fsx_xflags == FS_XFLAG_PROJINHERIT)
+ if (xflags == 0 || xflags == FS_XFLAG_PROJINHERIT)
GOTO(out_fsxattr, rc);
obj = ll_i2info(inode)->lli_clob;
if (obj) {
struct iattr attr = { 0 };
- rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS,
- fsxattr.fsx_xflags);
+ rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS, xflags);
}
out_fsxattr:
RETURN(rc);
}
+int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fsxattr fsxattr;
+
+ ENTRY;
+
+ if (copy_from_user(&fsxattr,
+ (const struct fsxattr __user *)arg,
+ sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ RETURN(ll_set_project(inode, fsxattr.fsx_xflags,
+ fsxattr.fsx_projid));
+}
+
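+/* Handle LL_IOC_PROJECT: get or set the project ID and xflags of this file,
+ * or of a named child entry when lu_project.project_name is non-empty.
+ */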
+int ll_ioctl_project(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct lu_project lu_project;
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = file_inode(file);
+ struct dentry *child_dentry = NULL;
+ int rc = 0, name_len;
+
+ if (copy_from_user(&lu_project,
+ (const struct lu_project __user *)arg,
+ sizeof(lu_project)))
+ RETURN(-EFAULT);
+
+ /* switch to the named child dentry if a valid name was supplied */
+ name_len = strnlen(lu_project.project_name, NAME_MAX);
+ if (name_len > 0 && name_len <= NAME_MAX) {
+ inode_lock(inode);
+ child_dentry = lookup_one_len(lu_project.project_name,
+ dentry, name_len);
+ inode_unlock(inode);
+ if (IS_ERR(child_dentry)) {
+ rc = PTR_ERR(child_dentry);
+ goto out;
+ }
+ inode = child_dentry->d_inode;
+ if (!inode) {
+ rc = -ENOENT;
+ goto out;
+ }
+ } else if (name_len > NAME_MAX) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ switch (lu_project.project_type) {
+ case LU_PROJECT_SET:
+ rc = ll_set_project(inode, lu_project.project_xflags,
+ lu_project.project_id);
+ break;
+ case LU_PROJECT_GET:
+ lu_project.project_xflags =
+ ll_inode_flags_to_xflags(inode->i_flags);
+ if (test_bit(LLIF_PROJECT_INHERIT,
+ &ll_i2info(inode)->lli_flags))
+ lu_project.project_xflags |= FS_XFLAG_PROJINHERIT;
+ lu_project.project_id = ll_i2info(inode)->lli_projid;
+ if (copy_to_user((struct lu_project __user *)arg,
+ &lu_project, sizeof(lu_project))) {
+ rc = -EFAULT;
+ goto out;
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+out:
+ if (!IS_ERR_OR_NULL(child_dentry))
+ dput(child_dentry);
+ RETURN(rc);
+}
+
static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
unsigned long arg)
{
case LL_LEASE_LAYOUT_SPLIT:
if (layout_file)
fput(layout_file);
+
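+ /* pick up the layout version changed by the split */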
+ ll_layout_refresh(inode, &fd->fd_layout_version);
break;
case LL_LEASE_PCC_ATTACH:
if (!rc)
RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
case FS_IOC_FSSETXATTR:
RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
+ case LL_IOC_PROJECT:
+ RETURN(ll_ioctl_project(file, cmd, arg));
case BLKSSZGET:
RETURN(put_user(PAGE_SIZE, (int __user *)arg));
case LL_IOC_HEAT_GET: {
/* fsync's caller has already called _fdata{sync,write}, so we want
* that IO to finish before calling the osc and mdc sync methods */
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- inode_lock(inode);
/* catch async errors that were recorded back when async writeback
* failed for pages in this mapping. */
fd->fd_write_failed = false;
}
- inode_unlock(inode);
-
if (!rc)
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
ktime_us_delta(ktime_get(), kstart));
*fid = body->mbo_fid1;
if (inode != NULL)
- rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
+ rc = ll_prep_inode(inode, &req->rq_pill, parent->i_sb, NULL);
out_req:
ptlrpc_req_finished(req);
RETURN(rc);
PFID(ll_inode2fid(child_inode)));
GOTO(out_iput, rc = -ENOKEY);
}
+ if (unlikely(!llcrypt_policy_has_filename_enc(child_inode))) {
+ CDEBUG(D_SEC,
+ "cannot migrate old format encrypted "DFID", please move to new enc dir first\n",
+ PFID(ll_inode2fid(child_inode)));
+ GOTO(out_iput, rc = -EUCLEAN);
+ }
}
op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
struct lu_env *env;
struct cl_io *io;
__u16 refcheck;
- int rc; loff_t sa_falloc_end;
+ int rc;
loff_t size = i_size_read(inode);
ENTRY;
io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu);
io->u.ci_setattr.sa_falloc_mode = mode;
io->u.ci_setattr.sa_falloc_offset = offset;
- io->u.ci_setattr.sa_falloc_len = len;
- io->u.ci_setattr.sa_falloc_end = io->u.ci_setattr.sa_falloc_offset +
- io->u.ci_setattr.sa_falloc_len;
+ io->u.ci_setattr.sa_falloc_end = offset + len;
io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE;
- sa_falloc_end = io->u.ci_setattr.sa_falloc_end;
- if (sa_falloc_end > size) {
+ if (io->u.ci_setattr.sa_falloc_end > size) {
+ loff_t newsize = io->u.ci_setattr.sa_falloc_end;
+
/* Check new size against VFS/VM file size limit and rlimit */
- rc = inode_newsize_ok(inode, sa_falloc_end);
+ rc = inode_newsize_ok(inode, newsize);
if (rc)
goto out;
- if (sa_falloc_end > ll_file_maxbytes(inode)) {
+ if (newsize > ll_file_maxbytes(inode)) {
CDEBUG(D_INODE, "file size too large %llu > %llu\n",
- (unsigned long long)(sa_falloc_end),
+ (unsigned long long)newsize,
ll_file_maxbytes(inode));
rc = -EFBIG;
goto out;
}
}
-again:
- if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0)
- rc = cl_io_loop(env, io);
- else
- rc = io->ci_result;
-
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
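+ /* retry the whole setattr IO while the lower layers request a restart */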
+ do {
+ rc = cl_io_init(env, io, CIT_SETATTR, io->ci_obj);
+ if (!rc)
+ rc = cl_io_loop(env, io);
+ else
+ rc = io->ci_result;
+ cl_io_fini(env, io);
+ } while (unlikely(io->ci_need_restart));
out:
cl_env_put(env, &refcheck);
struct inode *inode = filp->f_path.dentry->d_inode;
int rc;
+ if (offset < 0 || len <= 0)
+ RETURN(-EINVAL);
/*
* Encrypted inodes can't handle collapse range or zero range or insert
* range since we would need to re-encrypt blocks with a different IV or
RETURN(-EOPNOTSUPP);
/*
- * Only mode == 0 (which is standard prealloc) is supported now.
- * Punch is not supported yet.
+ * mode == 0 (which is standard prealloc) and PUNCH are supported.
+ * The rest of the mode options are not supported yet.
*/
- if (mode & ~FALLOC_FL_KEEP_SIZE)
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
RETURN(-EOPNOTSUPP);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1);
lock = ldlm_handle2lock(lockh);
LASSERT(lock != NULL);
- LASSERT(ldlm_has_layout(lock));
+
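+ /* the lock found by the handle may no longer carry the layout bit; return -EAGAIN instead of asserting */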
+ if (!ldlm_has_layout(lock))
+ GOTO(out, rc = -EAGAIN);
LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
PFID(&lli->lli_fid), inode);