#include <linux/file.h>
#include <linux/sched.h>
#include <linux/user_namespace.h>
-#ifdef HAVE_UIDGID_HEADER
-# include <linux/uidgid.h>
-#endif
+#include <linux/uidgid.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_swab.h>
op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
op_data->op_attr.ia_valid |= ATTR_SIZE;
op_data->op_xvalid |= OP_XVALID_BLOCKS;
+ /* fallthrough */
case MDS_CLOSE_LAYOUT_SPLIT:
case MDS_CLOSE_LAYOUT_SWAP: {
struct split_param *sp = data;
.l_inodebits = { MDS_INODELOCK_OPEN },
};
__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_inode_info *lli = ll_i2info(inode);
struct lustre_handle lockh;
enum ldlm_mode lockmode;
rc = ll_md_real_close(inode, fd->fd_omode);
out:
- LUSTRE_FPRIVATE(file) = NULL;
+ file->private_data = NULL;
ll_file_data_put(fd);
RETURN(rc);
*/
int ll_file_release(struct inode *inode, struct file *file)
{
- struct ll_file_data *fd;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
- ENTRY;
+ struct ll_file_data *fd;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ ktime_t kstart = ktime_get();
+ int rc;
+
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(ll_inode2fid(inode)), inode);
- if (inode->i_sb->s_root != file_dentry(file))
- ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
- fd = LUSTRE_FPRIVATE(file);
- LASSERT(fd != NULL);
+ fd = file->private_data;
+ LASSERT(fd != NULL);
/* The last ref on @file, maybe not the the owner pid of statahead,
* because parent and child process can share the same file handle. */
ll_deauthorize_statahead(inode, fd);
if (inode->i_sb->s_root == file_dentry(file)) {
- LUSTRE_FPRIVATE(file) = NULL;
+ file->private_data = NULL;
ll_file_data_put(fd);
- RETURN(0);
+ GOTO(out, rc = 0);
}
pcc_file_release(inode, file);
if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
libcfs_debug_dumplog();
+out:
+ if (!rc && inode->i_sb->s_root != file_dentry(file))
+ ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
+ ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
}
if (obj == NULL)
RETURN_EXIT;
- if (!req_capsule_has_field(&req->rq_pill, &RMF_NIOBUF_INLINE,
- RCL_SERVER))
+ if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
+ RCL_SERVER))
RETURN_EXIT;
rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
* client PAGE_SIZE to be used on that client, if server's PAGE_SIZE is
* smaller then offset may be not aligned and that data is just ignored.
*/
- if (rnb->rnb_offset % PAGE_SIZE)
+ if (rnb->rnb_offset & ~PAGE_MASK)
RETURN_EXIT;
/* Server returns whole file or just file tail if it fills in reply
data = (char *)rnb + sizeof(*rnb);
lnb.lnb_file_offset = rnb->rnb_offset;
- start = lnb.lnb_file_offset / PAGE_SIZE;
+ start = lnb.lnb_file_offset >> PAGE_SHIFT;
index = 0;
- LASSERT(lnb.lnb_file_offset % PAGE_SIZE == 0);
+ LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
lnb.lnb_page_offset = 0;
do {
lnb.lnb_data = data + (index << PAGE_SHIFT);
struct inode *inode = file_inode(file);
ENTRY;
- LASSERT(!LUSTRE_FPRIVATE(file));
+ LASSERT(!file->private_data);
LASSERT(fd != NULL);
RETURN(rc);
}
- LUSTRE_FPRIVATE(file) = fd;
+ file->private_data = fd;
ll_readahead_init(inode, &fd->fd_ras);
fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
struct obd_client_handle **och_p = NULL;
__u64 *och_usecount = NULL;
struct ll_file_data *fd;
+ ktime_t kstart = ktime_get();
int rc = 0;
ENTRY;
ll_authorize_statahead(inode, fd);
if (inode->i_sb->s_root == file_dentry(file)) {
- LUSTRE_FPRIVATE(file) = fd;
- RETURN(0);
- }
+ file->private_data = fd;
+ RETURN(0);
+ }
if (!it || !it->it_disposition) {
- /* Convert f_flags into access mode. We cannot use file->f_mode,
- * because everything but O_ACCMODE mask was stripped from
- * there */
- if ((oit.it_flags + 1) & O_ACCMODE)
- oit.it_flags++;
- if (file->f_flags & O_TRUNC)
- oit.it_flags |= FMODE_WRITE;
+ /* Convert f_flags into access mode. We cannot use file->f_mode,
+ * because everything but O_ACCMODE mask was stripped from
+ * there */
+ if ((oit.it_flags + 1) & O_ACCMODE)
+ oit.it_flags++;
+ if (file->f_flags & O_TRUNC)
+ oit.it_flags |= FMODE_WRITE;
/* kernel only call f_op->open in dentry_open. filp_open calls
* dentry_open after call to open_namei that checks permissions.
if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
- /* We do not want O_EXCL here, presumably we opened the file
- * already? XXX - NFS implications? */
- oit.it_flags &= ~O_EXCL;
+ /* We do not want O_EXCL here, presumably we opened the file
+ * already? XXX - NFS implications? */
+ oit.it_flags &= ~O_EXCL;
- /* bug20584, if "it_flags" contains O_CREAT, the file will be
- * created if necessary, then "IT_CREAT" should be set to keep
- * consistent with it */
- if (oit.it_flags & O_CREAT)
- oit.it_op |= IT_CREAT;
+ /* bug20584, if "it_flags" contains O_CREAT, the file will be
+ * created if necessary, then "IT_CREAT" should be set to keep
+ * consistent with it */
+ if (oit.it_flags & O_CREAT)
+ oit.it_op |= IT_CREAT;
- it = &oit;
- }
+ it = &oit;
+ }
restart:
- /* Let's see if we have file open on MDS already. */
- if (it->it_flags & FMODE_WRITE) {
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else if (it->it_flags & FMODE_EXEC) {
- och_p = &lli->lli_mds_exec_och;
- och_usecount = &lli->lli_open_fd_exec_count;
- } else {
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
+ /* Let's see if we have file open on MDS already. */
+ if (it->it_flags & FMODE_WRITE) {
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else if (it->it_flags & FMODE_EXEC) {
+ och_p = &lli->lli_mds_exec_och;
+ och_usecount = &lli->lli_open_fd_exec_count;
+ } else {
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
mutex_lock(&lli->lli_och_mutex);
- if (*och_p) { /* Open handle is present */
- if (it_disposition(it, DISP_OPEN_OPEN)) {
- /* Well, there's extra open request that we do not need,
- let's close it somehow. This will decref request. */
- rc = it_open_error(DISP_OPEN_OPEN, it);
- if (rc) {
+ if (*och_p) { /* Open handle is present */
+ if (it_disposition(it, DISP_OPEN_OPEN)) {
+ /* Well, there's extra open request that we do not need,
+ * let's close it somehow. This will decref request. */
+ rc = it_open_error(DISP_OPEN_OPEN, it);
+ if (rc) {
mutex_unlock(&lli->lli_och_mutex);
- GOTO(out_openerr, rc);
- }
+ GOTO(out_openerr, rc);
+ }
ll_release_openhandle(file_dentry(file), it);
- }
- (*och_usecount)++;
+ }
+ (*och_usecount)++;
- rc = ll_local_open(file, it, fd, NULL);
- if (rc) {
- (*och_usecount)--;
+ rc = ll_local_open(file, it, fd, NULL);
+ if (rc) {
+ (*och_usecount)--;
mutex_unlock(&lli->lli_och_mutex);
- GOTO(out_openerr, rc);
- }
- } else {
- LASSERT(*och_usecount == 0);
+ GOTO(out_openerr, rc);
+ }
+ } else {
+ LASSERT(*och_usecount == 0);
if (!it->it_disposition) {
- struct ll_dentry_data *ldd = ll_d2d(file->f_path.dentry);
- /* We cannot just request lock handle now, new ELC code
- means that one of other OPEN locks for this file
- could be cancelled, and since blocking ast handler
- would attempt to grab och_mutex as well, that would
- result in a deadlock */
+ struct dentry *dentry = file_dentry(file);
+ struct ll_dentry_data *ldd;
+
+ /* We cannot just request lock handle now, new ELC code
+ * means that one of other OPEN locks for this file
+ * could be cancelled, and since blocking ast handler
+ * would attempt to grab och_mutex as well, that would
+ * result in a deadlock
+ */
mutex_unlock(&lli->lli_och_mutex);
/*
* Normally called under two situations:
* lookup path only, since ll_iget_for_nfs always calls
* ll_d_init().
*/
+ ldd = ll_d2d(dentry);
if (ldd && ldd->lld_nfs_dentry) {
ldd->lld_nfs_dentry = 0;
- it->it_flags |= MDS_OPEN_LOCK;
+ if (!filename_is_volatile(dentry->d_name.name,
+ dentry->d_name.len,
+ NULL))
+ it->it_flags |= MDS_OPEN_LOCK;
}
- /*
+ /*
* Always specify MDS_OPEN_BY_FID because we don't want
* to get file with different fid.
*/
it->it_flags |= MDS_OPEN_BY_FID;
- rc = ll_intent_file_open(file_dentry(file), NULL, 0,
- it);
- if (rc)
- GOTO(out_openerr, rc);
+ rc = ll_intent_file_open(dentry, NULL, 0, it);
+ if (rc)
+ GOTO(out_openerr, rc);
- goto restart;
- }
- OBD_ALLOC(*och_p, sizeof (struct obd_client_handle));
- if (!*och_p)
- GOTO(out_och_free, rc = -ENOMEM);
+ goto restart;
+ }
+ OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
+ if (!*och_p)
+ GOTO(out_och_free, rc = -ENOMEM);
- (*och_usecount)++;
+ (*och_usecount)++;
- /* md_intent_lock() didn't get a request ref if there was an
- * open error, so don't do cleanup on the request here
- * (bug 3430) */
- /* XXX (green): Should not we bail out on any error here, not
- * just open error? */
+ /* md_intent_lock() didn't get a request ref if there was an
+ * open error, so don't do cleanup on the request here
+ * (bug 3430) */
+ /* XXX (green): Should not we bail out on any error here, not
+ * just open error? */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc != 0)
GOTO(out_och_free, rc);
GOTO(out_och_free, rc);
mutex_unlock(&lli->lli_och_mutex);
- fd = NULL;
- /* Must do this outside lli_och_mutex lock to prevent deadlock where
- different kind of OPEN lock for this same inode gets cancelled
- by ldlm_cancel_lru */
- if (!S_ISREG(inode->i_mode))
- GOTO(out_och_free, rc);
+ /* lockless for direct IO so that it can do IO in parallel */
+ if (file->f_flags & O_DIRECT)
+ fd->fd_flags |= LL_FILE_LOCKLESS_IO;
+ fd = NULL;
+ /* Must do this outside lli_och_mutex lock to prevent deadlock where
+ different kind of OPEN lock for this same inode gets cancelled
+ by ldlm_cancel_lru */
+ if (!S_ISREG(inode->i_mode))
+ GOTO(out_och_free, rc);
cl_lov_delay_create_clear(&file->f_flags);
GOTO(out_och_free, rc);
out_och_free:
- if (rc) {
- if (och_p && *och_p) {
- OBD_FREE(*och_p, sizeof (struct obd_client_handle));
- *och_p = NULL; /* OBD_FREE writes some magic there */
- (*och_usecount)--;
- }
+ if (rc) {
+ if (och_p && *och_p) {
+ OBD_FREE(*och_p, sizeof(struct obd_client_handle));
+ *och_p = NULL; /* OBD_FREE writes some magic there */
+ (*och_usecount)--;
+ }
mutex_unlock(&lli->lli_och_mutex);
out_openerr:
if (fd != NULL)
ll_file_data_put(fd);
- } else {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
- }
+ } else {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN,
+ ktime_us_delta(ktime_get(), kstart));
+ }
out_nofiledata:
if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}
- return rc;
+ return rc;
}
static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
struct lustre_handle *old_open_handle)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct obd_client_handle **och_p;
__u64 *och_usecount;
int rc = 0;
static int ll_lease_och_release(struct inode *inode, struct file *file)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct obd_client_handle **och_p;
struct obd_client_handle *old_och = NULL;
__u64 *och_usecount;
GOTO(out_release_it, rc);
LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
- ll_och_fill(sbi->ll_md_exp, &it, och);
+ rc = ll_och_fill(sbi->ll_md_exp, &it, och);
+ if (rc)
+ GOTO(out_release_it, rc);
if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
GOTO(out_close, rc = -EOPNOTSUPP);
* POSIX. Solving this problem needs to send an RPC to MDT for each
* read, this will hurt performance.
*/
- if (inode->i_atime.tv_sec < lli->lli_atime ||
- lli->lli_update_atime) {
+ if (ll_file_test_and_clear_flag(lli, LLIF_UPDATE_ATIME) ||
+ inode->i_atime.tv_sec < lli->lli_atime)
inode->i_atime.tv_sec = lli->lli_atime;
- lli->lli_update_atime = 0;
- }
+
inode->i_mtime.tv_sec = lli->lli_mtime;
inode->i_ctime.tv_sec = lli->lli_ctime;
*/
void ll_io_set_mirror(struct cl_io *io, const struct file *file)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
/* clear layout version for generic(non-resync) I/O in case it carries
* stale layout version due to I/O restart */
struct vvp_io_args *args)
{
struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
io->ci_lock_no_expand = fd->ll_lock_no_expand;
struct vvp_io *vio = vvp_env_io(env);
struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct range_lock range;
struct cl_io *io;
ssize_t result = 0;
int rc = 0;
unsigned retried = 0;
- bool restarted = false;
+ unsigned ignore_lockless = 0;
ENTRY;
restart:
io = vvp_env_thread_io(env);
ll_io_init(io, file, iot, args);
+ io->ci_ignore_lockless = ignore_lockless;
io->ci_ndelay_tried = retried;
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
else
range_lock_init(&range, *ppos, *ppos + count - 1);
- vio->vui_fd = LUSTRE_FPRIVATE(file);
+ vio->vui_fd = file->private_data;
vio->vui_io_subtype = args->via_io_subtype;
switch (vio->vui_io_subtype) {
file->f_path.dentry->d_name.name,
iot, rc, result, io->ci_need_restart);
- if ((rc == 0 || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
+ if ((rc == 0 || rc == -ENODATA || rc == -ENOLCK) &&
+ count > 0 && io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
"%s: restart %s from %lld, count: %zu, ret: %zd, rc: %d\n",
file_dentry(file)->d_name.name,
*ppos, count, result, rc);
/* preserve the tried count for FLR */
retried = io->ci_ndelay_tried;
- restarted = true;
+ ignore_lockless = io->ci_ignore_lockless;
goto restart;
}
if (result > 0) {
ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
- LPROC_LL_READ_BYTES, result);
+ LPROC_LL_READ_BYTES, result);
}
return result;
{
struct lu_env *env;
struct vvp_io_args *args;
+ struct file *file = iocb->ki_filp;
ssize_t result;
ssize_t rc2;
__u16 refcheck;
+ ktime_t kstart = ktime_get();
bool cached;
if (!iov_iter_count(to))
*/
result = pcc_file_read_iter(iocb, to, &cached);
if (cached)
- return result;
+ GOTO(out, result);
- ll_ras_enter(iocb->ki_filp);
+ ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to));
result = ll_do_fast_read(iocb, to);
if (result < 0 || iov_iter_count(to) == 0)
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
- rc2 = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
+ rc2 = ll_file_io_generic(env, args, file, CIT_READ,
&iocb->ki_pos, iov_iter_count(to));
if (rc2 > 0)
result += rc2;
cl_env_put(env, &refcheck);
out:
+ if (result > 0) {
+ ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
+ file->private_data, iocb->ki_pos, result,
+ READ);
+ ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ,
+ ktime_us_delta(ktime_get(), kstart));
+ }
+
return result;
}
struct vvp_io_args *args;
struct lu_env *env;
ssize_t rc_tiny = 0, rc_normal;
+ struct file *file = iocb->ki_filp;
__u16 refcheck;
bool cached;
+ ktime_t kstart = ktime_get();
int result;
ENTRY;
*/
result = pcc_file_write_iter(iocb, from, &cached);
if (cached && result != -ENOSPC && result != -EDQUOT)
- return result;
+ GOTO(out, rc_normal = result);
/* NB: we can't do direct IO for tiny writes because they use the page
* cache, we can't do sync writes because tiny writes can't flush
* pages, and we can't do append writes because we can't guarantee the
* required DLM locks are held to protect file size.
*/
- if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(iocb->ki_filp))) &&
- !(iocb->ki_filp->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
+ if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
+ !(file->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
rc_tiny = ll_do_tiny_write(iocb, from);
/* In case of error, go on and try normal write - Only stop if tiny
args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
- rc_normal = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
- &iocb->ki_pos, iov_iter_count(from));
+ rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE,
+ &iocb->ki_pos, iov_iter_count(from));
/* On success, combine bytes written. */
if (rc_tiny >= 0 && rc_normal > 0)
cl_env_put(env, &refcheck);
out:
+ if (rc_normal > 0) {
+ ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
+ file->private_data, iocb->ki_pos,
+ rc_normal, WRITE);
+ ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE,
+ ktime_us_delta(ktime_get(), kstart));
+ }
+
RETURN(rc_normal);
}
__u16 refcheck;
bool cached;
- ENTRY;
+ ENTRY;
result = pcc_file_splice_read(in_file, ppos, pipe,
count, flags, &cached);
if (cached)
RETURN(result);
- ll_ras_enter(in_file);
+ ll_ras_enter(in_file, *ppos, count);
env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
args = ll_env_args(env, IO_SPLICE);
- args->u.splice.via_pipe = pipe;
- args->u.splice.via_flags = flags;
+ args->u.splice.via_pipe = pipe;
+ args->u.splice.via_flags = flags;
+
+ result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
- result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
- cl_env_put(env, &refcheck);
- RETURN(result);
+ if (result > 0)
+ ll_rw_stats_tally(ll_i2sbi(file_inode(in_file)), current->pid,
+ in_file->private_data, *ppos, result,
+ READ);
+ RETURN(result);
}
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
le32_to_cpu(LOV_MAGIC_MAGIC)) {
/* this code will only exist for big-endian systems */
- lustre_swab_lov_user_md(lum);
+ lustre_swab_lov_user_md(lum, 0);
}
ll_inode_size_lock(inode);
stripe_count = 0;
}
- lustre_swab_lov_user_md((struct lov_user_md *)lmm);
+ lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
/* if function called for directory - we should
* avoid swab not existent lsm objects */
RETURN(rc);
}
+
static int
ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_grouplock grouplock;
int rc;
ENTRY;
RETURN(-EINVAL);
}
- if (ll_file_nolock(file))
- RETURN(-EOPNOTSUPP);
+ if (ll_file_nolock(file))
+ RETURN(-EOPNOTSUPP);
+retry:
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&lli->lli_group_mutex))
+ RETURN(-EAGAIN);
+ } else
+ mutex_lock(&lli->lli_group_mutex);
- spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
CWARN("group lock already existed with gid %lu\n",
fd->fd_grouplock.lg_gid);
- spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
+ GOTO(out, rc = -EINVAL);
+ }
+ if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
+ if (file->f_flags & O_NONBLOCK)
+ GOTO(out, rc = -EAGAIN);
+ mutex_unlock(&lli->lli_group_mutex);
+ wait_var_event(&lli->lli_group_users, !lli->lli_group_users);
+ GOTO(retry, rc = 0);
}
LASSERT(fd->fd_grouplock.lg_lock == NULL);
- spin_unlock(&lli->lli_lock);
/**
* XXX: group lock needs to protect all OST objects while PFL
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ GOTO(out, rc = PTR_ERR(env));
rc = cl_object_layout_get(env, obj, &cl);
if (!rc && cl.cl_is_composite)
cl_env_put(env, &refcheck);
if (rc)
- RETURN(rc);
+ GOTO(out, rc);
}
rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
- if (rc)
- RETURN(rc);
- spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- spin_unlock(&lli->lli_lock);
- CERROR("another thread just won the race\n");
- cl_put_grouplock(&grouplock);
- RETURN(-EINVAL);
- }
+ if (rc)
+ GOTO(out, rc);
fd->fd_flags |= LL_FILE_GROUP_LOCKED;
fd->fd_grouplock = grouplock;
- spin_unlock(&lli->lli_lock);
+ if (lli->lli_group_users == 0)
+ lli->lli_group_gid = grouplock.lg_gid;
+ lli->lli_group_users++;
CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
- RETURN(0);
+out:
+ mutex_unlock(&lli->lli_group_mutex);
+
+ RETURN(rc);
}
static int ll_put_grouplock(struct inode *inode, struct file *file,
unsigned long arg)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_grouplock grouplock;
+ int rc;
ENTRY;
- spin_lock(&lli->lli_lock);
+ mutex_lock(&lli->lli_group_mutex);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- spin_unlock(&lli->lli_lock);
- CWARN("no group lock held\n");
- RETURN(-EINVAL);
- }
+ CWARN("no group lock held\n");
+ GOTO(out, rc = -EINVAL);
+ }
LASSERT(fd->fd_grouplock.lg_lock != NULL);
if (fd->fd_grouplock.lg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
arg, fd->fd_grouplock.lg_gid);
- spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
+ GOTO(out, rc = -EINVAL);
}
grouplock = fd->fd_grouplock;
memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
- spin_unlock(&lli->lli_lock);
cl_put_grouplock(&grouplock);
+
+ lli->lli_group_users--;
+ if (lli->lli_group_users == 0) {
+ lli->lli_group_gid = 0;
+ wake_up_var(&lli->lli_group_users);
+ }
CDEBUG(D_INFO, "group lock %lu released\n", arg);
- RETURN(0);
+ GOTO(out, rc = 0);
+out:
+ mutex_unlock(&lli->lli_group_mutex);
+
+ RETURN(rc);
}
/**
if (!och)
GOTO(out, rc = -ENOMEM);
- ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
+ rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
+ if (rc)
+ GOTO(out, rc);
rc = ll_close_inode_openhandle(inode, och, 0, NULL);
out:
ladvise_names[advice], rc);
GOTO(out, rc);
}
+ /* fallthrough */
case LU_LADVISE_WILLREAD:
case LU_LADVISE_DONTNEED:
default:
static int ll_lock_noexpand(struct file *file, int flags)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
fd->ll_lock_no_expand = !(flags & LF_UNSET);
unsigned long arg)
{
struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle *och = NULL;
struct split_param sp;
{
struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct obd_client_handle *och = NULL;
__u64 open_flags = 0;
bool lease_broken;
ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
int flags, rc;
ENTRY;
{
struct inode *inode = file_inode(file);
loff_t retval, eof = 0;
+ ktime_t kstart = ktime_get();
ENTRY;
retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
PFID(ll_inode2fid(inode)), inode, retval, retval,
origin);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
retval = ll_glimpse_size(inode);
}
retval = ll_generic_file_llseek_size(file, offset, origin,
- ll_file_maxbytes(inode), eof);
+ ll_file_maxbytes(inode), eof);
+ if (retval >= 0)
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
+ ktime_us_delta(ktime_get(), kstart));
RETURN(retval);
}
{
struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
int rc, err;
LASSERT(!S_ISDIR(inode->i_mode));
struct inode *inode = dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
struct ptlrpc_request *req;
+ ktime_t kstart = ktime_get();
int rc, err;
ENTRY;
"datasync %d\n",
PFID(ll_inode2fid(inode)), inode, start, end, datasync);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
-
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
ptlrpc_req_finished(req);
if (S_ISREG(inode->i_mode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
bool cached;
/* Sync metadata on MDT first, and then sync the cached data
}
inode_unlock(inode);
+
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
+ ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
}
struct lustre_handle lockh = { 0 };
union ldlm_policy_data flock = { { 0 } };
int fl_type = file_lock->fl_type;
+ ktime_t kstart = ktime_get();
__u64 flags = 0;
int rc;
int rc2 = 0;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
PFID(ll_inode2fid(inode)), file_lock);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
-
- if (file_lock->fl_flags & FL_FLOCK) {
- LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
- /* flocks are whole-file locks */
- flock.l_flock.end = OFFSET_MAX;
- /* For flocks owner is determined by the local file desctiptor*/
- flock.l_flock.owner = (unsigned long)file_lock->fl_file;
- } else if (file_lock->fl_flags & FL_POSIX) {
- flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
- flock.l_flock.start = file_lock->fl_start;
- flock.l_flock.end = file_lock->fl_end;
- } else {
- RETURN(-EINVAL);
- }
- flock.l_flock.pid = file_lock->fl_pid;
+ if (file_lock->fl_flags & FL_FLOCK) {
+ LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
+ /* flocks are whole-file locks */
+ flock.l_flock.end = OFFSET_MAX;
+	/* For flocks owner is determined by the local file descriptor */
+ flock.l_flock.owner = (unsigned long)file_lock->fl_file;
+ } else if (file_lock->fl_flags & FL_POSIX) {
+ flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
+ flock.l_flock.start = file_lock->fl_start;
+ flock.l_flock.end = file_lock->fl_end;
+ } else {
+ RETURN(-EINVAL);
+ }
+ flock.l_flock.pid = file_lock->fl_pid;
+#if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner)
/* Somewhat ugly workaround for svc lockd.
* lockd installs custom fl_lmops->lm_compare_owner that checks
* for the fl_owner to be the same (which it always is on local node
* pointer space for current->files are not intersecting */
if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
+#endif
switch (fl_type) {
case F_RDLCK:
ll_finish_md_op_data(op_data);
- RETURN(rc);
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK,
+ ktime_us_delta(ktime_get(), kstart));
+ RETURN(rc);
}
int ll_get_fid_by_name(struct inode *parent, const char *name,
static int
ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
ENTRY;
/*
struct inode *inode = de->d_inode;
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
+ ktime_t kstart = ktime_get();
int rc;
- ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1);
-
rc = ll_inode_revalidate(de, IT_GETATTR);
if (rc < 0)
RETURN(rc);
stat->size = i_size_read(inode);
stat->blocks = inode->i_blocks;
- return 0;
+ ll_stats_ops_tally(sbi, LPROC_LL_GETATTR,
+ ktime_us_delta(ktime_get(), kstart));
+
+ return 0;
}
#ifdef HAVE_INODEOPS_ENHANCED_GETATTR
}
#ifdef HAVE_IOP_SET_ACL
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
set_cached_acl(inode, type, acl);
RETURN(rc);
}
-#endif /* CONFIG_FS_POSIX_ACL */
+#endif /* CONFIG_LUSTRE_FS_POSIX_ACL */
#endif /* HAVE_IOP_SET_ACL */
int ll_inode_permission(struct inode *inode, int mask)
const struct cred *old_cred = NULL;
cfs_cap_t cap;
bool squash_id = false;
+ ktime_t kstart = ktime_get();
ENTRY;
if (mask & MAY_NOT_BLOCK)
old_cred = override_creds(cred);
}
- ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM, 1);
rc = generic_permission(inode, mask);
/* restore current process's credentials and FS capability */
if (squash_id) {
put_cred(cred);
}
+ if (!rc)
+ ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM,
+ ktime_us_delta(ktime_get(), kstart));
+
RETURN(rc);
}