* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct ll_file_data *ll_file_data_get(void)
{
- struct ll_file_data *fd;
+ struct ll_file_data *fd;
- OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO);
- return fd;
+ OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO);
+ if (fd == NULL)
+ return NULL;
+ fd->fd_write_failed = false;
+ return fd;
}
static void ll_file_data_put(struct ll_file_data *fd)
if (fh)
op_data->op_handle = *fh;
op_data->op_capa1 = ll_mdscapa_get(inode);
+
+ if (LLIF_DATA_MODIFIED & ll_i2info(inode)->lli_flags)
+ op_data->op_bias |= MDS_DATA_MODIFIED;
}
/**
CERROR("inode %lu mdc close failed: rc = %d\n",
inode->i_ino, rc);
}
+
+ /* DATA_MODIFIED flag was successfully sent on close, cancel data
+ * modification flag. */
+ if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags &= ~LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
+
ll_finish_md_op_data(op_data);
if (rc == 0) {
och_usecount = &lli->lli_open_fd_read_count;
}
- cfs_mutex_lock(&lli->lli_och_mutex);
+ mutex_lock(&lli->lli_och_mutex);
if (*och_usecount) { /* There are still users of this handle, so
skip freeing it. */
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
RETURN(0);
}
och=*och_p;
*och_p = NULL;
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
if (och) { /* There might be a race and somebody have freed this och
already */
struct inode *inode = file->f_dentry->d_inode;
ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
- cfs_mutex_lock(&lli->lli_och_mutex);
+ mutex_lock(&lli->lli_och_mutex);
if (fd->fd_omode & FMODE_WRITE) {
lockmode = LCK_CW;
LASSERT(lli->lli_open_fd_write_count);
LASSERT(lli->lli_open_fd_read_count);
lli->lli_open_fd_read_count--;
}
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode,
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
+ itp->it_flags |= MDS_OPEN_BY_FID;
rc = md_intent_lock(sbi->ll_md_exp, op_data, lmm, lmmsize, itp,
0 /*unused */, &req, ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
GOTO(out, rc);
}
- rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL);
+ rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL, itp);
if (!rc && itp->d.lustre.it_lock_mode)
ll_set_lock_data(sbi->ll_md_exp, file->f_dentry->d_inode,
itp, NULL);
fd->fd_file = file;
if (S_ISDIR(inode->i_mode)) {
- cfs_spin_lock(&lli->lli_sa_lock);
- if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
- lli->lli_opendir_pid == 0) {
- lli->lli_opendir_key = fd;
- lli->lli_opendir_pid = cfs_curproc_pid();
- opendir_set = 1;
- }
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_lock(&lli->lli_sa_lock);
+ if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
+ lli->lli_opendir_pid == 0) {
+ lli->lli_opendir_key = fd;
+ lli->lli_opendir_pid = cfs_curproc_pid();
+ opendir_set = 1;
+ }
+ spin_unlock(&lli->lli_sa_lock);
}
if (inode->i_sb->s_root == file->f_dentry) {
och_usecount = &lli->lli_open_fd_read_count;
}
- cfs_mutex_lock(&lli->lli_och_mutex);
+ mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
/* Well, there's extra open request that we do not need,
let's close it somehow. This will decref request. */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
GOTO(out_openerr, rc);
}
rc = ll_local_open(file, it, fd, NULL);
if (rc) {
(*och_usecount)--;
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
GOTO(out_openerr, rc);
}
} else {
could be cancelled, and since blocking ast handler
would attempt to grab och_mutex as well, that would
result in a deadlock */
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file, NULL, 0, it);
it->it_create_mode &= ~M_CHECK_STALE;
if (rc)
GOTO(out_och_free, rc);
}
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
fd = NULL;
/* Must do this outside lli_och_mutex lock to prevent deadlock where
GOTO(out_och_free, rc);
out_och_free:
- if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->d.lustre.it_data);
- it_clear_disposition(it, DISP_ENQ_OPEN_REF);
- }
-
if (rc) {
if (och_p && *och_p) {
OBD_FREE(*och_p, sizeof (struct obd_client_handle));
*och_p = NULL; /* OBD_FREE writes some magic there */
(*och_usecount)--;
}
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
out_openerr:
if (opendir_set != 0)
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
}
+ if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
+ ptlrpc_req_finished(it->d.lustre.it_data);
+ it_clear_disposition(it, DISP_ENQ_OPEN_REF);
+ }
+
return rc;
}
CDEBUG(D_VFSTRACE, DFID" updating i_size "LPU64"\n",
PFID(&lli->lli_fid), lvb.lvb_size);
inode->i_blocks = lvb.lvb_blocks;
-
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
}
+ LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
+ LTIME_S(inode->i_atime) = lvb.lvb_atime;
+ LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
ll_inode_size_unlock(inode);
ccc_inode_lsm_put(inode, lsm);
}
}
-static ssize_t ll_file_io_generic(const struct lu_env *env,
- struct vvp_io_args *args, struct file *file,
- enum cl_io_type iot, loff_t *ppos, size_t count)
+static ssize_t
+ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
+ struct file *file, enum cl_io_type iot,
+ loff_t *ppos, size_t count)
{
- struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct cl_io *io;
ssize_t result;
ENTRY;
#endif
if ((iot == CIT_WRITE) &&
!(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- if (cfs_mutex_lock_interruptible(&lli->
+ if (mutex_lock_interruptible(&lli->
lli_write_mutex))
GOTO(out, result = -ERESTARTSYS);
write_mutex_locked = 1;
} else if (iot == CIT_READ) {
- cfs_down_read(&lli->lli_trunc_sem);
+ down_read(&lli->lli_trunc_sem);
}
break;
case IO_SENDFILE:
}
result = cl_io_loop(env, io);
if (write_mutex_locked)
- cfs_mutex_unlock(&lli->lli_write_mutex);
+ mutex_unlock(&lli->lli_write_mutex);
else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
- cfs_up_read(&lli->lli_trunc_sem);
+ up_read(&lli->lli_trunc_sem);
} else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
if (result >= 0) {
ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode),
LPROC_LL_WRITE_BYTES, result);
- lli->lli_write_rc = 0;
- } else {
- lli->lli_write_rc = result;
- }
- }
+ fd->fd_write_failed = false;
+ } else {
+ fd->fd_write_failed = true;
+ }
+ }
- return result;
+ return result;
}
static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
{
- struct ll_recreate_obj ucreat;
- ENTRY;
+ struct ll_recreate_obj ucreat;
+ ENTRY;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ RETURN(-EPERM);
- if (cfs_copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
- sizeof(struct ll_recreate_obj)))
- RETURN(-EFAULT);
+ if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
+ sizeof(ucreat)))
+ RETURN(-EFAULT);
- RETURN(ll_lov_recreate(inode, ucreat.lrc_id, 0,
- ucreat.lrc_ost_idx));
+ RETURN(ll_lov_recreate(inode, ucreat.lrc_id, 0,
+ ucreat.lrc_ost_idx));
}
static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
{
- struct lu_fid fid;
- obd_id id;
- obd_count ost_idx;
+ struct lu_fid fid;
+ obd_id id;
+ obd_count ost_idx;
ENTRY;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ RETURN(-EPERM);
- if (cfs_copy_from_user(&fid, (struct lu_fid *)arg,
- sizeof(struct lu_fid)))
- RETURN(-EFAULT);
+ if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid)))
+ RETURN(-EFAULT);
- id = fid_oid(&fid) | ((fid_seq(&fid) & 0xffff) << 32);
- ost_idx = (fid_seq(&fid) >> 16) & 0xffff;
- RETURN(ll_lov_recreate(inode, id, 0, ost_idx));
+ id = fid_oid(&fid) | ((fid_seq(&fid) & 0xffff) << 32);
+ ost_idx = (fid_seq(&fid) >> 16) & 0xffff;
+ RETURN(ll_lov_recreate(inode, id, 0, ost_idx));
}
int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
static int ll_lov_setea(struct inode *inode, struct file *file,
unsigned long arg)
{
- int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
- struct lov_user_md *lump;
- int lum_size = sizeof(struct lov_user_md) +
- sizeof(struct lov_user_ost_data);
- int rc;
- ENTRY;
+ int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
+ struct lov_user_md *lump;
+ int lum_size = sizeof(struct lov_user_md) +
+ sizeof(struct lov_user_ost_data);
+ int rc;
+ ENTRY;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ RETURN(-EPERM);
- OBD_ALLOC_LARGE(lump, lum_size);
- if (lump == NULL) {
+ OBD_ALLOC_LARGE(lump, lum_size);
+ if (lump == NULL)
RETURN(-ENOMEM);
- }
- if (cfs_copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
- OBD_FREE_LARGE(lump, lum_size);
- RETURN(-EFAULT);
- }
- rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
+ if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
+ OBD_FREE_LARGE(lump, lum_size);
+ RETURN(-EFAULT);
+ }
+
+ rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
- OBD_FREE_LARGE(lump, lum_size);
- RETURN(rc);
+ OBD_FREE_LARGE(lump, lum_size);
+ RETURN(rc);
}
static int ll_lov_setstripe(struct inode *inode, struct file *file,
- unsigned long arg)
-{
- struct lov_user_md_v3 lumv3;
- struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
- struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
- struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
- int lum_size;
- int rc;
- int flags = FMODE_WRITE;
- ENTRY;
+ unsigned long arg)
+{
+ struct lov_user_md_v3 lumv3;
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
+ struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
+ struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
+ int lum_size, rc;
+ int flags = FMODE_WRITE;
+ ENTRY;
- /* first try with v1 which is smaller than v3 */
- lum_size = sizeof(struct lov_user_md_v1);
- if (cfs_copy_from_user(lumv1, lumv1p, lum_size))
- RETURN(-EFAULT);
+ /* first try with v1 which is smaller than v3 */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (copy_from_user(lumv1, lumv1p, lum_size))
+ RETURN(-EFAULT);
- if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
- lum_size = sizeof(struct lov_user_md_v3);
- if (cfs_copy_from_user(&lumv3, lumv3p, lum_size))
- RETURN(-EFAULT);
- }
+ if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
+ lum_size = sizeof(struct lov_user_md_v3);
+ if (copy_from_user(&lumv3, lumv3p, lum_size))
+ RETURN(-EFAULT);
+ }
- rc = ll_lov_setstripe_ea_info(inode, file, flags, lumv1, lum_size);
- if (rc == 0) {
+ rc = ll_lov_setstripe_ea_info(inode, file, flags, lumv1, lum_size);
+ if (rc == 0) {
struct lov_stripe_md *lsm;
+ __u32 gen;
+
put_user(0, &lumv1p->lmm_stripe_count);
+
+ ll_layout_refresh(inode, &gen);
lsm = ccc_inode_lsm_get(inode);
rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
0, lsm, (void *)arg);
if (ll_file_nolock(file))
RETURN(-EOPNOTSUPP);
- cfs_spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- CWARN("group lock already existed with gid %lu\n",
- fd->fd_grouplock.cg_gid);
- cfs_spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
- }
- LASSERT(fd->fd_grouplock.cg_lock == NULL);
- cfs_spin_unlock(&lli->lli_lock);
-
- rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
- arg, (file->f_flags & O_NONBLOCK), &grouplock);
- if (rc)
- RETURN(rc);
-
- cfs_spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- cfs_spin_unlock(&lli->lli_lock);
- CERROR("another thread just won the race\n");
- cl_put_grouplock(&grouplock);
- RETURN(-EINVAL);
- }
+ spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ CWARN("group lock already existed with gid %lu\n",
+ fd->fd_grouplock.cg_gid);
+ spin_unlock(&lli->lli_lock);
+ RETURN(-EINVAL);
+ }
+ LASSERT(fd->fd_grouplock.cg_lock == NULL);
+ spin_unlock(&lli->lli_lock);
+
+ rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+ arg, (file->f_flags & O_NONBLOCK), &grouplock);
+ if (rc)
+ RETURN(rc);
+
+ spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ spin_unlock(&lli->lli_lock);
+ CERROR("another thread just won the race\n");
+ cl_put_grouplock(&grouplock);
+ RETURN(-EINVAL);
+ }
- fd->fd_flags |= LL_FILE_GROUP_LOCKED;
- fd->fd_grouplock = grouplock;
- cfs_spin_unlock(&lli->lli_lock);
+ fd->fd_flags |= LL_FILE_GROUP_LOCKED;
+ fd->fd_grouplock = grouplock;
+ spin_unlock(&lli->lli_lock);
- CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
- RETURN(0);
+ CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
+ RETURN(0);
}
int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
- ENTRY;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ccc_grouplock grouplock;
+ ENTRY;
- cfs_spin_lock(&lli->lli_lock);
- if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- cfs_spin_unlock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
+ if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ spin_unlock(&lli->lli_lock);
CWARN("no group lock held\n");
RETURN(-EINVAL);
}
if (fd->fd_grouplock.cg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
arg, fd->fd_grouplock.cg_gid);
- cfs_spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
- }
+ spin_unlock(&lli->lli_lock);
+ RETURN(-EINVAL);
+ }
- grouplock = fd->fd_grouplock;
- memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
- fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
- cfs_spin_unlock(&lli->lli_lock);
+ grouplock = fd->fd_grouplock;
+ memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
+ fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
+ spin_unlock(&lli->lli_lock);
- cl_put_grouplock(&grouplock);
- CDEBUG(D_INFO, "group lock %lu released\n", arg);
- RETURN(0);
+ cl_put_grouplock(&grouplock);
+ CDEBUG(D_INFO, "group lock %lu released\n", arg);
+ RETURN(0);
}
/**
RETURN(rc);
}
-int ll_fid2path(struct obd_export *exp, void *arg)
+int ll_fid2path(struct inode *inode, void *arg)
{
- struct getinfo_fid2path *gfout, *gfin;
- int outsize, rc;
- ENTRY;
+ struct obd_export *exp = ll_i2mdexp(inode);
+ struct getinfo_fid2path *gfout, *gfin;
+ int outsize, rc;
+ ENTRY;
- /* Need to get the buflen */
- OBD_ALLOC_PTR(gfin);
- if (gfin == NULL)
- RETURN(-ENOMEM);
- if (cfs_copy_from_user(gfin, arg, sizeof(*gfin))) {
- OBD_FREE_PTR(gfin);
- RETURN(-EFAULT);
- }
+ if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
+ !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
+ RETURN(-EPERM);
- outsize = sizeof(*gfout) + gfin->gf_pathlen;
- OBD_ALLOC(gfout, outsize);
- if (gfout == NULL) {
- OBD_FREE_PTR(gfin);
- RETURN(-ENOMEM);
- }
- memcpy(gfout, gfin, sizeof(*gfout));
- OBD_FREE_PTR(gfin);
+ /* Need to get the buflen */
+ OBD_ALLOC_PTR(gfin);
+ if (gfin == NULL)
+ RETURN(-ENOMEM);
+ if (copy_from_user(gfin, arg, sizeof(*gfin))) {
+ OBD_FREE_PTR(gfin);
+ RETURN(-EFAULT);
+ }
- /* Call mdc_iocontrol */
- rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
- if (rc)
- GOTO(gf_free, rc);
- if (cfs_copy_to_user(arg, gfout, outsize))
- rc = -EFAULT;
+ outsize = sizeof(*gfout) + gfin->gf_pathlen;
+ OBD_ALLOC(gfout, outsize);
+ if (gfout == NULL) {
+ OBD_FREE_PTR(gfin);
+ RETURN(-ENOMEM);
+ }
+ memcpy(gfout, gfin, sizeof(*gfout));
+ OBD_FREE_PTR(gfin);
+
+ /* Call mdc_iocontrol */
+ rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
+ if (rc)
+ GOTO(gf_free, rc);
+ if (copy_to_user(arg, gfout, outsize))
+ rc = -EFAULT;
gf_free:
- OBD_FREE(gfout, outsize);
- RETURN(rc);
+ OBD_FREE(gfout, outsize);
+ RETURN(rc);
}
static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
if (fiemap_s == NULL)
RETURN(-ENOMEM);
- /* get the fiemap value */
- if (copy_from_user(fiemap_s,(struct ll_user_fiemap __user *)arg,
- sizeof(*fiemap_s)))
- GOTO(error, rc = -EFAULT);
+ /* get the fiemap value */
+ if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
+ sizeof(*fiemap_s)))
+ GOTO(error, rc = -EFAULT);
/* If fm_extent_count is non-zero, read the first extent since
* it is used to calculate end_offset and device from previous
ret_bytes += (fiemap_s->fm_mapped_extents *
sizeof(struct ll_fiemap_extent));
- if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
- rc = -EFAULT;
+ if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
+ rc = -EFAULT;
error:
OBD_FREE_LARGE(fiemap_s, num_bytes);
* Version is computed using server side locking.
*
* @param extent_lock Take extent lock. Not needed if a process is already
- * holding the OST object group locks.
+ * holding the OST object group locks.
*/
-static int ll_data_version(struct inode *inode, __u64 *data_version,
- int extent_lock)
+int ll_data_version(struct inode *inode, __u64 *data_version,
+ int extent_lock)
{
- struct lov_stripe_md *lsm = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct obdo *obdo = NULL;
- int rc;
+ struct lov_stripe_md *lsm = NULL;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obdo *obdo = NULL;
+ int rc;
ENTRY;
/* If no stripe, we consider version is 0. */
lsm = ccc_inode_lsm_get(inode);
if (lsm == NULL) {
- *data_version = 0;
- CDEBUG(D_INODE, "No object for inode\n");
- RETURN(0);
- }
+ *data_version = 0;
+ CDEBUG(D_INODE, "No object for inode\n");
+ RETURN(0);
+ }
- OBD_ALLOC_PTR(obdo);
+ OBD_ALLOC_PTR(obdo);
if (obdo == NULL) {
ccc_inode_lsm_put(inode, lsm);
RETURN(-ENOMEM);
}
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, obdo, 0, extent_lock);
- if (!rc) {
- if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
- rc = -EOPNOTSUPP;
- else
- *data_version = obdo->o_data_version;
- }
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, obdo, 0, extent_lock);
+ if (!rc) {
+ if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
+ rc = -EOPNOTSUPP;
+ else
+ *data_version = obdo->o_data_version;
+ }
- OBD_FREE_PTR(obdo);
+ OBD_FREE_PTR(obdo);
ccc_inode_lsm_put(inode, lsm);
RETURN(rc);
case FSFILT_IOC_SETVERSION_OLD:
case FSFILT_IOC_SETVERSION:
*/
- case LL_IOC_FLUSHCTX:
- RETURN(ll_flush_ctx(inode));
- case LL_IOC_PATH2FID: {
- if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
- sizeof(struct lu_fid)))
- RETURN(-EFAULT);
+ case LL_IOC_FLUSHCTX:
+ RETURN(ll_flush_ctx(inode));
+ case LL_IOC_PATH2FID: {
+ if (copy_to_user((void *)arg, ll_inode2fid(inode),
+ sizeof(struct lu_fid)))
+ RETURN(-EFAULT);
- RETURN(0);
- }
- case OBD_IOC_FID2PATH:
- RETURN(ll_fid2path(ll_i2mdexp(inode), (void *)arg));
- case LL_IOC_DATA_VERSION: {
- struct ioc_data_version idv;
- int rc;
+ RETURN(0);
+ }
+ case OBD_IOC_FID2PATH:
+ RETURN(ll_fid2path(inode, (void *)arg));
+ case LL_IOC_DATA_VERSION: {
+ struct ioc_data_version idv;
+ int rc;
- if (cfs_copy_from_user(&idv, (char *)arg, sizeof(idv)))
- RETURN(-EFAULT);
+ if (copy_from_user(&idv, (char *)arg, sizeof(idv)))
+ RETURN(-EFAULT);
- rc = ll_data_version(inode, &idv.idv_version,
- !(idv.idv_flags & LL_DV_NOFLUSH));
+ rc = ll_data_version(inode, &idv.idv_version,
+ !(idv.idv_flags & LL_DV_NOFLUSH));
- if (rc == 0 &&
- cfs_copy_to_user((char *) arg, &idv, sizeof(idv)))
- RETURN(-EFAULT);
+ if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv)))
+ RETURN(-EFAULT);
- RETURN(rc);
- }
+ RETURN(rc);
+ }
case LL_IOC_GET_MDTIDX: {
int mdtidx;
RETURN(0);
}
- case OBD_IOC_GETDTNAME:
- case OBD_IOC_GETMDNAME:
- RETURN(ll_get_obd_name(inode, cmd, arg));
- default: {
- int err;
-
- if (LLIOC_STOP ==
- ll_iocontrol_call(inode, file, cmd, arg, &err))
- RETURN(err);
-
- RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
- (void *)arg));
- }
- }
+ case OBD_IOC_GETDTNAME:
+ case OBD_IOC_GETMDNAME:
+ RETURN(ll_get_obd_name(inode, cmd, arg));
+ case LL_IOC_HSM_STATE_GET: {
+ struct md_op_data *op_data;
+ struct hsm_user_state *hus;
+ int rc;
+
+ OBD_ALLOC_PTR(hus);
+ if (hus == NULL)
+ RETURN(-ENOMEM);
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hus);
+ if (IS_ERR(op_data)) {
+ OBD_FREE_PTR(hus);
+ RETURN(PTR_ERR(op_data));
+ }
+
+ rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
+ op_data, NULL);
+
+ if (copy_to_user((void *)arg, hus, sizeof(*hus)))
+ rc = -EFAULT;
+
+ ll_finish_md_op_data(op_data);
+ OBD_FREE_PTR(hus);
+ RETURN(rc);
+ }
+ case LL_IOC_HSM_STATE_SET: {
+ struct md_op_data *op_data;
+ struct hsm_state_set *hss;
+ int rc;
+
+ OBD_ALLOC_PTR(hss);
+ if (hss == NULL)
+ RETURN(-ENOMEM);
+ if (copy_from_user(hss, (char *)arg, sizeof(*hss))) {
+ OBD_FREE_PTR(hss);
+ RETURN(-EFAULT);
+ }
+
+ /* Non-root users are forbidden to set or clear flags which are
+ * NOT defined in HSM_USER_MASK. */
+ if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK)
+ && !cfs_capable(CFS_CAP_SYS_ADMIN)) {
+ OBD_FREE_PTR(hss);
+ RETURN(-EPERM);
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hss);
+ if (IS_ERR(op_data)) {
+ OBD_FREE_PTR(hss);
+ RETURN(PTR_ERR(op_data));
+ }
+
+ rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
+ op_data, NULL);
+
+ ll_finish_md_op_data(op_data);
+
+ OBD_FREE_PTR(hss);
+ RETURN(rc);
+ }
+
+ default: {
+ int err;
+
+ if (LLIOC_STOP ==
+ ll_iocontrol_call(inode, file, cmd, arg, &err))
+ RETURN(err);
+
+ RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
+ (void *)arg));
+ }
+ }
}
-loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
+#ifndef HAVE_FILE_LLSEEK_SIZE
+static inline loff_t
+llseek_execute(struct file *file, loff_t offset, loff_t maxsize)
{
- struct inode *inode = file->f_dentry->d_inode;
- loff_t retval;
- ENTRY;
- retval = offset + ((origin == 2) ? i_size_read(inode) :
- (origin == 1) ? file->f_pos : 0);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%s)\n",
- inode->i_ino, inode->i_generation, inode, retval, retval,
- origin == 2 ? "SEEK_END": origin == 1 ? "SEEK_CUR" : "SEEK_SET");
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
-
- if (origin == 2) { /* SEEK_END */
- int rc;
+ if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+ return -EINVAL;
+ if (offset > maxsize)
+ return -EINVAL;
- rc = ll_glimpse_size(inode);
- if (rc != 0)
- RETURN(rc);
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+ return offset;
+}
+
+static loff_t
+generic_file_llseek_size(struct file *file, loff_t offset, int origin,
+ loff_t maxsize, loff_t eof)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+
+ switch (origin) {
+ case SEEK_END:
+ offset += eof;
+ break;
+ case SEEK_CUR:
+ /*
+ * Here we special-case the lseek(fd, 0, SEEK_CUR)
+ * position-querying operation. Avoid rewriting the "same"
+ * f_pos value back to the file because a concurrent read(),
+ * write() or lseek() might have altered it
+ */
+ if (offset == 0)
+ return file->f_pos;
+ /*
+ * inode->i_mutex protects against read/modify/write races with
+ * other SEEK_CURs. Note that parallel writes and reads behave
+ * like SEEK_SET.
+ */
+ mutex_lock(&inode->i_mutex);
+ offset = llseek_execute(file, file->f_pos + offset, maxsize);
+ mutex_unlock(&inode->i_mutex);
+ return offset;
+ case SEEK_DATA:
+ /*
+ * In the generic case the entire file is data, so as long as
+ * offset isn't at the end of the file then the offset is data.
+ */
+ if (offset >= eof)
+ return -ENXIO;
+ break;
+ case SEEK_HOLE:
+ /*
+ * There is a virtual hole at the end of the file, so as long as
+ * offset isn't i_size or larger, return i_size.
+ */
+ if (offset >= eof)
+ return -ENXIO;
+ offset = eof;
+ break;
+ }
- offset += i_size_read(inode);
- } else if (origin == 1) { /* SEEK_CUR */
- offset += file->f_pos;
- }
+ return llseek_execute(file, offset, maxsize);
+}
+#endif
- retval = -EINVAL;
- if (offset >= 0 && offset <= ll_file_maxbytes(inode)) {
- if (offset != file->f_pos) {
- file->f_pos = offset;
- }
- retval = offset;
- }
+loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ loff_t retval, eof = 0;
- RETURN(retval);
+ ENTRY;
+ retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
+ (origin == SEEK_CUR) ? file->f_pos : 0);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n",
+ inode->i_ino, inode->i_generation, inode, retval, retval,
+ origin);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
+
+ if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
+ retval = ll_glimpse_size(inode);
+ if (retval != 0)
+ RETURN(retval);
+ eof = i_size_read(inode);
+ }
+
+ retval = generic_file_llseek_size(file, offset, origin,
+ ll_file_maxbytes(inode), eof);
+ RETURN(retval);
}
-#ifdef HAVE_FLUSH_OWNER_ID
int ll_flush(struct file *file, fl_owner_t id)
-#else
-int ll_flush(struct file *file)
-#endif
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc, err;
-
- LASSERT(!S_ISDIR(inode->i_mode));
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ int rc, err;
- /* the application should know write failure already. */
- if (lli->lli_write_rc)
- return 0;
+ LASSERT(!S_ISDIR(inode->i_mode));
- /* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping. */
- rc = lli->lli_async_rc;
- lli->lli_async_rc = 0;
+ /* catch async errors that were recorded back when async writeback
+ * failed for pages in this mapping. */
+ rc = lli->lli_async_rc;
+ lli->lli_async_rc = 0;
err = lov_read_and_clear_async_rc(lli->lli_clob);
if (rc == 0)
rc = err;
+ /* The application has been told write failure already.
+ * Do not report failure again. */
+ if (fd->fd_write_failed)
+ return 0;
return rc ? -EIO : 0;
}
io = ccc_env_thread_io(env);
io->ci_obj = cl_i2info(inode)->lli_clob;
+ io->ci_ignore_layout = 1;
/* initialize parameters for sync */
fio = &io->u.ci_fsync;
struct ll_inode_info *lli = ll_i2info(inode);
struct ptlrpc_request *req;
struct obd_capa *oc;
- struct lov_stripe_md *lsm;
int rc, err;
ENTRY;
+
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
+#ifdef HAVE_FILE_FSYNC_4ARGS
+ rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ mutex_lock(&inode->i_mutex);
+#else
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
rc = filemap_fdatawait(inode->i_mapping);
+#endif
/* catch async errors that were recorded back when async writeback
* failed for pages in this mapping. */
if (!err)
ptlrpc_req_finished(req);
- lsm = ccc_inode_lsm_get(inode);
- if (data && lsm) {
+ if (data) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
err = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
CL_FSYNC_ALL);
if (rc == 0 && err < 0)
rc = err;
- lli->lli_write_rc = rc < 0 ? rc : 0;
+ if (rc < 0)
+ fd->fd_write_failed = true;
+ else
+ fd->fd_write_failed = false;
}
- ccc_inode_lsm_put(inode, lsm);
+#ifdef HAVE_FILE_FSYNC_4ARGS
+ mutex_unlock(&inode->i_mutex);
+#endif
RETURN(rc);
}
ldlm_policy_data_t flock = {{0}};
int flags = 0;
int rc;
+ int rc2 = 0;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
}
flock.l_flock.pid = file_lock->fl_pid;
- /* Somewhat ugly workaround for svc lockd.
- * lockd installs custom fl_lmops->fl_compare_owner that checks
- * for the fl_owner to be the same (which it always is on local node
- * I guess between lockd processes) and then compares pid.
- * As such we assign pid to the owner field to make it all work,
- * conflict with normal locks is unlikely since pid space and
- * pointer space for current->files are not intersecting */
- if (file_lock->fl_lmops && file_lock->fl_lmops->fl_compare_owner)
- flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
+ /* Somewhat ugly workaround for svc lockd.
+ * lockd installs custom fl_lmops->lm_compare_owner that checks
+ * for the fl_owner to be the same (which it always is on local node
+ * I guess between lockd processes) and then compares pid.
+ * As such we assign pid to the owner field to make it all work,
+ * conflict with normal locks is unlikely since pid space and
+ * pointer space for current->files are not intersecting */
+ if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
+ flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
switch (file_lock->fl_type) {
case F_RDLCK:
rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
op_data, &lockh, &flock, 0, NULL /* req */, flags);
- ll_finish_md_op_data(op_data);
-
if ((file_lock->fl_flags & FL_FLOCK) &&
(rc == 0 || file_lock->fl_type == F_UNLCK))
- flock_lock_file_wait(file, file_lock);
+ rc2 = flock_lock_file_wait(file, file_lock);
if ((file_lock->fl_flags & FL_POSIX) &&
(rc == 0 || file_lock->fl_type == F_UNLCK) &&
!(flags & LDLM_FL_TEST_LOCK))
- posix_lock_file_wait(file, file_lock);
+ rc2 = posix_lock_file_wait(file, file_lock);
+
+ if (rc2 && file_lock->fl_type != F_UNLCK) {
+ einfo.ei_mode = LCK_NL;
+ md_enqueue(sbi->ll_md_exp, &einfo, NULL,
+ op_data, &lockh, &flock, 0, NULL /* req */, flags);
+ rc = rc2;
+ }
+
+ ll_finish_md_op_data(op_data);
RETURN(rc);
}
ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
(LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
struct lu_fid *fid;
- int flags;
+ __u64 flags;
int i;
ENTRY;
}
ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh)
+ struct lustre_handle *lockh, __u64 flags)
{
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
struct lu_fid *fid;
ldlm_mode_t rc;
- int flags;
ENTRY;
fid = &ll_i2info(inode)->lli_fid;
CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
- flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
- rc = md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS, &policy,
+ rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
+ fid, LDLM_IBITS, &policy,
LCK_CR|LCK_CW|LCK_PR|LCK_PW, lockh);
RETURN(rc);
}
-static int ll_inode_revalidate_fini(struct inode *inode, int rc) {
- if (rc == -ENOENT) { /* Already unlinked. Just update nlink
- * and return success */
- inode->i_nlink = 0;
- /* This path cannot be hit for regular files unless in
- * case of obscure races, so no need to to validate
- * size. */
- if (!S_ISREG(inode->i_mode) &&
- !S_ISDIR(inode->i_mode))
- return 0;
- }
-
- if (rc) {
- CERROR("failure %d inode %lu\n", rc, inode->i_ino);
- return -abs(rc);
-
- }
+static int ll_inode_revalidate_fini(struct inode *inode, int rc)
+{
+ /* Already unlinked. Just update nlink and return success */
+ if (rc == -ENOENT) {
+ clear_nlink(inode);
+ /* This path cannot be hit for regular files unless in
+ * case of obscure races, so no need to to validate
+ * size. */
+ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ return 0;
+ } else if (rc != 0) {
+ CERROR("%s: revalidate FID "DFID" error: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
+ }
- return 0;
+ return rc;
}
int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
int rc = 0;
ENTRY;
- if (!inode) {
- CERROR("REPORT THIS LINE TO PETER\n");
- RETURN(0);
- }
+ LASSERT(inode != NULL);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s\n",
inode->i_ino, inode->i_generation, inode, dentry->d_name.name);
RETURN(rc);
}
- rc = ll_prep_inode(&inode, req, NULL);
+ rc = ll_prep_inode(&inode, req, NULL, NULL);
}
out:
ptlrpc_req_finished(req);
ENTRY;
rc = __ll_inode_revalidate_it(dentry, it, ibits);
-
- /* if object not yet allocated, don't validate size */
- if (rc == 0 && !ll_i2info(dentry->d_inode)->lli_has_smd) {
- LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
- RETURN(0);
- }
-
- /* ll_glimpse_size will prefer locally cached writes if they extend
- * the file */
-
- if (rc == 0)
- rc = ll_glimpse_size(inode);
-
+ if (rc != 0)
+ RETURN(rc);
+
+ /* if object isn't regular file, don't validate size */
+ if (!S_ISREG(inode->i_mode)) {
+ LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
+ LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
+ LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
+ } else {
+ rc = ll_glimpse_size(inode);
+ }
RETURN(rc);
}
stat->nlink = inode->i_nlink;
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
- stat->rdev = kdev_t_to_nr(inode->i_rdev);
+ stat->rdev = inode->i_rdev;
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
-#ifdef HAVE_INODE_BLKSIZE
- stat->blksize = inode->i_blksize;
-#else
- stat->blksize = 1 << inode->i_blkbits;
-#endif
+ stat->blksize = 1 << inode->i_blkbits;
stat->size = i_size_read(inode);
stat->blocks = inode->i_blocks;
}
#endif
+/*
+ * ->get_acl() hook: return a referenced copy of the cached POSIX ACL.
+ *
+ * Only the cached lli_posix_acl is served (the @type argument is not
+ * consulted here), duplicated under lli_lock so the cache can be
+ * swapped safely.
+ * \retval the referenced acl, possibly NULL; the caller (the VFS ACL
+ *         permission path) releases the reference.
+ */
+struct posix_acl * ll_get_acl(struct inode *inode, int type)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct posix_acl *acl = NULL;
+ ENTRY;
+
+ spin_lock(&lli->lli_lock);
+ /* VFS' acl_permission_check->check_acl will release the refcount */
+ acl = posix_acl_dup(lli->lli_posix_acl);
+ spin_unlock(&lli->lli_lock);
+
+ RETURN(acl);
+}
+#ifndef HAVE_GENERIC_PERMISSION_2ARGS
+/*
+ * ACL callback for generic_permission() on kernels that still accept
+ * one (i.e. when HAVE_GENERIC_PERMISSION_2ARGS is not defined).
+ * Checks @mask against the cached access ACL via ll_get_acl().
+ * Returns -EAGAIN when no ACL is cached (or CONFIG_FS_POSIX_ACL is
+ * off), and -ECHILD to refuse an RCU-mode (IPERM_FLAG_RCU) walk.
+ */
static int
-#ifdef HAVE_GENERIC_PERMISSION_4ARGS
-lustre_check_acl(struct inode *inode, int mask, unsigned int flags)
-#else
-lustre_check_acl(struct inode *inode, int mask)
-#endif
+# ifdef HAVE_GENERIC_PERMISSION_4ARGS
+ll_check_acl(struct inode *inode, int mask, unsigned int flags)
+# else
+ll_check_acl(struct inode *inode, int mask)
+# endif
{
-#ifdef CONFIG_FS_POSIX_ACL
- struct ll_inode_info *lli = ll_i2info(inode);
- struct posix_acl *acl;
- int rc;
- ENTRY;
+# ifdef CONFIG_FS_POSIX_ACL
+ struct posix_acl *acl;
+ int rc;
+ ENTRY;
-#ifdef HAVE_GENERIC_PERMISSION_4ARGS
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-#endif
- cfs_spin_lock(&lli->lli_lock);
- acl = posix_acl_dup(lli->lli_posix_acl);
- cfs_spin_unlock(&lli->lli_lock);
+# ifdef HAVE_GENERIC_PERMISSION_4ARGS
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+# endif
+ acl = ll_get_acl(inode, ACL_TYPE_ACCESS);
- if (!acl)
- RETURN(-EAGAIN);
+ if (!acl)
+ RETURN(-EAGAIN);
- rc = posix_acl_permission(inode, acl, mask);
- posix_acl_release(acl);
+ rc = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
- RETURN(rc);
-#else
- return -EAGAIN;
-#endif
+ RETURN(rc);
+# else /* !CONFIG_FS_POSIX_ACL */
+ return -EAGAIN;
+# endif /* CONFIG_FS_POSIX_ACL */
}
+#endif /* HAVE_GENERIC_PERMISSION_2ARGS */
#ifdef HAVE_GENERIC_PERMISSION_4ARGS
int ll_inode_permission(struct inode *inode, int mask, unsigned int flags)
int rc = 0;
ENTRY;
-#ifdef HAVE_GENERIC_PERMISSION_4ARGS
+#ifdef MAY_NOT_BLOCK
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+#elif defined(HAVE_GENERIC_PERMISSION_4ARGS)
if (flags & IPERM_FLAG_RCU)
return -ECHILD;
#endif
RETURN(rc);
}
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
- inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
+ inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
- if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
- return lustre_check_remote_perm(inode, mask);
+ if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
+ return lustre_check_remote_perm(inode, mask);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
- rc = ll_generic_permission(inode, mask, flags, lustre_check_acl);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
+ rc = ll_generic_permission(inode, mask, flags, ll_check_acl);
- RETURN(rc);
+ RETURN(rc);
}
#ifdef HAVE_FILE_READV
};
+/* inode operations for regular files; this patch drops the .truncate
+ * method and wires up .get_acl on kernels that have the ->get_acl()
+ * inode operation (HAVE_IOP_GET_ACL). */
struct inode_operations ll_file_inode_operations = {
- .setattr = ll_setattr,
- .truncate = ll_truncate,
- .getattr = ll_getattr,
- .permission = ll_inode_permission,
- .setxattr = ll_setxattr,
- .getxattr = ll_getxattr,
- .listxattr = ll_listxattr,
- .removexattr = ll_removexattr,
+ .setattr = ll_setattr,
+ .getattr = ll_getattr,
+ .permission = ll_inode_permission,
+ .setxattr = ll_setxattr,
+ .getxattr = ll_getxattr,
+ .listxattr = ll_listxattr,
+ .removexattr = ll_removexattr,
#ifdef HAVE_LINUX_FIEMAP_H
- .fiemap = ll_fiemap,
+ .fiemap = ll_fiemap,
+#endif
+#ifdef HAVE_IOP_GET_ACL
+ .get_acl = ll_get_acl,
#endif
};
/* dynamic ioctl number support routins */
static struct llioc_ctl_data {
- cfs_rw_semaphore_t ioc_sem;
+ struct rw_semaphore ioc_sem;
cfs_list_t ioc_head;
} llioc = {
__RWSEM_INITIALIZER(llioc.ioc_sem),
in_data->iocd_count = count;
memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
- cfs_down_write(&llioc.ioc_sem);
+ down_write(&llioc.ioc_sem);
cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
- cfs_up_write(&llioc.ioc_sem);
+ up_write(&llioc.ioc_sem);
RETURN(in_data);
}
if (magic == NULL)
return;
- cfs_down_write(&llioc.ioc_sem);
+ down_write(&llioc.ioc_sem);
cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
if (tmp == magic) {
unsigned int size = tmp->iocd_size;
cfs_list_del(&tmp->iocd_list);
- cfs_up_write(&llioc.ioc_sem);
+ up_write(&llioc.ioc_sem);
OBD_FREE(tmp, size);
return;
}
}
- cfs_up_write(&llioc.ioc_sem);
+ up_write(&llioc.ioc_sem);
CWARN("didn't find iocontrol register block with magic: %p\n", magic);
}
struct llioc_data *data;
int rc = -EINVAL, i;
- cfs_down_read(&llioc.ioc_sem);
+ down_read(&llioc.ioc_sem);
cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
for (i = 0; i < data->iocd_count; i++) {
if (cmd != data->iocd_cmd[i])
if (ret == LLIOC_STOP)
break;
}
- cfs_up_read(&llioc.ioc_sem);
+ up_read(&llioc.ioc_sem);
if (rcp)
*rcp = rc;
result = cl_conf_set(env, lli->lli_clob, conf);
cl_env_nested_put(&nest, env);
+
+ if (conf->coc_opc == OBJECT_CONF_SET) {
+ struct ldlm_lock *lock = conf->coc_lock;
+
+ LASSERT(lock != NULL);
+ LASSERT(ldlm_has_layout(lock));
+ if (result == 0) {
+ /* it can only be allowed to match after layout is
+ * applied to inode otherwise false layout would be
+ * seen. Applying layout shoud happen before dropping
+ * the intent lock. */
+ ldlm_lock_allow_match(lock);
+ }
+ }
RETURN(result);
}
/**
+ * Apply the layout to the inode. Layout lock is held and will be released
+ * in this function.
+ *
+ * \param lockh  held layout lock; always decref'ed before returning
+ * \param gen [out] layout generation taken from the lock's LVB (or
+ *               lli_layout_gen when the LVB is already applied)
+ * \param reconf when false and the lock's LVB is not ready, return
+ *               -ENODATA instead of (re)configuring the layout
+ * \retval 0 on success, -EAGAIN when the caller should re-enqueue and
+ *         retry, negative errno on failure.
+ */
+static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
+ struct inode *inode, __u32 *gen, bool reconf)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ldlm_lock *lock;
+ struct lustre_md md = { NULL };
+ struct cl_object_conf conf;
+ int rc = 0;
+ bool lvb_ready;
+ ENTRY;
+
+ LASSERT(lustre_handle_is_used(lockh));
+
+ lock = ldlm_handle2lock(lockh);
+ LASSERT(lock != NULL);
+ LASSERT(ldlm_has_layout(lock));
+
+ LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n",
+ inode, PFID(&lli->lli_fid), reconf);
+
+ lock_res_and_lock(lock);
+ lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
+ unlock_res_and_lock(lock);
+ /* checking lvb_ready is racy but this is okay. The worst case is
+ * that multiple processes may configure the file at the same time. */
+ if (lvb_ready || !reconf) {
+ LDLM_LOCK_PUT(lock);
+
+ rc = -ENODATA;
+ if (lvb_ready) {
+ /* layout_gen must be valid if layout lock is not
+ * cancelled and stripe has already been set */
+ *gen = lli->lli_layout_gen;
+ rc = 0;
+ }
+ ldlm_lock_decref(lockh, mode);
+ RETURN(rc);
+ }
+
+ /* for layout lock, lmm is returned in lock's lvb.
+ * lvb_data is immutable if the lock is held so it's safe to access it
+ * without res lock. See the description in ldlm_lock_decref_internal()
+ * for the condition to free lvb_data of layout lock */
+ if (lock->l_lvb_data != NULL) {
+ rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
+ lock->l_lvb_data, lock->l_lvb_len);
+ if (rc >= 0) {
+ if (md.lsm != NULL)
+ *gen = md.lsm->lsm_layout_gen;
+ rc = 0;
+ } else {
+ CERROR("%s: file "DFID" unpackmd error: %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&lli->lli_fid), rc);
+ }
+ }
+ if (rc < 0) {
+ LDLM_LOCK_PUT(lock);
+ ldlm_lock_decref(lockh, mode);
+ RETURN(rc);
+ }
+
+ /* set layout to file. Unlikely this will fail as old layout was
+ * surely eliminated */
+ memset(&conf, 0, sizeof conf);
+ conf.coc_opc = OBJECT_CONF_SET;
+ conf.coc_inode = inode;
+ conf.coc_lock = lock;
+ conf.u.coc_md = &md;
+ rc = ll_layout_conf(inode, &conf);
+ LDLM_LOCK_PUT(lock);
+
+ ldlm_lock_decref(lockh, mode);
+
+ /* the unpacked lsm copy is no longer needed once applied */
+ if (md.lsm != NULL)
+ obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
+
+ /* wait for IO to complete if it's still being used. */
+ if (rc == -EBUSY) {
+ CDEBUG(D_INODE, "%s: %p/"DFID" wait for layout reconf.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ inode, PFID(&lli->lli_fid));
+
+ memset(&conf, 0, sizeof conf);
+ conf.coc_opc = OBJECT_CONF_WAIT;
+ conf.coc_inode = inode;
+ rc = ll_layout_conf(inode, &conf);
+ if (rc == 0)
+ rc = -EAGAIN;
+
+ CDEBUG(D_INODE, "file: "DFID" waiting layout return: %d.\n",
+ PFID(&lli->lli_fid), rc);
+ }
+
+ RETURN(rc);
+}
+
+/**
* This function checks if there exists a LAYOUT lock on the client side,
* or enqueues it if it doesn't have one in cache.
*
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct md_op_data *op_data = NULL;
- struct ptlrpc_request *req = NULL;
- struct lookup_intent it = { .it_op = IT_LAYOUT };
+ struct md_op_data *op_data;
+ struct lookup_intent it;
struct lustre_handle lockh;
ldlm_mode_t mode;
- struct cl_object_conf conf = { .coc_inode = inode,
- .coc_validate_only = true };
+ struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ .ei_cbdata = inode };
int rc;
ENTRY;
- *gen = 0;
- if (!(ll_i2sbi(inode)->ll_flags & LL_SBI_LAYOUT_LOCK))
+ *gen = LL_LAYOUT_GEN_ZERO;
+ if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
RETURN(0);
/* sanity checks */
/* mostly layout lock is caching on the local side, so try to match
* it before grabbing layout lock mutex. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh);
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0);
if (mode != 0) { /* hit cached lock */
- struct lov_stripe_md *lsm;
+ rc = ll_layout_lock_set(&lockh, mode, inode, gen, false);
+ if (rc == 0)
+ RETURN(0);
- lsm = ccc_inode_lsm_get(inode);
- if (lsm != NULL)
- *gen = lsm->lsm_layout_gen;
- ccc_inode_lsm_put(inode, lsm);
- ldlm_lock_decref(&lockh, mode);
+ /* better hold lli_layout_mutex to try again otherwise
+ * it will have a starvation problem. */
+ }
- RETURN(0);
+ /* take layout lock mutex to enqueue layout lock exclusively. */
+ mutex_lock(&lli->lli_layout_mutex);
+
+again:
+ /* try again. Maybe somebody else has done this. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0);
+ if (mode != 0) { /* hit cached lock */
+ rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+ if (rc == -EAGAIN)
+ goto again;
+
+ mutex_unlock(&lli->lli_layout_mutex);
+ RETURN(rc);
}
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
+ 0, 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data)) {
+ mutex_unlock(&lli->lli_layout_mutex);
RETURN(PTR_ERR(op_data));
+ }
- /* take layout lock mutex to enqueue layout lock exclusively. */
- cfs_mutex_lock(&lli->lli_layout_mutex);
+ /* have to enqueue one */
+ memset(&it, 0, sizeof(it));
+ it.it_op = IT_LAYOUT;
+ lockh.cookie = 0ULL;
- /* make sure the old conf goes away */
- ll_layout_conf(inode, &conf);
+ LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/"DFID".\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), inode,
+ PFID(&lli->lli_fid));
- /* enqueue layout lock */
- rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0,
- &req, ll_md_blocking_ast, 0);
- if (rc == 0) {
- /* we get a new lock, so update the lock data */
- lockh.cookie = it.d.lustre.it_lock_handle;
- md_set_lock_data(sbi->ll_md_exp, &lockh.cookie, inode, NULL);
-
- /* req == NULL is when lock was found in client cache, without
- * any request to server (but lsm can be canceled just after a
- * release) */
- if (req != NULL) {
- struct ldlm_lock *lock = ldlm_handle2lock(&lockh);
- struct lustre_md md = { NULL };
- void *lmm;
- int lmmsize;
-
- /* for IT_LAYOUT lock, lmm is returned in lock's lvb
- * data via completion callback */
- LASSERT(lock != NULL);
- lmm = lock->l_lvb_data;
- lmmsize = lock->l_lvb_len;
- if (lmm != NULL)
- rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
- lmm, lmmsize);
- if (rc == 0) {
- if (md.lsm != NULL)
- *gen = md.lsm->lsm_layout_gen;
-
- memset(&conf, 0, sizeof conf);
- conf.coc_inode = inode;
- conf.u.coc_md = &md;
- ll_layout_conf(inode, &conf);
- /* is this racy? */
- lli->lli_has_smd = md.lsm != NULL;
- }
- if (md.lsm != NULL)
- obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
+ rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
+ NULL, 0, NULL, 0);
+ if (it.d.lustre.it_data != NULL)
+ ptlrpc_req_finished(it.d.lustre.it_data);
+ it.d.lustre.it_data = NULL;
+
+ ll_finish_md_op_data(op_data);
- LDLM_LOCK_PUT(lock);
- ptlrpc_req_finished(req);
- } else { /* hit caching lock */
- struct lov_stripe_md *lsm;
+ mode = it.d.lustre.it_lock_mode;
+ it.d.lustre.it_lock_mode = 0;
+ ll_intent_drop_lock(&it);
- lsm = ccc_inode_lsm_get(inode);
- if (lsm != NULL)
- *gen = lsm->lsm_layout_gen;
- ccc_inode_lsm_put(inode, lsm);
- }
- ll_intent_drop_lock(&it);
+ if (rc == 0) {
+ /* set lock data in case this is a new lock */
+ ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
+ rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+ if (rc == -EAGAIN)
+ goto again;
}
- cfs_mutex_unlock(&lli->lli_layout_mutex);
- ll_finish_md_op_data(op_data);
+ mutex_unlock(&lli->lli_layout_mutex);
RETURN(rc);
}