*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#endif
#include <lustre/ll_fiemap.h>
-#include <lustre_ioctl.h>
+#include <uapi/linux/lustre_ioctl.h>
#include <lustre_swab.h>
#include "cl_object.h"
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(ll_inode2fid(inode)), inode);
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- inode == inode->i_sb->s_root->d_inode) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(fd != NULL);
- if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
- fd->fd_flags &= ~LL_FILE_RMTACL;
- rct_del(&sbi->ll_rct, current_pid());
- et_search_free(&sbi->ll_et, current_pid());
- }
- }
-#endif
-
- if (inode->i_sb->s_root != file->f_path.dentry)
+ if (inode->i_sb->s_root != file_dentry(file))
ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
fd = LUSTRE_FPRIVATE(file);
LASSERT(fd != NULL);
if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
ll_deauthorize_statahead(inode, fd);
- if (inode->i_sb->s_root == file->f_path.dentry) {
+ if (inode->i_sb->s_root == file_dentry(file)) {
LUSTRE_FPRIVATE(file) = NULL;
ll_file_data_put(fd);
RETURN(0);
RETURN(rc);
}
-static int ll_intent_file_open(struct file *file, void *lmm, int lmmsize,
+static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
struct lookup_intent *itp)
{
- struct dentry *de = file->f_path.dentry;
struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
struct dentry *parent = de->d_parent;
const char *name = NULL;
ptlrpc_req_finished(req);
ll_intent_drop_lock(itp);
+ /* We did open by fid, but by the time we got to the server,
+ * the object disappeared. If this is a create, we cannot really
+ * tell the userspace that the file it was trying to create
+ * does not exist. Instead let's return -ESTALE, and the VFS will
+ * retry the create with LOOKUP_REVAL that we are going to catch
+ * in ll_revalidate_dentry() and use lookup then.
+ */
+ if (rc == -ENOENT && itp->it_op & IT_CREAT)
+ rc = -ESTALE;
+
RETURN(rc);
}
static int ll_local_open(struct file *file, struct lookup_intent *it,
struct ll_file_data *fd, struct obd_client_handle *och)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
ENTRY;
LASSERT(!LUSTRE_FPRIVATE(file));
if (S_ISDIR(inode->i_mode))
ll_authorize_statahead(inode, fd);
- if (inode->i_sb->s_root == file->f_path.dentry) {
+ if (inode->i_sb->s_root == file_dentry(file)) {
LUSTRE_FPRIVATE(file) = fd;
RETURN(0);
}
GOTO(out_openerr, rc);
}
- ll_release_openhandle(file->f_path.dentry, it);
+ ll_release_openhandle(file_dentry(file), it);
}
(*och_usecount)++;
} else {
LASSERT(*och_usecount == 0);
if (!it->it_disposition) {
+ struct ll_dentry_data *ldd = ll_d2d(file->f_path.dentry);
/* We cannot just request lock handle now, new ELC code
means that one of other OPEN locks for this file
could be cancelled, and since blocking ast handler
* handle to be returned from LOOKUP|OPEN request,
* for example if the target entry was a symlink.
*
- * Always fetch MDS_OPEN_LOCK if this is not setstripe.
+ * Only fetch MDS_OPEN_LOCK if this is in NFS path,
+ * marked by a bit set in ll_iget_for_nfs. Clear the
+ * bit so that it's not confusing later callers.
*
+ * NB: when ldd is NULL, it must have come via normal
+ * lookup path only, since ll_iget_for_nfs always calls
+ * ll_d_init().
+ */
+ if (ldd && ldd->lld_nfs_dentry) {
+ ldd->lld_nfs_dentry = 0;
+ it->it_flags |= MDS_OPEN_LOCK;
+ }
+
+ /*
* Always specify MDS_OPEN_BY_FID because we don't want
* to get file with different fid.
*/
- it->it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID;
- rc = ll_intent_file_open(file, NULL, 0, it);
+ it->it_flags |= MDS_OPEN_BY_FID;
+ rc = ll_intent_file_open(file_dentry(file), NULL, 0,
+ it);
if (rc)
GOTO(out_openerr, rc);
}
/**
+ * When setting a lease on a file, we take ownership of the lli_mds_*_och
+ * and save it as fd->fd_och so as to force client to reopen the file even
+ * if it has an open lock in cache already.
+ */
+static int ll_lease_och_acquire(struct inode *inode, struct file *file,
+ struct lustre_handle *old_handle)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct obd_client_handle **och_p;
+ __u64 *och_usecount;
+ int rc = 0;
+ ENTRY;
+
+ /* Get the openhandle of the file */
+ mutex_lock(&lli->lli_och_mutex);
+ /* Only one lease per file descriptor: fail if this fd already
+ * holds a lease open handle. */
+ if (fd->fd_lease_och != NULL)
+ GOTO(out_unlock, rc = -EBUSY);
+
+ if (fd->fd_och == NULL) {
+ /* Pick the MDS open handle matching this fd's access mode;
+ * the LASSERTs hold because an open file must have populated
+ * the corresponding och slot. */
+ if (file->f_mode & FMODE_WRITE) {
+ LASSERT(lli->lli_mds_write_och != NULL);
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else {
+ LASSERT(lli->lli_mds_read_och != NULL);
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
+
+ /* More than one opener shares this handle; taking ownership
+ * would strand the other users, so refuse. */
+ if (*och_usecount > 1)
+ GOTO(out_unlock, rc = -EBUSY);
+
+ /* Transfer ownership of the open handle from the inode to
+ * this fd so a later open is forced to go to the MDS. */
+ fd->fd_och = *och_p;
+ *och_usecount = 0;
+ *och_p = NULL;
+ }
+
+ *old_handle = fd->fd_och->och_fh;
+
+ EXIT;
+out_unlock:
+ mutex_unlock(&lli->lli_och_mutex);
+ return rc;
+}
+
+/**
+ * Release ownership on lli_mds_*_och when putting back a file lease.
+ */
+static int ll_lease_och_release(struct inode *inode, struct file *file)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct obd_client_handle **och_p;
+ struct obd_client_handle *old_och = NULL;
+ __u64 *och_usecount;
+ int rc = 0;
+ ENTRY;
+
+ mutex_lock(&lli->lli_och_mutex);
+ /* Select the inode-level handle slot matching this fd's mode,
+ * mirroring the choice made in ll_lease_och_acquire(). */
+ if (file->f_mode & FMODE_WRITE) {
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else {
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
+
+ /* The file may have been open by another process (broken lease) so
+ * *och_p is not NULL. In this case we should simply increase usecount
+ * and close fd_och.
+ */
+ if (*och_p != NULL) {
+ old_och = fd->fd_och;
+ (*och_usecount)++;
+ } else {
+ /* Slot is empty: hand the fd's handle back to the inode. */
+ *och_p = fd->fd_och;
+ *och_usecount = 1;
+ }
+ fd->fd_och = NULL;
+ mutex_unlock(&lli->lli_och_mutex);
+
+ /* Close the redundant handle outside the mutex; closing may RPC
+ * to the MDS. */
+ if (old_och != NULL)
+ rc = ll_close_inode_openhandle(inode, old_och, 0, NULL);
+
+ RETURN(rc);
+}
+
+/**
* Acquire a lease and open the file.
*/
static struct obd_client_handle *
RETURN(ERR_PTR(-EINVAL));
if (file != NULL) {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct obd_client_handle **och_p;
- __u64 *och_usecount;
-
if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
RETURN(ERR_PTR(-EPERM));
- /* Get the openhandle of the file */
- rc = -EBUSY;
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
- mutex_unlock(&lli->lli_och_mutex);
- RETURN(ERR_PTR(rc));
- }
-
- if (fd->fd_och == NULL) {
- if (file->f_mode & FMODE_WRITE) {
- LASSERT(lli->lli_mds_write_och != NULL);
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else {
- LASSERT(lli->lli_mds_read_och != NULL);
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
- if (*och_usecount == 1) {
- fd->fd_och = *och_p;
- *och_p = NULL;
- *och_usecount = 0;
- rc = 0;
- }
- }
- mutex_unlock(&lli->lli_och_mutex);
- if (rc < 0) /* more than 1 opener */
+ rc = ll_lease_och_acquire(inode, file, &old_handle);
+ if (rc)
RETURN(ERR_PTR(rc));
-
- LASSERT(fd->fd_och != NULL);
- old_handle = fd->fd_och->och_fh;
}
OBD_ALLOC_PTR(och);
if (it.it_lock_mode == 0 ||
it.it_lock_bits != MDS_INODELOCK_OPEN) {
/* open lock must return for lease */
- CERROR(DFID "lease granted but no open lock, %d/"LPU64".\n",
+ CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
PFID(ll_inode2fid(inode)), it.it_lock_mode,
it.it_lock_bits);
GOTO(out_close, rc = -EPROTO);
}
CDEBUG(D_INODE, "lease for "DFID" broken? %d\n",
- PFID(&ll_i2info(inode)->lli_fid), cancelled);
+ PFID(&ll_i2info(inode)->lli_fid), cancelled);
if (!cancelled)
ldlm_cli_cancel(&och->och_lease_handle, 0);
+
if (lease_broken != NULL)
*lease_broken = cancelled;
* All in all, the atime in Lustre does not strictly comply with
* POSIX. Solving this problem needs to send an RPC to MDT for each
* read, this will hurt performance. */
- if (LTIME_S(inode->i_atime) < lli->lli_atime)
+ if (LTIME_S(inode->i_atime) < lli->lli_atime || lli->lli_update_atime) {
LTIME_S(inode->i_atime) = lli->lli_atime;
+ lli->lli_update_atime = 0;
+ }
LTIME_S(inode->i_mtime) = lli->lli_mtime;
LTIME_S(inode->i_ctime) = lli->lli_ctime;
if (mtime < attr->cat_mtime)
mtime = attr->cat_mtime;
- CDEBUG(D_VFSTRACE, DFID" updating i_size "LPU64"\n",
+ CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n",
PFID(&lli->lli_fid), attr->cat_size);
i_size_write(inode, attr->cat_size);
static bool file_is_noatime(const struct file *file)
{
const struct vfsmount *mnt = file->f_path.mnt;
- const struct inode *inode = file->f_path.dentry->d_inode;
+ const struct inode *inode = file_inode((struct file *)file);
/* Adapted from file_accessed() and touch_atime().*/
if (file->f_flags & O_NOATIME)
return false;
}
-static void ll_io_init(struct cl_io *io, const struct file *file, int write)
+static int ll_file_io_ptask(struct cfs_ptask *ptask);
+
+static void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot)
{
- struct inode *inode = file->f_path.dentry->d_inode;
-
- io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
- if (write) {
- io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
- io->u.ci_wr.wr_sync = file->f_flags & O_SYNC ||
- file->f_flags & O_DIRECT ||
- IS_SYNC(inode);
+ struct inode *inode = file_inode(file);
+
+ memset(&io->u.ci_rw.rw_iter, 0, sizeof(io->u.ci_rw.rw_iter));
+ init_sync_kiocb(&io->u.ci_rw.rw_iocb, file);
+ io->u.ci_rw.rw_file = file;
+ io->u.ci_rw.rw_ptask = ll_file_io_ptask;
+ io->u.ci_rw.rw_nonblock = !!(file->f_flags & O_NONBLOCK);
+ if (iot == CIT_WRITE) {
+ io->u.ci_rw.rw_append = !!(file->f_flags & O_APPEND);
+ io->u.ci_rw.rw_sync = !!(file->f_flags & O_SYNC ||
+ file->f_flags & O_DIRECT ||
+ IS_SYNC(inode));
+ }
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_lockreq = CILR_MAYBE;
+ if (ll_file_nolock(file)) {
+ io->ci_lockreq = CILR_NEVER;
+ io->ci_no_srvlock = 1;
+ } else if (file->f_flags & O_APPEND) {
+ io->ci_lockreq = CILR_MANDATORY;
}
- io->ci_obj = ll_i2info(inode)->lli_clob;
- io->ci_lockreq = CILR_MAYBE;
- if (ll_file_nolock(file)) {
- io->ci_lockreq = CILR_NEVER;
- io->ci_no_srvlock = 1;
- } else if (file->f_flags & O_APPEND) {
- io->ci_lockreq = CILR_MANDATORY;
- }
-
io->ci_noatime = file_is_noatime(file);
+ if (ll_i2sbi(inode)->ll_flags & LL_SBI_PIO)
+ io->ci_pio = !io->u.ci_rw.rw_append;
+ else
+ io->ci_pio = 0;
+}
+
+/* Parallel-task worker for a split file I/O: runs one sub-range of a
+ * read/write described by the cl_io_pt in ptask->pt_cbdata. Returns 0 if
+ * any bytes were transferred (partial progress is recorded in
+ * pt->cip_result), otherwise the cl_io error code. */
+static int ll_file_io_ptask(struct cfs_ptask *ptask)
+{
+ struct cl_io_pt *pt = ptask->pt_cbdata;
+ struct file *file = pt->cip_file;
+ struct lu_env *env;
+ struct cl_io *io;
+ loff_t pos = pt->cip_pos;
+ int rc;
+ __u16 refcheck;
+ ENTRY;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ CDEBUG(D_VFSTRACE, "%s: %s range: [%llu, %llu)\n",
+ file_dentry(file)->d_name.name,
+ pt->cip_iot == CIT_READ ? "read" : "write",
+ pos, pos + pt->cip_count);
+
+restart:
+ io = vvp_env_thread_io(env);
+ ll_io_init(io, file, pt->cip_iot);
+ /* Copy the caller's iterator/iocb snapshot; the task works on its
+ * own copies so sibling tasks do not interfere. */
+ io->u.ci_rw.rw_iter = pt->cip_iter;
+ io->u.ci_rw.rw_iocb = pt->cip_iocb;
+ io->ci_pio = 0; /* It's already in parallel task */
+
+ rc = cl_io_rw_init(env, io, pt->cip_iot, pos,
+ pt->cip_count - pt->cip_result);
+ if (!rc) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ vio->vui_io_subtype = IO_NORMAL;
+ vio->vui_fd = LUSTRE_FPRIVATE(file);
+
+ ll_cl_add(file, env, io, LCC_RW);
+ rc = cl_io_loop(env, io);
+ ll_cl_remove(file, env);
+ } else {
+ /* cl_io_rw_init() handled IO */
+ rc = io->ci_result;
+ }
+
+ /* Fault-injection hook: pretend the IO half-completed then failed. */
+ if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_PTASK_IO_FAIL, 0)) {
+ if (io->ci_nob > 0)
+ io->ci_nob /= 2;
+ rc = -EIO;
+ }
+
+ /* Account partial progress and advance our private iter/iocb so a
+ * restart resumes where this pass stopped. */
+ if (io->ci_nob > 0) {
+ pt->cip_result += io->ci_nob;
+ iov_iter_advance(&pt->cip_iter, io->ci_nob);
+ pos += io->ci_nob;
+ pt->cip_iocb.ki_pos = pos;
+#ifdef HAVE_KIOCB_KI_LEFT
+ pt->cip_iocb.ki_left = pt->cip_count - pt->cip_result;
+#elif defined(HAVE_KI_NBYTES)
+ pt->cip_iocb.ki_nbytes = pt->cip_count - pt->cip_result;
+#endif
+ }
+
+ cl_io_fini(env, io);
+
+ /* Restart when the cl_io layer asks for it (e.g. layout change)
+ * and there is still uncopied data in our range. */
+ if ((rc == 0 || rc == -ENODATA) &&
+ pt->cip_result < pt->cip_count &&
+ io->ci_need_restart) {
+ CDEBUG(D_VFSTRACE,
+ "%s: restart %s range: [%llu, %llu) ret: %zd, rc: %d\n",
+ file_dentry(file)->d_name.name,
+ pt->cip_iot == CIT_READ ? "read" : "write",
+ pos, pos + pt->cip_count - pt->cip_result,
+ pt->cip_result, rc);
+ goto restart;
+ }
+
+ CDEBUG(D_VFSTRACE, "%s: %s ret: %zd, rc: %d\n",
+ file_dentry(file)->d_name.name,
+ pt->cip_iot == CIT_READ ? "read" : "write",
+ pt->cip_result, rc);
+
+ cl_env_put(env, &refcheck);
+ RETURN(pt->cip_result > 0 ? 0 : rc);
+}
static ssize_t
struct file *file, enum cl_io_type iot,
loff_t *ppos, size_t count)
{
+ struct range_lock range;
struct vvp_io *vio = vvp_env_io(env);
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct cl_io *io;
+ loff_t pos = *ppos;
ssize_t result = 0;
int rc = 0;
- struct range_lock range;
ENTRY;
- CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: "LPU64", count: %zu\n",
- file->f_path.dentry->d_name.name, iot, *ppos, count);
+ CDEBUG(D_VFSTRACE, "%s: %s range: [%llu, %llu)\n",
+ file_dentry(file)->d_name.name,
+ iot == CIT_READ ? "read" : "write", pos, pos + count);
restart:
io = vvp_env_thread_io(env);
- ll_io_init(io, file, iot == CIT_WRITE);
+ ll_io_init(io, file, iot);
+ if (args->via_io_subtype == IO_NORMAL) {
+ io->u.ci_rw.rw_iter = *args->u.normal.via_iter;
+ io->u.ci_rw.rw_iocb = *args->u.normal.via_iocb;
+ } else {
+ io->ci_pio = 0;
+ }
- if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
+ if (cl_io_rw_init(env, io, iot, pos, count) == 0) {
bool range_locked = false;
if (file->f_flags & O_APPEND)
range_lock_init(&range, 0, LUSTRE_EOF);
else
- range_lock_init(&range, *ppos, *ppos + count - 1);
+ range_lock_init(&range, pos, pos + count - 1);
vio->vui_fd = LUSTRE_FPRIVATE(file);
vio->vui_io_subtype = args->via_io_subtype;
switch (vio->vui_io_subtype) {
case IO_NORMAL:
- vio->vui_iter = args->u.normal.via_iter;
- vio->vui_iocb = args->u.normal.via_iocb;
/* Direct IO reads must also take range lock,
* or multiple reads will try to work on the same pages
* See LU-6227 for details. */
}
ll_cl_add(file, env, io, LCC_RW);
+ if (io->ci_pio && iot == CIT_WRITE && !IS_NOSEC(inode) &&
+ !lli->lli_inode_locked) {
+ inode_lock(inode);
+ lli->lli_inode_locked = 1;
+ }
rc = cl_io_loop(env, io);
+ if (lli->lli_inode_locked) {
+ lli->lli_inode_locked = 0;
+ inode_unlock(inode);
+ }
ll_cl_remove(file, env);
if (range_locked) {
if (io->ci_nob > 0) {
result += io->ci_nob;
- count -= io->ci_nob;
- *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ count -= io->ci_nob;
- /* prepare IO restart */
- if (count > 0 && args->via_io_subtype == IO_NORMAL)
- args->u.normal.via_iter = vio->vui_iter;
+ if (args->via_io_subtype == IO_NORMAL) {
+ iov_iter_advance(args->u.normal.via_iter, io->ci_nob);
+ pos += io->ci_nob;
+ args->u.normal.via_iocb->ki_pos = pos;
+#ifdef HAVE_KIOCB_KI_LEFT
+ args->u.normal.via_iocb->ki_left = count;
+#elif defined(HAVE_KI_NBYTES)
+ args->u.normal.via_iocb->ki_nbytes = count;
+#endif
+ } else {
+ /* for splice */
+ pos = io->u.ci_rw.rw_range.cir_pos;
+ }
}
- GOTO(out, rc);
out:
cl_io_fini(env, io);
if ((rc == 0 || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
- "%s: restart %s from %lld, count:%zu, result: %zd\n",
- file->f_path.dentry->d_name.name,
- iot == CIT_READ ? "read" : "write",
- *ppos, count, result);
+ "%s: restart %s range: [%llu, %llu) ret: %zd, rc: %d\n",
+ file_dentry(file)->d_name.name,
+ iot == CIT_READ ? "read" : "write",
+ pos, pos + count, result, rc);
goto restart;
}
}
}
- CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
+ CDEBUG(D_VFSTRACE, "%s: %s *ppos: %llu, pos: %llu, ret: %zd, rc: %d\n",
+ file_dentry(file)->d_name.name,
+ iot == CIT_READ ? "read" : "write", *ppos, pos, result, rc);
- return result > 0 ? result : rc;
+ *ppos = pos;
+
+ RETURN(result > 0 ? result : rc);
}
/**
static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
+ struct lu_env *env;
struct iovec iov = { .iov_base = buf, .iov_len = count };
- struct kiocb *kiocb;
- ssize_t result;
- ENTRY;
+ struct kiocb *kiocb;
+ ssize_t result;
+ __u16 refcheck;
+ ENTRY;
- OBD_ALLOC_PTR(kiocb);
- if (kiocb == NULL)
- RETURN(-ENOMEM);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+ kiocb = &ll_env_info(env)->lti_kiocb;
init_sync_kiocb(kiocb, file);
kiocb->ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
result = ll_file_aio_read(kiocb, &iov, 1, kiocb->ki_pos);
*ppos = kiocb->ki_pos;
- OBD_FREE_PTR(kiocb);
+ cl_env_put(env, &refcheck);
RETURN(result);
}
RETURN(result);
}
-int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
- __u64 flags, struct lov_user_md *lum,
- int lum_size)
+int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
+ __u64 flags, struct lov_user_md *lum, int lum_size)
{
struct lookup_intent oit = {
.it_op = IT_OPEN,
ENTRY;
ll_inode_size_lock(inode);
- rc = ll_intent_file_open(file, lum, lum_size, &oit);
+ rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
if (rc < 0)
GOTO(out_unlock, rc);
- ll_release_openhandle(file->f_path.dentry, &oit);
+ ll_release_openhandle(dentry, &oit);
out_unlock:
ll_inode_size_unlock(inode);
ll_intent_release(&oit);
- cl_lov_delay_create_clear(&file->f_flags);
RETURN(rc);
}
lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
LASSERT(lmm != NULL);
- if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
- (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
- GOTO(out, rc = -EPROTO);
- }
+ if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
+ lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
+ lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_COMP_V1))
+ GOTO(out, rc = -EPROTO);
/*
* This is coming from the MDS, so is probably in
if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC)) {
int stripe_count;
- stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
- if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
- stripe_count = 0;
+ if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
+ lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
+ stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_pattern) &
+ LOV_PATTERN_F_RELEASED)
+ stripe_count = 0;
+ }
/* if function called for directory - we should
* avoid swab not existent lsm objects */
if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
+ lustre_swab_lov_user_md_v1(
+ (struct lov_user_md_v1 *)lmm);
if (S_ISREG(body->mbo_mode))
lustre_swab_lov_user_md_objects(
((struct lov_user_md_v1 *)lmm)->lmm_objects,
stripe_count);
} else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
lustre_swab_lov_user_md_v3(
- (struct lov_user_md_v3 *)lmm);
+ (struct lov_user_md_v3 *)lmm);
if (S_ISREG(body->mbo_mode))
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v3 *)lmm)->lmm_objects,
- stripe_count);
- }
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v3 *)lmm)->lmm_objects,
+ stripe_count);
+ } else if (lmm->lmm_magic ==
+ cpu_to_le32(LOV_MAGIC_COMP_V1)) {
+ lustre_swab_lov_comp_md_v1(
+ (struct lov_comp_md_v1 *)lmm);
+ }
}
out:
}
static int ll_lov_setea(struct inode *inode, struct file *file,
- unsigned long arg)
+ void __user *arg)
{
__u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
struct lov_user_md *lump;
if (lump == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size))
+ if (copy_from_user(lump, arg, lum_size))
GOTO(out_lump, rc = -EFAULT);
- rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
+ rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, lump,
+ lum_size);
+ cl_lov_delay_create_clear(&file->f_flags);
out_lump:
OBD_FREE_LARGE(lump, lum_size);
RETURN(rc);
}
-static int ll_file_getstripe(struct inode *inode,
- struct lov_user_md __user *lum)
+static int ll_file_getstripe(struct inode *inode, void __user *lum, size_t size)
{
struct lu_env *env;
__u16 refcheck;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum);
+ rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum, size);
cl_env_put(env, &refcheck);
RETURN(rc);
}
static int ll_lov_setstripe(struct inode *inode, struct file *file,
- unsigned long arg)
+ void __user *arg)
{
struct lov_user_md __user *lum = (struct lov_user_md __user *)arg;
struct lov_user_md *klum;
RETURN(rc);
lum_size = rc;
- rc = ll_lov_setstripe_ea_info(inode, file, flags, klum, lum_size);
- if (rc == 0) {
+ rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, klum,
+ lum_size);
+ if (!rc) {
__u32 gen;
- put_user(0, &lum->lmm_stripe_count);
+ rc = put_user(0, &lum->lmm_stripe_count);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = ll_layout_refresh(inode, &gen);
+ if (rc)
+ GOTO(out, rc);
- ll_layout_refresh(inode, &gen);
- rc = ll_file_getstripe(inode, (struct lov_user_md __user *)arg);
+ rc = ll_file_getstripe(inode, arg, lum_size);
}
+ cl_lov_delay_create_clear(&file->f_flags);
+out:
OBD_FREE(klum, lum_size);
RETURN(rc);
}
static int
ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_grouplock grouplock;
- int rc;
- ENTRY;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_object *obj = lli->lli_clob;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_grouplock grouplock;
+ int rc;
+ ENTRY;
if (arg == 0) {
CWARN("group id for group lock must not be 0\n");
LASSERT(fd->fd_grouplock.lg_lock == NULL);
spin_unlock(&lli->lli_lock);
+ /**
+ * XXX: group lock needs to protect all OST objects while PFL
+ * can add new OST objects during the IO, so we'd instantiate
+ * all OST objects before getting its group lock.
+ */
+ if (obj) {
+ struct lu_env *env;
+ __u16 refcheck;
+ struct cl_layout cl = {
+ .cl_is_composite = false,
+ };
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ rc = cl_object_layout_get(env, obj, &cl);
+ if (!rc && cl.cl_is_composite)
+ rc = ll_layout_write_intent(inode, 0, OBD_OBJECT_EOF);
+
+ cl_env_put(env, &refcheck);
+ if (rc)
+ RETURN(rc);
+ }
+
rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
if (llss == NULL)
RETURN(-ENOMEM);
- llss->inode1 = file1->f_path.dentry->d_inode;
- llss->inode2 = file2->f_path.dentry->d_inode;
+ llss->inode1 = file_inode(file1);
+ llss->inode2 = file_inode(file2);
rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
if (rc < 0)
ATTR_MTIME | ATTR_MTIME_SET |
ATTR_ATIME | ATTR_ATIME_SET;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
- rc = ll_setattr_raw(file->f_path.dentry, attr, true);
+ rc = ll_setattr_raw(file_dentry(file), attr, true);
if (rc == -ENODATA)
rc = 0;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
out:
if (hss != NULL)
static int ll_file_futimes_3(struct file *file, const struct ll_futimes_3 *lfu)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct iattr ia = {
.ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
ATTR_MTIME | ATTR_MTIME_SET |
if (!S_ISREG(inode->i_mode))
RETURN(-EINVAL);
- mutex_lock(&inode->i_mutex);
- rc = ll_setattr_raw(file->f_path.dentry, &ia, false);
- mutex_unlock(&inode->i_mutex);
+ inode_lock(inode);
+ rc = ll_setattr_raw(file_dentry(file), &ia, false);
+ inode_unlock(inode);
RETURN(rc);
}
* much more data being sent to the client.
*/
static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags,
- struct lu_ladvise *ladvise)
+ struct llapi_lu_ladvise *ladvise)
{
struct lu_env *env;
struct cl_io *io;
RETURN(rc);
}
+/* FS_IOC_FSGETXATTR handler: report the inode's project ID to userspace.
+ * The struct is first copied in so fields we do not fill keep whatever
+ * the caller passed; only fsx_projid is set here. `cmd` is unused but
+ * kept for ioctl-handler signature uniformity. */
+int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fsxattr fsxattr;
+
+ if (copy_from_user(&fsxattr,
+ (const struct fsxattr __user *)arg,
+ sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
+ if (copy_to_user((struct fsxattr __user *)arg,
+ &fsxattr, sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ RETURN(0);
+}
+
+/* FS_IOC_FSSETXATTR handler: set the inode's project ID on the MDS.
+ * Only fsx_projid from the user's struct fsxattr is honoured. `cmd` is
+ * unused but kept for ioctl-handler signature uniformity.
+ *
+ * Returns 0 on success, -EPERM for non-root callers, -EFAULT on a bad
+ * user pointer, or the md_setattr() error code. */
+int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct md_op_data *op_data;
+ struct ptlrpc_request *req = NULL;
+ struct fsxattr fsxattr;
+ int rc = 0;
+
+ /* only root could change project ID */
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ RETURN(-EPERM);
+
+ /* Validate the user copy before preparing op_data so an EFAULT
+ * does not pay for a prep/finish cycle. */
+ if (copy_from_user(&fsxattr,
+ (const struct fsxattr __user *)arg,
+ sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
+
+ op_data->op_projid = fsxattr.fsx_projid;
+ op_data->op_attr.ia_valid |= MDS_ATTR_PROJID;
+ rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL,
+ 0, &req);
+ ptlrpc_req_finished(req);
+
+ ll_finish_md_op_data(op_data);
+ RETURN(rc);
+}
+
static long
ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int flags, rc;
ENTRY;
fd->fd_flags &= ~flags;
}
RETURN(0);
- case LL_IOC_LOV_SETSTRIPE:
- RETURN(ll_lov_setstripe(inode, file, arg));
- case LL_IOC_LOV_SETEA:
- RETURN(ll_lov_setea(inode, file, arg));
+ case LL_IOC_LOV_SETSTRIPE:
+ case LL_IOC_LOV_SETSTRIPE_NEW:
+ RETURN(ll_lov_setstripe(inode, file, (void __user *)arg));
+ case LL_IOC_LOV_SETEA:
+ RETURN(ll_lov_setea(inode, file, (void __user *)arg));
case LL_IOC_LOV_SWAP_LAYOUTS: {
struct file *file2;
struct lustre_swap_layouts lsl;
if (copy_from_user(&lsl, (char __user *)arg,
- sizeof(struct lustre_swap_layouts)))
+ sizeof(struct lustre_swap_layouts)))
RETURN(-EFAULT);
if ((file->f_flags & O_ACCMODE) == O_RDONLY)
mutex_unlock(&lli->lli_och_mutex);
if (och == NULL)
GOTO(out, rc = -ENOLCK);
- inode2 = file2->f_path.dentry->d_inode;
+ inode2 = file_inode(file2);
rc = ll_swap_layouts_close(och, inode, inode2);
} else {
rc = ll_swap_layouts(file, file2, &lsl);
RETURN(rc);
}
case LL_IOC_LOV_GETSTRIPE:
- RETURN(ll_file_getstripe(inode,
- (struct lov_user_md __user *)arg));
+ case LL_IOC_LOV_GETSTRIPE_NEW:
+ RETURN(ll_file_getstripe(inode, (void __user *)arg, 0));
case FSFILT_IOC_GETFLAGS:
case FSFILT_IOC_SETFLAGS:
RETURN(ll_iocontrol(inode, file, cmd, arg));
if (rc < 0)
RETURN(rc);
+ rc = ll_lease_och_release(inode, file);
+ if (rc < 0)
+ RETURN(rc);
+
if (lease_broken)
fmode = 0;
RETURN(ll_file_futimes_3(file, &lfu));
}
case LL_IOC_LADVISE: {
- struct ladvise_hdr *ladvise_hdr;
+ struct llapi_ladvise_hdr *ladvise_hdr;
int i;
int num_advise;
int alloc_size = sizeof(*ladvise_hdr);
RETURN(-ENOMEM);
if (copy_from_user(ladvise_hdr,
- (const struct ladvise_hdr __user *)arg,
+ (const struct llapi_ladvise_hdr __user *)arg,
alloc_size))
GOTO(out_ladvise, rc = -EFAULT);
* TODO: submit multiple advices to one server in a single RPC
*/
if (copy_from_user(ladvise_hdr,
- (const struct ladvise_hdr __user *)arg,
+ (const struct llapi_ladvise_hdr __user *)arg,
alloc_size))
GOTO(out_ladvise, rc = -EFAULT);
OBD_FREE(ladvise_hdr, alloc_size);
RETURN(rc);
}
+ case LL_IOC_FSGETXATTR:
+ RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
+ case LL_IOC_FSSETXATTR:
+ RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
default: {
int err;
generic_file_llseek_size(struct file *file, loff_t offset, int origin,
loff_t maxsize, loff_t eof)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
switch (origin) {
case SEEK_END:
* SEEK_CURs. Note that parallel writes and reads behave
* like SEEK_SET.
*/
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
offset = llseek_execute(file, file->f_pos + offset, maxsize);
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return offset;
case SEEK_DATA:
/*
static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
loff_t retval, eof = 0;
ENTRY;
static int ll_flush(struct file *file, fl_owner_t id)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int rc, err;
}
/*
- * When dentry is provided (the 'else' case), *file->f_path.dentry may be
+ * When dentry is provided (the 'else' case), file_dentry() may be
* null and dentry must be used directly rather than pulled from
- * *file->f_path.dentry as is done otherwise.
+ * file_dentry() as is done otherwise.
*/
#ifdef HAVE_FILE_FSYNC_4ARGS
int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
+ bool lock_inode;
#elif defined(HAVE_FILE_FSYNC_2ARGS)
int ll_fsync(struct file *file, int datasync)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
loff_t start = 0;
loff_t end = LLONG_MAX;
#else
#ifdef HAVE_FILE_FSYNC_4ARGS
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- mutex_lock(&inode->i_mutex);
+ lock_inode = !lli->lli_inode_locked;
+ if (lock_inode)
+ inode_lock(inode);
#else
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
lli->lli_async_rc = 0;
if (rc == 0)
rc = err;
- err = lov_read_and_clear_async_rc(lli->lli_clob);
- if (rc == 0)
- rc = err;
+ if (lli->lli_clob != NULL) {
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (rc == 0)
+ rc = err;
+ }
}
err = md_fsync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
}
#ifdef HAVE_FILE_FSYNC_4ARGS
- mutex_unlock(&inode->i_mutex);
+ if (lock_inode)
+ inode_unlock(inode);
#endif
RETURN(rc);
}
static int
ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ldlm_enqueue_info einfo = {
.ei_type = LDLM_FLOCK,
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags="LPX64", mode=%u, "
- "start="LPU64", end="LPU64"\n", PFID(ll_inode2fid(inode)),
+ CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, "
+ "start=%llu, end=%llu\n", PFID(ll_inode2fid(inode)),
flock.l_flock.pid, flags, einfo.ei_mode,
flock.l_flock.start, flock.l_flock.end);
- rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, NULL, op_data, &lockh,
+ rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, &lockh,
flags);
/* Restore the file lock type if not TEST lock. */
if (rc2 && file_lock->fl_type != F_UNLCK) {
einfo.ei_mode = LCK_NL;
- md_enqueue(sbi->ll_md_exp, &einfo, &flock, NULL, op_data,
+ md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data,
&lockh, flags);
rc = rc2;
}
RETURN(PTR_ERR(op_data));
/* Get child FID first */
- qstr.hash = full_name_hash(name, namelen);
+ qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
qstr.name = name;
qstr.len = namelen;
- dchild = d_lookup(file->f_path.dentry, &qstr);
+ dchild = d_lookup(file_dentry(file), &qstr);
if (dchild != NULL) {
if (dchild->d_inode != NULL)
child_inode = igrab(dchild->d_inode);
if (child_inode == parent->i_sb->s_root->d_inode)
GOTO(out_iput, rc = -EINVAL);
- mutex_lock(&child_inode->i_mutex);
+ inode_lock(child_inode);
op_data->op_fid3 = *ll_inode2fid(child_inode);
if (!fid_is_sane(&op_data->op_fid3)) {
CERROR("%s: migrate %s, but FID "DFID" is insane\n",
op_data->op_cli_flags = CLI_MIGRATE;
rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name,
namelen, name, namelen, &request);
- if (rc == 0)
+ if (rc == 0) {
+ LASSERT(request != NULL);
ll_update_times(request, parent);
- if (request != NULL) {
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
- ptlrpc_req_finished(request);
- GOTO(out_close, rc = -EPROTO);
- }
+ LASSERT(body != NULL);
/* If the server does release layout lock, then we cleanup
* the client och here, otherwise release it in out_close: */
OBD_FREE_PTR(och);
och = NULL;
}
+ }
+
+ if (request != NULL) {
ptlrpc_req_finished(request);
+ request = NULL;
}
/* Try again if the file layout has changed. */
- if (rc == -EAGAIN && S_ISREG(child_inode->i_mode)) {
- request = NULL;
+ if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
goto again;
- }
+
out_close:
if (och != NULL) /* close the file */
ll_lease_close(och, child_inode, NULL);
if (rc == 0)
clear_nlink(child_inode);
out_unlock:
- mutex_unlock(&child_inode->i_mutex);
+ inode_unlock(child_inode);
out_iput:
iput(child_inode);
out_free:
RETURN(rc);
}
+static inline dev_t ll_compat_encode_dev(dev_t dev)
+{
+ /* The compat_sys_*stat*() syscalls will fail unless the
+ * device majors and minors are both less than 256. Note that
+ * the value returned here will be passed through
+ * old_encode_dev() in cp_compat_stat(). And so we are not
+ * trying to return a valid compat (u16) device number, just
+ * one that will pass the old_valid_dev() check. */
+
+ return MKDEV(MAJOR(dev) & 0xff, MINOR(dev) & 0xff);
+}
+
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
{
struct inode *inode = de->d_inode;
OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
- stat->dev = inode->i_sb->s_dev;
- if (ll_need_32bit_api(sbi))
+ if (ll_need_32bit_api(sbi)) {
stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
- else
+ stat->dev = ll_compat_encode_dev(inode->i_sb->s_dev);
+ stat->rdev = ll_compat_encode_dev(inode->i_rdev);
+ } else {
stat->ino = inode->i_ino;
+ stat->dev = inode->i_sb->s_dev;
+ stat->rdev = inode->i_rdev;
+ }
+
stat->mode = inode->i_mode;
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
- stat->rdev = inode->i_rdev;
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
- stat->blksize = 1 << inode->i_blkbits;
+ stat->blksize = sbi->ll_stat_blksize ?: 1 << inode->i_blkbits;
stat->nlink = inode->i_nlink;
stat->size = i_size_read(inode);
}
ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM, 1);
-
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- rc = lustre_check_remote_perm(inode, mask);
- else
- rc = ll_generic_permission(inode, mask, flags, ll_check_acl);
-
+ rc = ll_generic_permission(inode, mask, flags, ll_check_acl);
/* restore current process's credentials and FS capability */
if (squash_id) {
revert_creds(old_cred);
PFID(&lli->lli_fid), inode);
/* in case this is a caching lock and reinstate with new inode */
- md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
+ md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
lock_res_and_lock(lock);
lvb_ready = ldlm_is_lvb_ready(lock);
unlock_res_and_lock(lock);
+
/* checking lvb_ready is racy but this is okay. The worst case is
* that multi processes may configure the file on the same time. */
-
if (lvb_ready)
GOTO(out, rc = 0);
/* refresh layout failed, need to wait */
wait_layout = rc == -EBUSY;
EXIT;
-
out:
LDLM_LOCK_PUT(lock);
ldlm_lock_decref(lockh, mode);
RETURN(rc);
}
-static int ll_layout_refresh_locked(struct inode *inode)
+/**
+ * Issue layout intent RPC to MDS.
+ * \param inode [in] file inode
+ * \param intent [in] layout intent
+ *
+ * \retval 0 on success
+ * \retval < 0 error code
+ */
+static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
- struct lookup_intent it;
- struct lustre_handle lockh;
- enum ldlm_mode mode;
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = LCK_CR,
- .ei_cb_bl = &ll_md_blocking_ast,
- .ei_cb_cp = &ldlm_completion_ast,
- };
+ struct lookup_intent it;
+ struct ptlrpc_request *req;
int rc;
ENTRY;
-again:
- /* mostly layout lock is caching on the local side, so try to match
- * it before grabbing layout lock mutex. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW);
- if (mode != 0) { /* hit cached lock */
- rc = ll_layout_lock_set(&lockh, mode, inode);
- if (rc == -EAGAIN)
- goto again;
-
- RETURN(rc);
- }
-
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
0, 0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- /* have to enqueue one */
+ op_data->op_data = intent;
+ op_data->op_data_size = sizeof(*intent);
+
memset(&it, 0, sizeof(it));
it.it_op = IT_LAYOUT;
- lockh.cookie = 0ULL;
+ if (intent->li_opc == LAYOUT_INTENT_WRITE ||
+ intent->li_opc == LAYOUT_INTENT_TRUNC)
+ it.it_flags = FMODE_WRITE;
LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
ll_get_fsname(inode->i_sb, NULL, 0),
PFID(&lli->lli_fid), inode);
- rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL, &it, op_data, &lockh, 0);
+ rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
+ &ll_md_blocking_ast, 0);
if (it.it_request != NULL)
ptlrpc_req_finished(it.it_request);
it.it_request = NULL;
ll_finish_md_op_data(op_data);
- mode = it.it_lock_mode;
- it.it_lock_mode = 0;
- ll_intent_drop_lock(&it);
-
- if (rc == 0) {
- /* set lock data in case this is a new lock */
+ /* set lock data in case this is a new lock */
+ if (!rc)
ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- rc = ll_layout_lock_set(&lockh, mode, inode);
- if (rc == -EAGAIN)
- goto again;
- }
+
+ ll_intent_drop_lock(&it);
RETURN(rc);
}
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct lustre_handle lockh;
+ struct layout_intent intent = {
+ .li_opc = LAYOUT_INTENT_ACCESS,
+ };
+ enum ldlm_mode mode;
int rc;
ENTRY;
/* take layout lock mutex to enqueue layout lock exclusively. */
mutex_lock(&lli->lli_layout_mutex);
- rc = ll_layout_refresh_locked(inode);
- if (rc < 0)
- GOTO(out, rc);
+ while (1) {
+ /* mostly layout lock is caching on the local side, so try to
+ * match it before grabbing layout lock mutex. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
+ LCK_CR | LCK_CW | LCK_PR | LCK_PW);
+ if (mode != 0) { /* hit cached lock */
+ rc = ll_layout_lock_set(&lockh, mode, inode);
+ if (rc == -EAGAIN)
+ continue;
+ break;
+ }
- *gen = ll_layout_version_get(lli);
-out:
+ rc = ll_layout_intent(inode, &intent);
+ if (rc != 0)
+ break;
+ }
+
+ if (rc == 0)
+ *gen = ll_layout_version_get(lli);
mutex_unlock(&lli->lli_layout_mutex);
RETURN(rc);
}
/**
+ * Issue layout intent RPC indicating where in a file an IO is about to write.
+ *
+ * \param[in] inode file inode.
+ * \param[in] start	start offset of file in bytes where an IO is about to
+ * write.
+ * \param[in] end exclusive end offset in bytes of the write range.
+ *
+ * \retval 0 on success
+ * \retval < 0 error code
+ */
+int ll_layout_write_intent(struct inode *inode, __u64 start, __u64 end)
+{
+ struct layout_intent intent = {
+ .li_opc = LAYOUT_INTENT_WRITE,
+ .li_start = start,
+ .li_end = end,
+ };
+ int rc;
+ ENTRY;
+
+ rc = ll_layout_intent(inode, &intent);
+
+ RETURN(rc);
+}
+
+/**
* This function send a restore request to the MDT
*/
int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)