*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#ifdef HAVE_UIDGID_HEADER
# include <linux/uidgid.h>
#endif
-#include <lustre/ll_fiemap.h>
-#include <lustre_ioctl.h>
+#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_swab.h>
#include "cl_object.h"
#include "llite_internal.h"
#include "vvp_internal.h"
+/* Parameters passed as the 'data' cookie for an MDS_CLOSE_LAYOUT_SPLIT
+ * close: the victim inode receiving the split-off layout and the id of
+ * the mirror to split off. */
+struct split_param {
+	struct inode	*sp_inode;
+	__u16		sp_mirror_id;
+};
+
static int
ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
bool *lease_broken);
-static enum llioc_iter
-ll_iocontrol_call(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg, int *rcp);
-
static struct ll_file_data *ll_file_data_get(void)
{
struct ll_file_data *fd;
op_data->op_attr.ia_mtime = inode->i_mtime;
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
- op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
- ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_CTIME | ATTR_CTIME_SET;
+ op_data->op_attr.ia_valid |= (ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET |
+ ATTR_CTIME);
+ op_data->op_xvalid |= OP_XVALID_CTIME_SET;
op_data->op_attr_blocks = inode->i_blocks;
op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
- op_data->op_handle = och->och_fh;
+ if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+ op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
+ op_data->op_open_handle = och->och_open_handle;
if (och->och_flags & FMODE_WRITE &&
ll_file_test_and_clear_flag(ll_i2info(inode), LLIF_DATA_MODIFIED))
if (class_exp2obd(md_exp) == NULL) {
CERROR("%s: invalid MDC connection handle closing "DFID"\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid));
+ ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
GOTO(out, rc = 0);
}
ll_prepare_close(inode, op_data, och);
switch (bias) {
- case MDS_CLOSE_LAYOUT_SWAP:
+ case MDS_CLOSE_LAYOUT_MERGE:
+ /* merge blocks from the victim inode */
+ op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
+ op_data->op_attr.ia_valid |= ATTR_SIZE;
+ op_data->op_xvalid |= OP_XVALID_BLOCKS;
+ case MDS_CLOSE_LAYOUT_SPLIT:
+ case MDS_CLOSE_LAYOUT_SWAP: {
+ struct split_param *sp = data;
+
LASSERT(data != NULL);
- op_data->op_bias |= MDS_CLOSE_LAYOUT_SWAP;
+ op_data->op_bias |= bias;
op_data->op_data_version = 0;
op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_fid2 = *ll_inode2fid(data);
+ if (bias == MDS_CLOSE_LAYOUT_SPLIT) {
+ op_data->op_fid2 = *ll_inode2fid(sp->sp_inode);
+ op_data->op_mirror_id = sp->sp_mirror_id;
+ } else {
+ op_data->op_fid2 = *ll_inode2fid(data);
+ }
+ break;
+ }
+
+ case MDS_CLOSE_RESYNC_DONE: {
+ struct ll_ioc_lease *ioc = data;
+
+ LASSERT(data != NULL);
+ op_data->op_attr_blocks +=
+ ioc->lil_count * op_data->op_attr_blocks;
+ op_data->op_attr.ia_valid |= ATTR_SIZE;
+ op_data->op_xvalid |= OP_XVALID_BLOCKS;
+ op_data->op_bias |= MDS_CLOSE_RESYNC_DONE;
+
+ op_data->op_lease_handle = och->och_lease_handle;
+ op_data->op_data = &ioc->lil_ids[0];
+ op_data->op_data_size =
+ ioc->lil_count * sizeof(ioc->lil_ids[0]);
break;
+ }
case MDS_HSM_RELEASE:
LASSERT(data != NULL);
op_data->op_bias |= MDS_HSM_RELEASE;
op_data->op_data_version = *(__u64 *)data;
op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ op_data->op_attr.ia_valid |= ATTR_SIZE;
+ op_data->op_xvalid |= OP_XVALID_BLOCKS;
break;
default:
break;
}
+ if (!(op_data->op_attr.ia_valid & ATTR_SIZE))
+ op_data->op_xvalid |= OP_XVALID_LAZYSIZE;
+ if (!(op_data->op_xvalid & OP_XVALID_BLOCKS))
+ op_data->op_xvalid |= OP_XVALID_LAZYBLOCKS;
+
rc = md_close(md_exp, op_data, och->och_mod, &req);
if (rc != 0 && rc != -EINTR)
CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
md_exp->exp_obd->obd_name, PFID(&lli->lli_fid), rc);
- if (rc == 0 &&
- op_data->op_bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP)) {
+ if (rc == 0 && op_data->op_bias & bias) {
struct mdt_body *body;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
out:
md_clear_open_replay_data(md_exp, och);
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
OBD_FREE_PTR(och);
ptlrpc_req_finished(req); /* This is close request */
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(ll_inode2fid(inode)), inode);
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- inode == inode->i_sb->s_root->d_inode) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(fd != NULL);
- if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
- fd->fd_flags &= ~LL_FILE_RMTACL;
- rct_del(&sbi->ll_rct, current_pid());
- et_search_free(&sbi->ll_et, current_pid());
- }
- }
-#endif
-
- if (inode->i_sb->s_root != file->f_path.dentry)
+ if (inode->i_sb->s_root != file_dentry(file))
ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
fd = LUSTRE_FPRIVATE(file);
LASSERT(fd != NULL);
if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
ll_deauthorize_statahead(inode, fd);
- if (inode->i_sb->s_root == file->f_path.dentry) {
+ if (inode->i_sb->s_root == file_dentry(file)) {
LUSTRE_FPRIVATE(file) = NULL;
ll_file_data_put(fd);
RETURN(0);
RETURN(rc);
}
-static int ll_intent_file_open(struct file *file, void *lmm, int lmmsize,
+/* read_cache_page() filler callback: copy lnb->lnb_len bytes of inline
+ * reply data (lnb->lnb_data) into @page, zero-fill the remainder of the
+ * page, mark it uptodate and unlock it. Always returns 0. */
+static inline int ll_dom_readpage(void *data, struct page *page)
+{
+	struct niobuf_local *lnb = data;
+	void *kaddr;
+
+	kaddr = ll_kmap_atomic(page, KM_USER0);
+	memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
+	/* partial page: zero the tail so no stale data leaks to readers */
+	if (lnb->lnb_len < PAGE_SIZE)
+		memset(kaddr + lnb->lnb_len, 0,
+		       PAGE_SIZE - lnb->lnb_len);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	ll_kunmap_atomic(kaddr, KM_USER0);
+	unlock_page(page);
+
+	return 0;
+}
+
+/**
+ * If the open reply carries file data inline (RMF_NIOBUF_INLINE buffer,
+ * Data-on-MDT case), copy that data into the inode's page cache so a
+ * subsequent read can be served without another RPC.
+ *
+ * Returns silently (no error to caller) when there is no inline buffer,
+ * when the buffer offset is not aligned to the client PAGE_SIZE, or when
+ * the returned region does not reach i_size — see inline comments.
+ *
+ * NOTE(review): @it is currently unused here — confirm whether it is
+ * kept for API symmetry with the callers.
+ */
+void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req,
+			struct lookup_intent *it)
+{
+	struct ll_inode_info *lli = ll_i2info(inode);
+	struct cl_object *obj = lli->lli_clob;
+	struct address_space *mapping = inode->i_mapping;
+	struct page *vmpage;
+	struct niobuf_remote *rnb;
+	char *data;
+	unsigned long index, start;
+	struct niobuf_local lnb;
+
+	ENTRY;
+
+	if (obj == NULL)
+		RETURN_EXIT;
+
+	if (!req_capsule_has_field(&req->rq_pill, &RMF_NIOBUF_INLINE,
+				   RCL_SERVER))
+		RETURN_EXIT;
+
+	rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
+	if (rnb == NULL || rnb->rnb_len == 0)
+		RETURN_EXIT;
+
+	/* LU-11595: Server may return the whole file, which is always OK, or
+	 * it may return just the file tail, whose offset must be aligned to
+	 * the client PAGE_SIZE to be usable on this client; if the server's
+	 * PAGE_SIZE is smaller, the offset may not be aligned and that data
+	 * is just ignored.
+	 */
+	if (rnb->rnb_offset % PAGE_SIZE)
+		RETURN_EXIT;
+
+	/* Server returns whole file or just file tail if it fills in
+	 * reply buffer, in both cases total size should be inode size.
+	 */
+	if (rnb->rnb_offset + rnb->rnb_len < i_size_read(inode)) {
+		CERROR("%s: server returns off/len %llu/%u < i_size %llu\n",
+		       ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
+		       rnb->rnb_len, i_size_read(inode));
+		RETURN_EXIT;
+	}
+
+	CDEBUG(D_INFO, "Get data along with open at %llu len %i, i_size %llu\n",
+	       rnb->rnb_offset, rnb->rnb_len, i_size_read(inode));
+
+	/* inline data immediately follows the niobuf_remote descriptor */
+	data = (char *)rnb + sizeof(*rnb);
+
+	lnb.lnb_file_offset = rnb->rnb_offset;
+	start = lnb.lnb_file_offset / PAGE_SIZE;
+	index = 0;
+	LASSERT(lnb.lnb_file_offset % PAGE_SIZE == 0);
+	lnb.lnb_page_offset = 0;
+	/* feed the buffer to the page cache one page at a time via the
+	 * ll_dom_readpage() filler */
+	do {
+		lnb.lnb_data = data + (index << PAGE_SHIFT);
+		lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
+		if (lnb.lnb_len > PAGE_SIZE)
+			lnb.lnb_len = PAGE_SIZE;
+
+		vmpage = read_cache_page(mapping, index + start,
+					 ll_dom_readpage, &lnb);
+		if (IS_ERR(vmpage)) {
+			CWARN("%s: cannot fill page %lu for "DFID
+			      " with data: rc = %li\n",
+			      ll_i2sbi(inode)->ll_fsname, index + start,
+			      PFID(lu_object_fid(&obj->co_lu)),
+			      PTR_ERR(vmpage));
+			break;
+		}
+		put_page(vmpage);
+		index++;
+	} while (rnb->rnb_len > (index << PAGE_SHIFT));
+	EXIT;
+}
+
+static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
struct lookup_intent *itp)
{
- struct dentry *de = file->f_path.dentry;
struct ll_sb_info *sbi = ll_i2sbi(de->d_inode);
struct dentry *parent = de->d_parent;
- const char *name = NULL;
+ char *name = NULL;
int len = 0;
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
/* if server supports open-by-fid, or file name is invalid, don't pack
* name in open request */
- if (!(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID) &&
- lu_name_is_valid_2(de->d_name.name, de->d_name.len)) {
- name = de->d_name.name;
+ if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_OPEN_BY_NAME) ||
+ !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_OPEN_BY_FID)) {
+retry:
len = de->d_name.len;
+ name = kmalloc(len + 1, GFP_NOFS);
+ if (!name)
+ RETURN(-ENOMEM);
+
+ /* race here */
+ spin_lock(&de->d_lock);
+ if (len != de->d_name.len) {
+ spin_unlock(&de->d_lock);
+ kfree(name);
+ goto retry;
+ }
+ memcpy(name, de->d_name.name, len);
+ name[len] = '\0';
+ spin_unlock(&de->d_lock);
+
+ if (!lu_name_is_valid_2(name, len)) {
+ kfree(name);
+ RETURN(-ESTALE);
+ }
}
op_data = ll_prep_md_op_data(NULL, parent->d_inode, de->d_inode,
name, len, 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
+ if (IS_ERR(op_data)) {
+ kfree(name);
RETURN(PTR_ERR(op_data));
+ }
op_data->op_data = lmm;
op_data->op_data_size = lmmsize;
rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
&ll_md_blocking_ast, 0);
+ kfree(name);
ll_finish_md_op_data(op_data);
if (rc == -ESTALE) {
/* reason for keep own exit path - don`t flood log
}
rc = ll_prep_inode(&de->d_inode, req, NULL, itp);
- if (!rc && itp->it_lock_mode)
+
+ if (!rc && itp->it_lock_mode) {
+ struct lustre_handle handle = {.cookie = itp->it_lock_handle};
+ struct ldlm_lock *lock;
+ bool has_dom_bit = false;
+
+ /* If we got a lock back and it has a LOOKUP bit set,
+ * make sure the dentry is marked as valid so we can find it.
+ * We don't need to care about actual hashing since other bits
+ * of kernel will deal with that later.
+ */
+ lock = ldlm_handle2lock(&handle);
+ if (lock) {
+ has_dom_bit = ldlm_has_dom(lock);
+ if (lock->l_policy_data.l_inodebits.bits &
+ MDS_INODELOCK_LOOKUP)
+ d_lustre_revalidate(de);
+
+ LDLM_LOCK_PUT(lock);
+ }
ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, NULL);
+ if (has_dom_bit)
+ ll_dom_finish_open(de->d_inode, req, itp);
+ }
out:
ptlrpc_req_finished(req);
ll_intent_drop_lock(itp);
+ /* We did open by fid, but by the time we got to the server,
+ * the object disappeared. If this is a create, we cannot really
+ * tell the userspace that the file it was trying to create
+ * does not exist. Instead let's return -ESTALE, and the VFS will
+ * retry the create with LOOKUP_REVAL that we are going to catch
+ * in ll_revalidate_dentry() and use lookup then.
+ */
+ if (rc == -ENOENT && itp->it_op & IT_CREAT)
+ rc = -ESTALE;
+
RETURN(rc);
}
struct mdt_body *body;
body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
- och->och_fh = body->mbo_handle;
+ och->och_open_handle = body->mbo_open_handle;
och->och_fid = body->mbo_fid1;
och->och_lease_handle.cookie = it->it_lock_handle;
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
static int ll_local_open(struct file *file, struct lookup_intent *it,
struct ll_file_data *fd, struct obd_client_handle *och)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
ENTRY;
LASSERT(!LUSTRE_FPRIVATE(file));
fd = ll_file_data_get();
if (fd == NULL)
- GOTO(out_openerr, rc = -ENOMEM);
+ GOTO(out_nofiledata, rc = -ENOMEM);
fd->fd_file = file;
if (S_ISDIR(inode->i_mode))
ll_authorize_statahead(inode, fd);
- if (inode->i_sb->s_root == file->f_path.dentry) {
+ if (inode->i_sb->s_root == file_dentry(file)) {
LUSTRE_FPRIVATE(file) = fd;
RETURN(0);
}
if (file->f_flags & O_TRUNC)
oit.it_flags |= FMODE_WRITE;
- /* kernel only call f_op->open in dentry_open. filp_open calls
- * dentry_open after call to open_namei that checks permissions.
- * Only nfsd_open call dentry_open directly without checking
- * permissions and because of that this code below is safe. */
- if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
- oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
+ /* kernel only call f_op->open in dentry_open. filp_open calls
+ * dentry_open after call to open_namei that checks permissions.
+ * Only nfsd_open call dentry_open directly without checking
+ * permissions and because of that this code below is safe.
+ */
+ if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
+ oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
/* We do not want O_EXCL here, presumably we opened the file
* already? XXX - NFS implications? */
GOTO(out_openerr, rc);
}
- ll_release_openhandle(file->f_path.dentry, it);
+ ll_release_openhandle(file_dentry(file), it);
}
(*och_usecount)++;
} else {
LASSERT(*och_usecount == 0);
if (!it->it_disposition) {
+ struct ll_dentry_data *ldd = ll_d2d(file->f_path.dentry);
/* We cannot just request lock handle now, new ELC code
means that one of other OPEN locks for this file
could be cancelled, and since blocking ast handler
* handle to be returned from LOOKUP|OPEN request,
* for example if the target entry was a symlink.
*
- * Always fetch MDS_OPEN_LOCK if this is not setstripe.
+ * Only fetch MDS_OPEN_LOCK if this is in NFS path,
+ * marked by a bit set in ll_iget_for_nfs. Clear the
+ * bit so that it's not confusing later callers.
*
+ * NB; when ldd is NULL, it must have come via normal
+ * lookup path only, since ll_iget_for_nfs always calls
+ * ll_d_init().
+ */
+ if (ldd && ldd->lld_nfs_dentry) {
+ ldd->lld_nfs_dentry = 0;
+ it->it_flags |= MDS_OPEN_LOCK;
+ }
+
+ /*
* Always specify MDS_OPEN_BY_FID because we don't want
* to get file with different fid.
*/
- it->it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID;
- rc = ll_intent_file_open(file, NULL, 0, it);
+ it->it_flags |= MDS_OPEN_BY_FID;
+ rc = ll_intent_file_open(file_dentry(file), NULL, 0,
+ it);
if (rc)
GOTO(out_openerr, rc);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
}
+out_nofiledata:
if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
ptlrpc_req_finished(it->it_request);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}
/**
+ * When setting a lease on a file, we take ownership of the lli_mds_*_och
+ * and save it as fd->fd_och so as to force client to reopen the file even
+ * if it has an open lock in cache already.
+ */
+static int ll_lease_och_acquire(struct inode *inode, struct file *file,
+				struct lustre_handle *old_open_handle)
+{
+	struct ll_inode_info *lli = ll_i2info(inode);
+	struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+	struct obd_client_handle **och_p;
+	__u64 *och_usecount;
+	int rc = 0;
+	ENTRY;
+
+	/* Get the openhandle of the file */
+	mutex_lock(&lli->lli_och_mutex);
+	/* a lease was already taken through this file descriptor */
+	if (fd->fd_lease_och != NULL)
+		GOTO(out_unlock, rc = -EBUSY);
+
+	if (fd->fd_och == NULL) {
+		/* pick the MDS open handle matching this file's open mode */
+		if (file->f_mode & FMODE_WRITE) {
+			LASSERT(lli->lli_mds_write_och != NULL);
+			och_p = &lli->lli_mds_write_och;
+			och_usecount = &lli->lli_open_fd_write_count;
+		} else {
+			LASSERT(lli->lli_mds_read_och != NULL);
+			och_p = &lli->lli_mds_read_och;
+			och_usecount = &lli->lli_open_fd_read_count;
+		}
+
+		/* handle shared by other openers: cannot take ownership */
+		if (*och_usecount > 1)
+			GOTO(out_unlock, rc = -EBUSY);
+
+		/* transfer ownership of the open handle to this fd; it is
+		 * given back in ll_lease_och_release() */
+		fd->fd_och = *och_p;
+		*och_usecount = 0;
+		*och_p = NULL;
+	}
+
+	*old_open_handle = fd->fd_och->och_open_handle;
+
+	EXIT;
+out_unlock:
+	mutex_unlock(&lli->lli_och_mutex);
+	return rc;
+}
+
+/**
+ * Release ownership on lli_mds_*_och when putting back a file lease.
+ */
+static int ll_lease_och_release(struct inode *inode, struct file *file)
+{
+	struct ll_inode_info *lli = ll_i2info(inode);
+	struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+	struct obd_client_handle **och_p;
+	struct obd_client_handle *old_och = NULL;
+	__u64 *och_usecount;
+	int rc = 0;
+	ENTRY;
+
+	mutex_lock(&lli->lli_och_mutex);
+	/* select the slot matching this file's open mode, mirroring the
+	 * choice made in ll_lease_och_acquire() */
+	if (file->f_mode & FMODE_WRITE) {
+		och_p = &lli->lli_mds_write_och;
+		och_usecount = &lli->lli_open_fd_write_count;
+	} else {
+		och_p = &lli->lli_mds_read_och;
+		och_usecount = &lli->lli_open_fd_read_count;
+	}
+
+	/* The file may have been open by another process (broken lease) so
+	 * *och_p is not NULL. In this case we should simply increase usecount
+	 * and close fd_och.
+	 */
+	if (*och_p != NULL) {
+		old_och = fd->fd_och;
+		(*och_usecount)++;
+	} else {
+		*och_p = fd->fd_och;
+		*och_usecount = 1;
+	}
+	fd->fd_och = NULL;
+	mutex_unlock(&lli->lli_och_mutex);
+
+	/* close the now-redundant handle outside the mutex */
+	if (old_och != NULL)
+		rc = ll_close_inode_openhandle(inode, old_och, 0, NULL);
+
+	RETURN(rc);
+}
+
+/**
* Acquire a lease and open the file.
*/
static struct obd_client_handle *
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
- struct lustre_handle old_handle = { 0 };
+ struct lustre_handle old_open_handle = { 0 };
struct obd_client_handle *och = NULL;
int rc;
int rc2;
RETURN(ERR_PTR(-EINVAL));
if (file != NULL) {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct obd_client_handle **och_p;
- __u64 *och_usecount;
-
if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
RETURN(ERR_PTR(-EPERM));
- /* Get the openhandle of the file */
- rc = -EBUSY;
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
- mutex_unlock(&lli->lli_och_mutex);
- RETURN(ERR_PTR(rc));
- }
-
- if (fd->fd_och == NULL) {
- if (file->f_mode & FMODE_WRITE) {
- LASSERT(lli->lli_mds_write_och != NULL);
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else {
- LASSERT(lli->lli_mds_read_och != NULL);
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
- if (*och_usecount == 1) {
- fd->fd_och = *och_p;
- *och_p = NULL;
- *och_usecount = 0;
- rc = 0;
- }
- }
- mutex_unlock(&lli->lli_och_mutex);
- if (rc < 0) /* more than 1 opener */
+ rc = ll_lease_och_acquire(inode, file, &old_open_handle);
+ if (rc)
RETURN(ERR_PTR(rc));
-
- LASSERT(fd->fd_och != NULL);
- old_handle = fd->fd_och->och_fh;
}
OBD_ALLOC_PTR(och);
GOTO(out, rc = PTR_ERR(op_data));
/* To tell the MDT this openhandle is from the same owner */
- op_data->op_handle = old_handle;
+ op_data->op_open_handle = old_open_handle;
it.it_flags = fmode | open_flags;
it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
if (it.it_lock_mode == 0 ||
it.it_lock_bits != MDS_INODELOCK_OPEN) {
/* open lock must return for lease */
- CERROR(DFID "lease granted but no open lock, %d/"LPU64".\n",
+ CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
PFID(ll_inode2fid(inode)), it.it_lock_mode,
it.it_lock_bits);
GOTO(out_close, rc = -EPROTO);
rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
if (rc2 < 0)
CERROR("%s: error closing file "DFID": %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&ll_i2info(inode)->lli_fid), rc2);
+ sbi->ll_fsname, PFID(&ll_i2info(inode)->lli_fid), rc2);
och = NULL; /* och has been freed in ll_close_inode_openhandle() */
out_release_it:
ll_intent_release(&it);
ENTRY;
CDEBUG(D_INODE, "%s: biased close of file "DFID"\n",
- ll_get_fsname(inode->i_sb, NULL, 0), PFID(fid1));
+ ll_i2sbi(inode)->ll_fsname, PFID(fid1));
rc = ll_check_swap_layouts_validity(inode, inode2);
if (rc < 0)
if (rc == 0)
GOTO(out_free_och, rc = -EINVAL);
- /* Close the file and swap layouts between inode & inode2.
+ /* Close the file and {swap,merge} layouts between inode & inode2.
* NB: lease lock handle is released in mdc_close_layout_swap_pack()
* because we still need it to pack l_remote_handle to MDT. */
rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP,
* Release lease and close the file.
* It will check if the lease has ever broken.
*/
-static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
- bool *lease_broken)
+static int ll_lease_close_intent(struct obd_client_handle *och,
+ struct inode *inode,
+ bool *lease_broken, enum mds_op_bias bias,
+ void *data)
{
struct ldlm_lock *lock;
bool cancelled = true;
LDLM_LOCK_PUT(lock);
}
- CDEBUG(D_INODE, "lease for "DFID" broken? %d\n",
- PFID(&ll_i2info(inode)->lli_fid), cancelled);
+ CDEBUG(D_INODE, "lease for "DFID" broken? %d, bias: %x\n",
+ PFID(&ll_i2info(inode)->lli_fid), cancelled, bias);
- if (!cancelled)
- ldlm_cli_cancel(&och->och_lease_handle, 0);
if (lease_broken != NULL)
*lease_broken = cancelled;
- rc = ll_close_inode_openhandle(inode, och, 0, NULL);
+ if (!cancelled && !bias)
+ ldlm_cli_cancel(&och->och_lease_handle, 0);
+
+	if (cancelled) { /* no need to execute intent */
+ bias = 0;
+ data = NULL;
+ }
+
+ rc = ll_close_inode_openhandle(inode, och, bias, data);
RETURN(rc);
}
+/* Plain lease close: no intent operation piggy-backed on the close RPC. */
+static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
+			  bool *lease_broken)
+{
+	return ll_lease_close_intent(och, inode, lease_broken, 0, NULL);
+}
+
+/**
+ * After a lease is taken, send the MDS_REINT_RESYNC RPC to the MDT to
+ * start mirror resynchronization of a FLR file.
+ *
+ * \param[in] och	lease open handle (provides och_lease_handle)
+ * \param[in] inode	inode of the file being resynced
+ * \param[in] arg	userspace pointer to a struct ll_ioc_lease_id
+ *
+ * \retval 0 on success, negative errno on failure
+ */
+static int ll_lease_file_resync(struct obd_client_handle *och,
+				struct inode *inode, unsigned long arg)
+{
+	struct ll_sb_info *sbi = ll_i2sbi(inode);
+	struct md_op_data *op_data;
+	struct ll_ioc_lease_id ioc;
+	__u64 data_version_unused;
+	int rc;
+	ENTRY;
+
+	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+				     LUSTRE_OPC_ANY, NULL);
+	if (IS_ERR(op_data))
+		RETURN(PTR_ERR(op_data));
+
+	/* must go through 'out' so op_data is freed; a bare RETURN here
+	 * would leak the md_op_data allocated above */
+	if (copy_from_user(&ioc, (struct ll_ioc_lease_id __user *)arg,
+			   sizeof(ioc)))
+		GOTO(out, rc = -EFAULT);
+
+	/* before starting file resync, it's necessary to clean up page cache
+	 * in client memory, otherwise once the layout version is increased,
+	 * writing back cached data will be denied by the OSTs. */
+	rc = ll_data_version(inode, &data_version_unused, LL_DV_WR_FLUSH);
+	if (rc)
+		GOTO(out, rc);
+
+	op_data->op_lease_handle = och->och_lease_handle;
+	op_data->op_mirror_id = ioc.lil_mirror_id;
+	rc = md_file_resync(sbi->ll_md_exp, op_data);
+	if (rc)
+		GOTO(out, rc);
+
+	EXIT;
+out:
+	ll_finish_md_op_data(op_data);
+	return rc;
+}
+
int ll_merge_attr(const struct lu_env *env, struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
ll_inode_size_lock(inode);
- /* merge timestamps the most recently obtained from mds with
- timestamps obtained from osts */
- LTIME_S(inode->i_atime) = lli->lli_atime;
- LTIME_S(inode->i_mtime) = lli->lli_mtime;
- LTIME_S(inode->i_ctime) = lli->lli_ctime;
+ /* Merge timestamps the most recently obtained from MDS with
+ * timestamps obtained from OSTs.
+ *
+ * Do not overwrite atime of inode because it may be refreshed
+ * by file_accessed() function. If the read was served by cache
+ * data, there is no RPC to be sent so that atime may not be
+ * transferred to OSTs at all. MDT only updates atime at close time
+ * if it's at least 'mdd.*.atime_diff' older.
+ * All in all, the atime in Lustre does not strictly comply with
+ * POSIX. Solving this problem needs to send an RPC to MDT for each
+ * read, this will hurt performance.
+ */
+ if (inode->i_atime.tv_sec < lli->lli_atime ||
+ lli->lli_update_atime) {
+ inode->i_atime.tv_sec = lli->lli_atime;
+ lli->lli_update_atime = 0;
+ }
+ inode->i_mtime.tv_sec = lli->lli_mtime;
+ inode->i_ctime.tv_sec = lli->lli_ctime;
- atime = LTIME_S(inode->i_atime);
- mtime = LTIME_S(inode->i_mtime);
- ctime = LTIME_S(inode->i_ctime);
+ mtime = inode->i_mtime.tv_sec;
+ atime = inode->i_atime.tv_sec;
+ ctime = inode->i_ctime.tv_sec;
cl_object_attr_lock(obj);
- rc = cl_object_attr_get(env, obj, attr);
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDC_MERGE))
+ rc = -EINVAL;
+ else
+ rc = cl_object_attr_get(env, obj, attr);
cl_object_attr_unlock(obj);
if (rc != 0)
- GOTO(out_size_unlock, rc);
+ GOTO(out_size_unlock, rc = (rc == -ENODATA ? 0 : rc));
if (atime < attr->cat_atime)
atime = attr->cat_atime;
if (mtime < attr->cat_mtime)
mtime = attr->cat_mtime;
- CDEBUG(D_VFSTRACE, DFID" updating i_size "LPU64"\n",
+ CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n",
PFID(&lli->lli_fid), attr->cat_size);
i_size_write(inode, attr->cat_size);
inode->i_blocks = attr->cat_blocks;
- LTIME_S(inode->i_atime) = atime;
- LTIME_S(inode->i_mtime) = mtime;
- LTIME_S(inode->i_ctime) = ctime;
+ inode->i_mtime.tv_sec = mtime;
+ inode->i_atime.tv_sec = atime;
+ inode->i_ctime.tv_sec = ctime;
out_size_unlock:
ll_inode_size_unlock(inode);
RETURN(rc);
}
+/**
+ * Set designated mirror for I/O.
+ *
+ * So far only read, write, and truncate can support issuing I/O to a
+ * designated mirror.
+ *
+ * \param[in,out] io	the cl_io being set up
+ * \param[in] file	file the I/O is issued against; its ll_file_data
+ *			carries the designated mirror (if any)
+ */
+void ll_io_set_mirror(struct cl_io *io, const struct file *file)
+{
+	struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+	/* clear layout version for generic(non-resync) I/O in case it carries
+	 * stale layout version due to I/O restart */
+	io->ci_layout_version = 0;
+
+	/* FLR: disable non-delay for designated mirror I/O because obviously
+	 * only one mirror is available */
+	if (fd->fd_designated_mirror > 0) {
+		io->ci_ndelay = 0;
+		io->ci_designated_mirror = fd->fd_designated_mirror;
+		io->ci_layout_version = fd->fd_layout_version;
+	}
+
+	/* fixed typo in the debug message ("desiginated") */
+	CDEBUG(D_VFSTRACE, "%s: designated mirror: %d\n",
+	       file->f_path.dentry->d_name.name, io->ci_designated_mirror);
+}
+
static bool file_is_noatime(const struct file *file)
{
const struct vfsmount *mnt = file->f_path.mnt;
- const struct inode *inode = file->f_path.dentry->d_inode;
+ const struct inode *inode = file_inode((struct file *)file);
/* Adapted from file_accessed() and touch_atime().*/
if (file->f_flags & O_NOATIME)
return false;
}
-static void ll_io_init(struct cl_io *io, const struct file *file, int write)
+static void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
+ io->ci_lock_no_expand = fd->ll_lock_no_expand;
- io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
- if (write) {
+ if (iot == CIT_WRITE) {
io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
- io->u.ci_wr.wr_sync = file->f_flags & O_SYNC ||
- file->f_flags & O_DIRECT ||
- IS_SYNC(inode);
+ io->u.ci_wr.wr_sync = !!(file->f_flags & O_SYNC ||
+ file->f_flags & O_DIRECT ||
+ IS_SYNC(inode));
+ }
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_lockreq = CILR_MAYBE;
+ if (ll_file_nolock(file)) {
+ io->ci_lockreq = CILR_NEVER;
+ io->ci_no_srvlock = 1;
+ } else if (file->f_flags & O_APPEND) {
+ io->ci_lockreq = CILR_MANDATORY;
}
- io->ci_obj = ll_i2info(inode)->lli_clob;
- io->ci_lockreq = CILR_MAYBE;
- if (ll_file_nolock(file)) {
- io->ci_lockreq = CILR_NEVER;
- io->ci_no_srvlock = 1;
- } else if (file->f_flags & O_APPEND) {
- io->ci_lockreq = CILR_MANDATORY;
- }
-
io->ci_noatime = file_is_noatime(file);
+
+	/* FLR: only use non-delay I/O for read as there is only one
+	 * available mirror for write. */
+ io->ci_ndelay = !(iot == CIT_WRITE);
+
+ ll_io_set_mirror(io, file);
+}
+
+/* Fold one I/O of @count bytes into the per-inode file-heat statistics.
+ * No-op unless file heat is enabled on the superblock and not turned off
+ * per-inode (LU_HEAT_FLAG_OFF); only CIT_READ and CIT_WRITE are sampled.
+ * Each I/O adds one sample and @count bytes, decayed by the superblock's
+ * heat decay weight/period. */
+static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
+			__u64 count)
+{
+	struct ll_inode_info *lli = ll_i2info(inode);
+	struct ll_sb_info *sbi = ll_i2sbi(inode);
+	enum obd_heat_type sample_type;
+	enum obd_heat_type iobyte_type;
+	__u64 now = ktime_get_real_seconds();
+
+	if (!ll_sbi_has_file_heat(sbi) ||
+	    lli->lli_heat_flags & LU_HEAT_FLAG_OFF)
+		return;
+
+	if (iot == CIT_READ) {
+		sample_type = OBD_HEAT_READSAMPLE;
+		iobyte_type = OBD_HEAT_READBYTE;
+	} else if (iot == CIT_WRITE) {
+		sample_type = OBD_HEAT_WRITESAMPLE;
+		iobyte_type = OBD_HEAT_WRITEBYTE;
+	} else {
+		return;
+	}
+
+	spin_lock(&lli->lli_heat_lock);
+	obd_heat_add(&lli->lli_heat_instances[sample_type], now, 1,
+		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
+	obd_heat_add(&lli->lli_heat_instances[iobyte_type], now, count,
+		     sbi->ll_heat_decay_weight, sbi->ll_heat_period_second);
+	spin_unlock(&lli->lli_heat_lock);
+}
static ssize_t
loff_t *ppos, size_t count)
{
struct vvp_io *vio = vvp_env_io(env);
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct range_lock range;
struct cl_io *io;
ssize_t result = 0;
int rc = 0;
- struct range_lock range;
+ unsigned retried = 0;
+ bool restarted = false;
ENTRY;
- CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: "LPU64", count: %zu\n",
- file->f_path.dentry->d_name.name, iot, *ppos, count);
+ CDEBUG(D_VFSTRACE, "%s: %s ppos: %llu, count: %zu\n",
+ file_dentry(file)->d_name.name,
+ iot == CIT_READ ? "read" : "write", *ppos, count);
restart:
io = vvp_env_thread_io(env);
- ll_io_init(io, file, iot == CIT_WRITE);
+ ll_io_init(io, file, iot);
+ io->ci_ndelay_tried = retried;
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
bool range_locked = false;
switch (vio->vui_io_subtype) {
case IO_NORMAL:
vio->vui_iter = args->u.normal.via_iter;
-#ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
- vio->vui_tot_nrsegs = vio->vui_iter->nr_segs;
-#endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
vio->vui_iocb = args->u.normal.via_iocb;
/* Direct IO reads must also take range lock,
* or multiple reads will try to work on the same pages
LBUG();
}
- ll_cl_add(file, env, io);
+ ll_cl_add(file, env, io, LCC_RW);
rc = cl_io_loop(env, io);
ll_cl_remove(file, env);
if (io->ci_nob > 0) {
result += io->ci_nob;
- count -= io->ci_nob;
+ count -= io->ci_nob;
*ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
/* prepare IO restart */
- if (count > 0 && args->via_io_subtype == IO_NORMAL) {
+ if (count > 0 && args->via_io_subtype == IO_NORMAL)
args->u.normal.via_iter = vio->vui_iter;
-#ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
- args->u.normal.via_iter->nr_segs = vio->vui_tot_nrsegs;
-#endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
- }
}
- GOTO(out, rc);
out:
cl_io_fini(env, io);
+ CDEBUG(D_VFSTRACE,
+ "%s: %d io complete with rc: %d, result: %zd, restart: %d\n",
+ file->f_path.dentry->d_name.name,
+ iot, rc, result, io->ci_need_restart);
+
if ((rc == 0 || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
- "%s: restart %s from %lld, count:%zu, result: %zd\n",
- file->f_path.dentry->d_name.name,
+ "%s: restart %s from %lld, count: %zu, ret: %zd, rc: %d\n",
+ file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write",
- *ppos, count, result);
+ *ppos, count, result, rc);
+ /* preserve the tried count for FLR */
+ retried = io->ci_ndelay_tried;
+ restarted = true;
goto restart;
}
}
CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
+ if (result > 0)
+ ll_heat_add(inode, iot, result);
- return result > 0 ? result : rc;
+ RETURN(result > 0 ? result : rc);
}
-/*
- * Read from a file (through the page cache).
+/**
+ * The purpose of fast read is to overcome per I/O overhead and improve IOPS
+ * especially for small I/O.
+ *
+ * To serve a read request, CLIO has to create and initialize a cl_io and
+ * then request DLM lock. This has turned out to have significant overhead
+ * and affects the performance of small I/O dramatically.
+ *
+ * It's not necessary to create a cl_io for each I/O. Under the help of read
+ * ahead, most of the pages being read are already in memory cache and we can
+ * read those pages directly because if the pages exist, the corresponding DLM
+ * lock must exist so that page content must be valid.
+ *
+ * In fast read implementation, the llite speculatively finds and reads pages
+ * in memory cache. There are three scenarios for fast read:
+ * - If the page exists and is uptodate, kernel VM will provide the data and
+ * CLIO won't be intervened;
+ * - If the page was brought into memory by read ahead, it will be exported
+ * and read ahead parameters will be updated;
+ * - Otherwise the page is not in memory, we can't do fast read. Therefore,
+ * it will go back and invoke normal read, i.e., a cl_io will be created
+ * and DLM lock will be requested.
+ *
+ * POSIX compliance: posix standard states that read is intended to be atomic.
+ * Lustre read implementation is in line with Linux kernel read implementation
+ * and neither of them complies with POSIX standard in this matter. Fast read
+ * doesn't make the situation worse on single node but it may interleave write
+ * results from multiple nodes due to short read handling in ll_file_aio_read().
+ *
+ * \param env - lu_env
+ * \param iocb - kiocb from kernel
+ * \param iter - user space buffers where the data will be copied
+ *
+ * \retval - number of bytes have been read, or error code if error occurred.
*/
-static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t
+ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter)
{
- struct vvp_io_args *args;
- struct lu_env *env;
ssize_t result;
- __u16 refcheck;
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
+ if (!ll_sbi_has_fast_read(ll_i2sbi(file_inode(iocb->ki_filp))))
+ return 0;
- args = ll_env_args(env, IO_NORMAL);
- args->u.normal.via_iter = to;
- args->u.normal.via_iocb = iocb;
+ /* NB: we can't do direct IO for fast read because it will need a lock
+ * to make IO engine happy. */
+ if (iocb->ki_filp->f_flags & O_DIRECT)
+ return 0;
+
+ result = generic_file_read_iter(iocb, iter);
+
+ /* If the first page is not in cache, generic_file_aio_read() will be
+ * returned with -ENODATA.
+ * See corresponding code in ll_readpage(). */
+ if (result == -ENODATA)
+ result = 0;
+
+ if (result > 0) {
+ ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
+ ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
+ LPROC_LL_READ_BYTES, result);
+ }
- result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
- &iocb->ki_pos, iov_iter_count(to));
- cl_env_put(env, &refcheck);
return result;
}
/*
- * Write to a file (through the page cache).
+ * Read from a file (through the page cache).
*/
-static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct vvp_io_args *args;
struct lu_env *env;
+ struct vvp_io_args *args;
ssize_t result;
+ ssize_t rc2;
__u16 refcheck;
+ ll_ras_enter(iocb->ki_filp);
+
+ result = ll_do_fast_read(iocb, to);
+ if (result < 0 || iov_iter_count(to) == 0)
+ GOTO(out, result);
+
env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
args = ll_env_args(env, IO_NORMAL);
- args->u.normal.via_iter = from;
+ args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
- result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
- &iocb->ki_pos, iov_iter_count(from));
+ rc2 = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
+ &iocb->ki_pos, iov_iter_count(to));
+ if (rc2 > 0)
+ result += rc2;
+ else if (result == 0)
+ result = rc2;
+
cl_env_put(env, &refcheck);
+out:
return result;
}
+/**
+ * Similar trick to ll_do_fast_read, this improves write speed for tiny writes.
+ * If a page is already in the page cache and dirty (and some other things -
+ * See ll_tiny_write_begin for the instantiation of these rules), then we can
+ * write to it without doing a full I/O, because Lustre already knows about it
+ * and will write it out. This saves a lot of processing time.
+ *
+ * All writes here are within one page, so exclusion is handled by the page
+ * lock on the vm page. We do not do tiny writes for writes which touch
+ * multiple pages because it's very unlikely multiple sequential pages
+ * are already dirty.
+ *
+ * We limit these to < PAGE_SIZE because PAGE_SIZE writes are relatively common
+ * and are unlikely to be to already dirty pages.
+ *
+ * Attribute updates are important here, we do them in ll_tiny_write_end.
+ */
+static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+ ssize_t count = iov_iter_count(iter);
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ bool lock_inode = !IS_NOSEC(inode);
+ ssize_t result = 0;
+
+ ENTRY;
+
+ /* Restrict writes to single page and < PAGE_SIZE. See comment at top
+ * of function for why.
+ */
+ if (count >= PAGE_SIZE ||
+ (iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
+ RETURN(0);
+
+ if (unlikely(lock_inode))
+ inode_lock(inode);
+ result = __generic_file_write_iter(iocb, iter);
+
+ if (unlikely(lock_inode))
+ inode_unlock(inode);
+
+ /* If the page is not already dirty, ll_tiny_write_begin returns
+ * -ENODATA. We continue on to normal write.
+ */
+ if (result == -ENODATA)
+ result = 0;
+
+ if (result > 0) {
+ ll_heat_add(inode, CIT_WRITE, result);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES,
+ result);
+ ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
+ }
+
+ /* "result" may still hold a negative errno here (any error other than
+ * -ENODATA), so print both ssize_t values with the signed %zd, not
+ * %zu, which would render errors as huge unsigned numbers. */
+ CDEBUG(D_VFSTRACE, "result: %zd, original count %zd\n", result, count);
+
+ RETURN(result);
+}
+
+/*
+ * Write to a file (through the page cache).
+ */
+static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct vvp_io_args *args;
+ struct lu_env *env;
+ ssize_t rc_tiny = 0, rc_normal;
+ __u16 refcheck;
+
+ ENTRY;
+
+ /* NB: we can't do direct IO for tiny writes because they use the page
+ * cache, we can't do sync writes because tiny writes can't flush
+ * pages, and we can't do append writes because we can't guarantee the
+ * required DLM locks are held to protect file size.
+ */
+ if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(iocb->ki_filp))) &&
+ !(iocb->ki_filp->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
+ rc_tiny = ll_do_tiny_write(iocb, from);
+
+ /* If the tiny write consumed the whole iter it handled all the I/O;
+ * otherwise (including when the tiny write returned an error) fall
+ * through to the normal write path for the remaining bytes.
+ */
+ if (iov_iter_count(from) == 0)
+ GOTO(out, rc_normal = rc_tiny);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ args = ll_env_args(env, IO_NORMAL);
+ args->u.normal.via_iter = from;
+ args->u.normal.via_iocb = iocb;
+
+ rc_normal = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
+ &iocb->ki_pos, iov_iter_count(from));
+
+ /* On success, combine bytes written. */
+ if (rc_tiny >= 0 && rc_normal > 0)
+ rc_normal += rc_tiny;
+ /* On error, only return error from normal write if tiny write did not
+ * write any bytes. Otherwise return bytes written by tiny write.
+ */
+ else if (rc_tiny > 0)
+ rc_normal = rc_tiny;
+
+ cl_env_put(env, &refcheck);
+out:
+ RETURN(rc_normal);
+}
+
#ifndef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
/*
* XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct iovec *local_iov;
- struct iov_iter *to;
+ struct iov_iter to;
size_t iov_count;
ssize_t result;
- struct lu_env *env = NULL;
- __u16 refcheck;
ENTRY;
result = ll_file_get_iov_count(iov, &nr_segs, &iov_count);
if (result)
RETURN(result);
- if (nr_segs == 1) {
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
-
- local_iov = &ll_env_info(env)->lti_local_iov;
- *local_iov = *iov;
-
- } else {
- OBD_ALLOC(local_iov, sizeof(*iov) * nr_segs);
- if (local_iov == NULL)
- RETURN(-ENOMEM);
-
- memcpy(local_iov, iov, sizeof(*iov) * nr_segs);
- }
-
- OBD_ALLOC_PTR(to);
- if (to == NULL) {
- result = -ENOMEM;
- goto out;
- }
# ifdef HAVE_IOV_ITER_INIT_DIRECTION
- iov_iter_init(to, READ, local_iov, nr_segs, iov_count);
+ iov_iter_init(&to, READ, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
- iov_iter_init(to, local_iov, nr_segs, iov_count, 0);
+ iov_iter_init(&to, iov, nr_segs, iov_count, 0);
# endif /* HAVE_IOV_ITER_INIT_DIRECTION */
- result = ll_file_read_iter(iocb, to);
-
- OBD_FREE_PTR(to);
-out:
- if (nr_segs == 1)
- cl_env_put(env, &refcheck);
- else
- OBD_FREE(local_iov, sizeof(*iov) * nr_segs);
+ result = ll_file_read_iter(iocb, &to);
RETURN(result);
}
static ssize_t ll_file_read(struct file *file, char __user *buf, size_t count,
 loff_t *ppos)
{
- struct lu_env *env;
 struct iovec iov = { .iov_base = buf, .iov_len = count };
- struct kiocb *kiocb;
- ssize_t result;
- __u16 refcheck;
- ENTRY;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ struct kiocb kiocb;
+ ssize_t result;
+ ENTRY;
- kiocb = &ll_env_info(env)->lti_kiocb;
- init_sync_kiocb(kiocb, file);
- kiocb->ki_pos = *ppos;
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
- kiocb.ki_left = count;
#elif defined(HAVE_KI_NBYTES)
- kiocb->ki_nbytes = count;
+ /* struct kiocb field is "ki_nbytes"; "i_nbytes" was a typo and would
+ * not compile when HAVE_KI_NBYTES is defined (matches the write-side
+ * code in ll_file_write()). */
+ kiocb.ki_nbytes = count;
#endif
- result = ll_file_aio_read(kiocb, &iov, 1, kiocb->ki_pos);
- *ppos = kiocb->ki_pos;
+ result = ll_file_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
+ *ppos = kiocb.ki_pos;
- cl_env_put(env, &refcheck);
 RETURN(result);
}
static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- struct iovec *local_iov;
- struct iov_iter *from;
+ struct iov_iter from;
size_t iov_count;
ssize_t result;
- struct lu_env *env = NULL;
- __u16 refcheck;
ENTRY;
result = ll_file_get_iov_count(iov, &nr_segs, &iov_count);
if (result)
RETURN(result);
- if (nr_segs == 1) {
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
-
- local_iov = &ll_env_info(env)->lti_local_iov;
- *local_iov = *iov;
- } else {
- OBD_ALLOC(local_iov, sizeof(*iov) * nr_segs);
- if (local_iov == NULL)
- RETURN(-ENOMEM);
-
- memcpy(local_iov, iov, sizeof(*iov) * nr_segs);
- }
-
- OBD_ALLOC_PTR(from);
- if (from == NULL) {
- result = -ENOMEM;
- goto out;
- }
# ifdef HAVE_IOV_ITER_INIT_DIRECTION
- iov_iter_init(from, WRITE, local_iov, nr_segs, iov_count);
+ iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
- iov_iter_init(from, local_iov, nr_segs, iov_count, 0);
+ iov_iter_init(&from, iov, nr_segs, iov_count, 0);
# endif /* HAVE_IOV_ITER_INIT_DIRECTION */
- result = ll_file_write_iter(iocb, from);
-
- OBD_FREE_PTR(from);
-out:
- if (nr_segs == 1)
- cl_env_put(env, &refcheck);
- else
- OBD_FREE(local_iov, sizeof(*iov) * nr_segs);
+ result = ll_file_write_iter(iocb, &from);
RETURN(result);
}
static ssize_t ll_file_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct lu_env *env;
struct iovec iov = { .iov_base = (void __user *)buf,
.iov_len = count };
- struct kiocb *kiocb;
- ssize_t result;
- __u16 refcheck;
- ENTRY;
+ struct kiocb kiocb;
+ ssize_t result;
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ ENTRY;
- kiocb = &ll_env_info(env)->lti_kiocb;
- init_sync_kiocb(kiocb, file);
- kiocb->ki_pos = *ppos;
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
- kiocb->ki_left = count;
+ kiocb.ki_left = count;
#elif defined(HAVE_KI_NBYTES)
- kiocb->ki_nbytes = count;
+ kiocb.ki_nbytes = count;
#endif
- result = ll_file_aio_write(kiocb, &iov, 1, kiocb->ki_pos);
- *ppos = kiocb->ki_pos;
+ result = ll_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
+ *ppos = kiocb.ki_pos;
- cl_env_put(env, &refcheck);
RETURN(result);
}
#endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
__u16 refcheck;
ENTRY;
+ ll_ras_enter(in_file);
+
env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
RETURN(result);
}
-int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
- __u64 flags, struct lov_user_md *lum,
- int lum_size)
+int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
+ __u64 flags, struct lov_user_md *lum, int lum_size)
{
struct lookup_intent oit = {
.it_op = IT_OPEN,
ENTRY;
ll_inode_size_lock(inode);
- rc = ll_intent_file_open(file, lum, lum_size, &oit);
+ rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
if (rc < 0)
GOTO(out_unlock, rc);
- ll_release_openhandle(file->f_path.dentry, &oit);
+ ll_release_openhandle(dentry, &oit);
out_unlock:
ll_inode_size_unlock(inode);
ll_intent_release(&oit);
- cl_lov_delay_create_clear(&file->f_flags);
RETURN(rc);
}
lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
LASSERT(lmm != NULL);
- if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
- (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
- GOTO(out, rc = -EPROTO);
- }
+ if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
+ lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
+ lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_COMP_V1) &&
+ lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_FOREIGN))
+ GOTO(out, rc = -EPROTO);
/*
* This is coming from the MDS, so is probably in
if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC)) {
int stripe_count;
- stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
- if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
- stripe_count = 0;
+ if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
+ lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
+ stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_pattern) &
+ LOV_PATTERN_F_RELEASED)
+ stripe_count = 0;
+ }
/* if function called for directory - we should
* avoid swab not existent lsm objects */
if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
+ lustre_swab_lov_user_md_v1(
+ (struct lov_user_md_v1 *)lmm);
if (S_ISREG(body->mbo_mode))
lustre_swab_lov_user_md_objects(
((struct lov_user_md_v1 *)lmm)->lmm_objects,
stripe_count);
} else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
lustre_swab_lov_user_md_v3(
- (struct lov_user_md_v3 *)lmm);
+ (struct lov_user_md_v3 *)lmm);
if (S_ISREG(body->mbo_mode))
- lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v3 *)lmm)->lmm_objects,
- stripe_count);
- }
- }
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v3 *)lmm)->lmm_objects,
+ stripe_count);
+ } else if (lmm->lmm_magic ==
+ cpu_to_le32(LOV_MAGIC_COMP_V1)) {
+ lustre_swab_lov_comp_md_v1(
+ (struct lov_comp_md_v1 *)lmm);
+ } else if (lmm->lmm_magic ==
+ cpu_to_le32(LOV_MAGIC_FOREIGN)) {
+ struct lov_foreign_md *lfm;
+
+ lfm = (struct lov_foreign_md *)lmm;
+ __swab32s(&lfm->lfm_magic);
+ __swab32s(&lfm->lfm_length);
+ __swab32s(&lfm->lfm_type);
+ __swab32s(&lfm->lfm_flags);
+ }
+ }
out:
- *lmmp = lmm;
- *lmm_size = lmmsize;
- *request = req;
- return rc;
+ *lmmp = lmm;
+ *lmm_size = lmmsize;
+ *request = req;
+ return rc;
}
static int ll_lov_setea(struct inode *inode, struct file *file,
- unsigned long arg)
+ void __user *arg)
{
__u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
struct lov_user_md *lump;
if (lump == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size))
+ if (copy_from_user(lump, arg, lum_size))
GOTO(out_lump, rc = -EFAULT);
- rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
+ rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, lump,
+ lum_size);
+ cl_lov_delay_create_clear(&file->f_flags);
out_lump:
OBD_FREE_LARGE(lump, lum_size);
RETURN(rc);
}
-static int ll_file_getstripe(struct inode *inode,
- struct lov_user_md __user *lum)
+static int ll_file_getstripe(struct inode *inode, void __user *lum, size_t size)
{
struct lu_env *env;
__u16 refcheck;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum);
+ rc = cl_object_getstripe(env, ll_i2info(inode)->lli_clob, lum, size);
cl_env_put(env, &refcheck);
RETURN(rc);
}
static int ll_lov_setstripe(struct inode *inode, struct file *file,
- unsigned long arg)
+ void __user *arg)
{
struct lov_user_md __user *lum = (struct lov_user_md __user *)arg;
struct lov_user_md *klum;
RETURN(rc);
lum_size = rc;
- rc = ll_lov_setstripe_ea_info(inode, file, flags, klum, lum_size);
- if (rc == 0) {
+ rc = ll_lov_setstripe_ea_info(inode, file_dentry(file), flags, klum,
+ lum_size);
+ if (!rc) {
__u32 gen;
- put_user(0, &lum->lmm_stripe_count);
+ rc = put_user(0, &lum->lmm_stripe_count);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = ll_layout_refresh(inode, &gen);
+ if (rc)
+ GOTO(out, rc);
- ll_layout_refresh(inode, &gen);
- rc = ll_file_getstripe(inode, (struct lov_user_md __user *)arg);
+ rc = ll_file_getstripe(inode, arg, lum_size);
}
+ cl_lov_delay_create_clear(&file->f_flags);
+out:
OBD_FREE(klum, lum_size);
RETURN(rc);
}
static int
ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_grouplock grouplock;
- int rc;
- ENTRY;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_object *obj = lli->lli_clob;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_grouplock grouplock;
+ int rc;
+ ENTRY;
if (arg == 0) {
CWARN("group id for group lock must not be 0\n");
LASSERT(fd->fd_grouplock.lg_lock == NULL);
spin_unlock(&lli->lli_lock);
+ /**
+ * XXX: group lock needs to protect all OST objects while PFL
+ * can add new OST objects during the IO, so we'd instantiate
+ * all OST objects before getting its group lock.
+ */
+ if (obj) {
+ struct lu_env *env;
+ __u16 refcheck;
+ struct cl_layout cl = {
+ .cl_is_composite = false,
+ };
+ struct lu_extent ext = {
+ .e_start = 0,
+ .e_end = OBD_OBJECT_EOF,
+ };
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ rc = cl_object_layout_get(env, obj, &cl);
+ if (!rc && cl.cl_is_composite)
+ rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
+ &ext);
+
+ cl_env_put(env, &refcheck);
+ if (rc)
+ RETURN(rc);
+ }
+
rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
RETURN(rc);
}
-/*
- * Read the data_version for inode.
- *
- * This value is computed using stripe object version on OST.
- * Version is computed using server side locking.
- *
- * @param flags if do sync on the OST side;
- * 0: no sync
- * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
- * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
- */
-int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
+static int
+ll_ioc_data_version(struct inode *inode, struct ioc_data_version *ioc)
{
struct cl_object *obj = ll_i2info(inode)->lli_clob;
struct lu_env *env;
ENTRY;
+ ioc->idv_version = 0;
+ ioc->idv_layout_version = UINT_MAX;
+
/* If no file object initialized, we consider its version is 0. */
- if (obj == NULL) {
- *data_version = 0;
+ if (obj == NULL)
RETURN(0);
- }
env = cl_env_get(&refcheck);
if (IS_ERR(env))
io = vvp_env_thread_io(env);
io->ci_obj = obj;
io->u.ci_data_version.dv_data_version = 0;
- io->u.ci_data_version.dv_flags = flags;
+ io->u.ci_data_version.dv_layout_version = UINT_MAX;
+ io->u.ci_data_version.dv_flags = ioc->idv_flags;
restart:
if (cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj) == 0)
else
result = io->ci_result;
- *data_version = io->u.ci_data_version.dv_data_version;
+ ioc->idv_version = io->u.ci_data_version.dv_data_version;
+ ioc->idv_layout_version = io->u.ci_data_version.dv_layout_version;
cl_io_fini(env, io);
}
/*
+ * Read the data_version for inode.
+ *
+ * This value is computed using stripe object version on OST.
+ * Version is computed using server side locking.
+ *
+ * @param flags if do sync on the OST side;
+ * 0: no sync
+ * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
+ * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
+ */
+int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
+{
+ struct ioc_data_version ioc = { .idv_flags = flags };
+ int rc;
+
+ rc = ll_ioc_data_version(inode, &ioc);
+ /* Thin wrapper: only the data version is reported; the layout
+ * version returned in ioc.idv_layout_version is deliberately
+ * discarded here. */
+ if (!rc)
+ *data_version = ioc.idv_version;
+
+ return rc;
+}
+
+/*
* Trigger a HSM release request for the provided inode.
*/
int ll_hsm_release(struct inode *inode)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct obd_client_handle *och = NULL;
__u64 data_version = 0;
int rc;
+ __u16 refcheck;
ENTRY;
CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
+ ll_i2sbi(inode)->ll_fsname,
PFID(&ll_i2info(inode)->lli_fid));
och = ll_lease_open(inode, NULL, FMODE_WRITE, MDS_OPEN_RELEASE);
if (rc != 0)
GOTO(out, rc);
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
GOTO(out, rc = PTR_ERR(env));
- ll_merge_attr(env, inode);
- cl_env_nested_put(&nest, env);
+ rc = ll_merge_attr(env, inode);
+ cl_env_put(env, &refcheck);
+
+ /* If error happen, we have the wrong size for a file.
+ * Don't release it.
+ */
+ if (rc != 0)
+ GOTO(out, rc);
/* Release the file.
* NB: lease lock handle is released in mdc_hsm_release_pack() because
if (llss == NULL)
RETURN(-ENOMEM);
- llss->inode1 = file1->f_path.dentry->d_inode;
- llss->inode2 = file2->f_path.dentry->d_inode;
+ llss->inode1 = file_inode(file1);
+ llss->inode2 = file_inode(file2);
rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
if (rc < 0)
int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
{
- struct md_op_data *op_data;
- int rc;
+ struct obd_export *exp = ll_i2mdexp(inode);
+ struct md_op_data *op_data;
+ int rc;
ENTRY;
/* Detect out-of range masks */
!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
- /* Detect out-of range archive id */
- if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
- (hss->hss_archive_id > LL_HSM_MAX_ARCHIVE))
- RETURN(-EINVAL);
+ if (!exp_connect_archive_id_array(exp)) {
+ /* Detect out-of range archive id */
+ if ((hss->hss_valid & HSS_ARCHIVE_ID) &&
+ (hss->hss_archive_id > LL_HSM_ORIGIN_MAX_ARCHIVE))
+ RETURN(-EINVAL);
+ }
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, hss);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, ll_i2mdexp(inode),
- sizeof(*op_data), op_data, NULL);
+ rc = obd_iocontrol(LL_IOC_HSM_STATE_SET, exp, sizeof(*op_data),
+ op_data, NULL);
ll_finish_md_op_data(op_data);
ATTR_MTIME | ATTR_MTIME_SET |
ATTR_ATIME | ATTR_ATIME_SET;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
- rc = ll_setattr_raw(file->f_path.dentry, attr, true);
+ rc = ll_setattr_raw(file_dentry(file), attr, 0, true);
if (rc == -ENODATA)
rc = 0;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
out:
if (hss != NULL)
static int ll_file_futimes_3(struct file *file, const struct ll_futimes_3 *lfu)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct iattr ia = {
.ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_CTIME | ATTR_CTIME_SET,
+ ATTR_CTIME,
.ia_atime = {
.tv_sec = lfu->lfu_atime_sec,
.tv_nsec = lfu->lfu_atime_nsec,
if (!S_ISREG(inode->i_mode))
RETURN(-EINVAL);
- mutex_lock(&inode->i_mutex);
- rc = ll_setattr_raw(file->f_path.dentry, &ia, false);
- mutex_unlock(&inode->i_mutex);
+ inode_lock(inode);
+ rc = ll_setattr_raw(file_dentry(file), &ia, OP_XVALID_CTIME_SET,
+ false);
+ inode_unlock(inode);
RETURN(rc);
}
+/* Map the userspace lock mode (MODE_READ_USER / MODE_WRITE_USER) to the
+ * kernel cl_lock_mode. For an unrecognized mode, -EINVAL is returned
+ * cast into the enum type; callers detect this with a "< 0" check (see
+ * ll_file_lock_ahead()). */
+static enum cl_lock_mode cl_mode_user_to_kernel(enum lock_mode_user mode)
+{
+ switch (mode) {
+ case MODE_READ_USER:
+ return CLM_READ;
+ case MODE_WRITE_USER:
+ return CLM_WRITE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const char *const user_lockname[] = LOCK_MODE_NAMES;
+
+/* Used to allow the upper layers of the client to request an LDLM lock
+ * without doing an actual read or write.
+ *
+ * Used for ladvise lockahead to manually request specific locks.
+ *
+ * \param[in] file file this ladvise lock request is on
+ * \param[in] ladvise ladvise struct describing this lock request
+ *
+ * \retval 0 success, no detailed result available (sync requests
+ * and requests sent to the server [not handled locally]
+ * cannot return detailed results)
+ * \retval LLA_RESULT_{SAME,DIFFERENT} - detailed result of the lock request,
+ * see definitions for details.
+ * \retval negative negative errno on error
+ */
+int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise)
+{
+ struct lu_env *env = NULL;
+ struct cl_io *io = NULL;
+ struct cl_lock *lock = NULL;
+ struct cl_lock_descr *descr = NULL;
+ struct dentry *dentry = file->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ enum cl_lock_mode cl_mode;
+ off_t start = ladvise->lla_start;
+ off_t end = ladvise->lla_end;
+ int result;
+ __u16 refcheck;
+
+ ENTRY;
+
+ CDEBUG(D_VFSTRACE, "Lock request: file=%.*s, inode=%p, mode=%s "
+ "start=%llu, end=%llu\n", dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode,
+ user_lockname[ladvise->lla_lockahead_mode], (__u64) start,
+ (__u64) end);
+
+ cl_mode = cl_mode_user_to_kernel(ladvise->lla_lockahead_mode);
+ if (cl_mode < 0)
+ GOTO(out, result = cl_mode);
+
+ /* Get IO environment */
+ result = cl_io_get(inode, &env, &io, &refcheck);
+ if (result <= 0)
+ GOTO(out, result);
+
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result > 0) {
+ /*
+ * nothing to do for this io. This currently happens when
+ * stripe sub-object's are not yet created.
+ */
+ result = io->ci_result;
+ } else if (result == 0) {
+ lock = vvp_env_lock(env);
+ descr = &lock->cll_descr;
+
+ descr->cld_obj = io->ci_obj;
+ /* Convert byte offsets to pages */
+ descr->cld_start = cl_index(io->ci_obj, start);
+ descr->cld_end = cl_index(io->ci_obj, end);
+ descr->cld_mode = cl_mode;
+ /* CEF_MUST is used because we do not want to convert a
+ * lockahead request to a lockless lock */
+ descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND |
+ CEF_NONBLOCK;
+
+ if (ladvise->lla_peradvice_flags & LF_ASYNC)
+ descr->cld_enq_flags |= CEF_SPECULATIVE;
+
+ result = cl_lock_request(env, io, lock);
+
+ /* On success, we need to release the lock */
+ if (result >= 0)
+ cl_lock_release(env, lock);
+ }
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
+
+ /* -ECANCELED indicates a matching lock with a different extent
+ * was already present, and -EEXIST indicates a matching lock
+ * on exactly the same extent was already present.
+ * We convert them to positive values for userspace to make
+ * recognizing true errors easier.
+ * Note we can only return these detailed results on async requests,
+ * as sync requests look the same as i/o requests for locking. */
+ if (result == -ECANCELED)
+ result = LLA_RESULT_DIFFERENT;
+ else if (result == -EEXIST)
+ result = LLA_RESULT_SAME;
+
+out:
+ RETURN(result);
+}
+static const char *const ladvise_names[] = LU_LADVISE_NAMES;
+
+static int ll_ladvise_sanity(struct inode *inode,
+ struct llapi_lu_ladvise *ladvise)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ enum lu_ladvise_type advice = ladvise->lla_advice;
+ /* Note the peradvice flags is a 32 bit field, so per advice flags must
+ * be in the first 32 bits of enum ladvise_flags */
+ __u32 flags = ladvise->lla_peradvice_flags;
+ /* 3 lines at 80 characters per line, should be plenty */
+ int rc = 0;
+
+ if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) {
+ rc = -EINVAL;
+ CDEBUG(D_VFSTRACE, "%s: advice with value '%d' not recognized,"
+ "last supported advice is %s (value '%d'): rc = %d\n",
+ sbi->ll_fsname, advice,
+ ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc);
+ GOTO(out, rc);
+ }
+
+ /* Per-advice checks */
+ switch (advice) {
+ case LU_LADVISE_LOCKNOEXPAND:
+ if (flags & ~LF_LOCKNOEXPAND_MASK) {
+ rc = -EINVAL;
+ CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
+ "rc = %d\n", sbi->ll_fsname, flags,
+ ladvise_names[advice], rc);
+ GOTO(out, rc);
+ }
+ break;
+ case LU_LADVISE_LOCKAHEAD:
+ /* Currently only READ and WRITE modes can be requested */
+ if (ladvise->lla_lockahead_mode >= MODE_MAX_USER ||
+ ladvise->lla_lockahead_mode == 0) {
+ rc = -EINVAL;
+ CDEBUG(D_VFSTRACE, "%s: Invalid mode (%d) for %s: "
+ "rc = %d\n", sbi->ll_fsname,
+ ladvise->lla_lockahead_mode,
+ ladvise_names[advice], rc);
+ GOTO(out, rc);
+ }
+ /* fallthrough - the flag/range checks below also apply */
+ case LU_LADVISE_WILLREAD:
+ case LU_LADVISE_DONTNEED:
+ default:
+ /* Note fall through above - These checks apply to all advices
+ * except LOCKNOEXPAND */
+ if (flags & ~LF_DEFAULT_MASK) {
+ rc = -EINVAL;
+ CDEBUG(D_VFSTRACE, "%s: Invalid flags (%x) for %s: "
+ "rc = %d\n", sbi->ll_fsname, flags,
+ ladvise_names[advice], rc);
+ GOTO(out, rc);
+ }
+ if (ladvise->lla_start >= ladvise->lla_end) {
+ rc = -EINVAL;
+ CDEBUG(D_VFSTRACE, "%s: Invalid range (%llu to %llu) "
+ "for %s: rc = %d\n", sbi->ll_fsname,
+ ladvise->lla_start, ladvise->lla_end,
+ ladvise_names[advice], rc);
+ GOTO(out, rc);
+ }
+ break;
+ }
+
+out:
+ return rc;
+}
+#undef ERRSIZE
+
/*
* Give file access advices
*
* much more data being sent to the client.
*/
static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags,
- struct lu_ladvise *ladvise)
+ struct llapi_lu_ladvise *ladvise)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_io *io;
struct cl_ladvise_io *lio;
int rc;
+ __u16 refcheck;
ENTRY;
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
rc = io->ci_result;
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
+ RETURN(rc);
+}
+
+/* Set or clear the per-file-descriptor "no lock expansion" flag used by
+ * the LU_LADVISE_LOCKNOEXPAND advice; LF_UNSET clears it. Always
+ * returns 0. */
+static int ll_lock_noexpand(struct file *file, int flags)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ fd->ll_lock_no_expand = !(flags & LF_UNSET);
+
+ return 0;
+}
+
+/* FS_IOC_FSGETXATTR handler: copy the inode flags (as FS_XFLAG_* bits)
+ * and the project quota id to the userspace struct fsxattr at @arg.
+ * NOTE(review): the "cmd" parameter is unused here - presumably kept for
+ * signature symmetry with ll_ioctl_fssetxattr(); confirm with callers. */
+int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fsxattr fsxattr;
+
+ if (copy_from_user(&fsxattr,
+ (const struct fsxattr __user *)arg,
+ sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
+ if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+ fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
+ fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
+ if (copy_to_user((struct fsxattr __user *)arg,
+ &fsxattr, sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ RETURN(0);
+}
+
+/* Validate a FS_IOC_FSSETXATTR request against the inode's current
+ * project-quota state.
+ *
+ * Returns 0 if the request is permitted; -EINVAL if a caller outside the
+ * init user namespace attempts to change the project ID or toggle the
+ * PROJINHERIT flag. */
+int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+{
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() == &init_user_ns)
+ return 0;
+
+ if (ll_i2info(inode)->lli_projid != fa->fsx_projid)
+ return -EINVAL;
+
+ /* PROJINHERIT must be left exactly as it is: request and inode must
+ * agree on the flag in both directions. */
+ if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT)) {
+ if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+ return -EINVAL;
+ } else {
+ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
+ unsigned long arg)
+{
+
+ struct md_op_data *op_data;
+ struct ptlrpc_request *req = NULL;
+ int rc = 0;
+ struct fsxattr fsxattr;
+ struct cl_object *obj;
+ struct iattr *attr;
+ int flags;
+
+ if (copy_from_user(&fsxattr,
+ (const struct fsxattr __user *)arg,
+ sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ rc = ll_ioctl_check_project(inode, &fsxattr);
+ if (rc)
+ RETURN(rc);
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
+
+ flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
+ op_data->op_attr_flags = ll_inode_to_ext_flags(flags);
+ if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
+ op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
+ op_data->op_projid = fsxattr.fsx_projid;
+ op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
+ rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL,
+ 0, &req);
+ ptlrpc_req_finished(req);
+ if (rc)
+ GOTO(out_fsxattr, rc);
+ ll_update_inode_flags(inode, op_data->op_attr_flags);
+ obj = ll_i2info(inode)->lli_clob;
+ if (obj == NULL)
+ GOTO(out_fsxattr, rc);
+
+ OBD_ALLOC_PTR(attr);
+ if (attr == NULL)
+ GOTO(out_fsxattr, rc = -ENOMEM);
+
+ rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS,
+ fsxattr.fsx_xflags);
+ OBD_FREE_PTR(attr);
+out_fsxattr:
+ ll_finish_md_op_data(op_data);
+ RETURN(rc);
+}
+
+static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
+ unsigned long arg)
+{
+ struct inode *inode = file_inode(file);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_client_handle *och = NULL;
+ struct split_param sp;
+ bool lease_broken;
+ fmode_t fmode = 0;
+ enum mds_op_bias bias = 0;
+ struct file *layout_file = NULL;
+ void *data = NULL;
+ size_t data_size = 0;
+ long rc;
+ ENTRY;
+
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och != NULL) {
+ och = fd->fd_lease_och;
+ fd->fd_lease_och = NULL;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+
+ if (och == NULL)
+ GOTO(out, rc = -ENOLCK);
+
+ fmode = och->och_flags;
+
+ switch (ioc->lil_flags) {
+ case LL_LEASE_RESYNC_DONE:
+ if (ioc->lil_count > IOC_IDS_MAX)
+ GOTO(out, rc = -EINVAL);
+
+ data_size = offsetof(typeof(*ioc), lil_ids[ioc->lil_count]);
+ OBD_ALLOC(data, data_size);
+ if (!data)
+ GOTO(out, rc = -ENOMEM);
+
+ if (copy_from_user(data, (void __user *)arg, data_size))
+ GOTO(out, rc = -EFAULT);
+
+ bias = MDS_CLOSE_RESYNC_DONE;
+ break;
+ case LL_LEASE_LAYOUT_MERGE: {
+ int fd;
+
+ if (ioc->lil_count != 1)
+ GOTO(out, rc = -EINVAL);
+
+ arg += sizeof(*ioc);
+ if (copy_from_user(&fd, (void __user *)arg, sizeof(__u32)))
+ GOTO(out, rc = -EFAULT);
+
+ layout_file = fget(fd);
+ if (!layout_file)
+ GOTO(out, rc = -EBADF);
+
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY ||
+ (layout_file->f_flags & O_ACCMODE) == O_RDONLY)
+ GOTO(out, rc = -EPERM);
+
+ data = file_inode(layout_file);
+ bias = MDS_CLOSE_LAYOUT_MERGE;
+ break;
+ }
+ case LL_LEASE_LAYOUT_SPLIT: {
+ int fdv;
+ int mirror_id;
+
+ if (ioc->lil_count != 2)
+ GOTO(out, rc = -EINVAL);
+
+ arg += sizeof(*ioc);
+ if (copy_from_user(&fdv, (void __user *)arg, sizeof(__u32)))
+ GOTO(out, rc = -EFAULT);
+
+ arg += sizeof(__u32);
+ if (copy_from_user(&mirror_id, (void __user *)arg,
+ sizeof(__u32)))
+ GOTO(out, rc = -EFAULT);
+
+ layout_file = fget(fdv);
+ if (!layout_file)
+ GOTO(out, rc = -EBADF);
+
+ sp.sp_inode = file_inode(layout_file);
+ sp.sp_mirror_id = (__u16)mirror_id;
+ data = &sp;
+ bias = MDS_CLOSE_LAYOUT_SPLIT;
+ break;
+ }
+ default:
+ /* without close intent */
+ break;
+ }
+
+ rc = ll_lease_close_intent(och, inode, &lease_broken, bias, data);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ rc = ll_lease_och_release(inode, file);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (lease_broken)
+ fmode = 0;
+ EXIT;
+
+out:
+ switch (ioc->lil_flags) {
+ case LL_LEASE_RESYNC_DONE:
+ if (data)
+ OBD_FREE(data, data_size);
+ break;
+ case LL_LEASE_LAYOUT_MERGE:
+ case LL_LEASE_LAYOUT_SPLIT:
+ if (layout_file)
+ fput(layout_file);
+ break;
+ }
+
+ if (!rc)
+ rc = ll_lease_type_from_fmode(fmode);
+ RETURN(rc);
+}
+
+static long ll_file_set_lease(struct file *file, struct ll_ioc_lease *ioc,
+ unsigned long arg)
+{
+ struct inode *inode = file_inode(file);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct obd_client_handle *och = NULL;
+ __u64 open_flags = 0;
+ bool lease_broken;
+ fmode_t fmode;
+ long rc;
+ ENTRY;
+
+ switch (ioc->lil_mode) {
+ case LL_LEASE_WRLCK:
+ if (!(file->f_mode & FMODE_WRITE))
+ RETURN(-EPERM);
+ fmode = FMODE_WRITE;
+ break;
+ case LL_LEASE_RDLCK:
+ if (!(file->f_mode & FMODE_READ))
+ RETURN(-EPERM);
+ fmode = FMODE_READ;
+ break;
+ case LL_LEASE_UNLCK:
+ RETURN(ll_file_unlock_lease(file, ioc, arg));
+ default:
+ RETURN(-EINVAL);
+ }
+
+ CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
+
+ /* apply for lease */
+ if (ioc->lil_flags & LL_LEASE_RESYNC)
+ open_flags = MDS_OPEN_RESYNC;
+ och = ll_lease_open(inode, file, fmode, open_flags);
+ if (IS_ERR(och))
+ RETURN(PTR_ERR(och));
+
+ if (ioc->lil_flags & LL_LEASE_RESYNC) {
+ rc = ll_lease_file_resync(och, inode, arg);
+ if (rc) {
+ ll_lease_close(och, inode, NULL);
+ RETURN(rc);
+ }
+ rc = ll_layout_refresh(inode, &fd->fd_layout_version);
+ if (rc) {
+ ll_lease_close(och, inode, NULL);
+ RETURN(rc);
+ }
+ }
+
+ rc = 0;
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_lease_och == NULL) {
+ fd->fd_lease_och = och;
+ och = NULL;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ if (och != NULL) {
+ /* impossible now that only excl is supported for now */
+ ll_lease_close(och, inode, &lease_broken);
+ rc = -EBUSY;
+ }
+ RETURN(rc);
+}
+
+/* Fill *heat with the current file-heat values for @inode, read under
+ * lli_heat_lock. Assumes the caller set heat->lh_count to the number of
+ * slots available in heat->lh_heat (and that it does not exceed
+ * OBD_HEAT_COUNT) - TODO confirm against the ioctl caller. */
+static void ll_heat_get(struct inode *inode, struct lu_heat *heat)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ __u64 now = ktime_get_real_seconds();
+ int i;
+
+ spin_lock(&lli->lli_heat_lock);
+ heat->lh_flags = lli->lli_heat_flags;
+ for (i = 0; i < heat->lh_count; i++)
+ heat->lh_heat[i] = obd_heat_get(&lli->lli_heat_instances[i],
+ now, sbi->ll_heat_decay_weight,
+ sbi->ll_heat_period_second);
+ spin_unlock(&lli->lli_heat_lock);
+}
+
+/* Update file-heat accounting state for @inode under lli_heat_lock:
+ * LU_HEAT_FLAG_CLEAR resets all heat instances, LU_HEAT_FLAG_OFF
+ * disables (its absence re-enables) heat tracking. Always returns 0. */
+static int ll_heat_set(struct inode *inode, enum lu_heat_flag flags)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc = 0;
+
+ spin_lock(&lli->lli_heat_lock);
+ if (flags & LU_HEAT_FLAG_CLEAR)
+ obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
+
+ if (flags & LU_HEAT_FLAG_OFF)
+ lli->lli_heat_flags |= LU_HEAT_FLAG_OFF;
+ else
+ lli->lli_heat_flags &= ~LU_HEAT_FLAG_OFF;
+
+ spin_unlock(&lli->lli_heat_lock);
+
 RETURN(rc);
 }
static long
ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int flags, rc;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%x\n",
PFID(ll_inode2fid(inode)), inode, cmd);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
- /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
- if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
- RETURN(-ENOTTY);
+ /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
+ if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
+ RETURN(-ENOTTY);
- switch(cmd) {
- case LL_IOC_GETFLAGS:
- /* Get the current value of the file flags */
+ switch (cmd) {
+ case LL_IOC_GETFLAGS:
+ /* Get the current value of the file flags */
return put_user(fd->fd_flags, (int __user *)arg);
case LL_IOC_SETFLAGS:
case LL_IOC_CLRFLAGS:
fd->fd_flags &= ~flags;
}
RETURN(0);
- case LL_IOC_LOV_SETSTRIPE:
- RETURN(ll_lov_setstripe(inode, file, arg));
- case LL_IOC_LOV_SETEA:
- RETURN(ll_lov_setea(inode, file, arg));
+ case LL_IOC_LOV_SETSTRIPE:
+ case LL_IOC_LOV_SETSTRIPE_NEW:
+ RETURN(ll_lov_setstripe(inode, file, (void __user *)arg));
+ case LL_IOC_LOV_SETEA:
+ RETURN(ll_lov_setea(inode, file, (void __user *)arg));
case LL_IOC_LOV_SWAP_LAYOUTS: {
struct file *file2;
struct lustre_swap_layouts lsl;
if (copy_from_user(&lsl, (char __user *)arg,
- sizeof(struct lustre_swap_layouts)))
+ sizeof(struct lustre_swap_layouts)))
RETURN(-EFAULT);
if ((file->f_flags & O_ACCMODE) == O_RDONLY)
struct ll_inode_info *lli;
struct obd_client_handle *och = NULL;
- if (lsl.sl_flags != SWAP_LAYOUTS_CLOSE)
- GOTO(out, rc = -EINVAL);
-
lli = ll_i2info(inode);
mutex_lock(&lli->lli_och_mutex);
if (fd->fd_lease_och != NULL) {
mutex_unlock(&lli->lli_och_mutex);
if (och == NULL)
GOTO(out, rc = -ENOLCK);
- inode2 = file2->f_path.dentry->d_inode;
+ inode2 = file_inode(file2);
rc = ll_swap_layouts_close(och, inode, inode2);
} else {
rc = ll_swap_layouts(file, file2, &lsl);
RETURN(rc);
}
case LL_IOC_LOV_GETSTRIPE:
- RETURN(ll_file_getstripe(inode,
- (struct lov_user_md __user *)arg));
- case FSFILT_IOC_GETFLAGS:
- case FSFILT_IOC_SETFLAGS:
- RETURN(ll_iocontrol(inode, file, cmd, arg));
- case FSFILT_IOC_GETVERSION_OLD:
- case FSFILT_IOC_GETVERSION:
+ case LL_IOC_LOV_GETSTRIPE_NEW:
+ RETURN(ll_file_getstripe(inode, (void __user *)arg, 0));
+ case FS_IOC_GETFLAGS:
+ case FS_IOC_SETFLAGS:
+ RETURN(ll_iocontrol(inode, file, cmd, arg));
+ case FSFILT_IOC_GETVERSION:
+ case FS_IOC_GETVERSION:
RETURN(put_user(inode->i_generation, (int __user *)arg));
+ /* We need to special case any other ioctls we want to handle,
+ * to send them to the MDS/OST as appropriate and to properly
+ * network encode the arg field. */
+ case FS_IOC_SETVERSION:
+ RETURN(-ENOTSUPP);
+
case LL_IOC_GROUP_LOCK:
RETURN(ll_get_grouplock(inode, file, arg));
case LL_IOC_GROUP_UNLOCK:
case IOC_OBD_STATFS:
RETURN(ll_obd_statfs(inode, (void __user *)arg));
- /* We need to special case any other ioctls we want to handle,
- * to send them to the MDS/OST as appropriate and to properly
- * network encode the arg field.
- case FSFILT_IOC_SETVERSION_OLD:
- case FSFILT_IOC_SETVERSION:
- */
case LL_IOC_FLUSHCTX:
RETURN(ll_flush_ctx(inode));
case LL_IOC_PATH2FID: {
RETURN(-EFAULT);
idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
- rc = ll_data_version(inode, &idv.idv_version, idv.idv_flags);
+ rc = ll_ioc_data_version(inode, &idv);
if (rc == 0 &&
copy_to_user((char __user *)arg, &idv, sizeof(idv)))
OBD_FREE_PTR(hca);
RETURN(rc);
}
- case LL_IOC_SET_LEASE: {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_client_handle *och = NULL;
- bool lease_broken;
- fmode_t fmode;
+ case LL_IOC_SET_LEASE_OLD: {
+ struct ll_ioc_lease ioc = { .lil_mode = (__u32)arg };
- switch (arg) {
- case LL_LEASE_WRLCK:
- if (!(file->f_mode & FMODE_WRITE))
- RETURN(-EPERM);
- fmode = FMODE_WRITE;
- break;
- case LL_LEASE_RDLCK:
- if (!(file->f_mode & FMODE_READ))
- RETURN(-EPERM);
- fmode = FMODE_READ;
- break;
- case LL_LEASE_UNLCK:
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
- och = fd->fd_lease_och;
- fd->fd_lease_och = NULL;
- }
- mutex_unlock(&lli->lli_och_mutex);
-
- if (och == NULL)
- RETURN(-ENOLCK);
-
- fmode = och->och_flags;
- rc = ll_lease_close(och, inode, &lease_broken);
- if (rc < 0)
- RETURN(rc);
-
- if (lease_broken)
- fmode = 0;
-
- RETURN(ll_lease_type_from_fmode(fmode));
- default:
- RETURN(-EINVAL);
- }
-
- CDEBUG(D_INODE, "Set lease with mode %u\n", fmode);
+ RETURN(ll_file_set_lease(file, &ioc, 0));
+ }
+ case LL_IOC_SET_LEASE: {
+ struct ll_ioc_lease ioc;
- /* apply for lease */
- och = ll_lease_open(inode, file, fmode, 0);
- if (IS_ERR(och))
- RETURN(PTR_ERR(och));
+ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
+ RETURN(-EFAULT);
- rc = 0;
- mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och == NULL) {
- fd->fd_lease_och = och;
- och = NULL;
- }
- mutex_unlock(&lli->lli_och_mutex);
- if (och != NULL) {
- /* impossible now that only excl is supported for now */
- ll_lease_close(och, inode, &lease_broken);
- rc = -EBUSY;
- }
- RETURN(rc);
+ RETURN(ll_file_set_lease(file, &ioc, arg));
}
case LL_IOC_GET_LEASE: {
struct ll_inode_info *lli = ll_i2info(inode);
RETURN(ll_file_futimes_3(file, &lfu));
}
case LL_IOC_LADVISE: {
- struct ladvise_hdr *ladvise_hdr;
+ struct llapi_ladvise_hdr *k_ladvise_hdr;
+ struct llapi_ladvise_hdr __user *u_ladvise_hdr;
int i;
int num_advise;
- int alloc_size = sizeof(*ladvise_hdr);
+ int alloc_size = sizeof(*k_ladvise_hdr);
rc = 0;
- OBD_ALLOC_PTR(ladvise_hdr);
- if (ladvise_hdr == NULL)
+ u_ladvise_hdr = (void __user *)arg;
+ OBD_ALLOC_PTR(k_ladvise_hdr);
+ if (k_ladvise_hdr == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(ladvise_hdr,
- (const struct ladvise_hdr __user *)arg,
- alloc_size))
+ if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
GOTO(out_ladvise, rc = -EFAULT);
- if (ladvise_hdr->lah_magic != LADVISE_MAGIC ||
- ladvise_hdr->lah_count < 1)
+ if (k_ladvise_hdr->lah_magic != LADVISE_MAGIC ||
+ k_ladvise_hdr->lah_count < 1)
GOTO(out_ladvise, rc = -EINVAL);
- num_advise = ladvise_hdr->lah_count;
+ num_advise = k_ladvise_hdr->lah_count;
if (num_advise >= LAH_COUNT_MAX)
GOTO(out_ladvise, rc = -EFBIG);
- OBD_FREE_PTR(ladvise_hdr);
- alloc_size = offsetof(typeof(*ladvise_hdr),
+ OBD_FREE_PTR(k_ladvise_hdr);
+ alloc_size = offsetof(typeof(*k_ladvise_hdr),
lah_advise[num_advise]);
- OBD_ALLOC(ladvise_hdr, alloc_size);
- if (ladvise_hdr == NULL)
+ OBD_ALLOC(k_ladvise_hdr, alloc_size);
+ if (k_ladvise_hdr == NULL)
RETURN(-ENOMEM);
/*
* TODO: submit multiple advices to one server in a single RPC
*/
- if (copy_from_user(ladvise_hdr,
- (const struct ladvise_hdr __user *)arg,
- alloc_size))
+ if (copy_from_user(k_ladvise_hdr, u_ladvise_hdr, alloc_size))
GOTO(out_ladvise, rc = -EFAULT);
for (i = 0; i < num_advise; i++) {
- rc = ll_ladvise(inode, file, ladvise_hdr->lah_flags,
- &ladvise_hdr->lah_advise[i]);
+ struct llapi_lu_ladvise *k_ladvise =
+ &k_ladvise_hdr->lah_advise[i];
+ struct llapi_lu_ladvise __user *u_ladvise =
+ &u_ladvise_hdr->lah_advise[i];
+
+ rc = ll_ladvise_sanity(inode, k_ladvise);
if (rc)
+ GOTO(out_ladvise, rc);
+
+ switch (k_ladvise->lla_advice) {
+ case LU_LADVISE_LOCKNOEXPAND:
+ rc = ll_lock_noexpand(file,
+ k_ladvise->lla_peradvice_flags);
+ GOTO(out_ladvise, rc);
+ case LU_LADVISE_LOCKAHEAD:
+
+ rc = ll_file_lock_ahead(file, k_ladvise);
+
+ if (rc < 0)
+ GOTO(out_ladvise, rc);
+
+ if (put_user(rc,
+ &u_ladvise->lla_lockahead_result))
+ GOTO(out_ladvise, rc = -EFAULT);
+ break;
+ default:
+ rc = ll_ladvise(inode, file,
+ k_ladvise_hdr->lah_flags,
+ k_ladvise);
+ if (rc)
+ GOTO(out_ladvise, rc);
break;
+ }
+
}
out_ladvise:
- OBD_FREE(ladvise_hdr, alloc_size);
+ OBD_FREE(k_ladvise_hdr, alloc_size);
RETURN(rc);
}
- default: {
- int err;
+ case LL_IOC_FLR_SET_MIRROR: {
+ /* mirror I/O must be direct to avoid polluting page cache
+ * by stale data. */
+ if (!(file->f_flags & O_DIRECT))
+ RETURN(-EINVAL);
+
+ fd->fd_designated_mirror = (__u32)arg;
+ RETURN(0);
+ }
+ case LL_IOC_FSGETXATTR:
+ RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
+ case LL_IOC_FSSETXATTR:
+ RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
+ case BLKSSZGET:
+ RETURN(put_user(PAGE_SIZE, (int __user *)arg));
+ case LL_IOC_HEAT_GET: {
+ struct lu_heat uheat;
+ struct lu_heat *heat;
+ int size;
+
+ if (copy_from_user(&uheat, (void __user *)arg, sizeof(uheat)))
+ RETURN(-EFAULT);
+
+ if (uheat.lh_count > OBD_HEAT_COUNT)
+ uheat.lh_count = OBD_HEAT_COUNT;
+
+ size = offsetof(typeof(uheat), lh_heat[uheat.lh_count]);
+ OBD_ALLOC(heat, size);
+ if (heat == NULL)
+ RETURN(-ENOMEM);
+
+ heat->lh_count = uheat.lh_count;
+ ll_heat_get(inode, heat);
+ rc = copy_to_user((char __user *)arg, heat, size);
+ OBD_FREE(heat, size);
+ RETURN(rc ? -EFAULT : 0);
+ }
+ case LL_IOC_HEAT_SET: {
+ __u64 flags;
- if (LLIOC_STOP ==
- ll_iocontrol_call(inode, file, cmd, arg, &err))
- RETURN(err);
+ if (copy_from_user(&flags, (void __user *)arg, sizeof(flags)))
+ RETURN(-EFAULT);
+ rc = ll_heat_set(inode, flags);
+ RETURN(rc);
+ }
+ default:
RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
(void __user *)arg));
}
- }
}
#ifndef HAVE_FILE_LLSEEK_SIZE
generic_file_llseek_size(struct file *file, loff_t offset, int origin,
loff_t maxsize, loff_t eof)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
switch (origin) {
case SEEK_END:
* SEEK_CURs. Note that parallel writes and reads behave
* like SEEK_SET.
*/
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
offset = llseek_execute(file, file->f_pos + offset, maxsize);
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return offset;
case SEEK_DATA:
/*
static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
loff_t retval, eof = 0;
ENTRY;
static int ll_flush(struct file *file, fl_owner_t id)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int rc, err;
int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
enum cl_fsync_mode mode, int ignore_layout)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_io *io;
struct cl_fsync_io *fio;
int result;
+ __u16 refcheck;
ENTRY;
if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
RETURN(-EINVAL);
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
if (result == 0)
result = fio->fi_nr_written;
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
RETURN(result);
}
/*
- * When dentry is provided (the 'else' case), *file->f_path.dentry may be
+ * When dentry is provided (the 'else' case), file_dentry() may be
* null and dentry must be used directly rather than pulled from
- * *file->f_path.dentry as is done otherwise.
+ * file_dentry() as is done otherwise.
*/
#ifdef HAVE_FILE_FSYNC_4ARGS
int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
#elif defined(HAVE_FILE_FSYNC_2ARGS)
int ll_fsync(struct file *file, int datasync)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
loff_t start = 0;
loff_t end = LLONG_MAX;
#else
#ifdef HAVE_FILE_FSYNC_4ARGS
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
#else
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
* failed for pages in this mapping. */
if (!S_ISDIR(inode->i_mode)) {
err = lli->lli_async_rc;
- lli->lli_async_rc = 0;
- if (rc == 0)
- rc = err;
- err = lov_read_and_clear_async_rc(lli->lli_clob);
+ lli->lli_async_rc = 0;
if (rc == 0)
rc = err;
+ if (lli->lli_clob != NULL) {
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (rc == 0)
+ rc = err;
+ }
}
err = md_fsync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
}
#ifdef HAVE_FILE_FSYNC_4ARGS
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
#endif
RETURN(rc);
}
static int
ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ldlm_enqueue_info einfo = {
.ei_type = LDLM_FLOCK,
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags="LPX64", mode=%u, "
- "start="LPU64", end="LPU64"\n", PFID(ll_inode2fid(inode)),
+ CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, "
+ "start=%llu, end=%llu\n", PFID(ll_inode2fid(inode)),
flock.l_flock.pid, flags, einfo.ei_mode,
flock.l_flock.start, flock.l_flock.end);
- rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, NULL, op_data, &lockh,
+ rc = md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data, &lockh,
flags);
/* Restore the file lock type if not TEST lock. */
if (rc2 && file_lock->fl_type != F_UNLCK) {
einfo.ei_mode = LCK_NL;
- md_enqueue(sbi->ll_md_exp, &einfo, &flock, NULL, op_data,
+ md_enqueue(sbi->ll_md_exp, &einfo, &flock, op_data,
&lockh, flags);
rc = rc2;
}
RETURN(rc);
}
-int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
- const char *name, int namelen)
+int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum,
+ const char *name)
{
- struct dentry *dchild = NULL;
- struct inode *child_inode = NULL;
- struct md_op_data *op_data;
+ struct dentry *dchild = NULL;
+ struct inode *child_inode = NULL;
+ struct md_op_data *op_data;
struct ptlrpc_request *request = NULL;
struct obd_client_handle *och = NULL;
- struct qstr qstr;
- struct mdt_body *body;
- int rc;
- __u64 data_version = 0;
+ struct qstr qstr;
+ struct mdt_body *body;
+ __u64 data_version = 0;
+ size_t namelen = strlen(name);
+ int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
+ int rc;
ENTRY;
- CDEBUG(D_VFSTRACE, "migrate %s under "DFID" to MDT%04x\n",
- name, PFID(ll_inode2fid(parent)), mdtidx);
+ CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
+ PFID(ll_inode2fid(parent)), name,
+ lum->lum_stripe_offset, lum->lum_stripe_count);
- op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ if (lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC) &&
+ lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC_SPECIFIC))
+ lustre_swab_lmv_user_md(lum);
/* Get child FID first */
- qstr.hash = full_name_hash(name, namelen);
+ qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
qstr.name = name;
qstr.len = namelen;
- dchild = d_lookup(file->f_path.dentry, &qstr);
- if (dchild != NULL) {
- if (dchild->d_inode != NULL)
+ dchild = d_lookup(file_dentry(file), &qstr);
+ if (dchild) {
+ if (dchild->d_inode)
child_inode = igrab(dchild->d_inode);
dput(dchild);
}
- if (child_inode == NULL) {
- rc = ll_get_fid_by_name(parent, name, namelen,
- &op_data->op_fid3, &child_inode);
- if (rc != 0)
- GOTO(out_free, rc);
+ if (!child_inode) {
+ rc = ll_get_fid_by_name(parent, name, namelen, NULL,
+ &child_inode);
+ if (rc)
+ RETURN(rc);
}
- if (child_inode == NULL)
- GOTO(out_free, rc = -EINVAL);
+ if (!child_inode)
+ RETURN(-ENOENT);
+
+ if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
+ OBD_CONNECT2_DIR_MIGRATE)) {
+ if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
+ ll_i2info(child_inode)->lli_lsm_md) {
+ CERROR("%s: MDT doesn't support stripe directory "
+ "migration!\n", ll_i2sbi(parent)->ll_fsname);
+ GOTO(out_iput, rc = -EOPNOTSUPP);
+ }
+ }
/*
* lfs migrate command needs to be blocked on the client
if (child_inode == parent->i_sb->s_root->d_inode)
GOTO(out_iput, rc = -EINVAL);
- mutex_lock(&child_inode->i_mutex);
+ op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
+ child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ GOTO(out_iput, rc = PTR_ERR(op_data));
+
+ inode_lock(child_inode);
op_data->op_fid3 = *ll_inode2fid(child_inode);
if (!fid_is_sane(&op_data->op_fid3)) {
CERROR("%s: migrate %s, but FID "DFID" is insane\n",
- ll_get_fsname(parent->i_sb, NULL, 0), name,
+ ll_i2sbi(parent)->ll_fsname, name,
PFID(&op_data->op_fid3));
GOTO(out_unlock, rc = -EINVAL);
}
- rc = ll_get_mdt_idx_by_fid(ll_i2sbi(parent), &op_data->op_fid3);
- if (rc < 0)
- GOTO(out_unlock, rc);
+ op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
+ op_data->op_data = lum;
+ op_data->op_data_size = lumlen;
- if (rc == mdtidx) {
- CDEBUG(D_INFO, "%s: "DFID" is already on MDT%04x\n", name,
- PFID(&op_data->op_fid3), mdtidx);
- GOTO(out_unlock, rc = 0);
- }
again:
if (S_ISREG(child_inode->i_mode)) {
och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
if (rc != 0)
GOTO(out_close, rc);
- op_data->op_handle = och->och_fh;
- op_data->op_data = och->och_mod;
+ op_data->op_open_handle = och->och_open_handle;
op_data->op_data_version = data_version;
op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_bias |= MDS_RENAME_MIGRATE;
+ op_data->op_bias |= MDS_CLOSE_MIGRATE;
+
+ spin_lock(&och->och_mod->mod_open_req->rq_lock);
+ och->och_mod->mod_open_req->rq_replay = 0;
+ spin_unlock(&och->och_mod->mod_open_req->rq_lock);
}
- op_data->op_mds = mdtidx;
- op_data->op_cli_flags = CLI_MIGRATE;
- rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name,
- namelen, name, namelen, &request);
- if (rc == 0)
+ rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name, namelen,
+ name, namelen, &request);
+ if (rc == 0) {
+ LASSERT(request != NULL);
ll_update_times(request, parent);
+ }
- if (request != NULL) {
+ if (rc == 0 || rc == -EAGAIN) {
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
- ptlrpc_req_finished(request);
- GOTO(out_close, rc = -EPROTO);
- }
+ LASSERT(body != NULL);
/* If the server does release layout lock, then we cleanup
* the client och here, otherwise release it in out_close: */
- if (och != NULL &&
- body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
+ if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
obd_mod_put(och->och_mod);
md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
och);
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
OBD_FREE_PTR(och);
och = NULL;
}
- ptlrpc_req_finished(request);
}
- /* Try again if the file layout has changed. */
- if (rc == -EAGAIN && S_ISREG(child_inode->i_mode)) {
+ if (request != NULL) {
+ ptlrpc_req_finished(request);
request = NULL;
- goto again;
}
+
+	/* Try again if the lease has been cancelled. */
+ if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
+ goto again;
+
out_close:
- if (och != NULL) /* close the file */
+ if (och)
ll_lease_close(och, child_inode, NULL);
- if (rc == 0)
+ if (!rc)
clear_nlink(child_inode);
out_unlock:
- mutex_unlock(&child_inode->i_mutex);
+ inode_unlock(child_inode);
+ ll_finish_md_op_data(op_data);
out_iput:
iput(child_inode);
-out_free:
- ll_finish_md_op_data(op_data);
RETURN(rc);
}
} else if (rc != 0) {
CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
"%s: revalidate FID "DFID" error: rc = %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
+ ll_i2sbi(inode)->ll_fsname,
PFID(ll_inode2fid(inode)), rc);
}
return rc;
}
-static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
+static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op)
{
- struct inode *inode = dentry->d_inode;
- struct ptlrpc_request *req = NULL;
- struct obd_export *exp;
- int rc = 0;
- ENTRY;
-
- LASSERT(inode != NULL);
+ struct inode *inode = dentry->d_inode;
+ struct obd_export *exp = ll_i2mdexp(inode);
+ struct lookup_intent oit = {
+ .it_op = op,
+ };
+ struct ptlrpc_request *req = NULL;
+ struct md_op_data *op_data;
+ int rc = 0;
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
- exp = ll_i2mdexp(inode);
-
- /* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC.
- * But under CMD case, it caused some lock issues, should be fixed
- * with new CMD ibits lock. See bug 12718 */
- if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) {
- struct lookup_intent oit = { .it_op = IT_GETATTR };
- struct md_op_data *op_data;
-
- if (ibits == MDS_INODELOCK_LOOKUP)
- oit.it_op = IT_LOOKUP;
-
- /* Call getattr by fid, so do not provide name at all. */
- op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
- dentry->d_inode, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
-
- rc = md_intent_lock(exp, op_data, &oit, &req,
- &ll_md_blocking_ast, 0);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- rc = ll_inode_revalidate_fini(inode, rc);
- GOTO (out, rc);
- }
-
- rc = ll_revalidate_it_finish(req, &oit, dentry);
- if (rc != 0) {
- ll_intent_release(&oit);
- GOTO(out, rc);
- }
-
- /* Unlinked? Unhash dentry, so it is not picked up later by
- do_lookup() -> ll_revalidate_it(). We cannot use d_drop
- here to preserve get_cwd functionality on 2.6.
- Bug 10503 */
- if (!dentry->d_inode->i_nlink) {
- ll_lock_dcache(inode);
- d_lustre_invalidate(dentry, 0);
- ll_unlock_dcache(inode);
- }
-
- ll_lookup_finish_locks(&oit, dentry);
- } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
- struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
- u64 valid = OBD_MD_FLGETATTR;
- struct md_op_data *op_data;
- int ealen = 0;
+ /* Call getattr by fid, so do not provide name at all. */
+ op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
- if (S_ISREG(inode->i_mode)) {
- rc = ll_get_default_mdsize(sbi, &ealen);
- if (rc)
- RETURN(rc);
- valid |= OBD_MD_FLEASIZE | OBD_MD_FLMODEASIZE;
- }
+ rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0);
+ ll_finish_md_op_data(op_data);
+ if (rc < 0) {
+ rc = ll_inode_revalidate_fini(inode, rc);
+ GOTO(out, rc);
+ }
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
- 0, ealen, LUSTRE_OPC_ANY,
- NULL);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ rc = ll_revalidate_it_finish(req, &oit, dentry);
+ if (rc != 0) {
+ ll_intent_release(&oit);
+ GOTO(out, rc);
+ }
- op_data->op_valid = valid;
- rc = md_getattr(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc) {
- rc = ll_inode_revalidate_fini(inode, rc);
- RETURN(rc);
- }
+ /* Unlinked? Unhash dentry, so it is not picked up later by
+ * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
+ * here to preserve get_cwd functionality on 2.6.
+ * Bug 10503 */
+ if (!dentry->d_inode->i_nlink) {
+ ll_lock_dcache(inode);
+ d_lustre_invalidate(dentry, 0);
+ ll_unlock_dcache(inode);
+ }
- rc = ll_prep_inode(&inode, req, NULL, NULL);
- }
+ ll_lookup_finish_locks(&oit, dentry);
out:
- ptlrpc_req_finished(req);
- return rc;
+ ptlrpc_req_finished(req);
+
+ return rc;
}
static int ll_merge_md_attr(struct inode *inode)
{
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_attr attr = { 0 };
int rc;
- LASSERT(ll_i2info(inode)->lli_lsm_md != NULL);
+ LASSERT(lli->lli_lsm_md != NULL);
+
+ /* foreign dir is not striped dir */
+ if (lli->lli_lsm_md->lsm_md_magic == LMV_MAGIC_FOREIGN)
+ RETURN(0);
+
+ down_read(&lli->lli_lsm_sem);
rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
&attr, ll_md_blocking_ast);
+ up_read(&lli->lli_lsm_sem);
if (rc != 0)
RETURN(rc);
RETURN(0);
}
-static int
-ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
+/**
+ * ll_compat_encode_dev() - squash a dev_t for the 32-bit compat stat path.
+ *
+ * \param[in] dev	native device number
+ * \return a dev_t whose major and minor numbers both fit in 8 bits, so
+ *	   it passes the old_valid_dev() check in the compat stat code
+ */
+static inline dev_t ll_compat_encode_dev(dev_t dev)
 {
-	struct inode *inode = dentry->d_inode;
-	int rc;
-	ENTRY;
+	/* The compat_sys_*stat*() syscalls will fail unless the
+	 * device majors and minors are both less than 256. Note that
+	 * the value returned here will be passed through
+	 * old_encode_dev() in cp_compat_stat(). And so we are not
+	 * trying to return a valid compat (u16) device number, just
+	 * one that will pass the old_valid_dev() check. */
+
+	return MKDEV(MAJOR(dev) & 0xff, MINOR(dev) & 0xff);
+}
- rc = __ll_inode_revalidate(dentry, ibits);
- if (rc != 0)
- RETURN(rc);
+#ifdef HAVE_INODEOPS_ENHANCED_GETATTR
+int ll_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
+{
+ struct dentry *de = path->dentry;
+#else
+int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
+{
+#endif
+ struct inode *inode = de->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc;
- /* if object isn't regular file, don't validate size */
- if (!S_ISREG(inode->i_mode)) {
- if (S_ISDIR(inode->i_mode) &&
- ll_i2info(inode)->lli_lsm_md != NULL) {
- rc = ll_merge_md_attr(inode);
- if (rc != 0)
- RETURN(rc);
- }
+ ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1);
- LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_atime;
- LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
- LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
- } else {
+ rc = ll_inode_revalidate(de, IT_GETATTR);
+ if (rc < 0)
+ RETURN(rc);
+
+ if (S_ISREG(inode->i_mode)) {
/* In case of restore, the MDT has the right size and has
* already send it back without granting the layout lock,
* inode is up-to-date so glimpse is useless.
* restore the MDT holds the layout lock so the glimpse will
* block up to the end of restore (getattr will block)
*/
- if (!ll_file_test_flag(ll_i2info(inode), LLIF_FILE_RESTORING))
+ if (!ll_file_test_flag(lli, LLIF_FILE_RESTORING)) {
rc = ll_glimpse_size(inode);
- }
- RETURN(rc);
-}
-
-int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
-{
- struct inode *inode = de->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- int res = 0;
-
- res = ll_inode_revalidate(de, MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LOOKUP);
- ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1);
+ if (rc < 0)
+ RETURN(rc);
+ }
+ } else {
+ /* If object isn't regular a file then don't validate size. */
+ if (S_ISDIR(inode->i_mode) &&
+ lli->lli_lsm_md != NULL) {
+ rc = ll_merge_md_attr(inode);
+ if (rc < 0)
+ RETURN(rc);
+ }
- if (res)
- return res;
+ inode->i_atime.tv_sec = lli->lli_atime;
+ inode->i_mtime.tv_sec = lli->lli_mtime;
+ inode->i_ctime.tv_sec = lli->lli_ctime;
+ }
OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
- stat->dev = inode->i_sb->s_dev;
- if (ll_need_32bit_api(sbi))
+ if (ll_need_32bit_api(sbi)) {
stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
- else
+ stat->dev = ll_compat_encode_dev(inode->i_sb->s_dev);
+ stat->rdev = ll_compat_encode_dev(inode->i_rdev);
+ } else {
stat->ino = inode->i_ino;
+ stat->dev = inode->i_sb->s_dev;
+ stat->rdev = inode->i_rdev;
+ }
+
stat->mode = inode->i_mode;
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
- stat->rdev = inode->i_rdev;
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
- stat->blksize = 1 << inode->i_blkbits;
+ stat->blksize = sbi->ll_stat_blksize ?: 1 << inode->i_blkbits;
stat->nlink = inode->i_nlink;
stat->size = i_size_read(inode);
RETURN(acl);
}
+#ifdef HAVE_IOP_SET_ACL
+#ifdef CONFIG_FS_POSIX_ACL
+/**
+ * ll_set_acl() - inode_operations ->set_acl handler: set/remove a POSIX ACL.
+ *
+ * Converts \a acl to xattr form and stores it on the MDS with
+ * md_setxattr(); a NULL \a acl removes the attribute instead
+ * (OBD_MD_FLXATTRRM).  For ACL_TYPE_ACCESS a non-NULL acl may also
+ * update inode->i_mode via posix_acl_update_mode().  The VFS ACL cache
+ * is refreshed on success and dropped on failure.
+ *
+ * \param[in] inode	inode being modified
+ * \param[in] acl	ACL to install, or NULL to delete the current one
+ * \param[in] type	ACL_TYPE_ACCESS or ACL_TYPE_DEFAULT
+ *
+ * \retval 0 on success, negative errno on failure
+ */
+int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+	struct ll_sb_info *sbi = ll_i2sbi(inode);
+	struct ptlrpc_request *req = NULL;
+	const char *name = NULL;
+	char *value = NULL;
+	size_t value_size = 0;
+	int rc = 0;
+	ENTRY;
+
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		name = XATTR_NAME_POSIX_ACL_ACCESS;
+		if (acl)
+			rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+		break;
+
+	case ACL_TYPE_DEFAULT:
+		name = XATTR_NAME_POSIX_ACL_DEFAULT;
+		/* default ACLs only make sense on directories */
+		if (!S_ISDIR(inode->i_mode))
+			rc = acl ? -EACCES : 0;
+		break;
+
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	if (rc)
+		return rc;
+
+	if (acl) {
+		value_size = posix_acl_xattr_size(acl->a_count);
+		value = kmalloc(value_size, GFP_NOFS);
+		if (value == NULL)
+			GOTO(out, rc = -ENOMEM);
+
+		rc = posix_acl_to_xattr(&init_user_ns, acl, value, value_size);
+		if (rc < 0)
+			GOTO(out_value, rc);
+	}
+
+	/* NULL value selects the "remove xattr" flavour of the RPC */
+	rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
+			 value ? OBD_MD_FLXATTR : OBD_MD_FLXATTRRM,
+			 name, value, value_size, 0, 0, &req);
+
+	ptlrpc_req_finished(req);
+out_value:
+	kfree(value);
+out:
+	if (rc)
+		forget_cached_acl(inode, type);
+	else
+		set_cached_acl(inode, type, acl);
+	RETURN(rc);
+}
+#endif /* CONFIG_FS_POSIX_ACL */
+#endif /* HAVE_IOP_SET_ACL */
+
#ifndef HAVE_GENERIC_PERMISSION_2ARGS
static int
# ifdef HAVE_GENERIC_PERMISSION_4ARGS
* need to do it before permission check. */
if (inode == inode->i_sb->s_root->d_inode) {
- rc = __ll_inode_revalidate(inode->i_sb->s_root,
- MDS_INODELOCK_LOOKUP);
+ rc = ll_inode_revalidate(inode->i_sb->s_root, IT_LOOKUP);
if (rc)
RETURN(rc);
}
}
ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM, 1);
-
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- rc = lustre_check_remote_perm(inode, mask);
- else
- rc = ll_generic_permission(inode, mask, flags, ll_check_acl);
-
+ rc = ll_generic_permission(inode, mask, flags, ll_check_acl);
/* restore current process's credentials and FS capability */
if (squash_id) {
revert_creds(old_cred);
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
+#ifdef HAVE_IOP_XATTR
.setxattr = ll_setxattr,
.getxattr = ll_getxattr,
- .listxattr = ll_listxattr,
.removexattr = ll_removexattr,
+#endif
+ .listxattr = ll_listxattr,
.fiemap = ll_fiemap,
#ifdef HAVE_IOP_GET_ACL
.get_acl = ll_get_acl,
#endif
+#ifdef HAVE_IOP_SET_ACL
+ .set_acl = ll_set_acl,
+#endif
};
-/* dynamic ioctl number support routins */
-static struct llioc_ctl_data {
- struct rw_semaphore ioc_sem;
- struct list_head ioc_head;
-} llioc = {
- __RWSEM_INITIALIZER(llioc.ioc_sem),
- LIST_HEAD_INIT(llioc.ioc_head)
-};
-
-
-struct llioc_data {
- struct list_head iocd_list;
- unsigned int iocd_size;
- llioc_callback_t iocd_cb;
- unsigned int iocd_count;
- unsigned int iocd_cmd[0];
-};
-
-void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
-{
- unsigned int size;
- struct llioc_data *in_data = NULL;
- ENTRY;
-
- if (cb == NULL || cmd == NULL ||
- count > LLIOC_MAX_CMD || count < 0)
- RETURN(NULL);
-
- size = sizeof(*in_data) + count * sizeof(unsigned int);
- OBD_ALLOC(in_data, size);
- if (in_data == NULL)
- RETURN(NULL);
-
- memset(in_data, 0, sizeof(*in_data));
- in_data->iocd_size = size;
- in_data->iocd_cb = cb;
- in_data->iocd_count = count;
- memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
-
- down_write(&llioc.ioc_sem);
- list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
- up_write(&llioc.ioc_sem);
-
- RETURN(in_data);
-}
-
-void ll_iocontrol_unregister(void *magic)
-{
- struct llioc_data *tmp;
-
- if (magic == NULL)
- return;
-
- down_write(&llioc.ioc_sem);
- list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
- if (tmp == magic) {
- unsigned int size = tmp->iocd_size;
-
- list_del(&tmp->iocd_list);
- up_write(&llioc.ioc_sem);
-
- OBD_FREE(tmp, size);
- return;
- }
- }
- up_write(&llioc.ioc_sem);
-
- CWARN("didn't find iocontrol register block with magic: %p\n", magic);
-}
-
-EXPORT_SYMBOL(ll_iocontrol_register);
-EXPORT_SYMBOL(ll_iocontrol_unregister);
-
-static enum llioc_iter
-ll_iocontrol_call(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg, int *rcp)
-{
- enum llioc_iter ret = LLIOC_CONT;
- struct llioc_data *data;
- int rc = -EINVAL, i;
-
- down_read(&llioc.ioc_sem);
- list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
- for (i = 0; i < data->iocd_count; i++) {
- if (cmd != data->iocd_cmd[i])
- continue;
-
- ret = data->iocd_cb(inode, file, cmd, arg, data, &rc);
- break;
- }
-
- if (ret == LLIOC_STOP)
- break;
- }
- up_read(&llioc.ioc_sem);
-
- if (rcp)
- *rcp = rc;
- return ret;
-}
-
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
- struct cl_env_nest nest;
struct lu_env *env;
int rc;
+ __u16 refcheck;
ENTRY;
if (obj == NULL)
RETURN(0);
- env = cl_env_nested_get(&nest);
+ env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
}
out:
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
RETURN(rc);
}
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req;
- struct mdt_body *body;
void *lvbdata;
void *lmm;
int lmmsize;
* layout here. Please note that we can't use the LVB buffer in
* completion AST because it doesn't have a large enough buffer */
rc = ll_get_default_mdsize(sbi, &lmmsize);
- if (rc == 0)
- rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- OBD_MD_FLXATTR, XATTR_NAME_LOV, NULL, 0,
- lmmsize, 0, &req);
if (rc < 0)
RETURN(rc);
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
- GOTO(out, rc = -EPROTO);
+ rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), OBD_MD_FLXATTR,
+ XATTR_NAME_LOV, lmmsize, &req);
+ if (rc < 0) {
+ if (rc == -ENODATA)
+ GOTO(out, rc = 0); /* empty layout */
+ else
+ RETURN(rc);
+ }
- lmmsize = body->mbo_eadatasize;
+ lmmsize = rc;
+ rc = 0;
if (lmmsize == 0) /* empty layout */
GOTO(out, rc = 0);
PFID(&lli->lli_fid), inode);
/* in case this is a caching lock and reinstate with new inode */
- md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
+ md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
lock_res_and_lock(lock);
lvb_ready = ldlm_is_lvb_ready(lock);
unlock_res_and_lock(lock);
+
/* checking lvb_ready is racy but this is okay. The worst case is
* that multi processes may configure the file on the same time. */
-
if (lvb_ready)
GOTO(out, rc = 0);
/* refresh layout failed, need to wait */
wait_layout = rc == -EBUSY;
EXIT;
-
out:
LDLM_LOCK_PUT(lock);
ldlm_lock_decref(lockh, mode);
/* wait for IO to complete if it's still being used. */
if (wait_layout) {
CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), inode);
+ sbi->ll_fsname, PFID(&lli->lli_fid), inode);
memset(&conf, 0, sizeof conf);
conf.coc_opc = OBJECT_CONF_WAIT;
rc = -EAGAIN;
CDEBUG(D_INODE, "%s file="DFID" waiting layout return: %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), rc);
+ sbi->ll_fsname, PFID(&lli->lli_fid), rc);
}
RETURN(rc);
}
-static int ll_layout_refresh_locked(struct inode *inode)
+/**
+ * Issue layout intent RPC to MDS.
+ * \param inode [in] file inode
+ * \param intent [in] layout intent
+ *
+ * \retval 0 on success
+ * \retval < 0 error code
+ */
+static int ll_layout_intent(struct inode *inode, struct layout_intent *intent)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
- struct lookup_intent it;
- struct lustre_handle lockh;
- enum ldlm_mode mode;
- struct ldlm_enqueue_info einfo = {
- .ei_type = LDLM_IBITS,
- .ei_mode = LCK_CR,
- .ei_cb_bl = &ll_md_blocking_ast,
- .ei_cb_cp = &ldlm_completion_ast,
- };
+ struct lookup_intent it;
+ struct ptlrpc_request *req;
int rc;
ENTRY;
-again:
- /* mostly layout lock is caching on the local side, so try to match
- * it before grabbing layout lock mutex. */
- mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW);
- if (mode != 0) { /* hit cached lock */
- rc = ll_layout_lock_set(&lockh, mode, inode);
- if (rc == -EAGAIN)
- goto again;
-
- RETURN(rc);
- }
-
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
0, 0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- /* have to enqueue one */
+ op_data->op_data = intent;
+ op_data->op_data_size = sizeof(*intent);
+
memset(&it, 0, sizeof(it));
it.it_op = IT_LAYOUT;
- lockh.cookie = 0ULL;
+ if (intent->li_opc == LAYOUT_INTENT_WRITE ||
+ intent->li_opc == LAYOUT_INTENT_TRUNC)
+ it.it_flags = FMODE_WRITE;
LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), inode);
+ sbi->ll_fsname, PFID(&lli->lli_fid), inode);
- rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL, &it, op_data, &lockh, 0);
+ rc = md_intent_lock(sbi->ll_md_exp, op_data, &it, &req,
+ &ll_md_blocking_ast, 0);
if (it.it_request != NULL)
ptlrpc_req_finished(it.it_request);
it.it_request = NULL;
ll_finish_md_op_data(op_data);
- mode = it.it_lock_mode;
- it.it_lock_mode = 0;
- ll_intent_drop_lock(&it);
-
- if (rc == 0) {
- /* set lock data in case this is a new lock */
+ /* set lock data in case this is a new lock */
+ if (!rc)
ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- rc = ll_layout_lock_set(&lockh, mode, inode);
- if (rc == -EAGAIN)
- goto again;
- }
+
+ ll_intent_drop_lock(&it);
RETURN(rc);
}
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct lustre_handle lockh;
+ struct layout_intent intent = {
+ .li_opc = LAYOUT_INTENT_ACCESS,
+ };
+ enum ldlm_mode mode;
int rc;
ENTRY;
/* take layout lock mutex to enqueue layout lock exclusively. */
mutex_lock(&lli->lli_layout_mutex);
- rc = ll_layout_refresh_locked(inode);
- if (rc < 0)
- GOTO(out, rc);
+ while (1) {
+ /* mostly layout lock is caching on the local side, so try to
+ * match it before grabbing layout lock mutex. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
+ LCK_CR | LCK_CW | LCK_PR | LCK_PW);
+ if (mode != 0) { /* hit cached lock */
+ rc = ll_layout_lock_set(&lockh, mode, inode);
+ if (rc == -EAGAIN)
+ continue;
+ break;
+ }
- *gen = ll_layout_version_get(lli);
-out:
+ rc = ll_layout_intent(inode, &intent);
+ if (rc != 0)
+ break;
+ }
+
+ if (rc == 0)
+ *gen = ll_layout_version_get(lli);
mutex_unlock(&lli->lli_layout_mutex);
RETURN(rc);
}
/**
+ * Issue layout intent RPC indicating where in a file an IO is about to write.
+ *
+ * \param[in] inode file inode.
+ * \param[in] ext write range with start offset of fille in bytes where
+ * an IO is about to write, and exclusive end offset in
+ * bytes.
+ *
+ * \retval 0 on success
+ * \retval < 0 error code
+ */
+int ll_layout_write_intent(struct inode *inode, enum layout_intent_opc opc,
+ struct lu_extent *ext)
+{
+ struct layout_intent intent = {
+ .li_opc = opc,
+ .li_extent.e_start = ext->e_start,
+ .li_extent.e_end = ext->e_end,
+ };
+ int rc;
+ ENTRY;
+
+ rc = ll_layout_intent(inode, &intent);
+
+ RETURN(rc);
+}
+
+/**
* This function send a restore request to the MDT
*/
int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)