* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2016, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
struct dentry *dentry, *tmp_subdir;
DECLARE_LL_D_HLIST_NODE_PTR(p);
- ll_lock_dcache(dir);
+ spin_lock(&dir->i_lock);
ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry) {
spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
}
spin_unlock(&dentry->d_lock);
}
- ll_unlock_dcache(dir);
+ spin_unlock(&dir->i_lock);
}
int ll_test_inode_by_fid(struct inode *inode, void *opaque)
return lu_fid_eq(&ll_i2info(inode)->lli_fid, opaque);
}
-int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
+static int ll_dom_lock_cancel(struct inode *inode, struct ldlm_lock *lock)
{
- struct lustre_handle lockh;
+ struct lu_env *env;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ __u16 refcheck;
int rc;
ENTRY;
- switch (flag) {
- case LDLM_CB_BLOCKING:
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
- if (rc < 0) {
- CDEBUG(D_INODE, "ldlm_cli_cancel: rc = %d\n", rc);
- RETURN(rc);
- }
- break;
- case LDLM_CB_CANCELING: {
- struct inode *inode = ll_inode_from_resource_lock(lock);
- __u64 bits = lock->l_policy_data.l_inodebits.bits;
+ if (!lli->lli_clob) {
+ /* due to DoM read on open, there may exist pages for a Lustre
+ * regular file even though cl_object is not set up yet. */
+ truncate_inode_pages(inode->i_mapping, 0);
+ RETURN(0);
+ }
- /* Inode is set to lock->l_resource->lr_lvb_inode
- * for mdc - bug 24555 */
- LASSERT(lock->l_ast_data == NULL);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- if (inode == NULL)
- break;
+ /* reach MDC layer to flush data under the DoM ldlm lock */
+ rc = cl_object_flush(env, lli->lli_clob, lock);
+
+ cl_env_put(env, &refcheck);
+ RETURN(rc);
+}
+
+static void ll_lock_cancel_bits(struct ldlm_lock *lock, __u64 to_cancel)
+{
+ struct inode *inode = ll_inode_from_resource_lock(lock);
+ struct ll_inode_info *lli;
+ __u64 bits = to_cancel;
+ int rc;
+
+ ENTRY;
+
+ if (!inode) {
+ /* Most likely the inode has been evicted, which may cause
+ * the lock cleanups below to be skipped, so log a message
+ * about it.
+ */
+ if (lock->l_resource->lr_lvb_inode)
+ LDLM_DEBUG(lock,
+ "can't take inode for the lock (%sevicted)\n",
+ lock->l_resource->lr_lvb_inode->i_state &
+ I_FREEING ? "" : "not ");
+ RETURN_EXIT;
+ }
+
+ if (!fid_res_name_eq(ll_inode2fid(inode),
+ &lock->l_resource->lr_name)) {
+ LDLM_ERROR(lock, "data mismatch with object "DFID"(%p)",
+ PFID(ll_inode2fid(inode)), inode);
+ LBUG();
+ }
+
+ if (bits & MDS_INODELOCK_XATTR) {
+ ll_xattr_cache_destroy(inode);
+ bits &= ~MDS_INODELOCK_XATTR;
+ }
- /* Invalidate all dentries associated with this inode */
- LASSERT(ldlm_is_canceling(lock));
+ /* For OPEN locks we differentiate between lock modes
+ * LCK_CR, LCK_CW, LCK_PR - bug 22891 */
+ if (bits & MDS_INODELOCK_OPEN)
+ ll_have_md_lock(inode, &bits, lock->l_req_mode);
- if (!fid_res_name_eq(ll_inode2fid(inode),
- &lock->l_resource->lr_name)) {
- LDLM_ERROR(lock, "data mismatch with object "DFID"(%p)",
- PFID(ll_inode2fid(inode)), inode);
+ if (bits & MDS_INODELOCK_OPEN) {
+ fmode_t fmode;
+
+ switch (lock->l_req_mode) {
+ case LCK_CW:
+ fmode = FMODE_WRITE;
+ break;
+ case LCK_PR:
+ fmode = FMODE_EXEC;
+ break;
+ case LCK_CR:
+ fmode = FMODE_READ;
+ break;
+ default:
+ LDLM_ERROR(lock, "bad lock mode for OPEN lock");
LBUG();
}
- if (bits & MDS_INODELOCK_XATTR) {
- if (S_ISDIR(inode->i_mode))
- ll_i2info(inode)->lli_def_stripe_offset = -1;
- ll_xattr_cache_destroy(inode);
- bits &= ~MDS_INODELOCK_XATTR;
- }
+ ll_md_real_close(inode, fmode);
- /* For OPEN locks we differentiate between lock modes
- * LCK_CR, LCK_CW, LCK_PR - bug 22891 */
- if (bits & MDS_INODELOCK_OPEN)
- ll_have_md_lock(inode, &bits, lock->l_req_mode);
-
- if (bits & MDS_INODELOCK_OPEN) {
- fmode_t fmode;
-
- switch (lock->l_req_mode) {
- case LCK_CW:
- fmode = FMODE_WRITE;
- break;
- case LCK_PR:
- fmode = FMODE_EXEC;
- break;
- case LCK_CR:
- fmode = FMODE_READ;
- break;
- default:
- LDLM_ERROR(lock, "bad lock mode for OPEN lock");
- LBUG();
- }
+ bits &= ~MDS_INODELOCK_OPEN;
+ }
- ll_md_real_close(inode, fmode);
+ if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM |
+ MDS_INODELOCK_DOM))
+ ll_have_md_lock(inode, &bits, LCK_MINMODE);
- bits &= ~MDS_INODELOCK_OPEN;
- }
+ if (bits & MDS_INODELOCK_DOM) {
+ rc = ll_dom_lock_cancel(inode, lock);
+ if (rc < 0)
+ CDEBUG(D_INODE, "cannot flush DoM data "
+ DFID": rc = %d\n",
+ PFID(ll_inode2fid(inode)), rc);
+ }
- if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM))
- ll_have_md_lock(inode, &bits, LCK_MINMODE);
-
- if (bits & MDS_INODELOCK_LAYOUT) {
- struct cl_object_conf conf = {
- .coc_opc = OBJECT_CONF_INVALIDATE,
- .coc_inode = inode,
- };
-
- rc = ll_layout_conf(inode, &conf);
- if (rc < 0)
- CDEBUG(D_INODE, "cannot invalidate layout of "
- DFID": rc = %d\n",
- PFID(ll_inode2fid(inode)), rc);
- }
+ if (bits & MDS_INODELOCK_LAYOUT) {
+ struct cl_object_conf conf = {
+ .coc_opc = OBJECT_CONF_INVALIDATE,
+ .coc_inode = inode,
+ };
- if (bits & MDS_INODELOCK_UPDATE) {
- struct ll_inode_info *lli = ll_i2info(inode);
- lli->lli_update_atime = 1;
- }
+ rc = ll_layout_conf(inode, &conf);
+ if (rc < 0)
+ CDEBUG(D_INODE, "cannot invalidate layout of "
+ DFID": rc = %d\n",
+ PFID(ll_inode2fid(inode)), rc);
+ }
- if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
- struct ll_inode_info *lli = ll_i2info(inode);
+ lli = ll_i2info(inode);
- CDEBUG(D_INODE, "invalidating inode "DFID" lli = %p, "
- "pfid = "DFID"\n", PFID(ll_inode2fid(inode)),
- lli, PFID(&lli->lli_pfid));
- truncate_inode_pages(inode->i_mapping, 0);
+ if (bits & MDS_INODELOCK_UPDATE)
+ lli->lli_update_atime = 1;
- if (unlikely(!fid_is_zero(&lli->lli_pfid))) {
- struct inode *master_inode = NULL;
- unsigned long hash;
+ if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
+ CDEBUG(D_INODE, "invalidating inode "DFID" lli = %p, "
+ "pfid = "DFID"\n", PFID(ll_inode2fid(inode)),
+ lli, PFID(&lli->lli_pfid));
+ truncate_inode_pages(inode->i_mapping, 0);
- /* This is slave inode, since all of the child
- * dentry is connected on the master inode, so
- * we have to invalidate the negative children
- * on master inode */
- CDEBUG(D_INODE, "Invalidate s"DFID" m"DFID"\n",
- PFID(ll_inode2fid(inode)),
- PFID(&lli->lli_pfid));
+ if (unlikely(!fid_is_zero(&lli->lli_pfid))) {
+ struct inode *master_inode = NULL;
+ unsigned long hash;
- hash = cl_fid_build_ino(&lli->lli_pfid,
+ /* This is slave inode, since all of the child dentry
+ * is connected on the master inode, so we have to
+ * invalidate the negative children on master inode */
+ CDEBUG(D_INODE, "Invalidate s"DFID" m"DFID"\n",
+ PFID(ll_inode2fid(inode)), PFID(&lli->lli_pfid));
+
+ hash = cl_fid_build_ino(&lli->lli_pfid,
ll_need_32bit_api(ll_i2sbi(inode)));
- /* Do not lookup the inode with ilookup5,
- * otherwise it will cause dead lock,
- *
- * 1. Client1 send chmod req to the MDT0, then
- * on MDT0, it enqueues master and all of its
- * slaves lock, (mdt_attr_set() ->
- * mdt_lock_slaves()), after gets master and
- * stripe0 lock, it will send the enqueue req
- * (for stripe1) to MDT1, then MDT1 finds the
- * lock has been granted to client2. Then MDT1
- * sends blocking ast to client2.
- *
- * 2. At the same time, client2 tries to unlink
- * the striped dir (rm -rf striped_dir), and
- * during lookup, it will hold the master inode
- * of the striped directory, whose inode state
- * is NEW, then tries to revalidate all of its
- * slaves, (ll_prep_inode()->ll_iget()->
- * ll_read_inode2()-> ll_update_inode().). And
- * it will be blocked on the server side because
- * of 1.
- *
- * 3. Then the client get the blocking_ast req,
- * cancel the lock, but being blocked if using
- * ->ilookup5()), because master inode state is
- * NEW. */
- master_inode = ilookup5_nowait(inode->i_sb,
- hash, ll_test_inode_by_fid,
+ /* Do not lookup the inode with ilookup5, otherwise
+ * it will cause dead lock,
+ * 1. Client1 send chmod req to the MDT0, then on MDT0,
+ * it enqueues master and all of its slaves lock,
+ * (mdt_attr_set() -> mdt_lock_slaves()), after gets
+ * master and stripe0 lock, it will send the enqueue
+ * req (for stripe1) to MDT1, then MDT1 finds the lock
+ * has been granted to client2. Then MDT1 sends blocking
+ * ast to client2.
+ * 2. At the same time, client2 tries to unlink
+ * the striped dir (rm -rf striped_dir), and during
+ * lookup, it will hold the master inode of the striped
+ * directory, whose inode state is NEW, then tries to
+ * revalidate all of its slaves, (ll_prep_inode()->
+ * ll_iget()->ll_read_inode2()-> ll_update_inode().).
+ * And it will be blocked on the server side because
+ * of 1.
+ * 3. Then the client get the blocking_ast req, cancel
+ * the lock, but being blocked if using ->ilookup5()),
+ * because master inode state is NEW. */
+ master_inode = ilookup5_nowait(inode->i_sb, hash,
+ ll_test_inode_by_fid,
(void *)&lli->lli_pfid);
- if (master_inode) {
- ll_invalidate_negative_children(
- master_inode);
- iput(master_inode);
- }
- } else {
- ll_invalidate_negative_children(inode);
+ if (master_inode) {
+ ll_invalidate_negative_children(master_inode);
+ iput(master_inode);
}
+ } else {
+ ll_invalidate_negative_children(inode);
}
+ }
- if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
- inode->i_sb->s_root != NULL &&
- inode != inode->i_sb->s_root->d_inode)
- ll_invalidate_aliases(inode);
+ if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
+ inode->i_sb->s_root != NULL &&
+ inode != inode->i_sb->s_root->d_inode)
+ ll_invalidate_aliases(inode);
- iput(inode);
+ if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM))
+ forget_all_cached_acls(inode);
+
+ iput(inode);
+ RETURN_EXIT;
+}
+
+/* Check if the given lock may be downgraded instead of canceling and
+ * that convert is really needed. */
+int ll_md_need_convert(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+ struct inode *inode;
+ __u64 wanted = lock->l_policy_data.l_inodebits.cancel_bits;
+ __u64 bits = lock->l_policy_data.l_inodebits.bits & ~wanted;
+ enum ldlm_mode mode = LCK_MINMODE;
+
+ if (!lock->l_conn_export ||
+ !exp_connect_lock_convert(lock->l_conn_export))
+ return 0;
+
+ if (!wanted || !bits || ldlm_is_cancel(lock))
+ return 0;
+
+ /* do not convert locks other than DOM for now */
+ if (!((bits | wanted) & MDS_INODELOCK_DOM))
+ return 0;
+
+ /* Some other lock may already cover the remaining bits, in
+ * which case converting this lock would just leave an extra
+ * lock for the same bits. Check if the client has another
+ * lock with the same bits and the same or lower mode and
+ * don't convert if so.
+ */
+ switch (lock->l_req_mode) {
+ case LCK_PR:
+ mode = LCK_PR;
+ /* fallthrough */
+ case LCK_PW:
+ mode |= LCK_CR;
break;
+ case LCK_CW:
+ mode = LCK_CW;
+ /* fallthrough */
+ case LCK_CR:
+ mode |= LCK_CR;
+ break;
+ default:
+ /* do not convert other modes */
+ return 0;
}
+
+ /* is the lock too old to be converted? */
+ lock_res_and_lock(lock);
+ if (ktime_after(ktime_get(),
+ ktime_add(lock->l_last_used,
+ ktime_set(ns->ns_dirty_age_limit, 0)))) {
+ unlock_res_and_lock(lock);
+ return 0;
+ }
+ unlock_res_and_lock(lock);
+
+ inode = ll_inode_from_resource_lock(lock);
+ ll_have_md_lock(inode, &bits, mode);
+ iput(inode);
+ return !!(bits);
+}
+
+int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ struct lustre_handle lockh;
+ __u64 bits = lock->l_policy_data.l_inodebits.bits;
+ int rc;
+
+ ENTRY;
+
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ {
+ __u64 cancel_flags = LCF_ASYNC;
+
+ if (ll_md_need_convert(lock)) {
+ cancel_flags |= LCF_CONVERT;
+ /* For lock convert some cancel actions may require
+ * this lock with non-dropped canceled bits, e.g. page
+ * flush for DOM lock. So call ll_lock_cancel_bits()
+ * here while canceled bits are still set.
+ */
+ bits = lock->l_policy_data.l_inodebits.cancel_bits;
+ if (bits & MDS_INODELOCK_DOM)
+ ll_lock_cancel_bits(lock, MDS_INODELOCK_DOM);
+ }
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh, cancel_flags);
+ if (rc < 0) {
+ CDEBUG(D_INODE, "ldlm_cli_cancel: rc = %d\n", rc);
+ RETURN(rc);
+ }
+ break;
+ }
+ case LDLM_CB_CANCELING:
+ /* Nothing to do for non-granted locks */
+ if (!ldlm_is_granted(lock))
+ break;
+
+ if (ldlm_is_converting(lock)) {
+ /* this is called on already converted lock, so
+ * ibits has remained bits only and cancel_bits
+ * are bits that were dropped.
+ * Note that DOM lock is handled prior lock convert
+ * and is excluded here.
+ */
+ bits = lock->l_policy_data.l_inodebits.cancel_bits &
+ ~MDS_INODELOCK_DOM;
+ } else {
+ LASSERT(ldlm_is_canceling(lock));
+ }
+ ll_lock_cancel_bits(lock, bits);
+ break;
default:
LBUG();
}
discon_alias = invalid_alias = NULL;
- ll_lock_dcache(inode);
+ spin_lock(&inode->i_lock);
ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry) {
LASSERT(alias != dentry);
dget_dlock(alias);
spin_unlock(&alias->d_lock);
}
- ll_unlock_dcache(inode);
+ spin_unlock(&inode->i_lock);
return alias;
}
static int ll_lookup_it_finish(struct ptlrpc_request *request,
struct lookup_intent *it,
- struct inode *parent, struct dentry **de)
+ struct inode *parent, struct dentry **de,
+ void *secctx, __u32 secctxlen)
{
struct inode *inode = NULL;
__u64 bits = 0;
CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
it->it_disposition);
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
- rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
- if (rc)
- RETURN(rc);
-
- ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
-
- /* We used to query real size from OSTs here, but actually
- this is not needed. For stat() calls size would be updated
- from subsequent do_revalidate()->ll_inode_revalidate_it() in
- 2.4 and
- vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
- Everybody else who needs correct file size would call
- ll_glimpse_size or some equivalent themselves anyway.
- Also see bug 7198. */
+ struct req_capsule *pill = &request->rq_pill;
+ struct mdt_body *body = req_capsule_server_get(pill,
+ &RMF_MDT_BODY);
+
+ rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
+ if (rc)
+ RETURN(rc);
+
+ if (it->it_op & IT_OPEN)
+ ll_dom_finish_open(inode, request, it);
+
+ ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
+
+ /* We used to query real size from OSTs here, but actually
+ * this is not needed. For stat() calls size would be updated
+ * from subsequent do_revalidate()->ll_inode_revalidate_it() in
+ * 2.4 and
+ * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
+ * Everybody else who needs correct file size would call
+ * ll_glimpse_size or some equivalent themselves anyway.
+ * Also see bug 7198.
+ */
+
+ /* If security context was returned by MDT, put it in
+ * inode now to save an extra getxattr from security hooks,
+ * and avoid deadlock.
+ */
+ if (body->mbo_valid & OBD_MD_SECCTX) {
+ secctx = req_capsule_server_get(pill, &RMF_FILE_SECCTX);
+ secctxlen = req_capsule_get_size(pill,
+ &RMF_FILE_SECCTX,
+ RCL_SERVER);
+
+ if (secctxlen)
+ CDEBUG(D_SEC, "server returned security context"
+ " for "DFID"\n",
+ PFID(ll_inode2fid(inode)));
+ }
+
+ if (secctx != NULL && secctxlen != 0) {
+ inode_lock(inode);
+ rc = security_inode_notifysecctx(inode, secctx,
+ secctxlen);
+ inode_unlock(inode);
+ if (rc)
+ CWARN("cannot set security context for "
+ DFID": rc = %d\n",
+ PFID(ll_inode2fid(inode)), rc);
+ }
}
/* Only hash *de if it is unhashed (new dentry).
if (bits & MDS_INODELOCK_LOOKUP)
d_lustre_revalidate(*de);
} else if (!it_disposition(it, DISP_OPEN_CREATE)) {
- /* If file created on server, don't depend on parent UPDATE
- * lock to unhide it. It is left hidden and next lookup can
- * find it in ll_splice_alias.
+ /*
+ * If file was created on the server, the dentry is revalidated
+ * in ll_create_it if the lock allows for it.
*/
/* Check that parent has UPDATE lock. */
struct lookup_intent parent_it = {
struct lu_fid fid = ll_i2info(parent)->lli_fid;
/* If it is striped directory, get the real stripe parent */
- if (unlikely(ll_i2info(parent)->lli_lsm_md != NULL)) {
+ if (unlikely(ll_dir_striped(parent))) {
rc = md_get_fid_from_lsm(ll_i2mdexp(parent),
ll_i2info(parent)->lli_lsm_md,
(*de)->d_name.name,
static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
struct lookup_intent *it,
- void **secctx, __u32 *secctxlen)
+ void **secctx, __u32 *secctxlen,
+ struct pcc_create_attach *pca)
{
struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
struct dentry *save = dentry, *retval;
struct ptlrpc_request *req = NULL;
struct md_op_data *op_data = NULL;
- __u32 opc;
- int rc;
- ENTRY;
+ struct lov_user_md *lum = NULL;
+ __u32 opc;
+ int rc;
+ char secctx_name[XATTR_NAME_MAX + 1];
+
+ ENTRY;
if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
RETURN(ERR_PTR(-ENAMETOOLONG));
}
if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE &&
- dentry->d_sb->s_flags & MS_RDONLY)
+ dentry->d_sb->s_flags & SB_RDONLY)
RETURN(ERR_PTR(-EROFS));
if (it->it_op & IT_CREAT)
*secctx = op_data->op_file_secctx;
if (secctxlen != NULL)
*secctxlen = op_data->op_file_secctx_size;
+ } else {
+ if (secctx != NULL)
+ *secctx = NULL;
+ if (secctxlen != NULL)
+ *secctxlen = 0;
+ }
+
+ /* ask for security context upon intent */
+ if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_OPEN)) {
+ /* get name of security xattr to request to server */
+ rc = ll_listsecurity(parent, secctx_name,
+ sizeof(secctx_name));
+ if (rc < 0) {
+ CDEBUG(D_SEC, "cannot get security xattr name for "
+ DFID": rc = %d\n",
+ PFID(ll_inode2fid(parent)), rc);
+ } else if (rc > 0) {
+ op_data->op_file_secctx_name = secctx_name;
+ op_data->op_file_secctx_name_size = rc;
+ CDEBUG(D_SEC, "'%.*s' is security xattr for "DFID"\n",
+ rc, secctx_name, PFID(ll_inode2fid(parent)));
+ }
+ }
+
+ if (pca && pca->pca_dataset) {
+ struct pcc_dataset *dataset = pca->pca_dataset;
+
+ OBD_ALLOC_PTR(lum);
+ if (lum == NULL)
+ GOTO(out, retval = ERR_PTR(-ENOMEM));
+
+ lum->lmm_magic = LOV_USER_MAGIC_V1;
+ lum->lmm_pattern = LOV_PATTERN_F_RELEASED | LOV_PATTERN_RAID0;
+ op_data->op_data = lum;
+ op_data->op_data_size = sizeof(*lum);
+ op_data->op_archive_id = dataset->pccd_rwid;
+
+ rc = obd_fid_alloc(NULL, ll_i2mdexp(parent), &op_data->op_fid2,
+ op_data);
+ if (rc)
+ GOTO(out, retval = ERR_PTR(rc));
+
+ rc = pcc_inode_create(parent->i_sb, dataset, &op_data->op_fid2,
+ &pca->pca_dentry);
+ if (rc)
+ GOTO(out, retval = ERR_PTR(rc));
+
+ it->it_flags |= MDS_OPEN_PCC;
}
rc = md_intent_lock(ll_i2mdexp(parent), op_data, it, &req,
if (rc < 0)
GOTO(out, retval = ERR_PTR(rc));
- rc = ll_lookup_it_finish(req, it, parent, &dentry);
+ /* dir layout may change */
+ ll_unlock_md_op_lsm(op_data);
+ rc = ll_lookup_it_finish(req, it, parent, &dentry,
+ secctx != NULL ? *secctx : NULL,
+ secctxlen != NULL ? *secctxlen : 0);
if (rc != 0) {
ll_intent_release(it);
GOTO(out, retval = ERR_PTR(rc));
ll_finish_md_op_data(op_data);
}
+ if (lum != NULL)
+ OBD_FREE_PTR(lum);
+
ptlrpc_req_finished(req);
return retval;
}
itp = NULL;
else
itp = ⁢
- de = ll_lookup_it(parent, dentry, itp, NULL, NULL);
+ de = ll_lookup_it(parent, dentry, itp, NULL, NULL, NULL);
if (itp != NULL)
ll_intent_release(itp);
return de;
}
+#ifdef FMODE_CREATED /* added in Linux v4.18-rc1-20-g73a09dd */
+# define ll_is_opened(o, f) ((f)->f_mode & FMODE_OPENED)
+# define ll_finish_open(f, d, o) finish_open((f), (d), NULL)
+# define ll_last_arg
+# define ll_set_created(o, f) \
+do { \
+ (f)->f_mode |= FMODE_CREATED; \
+} while (0)
+
+#else
+# define ll_is_opened(o, f) (*(o))
+# define ll_finish_open(f, d, o) finish_open((f), (d), NULL, (o))
+# define ll_last_arg , int *opened
+# define ll_set_created(o, f) \
+do { \
+ *(o) |= FILE_CREATED; \
+} while (0)
+
+#endif
+
/*
* For cached negative dentry and new dentry, handle lookup/create/open
* together.
*/
static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
- umode_t mode, int *opened)
+ umode_t mode ll_last_arg)
{
struct lookup_intent *it;
struct dentry *de;
long long lookup_flags = LOOKUP_OPEN;
void *secctx = NULL;
__u32 secctxlen = 0;
+ struct ll_sb_info *sbi;
+ struct pcc_create_attach pca = { NULL, NULL };
int rc = 0;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p), file %p,"
"open_flags %x, mode %x opened %d\n",
dentry->d_name.len, dentry->d_name.name,
- PFID(ll_inode2fid(dir)), dir, file, open_flags, mode, *opened);
+ PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
+ ll_is_opened(opened, file));
/* Only negative dentries enter here */
LASSERT(dentry->d_inode == NULL);
if (open_flags & O_CREAT) {
it->it_op |= IT_CREAT;
lookup_flags |= LOOKUP_CREATE;
+ sbi = ll_i2sbi(dir);
+ /* Volatile file is used for HSM restore, so do not use PCC */
+ if (!filename_is_volatile(dentry->d_name.name,
+ dentry->d_name.len, NULL)) {
+ struct pcc_matcher item;
+ struct pcc_dataset *dataset;
+
+ item.pm_uid = from_kuid(&init_user_ns, current_uid());
+ item.pm_gid = from_kgid(&init_user_ns, current_gid());
+ item.pm_projid = ll_i2info(dir)->lli_projid;
+ item.pm_name = &dentry->d_name;
+ dataset = pcc_dataset_match_get(&sbi->ll_pcc_super,
+ &item);
+ pca.pca_dataset = dataset;
+ }
}
it->it_create_mode = (mode & S_IALLUGO) | S_IFREG;
it->it_flags = (open_flags & ~O_ACCMODE) | OPEN_FMODE(open_flags);
it->it_flags &= ~MDS_OPEN_FL_INTERNAL;
/* Dentry added to dcache tree in ll_lookup_it */
- de = ll_lookup_it(dir, dentry, it, &secctx, &secctxlen);
+ de = ll_lookup_it(dir, dentry, it, &secctx, &secctxlen, &pca);
if (IS_ERR(de))
rc = PTR_ERR(de);
else if (de != NULL)
goto out_release;
}
- *opened |= FILE_CREATED;
+ rc = pcc_inode_create_fini(dentry->d_inode, &pca);
+ if (rc) {
+ if (de != NULL)
+ dput(de);
+ GOTO(out_release, rc);
+ }
+
+ ll_set_created(opened, file);
+ } else {
+ /* Open the file with O_CREAT, but the file already
+ * existed on MDT. This may happen in the case that
+ * the LOOKUP ibits lock is revoked and the
+ * corresponding dentry cache is deleted.
+ * i.e. In the current Lustre, the truncate operation
+ * will revoke the LOOKUP ibits lock, and the file
+ * dentry cache will be invalidated. The following open
+ * with O_CREAT flag will call into ->atomic_open, the
+ * file was wrongly thought to be a newly created file and
+ * try to auto cache the file. So after client knows it
+ * is not a DISP_OPEN_CREATE, it should cleanup the
+ * already created PCC copy.
+ */
+ pcc_create_attach_cleanup(dir->i_sb, &pca);
}
+
if (dentry->d_inode && it_disposition(it, DISP_OPEN_OPEN)) {
/* Open dentry. */
if (S_ISFIFO(dentry->d_inode->i_mode)) {
rc = finish_no_open(file, de);
} else {
file->private_data = it;
- rc = finish_open(file, dentry, NULL, opened);
+ rc = ll_finish_open(file, dentry, opened);
/* We dget in ll_splice_alias. finish_open takes
* care of dget for fd open.
*/
} else {
rc = finish_no_open(file, de);
}
+ } else {
+ pcc_create_attach_cleanup(dir->i_sb, &pca);
}
out_release:
it = ll_convert_intent(&nd->intent.open, nd->flags,
(nd->path.mnt->mnt_flags & MNT_READONLY) ||
- (nd->path.mnt->mnt_sb->s_flags & MS_RDONLY));
+ (nd->path.mnt->mnt_sb->s_flags & SB_RDONLY));
if (IS_ERR(it))
RETURN((struct dentry *)it);
}
- de = ll_lookup_it(parent, dentry, it, NULL, NULL);
+ de = ll_lookup_it(parent, dentry, it, NULL, NULL, NULL);
if (de)
dentry = de;
if ((nd->flags & LOOKUP_OPEN) && !IS_ERR(dentry)) { /* Open */
OBD_FREE(it, sizeof(*it));
}
} else {
- de = ll_lookup_it(parent, dentry, NULL, NULL, NULL);
+ de = ll_lookup_it(parent, dentry, NULL, NULL, NULL, NULL);
}
RETURN(de);
void *secctx, __u32 secctxlen)
{
struct inode *inode;
+ __u64 bits = 0;
int rc = 0;
ENTRY;
RETURN(rc);
}
+ ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, inode, it, &bits);
+ if (bits & MDS_INODELOCK_LOOKUP)
+ d_lustre_revalidate(dentry);
+
RETURN(0);
}
LASSERT(body);
if (body->mbo_valid & OBD_MD_FLMTIME &&
- body->mbo_mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting fid "DFID" mtime from %lu to %llu"
- "\n", PFID(ll_inode2fid(inode)),
- LTIME_S(inode->i_mtime), body->mbo_mtime);
- LTIME_S(inode->i_mtime) = body->mbo_mtime;
+ body->mbo_mtime > inode->i_mtime.tv_sec) {
+ CDEBUG(D_INODE,
+ "setting fid " DFID " mtime from %lld to %llu\n",
+ PFID(ll_inode2fid(inode)),
+ (s64)inode->i_mtime.tv_sec, body->mbo_mtime);
+ inode->i_mtime.tv_sec = body->mbo_mtime;
}
if (body->mbo_valid & OBD_MD_FLCTIME &&
- body->mbo_ctime > LTIME_S(inode->i_ctime))
- LTIME_S(inode->i_ctime) = body->mbo_ctime;
+ body->mbo_ctime > inode->i_ctime.tv_sec)
+ inode->i_ctime.tv_sec = body->mbo_ctime;
}
static int ll_new_node(struct inode *dir, struct dentry *dchild,
from_kuid(&init_user_ns, current_fsuid()),
from_kgid(&init_user_ns, current_fsgid()),
cfs_curproc_cap_pack(), rdev, &request);
- if (err < 0 && err != -EREMOTE)
- GOTO(err_exit, err);
-
- /* If the client doesn't know where to create a subdirectory (or
- * in case of a race that sends the RPC to the wrong MDS), the
- * MDS will return -EREMOTE and the client will fetch the layout
- * of the directory, then create the directory on the right MDT. */
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 14, 58, 0)
+ /*
+ * server < 2.12.58 doesn't pack default LMV in intent_getattr reply,
+ * fetch default LMV here.
+ */
if (unlikely(err == -EREMOTE)) {
struct ll_inode_info *lli = ll_i2info(dir);
struct lmv_user_md *lum;
err2 = ll_dir_getstripe(dir, (void **)&lum, &lumsize, &request,
OBD_MD_DEFAULT_MEA);
+ ll_finish_md_op_data(op_data);
+ op_data = NULL;
if (err2 == 0) {
- /* Update stripe_offset and retry */
- lli->lli_def_stripe_offset = lum->lum_stripe_offset;
- } else if (err2 == -ENODATA &&
- lli->lli_def_stripe_offset != -1) {
- /* If there are no default stripe EA on the MDT, but the
+ struct lustre_md md = { NULL };
+
+ md.body = req_capsule_server_get(&request->rq_pill,
+ &RMF_MDT_BODY);
+ if (!md.body)
+ GOTO(err_exit, err = -EPROTO);
+
+ OBD_ALLOC_PTR(md.default_lmv);
+ if (!md.default_lmv)
+ GOTO(err_exit, err = -ENOMEM);
+
+ md.default_lmv->lsm_md_magic = lum->lum_magic;
+ md.default_lmv->lsm_md_stripe_count =
+ lum->lum_stripe_count;
+ md.default_lmv->lsm_md_master_mdt_index =
+ lum->lum_stripe_offset;
+ md.default_lmv->lsm_md_hash_type = lum->lum_hash_type;
+
+ err = ll_update_inode(dir, &md);
+ md_free_lustre_md(sbi->ll_md_exp, &md);
+ if (err)
+ GOTO(err_exit, err);
+ } else if (err2 == -ENODATA && lli->lli_default_lsm_md) {
+ /*
+ * If there are no default stripe EA on the MDT, but the
* client has default stripe, then it probably means
- * default stripe EA has just been deleted. */
- lli->lli_def_stripe_offset = -1;
+ * default stripe EA has just been deleted.
+ */
+ down_write(&lli->lli_lsm_sem);
+ if (lli->lli_default_lsm_md)
+ OBD_FREE_PTR(lli->lli_default_lsm_md);
+ lli->lli_default_lsm_md = NULL;
+ up_write(&lli->lli_lsm_sem);
} else {
GOTO(err_exit, err);
}
ptlrpc_req_finished(request);
request = NULL;
- ll_finish_md_op_data(op_data);
goto again;
}
+#endif
+
+ if (err < 0)
+ GOTO(err_exit, err);
ll_update_times(request, dir);
dev_t rdev)
{
struct qstr *name = &dchild->d_name;
+ ktime_t kstart = ktime_get();
int err;
- ENTRY;
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p) mode %o dev %x\n",
name->len, name->name, PFID(ll_inode2fid(dir)), dir,
- mode, rdev);
+ mode, rdev);
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
- switch (mode & S_IFMT) {
- case 0:
- mode |= S_IFREG; /* for mode = 0 case, fallthrough */
- case S_IFREG:
- case S_IFCHR:
- case S_IFBLK:
- case S_IFIFO:
- case S_IFSOCK:
+ switch (mode & S_IFMT) {
+ case 0:
+ mode |= S_IFREG;
+ /* fallthrough */
+ case S_IFREG:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
err = ll_new_node(dir, dchild, NULL, mode, old_encode_dev(rdev),
LUSTRE_OPC_MKNOD);
- break;
- case S_IFDIR:
- err = -EPERM;
- break;
- default:
- err = -EINVAL;
- }
+ break;
+ case S_IFDIR:
+ err = -EPERM;
+ break;
+ default:
+ err = -EINVAL;
+ }
- if (!err)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKNOD, 1);
+ if (!err)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKNOD,
+ ktime_us_delta(ktime_get(), kstart));
- RETURN(err);
+ RETURN(err);
}
#ifdef HAVE_IOP_ATOMIC_OPEN
static int ll_create_nd(struct inode *dir, struct dentry *dentry,
umode_t mode, bool want_excl)
{
+ ktime_t kstart = ktime_get();
int rc;
CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_CREATE_FILE_PAUSE, cfs_fail_val);
* volatile file name, so we use ll_mknod() here. */
rc = ll_mknod(dir, dentry, mode, 0);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1);
-
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, unhashed %d\n",
dentry->d_name.len, dentry->d_name.name, d_unhashed(dentry));
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE,
+ ktime_us_delta(ktime_get(), kstart));
+
return rc;
}
#else /* !HAVE_IOP_ATOMIC_OPEN */
{
struct ll_dentry_data *lld = ll_d2d(dentry);
struct lookup_intent *it = NULL;
+ ktime_t kstart = ktime_get();
int rc;
CFS_FAIL_TIMEOUT(OBD_FAIL_LLITE_CREATE_FILE_PAUSE, cfs_fail_val);
filp = lookup_instantiate_filp(nd, dentry, NULL);
if (IS_ERR(filp))
rc = PTR_ERR(filp);
- }
+ }
out:
- ll_intent_release(it);
- OBD_FREE(it, sizeof(*it));
+ ll_intent_release(it);
+ OBD_FREE(it, sizeof(*it));
- if (!rc)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1);
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE,
+ ktime_us_delta(ktime_get(), kstart));
- return rc;
+ return rc;
}
#endif /* HAVE_IOP_ATOMIC_OPEN */
const char *oldpath)
{
struct qstr *name = &dchild->d_name;
+ ktime_t kstart = ktime_get();
int err;
ENTRY;
err = ll_new_node(dir, dchild, oldpath, S_IFLNK | S_IRWXUGO, 0,
LUSTRE_OPC_SYMLINK);
- if (!err)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
+ if (!err)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK,
+ ktime_us_delta(ktime_get(), kstart));
- RETURN(err);
+ RETURN(err);
}
static int ll_link(struct dentry *old_dentry, struct inode *dir,
struct ll_sb_info *sbi = ll_i2sbi(dir);
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
+ ktime_t kstart = ktime_get();
int err;
ENTRY;
"target=%.*s\n", PFID(ll_inode2fid(src)), src,
PFID(ll_inode2fid(dir)), dir, name->len, name->name);
- op_data = ll_prep_md_op_data(NULL, src, dir, name->name, name->len,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ op_data = ll_prep_md_op_data(NULL, src, dir, name->name, name->len,
+ 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
- err = md_link(sbi->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (err)
- GOTO(out, err);
+ err = md_link(sbi->ll_md_exp, op_data, &request);
+ ll_finish_md_op_data(op_data);
+ if (err)
+ GOTO(out, err);
- ll_update_times(request, dir);
- ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1);
- EXIT;
+ ll_update_times(request, dir);
+ ll_stats_ops_tally(sbi, LPROC_LL_LINK,
+ ktime_us_delta(ktime_get(), kstart));
+ EXIT;
out:
- ptlrpc_req_finished(request);
- RETURN(err);
+ ptlrpc_req_finished(request);
+ RETURN(err);
}
static int ll_mkdir(struct inode *dir, struct dentry *dchild, ll_umode_t mode)
{
struct qstr *name = &dchild->d_name;
- int err;
- ENTRY;
+ ktime_t kstart = ktime_get();
+ int err;
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p)\n",
name->len, name->name, PFID(ll_inode2fid(dir)), dir);
err = ll_new_node(dir, dchild, NULL, mode, 0, LUSTRE_OPC_MKDIR);
if (err == 0)
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR,
+ ktime_us_delta(ktime_get(), kstart));
RETURN(err);
}
static int ll_rmdir(struct inode *dir, struct dentry *dchild)
{
struct qstr *name = &dchild->d_name;
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int rc;
- ENTRY;
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data;
+ ktime_t kstart = ktime_get();
+ int rc;
+
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p)\n",
name->len, name->name, PFID(ll_inode2fid(dir)), dir);
op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
op_data->op_fid2 = op_data->op_fid3;
- rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
- ll_finish_md_op_data(op_data);
- if (rc == 0) {
- ll_update_times(request, dir);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
- }
+ rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
+ ll_finish_md_op_data(op_data);
+ if (!rc)
+ ll_update_times(request, dir);
- ptlrpc_req_finished(request);
- RETURN(rc);
+ ptlrpc_req_finished(request);
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR,
+ ktime_us_delta(ktime_get(), kstart));
+ RETURN(rc);
}
/**
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
+ ktime_t kstart = ktime_get();
int rc;
ENTRY;
op_data->op_cli_flags |= CLI_RM_ENTRY;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
- if (rc == 0) {
+ if (!rc)
ll_update_times(request, dir);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
- }
ptlrpc_req_finished(request);
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR,
+ ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
}
static int ll_unlink(struct inode *dir, struct dentry *dchild)
{
struct qstr *name = &dchild->d_name;
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- int rc;
- ENTRY;
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data;
+ struct mdt_body *body;
+ ktime_t kstart = ktime_get();
+ int rc;
+
+ ENTRY;
+
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, dir="DFID"(%p)\n",
name->len, name->name, PFID(ll_inode2fid(dir)), dir);
- /*
- * XXX: unlink bind mountpoint maybe call to here,
- * just check it as vfs_unlink does.
- */
+ /*
+ * XXX: unlink bind mountpoint maybe call to here,
+ * just check it as vfs_unlink does.
+ */
if (unlikely(d_mountpoint(dchild)))
RETURN(-EBUSY);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- if (dchild->d_inode != NULL)
- op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
+ op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
op_data->op_fid2 = op_data->op_fid3;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
if (rc)
GOTO(out, rc);
- ll_update_times(request, dir);
- ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_UNLINK, 1);
+ /*
+ * The server puts attributes in on the last unlink, use them to update
+ * the link count so the inode can be freed immediately.
+ */
+ body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
+ if (body->mbo_valid & OBD_MD_FLNLINK)
+ set_nlink(dchild->d_inode, body->mbo_nlink);
- out:
- ptlrpc_req_finished(request);
- RETURN(rc);
+ ll_update_times(request, dir);
+
+out:
+ ptlrpc_req_finished(request);
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_UNLINK,
+ ktime_us_delta(ktime_get(), kstart));
+ RETURN(rc);
}
static int ll_rename(struct inode *src, struct dentry *src_dchild,
struct ptlrpc_request *request = NULL;
struct ll_sb_info *sbi = ll_i2sbi(src);
struct md_op_data *op_data;
+ ktime_t kstart = ktime_get();
int err;
ENTRY;
if (tgt_dchild->d_inode != NULL)
op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode);
- err = md_rename(sbi->ll_md_exp, op_data,
- src_name->name, src_name->len,
- tgt_name->name, tgt_name->len, &request);
- ll_finish_md_op_data(op_data);
- if (!err) {
- ll_update_times(request, src);
- ll_update_times(request, tgt);
- ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
- }
+ err = md_rename(sbi->ll_md_exp, op_data,
+ src_name->name, src_name->len,
+ tgt_name->name, tgt_name->len, &request);
+ ll_finish_md_op_data(op_data);
+ if (!err) {
+ ll_update_times(request, src);
+ ll_update_times(request, tgt);
+ }
- ptlrpc_req_finished(request);
+ ptlrpc_req_finished(request);
- if (err == 0)
+ if (!err) {
d_move(src_dchild, tgt_dchild);
+ ll_stats_ops_tally(sbi, LPROC_LL_RENAME,
+ ktime_us_delta(ktime_get(), kstart));
+ }
RETURN(err);
}