1) statahead should not alias dentry with inode; that should be done by the VFS layer with the parent dir's i_mutex held
2) code cleanup
i=tom.wang
i=jinshan.xiong
struct md_enqueue_info *,
struct ldlm_enqueue_info *);
- int (*m_revalidate_lock)(struct obd_export *,
- struct lookup_intent *,
- struct lu_fid *);
+ int (*m_revalidate_lock)(struct obd_export *, struct lookup_intent *,
+ struct lu_fid *, __u32 *);
/*
* NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
static inline int md_revalidate_lock(struct obd_export *exp,
struct lookup_intent *it,
- struct lu_fid *fid)
+ struct lu_fid *fid, __u32 *bits)
{
int rc;
ENTRY;
EXP_CHECK_MD_OP(exp, revalidate_lock);
EXP_MD_COUNTER_INCREMENT(exp, revalidate_lock);
- rc = MDP(exp->exp_obd, revalidate_lock)(exp, it, fid);
+ rc = MDP(exp->exp_obd, revalidate_lock)(exp, it, fid, bits);
RETURN(rc);
}
}
}
- if (it->it_op == IT_GETATTR)
+ if (it->it_op == IT_GETATTR) {
first = ll_statahead_enter(parent, &de, 0);
+ if (first == 1) {
+ ll_statahead_exit(parent, de, 1);
+ ll_finish_md_op_data(op_data);
+ GOTO(out, rc = 1);
+ }
+ }
do_lock:
it->it_create_mode &= ~current->fs->umask;
"inode %p refc %d\n", de->d_name.len,
de->d_name.name, de, de->d_parent, de->d_inode,
atomic_read(&de->d_count));
- ll_lookup_finish_locks(it, de);
- lock_dentry(de);
- de->d_flags &= ~DCACHE_LUSTRE_INVALID;
- unlock_dentry(de);
+ if (first != 1) {
+ if (de->d_flags & DCACHE_LUSTRE_INVALID) {
+ lock_dentry(de);
+ de->d_flags &= ~DCACHE_LUSTRE_INVALID;
+ unlock_dentry(de);
+ }
+ ll_lookup_finish_locks(it, de);
+ }
}
RETURN(rc);
*/
if (it && it->it_op == IT_GETATTR && rc == 1) {
first = ll_statahead_enter(parent, &de, 0);
- if (!first)
+ if (first >= 0)
ll_statahead_exit(parent, de, 1);
else if (first == -EEXIST)
ll_statahead_mark(parent, de);
if (request)
ptlrpc_req_finished(request);
if (rc < 0) {
- CERROR("lock enqueue: rc: %d\n", rc);
+ CERROR("lock enqueue: "DFID" at "LPU64": rc %d\n",
+ PFID(ll_inode2fid(dir)), hash, rc);
return ERR_PTR(rc);
}
} else {
ldlm_lock_dump_handle(D_OTHER, &lockh);
page = ll_dir_page_locate(dir, hash, &start, &end);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ CERROR("dir page locate: "DFID" at "LPU64": rc %ld\n",
+ PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
GOTO(out_unlock, page);
+ }
if (page != NULL) {
/*
page = read_cache_page(mapping, hash_x_index((unsigned long)hash),
(filler_t*)mapping->a_ops->readpage, NULL);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ CERROR("read cache page: "DFID" at "LPU64": rc %ld\n",
+ PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
GOTO(out_unlock, page);
+ }
wait_on_page(page);
(void)kmap(page);
- if (!PageUptodate(page))
+ if (!PageUptodate(page)) {
+ CERROR("page not updated: "DFID" at "LPU64": rc %d\n",
+ PFID(ll_inode2fid(dir)), hash, -5);
goto fail;
+ }
if (!PageChecked(page))
ll_check_page(dir, page);
- if (PageError(page))
+ if (PageError(page)) {
+ CERROR("page error: "DFID" at "LPU64": rc %d\n",
+ PFID(ll_inode2fid(dir)), hash, -5);
goto fail;
+ }
hash_collision:
dp = page_address(page);
/* metadata stat-ahead */
unsigned int ll_sa_max; /* max statahead RPCs */
- unsigned int ll_sa_wrong; /* statahead thread stopped for
- * low hit ratio */
- unsigned int ll_sa_total; /* statahead thread started
+ atomic_t ll_sa_total; /* statahead thread started
* count */
- unsigned long long ll_sa_blocked; /* ls count waiting for
- * statahead */
- unsigned long long ll_sa_cached; /* ls count got in cache */
- unsigned long long ll_sa_hit; /* hit count */
- unsigned long long ll_sa_miss; /* miss count */
+ atomic_t ll_sa_wrong; /* statahead thread stopped for
+ * low hit ratio */
dev_t ll_sdev_orig; /* save s_dev before assign for
* clustred nfs */
struct lookup_intent *ll_convert_intent(struct open_intent *oit,
int lookup_flags);
#endif
+void ll_lookup_it_alias(struct dentry **de, struct inode *inode, __u32 bits);
int ll_lookup_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it, void *data);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+ struct lookup_intent *it, void *data,
+ struct inode **alias);
/* llite/rw.c */
int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
/* metadata statahead is enabled by default */
sbi->ll_sa_max = LL_SA_RPC_DEF;
+ atomic_set(&sbi->ll_sa_total, 0);
+ atomic_set(&sbi->ll_sa_wrong, 0);
RETURN(sbi);
}
struct ll_sb_info *sbi = ll_s2sbi(sb);
return snprintf(page, count,
- "statahead wrong: %u\n"
"statahead total: %u\n"
- "ls blocked: %llu\n"
- "ls cached: %llu\n"
- "hit count: %llu\n"
- "miss count: %llu\n",
- sbi->ll_sa_wrong,
- sbi->ll_sa_total,
- sbi->ll_sa_blocked,
- sbi->ll_sa_cached,
- sbi->ll_sa_hit,
- sbi->ll_sa_miss);
+ "statahead wrong: %u\n",
+ atomic_read(&sbi->ll_sa_total),
+ atomic_read(&sbi->ll_sa_wrong));
}
static int ll_rd_lazystatfs(char *page, char **start, off_t off,
return de;
}
+void ll_lookup_it_alias(struct dentry **de, struct inode *inode, __u32 bits)
+{
+ struct dentry *save = *de;
+ ENTRY;
+
+ ll_dops_init(*de, 1);
+ *de = ll_find_alias(inode, *de);
+ if (*de != save) {
+ struct ll_dentry_data *lld = ll_d2d(*de);
+
+ /* just make sure the ll_dentry_data is ready */
+ if (unlikely(lld == NULL)) {
+ ll_set_dd(*de);
+ lld = ll_d2d(*de);
+ if (likely(lld != NULL))
+ lld->lld_sa_generation = 0;
+ }
+ }
+ /* we have lookup lock - unhide dentry */
+ if (bits & MDS_INODELOCK_LOOKUP) {
+ lock_dentry(*de);
+ (*de)->d_flags &= ~DCACHE_LUSTRE_INVALID;
+ unlock_dentry(*de);
+ }
+ EXIT;
+}
+
int ll_lookup_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it, void *data)
+ struct lookup_intent *it, void *data,
+ struct inode **alias)
{
struct it_cb_data *icbd = data;
struct dentry **de = icbd->icbd_childp;
/* NB 1 request reference will be taken away by ll_intent_lock()
* when I return */
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
- struct dentry *save = *de;
__u32 bits;
rc = ll_prep_inode(&inode, request, (*de)->d_sb);
md_set_lock_data(sbi->ll_md_exp,
&it->d.lustre.it_lock_handle, inode, &bits);
+ if (alias != NULL) {
+ *alias = inode;
+ RETURN(0);
+ }
+
/* We used to query real size from OSTs here, but actually
this is not needed. For stat() calls size would be updated
from subsequent do_revalidate()->ll_inode_revalidate_it() in
Everybody else who needs correct file size would call
cl_glimpse_size or some equivalent themselves anyway.
Also see bug 7198. */
-
- ll_dops_init(*de, 1);
- *de = ll_find_alias(inode, *de);
- if (*de != save) {
- struct ll_dentry_data *lld = ll_d2d(*de);
-
- /* just make sure the ll_dentry_data is ready */
- if (unlikely(lld == NULL)) {
- ll_set_dd(*de);
- lld = ll_d2d(*de);
- if (likely(lld != NULL))
- lld->lld_sa_generation = 0;
- }
- }
- /* we have lookup look - unhide dentry */
- if (bits & MDS_INODELOCK_LOOKUP) {
- lock_dentry(*de);
- (*de)->d_flags &= ~(DCACHE_LUSTRE_INVALID);
- unlock_dentry(*de);
- }
+ ll_lookup_it_alias(de, inode, bits);
} else {
ll_dops_init(*de, 1);
/* Check that parent has UPDATE lock. If there is none, we
if (rc < 0)
GOTO(out, retval = ERR_PTR(rc));
- rc = ll_lookup_it_finish(req, it, &icbd);
+ rc = ll_lookup_it_finish(req, it, &icbd, NULL);
if (rc != 0) {
ll_intent_release(it);
GOTO(out, retval = ERR_PTR(rc));
int se_stat;
struct ptlrpc_request *se_req;
struct md_enqueue_info *se_minfo;
+ struct dentry *se_dentry;
+ struct inode *se_inode;
};
enum {
(sai->sai_consecutive_miss > 8));
}
+static void ll_sai_entry_free(struct ll_sai_entry *entry)
+{
+ struct dentry *dentry = entry->se_dentry;
+ struct inode *inode = entry->se_inode;
+
+ if (dentry) {
+ entry->se_dentry = NULL;
+ dput(dentry);
+ }
+ if (inode) {
+ entry->se_inode = NULL;
+ iput(inode);
+ }
+ LASSERT(cfs_list_empty(&entry->se_list));
+ OBD_FREE_PTR(entry);
+}
+
/**
* process the deleted entry's member and free the entry.
* (1) release intent
static void ll_sai_entry_cleanup(struct ll_sai_entry *entry, int free)
{
struct md_enqueue_info *minfo = entry->se_minfo;
- struct ptlrpc_request *req = entry->se_req;
+ struct ptlrpc_request *req = entry->se_req;
ENTRY;
if (minfo) {
entry->se_req = NULL;
ptlrpc_req_finished(req);
}
- if (free) {
- LASSERT(cfs_list_empty(&entry->se_list));
- OBD_FREE_PTR(entry);
- }
+ if (free)
+ ll_sai_entry_free(entry);
EXIT;
}
CDEBUG(D_READA, "alloc sai entry %p index %u\n",
entry, index);
entry->se_index = index;
- entry->se_stat = SA_ENTRY_UNSTATED;
+ entry->se_stat = SA_ENTRY_UNSTATED;
cfs_spin_lock(&lli->lli_lock);
cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
* delete it from sai_entries_stated head when fini, it need not
* to process entry's member.
*/
-static void ll_sai_entry_fini(struct ll_statahead_info *sai)
+static int ll_sai_entry_fini(struct ll_statahead_info *sai)
{
struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
struct ll_sai_entry *entry;
+ int rc = 0;
ENTRY;
cfs_spin_lock(&lli->lli_lock);
entry = cfs_list_entry(sai->sai_entries_stated.next,
struct ll_sai_entry, se_list);
if (entry->se_index < sai->sai_index_next) {
- cfs_list_del(&entry->se_list);
- OBD_FREE_PTR(entry);
+ cfs_list_del_init(&entry->se_list);
+ rc = entry->se_stat;
+ ll_sai_entry_free(entry);
}
- } else
+ } else {
LASSERT(sa_is_stopped(sai));
+ }
cfs_spin_unlock(&lli->lli_lock);
- EXIT;
+ RETURN(rc);
}
/**
entry->se_req = ptlrpc_request_addref(req);
entry->se_minfo = minfo;
RETURN(entry);
- } else if (entry->se_index > index)
+ } else if (entry->se_index > index) {
RETURN(NULL);
+ }
}
}
RETURN(NULL);
if (!cfs_list_empty(&entry->se_list))
cfs_list_del_init(&entry->se_list);
+ /* stale entry */
if (unlikely(entry->se_index < sai->sai_index_next)) {
cfs_spin_unlock(&lli->lli_lock);
- OBD_FREE_PTR(entry);
+ ll_sai_entry_free(entry);
RETURN(0);
}
LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
- /*
- * XXX: No fid in reply, this is probaly cross-ref case.
- * SA can't handle it yet.
- */
+ /* XXX: No fid in reply, this is probably a cross-ref case.
+ * SA can't handle it yet. */
if (body->valid & OBD_MD_MDS)
GOTO(out, rc = -EAGAIN);
- rc = ll_lookup_it_finish(req, it, &icbd);
- if (!rc)
- /*
- * Here dentry->d_inode might be NULL,
- * because the entry may have been removed before
- * we start doing stat ahead.
- */
- ll_lookup_finish_locks(it, dentry);
-
- if (dentry != save) {
- minfo->mi_dentry = dentry;
- dput(save);
- }
+ /* Here dentry->d_inode might be NULL, because the entry may
+ * have been removed before we start doing stat ahead. */
+
+ /* BUG 15962, 21739: since statahead thread does not hold
+ * parent's i_mutex, it can not alias the dentry to inode.
+ * Here we just create/update inode in memory, and let the
+ * main "ls -l" thread to alias such dentry to the inode with
+ * parent's i_mutex held.
+ * On the other hand, we still hold ldlm ibits lock for the inode,
+ * so to allow other operations to cancel such lock in time,
+ * we should drop the ldlm lock reference count, then the main
+ * "ls -l" thread should check/get such ldlm ibits lock before
+ * aliasing such dentry to the inode later. If we don't do such
+ * drop here, it may cause deadlock with i_mutex held by
+ * others, just like bug 21739. */
+ rc = ll_lookup_it_finish(req, it, &icbd, &entry->se_inode);
+ if (entry->se_inode != NULL)
+ entry->se_dentry = dget(dentry);
+ LASSERT(dentry == save);
+ ll_intent_drop_lock(it);
} else {
/*
* revalidate.
EXIT;
out:
+ /* The "ll_sai_entry_to_stated()" will drop related ldlm ibits lock
+ * reference count by calling "ll_intent_drop_lock()" regardless of
+ * whether the above operations succeeded or failed. Do not worry
+ * about calling "ll_intent_drop_lock()" more than once. */
if (likely(ll_sai_entry_to_stated(sai, entry)))
cfs_waitq_signal(&sai->sai_waitq);
return rc;
dentry->d_name.len, dentry->d_name.name, rc);
cfs_spin_lock(&lli->lli_lock);
+ /* stale entry */
if (unlikely(lli->lli_sai == NULL ||
lli->lli_sai->sai_generation != minfo->mi_generation)) {
cfs_spin_unlock(&lli->lli_lock);
sai = ll_sai_get(lli->lli_sai);
entry = ll_sai_entry_set(sai,
(unsigned int)(long)minfo->mi_cbdata,
- rc ? SA_ENTRY_UNSTATED :
- SA_ENTRY_STATED, req, minfo);
+ rc < 0 ? rc : SA_ENTRY_STATED, req,
+ minfo);
LASSERT(entry != NULL);
if (likely(sa_is_running(sai))) {
ll_sai_entry_to_received(sai, entry);
int rc;
ENTRY;
- if (inode == NULL)
+ if (unlikely(inode == NULL))
RETURN(1);
if (d_mountpoint(dentry))
RETURN(1);
- if (dentry == dentry->d_sb->s_root)
+ if (unlikely(dentry == dentry->d_sb->s_root))
RETURN(1);
- rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode));
+ rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
+ NULL);
if (rc == 1) {
ll_intent_release(&it);
RETURN(1);
if (rc) {
CDEBUG(D_READA, "set sai entry %p index %u stat %d rc %d\n",
se, se->se_index, se->se_stat, rc);
- se->se_stat = rc;
+ se->se_stat = rc < 0 ? rc : SA_ENTRY_STATED;
if (ll_sai_entry_to_stated(sai, se))
cfs_waitq_signal(&sai->sai_waitq);
} else {
cfs_daemonize(pname);
}
- sbi->ll_sa_total++;
+ atomic_inc(&sbi->ll_sa_total);
cfs_spin_lock(&lli->lli_lock);
thread->t_flags = SVC_RUNNING;
cfs_spin_unlock(&lli->lli_lock);
if (IS_ERR(page)) {
rc = PTR_ERR(page);
- CDEBUG(D_READA, "error reading dir "DFID" at "LPU64"/%u: rc %d\n",
- PFID(ll_inode2fid(dir)), pos,
- sai->sai_index, rc);
+ CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
+ "/%u: [rc %d] [parent %u]\n",
+ PFID(ll_inode2fid(dir)), pos, sai->sai_index,
+ rc, lli->lli_opendir_pid);
break;
}
char *name = ent->lde_name;
int namelen = le16_to_cpu(ent->lde_namelen);
- if (namelen == 0)
+ if (unlikely(namelen == 0))
/*
* Skip dummy record.
*/
struct lu_dirent *ent;
if (IS_ERR(page)) {
+ struct ll_inode_info *lli = ll_i2info(dir);
+
rc = PTR_ERR(page);
- CERROR("error reading dir "DFID" at "LPU64": rc %d\n",
- PFID(ll_inode2fid(dir)), pos, rc);
+ CERROR("error reading dir "DFID" at "LPU64": "
+ "[rc %d] [parent %u]\n",
+ PFID(ll_inode2fid(dir)), pos,
+ rc, lli->lli_opendir_pid);
break;
}
return rc;
}
+static int is_same_dentry(struct dentry *d1, struct dentry *d2)
+{
+ if (unlikely(d1 == d2))
+ return 1;
+ if (d1->d_name.len == d2->d_name.len &&
+ memcmp(d1->d_name.name, d2->d_name.name, d1->d_name.len) == 0)
+ return 1;
+ return 0;
+}
+
/**
* Start statahead thread if this is the first dir entry.
* Otherwise if a thread is started already, wait it until it is ahead of me.
- * \retval 0 -- stat ahead thread process such dentry, for lookup, it miss
- * \retval 1 -- stat ahead thread process such dentry, for lookup, it hit
+ * \retval 0 -- stat ahead thread process such dentry, miss for lookup
+ * \retval 1 -- stat ahead thread process such dentry, hit for any case
* \retval -EEXIST -- stat ahead thread started, and this is the first dentry
* \retval -EBADFD -- statahead thread exit and not dentry available
* \retval -EAGAIN -- try to stat by caller
sai = lli->lli_sai;
if (sai) {
- struct ll_sb_info *sbi;
-
if (unlikely(sa_is_stopped(sai) &&
cfs_list_empty(&sai->sai_entries_stated)))
RETURN(-EBADFD);
}
}
- sbi = ll_i2sbi(dir);
- if (ll_sai_entry_stated(sai)) {
- sbi->ll_sa_cached++;
- } else {
- sbi->ll_sa_blocked++;
+ if (!ll_sai_entry_stated(sai)) {
/*
* thread started already, avoid double-stat.
*/
ll_sai_entry_stated(sai) ||
sa_is_stopped(sai),
&lwi);
+ if (unlikely(rc == -EINTR))
+ RETURN(rc);
+ }
+
+ if (ll_sai_entry_stated(sai)) {
+ struct ll_sai_entry *entry;
+
+ entry = cfs_list_entry(sai->sai_entries_stated.next,
+ struct ll_sai_entry, se_list);
+ /* This is for statahead lookup */
+ if (entry->se_inode != NULL) {
+ struct lookup_intent it = {.it_op = IT_GETATTR};
+ struct dentry *dchild = entry->se_dentry;
+ struct inode *ichild = entry->se_inode;
+ int found = 0;
+ __u32 bits;
+
+ LASSERT(dchild != *dentryp);
+
+ if (!lookup)
+ mutex_lock(&dir->i_mutex);
+
+ rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
+ ll_inode2fid(ichild),
+ &bits);
+ if (rc == 1) {
+ struct dentry *save = dchild;
+
+ ll_lookup_it_alias(&dchild, ichild,
+ bits);
+ ll_lookup_finish_locks(&it, dchild);
+ if (dchild != save)
+ dput(save);
+ found = is_same_dentry(dchild,
+ *dentryp);
+ } else {
+ /* Someone has canceled related ldlm
+ * lock before the real "revalidate"
+ * using it.
+ * Drop the inode reference count held
+ * by interpreter. */
+ iput(ichild);
+ }
+
+ if (!lookup)
+ mutex_unlock(&dir->i_mutex);
+
+ entry->se_dentry = NULL;
+ entry->se_inode = NULL;
+ if (found) {
+ if (lookup) {
+ LASSERT(dchild != *dentryp);
+ /* VFS will drop the reference
+ * count for dchild and *dentryp
+ * by itself. */
+ *dentryp = dchild;
+ } else {
+ LASSERT(dchild == *dentryp);
+ /* Drop the dentry reference
+ * count held by statahead. */
+ dput(dchild);
+ }
+ RETURN(1);
+ } else {
+ /* Drop the dentry reference count held
+ * by statahead. */
+ dput(dchild);
+ }
+ }
}
if (lookup) {
/*
* do nothing for revalidate.
*/
- RETURN(rc);
+ RETURN(0);
}
/* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
sai->sai_inode = igrab(dir);
if (unlikely(sai->sai_inode == NULL)) {
- CWARN("Do not start stat ahead on dying inode "DFID" .\n",
+ CWARN("Do not start stat ahead on dying inode "DFID"\n",
PFID(&lli->lli_fid));
OBD_FREE_PTR(sai);
GOTO(out, rc = -ESTALE);
struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
CWARN("Race condition, someone changed %.*s just now: "
- "old parent "DFID", new parent "DFID" .\n",
+ "old parent "DFID", new parent "DFID"\n",
(*dentryp)->d_name.len, (*dentryp)->d_name.name,
PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
dput(parent);
struct ll_statahead_info *sai;
struct ll_sb_info *sbi;
struct ll_dentry_data *ldd = ll_d2d(dentry);
+ int rc;
ENTRY;
LASSERT(dir != NULL);
LASSERT(sai != NULL);
sbi = ll_i2sbi(dir);
- if (result >= 1) {
- sbi->ll_sa_hit++;
+ rc = ll_sai_entry_fini(sai);
+ /* rc == -ENOENT means such dentry was removed just between statahead
+ * readdir and pre-fetch, count it as hit.
+ *
+ * result == -ENOENT has two meanings:
+ * 1. such dentry was removed just between statahead pre-fetch and
+ * main process stat of such dentry.
+ * 2. main process stat of a non-existent dentry.
+ * We cannot distinguish these two cases, just count them as miss. */
+ if (result >= 1 || unlikely(rc == -ENOENT)) {
sai->sai_hit++;
sai->sai_consecutive_miss = 0;
sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
} else {
- sbi->ll_sa_miss++;
sai->sai_miss++;
sai->sai_consecutive_miss++;
if (sa_low_hit(sai) && sa_is_running(sai)) {
- sbi->ll_sa_wrong++;
+ atomic_inc(&sbi->ll_sa_wrong);
CDEBUG(D_READA, "Statahead for dir "DFID" hit ratio "
"too low: hit/miss %u/%u, sent/replied %u/%u, "
"stopping statahead thread: pid %d\n",
if (!sa_is_stopped(sai))
cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
- ll_sai_entry_fini(sai);
if (likely(ldd != NULL))
ldd->lld_sa_generation = sai->sai_generation;
RETURN(rc);
}
-int lmv_revalidate_lock(struct obd_export *exp,
- struct lookup_intent *it,
- struct lu_fid *fid)
+int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
+ struct lu_fid *fid, __u32 *bits)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
- rc = md_revalidate_lock(tgt->ltd_exp, it, fid);
+ rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
RETURN(rc);
}
;
}
-int mdc_revalidate_lock(struct obd_export *exp,
- struct lookup_intent *it,
- struct lu_fid *fid);
+int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
+ struct lu_fid *fid, __u32 *bits);
int mdc_intent_getattr_async(struct obd_export *exp,
struct md_enqueue_info *minfo,
RETURN(rc);
}
+int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
+ struct lu_fid *fid, __u32 *bits)
+{
+ /* We could just return 1 immediately, but since we should only
+ * be called in revalidate_it if we already have a lock, let's
+ * verify that. */
+ struct ldlm_res_id res_id;
+ struct lustre_handle lockh;
+ ldlm_policy_data_t policy;
+ ldlm_mode_t mode;
+ ENTRY;
+
+ fid_build_reg_res_name(fid, &res_id);
+ /* As not all attributes are kept under update lock, e.g.
+ owner/group/acls are under lookup lock, we need both
+ ibits for GETATTR. */
+ policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
+ MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
+
+ mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
+ LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_IBITS,
+ &policy, LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh, 0);
+ if (mode) {
+ it->d.lustre.it_lock_handle = lockh.cookie;
+ it->d.lustre.it_lock_mode = mode;
+ if (bits) {
+ struct ldlm_lock *lock = ldlm_handle2lock(&lockh);
+
+ LASSERT(lock != NULL);
+ *bits = lock->l_policy_data.l_inodebits.bits;
+ LDLM_LOCK_PUT(lock);
+ }
+ }
+
+ RETURN(!!mode);
+}
+
/*
* This long block is all about fixing up the lock and request state
* so that it is correct as of the moment _before_ the operation was
/* We could just return 1 immediately, but since we should only
* be called in revalidate_it if we already have a lock, let's
* verify that. */
- ldlm_policy_data_t policy;
- ldlm_mode_t mode;
-
- /* As not all attributes are kept under update lock, e.g.
- owner/group/acls are under lookup lock, we need both
- ibits for GETATTR. */
-
- /* For CMD, UPDATE lock and LOOKUP lock can not be got
- * at the same for cross-object, so we can not match
- * the 2 lock at the same time FIXME: but how to handle
- * the above situation */
- policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
- MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
-
- mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED,
- &op_data->op_fid2, LDLM_IBITS, &policy,
- LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh);
- if (mode) {
- it->d.lustre.it_lock_handle = lockh.cookie;
- it->d.lustre.it_lock_mode = mode;
- }
-
+ rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
/* Only return failure if it was not GETATTR by cfid
(from inode_revalidate) */
- if (mode || op_data->op_namelen != 0)
- RETURN(!!mode);
+ if (rc || op_data->op_namelen != 0)
+ RETURN(rc);
}
/* lookup_it may be called only after revalidate_it has run, because
RETURN(0);
}
-
-int mdc_revalidate_lock(struct obd_export *exp,
- struct lookup_intent *it,
- struct lu_fid *fid)
-{
- /* We could just return 1 immediately, but since we should only
- * be called in revalidate_it if we already have a lock, let's
- * verify that. */
- struct ldlm_res_id res_id;
- struct lustre_handle lockh;
- ldlm_policy_data_t policy;
- ldlm_mode_t mode;
- ENTRY;
-
- fid_build_reg_res_name(fid, &res_id);
- /* As not all attributes are kept under update lock, e.g.
- owner/group/acls are under lookup lock, we need both
- ibits for GETATTR. */
- policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
- MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
-
- mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
- LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_IBITS,
- &policy, LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh, 0);
- if (mode) {
- it->d.lustre.it_lock_handle = lockh.cookie;
- it->d.lustre.it_lock_mode = mode;
- }
-
- RETURN(!!mode);
-}
require_dsh_mds || exit 0
# Skip these tests
-# bug number: 17466 18857,15962
+# bug number: 17466 18857
ALWAYS_EXCEPT="61d 33a 33b $REPLAY_SINGLE_EXCEPT"
if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then