struct lookup_intent *lld_it;
#endif
unsigned int lld_sa_generation;
+ cfs_list_t lld_sa_alias;
};
#define ll_d2d(de) ((struct ll_dentry_data*)((de)->d_fsdata))
struct obd_capa *lli_mds_capa;
cfs_list_t lli_oss_capas;
- /* metadata stat-ahead */
+ /* metadata statahead */
+ /* protect statahead stuff: lli_opendir_pid, lli_opendir_key, lli_sai,
+ * lli_sa_dentry, and so on. */
+ cfs_spinlock_t lli_sa_lock;
/*
* "opendir_pid" is the token when lookup/revalid -- I am the owner of
* dir statahead.
* before child -- it is me should cleanup the dir readahead. */
void *lli_opendir_key;
struct ll_statahead_info *lli_sai;
+ cfs_list_t lli_sa_dentry;
struct cl_object *lli_clob;
/* the most recent timestamps obtained from mds */
struct ost_lvb lli_lvb;
}
struct it_cb_data {
- struct inode *icbd_parent;
+ struct inode *icbd_parent;
struct dentry **icbd_childp;
- obd_id hash;
+ obd_id hash;
+ struct inode **icbd_alias;
+ __u32 *bits;
};
__u32 ll_i2suppgid(struct inode *i);
#endif
void ll_lookup_it_alias(struct dentry **de, struct inode *inode, __u32 bits);
int ll_lookup_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it, void *data,
- struct inode **alias);
+ struct lookup_intent *it, void *data);
/* llite/rw.c */
int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
void ll_intent_drop_lock(struct lookup_intent *);
void ll_intent_release(struct lookup_intent *);
int ll_drop_dentry(struct dentry *dentry);
-extern void ll_set_dd(struct dentry *de);
+extern int ll_set_dd(struct dentry *de);
int ll_drop_dentry(struct dentry *dentry);
void ll_unhash_aliases(struct inode *);
void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft);
unsigned int sai_skip_hidden;/* skipped hidden dentry count */
unsigned int sai_ls_all:1; /* "ls -al", do stat-ahead for
* hidden entries */
+ unsigned int sai_nolock; /* without lookup lock case */
cfs_waitq_t sai_waitq; /* stat-ahead wait queue */
struct ptlrpc_thread sai_thread; /* stat-ahead thread */
cfs_list_t sai_entries_sent; /* entries sent out */
if (lli->lli_opendir_pid != cfs_curproc_pid())
return;
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
if (likely(lli->lli_sai != NULL && ldd != NULL))
ldd->lld_sa_generation = lli->lli_sai->sai_generation;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
static inline
return do_statahead_enter(dir, dentryp, lookup);
}
-static void inline ll_dops_init(struct dentry *de, int block)
+static int inline ll_dops_init(struct dentry *de, int block)
{
struct ll_dentry_data *lld = ll_d2d(de);
+ int rc = 0;
if (lld == NULL && block != 0) {
- ll_set_dd(de);
+ rc = ll_set_dd(de);
+ if (rc)
+ return rc;
+
lld = ll_d2d(de);
}
lld->lld_sa_generation = 0;
de->d_op = &ll_d_ops;
+ return rc;
}
/* llite ioctl register support rountine */
break;
case LDLM_CB_CANCELING: {
struct inode *inode = ll_inode_from_lock(lock);
+ struct ll_inode_info *lli;
__u64 bits = lock->l_policy_data.l_inodebits.bits;
struct lu_fid *fid;
ll_md_real_close(inode, flags);
}
+ lli = ll_i2info(inode);
if (bits & MDS_INODELOCK_UPDATE)
- ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
+ lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
if (S_ISDIR(inode->i_mode) &&
(bits & MDS_INODELOCK_UPDATE)) {
ll_drop_negative_dentry(inode);
}
+ if ((bits & MDS_INODELOCK_LOOKUP) &&
+ !cfs_list_empty(&lli->lli_sa_dentry)) {
+ struct ll_dentry_data *lld, *next;
+
+ cfs_spin_lock(&lli->lli_sa_lock);
+ cfs_list_for_each_entry_safe(lld, next,
+ &lli->lli_sa_dentry,
+ lld_sa_alias)
+ cfs_list_del_init(&lld->lld_sa_alias);
+ cfs_spin_unlock(&lli->lli_sa_lock);
+ }
+
if (inode->i_sb->s_root &&
inode != inode->i_sb->s_root->d_inode &&
(bits & MDS_INODELOCK_LOOKUP))
struct ll_dentry_data *lld = ll_d2d(*de);
/* just make sure the ll_dentry_data is ready */
- if (unlikely(lld == NULL)) {
- ll_set_dd(*de);
- lld = ll_d2d(*de);
- if (likely(lld != NULL))
- lld->lld_sa_generation = 0;
- }
+ if (unlikely(lld == NULL))
+ ll_dops_init(*de, 1);
}
/* we have lookup look - unhide dentry */
if (bits & MDS_INODELOCK_LOOKUP) {
}
int ll_lookup_it_finish(struct ptlrpc_request *request,
- struct lookup_intent *it, void *data,
- struct inode **alias)
+ struct lookup_intent *it, void *data)
{
struct it_cb_data *icbd = data;
struct dentry **de = icbd->icbd_childp;
md_set_lock_data(sbi->ll_md_exp,
&it->d.lustre.it_lock_handle, inode, &bits);
- if (alias != NULL) {
- *alias = inode;
+ if (icbd->bits != NULL)
+ *icbd->bits = bits;
+ if (icbd->icbd_alias != NULL) {
+ *icbd->icbd_alias = inode;
RETURN(0);
}
icbd.icbd_childp = &dentry;
icbd.icbd_parent = parent;
+ icbd.icbd_alias = NULL;
+ icbd.bits = NULL;
if (it->it_op & IT_CREAT ||
(it->it_op & IT_OPEN && it->it_create_mode & O_CREAT))
if (rc < 0)
GOTO(out, retval = ERR_PTR(rc));
- rc = ll_lookup_it_finish(req, it, &icbd, NULL);
+ rc = ll_lookup_it_finish(req, it, &icbd);
if (rc != 0) {
ll_intent_release(it);
GOTO(out, retval = ERR_PTR(rc));
/**
* Check whether first entry was stated already or not.
- * No need to hold lli_lock, for:
+ * No need to hold lli_sa_lock, for:
* (1) it is me that remove entry from the list
* (2) the statahead thread only add new entry to the list
*/
(sai->sai_consecutive_miss > 8));
}
+static inline int sa_skip_nolock(struct ll_statahead_info *sai)
+{
+ return (sai->sai_nolock >= 3);
+}
+
static void ll_sai_entry_free(struct ll_sai_entry *entry)
{
struct dentry *dentry = entry->se_dentry;
struct inode *inode = entry->se_inode;
if (dentry) {
+ struct ll_dentry_data *lld = ll_d2d(dentry);
+ struct ll_inode_info *lli;
+
entry->se_dentry = NULL;
+ LASSERT(inode != NULL);
+ lli = ll_i2info(inode);
+ if (!cfs_list_empty(&lli->lli_sa_dentry)) {
+ cfs_spin_lock(&lli->lli_sa_lock);
+ cfs_list_del_init(&lld->lld_sa_alias);
+ cfs_spin_unlock(&lli->lli_sa_lock);
+ }
dput(dentry);
}
if (inode) {
if (cfs_atomic_dec_and_test(&sai->sai_refcount)) {
struct ll_sai_entry *entry, *next;
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
/* It is race case, the interpret callback just hold
* a reference count */
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
EXIT;
return;
}
LASSERT(lli->lli_opendir_key == NULL);
lli->lli_sai = NULL;
lli->lli_opendir_pid = 0;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
LASSERT(sa_is_stopped(sai));
entry->se_index = index;
entry->se_stat = SA_ENTRY_UNSTATED;
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
RETURN(entry);
}
int rc = 0;
ENTRY;
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
sai->sai_index_next++;
if (likely(!cfs_list_empty(&sai->sai_entries_stated))) {
entry = cfs_list_entry(sai->sai_entries_stated.next,
} else {
LASSERT(sa_is_stopped(sai));
}
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
RETURN(rc);
}
/**
- * inside lli_lock.
+ * inside lli_sa_lock.
* \retval NULL : can not find the entry in sai_entries_sent with the index
* \retval entry: find the entry in sai_entries_sent with the index
*/
}
/**
- * inside lli_lock.
+ * inside lli_sa_lock.
* Move entry to sai_entries_received and
* insert it into sai_entries_received tail.
*/
ll_sai_entry_cleanup(entry, 0);
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
if (!cfs_list_empty(&entry->se_list))
cfs_list_del_init(&entry->se_list);
/* stale entry */
if (unlikely(entry->se_index < sai->sai_index_next)) {
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
ll_sai_entry_free(entry);
RETURN(0);
}
cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
if (se->se_index < entry->se_index) {
cfs_list_add(&entry->se_list, &se->se_list);
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
RETURN(1);
}
}
* I am the first entry.
*/
cfs_list_add(&entry->se_list, &sai->sai_entries_stated);
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
RETURN(1);
}
struct mdt_body *body;
ENTRY;
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
LASSERT(!sa_received_empty(sai));
entry = cfs_list_entry(sai->sai_entries_received.next,
struct ll_sai_entry, se_list);
cfs_list_del_init(&entry->se_list);
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
if (unlikely(entry->se_index < sai->sai_index_next)) {
CWARN("Found stale entry: [index %u] [next %u]\n",
* lookup.
*/
struct dentry *save = dentry;
+ __u32 bits = 0;
struct it_cb_data icbd = {
.icbd_parent = minfo->mi_dir,
- .icbd_childp = &dentry
+ .icbd_childp = &dentry,
+ .icbd_alias = &entry->se_inode,
+ .bits = &bits
};
LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
* aliasing such dentry to the inode later. If we don't do such
* drop here, it maybe cause deadlock with i_muext held by
* others, just like bug 21739. */
- rc = ll_lookup_it_finish(req, it, &icbd, &entry->se_inode);
- if (entry->se_inode != NULL)
- entry->se_dentry = dget(dentry);
+ rc = ll_lookup_it_finish(req, it, &icbd);
+ if (entry->se_inode != NULL) {
+ struct ll_dentry_data *lld = ll_d2d(dentry);
+ struct ll_inode_info *sei = ll_i2info(entry->se_inode);
+
+ /* For statahead lookup case, both MDS_INODELOCK_LOOKUP
+ * and MDS_INODELOCK_UPDATE should be granted */
+ if (likely(bits & MDS_INODELOCK_LOOKUP &&
+ bits & MDS_INODELOCK_UPDATE)) {
+ /* the first dentry ref_count will be dropped by
+ * ll_sai_entry_to_stated(), so hold another ref
+ * in advance */
+ entry->se_dentry = dget(dentry);
+ cfs_spin_lock(&sei->lli_sa_lock);
+ cfs_list_add(&lld->lld_sa_alias,
+ &sei->lli_sa_dentry);
+ cfs_spin_unlock(&sei->lli_sa_lock);
+ sai->sai_nolock = 0;
+ } else {
+ iput(entry->se_inode);
+ entry->se_inode = NULL;
+ sai->sai_nolock++;
+ }
+ }
LASSERT(dentry == save);
ll_intent_drop_lock(it);
} else {
CDEBUG(D_READA, "interpret statahead %.*s rc %d\n",
dentry->d_name.len, dentry->d_name.name, rc);
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
/* stale entry */
if (unlikely(lli->lli_sai == NULL ||
lli->lli_sai->sai_generation != minfo->mi_generation)) {
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
ll_intent_release(it);
dput(dentry);
iput(dir);
if (likely(sa_is_running(sai))) {
ll_sai_entry_to_received(sai, entry);
sai->sai_replied++;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
} else {
if (!cfs_list_empty(&entry->se_list))
cfs_list_del_init(&entry->se_list);
sai->sai_replied++;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
ll_sai_entry_cleanup(entry, 1);
}
ll_sai_put(sai);
ll_name2qstr(&name, entry_name, entry_name_len);
dentry = d_lookup(parent, &name);
if (!dentry) {
+ if (unlikely(sa_skip_nolock(sai))) {
+ CWARN("can not obtain lookup lock, skip the succeedent "
+ "lookup cases, will cause statahead miss, and "
+ "statahead maybe exit for that.\n");
+ GOTO(out, rc = -EAGAIN);
+ }
+
dentry = d_alloc(parent, &name);
if (dentry) {
- rc = do_sa_lookup(dir, dentry);
+ rc = ll_dops_init(dentry, 1);
+ if (!rc)
+ rc = do_sa_lookup(dir, dentry);
if (rc)
dput(dentry);
} else {
}
atomic_inc(&sbi->ll_sa_total);
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
thread->t_flags = SVC_RUNNING;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "start doing statahead for %s\n", parent->d_name.name);
out:
ll_dir_chain_fini(&chain);
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
thread->t_flags = SVC_STOPPED;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&sai->sai_waitq);
cfs_waitq_signal(&thread->t_ctl_waitq);
ll_sai_put(sai);
if (unlikely(key == NULL))
return;
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
return;
}
if (!sa_is_stopped(lli->lli_sai)) {
thread->t_flags = SVC_STOPPING;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "stopping statahead thread, pid %d\n",
sa_is_stopped(lli->lli_sai),
&lwi);
} else {
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
/*
ll_sai_put(lli->lli_sai);
} else {
lli->lli_opendir_pid = 0;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
}
struct ll_sai_entry, se_list);
/* This is for statahead lookup */
if (entry->se_inode != NULL) {
- struct lookup_intent it = {.it_op = IT_GETATTR};
+ struct lookup_intent it = {.it_op = IT_LOOKUP};
struct dentry *dchild = entry->se_dentry;
struct inode *ichild = entry->se_inode;
+ struct ll_dentry_data *lld = ll_d2d(dchild);
+ struct ll_inode_info *sei = ll_i2info(ichild);
+ struct dentry *save = dchild;
+ int invalid = 0;
+ __u32 bits = MDS_INODELOCK_LOOKUP |
+ MDS_INODELOCK_UPDATE;
int found = 0;
- __u32 bits;
LASSERT(dchild != *dentryp);
if (!lookup)
mutex_lock(&dir->i_mutex);
+ /*
+ * Make sure dentry is still valid.
+ * For statahead lookup case, we need both
+ * LOOKUP lock and UPDATE lock, which were
+ * obtained by the statahead thread originally.
+ *
+ * Consider following racer case:
+ * 1. statahead thread on client1 gets a lock with
+ * both LOOKUP and UPDATE bits for "aaa"
+ * 2. rename thread on client2 cancel such lock
+ * from client1, then rename "aaa" to "bbb"
+ * 3. ls thread on client1 obtain LOOKUP lock
+ * for "bbb" again
+ * 4. here the dentry "aaa" created by the statahead
+ * thread should be invalidated, even though the
+ * related LOOKUP lock is valid for the same inode
+ */
rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
ll_inode2fid(ichild),
&bits);
- if (rc == 1) {
- struct dentry *save = dchild;
-
- ll_lookup_it_alias(&dchild, ichild,
- bits);
- ll_lookup_finish_locks(&it, dchild);
- if (dchild != save)
- dput(save);
- found = is_same_dentry(dchild,
- *dentryp);
- } else {
- /* Someone has canceled related ldlm
+ cfs_spin_lock(&sei->lli_sa_lock);
+ if (!cfs_list_empty(&lld->lld_sa_alias))
+ cfs_list_del_init(&lld->lld_sa_alias);
+ else
+ invalid = 1;
+ cfs_spin_unlock(&sei->lli_sa_lock);
+ if (rc != 1)
+ /* Someone has cancelled the original
* lock before the real "revalidate"
- * using it.
- * Drop the inode reference count held
- * by interpreter. */
- iput(ichild);
+ * using it. Drop it. */
+ goto out_mutex;
+
+ if (invalid) {
+ /* Someone has cancelled the original
+ * lock and re-obtained it; the dentry
+ * may be invalid now, so drop it. */
+ ll_intent_drop_lock(&it);
+ goto out_mutex;
}
+ ll_lookup_it_alias(&dchild, ichild, bits);
+ ll_lookup_finish_locks(&it, dchild);
+ if (dchild != save)
+ dput(save);
+ found = is_same_dentry(dchild, *dentryp);
+ ichild = NULL;
+
+out_mutex:
if (!lookup)
mutex_unlock(&dir->i_mutex);
+ /* Drop the inode reference count held by
+ * interpreter. */
+ if (ichild != NULL)
+ iput(ichild);
entry->se_dentry = NULL;
entry->se_inode = NULL;
* by itself. */
*dentryp = dchild;
} else {
- LASSERT(dchild == *dentryp);
+ LASSERTF(dchild == *dentryp,
+ "[%.*s/%.*s] "
+ "[%p "DFID"] "
+ "[%p "DFID"]\n",
+ dchild->d_parent->d_name.len,
+ dchild->d_parent->d_name.name,
+ dchild->d_name.len,
+ dchild->d_name.name,
+ dchild,
+ PFID(ll_inode2fid(dchild->d_inode)),
+ *dentryp,
+ PFID(ll_inode2fid((*dentryp)->d_inode)));
/* Drop the dentry reference
* count held by statahead. */
dput(dchild);
RETURN(-EEXIST);
out:
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
lli->lli_opendir_key = NULL;
lli->lli_opendir_pid = 0;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
return rc;
}
PFID(&lli->lli_fid), sai->sai_hit,
sai->sai_miss, sai->sai_sent,
sai->sai_replied, cfs_curproc_pid());
- cfs_spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
if (!sa_is_stopped(sai))
sai->sai_thread.t_flags = SVC_STOPPING;
- cfs_spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
}