#include "llite_internal.h"
struct ll_sai_entry {
- struct list_head se_list;
+ cfs_list_t se_list;
unsigned int se_index;
int se_stat;
struct ptlrpc_request *se_req;
struct md_enqueue_info *se_minfo;
+ struct dentry *se_dentry;
+ struct inode *se_inode;
};
enum {
};
static unsigned int sai_generation = 0;
-static spinlock_t sai_generation_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED;
/**
* Check whether first entry was stated already or not.
struct ll_sai_entry *entry;
int rc = 0;
- if (!list_empty(&sai->sai_entries_stated)) {
- entry = list_entry(sai->sai_entries_stated.next,
- struct ll_sai_entry, se_list);
+ if (!cfs_list_empty(&sai->sai_entries_stated)) {
+ entry = cfs_list_entry(sai->sai_entries_stated.next,
+ struct ll_sai_entry, se_list);
if (entry->se_index == sai->sai_index_next)
rc = 1;
}
static inline int sa_received_empty(struct ll_statahead_info *sai)
{
- return list_empty(&sai->sai_entries_received);
+ return cfs_list_empty(&sai->sai_entries_received);
}
static inline int sa_not_full(struct ll_statahead_info *sai)
(sai->sai_consecutive_miss > 8));
}
+/* Release the dentry and inode references held by a statahead entry and
+ * free the entry itself.  The entry must already be detached from every
+ * sai list (asserted below before freeing). */
+static void ll_sai_entry_free(struct ll_sai_entry *entry)
+{
+ struct dentry *dentry = entry->se_dentry;
+ struct inode *inode = entry->se_inode;
+
+ /* Drop the dentry reference taken when the interpret callback
+ * attached it to this entry. */
+ if (dentry) {
+ entry->se_dentry = NULL;
+ dput(dentry);
+ }
+ /* Drop the inode reference created/held during statahead lookup. */
+ if (inode) {
+ entry->se_inode = NULL;
+ iput(inode);
+ }
+ LASSERT(cfs_list_empty(&entry->se_list));
+ OBD_FREE_PTR(entry);
+}
+
/**
* process the deleted entry's member and free the entry.
* (1) release intent
static void ll_sai_entry_cleanup(struct ll_sai_entry *entry, int free)
{
struct md_enqueue_info *minfo = entry->se_minfo;
- struct ptlrpc_request *req = entry->se_req;
+ struct ptlrpc_request *req = entry->se_req;
ENTRY;
if (minfo) {
entry->se_req = NULL;
ptlrpc_req_finished(req);
}
- if (free) {
- LASSERT(list_empty(&entry->se_list));
- OBD_FREE_PTR(entry);
- }
+ if (free)
+ ll_sai_entry_free(entry);
EXIT;
}
if (!sai)
return NULL;
- spin_lock(&sai_generation_lock);
+ cfs_spin_lock(&sai_generation_lock);
sai->sai_generation = ++sai_generation;
if (unlikely(sai_generation == 0))
sai->sai_generation = ++sai_generation;
- spin_unlock(&sai_generation_lock);
- atomic_set(&sai->sai_refcount, 1);
+ cfs_spin_unlock(&sai_generation_lock);
+ cfs_atomic_set(&sai->sai_refcount, 1);
sai->sai_max = LL_SA_RPC_MIN;
cfs_waitq_init(&sai->sai_waitq);
cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
struct ll_statahead_info *ll_sai_get(struct ll_statahead_info *sai)
{
LASSERT(sai);
- atomic_inc(&sai->sai_refcount);
+ cfs_atomic_inc(&sai->sai_refcount);
return sai;
}
lli = ll_i2info(inode);
LASSERT(lli->lli_sai == sai);
- if (atomic_dec_and_test(&sai->sai_refcount)) {
+ if (cfs_atomic_dec_and_test(&sai->sai_refcount)) {
struct ll_sai_entry *entry, *next;
- spin_lock(&lli->lli_lock);
- if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
+ cfs_spin_lock(&lli->lli_lock);
+ if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
/* It is race case, the interpret callback just hold
* a reference count */
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
EXIT;
return;
}
LASSERT(lli->lli_opendir_key == NULL);
lli->lli_sai = NULL;
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
LASSERT(sa_is_stopped(sai));
PFID(&lli->lli_fid),
sai->sai_sent, sai->sai_replied);
- list_for_each_entry_safe(entry, next, &sai->sai_entries_sent,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_sent, se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
- list_for_each_entry_safe(entry, next, &sai->sai_entries_received,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_received,
+ se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
- list_for_each_entry_safe(entry, next, &sai->sai_entries_stated,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_stated,
+ se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
iput(inode);
entry->se_index = index;
entry->se_stat = SA_ENTRY_UNSTATED;
- spin_lock(&lli->lli_lock);
- list_add_tail(&entry->se_list, &sai->sai_entries_sent);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
+ cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(entry);
}
int rc = 0;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
sai->sai_index_next++;
- if (likely(!list_empty(&sai->sai_entries_stated))) {
- entry = list_entry(sai->sai_entries_stated.next,
- struct ll_sai_entry, se_list);
+ if (likely(!cfs_list_empty(&sai->sai_entries_stated))) {
+ entry = cfs_list_entry(sai->sai_entries_stated.next,
+ struct ll_sai_entry, se_list);
if (entry->se_index < sai->sai_index_next) {
- list_del(&entry->se_list);
+ cfs_list_del_init(&entry->se_list);
rc = entry->se_stat;
- OBD_FREE_PTR(entry);
+ ll_sai_entry_free(entry);
}
} else {
LASSERT(sa_is_stopped(sai));
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(rc);
}
struct ll_sai_entry *entry;
ENTRY;
- if (!list_empty(&sai->sai_entries_sent)) {
- list_for_each_entry(entry, &sai->sai_entries_sent, se_list) {
+ if (!cfs_list_empty(&sai->sai_entries_sent)) {
+ cfs_list_for_each_entry(entry, &sai->sai_entries_sent,
+ se_list) {
if (entry->se_index == index) {
entry->se_stat = stat;
entry->se_req = ptlrpc_request_addref(req);
static inline void
ll_sai_entry_to_received(struct ll_statahead_info *sai, struct ll_sai_entry *entry)
{
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
- list_add_tail(&entry->se_list, &sai->sai_entries_received);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
+ cfs_list_add_tail(&entry->se_list, &sai->sai_entries_received);
}
/**
ll_sai_entry_cleanup(entry, 0);
- spin_lock(&lli->lli_lock);
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
+ cfs_spin_lock(&lli->lli_lock);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
+ /* stale entry */
if (unlikely(entry->se_index < sai->sai_index_next)) {
- spin_unlock(&lli->lli_lock);
- OBD_FREE_PTR(entry);
+ cfs_spin_unlock(&lli->lli_lock);
+ ll_sai_entry_free(entry);
RETURN(0);
}
- list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+ cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
if (se->se_index < entry->se_index) {
- list_add(&entry->se_list, &se->se_list);
- spin_unlock(&lli->lli_lock);
+ cfs_list_add(&entry->se_list, &se->se_list);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(1);
}
}
/*
* I am the first entry.
*/
- list_add(&entry->se_list, &sai->sai_entries_stated);
- spin_unlock(&lli->lli_lock);
+ cfs_list_add(&entry->se_list, &sai->sai_entries_stated);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(1);
}
struct mdt_body *body;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
LASSERT(!sa_received_empty(sai));
- entry = list_entry(sai->sai_entries_received.next, struct ll_sai_entry,
- se_list);
- list_del_init(&entry->se_list);
- spin_unlock(&lli->lli_lock);
+ entry = cfs_list_entry(sai->sai_entries_received.next,
+ struct ll_sai_entry, se_list);
+ cfs_list_del_init(&entry->se_list);
+ cfs_spin_unlock(&lli->lli_lock);
if (unlikely(entry->se_index < sai->sai_index_next)) {
CWARN("Found stale entry: [index %u] [next %u]\n",
LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
- /*
- * XXX: No fid in reply, this is probaly cross-ref case.
- * SA can't handle it yet.
- */
+ /* XXX: No fid in reply, this is probably a cross-ref case.
+ * SA can't handle it yet. */
if (body->valid & OBD_MD_MDS)
GOTO(out, rc = -EAGAIN);
- /* BUG 15962: if statahead insert dentry into dcache (for
- * lookup),it should hold parent dir's i_mutex to synchronize
- * with other operations from VFS layer.
- * E.g.: create/delete/rename/lookup, and so on. */
- mutex_lock(&minfo->mi_dir->i_mutex);
- rc = ll_lookup_it_finish(req, it, &icbd);
- mutex_unlock(&minfo->mi_dir->i_mutex);
- if (!rc)
- /*
- * Here dentry->d_inode might be NULL,
- * because the entry may have been removed before
- * we start doing stat ahead.
- */
- ll_finish_locks(it, dentry);
-
- if (dentry != save) {
- minfo->mi_dentry = dentry;
- dput(save);
- }
+ /* Here dentry->d_inode might be NULL, because the entry may
+ * have been removed before we start doing stat ahead. */
+
+ /* BUG 15962, 21739: since statahead thread does not hold
+ * parent's i_mutex, it can not alias the dentry to inode.
+ * Here we just create/update inode in memory, and let the
+ * main "ls -l" thread to alias such dentry to the inode with
+ * parent's i_mutex held.
+ * On the other hand, we hold ldlm ibits lock for the inode
+ * yet, to allow other operations to cancel such lock in time,
+ * we should drop the ldlm lock reference count, then the main
+ * "ls -l" thread should check/get such ldlm ibits lock before
+ * aliasing such dentry to the inode later. If we don't do such
+ * drop here, it may cause deadlock with i_mutex held by
+ * others, just like bug 21739. */
+ rc = ll_lookup_it_finish(req, it, &icbd, &entry->se_inode);
+ if (entry->se_inode != NULL)
+ entry->se_dentry = dget(dentry);
+ LASSERT(dentry == save);
+ ll_intent_drop_lock(it);
} else {
/*
* revalidate.
GOTO(out, rc);
}
+ cfs_spin_lock(&ll_lookup_lock);
spin_lock(&dcache_lock);
lock_dentry(dentry);
__d_drop(dentry);
unlock_dentry(dentry);
d_rehash_cond(dentry, 0);
spin_unlock(&dcache_lock);
+ cfs_spin_unlock(&ll_lookup_lock);
- ll_finish_locks(it, dentry);
+ ll_lookup_finish_locks(it, dentry);
}
EXIT;
out:
+ /* The "ll_sai_entry_to_stated()" will drop related ldlm ibits lock
+ * reference count with ll_intent_drop_lock() called in spite of the
+ * above operations failed or not. Do not worry about calling
+ * "ll_intent_drop_lock()" more than once. */
if (likely(ll_sai_entry_to_stated(sai, entry)))
cfs_waitq_signal(&sai->sai_waitq);
return rc;
CDEBUG(D_READA, "interpret statahead %.*s rc %d\n",
dentry->d_name.len, dentry->d_name.name, rc);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
+ /* stale entry */
if (unlikely(lli->lli_sai == NULL ||
lli->lli_sai->sai_generation != minfo->mi_generation)) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
ll_intent_release(it);
dput(dentry);
iput(dir);
if (likely(sa_is_running(sai))) {
ll_sai_entry_to_received(sai, entry);
sai->sai_replied++;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
} else {
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
sai->sai_replied++;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
ll_sai_entry_cleanup(entry, 1);
}
ll_sai_put(sai);
if (unlikely(dentry == dentry->d_sb->s_root))
RETURN(1);
- rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode));
+ rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
+ NULL);
if (rc == 1) {
ll_intent_release(&it);
RETURN(1);
}
atomic_inc(&sbi->ll_sa_total);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
thread->t_flags = SVC_RUNNING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "start doing statahead for %s\n", parent->d_name.name);
char *name = ent->lde_name;
int namelen = le16_to_cpu(ent->lde_namelen);
- if (namelen == 0)
+ if (unlikely(namelen == 0))
/*
* Skip dummy record.
*/
out:
ll_dir_chain_fini(&chain);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
thread->t_flags = SVC_STOPPED;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cfs_waitq_signal(&sai->sai_waitq);
cfs_waitq_signal(&thread->t_ctl_waitq);
ll_sai_put(sai);
if (unlikely(key == NULL))
return;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
return;
}
if (!sa_is_stopped(lli->lli_sai)) {
thread->t_flags = SVC_STOPPING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "stopping statahead thread, pid %d\n",
sa_is_stopped(lli->lli_sai),
&lwi);
} else {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
/*
ll_sai_put(lli->lli_sai);
} else {
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
}
return rc;
}
+/* Return 1 if the two dentries name the same directory entry: either the
+ * very same dentry object, or two dentries under the same parent whose
+ * names match exactly (hash, length and bytes compared); 0 otherwise. */
+static int is_same_dentry(struct dentry *d1, struct dentry *d2)
+{
+ if (unlikely(d1 == d2))
+ return 1;
+ if (d1->d_parent == d2->d_parent &&
+ d1->d_name.hash == d2->d_name.hash &&
+ d1->d_name.len == d2->d_name.len &&
+ memcmp(d1->d_name.name, d2->d_name.name, d1->d_name.len) == 0)
+ return 1;
+ return 0;
+}
+
/**
* Start statahead thread if this is the first dir entry.
* Otherwise if a thread is started already, wait it until it is ahead of me.
- * \retval 0 -- stat ahead thread process such dentry, for lookup, it miss
- * \retval 1 -- stat ahead thread process such dentry, for lookup, it hit
+ * \retval 0 -- stat ahead thread process such dentry, miss for lookup
+ * \retval 1 -- stat ahead thread process such dentry, hit for any case
* \retval -EEXIST -- stat ahead thread started, and this is the first dentry
* \retval -EBADFD -- statahead thread exit and not dentry available
* \retval -EAGAIN -- try to stat by caller
struct ll_inode_info *lli;
struct ll_statahead_info *sai;
struct dentry *parent;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct l_wait_info lwi = { 0 };
int rc = 0;
ENTRY;
if (sai) {
if (unlikely(sa_is_stopped(sai) &&
- list_empty(&sai->sai_entries_stated)))
+ cfs_list_empty(&sai->sai_entries_stated)))
RETURN(-EBADFD);
if ((*dentryp)->d_name.name[0] == '.') {
}
if (!ll_sai_entry_stated(sai)) {
- /* BUG 15962:
- *
- * If statahead insert dentry into dcache (for lookup),
- * it should hold parent dir's i_mutex to synchronize
- * with other operations from VFS layer.
- * E.g.: create/delete/rename/lookup, and so on.
- *
- * To prevent the dead lock between statahead and its
- * parent process, the parent process should release
- * such i_mutex before waiting for statahead to fetch
- * related dentry attribute from MDS.
- *
- * It is no matter for parent process to release such
- * i_mutex temporary, if someone else create dentry for
- * the same item in such interval, we can find it after
- * woke up by statahead. */
- if (lookup) {
- LASSERT(mutex_is_locked(&dir->i_mutex));
- mutex_unlock(&dir->i_mutex);
- }
+ /*
+ * thread started already, avoid double-stat.
+ */
+ lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(sai->sai_waitq,
ll_sai_entry_stated(sai) ||
sa_is_stopped(sai),
&lwi);
- if (lookup)
- mutex_lock(&dir->i_mutex);
+ if (unlikely(rc == -EINTR))
+ RETURN(rc);
+ }
+
+ if (ll_sai_entry_stated(sai)) {
+ struct ll_sai_entry *entry;
+
+ entry = cfs_list_entry(sai->sai_entries_stated.next,
+ struct ll_sai_entry, se_list);
+ /* This is for statahead lookup */
+ if (entry->se_inode != NULL) {
+ struct lookup_intent it = {.it_op = IT_GETATTR};
+ struct dentry *dchild = entry->se_dentry;
+ struct inode *ichild = entry->se_inode;
+ int found = 0;
+ __u32 bits;
+
+ LASSERT(dchild != *dentryp);
+
+ if (!lookup)
+ mutex_lock(&dir->i_mutex);
+
+ rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
+ ll_inode2fid(ichild),
+ &bits);
+ if (rc == 1) {
+ struct dentry *save = dchild;
+
+ ll_lookup_it_alias(&dchild, ichild,
+ bits);
+ ll_lookup_finish_locks(&it, dchild);
+ if (dchild != save)
+ dput(save);
+ found = is_same_dentry(dchild,
+ *dentryp);
+ } else {
+ /* Someone has canceled related ldlm
+ * lock before the real "revalidate"
+ * using it.
+ * Drop the inode reference count held
+ * by interpreter. */
+ iput(ichild);
+ }
+
+ if (!lookup)
+ mutex_unlock(&dir->i_mutex);
+
+ entry->se_dentry = NULL;
+ entry->se_inode = NULL;
+ if (found) {
+ if (lookup) {
+ LASSERT(dchild != *dentryp);
+ /* VFS will drop the reference
+ * count for dchild and *dentryp
+ * by itself. */
+ *dentryp = dchild;
+ } else {
+ LASSERT(dchild == *dentryp);
+ /* Drop the dentry reference
+ * count held by statahead. */
+ dput(dchild);
+ }
+ RETURN(1);
+ } else {
+ /* Drop the dentry reference count held
+ * by statahead. */
+ dput(dchild);
+ }
+ }
}
if (lookup) {
/*
* do nothing for revalidate.
*/
- RETURN(rc);
+ RETURN(0);
}
/* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
sai->sai_inode = igrab(dir);
if (unlikely(sai->sai_inode == NULL)) {
- CWARN("Do not start stat ahead on dying inode "DFID" .\n",
+ CWARN("Do not start stat ahead on dying inode "DFID"\n",
PFID(&lli->lli_fid));
OBD_FREE_PTR(sai);
GOTO(out, rc = -ESTALE);
struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
CWARN("Race condition, someone changed %.*s just now: "
- "old parent "DFID", new parent "DFID" .\n",
+ "old parent "DFID", new parent "DFID"\n",
(*dentryp)->d_name.len, (*dentryp)->d_name.name,
PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
dput(parent);
RETURN(-EEXIST);
out:
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
lli->lli_opendir_key = NULL;
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
return rc;
}
* 1. such dentry was removed just between statahead pre-fetched and
* main process stat such dentry.
* 2. main process stat non-exist dentry.
- * Since we can distinguish such two cases, just count it as miss. */
+ * We can not distinguish such two cases, just count them as miss. */
if (result >= 1 || unlikely(rc == -ENOENT)) {
sai->sai_hit++;
sai->sai_consecutive_miss = 0;
PFID(&lli->lli_fid), sai->sai_hit,
sai->sai_miss, sai->sai_sent,
sai->sai_replied, cfs_curproc_pid());
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (!sa_is_stopped(sai))
sai->sai_thread.t_flags = SVC_STOPPING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
}
}