* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include "llite_internal.h"
struct ll_sai_entry {
- struct list_head se_list;
+ cfs_list_t se_list;
unsigned int se_index;
int se_stat;
struct ptlrpc_request *se_req;
};
static unsigned int sai_generation = 0;
-static spinlock_t sai_generation_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED;
/**
* Check whether first entry was stated already or not.
- * No need to hold lli_lock, for:
+ * No need to hold lli_sa_lock, for:
* (1) it is me that remove entry from the list
* (2) the statahead thread only add new entry to the list
*/
struct ll_sai_entry *entry;
int rc = 0;
- if (!list_empty(&sai->sai_entries_stated)) {
- entry = list_entry(sai->sai_entries_stated.next,
- struct ll_sai_entry, se_list);
+ if (!cfs_list_empty(&sai->sai_entries_stated)) {
+ entry = cfs_list_entry(sai->sai_entries_stated.next,
+ struct ll_sai_entry, se_list);
if (entry->se_index == sai->sai_index_next)
rc = 1;
}
static inline int sa_received_empty(struct ll_statahead_info *sai)
{
- return list_empty(&sai->sai_entries_received);
+ return cfs_list_empty(&sai->sai_entries_received);
}
static inline int sa_not_full(struct ll_statahead_info *sai)
{
- return (sai->sai_index < sai->sai_hit + sai->sai_miss + sai->sai_max);
+ return !!(sai->sai_index < sai->sai_index_next + sai->sai_max);
}
static inline int sa_is_running(struct ll_statahead_info *sai)
static void ll_sai_entry_cleanup(struct ll_sai_entry *entry, int free)
{
struct md_enqueue_info *minfo = entry->se_minfo;
- struct ptlrpc_request *req = entry->se_req;
+ struct ptlrpc_request *req = entry->se_req;
ENTRY;
if (minfo) {
ptlrpc_req_finished(req);
}
if (free) {
- LASSERT(list_empty(&entry->se_list));
+ LASSERT(cfs_list_empty(&entry->se_list));
OBD_FREE_PTR(entry);
}
if (!sai)
return NULL;
- spin_lock(&sai_generation_lock);
+ cfs_spin_lock(&sai_generation_lock);
sai->sai_generation = ++sai_generation;
if (unlikely(sai_generation == 0))
sai->sai_generation = ++sai_generation;
- spin_unlock(&sai_generation_lock);
- atomic_set(&sai->sai_refcount, 1);
+ cfs_spin_unlock(&sai_generation_lock);
+ cfs_atomic_set(&sai->sai_refcount, 1);
sai->sai_max = LL_SA_RPC_MIN;
cfs_waitq_init(&sai->sai_waitq);
cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
struct ll_statahead_info *ll_sai_get(struct ll_statahead_info *sai)
{
LASSERT(sai);
- atomic_inc(&sai->sai_refcount);
+ cfs_atomic_inc(&sai->sai_refcount);
return sai;
}
lli = ll_i2info(inode);
LASSERT(lli->lli_sai == sai);
- if (atomic_dec_and_test(&sai->sai_refcount)) {
+ if (cfs_atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
struct ll_sai_entry *entry, *next;
- spin_lock(&lli->lli_lock);
- if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
+ if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
/* It is race case, the interpret callback just hold
* a reference count */
- spin_unlock(&lli->lli_lock);
- EXIT;
- return;
+ cfs_spin_unlock(&lli->lli_sa_lock);
+ RETURN_EXIT;
}
LASSERT(lli->lli_opendir_key == NULL);
lli->lli_sai = NULL;
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
LASSERT(sa_is_stopped(sai));
PFID(&lli->lli_fid),
sai->sai_sent, sai->sai_replied);
- list_for_each_entry_safe(entry, next, &sai->sai_entries_sent,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_sent, se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
- list_for_each_entry_safe(entry, next, &sai->sai_entries_received,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_received,
+ se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
- list_for_each_entry_safe(entry, next, &sai->sai_entries_stated,
- se_list) {
- list_del_init(&entry->se_list);
+ cfs_list_for_each_entry_safe(entry, next,
+ &sai->sai_entries_stated,
+ se_list) {
+ cfs_list_del_init(&entry->se_list);
ll_sai_entry_cleanup(entry, 1);
}
iput(inode);
entry->se_index = index;
entry->se_stat = SA_ENTRY_UNSTATED;
- spin_lock(&lli->lli_lock);
- list_add_tail(&entry->se_list, &sai->sai_entries_sent);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
+ cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
+ cfs_spin_unlock(&lli->lli_sa_lock);
RETURN(entry);
}
int rc = 0;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
sai->sai_index_next++;
- if (likely(!list_empty(&sai->sai_entries_stated))) {
- entry = list_entry(sai->sai_entries_stated.next,
- struct ll_sai_entry, se_list);
+ if (likely(!cfs_list_empty(&sai->sai_entries_stated))) {
+ entry = cfs_list_entry(sai->sai_entries_stated.next,
+ struct ll_sai_entry, se_list);
if (entry->se_index < sai->sai_index_next) {
- list_del(&entry->se_list);
+ cfs_list_del_init(&entry->se_list);
rc = entry->se_stat;
OBD_FREE_PTR(entry);
}
} else {
LASSERT(sa_is_stopped(sai));
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
RETURN(rc);
}
/**
- * inside lli_lock.
+ * inside lli_sa_lock.
* \retval NULL : can not find the entry in sai_entries_sent with the index
* \retval entry: find the entry in sai_entries_sent with the index
*/
struct ll_sai_entry *entry;
ENTRY;
- if (!list_empty(&sai->sai_entries_sent)) {
- list_for_each_entry(entry, &sai->sai_entries_sent, se_list) {
+ if (!cfs_list_empty(&sai->sai_entries_sent)) {
+ cfs_list_for_each_entry(entry, &sai->sai_entries_sent,
+ se_list) {
if (entry->se_index == index) {
entry->se_stat = stat;
entry->se_req = ptlrpc_request_addref(req);
}
/**
- * inside lli_lock.
+ * inside lli_sa_lock.
* Move entry to sai_entries_received and
* insert it into sai_entries_received tail.
*/
static inline void
ll_sai_entry_to_received(struct ll_statahead_info *sai, struct ll_sai_entry *entry)
{
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
- list_add_tail(&entry->se_list, &sai->sai_entries_received);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
+ cfs_list_add_tail(&entry->se_list, &sai->sai_entries_received);
}
/**
ll_sai_entry_cleanup(entry, 0);
- spin_lock(&lli->lli_lock);
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
+ cfs_spin_lock(&lli->lli_sa_lock);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
+ /* stale entry */
if (unlikely(entry->se_index < sai->sai_index_next)) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
OBD_FREE_PTR(entry);
RETURN(0);
}
- list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+ cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
if (se->se_index < entry->se_index) {
- list_add(&entry->se_list, &se->se_list);
- spin_unlock(&lli->lli_lock);
+ cfs_list_add(&entry->se_list, &se->se_list);
+ cfs_spin_unlock(&lli->lli_sa_lock);
RETURN(1);
}
}
/*
* I am the first entry.
*/
- list_add(&entry->se_list, &sai->sai_entries_stated);
- spin_unlock(&lli->lli_lock);
+ cfs_list_add(&entry->se_list, &sai->sai_entries_stated);
+ cfs_spin_unlock(&lli->lli_sa_lock);
RETURN(1);
}
struct mdt_body *body;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
LASSERT(!sa_received_empty(sai));
- entry = list_entry(sai->sai_entries_received.next, struct ll_sai_entry,
- se_list);
- list_del_init(&entry->se_list);
- spin_unlock(&lli->lli_lock);
+ entry = cfs_list_entry(sai->sai_entries_received.next,
+ struct ll_sai_entry, se_list);
+ cfs_list_del_init(&entry->se_list);
+ cfs_spin_unlock(&lli->lli_sa_lock);
if (unlikely(entry->se_index < sai->sai_index_next)) {
CWARN("Found stale entry: [index %u] [next %u]\n",
LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
- /*
- * XXX: No fid in reply, this is probaly cross-ref case.
- * SA can't handle it yet.
- */
+ /* XXX: No fid in reply, this is probably cross-ref case.
+ * SA can't handle it yet. */
if (body->valid & OBD_MD_MDS)
GOTO(out, rc = -EAGAIN);
- /* BUG 15962: if statahead insert dentry into dcache (for
- * lookup),it should hold parent dir's i_mutex to synchronize
- * with other operations from VFS layer.
- * E.g.: create/delete/rename/lookup, and so on. */
- mutex_lock(&minfo->mi_dir->i_mutex);
+ /* Here dentry->d_inode might be NULL, because the entry may
+ * have been removed before we start doing stat ahead. */
rc = ll_lookup_it_finish(req, it, &icbd);
- mutex_unlock(&minfo->mi_dir->i_mutex);
if (!rc)
- /*
- * Here dentry->d_inode might be NULL,
- * because the entry may have been removed before
- * we start doing stat ahead.
- */
- ll_finish_locks(it, dentry);
+ ll_lookup_finish_locks(it, dentry);
if (dentry != save) {
minfo->mi_dentry = dentry;
GOTO(out, rc);
}
+ cfs_spin_lock(&ll_lookup_lock);
spin_lock(&dcache_lock);
lock_dentry(dentry);
__d_drop(dentry);
unlock_dentry(dentry);
d_rehash_cond(dentry, 0);
spin_unlock(&dcache_lock);
+ cfs_spin_unlock(&ll_lookup_lock);
- ll_finish_locks(it, dentry);
+ ll_lookup_finish_locks(it, dentry);
}
EXIT;
out:
+ /* The "ll_sai_entry_to_stated()" will drop the related ldlm ibits lock
+ * reference count via ll_intent_drop_lock(), regardless of whether the
+ * above operations succeeded or failed. Do not worry about calling
+ * "ll_intent_drop_lock()" more than once. */
if (likely(ll_sai_entry_to_stated(sai, entry)))
cfs_waitq_signal(&sai->sai_waitq);
return rc;
CDEBUG(D_READA, "interpret statahead %.*s rc %d\n",
dentry->d_name.len, dentry->d_name.name, rc);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
+ /* stale entry */
if (unlikely(lli->lli_sai == NULL ||
lli->lli_sai->sai_generation != minfo->mi_generation)) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
ll_intent_release(it);
dput(dentry);
iput(dir);
if (likely(sa_is_running(sai))) {
ll_sai_entry_to_received(sai, entry);
sai->sai_replied++;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
} else {
- if (!list_empty(&entry->se_list))
- list_del_init(&entry->se_list);
+ if (!cfs_list_empty(&entry->se_list))
+ cfs_list_del_init(&entry->se_list);
sai->sai_replied++;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
ll_sai_entry_cleanup(entry, 1);
}
ll_sai_put(sai);
struct ll_inode_info *lli = ll_i2info(dir);
struct ll_statahead_info *sai = lli->lli_sai;
struct qstr name;
- struct dentry *dentry;
+ struct dentry *dentry = NULL;
struct ll_sai_entry *se;
int rc;
ENTRY;
dentry = d_lookup(parent, &name);
if (!dentry) {
dentry = d_alloc(parent, &name);
- if (dentry) {
+ if (dentry)
rc = do_sa_lookup(dir, dentry);
- if (rc)
- dput(dentry);
- } else {
+ else
GOTO(out, rc = -ENOMEM);
- }
} else {
rc = do_sa_revalidate(dir, dentry);
- if (rc)
- dput(dentry);
}
EXIT;
out:
if (rc) {
+ if (dentry != NULL)
+ dput(dentry);
+ se->se_stat = rc < 0 ? rc : SA_ENTRY_STATED;
CDEBUG(D_READA, "set sai entry %p index %u stat %d rc %d\n",
se, se->se_index, se->se_stat, rc);
- se->se_stat = rc < 0 ? rc : SA_ENTRY_STATED;
if (ll_sai_entry_to_stated(sai, se))
cfs_waitq_signal(&sai->sai_waitq);
} else {
}
atomic_inc(&sbi->ll_sa_total);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
thread->t_flags = SVC_RUNNING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "start doing statahead for %s\n", parent->d_name.name);
+ sai->sai_pid = cfs_curproc_pid();
+ lli->lli_sa_pos = 0;
ll_dir_chain_init(&chain);
- page = ll_get_dir_page(dir, pos, 0, &chain);
+ page = ll_get_dir_page(NULL, dir, pos, 0, &chain);
while (1) {
struct l_wait_info lwi = { 0 };
dp = page_address(page);
for (ent = lu_dirent_start(dp); ent != NULL;
ent = lu_dirent_next(ent)) {
- char *name = ent->lde_name;
- int namelen = le16_to_cpu(ent->lde_namelen);
+ __u64 hash;
+ int namelen;
+ char *name;
- if (namelen == 0)
+ hash = le64_to_cpu(ent->lde_hash);
+ if (unlikely(hash < pos))
+ /*
+ * Skip until we find target hash value.
+ */
+ continue;
+
+ namelen = le16_to_cpu(ent->lde_namelen);
+ if (unlikely(namelen == 0))
/*
* Skip dummy record.
*/
continue;
+ name = ent->lde_name;
if (name[0] == '.') {
if (namelen == 1) {
/*
* chain is exhausted.
* Normal case: continue to the next page.
*/
- page = ll_get_dir_page(dir, pos, 1, &chain);
+ lli->lli_sa_pos = pos;
+ page = ll_get_dir_page(NULL, dir, pos, 1, &chain);
} else {
/*
* go into overflow page.
out:
ll_dir_chain_fini(&chain);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
thread->t_flags = SVC_STOPPED;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&sai->sai_waitq);
cfs_waitq_signal(&thread->t_ctl_waitq);
ll_sai_put(sai);
/**
* called in ll_file_release().
*/
-void ll_stop_statahead(struct inode *inode, void *key)
+void ll_stop_statahead(struct inode *dir, void *key)
{
- struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(dir);
if (unlikely(key == NULL))
return;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
return;
}
if (!sa_is_stopped(lli->lli_sai)) {
thread->t_flags = SVC_STOPPING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "stopping statahead thread, pid %d\n",
sa_is_stopped(lli->lli_sai),
&lwi);
} else {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
/*
ll_sai_put(lli->lli_sai);
} else {
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
}
static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
+ struct ll_inode_info *lli = ll_i2info(dir);
struct ll_dir_chain chain;
struct qstr *target = &dentry->d_name;
struct page *page;
int rc = LS_NONE_FIRST_DE;
ENTRY;
+ lli->lli_sa_pos = 0;
ll_dir_chain_init(&chain);
- page = ll_get_dir_page(dir, pos, 0, &chain);
+ page = ll_get_dir_page(NULL, dir, pos, 0, &chain);
while (1) {
struct lu_dirpage *dp;
dp = page_address(page);
for (ent = lu_dirent_start(dp); ent != NULL;
ent = lu_dirent_next(ent)) {
- char *name = ent->lde_name;
- int namelen = le16_to_cpu(ent->lde_namelen);
+ int namelen;
+ char *name;
- if (namelen == 0)
+ namelen = le16_to_cpu(ent->lde_namelen);
+ if (unlikely(namelen == 0))
/*
* skip dummy record.
*/
continue;
+ name = ent->lde_name;
if (name[0] == '.') {
if (namelen == 1)
/*
* chain is exhausted
* Normal case: continue to the next page.
*/
- page = ll_get_dir_page(dir, pos, 1, &chain);
+ lli->lli_sa_pos = pos;
+ page = ll_get_dir_page(NULL, dir, pos, 1, &chain);
} else {
/*
* go into overflow page.
struct ll_inode_info *lli;
struct ll_statahead_info *sai;
struct dentry *parent;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct l_wait_info lwi = { 0 };
int rc = 0;
ENTRY;
if (sai) {
if (unlikely(sa_is_stopped(sai) &&
- list_empty(&sai->sai_entries_stated)))
+ cfs_list_empty(&sai->sai_entries_stated)))
RETURN(-EBADFD);
if ((*dentryp)->d_name.name[0] == '.') {
}
if (!ll_sai_entry_stated(sai)) {
- /* BUG 15962:
- *
- * If statahead insert dentry into dcache (for lookup),
- * it should hold parent dir's i_mutex to synchronize
- * with other operations from VFS layer.
- * E.g.: create/delete/rename/lookup, and so on.
- *
- * To prevent the dead lock between statahead and its
- * parent process, the parent process should release
- * such i_mutex before waiting for statahead to fetch
- * related dentry attribute from MDS.
- *
- * It is no matter for parent process to release such
- * i_mutex temporary, if someone else create dentry for
- * the same item in such interval, we can find it after
- * woke up by statahead. */
- if (lookup) {
- LASSERT(mutex_is_locked(&dir->i_mutex));
- mutex_unlock(&dir->i_mutex);
- }
+ /*
+ * thread started already, avoid double-stat.
+ */
+ lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(sai->sai_waitq,
ll_sai_entry_stated(sai) ||
sa_is_stopped(sai),
&lwi);
- if (lookup)
- mutex_lock(&dir->i_mutex);
+ if (unlikely(rc == -EINTR))
+ RETURN(rc);
}
if (lookup) {
/*
* do nothing for revalidate.
*/
- RETURN(rc);
+ RETURN(0);
}
/* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
sai->sai_inode = igrab(dir);
if (unlikely(sai->sai_inode == NULL)) {
- CWARN("Do not start stat ahead on dying inode "DFID" .\n",
+ CWARN("Do not start stat ahead on dying inode "DFID"\n",
PFID(&lli->lli_fid));
OBD_FREE_PTR(sai);
GOTO(out, rc = -ESTALE);
struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
CWARN("Race condition, someone changed %.*s just now: "
- "old parent "DFID", new parent "DFID" .\n",
+ "old parent "DFID", new parent "DFID"\n",
(*dentryp)->d_name.len, (*dentryp)->d_name.name,
PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
dput(parent);
RETURN(-EEXIST);
out:
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
lli->lli_opendir_key = NULL;
lli->lli_opendir_pid = 0;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
return rc;
}
* 1. such dentry was removed just between statahead pre-fetched and
* main process stat such dentry.
* 2. main process stat non-exist dentry.
- * Since we can distinguish such two cases, just count it as miss. */
+ * We cannot distinguish these two cases, so just count them as misses. */
if (result >= 1 || unlikely(rc == -ENOENT)) {
sai->sai_hit++;
sai->sai_consecutive_miss = 0;
PFID(&lli->lli_fid), sai->sai_hit,
sai->sai_miss, sai->sai_sent,
sai->sai_replied, cfs_curproc_pid());
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_sa_lock);
if (!sa_is_stopped(sai))
sai->sai_thread.t_flags = SVC_STOPPING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
}