From: Lai Siyao Date: Thu, 16 Oct 2014 18:16:18 +0000 (-0400) Subject: LU-3270 statahead: use dcache-like interface for sa entry X-Git-Tag: 2.6.90~37 X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=99d96c8e428754927d62069d129b2464b3b30e61 LU-3270 statahead: use dcache-like interface for sa entry Rename ll_sa_entry to sa_entry, and manage sa_entry cache with dcache-like interfaces. sa_entry is not needed to be refcounted, because only scanner can free it, so after it's put in stat list, statahead thread shouldn't access it any longer. ll_statahead_interpret() doesn't need to take sai refcount, because statahead thread will wait for all inflight RPC to finish. Signed-off-by: Lai Siyao Change-Id: Ibd55227f4c0710139bd039a4e7232dc1cb5041be Reviewed-on: http://review.whamcloud.com/9664 Tested-by: Jenkins Tested-by: Maloo Reviewed-by: James Simmons Reviewed-by: Fan Yong Reviewed-by: Oleg Drokin --- diff --git a/lustre/include/obd.h b/lustre/include/obd.h index d85d984..61cb703 100644 --- a/lustre/include/obd.h +++ b/lustre/include/obd.h @@ -866,12 +866,12 @@ typedef int (* md_enqueue_cb_t)(struct ptlrpc_request *req, int rc); struct md_enqueue_info { - struct md_op_data mi_data; - struct lookup_intent mi_it; - struct lustre_handle mi_lockh; - struct inode *mi_dir; - md_enqueue_cb_t mi_cb; - __u64 mi_cbdata; + struct md_op_data mi_data; + struct lookup_intent mi_it; + struct lustre_handle mi_lockh; + struct inode *mi_dir; + md_enqueue_cb_t mi_cb; + void *mi_cbdata; }; struct obd_ops { diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h index fbc51f4..204c42e 100644 --- a/lustre/llite/llite_internal.h +++ b/lustre/llite/llite_internal.h @@ -1341,10 +1341,11 @@ struct ll_statahead_info { wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ struct ptlrpc_thread sai_thread; /* stat-ahead thread */ struct ptlrpc_thread sai_agl_thread; /* AGL thread */ - struct list_head sai_entries; /* entry list */ - struct list_head sai_entries_received; /* entries returned */ - struct list_head sai_entries_stated; /* entries stated */ - struct list_head sai_entries_agl; /* AGL entries to be sent */ + struct list_head sai_interim_entries; /* entries which got async + * stat reply, but not + * instantiated */ + struct list_head sai_entries; /* completed entries */ + struct list_head sai_agls; /* AGLs to be sent */ struct list_head sai_cache[LL_SA_CACHE_SIZE]; spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE]; atomic_t sai_cache_count; /* entry count in cache */ diff --git a/lustre/llite/statahead.c b/lustre/llite/statahead.c index f781f7c..45bfe84 100644 --- a/lustre/llite/statahead.c +++ b/lustre/llite/statahead.c @@ -49,44 +49,44 @@ #define SA_OMITTED_ENTRY_MAX 8ULL typedef enum { - /** negative values are for error cases */ - SA_ENTRY_INIT = 0, /** init entry */ - SA_ENTRY_SUCC = 1, /** stat succeed */ - SA_ENTRY_INVA = 2, /** invalid entry */ - SA_ENTRY_DEST = 3, /** entry to be destroyed */ -} se_stat_t; - -struct ll_sa_entry { - /* link into sai->sai_entries */ - struct list_head se_link; - /* link into sai->sai_entries_{received,stated} */ + /** negative values are for error cases */ + SA_ENTRY_INIT = 0, /** init entry */ + SA_ENTRY_SUCC = 1, /** stat succeed */ + SA_ENTRY_INVA = 2, /** invalid entry */ +} se_state_t; + +/* sa_entry is not refcounted: statahead thread allocates it and do async stat, + * and in async stat callback ll_statahead_interpret() will add it into + * sai_cb_entries, later statahead thread will call sa_handle_callback() to + * 
instantiate entry and move it into sai_entries, and then only scanner process + * can access and free it. */ +struct sa_entry { + /* link into sai_cb_entries or sai_entries */ struct list_head se_list; /* link into sai hash table locally */ struct list_head se_hash; - /* entry reference count */ - atomic_t se_refcount; /* entry index in the sai */ - __u64 se_index; + __u64 se_index; /* low layer ldlm lock handle */ - __u64 se_handle; + __u64 se_handle; /* entry status */ - se_stat_t se_stat; + se_state_t se_state; /* entry size, contains name */ - int se_size; + int se_size; /* pointer to async getattr enqueue info */ struct md_enqueue_info *se_minfo; /* pointer to the async getattr request */ struct ptlrpc_request *se_req; /* pointer to the target inode */ - struct inode *se_inode; + struct inode *se_inode; /* entry name */ - struct qstr se_qstr; + struct qstr se_qstr; }; static unsigned int sai_generation = 0; static DEFINE_SPINLOCK(sai_generation_lock); -static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry) +static inline int sa_unhashed(struct sa_entry *entry) { return list_empty(&entry->se_hash); } @@ -94,24 +94,24 @@ static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry) /* * The entry only can be released by the caller, it is necessary to hold lock. */ -static inline int ll_sa_entry_stated(struct ll_sa_entry *entry) +static inline int sa_ready(struct sa_entry *entry) { - smp_rmb(); - return (entry->se_stat != SA_ENTRY_INIT); + smp_rmb(); + return (entry->se_state != SA_ENTRY_INIT); } -static inline int ll_sa_entry_hash(int val) +static inline int sa_hash(int val) { - return val & LL_SA_CACHE_MASK; + return val & LL_SA_CACHE_MASK; } /* * Insert entry to hash SA table. */ static inline void -ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) +sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry) { - int i = ll_sa_entry_hash(entry->se_qstr.hash); + int i = sa_hash(entry->se_qstr.hash); spin_lock(&sai->sai_cache_lock[i]); list_add_tail(&entry->se_hash, &sai->sai_cache[i]); @@ -122,9 +122,9 @@ ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) * Remove entry from SA table. 
*/ static inline void -ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) +sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry) { - int i = ll_sa_entry_hash(entry->se_qstr.hash); + int i = sa_hash(entry->se_qstr.hash); spin_lock(&sai->sai_cache_lock[i]); list_del_init(&entry->se_hash); @@ -137,18 +137,11 @@ static inline int agl_should_run(struct ll_statahead_info *sai, return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid); } -static inline struct ll_sa_entry * -sa_first_received_entry(struct ll_statahead_info *sai) -{ - return list_entry(sai->sai_entries_received.next, - struct ll_sa_entry, se_list); -} - static inline struct ll_inode_info * agl_first_entry(struct ll_statahead_info *sai) { - return list_entry(sai->sai_entries_agl.next, - struct ll_inode_info, lli_agl_list); + return list_entry(sai->sai_agls.next, struct ll_inode_info, + lli_agl_list); } static inline int sa_sent_full(struct ll_statahead_info *sai) @@ -156,14 +149,14 @@ static inline int sa_sent_full(struct ll_statahead_info *sai) return atomic_read(&sai->sai_cache_count) >= sai->sai_max; } -static inline int sa_received_empty(struct ll_statahead_info *sai) +static inline int sa_has_callback(struct ll_statahead_info *sai) { - return list_empty(&sai->sai_entries_received); + return !list_empty(&sai->sai_interim_entries); } static inline int agl_list_empty(struct ll_statahead_info *sai) { - return list_empty(&sai->sai_entries_agl); + return list_empty(&sai->sai_agls); } /** @@ -188,56 +181,29 @@ static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index) sai->sai_index); } -/* - * Insert it into sai_entries tail when init. - */ -static struct ll_sa_entry * -ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, - int len) +/* allocate sa_entry and add it into hash to let scanner process to find it */ +static struct sa_entry * +sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len) { - struct ll_inode_info *lli; - struct ll_sa_entry *entry; - int entry_size; - char *dname; - ENTRY; + struct ll_inode_info *lli; + struct sa_entry *entry; + int entry_size; + char *dname; + ENTRY; - entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4; - OBD_ALLOC(entry, entry_size); - if (unlikely(entry == NULL)) - RETURN(ERR_PTR(-ENOMEM)); + entry_size = sizeof(struct sa_entry) + (len & ~3) + 4; + OBD_ALLOC(entry, entry_size); + if (unlikely(entry == NULL)) + RETURN(ERR_PTR(-ENOMEM)); CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n", len, name, entry, index); - entry->se_index = index; + entry->se_index = index; - /* - * Statahead entry reference rules: - * - * 1) When statahead entry is initialized, its reference is set as 2. - * One reference is used by the directory scanner. When the scanner - * searches the statahead cache for the given name, it can perform - * lockless hash lookup (only the scanner can remove entry from hash - * list), and once found, it needn't to call "atomic_inc()" for the - * entry reference. So the performance is improved. After using the - * statahead entry, the scanner will call "atomic_dec()" to drop the - * reference held when initialization. If it is the last reference, - * the statahead entry will be freed. - * - * 2) All other threads, including statahead thread and ptlrpcd thread, - * when they process the statahead entry, the reference for target - * should be held to guarantee the entry will not be released by the - * directory scanner. 
After processing the entry, these threads will - * drop the entry reference. If it is the last reference, the entry - * will be freed. - * - * The second reference when initializes the statahead entry is used - * by the statahead thread, following the rule 2). - */ - atomic_set(&entry->se_refcount, 2); - entry->se_stat = SA_ENTRY_INIT; + entry->se_state = SA_ENTRY_INIT; entry->se_size = entry_size; - dname = (char *)entry + sizeof(struct ll_sa_entry); + dname = (char *)entry + sizeof(struct sa_entry); memcpy(dname, name, len); dname[len] = 0; entry->se_qstr.hash = full_name_hash(name, len); @@ -246,9 +212,8 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, lli = ll_i2info(sai->sai_inode); spin_lock(&lli->lli_sa_lock); - list_add_tail(&entry->se_link, &sai->sai_entries); INIT_LIST_HEAD(&entry->se_list); - ll_sa_entry_enhash(sai, entry); + sa_rehash(sai, entry); spin_unlock(&lli->lli_sa_lock); atomic_inc(&sai->sai_cache_count); @@ -256,152 +221,123 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, RETURN(entry); } -/* - * Used by the directory scanner to search entry with name. - * - * Only the caller can remove the entry from hash, so it is unnecessary to hold - * hash lock. It is caller's duty to release the init refcount on the entry, so - * it is also unnecessary to increase refcount on the entry. - */ -static struct ll_sa_entry * -ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr) +/* free sa_entry which should have been unhashed and not in any list */ +static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry) { - struct ll_sa_entry *entry; - int i = ll_sa_entry_hash(qstr->hash); + CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n", + entry->se_qstr.len, entry->se_qstr.name, entry, + entry->se_index); - list_for_each_entry(entry, &sai->sai_cache[i], se_hash) { - if (entry->se_qstr.hash == qstr->hash && - entry->se_qstr.len == qstr->len && - memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0) - return entry; - } - return NULL; + LASSERT(list_empty(&entry->se_list)); + LASSERT(sa_unhashed(entry)); + + OBD_FREE(entry, entry->se_size); + atomic_dec(&sai->sai_cache_count); } -/* - * Used by the async getattr request callback to find entry with index. - * - * Inside lli_sa_lock to prevent others to change the list during the search. - * It needs to increase entry refcount before returning to guarantee that the - * entry cannot be freed by others. +/* find sa_entry by name, used by directory scanner, lock is not needed because + * only scanner can remove the entry from hash. 
*/ -static struct ll_sa_entry * -ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index) +static struct sa_entry * +sa_get(struct ll_statahead_info *sai, const struct qstr *qstr) { - struct ll_sa_entry *entry; + struct sa_entry *entry; + int i = sa_hash(qstr->hash); - list_for_each_entry(entry, &sai->sai_entries, se_link) { - if (entry->se_index == index) { - LASSERT(atomic_read(&entry->se_refcount) > 0); - atomic_inc(&entry->se_refcount); + list_for_each_entry(entry, &sai->sai_cache[i], se_hash) { + if (entry->se_qstr.hash == qstr->hash && + entry->se_qstr.len == qstr->len && + memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0) return entry; - } - if (entry->se_index > index) - break; } return NULL; } -static void ll_sa_entry_put(struct ll_statahead_info *sai, - struct ll_sa_entry *entry) -{ - if (atomic_dec_and_test(&entry->se_refcount)) { - CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n", - entry->se_qstr.len, entry->se_qstr.name, entry, - entry->se_index); - - LASSERT(list_empty(&entry->se_link)); - LASSERT(list_empty(&entry->se_list)); - LASSERT(ll_sa_entry_unhashed(entry)); - - if (entry->se_inode) - iput(entry->se_inode); - - OBD_FREE(entry, entry->se_size); - atomic_dec(&sai->sai_cache_count); - } -} - static inline void -do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) +sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry) { struct ll_inode_info *lli = ll_i2info(sai->sai_inode); - LASSERT(!ll_sa_entry_unhashed(entry)); - LASSERT(!list_empty(&entry->se_link)); + LASSERT(!sa_unhashed(entry)); + LASSERT(!list_empty(&entry->se_list)); + LASSERT(sa_ready(entry)); - ll_sa_entry_unhash(sai, entry); + sa_unhash(sai, entry); spin_lock(&lli->lli_sa_lock); - entry->se_stat = SA_ENTRY_DEST; - list_del_init(&entry->se_link); - if (likely(!list_empty(&entry->se_list))) - list_del_init(&entry->se_list); + list_del_init(&entry->se_list); spin_unlock(&lli->lli_sa_lock); - ll_sa_entry_put(sai, entry); + if (entry->se_inode != NULL) + iput(entry->se_inode); + + sa_free(sai, entry); } -/* - * Delete it from sai_entries_stated list when fini. - */ +/* called by scanner after use, sa_entry will be killed */ static void -ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) +sa_put(struct ll_statahead_info *sai, struct sa_entry *entry) { - struct ll_sa_entry *pos, *next; + struct sa_entry *tmp, *next; - if (entry) - do_sa_entry_fini(sai, entry); + if (entry != NULL && entry->se_state == SA_ENTRY_SUCC) { + struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); - /* drop old entry, only 'scanner' process does this, no need to lock */ - list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) { - if (!is_omitted_entry(sai, pos->se_index)) + sai->sai_hit++; + sai->sai_consecutive_miss = 0; + sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max); + } else { + sai->sai_miss++; + sai->sai_consecutive_miss++; + } + + if (entry != NULL) + sa_kill(sai, entry); + + /* kill old completed entries, only scanner process does this, no need + * to lock */ + list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) { + if (!is_omitted_entry(sai, tmp->se_index)) break; - /* keep those whose statahead RPC not finished */ - if (pos->se_stat == SA_ENTRY_SUCC || - pos->se_stat == SA_ENTRY_INVA) - do_sa_entry_fini(sai, pos); + sa_kill(sai, tmp); } + + wake_up(&sai->sai_thread.t_ctl_waitq); } -/* - * Inside lli_sa_lock. 
- */ -static void -__sa_entry_post_stat(struct ll_statahead_info *sai, struct ll_sa_entry *entry, - se_stat_t stat) +/* update state and sort add entry to sai_entries by index, return true if + * scanner is waiting on this entry. */ +static bool +__sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret) { - struct ll_sa_entry *se; - struct list_head *pos = &sai->sai_entries_stated; + struct sa_entry *se; + struct list_head *pos = &sai->sai_entries; - LASSERT(entry->se_stat == SA_ENTRY_INIT); + LASSERT(!sa_ready(entry)); + LASSERT(list_empty(&entry->se_list)); - if (!list_empty(&entry->se_list)) - list_del_init(&entry->se_list); + entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC; - list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) { + list_for_each_entry_reverse(se, &sai->sai_entries, se_list) { if (se->se_index < entry->se_index) { pos = &se->se_list; break; } } - list_add(&entry->se_list, pos); - entry->se_stat = stat; + + return (entry->se_index == sai->sai_index_wait); } -/* - * Move entry to sai_entries_stated and sort with the index. - * \retval 1 -- entry to be destroyed. - * \retval 0 -- entry is inserted into stated list. - */ +/* release resources used in async stat RPC, complete entry information and + * wakeup if necessary */ static void -sa_entry_post_stat(struct ll_statahead_info *sai, struct ll_sa_entry *entry, - se_stat_t stat) +sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret) { struct ll_inode_info *lli = ll_i2info(sai->sai_inode); struct md_enqueue_info *minfo = entry->se_minfo; struct ptlrpc_request *req = entry->se_req; + bool wakeup; /* release resources used in RPC */ if (minfo) { @@ -417,12 +353,15 @@ sa_entry_post_stat(struct ll_statahead_info *sai, struct ll_sa_entry *entry, } spin_lock(&lli->lli_sa_lock); - __sa_entry_post_stat(sai, entry, stat); + wakeup = __sa_make_ready(sai, entry, ret); spin_unlock(&lli->lli_sa_lock); + + if (wakeup) + wake_up(&sai->sai_waitq); } /* - * Insert inode into the list of sai_entries_agl. + * Insert inode into the list of sai_agls. 
*/ static void ll_agl_add(struct ll_statahead_info *sai, struct inode *inode, int index) @@ -442,7 +381,7 @@ static void ll_agl_add(struct ll_statahead_info *sai, spin_lock(&parent->lli_agl_lock); if (agl_list_empty(sai)) added = 1; - list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl); + list_add_tail(&child->lli_agl_list, &sai->sai_agls); spin_unlock(&parent->lli_agl_lock); } else { spin_unlock(&child->lli_agl_lock); @@ -476,10 +415,9 @@ static struct ll_statahead_info *ll_sai_alloc(void) init_waitqueue_head(&sai->sai_thread.t_ctl_waitq); init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq); + INIT_LIST_HEAD(&sai->sai_interim_entries); INIT_LIST_HEAD(&sai->sai_entries); - INIT_LIST_HEAD(&sai->sai_entries_received); - INIT_LIST_HEAD(&sai->sai_entries_stated); - INIT_LIST_HEAD(&sai->sai_entries_agl); + INIT_LIST_HEAD(&sai->sai_agls); for (i = 0; i < LL_SA_CACHE_SIZE; i++) { INIT_LIST_HEAD(&sai->sai_cache[i]); @@ -509,7 +447,7 @@ static void ll_sai_put(struct ll_statahead_info *sai) struct ll_inode_info *lli = ll_i2info(sai->sai_inode); if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) { - struct ll_sa_entry *entry, *next; + struct sa_entry *entry, *next; struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); lli->lli_sai = NULL; @@ -518,14 +456,14 @@ static void ll_sai_put(struct ll_statahead_info *sai) LASSERT(thread_is_stopped(&sai->sai_thread)); LASSERT(thread_is_stopped(&sai->sai_agl_thread)); LASSERT(sai->sai_sent == sai->sai_replied); + LASSERT(!sa_has_callback(sai)); list_for_each_entry_safe(entry, next, &sai->sai_entries, - se_link) - do_sa_entry_fini(sai, entry); + se_list) + sa_kill(sai, entry); LASSERT(atomic_read(&sai->sai_cache_count) == 0); LASSERT(agl_list_empty(sai)); - LASSERT(atomic_read(&sai->sai_refcount) == 0); iput(sai->sai_inode); OBD_FREE_PTR(sai); @@ -533,13 +471,13 @@ static void ll_sai_put(struct ll_statahead_info *sai) } } -/* Do NOT forget to drop inode refcount when into sai_entries_agl. */ +/* Do NOT forget to drop inode refcount when into sai_agls. */ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai) { - struct ll_inode_info *lli = ll_i2info(inode); - __u64 index = lli->lli_agl_index; - int rc; - ENTRY; + struct ll_inode_info *lli = ll_i2info(inode); + __u64 index = lli->lli_agl_index; + int rc; + ENTRY; LASSERT(list_empty(&lli->lli_agl_list)); @@ -596,18 +534,19 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai) EXIT; } -/* prepare inode for received statahead entry, and add it into agl list */ -static void sa_post_one(struct ll_statahead_info *sai, - struct ll_sa_entry *entry) +/* prepare inode for sa entry, add it into agl list, now sa_entry is ready + * to be used by scanner process. 
*/ +static void sa_instantiate(struct ll_statahead_info *sai, + struct sa_entry *entry) { - struct inode *dir = sai->sai_inode; - struct inode *child; - struct md_enqueue_info *minfo; - struct lookup_intent *it; - struct ptlrpc_request *req; - struct mdt_body *body; - int rc = 0; - ENTRY; + struct inode *dir = sai->sai_inode; + struct inode *child; + struct md_enqueue_info *minfo; + struct lookup_intent *it; + struct ptlrpc_request *req; + struct mdt_body *body; + int rc = 0; + ENTRY; LASSERT(entry->se_handle != 0); @@ -651,8 +590,9 @@ static void sa_post_one(struct ll_statahead_info *sai, if (rc) GOTO(out, rc); - CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"(%p)\n", + CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n", ll_get_fsname(child->i_sb, NULL, 0), + entry->se_qstr.len, entry->se_qstr.name, PFID(ll_inode2fid(child)), child); ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL); @@ -664,36 +604,33 @@ static void sa_post_one(struct ll_statahead_info *sai, EXIT; out: - /* The "sa_entry_post_stat()" will drop related ldlm ibits lock - * reference count by calling "ll_intent_drop_lock()" in spite of the - * above operations failed or not. Do not worry about calling - * "ll_intent_drop_lock()" more than once. */ - sa_entry_post_stat(sai, entry, rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC); - if (entry->se_index == sai->sai_index_wait) - wake_up(&sai->sai_waitq); - ll_sa_entry_put(sai, entry); + /* sa_make_ready() will drop ldlm ibits lock refcount by calling + * ll_intent_drop_lock() in spite of failures. Do not worry about + * calling ll_intent_drop_lock() more than once. */ + sa_make_ready(sai, entry, rc); } -static void ll_post_statahead(struct ll_statahead_info *sai) +/* once there are async stat replies, instantiate sa_entry */ +static void sa_handle_callback(struct ll_statahead_info *sai) { struct ll_inode_info *lli; lli = ll_i2info(sai->sai_inode); - while (!sa_received_empty(sai)) { - struct ll_sa_entry *entry; + while (sa_has_callback(sai)) { + struct sa_entry *entry; spin_lock(&lli->lli_sa_lock); - if (unlikely(sa_received_empty(sai))) { + if (unlikely(!sa_has_callback(sai))) { spin_unlock(&lli->lli_sa_lock); break; } - entry = sa_first_received_entry(sai); - atomic_inc(&entry->se_refcount); + entry = list_entry(sai->sai_interim_entries.next, + struct sa_entry, se_list); list_del_init(&entry->se_list); spin_unlock(&lli->lli_sa_lock); - sa_post_one(sai, entry); + sa_instantiate(sai, entry); } spin_lock(&lli->lli_agl_lock); @@ -711,30 +648,35 @@ static void ll_post_statahead(struct ll_statahead_info *sai) spin_unlock(&lli->lli_agl_lock); } +/* callback for async stat, because this is called in ptlrpcd context, we only + * put sa_entry in sai_cb_entries list, and let sa_handle_callback() to really + * prepare inode and instantiate sa_entry later. 
*/ static int ll_statahead_interpret(struct ptlrpc_request *req, struct md_enqueue_info *minfo, int rc) { struct lookup_intent *it = &minfo->mi_it; struct inode *dir = minfo->mi_dir; struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai; - struct ll_sa_entry *entry; - int wakeup; + struct ll_statahead_info *sai = lli->lli_sai; + struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata; + bool wakeup; ENTRY; if (it_disposition(it, DISP_LOOKUP_NEG)) rc = -ENOENT; - sai = ll_sai_get(dir); + /* because statahead thread will wait for all inflight RPC to finish, + * sai should be always valid, no need to refcount */ LASSERT(sai != NULL); LASSERT(!thread_is_stopped(&sai->sai_thread)); + LASSERT(entry != NULL); + + CDEBUG(D_READA, "sa_entry %.*s rc %d\n", + entry->se_qstr.len, entry->se_qstr.name, rc); spin_lock(&lli->lli_sa_lock); - entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata); - LASSERT(entry != NULL); if (rc != 0) { - __sa_entry_post_stat(sai, entry, SA_ENTRY_INVA); - wakeup = (entry->se_index == sai->sai_index_wait); + wakeup = __sa_make_ready(sai, entry, rc); } else { entry->se_minfo = minfo; entry->se_req = ptlrpc_request_addref(req); @@ -744,26 +686,23 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, * with parent's lock held, for example: unlink. */ entry->se_handle = it->d.lustre.it_lock_handle; ll_intent_drop_lock(it); - wakeup = sa_received_empty(sai); - list_add_tail(&entry->se_list, &sai->sai_entries_received); + wakeup = !sa_has_callback(sai); + list_add_tail(&entry->se_list, &sai->sai_interim_entries); } sai->sai_replied++; - spin_unlock(&lli->lli_sa_lock); - - ll_sa_entry_put(sai, entry); if (wakeup) wake_up(&sai->sai_thread.t_ctl_waitq); + spin_unlock(&lli->lli_sa_lock); if (rc != 0) { ll_intent_release(it); iput(dir); OBD_FREE_PTR(minfo); } - ll_sai_put(sai); RETURN(rc); } -static void sa_args_fini(struct md_enqueue_info *minfo, +static void sa_fini_data(struct md_enqueue_info *minfo, struct ldlm_enqueue_info *einfo) { LASSERT(minfo && einfo); @@ -783,10 +722,10 @@ static void sa_args_fini(struct md_enqueue_info *minfo, * "ocapa". So here reserve "op_data.op_capa[1,2]" in "pcapa" before calling * "md_intent_getattr_async". 
*/ -static int sa_args_init(struct inode *dir, struct inode *child, - struct ll_sa_entry *entry, struct md_enqueue_info **pmi, - struct ldlm_enqueue_info **pei, - struct obd_capa **pcapa) +static int sa_prep_data(struct inode *dir, struct inode *child, + struct sa_entry *entry, struct md_enqueue_info **pmi, + struct ldlm_enqueue_info **pei, + struct obd_capa **pcapa) { struct qstr *qstr = &entry->se_qstr; struct md_enqueue_info *minfo; @@ -814,7 +753,7 @@ static int sa_args_init(struct inode *dir, struct inode *child, minfo->mi_it.it_op = IT_GETATTR; minfo->mi_dir = igrab(dir); minfo->mi_cb = ll_statahead_interpret; - minfo->mi_cbdata = entry->se_index; + minfo->mi_cbdata = entry; einfo->ei_type = LDLM_IBITS; einfo->ei_mode = it_to_lock_mode(&minfo->mi_it); @@ -831,27 +770,27 @@ static int sa_args_init(struct inode *dir, struct inode *child, return 0; } -static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry) +static int sa_lookup(struct inode *dir, struct sa_entry *entry) { - struct md_enqueue_info *minfo; - struct ldlm_enqueue_info *einfo; - struct obd_capa *capas[2]; - int rc; - ENTRY; + struct md_enqueue_info *minfo; + struct ldlm_enqueue_info *einfo; + struct obd_capa *capas[2]; + int rc; + ENTRY; - rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas); - if (rc) - RETURN(rc); - - rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo); - if (!rc) { - capa_put(capas[0]); - capa_put(capas[1]); - } else { - sa_args_fini(minfo, einfo); - } + rc = sa_prep_data(dir, NULL, entry, &minfo, &einfo, capas); + if (rc) + RETURN(rc); - RETURN(rc); + rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo); + if (!rc) { + capa_put(capas[0]); + capa_put(capas[1]); + } else { + sa_fini_data(minfo, einfo); + } + + RETURN(rc); } /** @@ -860,72 +799,72 @@ static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry) * \retval 0 -- will send stat-ahead request * \retval others -- prepare stat-ahead request failed */ -static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry, - struct dentry *dentry) +static int sa_revalidate(struct inode *dir, struct sa_entry *entry, + struct dentry *dentry) { - struct inode *inode = dentry->d_inode; - struct lookup_intent it = { .it_op = IT_GETATTR, - .d.lustre.it_lock_handle = 0 }; - struct md_enqueue_info *minfo; - struct ldlm_enqueue_info *einfo; - struct obd_capa *capas[2]; - int rc; - ENTRY; - - if (unlikely(inode == NULL)) - RETURN(1); - - if (d_mountpoint(dentry)) - RETURN(1); - - entry->se_inode = igrab(inode); - rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL); - if (rc == 1) { - entry->se_handle = it.d.lustre.it_lock_handle; - ll_intent_release(&it); - RETURN(1); - } + struct inode *inode = dentry->d_inode; + struct lookup_intent it = { .it_op = IT_GETATTR, + .d.lustre.it_lock_handle = 0 }; + struct md_enqueue_info *minfo; + struct ldlm_enqueue_info *einfo; + struct obd_capa *capas[2]; + int rc; + ENTRY; - rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas); - if (rc) { - entry->se_inode = NULL; - iput(inode); - RETURN(rc); - } + if (unlikely(inode == NULL)) + RETURN(1); - rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo); - if (!rc) { - capa_put(capas[0]); - capa_put(capas[1]); - } else { - entry->se_inode = NULL; - iput(inode); - sa_args_fini(minfo, einfo); - } + if (d_mountpoint(dentry)) + RETURN(1); + + entry->se_inode = igrab(inode); + rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode), + NULL); + if (rc == 1) { + entry->se_handle = 
it.d.lustre.it_lock_handle; + ll_intent_release(&it); + RETURN(1); + } + + rc = sa_prep_data(dir, inode, entry, &minfo, &einfo, capas); + if (rc) { + entry->se_inode = NULL; + iput(inode); + RETURN(rc); + } - RETURN(rc); + rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo); + if (!rc) { + capa_put(capas[0]); + capa_put(capas[1]); + } else { + entry->se_inode = NULL; + iput(inode); + sa_fini_data(minfo, einfo); + } + + RETURN(rc); } -static void ll_statahead_one(struct dentry *parent, const char *name, - const int namelen) +static void sa_statahead(struct dentry *parent, const char *name, int len) { - struct inode *dir = parent->d_inode; - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai = lli->lli_sai; - struct dentry *dentry = NULL; - struct ll_sa_entry *entry; - int rc; + struct inode *dir = parent->d_inode; + struct ll_inode_info *lli = ll_i2info(dir); + struct ll_statahead_info *sai = lli->lli_sai; + struct dentry *dentry = NULL; + struct sa_entry *entry; + int rc; ENTRY; - entry = ll_sa_entry_alloc(sai, sai->sai_index, name,namelen); + entry = sa_alloc(sai, sai->sai_index, name, len); if (IS_ERR(entry)) RETURN_EXIT; dentry = d_lookup(parent, &entry->se_qstr); if (!dentry) { - rc = do_sa_lookup(dir, entry); + rc = sa_lookup(dir, entry); } else { - rc = do_sa_revalidate(dir, entry, dentry); + rc = sa_revalidate(dir, entry, dentry); if (rc == 1 && agl_should_run(sai, dentry->d_inode)) ll_agl_add(sai, dentry->d_inode, entry->se_index); } @@ -933,18 +872,12 @@ static void ll_statahead_one(struct dentry *parent, const char *name, if (dentry != NULL) dput(dentry); - if (rc) { - sa_entry_post_stat(sai, entry, - rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC); - if (entry->se_index == sai->sai_index_wait) - wake_up(&sai->sai_waitq); - } else { + if (rc != 0) + sa_make_ready(sai, entry, rc); + else sai->sai_sent++; - } sai->sai_index++; - /* drop one refcount on entry by ll_sa_entry_alloc */ - ll_sa_entry_put(sai, entry); EXIT; } @@ -1162,16 +1095,16 @@ static int ll_statahead_thread(void *arg) do { l_wait_event(thread->t_ctl_waitq, !sa_sent_full(sai) || - !sa_received_empty(sai) || + sa_has_callback(sai) || !agl_list_empty(sai) || !thread_is_running(thread), &lwi); - ll_post_statahead(sai); + sa_handle_callback(sai); } while (sa_sent_full(sai) && thread_is_running(thread)); - ll_statahead_one(parent, name, namelen); + sa_statahead(parent, name, namelen); } pos = le64_to_cpu(dp->ldp_hash_end); @@ -1205,12 +1138,12 @@ static int ll_statahead_thread(void *arg) * for file release to stop me. */ while (thread_is_running(thread)) { l_wait_event(thread->t_ctl_waitq, - !sa_received_empty(sai) || + sa_has_callback(sai) || !agl_list_empty(sai) || !thread_is_running(thread), &lwi); - ll_post_statahead(sai); + sa_handle_callback(sai); } EXIT; @@ -1240,8 +1173,8 @@ out: sai->sai_sent == sai->sai_replied, &lwi); } - /* release resources held by received entries. */ - ll_post_statahead(sai); + /* release resources held by statahead RPCs */ + sa_handle_callback(sai); spin_lock(&lli->lli_sa_lock); thread_set_flags(thread, SVC_STOPPED); @@ -1296,7 +1229,7 @@ void ll_deauthorize_statahead(struct inode *dir, void *key) if (sai != NULL && thread_is_running(&sai->sai_thread)) { /* * statahead thread may not quit yet because it needs to cache - * stated entries, now it's time to tell it to quit. + * entries, now it's time to tell it to quit. 
*/ thread_set_flags(&sai->sai_thread, SVC_STOPPING); wake_up(&sai->sai_thread.t_ctl_waitq); @@ -1439,29 +1372,12 @@ out: return rc; } -static void -ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry) -{ - if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC) { - struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); - - sai->sai_hit++; - sai->sai_consecutive_miss = 0; - sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max); - } else { - sai->sai_miss++; - sai->sai_consecutive_miss++; - } - ll_sa_entry_fini(sai, entry); - wake_up(&sai->sai_thread.t_ctl_waitq); -} - static int revalidate_statahead_dentry(struct inode *dir, struct ll_statahead_info *sai, struct dentry **dentryp, int only_unplug) { - struct ll_sa_entry *entry = NULL; + struct sa_entry *entry = NULL; struct l_wait_info lwi = { 0 }; int rc = 0; ENTRY; @@ -1493,31 +1409,31 @@ static int revalidate_statahead_dentry(struct inode *dir, } } - entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name); + entry = sa_get(sai, &(*dentryp)->d_name); if (entry == NULL || only_unplug) { - ll_sai_unplug(sai, entry); + sa_put(sai, entry); RETURN(entry ? 1 : -EAGAIN); } /* if statahead is busy in readdir, help it do post-work */ - if (!ll_sa_entry_stated(entry) && sai->sai_in_readpage) - ll_post_statahead(sai); + if (!sa_ready(entry) && sai->sai_in_readpage) + sa_handle_callback(sai); - if (!ll_sa_entry_stated(entry)) { + if (!sa_ready(entry)) { sai->sai_index_wait = entry->se_index; lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL, LWI_ON_SIGNAL_NOOP, NULL); rc = l_wait_event(sai->sai_waitq, - ll_sa_entry_stated(entry) || + sa_ready(entry) || thread_is_stopped(&sai->sai_thread), &lwi); if (rc < 0) { - ll_sai_unplug(sai, entry); + sa_put(sai, entry); RETURN(-EAGAIN); } } - if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode != NULL) { + if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode != NULL) { struct inode *inode = entry->se_inode; struct lookup_intent it = { .it_op = IT_GETATTR, .d.lustre.it_lock_handle = @@ -1532,7 +1448,7 @@ static int revalidate_statahead_dentry(struct inode *dir, alias = ll_splice_alias(inode, *dentryp); if (IS_ERR(alias)) { - ll_sai_unplug(sai, entry); + sa_put(sai, entry); RETURN(PTR_ERR(alias)); } *dentryp = alias; @@ -1548,7 +1464,7 @@ static int revalidate_statahead_dentry(struct inode *dir, (*dentryp)->d_name.name, PFID(ll_inode2fid((*dentryp)->d_inode)), PFID(ll_inode2fid(inode))); - ll_sai_unplug(sai, entry); + sa_put(sai, entry); RETURN(-ESTALE); } else { iput(inode); @@ -1562,7 +1478,7 @@ static int revalidate_statahead_dentry(struct inode *dir, } } - ll_sai_unplug(sai, entry); + sa_put(sai, entry); RETURN(rc); }
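
The ownership model that lets this patch drop the per-entry refcount is: the statahead thread allocates an sa_entry and hashes it (sa_alloc()), the ptlrpcd callback ll_statahead_interpret() only parks the reply on sai_interim_entries, the statahead thread later instantiates the entry and publishes it on sai_entries (sa_handle_callback() -> sa_instantiate() -> sa_make_ready()), and from that point only the scanner process may touch it, via sa_get()/sa_put(), which also frees it. The sketch below is a minimal userspace model of that hand-off, not the kernel code: the locking (lli_sa_lock), wait queues, RPC and inode handling, and the per-bucket hash are all omitted, the two global lists and the main() driver are invented for illustration, and the list handling is simplified to a singly linked list.

/*
 * Userspace sketch of the sa_entry ownership hand-off described above.
 * Illustrative only: it borrows the names sa_alloc(), sa_handle_callback()
 * and sa_put() from the patch, but the bodies are simplified stand-ins,
 * not the Lustre implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef enum {
	SA_ENTRY_INIT = 0,	/* async stat not replied yet */
	SA_ENTRY_SUCC = 1,	/* stat succeeded, entry usable */
	SA_ENTRY_INVA = 2,	/* stat failed, entry invalid */
} se_state_t;

struct sa_entry {
	struct sa_entry	*se_next;	/* stand-in for se_list linkage */
	se_state_t	 se_state;
	char		 se_name[64];
};

/* owned by the statahead thread until published, then by the scanner */
static struct sa_entry *interim_entries;	/* ~ sai_interim_entries */
static struct sa_entry *ready_entries;		/* ~ sai_entries */

/* statahead thread: allocate an entry before issuing the async stat */
static struct sa_entry *sa_alloc(const char *name)
{
	struct sa_entry *entry = calloc(1, sizeof(*entry));

	if (entry == NULL)
		abort();
	snprintf(entry->se_name, sizeof(entry->se_name), "%s", name);
	entry->se_state = SA_ENTRY_INIT;
	return entry;
}

/* ptlrpcd callback: only park the reply, never instantiate here */
static void statahead_interpret(struct sa_entry *entry)
{
	entry->se_next = interim_entries;
	interim_entries = entry;
}

/* statahead thread: instantiate parked entries and publish them */
static void sa_handle_callback(int rc)
{
	while (interim_entries != NULL) {
		struct sa_entry *entry = interim_entries;

		interim_entries = entry->se_next;
		entry->se_state = rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC;
		entry->se_next = ready_entries;
		ready_entries = entry;	/* scanner owns it from here on */
	}
}

/* scanner: look up a published entry by name, use it, then free it */
static void sa_put(const char *name)
{
	struct sa_entry **prev = &ready_entries;

	while (*prev != NULL) {
		struct sa_entry *entry = *prev;

		if (strcmp(entry->se_name, name) == 0) {
			printf("%s: %s\n", entry->se_name,
			       entry->se_state == SA_ENTRY_SUCC ?
			       "statahead hit" : "statahead miss");
			*prev = entry->se_next;
			free(entry);	/* only the scanner frees entries */
			return;
		}
		prev = &entry->se_next;
	}
	printf("%s: no statahead entry\n", name);
}

int main(void)
{
	statahead_interpret(sa_alloc("foo"));	/* async reply arrives */
	sa_handle_callback(0);			/* statahead thread publishes */
	sa_put("foo");				/* scanner consumes and frees */
	return 0;
}

Because each entry has a single owner at every stage, the only cross-thread contract in the real code is that the scanner must not use an entry before it observes the state change in sa_ready() (hence the smp_rmb() the patch keeps), and the RPC callback can safely carry a bare struct sa_entry pointer in mi_cbdata, since the statahead thread waits for all in-flight RPCs to complete before the sai is torn down.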