X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fstatahead.c;h=bb75469565e55881c5076ddb5eab92935b8acd79;hp=315e7621d777693eb82e16a2ed18ff6d0ea5745b;hb=18667c0697276c0038f0237a9444cbb132d30fbd;hpb=1cbf25caab01ac561bb35053ca34fb6eb7336b3d diff --git a/lustre/llite/statahead.c b/lustre/llite/statahead.c index 315e762..bb75469 100644 --- a/lustre/llite/statahead.c +++ b/lustre/llite/statahead.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -28,6 +26,8 @@ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -37,7 +37,6 @@ #include #include #include -#include #include #include @@ -46,74 +45,133 @@ #include #include #include -#include #include "llite_internal.h" -struct ll_sai_entry { - cfs_list_t se_list; - unsigned int se_index; - int se_stat; - struct ptlrpc_request *se_req; - struct md_enqueue_info *se_minfo; -}; - -enum { - SA_ENTRY_UNSTATED = 0, - SA_ENTRY_STATED +#define SA_OMITTED_ENTRY_MAX 8ULL + +typedef enum { + /** negative values are for error cases */ + SA_ENTRY_INIT = 0, /** init entry */ + SA_ENTRY_SUCC = 1, /** stat succeed */ + SA_ENTRY_INVA = 2, /** invalid entry */ + SA_ENTRY_DEST = 3, /** entry to be destroyed */ +} se_stat_t; + +struct ll_sa_entry { + /* link into sai->sai_entries */ + cfs_list_t se_link; + /* link into sai->sai_entries_{received,stated} */ + cfs_list_t se_list; + /* link into sai hash table locally */ + cfs_list_t se_hash; + /* entry reference count */ + atomic_t se_refcount; + /* entry index in the sai */ + __u64 se_index; + /* low layer ldlm lock handle */ + __u64 se_handle; + /* entry status */ + se_stat_t se_stat; + /* entry size, contains name */ + int se_size; + /* pointer to async getattr enqueue info */ + struct md_enqueue_info *se_minfo; + /* pointer to the async getattr request */ + struct ptlrpc_request *se_req; + /* pointer to the target inode */ + struct inode *se_inode; + /* entry name */ + struct qstr se_qstr; }; static unsigned int sai_generation = 0; -static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(sai_generation_lock); -/** - * Check whether first entry was stated already or not. - * No need to hold lli_sa_lock, for: - * (1) it is me that remove entry from the list - * (2) the statahead thread only add new entry to the list +static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry) +{ + return cfs_list_empty(&entry->se_hash); +} + +/* + * The entry only can be released by the caller, it is necessary to hold lock. 
*/ -static int ll_sai_entry_stated(struct ll_statahead_info *sai) +static inline int ll_sa_entry_stated(struct ll_sa_entry *entry) { - struct ll_sai_entry *entry; - int rc = 0; - - if (!cfs_list_empty(&sai->sai_entries_stated)) { - entry = cfs_list_entry(sai->sai_entries_stated.next, - struct ll_sai_entry, se_list); - if (entry->se_index == sai->sai_index_next) - rc = 1; - } - return rc; + smp_rmb(); + return (entry->se_stat != SA_ENTRY_INIT); } -static inline int sa_received_empty(struct ll_statahead_info *sai) +static inline int ll_sa_entry_hash(int val) { - return cfs_list_empty(&sai->sai_entries_received); + return val & LL_SA_CACHE_MASK; } -static inline int sa_not_full(struct ll_statahead_info *sai) +/* + * Insert entry to hash SA table. + */ +static inline void +ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) { - return !!(sai->sai_index < sai->sai_index_next + sai->sai_max); + int i = ll_sa_entry_hash(entry->se_qstr.hash); + + spin_lock(&sai->sai_cache_lock[i]); + cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]); + spin_unlock(&sai->sai_cache_lock[i]); } -static inline int sa_is_running(struct ll_statahead_info *sai) +/* + * Remove entry from SA table. + */ +static inline void +ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) { - return !!(sai->sai_thread.t_flags & SVC_RUNNING); + int i = ll_sa_entry_hash(entry->se_qstr.hash); + + spin_lock(&sai->sai_cache_lock[i]); + cfs_list_del_init(&entry->se_hash); + spin_unlock(&sai->sai_cache_lock[i]); } -static inline int sa_is_stopping(struct ll_statahead_info *sai) +static inline int agl_should_run(struct ll_statahead_info *sai, + struct inode *inode) { - return !!(sai->sai_thread.t_flags & SVC_STOPPING); + return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid); } -static inline int sa_is_stopped(struct ll_statahead_info *sai) +static inline struct ll_sa_entry * +sa_first_received_entry(struct ll_statahead_info *sai) { - return !!(sai->sai_thread.t_flags & SVC_STOPPED); + return cfs_list_entry(sai->sai_entries_received.next, + struct ll_sa_entry, se_list); +} + +static inline struct ll_inode_info * +agl_first_entry(struct ll_statahead_info *sai) +{ + return cfs_list_entry(sai->sai_entries_agl.next, + struct ll_inode_info, lli_agl_list); +} + +static inline int sa_sent_full(struct ll_statahead_info *sai) +{ + return atomic_read(&sai->sai_cache_count) >= sai->sai_max; +} + +static inline int sa_received_empty(struct ll_statahead_info *sai) +{ + return cfs_list_empty(&sai->sai_entries_received); +} + +static inline int agl_list_empty(struct ll_statahead_info *sai) +{ + return cfs_list_empty(&sai->sai_entries_agl); } /** * (1) hit ratio less than 80% * or * (2) consecutive miss more than 8 + * then means low hit. */ static inline int sa_low_hit(struct ll_statahead_info *sai) { @@ -121,415 +179,602 @@ static inline int sa_low_hit(struct ll_statahead_info *sai) (sai->sai_consecutive_miss > 8)); } -/** - * process the deleted entry's member and free the entry. - * (1) release intent - * (2) free md_enqueue_info - * (3) drop dentry's ref count - * (4) release request's ref count +/* + * If the given index is behind of statahead window more than + * SA_OMITTED_ENTRY_MAX, then it is old. 
*/ -static void ll_sai_entry_cleanup(struct ll_sai_entry *entry, int free) +static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index) +{ + return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX < + sai->sai_index); +} + +/* + * Insert it into sai_entries tail when init. + */ +static struct ll_sa_entry * +ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index, + const char *name, int len) +{ + struct ll_inode_info *lli; + struct ll_sa_entry *entry; + int entry_size; + char *dname; + ENTRY; + + entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4; + OBD_ALLOC(entry, entry_size); + if (unlikely(entry == NULL)) + RETURN(ERR_PTR(-ENOMEM)); + + CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n", + len, name, entry, index); + + entry->se_index = index; + + /* + * Statahead entry reference rules: + * + * 1) When statahead entry is initialized, its reference is set as 2. + * One reference is used by the directory scanner. When the scanner + * searches the statahead cache for the given name, it can perform + * lockless hash lookup (only the scanner can remove entry from hash + * list), and once found, it needn't to call "atomic_inc()" for the + * entry reference. So the performance is improved. After using the + * statahead entry, the scanner will call "atomic_dec()" to drop the + * reference held when initialization. If it is the last reference, + * the statahead entry will be freed. + * + * 2) All other threads, including statahead thread and ptlrpcd thread, + * when they process the statahead entry, the reference for target + * should be held to guarantee the entry will not be released by the + * directory scanner. After processing the entry, these threads will + * drop the entry reference. If it is the last reference, the entry + * will be freed. + * + * The second reference when initializes the statahead entry is used + * by the statahead thread, following the rule 2). + */ + atomic_set(&entry->se_refcount, 2); + entry->se_stat = SA_ENTRY_INIT; + entry->se_size = entry_size; + dname = (char *)entry + sizeof(struct ll_sa_entry); + memcpy(dname, name, len); + dname[len] = 0; + entry->se_qstr.hash = full_name_hash(name, len); + entry->se_qstr.len = len; + entry->se_qstr.name = dname; + + lli = ll_i2info(sai->sai_inode); + spin_lock(&lli->lli_sa_lock); + cfs_list_add_tail(&entry->se_link, &sai->sai_entries); + CFS_INIT_LIST_HEAD(&entry->se_list); + ll_sa_entry_enhash(sai, entry); + spin_unlock(&lli->lli_sa_lock); + + atomic_inc(&sai->sai_cache_count); + + RETURN(entry); +} + +/* + * Used by the directory scanner to search entry with name. + * + * Only the caller can remove the entry from hash, so it is unnecessary to hold + * hash lock. It is caller's duty to release the init refcount on the entry, so + * it is also unnecessary to increase refcount on the entry. + */ +static struct ll_sa_entry * +ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr) +{ + struct ll_sa_entry *entry; + int i = ll_sa_entry_hash(qstr->hash); + + cfs_list_for_each_entry(entry, &sai->sai_cache[i], se_hash) { + if (entry->se_qstr.hash == qstr->hash && + entry->se_qstr.len == qstr->len && + memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0) + return entry; + } + return NULL; +} + +/* + * Used by the async getattr request callback to find entry with index. + * + * Inside lli_sa_lock to prevent others to change the list during the search. + * It needs to increase entry refcount before returning to guarantee that the + * entry cannot be freed by others. 
+ */ +static struct ll_sa_entry * +ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index) +{ + struct ll_sa_entry *entry; + + cfs_list_for_each_entry(entry, &sai->sai_entries, se_link) { + if (entry->se_index == index) { + LASSERT(atomic_read(&entry->se_refcount) > 0); + atomic_inc(&entry->se_refcount); + return entry; + } + if (entry->se_index > index) + break; + } + return NULL; +} + +static void ll_sa_entry_cleanup(struct ll_statahead_info *sai, + struct ll_sa_entry *entry) { struct md_enqueue_info *minfo = entry->se_minfo; struct ptlrpc_request *req = entry->se_req; - ENTRY; if (minfo) { entry->se_minfo = NULL; ll_intent_release(&minfo->mi_it); - dput(minfo->mi_dentry); iput(minfo->mi_dir); OBD_FREE_PTR(minfo); } + if (req) { entry->se_req = NULL; ptlrpc_req_finished(req); } - if (free) { - LASSERT(cfs_list_empty(&entry->se_list)); - OBD_FREE_PTR(entry); - } - - EXIT; -} - -static struct ll_statahead_info *ll_sai_alloc(void) -{ - struct ll_statahead_info *sai; - - OBD_ALLOC_PTR(sai); - if (!sai) - return NULL; - - cfs_spin_lock(&sai_generation_lock); - sai->sai_generation = ++sai_generation; - if (unlikely(sai_generation == 0)) - sai->sai_generation = ++sai_generation; - cfs_spin_unlock(&sai_generation_lock); - cfs_atomic_set(&sai->sai_refcount, 1); - sai->sai_max = LL_SA_RPC_MIN; - cfs_waitq_init(&sai->sai_waitq); - cfs_waitq_init(&sai->sai_thread.t_ctl_waitq); - CFS_INIT_LIST_HEAD(&sai->sai_entries_sent); - CFS_INIT_LIST_HEAD(&sai->sai_entries_received); - CFS_INIT_LIST_HEAD(&sai->sai_entries_stated); - return sai; } -static inline -struct ll_statahead_info *ll_sai_get(struct ll_statahead_info *sai) +static void ll_sa_entry_put(struct ll_statahead_info *sai, + struct ll_sa_entry *entry) { - LASSERT(sai); - cfs_atomic_inc(&sai->sai_refcount); - return sai; + if (atomic_dec_and_test(&entry->se_refcount)) { + CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n", + entry->se_qstr.len, entry->se_qstr.name, entry, + entry->se_index); + + LASSERT(cfs_list_empty(&entry->se_link)); + LASSERT(cfs_list_empty(&entry->se_list)); + LASSERT(ll_sa_entry_unhashed(entry)); + + ll_sa_entry_cleanup(sai, entry); + if (entry->se_inode) + iput(entry->se_inode); + + OBD_FREE(entry, entry->se_size); + atomic_dec(&sai->sai_cache_count); + } } -static void ll_sai_put(struct ll_statahead_info *sai) +static inline void +do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) { - struct inode *inode = sai->sai_inode; - struct ll_inode_info *lli; - ENTRY; + struct ll_inode_info *lli = ll_i2info(sai->sai_inode); - LASSERT(inode != NULL); - lli = ll_i2info(inode); - LASSERT(lli->lli_sai == sai); + LASSERT(!ll_sa_entry_unhashed(entry)); + LASSERT(!cfs_list_empty(&entry->se_link)); - if (cfs_atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) { - struct ll_sai_entry *entry, *next; + ll_sa_entry_unhash(sai, entry); - if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) { - /* It is race case, the interpret callback just hold - * a reference count */ - cfs_spin_unlock(&lli->lli_sa_lock); - RETURN_EXIT; - } + spin_lock(&lli->lli_sa_lock); + entry->se_stat = SA_ENTRY_DEST; + cfs_list_del_init(&entry->se_link); + if (likely(!cfs_list_empty(&entry->se_list))) + cfs_list_del_init(&entry->se_list); + spin_unlock(&lli->lli_sa_lock); - LASSERT(lli->lli_opendir_key == NULL); - lli->lli_sai = NULL; - lli->lli_opendir_pid = 0; - cfs_spin_unlock(&lli->lli_sa_lock); + ll_sa_entry_put(sai, entry); +} - LASSERT(sa_is_stopped(sai)); +/* + * Delete it from sai_entries_stated list when fini. 
+ */ +static void +ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) +{ + struct ll_sa_entry *pos, *next; - if (sai->sai_sent > sai->sai_replied) - CDEBUG(D_READA,"statahead for dir "DFID" does not " - "finish: [sent:%u] [replied:%u]\n", - PFID(&lli->lli_fid), - sai->sai_sent, sai->sai_replied); + if (entry) + do_sa_entry_fini(sai, entry); - cfs_list_for_each_entry_safe(entry, next, - &sai->sai_entries_sent, se_list) { - cfs_list_del_init(&entry->se_list); - ll_sai_entry_cleanup(entry, 1); - } - cfs_list_for_each_entry_safe(entry, next, - &sai->sai_entries_received, - se_list) { - cfs_list_del_init(&entry->se_list); - ll_sai_entry_cleanup(entry, 1); - } - cfs_list_for_each_entry_safe(entry, next, - &sai->sai_entries_stated, - se_list) { - cfs_list_del_init(&entry->se_list); - ll_sai_entry_cleanup(entry, 1); - } - iput(inode); - OBD_FREE_PTR(sai); - } - EXIT; + /* drop old entry, only 'scanner' process does this, no need to lock */ + cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) { + if (!is_omitted_entry(sai, pos->se_index)) + break; + do_sa_entry_fini(sai, pos); + } } -/** - * insert it into sai_entries_sent tail when init. +/* + * Inside lli_sa_lock. */ -static struct ll_sai_entry * -ll_sai_entry_init(struct ll_statahead_info *sai, unsigned int index) +static void +do_sa_entry_to_stated(struct ll_statahead_info *sai, + struct ll_sa_entry *entry, se_stat_t stat) { - struct ll_inode_info *lli = ll_i2info(sai->sai_inode); - struct ll_sai_entry *entry; - ENTRY; - - OBD_ALLOC_PTR(entry); - if (entry == NULL) - RETURN(ERR_PTR(-ENOMEM)); + struct ll_sa_entry *se; + cfs_list_t *pos = &sai->sai_entries_stated; - CDEBUG(D_READA, "alloc sai entry %p index %u\n", - entry, index); - entry->se_index = index; - entry->se_stat = SA_ENTRY_UNSTATED; + if (!cfs_list_empty(&entry->se_list)) + cfs_list_del_init(&entry->se_list); - cfs_spin_lock(&lli->lli_sa_lock); - cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent); - cfs_spin_unlock(&lli->lli_sa_lock); + cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) { + if (se->se_index < entry->se_index) { + pos = &se->se_list; + break; + } + } - RETURN(entry); + cfs_list_add(&entry->se_list, pos); + entry->se_stat = stat; } -/** - * delete it from sai_entries_stated head when fini, it need not - * to process entry's member. +/* + * Move entry to sai_entries_stated and sort with the index. + * \retval 1 -- entry to be destroyed. + * \retval 0 -- entry is inserted into stated list. 
*/ -static int ll_sai_entry_fini(struct ll_statahead_info *sai) +static int +ll_sa_entry_to_stated(struct ll_statahead_info *sai, + struct ll_sa_entry *entry, se_stat_t stat) { - struct ll_inode_info *lli = ll_i2info(sai->sai_inode); - struct ll_sai_entry *entry; - int rc = 0; - ENTRY; + struct ll_inode_info *lli = ll_i2info(sai->sai_inode); + int ret = 1; - cfs_spin_lock(&lli->lli_sa_lock); - sai->sai_index_next++; - if (likely(!cfs_list_empty(&sai->sai_entries_stated))) { - entry = cfs_list_entry(sai->sai_entries_stated.next, - struct ll_sai_entry, se_list); - if (entry->se_index < sai->sai_index_next) { - cfs_list_del_init(&entry->se_list); - rc = entry->se_stat; - OBD_FREE_PTR(entry); - } - } else { - LASSERT(sa_is_stopped(sai)); - } - cfs_spin_unlock(&lli->lli_sa_lock); + ll_sa_entry_cleanup(sai, entry); - RETURN(rc); + spin_lock(&lli->lli_sa_lock); + if (likely(entry->se_stat != SA_ENTRY_DEST)) { + do_sa_entry_to_stated(sai, entry, stat); + ret = 0; + } + spin_unlock(&lli->lli_sa_lock); + + return ret; } -/** - * inside lli_sa_lock. - * \retval NULL : can not find the entry in sai_entries_sent with the index - * \retval entry: find the entry in sai_entries_sent with the index +/* + * Insert inode into the list of sai_entries_agl. */ -static struct ll_sai_entry * -ll_sai_entry_set(struct ll_statahead_info *sai, unsigned int index, int stat, - struct ptlrpc_request *req, struct md_enqueue_info *minfo) +static void ll_agl_add(struct ll_statahead_info *sai, + struct inode *inode, int index) { - struct ll_sai_entry *entry; - ENTRY; + struct ll_inode_info *child = ll_i2info(inode); + struct ll_inode_info *parent = ll_i2info(sai->sai_inode); + int added = 0; + + spin_lock(&child->lli_agl_lock); + if (child->lli_agl_index == 0) { + child->lli_agl_index = index; + spin_unlock(&child->lli_agl_lock); + + LASSERT(cfs_list_empty(&child->lli_agl_list)); + + igrab(inode); + spin_lock(&parent->lli_agl_lock); + if (agl_list_empty(sai)) + added = 1; + cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl); + spin_unlock(&parent->lli_agl_lock); + } else { + spin_unlock(&child->lli_agl_lock); + } + + if (added > 0) + wake_up(&sai->sai_agl_thread.t_ctl_waitq); +} - if (!cfs_list_empty(&sai->sai_entries_sent)) { - cfs_list_for_each_entry(entry, &sai->sai_entries_sent, - se_list) { - if (entry->se_index == index) { - entry->se_stat = stat; - entry->se_req = ptlrpc_request_addref(req); - entry->se_minfo = minfo; - RETURN(entry); - } else if (entry->se_index > index) { - RETURN(NULL); - } - } - } - RETURN(NULL); +static struct ll_statahead_info *ll_sai_alloc(void) +{ + struct ll_statahead_info *sai; + int i; + ENTRY; + + OBD_ALLOC_PTR(sai); + if (!sai) + RETURN(NULL); + + atomic_set(&sai->sai_refcount, 1); + + spin_lock(&sai_generation_lock); + sai->sai_generation = ++sai_generation; + if (unlikely(sai_generation == 0)) + sai->sai_generation = ++sai_generation; + spin_unlock(&sai_generation_lock); + + sai->sai_max = LL_SA_RPC_MIN; + sai->sai_index = 1; + init_waitqueue_head(&sai->sai_waitq); + init_waitqueue_head(&sai->sai_thread.t_ctl_waitq); + init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq); + + CFS_INIT_LIST_HEAD(&sai->sai_entries); + CFS_INIT_LIST_HEAD(&sai->sai_entries_received); + CFS_INIT_LIST_HEAD(&sai->sai_entries_stated); + CFS_INIT_LIST_HEAD(&sai->sai_entries_agl); + + for (i = 0; i < LL_SA_CACHE_SIZE; i++) { + CFS_INIT_LIST_HEAD(&sai->sai_cache[i]); + spin_lock_init(&sai->sai_cache_lock[i]); + } + atomic_set(&sai->sai_cache_count, 0); + + RETURN(sai); } -/** - * inside lli_sa_lock. 
- * Move entry to sai_entries_received and - * insert it into sai_entries_received tail. - */ -static inline void -ll_sai_entry_to_received(struct ll_statahead_info *sai, struct ll_sai_entry *entry) +static inline struct ll_statahead_info * +ll_sai_get(struct ll_statahead_info *sai) { - if (!cfs_list_empty(&entry->se_list)) - cfs_list_del_init(&entry->se_list); - cfs_list_add_tail(&entry->se_list, &sai->sai_entries_received); + atomic_inc(&sai->sai_refcount); + return sai; } -/** - * Move entry to sai_entries_stated and - * sort with the index. - */ -static int -ll_sai_entry_to_stated(struct ll_statahead_info *sai, struct ll_sai_entry *entry) +static void ll_sai_put(struct ll_statahead_info *sai) { - struct ll_inode_info *lli = ll_i2info(sai->sai_inode); - struct ll_sai_entry *se; - ENTRY; + struct inode *inode = sai->sai_inode; + struct ll_inode_info *lli = ll_i2info(inode); + ENTRY; + + if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) { + struct ll_sa_entry *entry, *next; - ll_sai_entry_cleanup(entry, 0); + if (unlikely(atomic_read(&sai->sai_refcount) > 0)) { + /* It is race case, the interpret callback just hold + * a reference count */ + spin_unlock(&lli->lli_sa_lock); + RETURN_EXIT; + } - cfs_spin_lock(&lli->lli_sa_lock); - if (!cfs_list_empty(&entry->se_list)) - cfs_list_del_init(&entry->se_list); + LASSERT(lli->lli_opendir_key == NULL); + LASSERT(thread_is_stopped(&sai->sai_thread)); + LASSERT(thread_is_stopped(&sai->sai_agl_thread)); - /* stale entry */ - if (unlikely(entry->se_index < sai->sai_index_next)) { - cfs_spin_unlock(&lli->lli_sa_lock); - OBD_FREE_PTR(entry); - RETURN(0); + lli->lli_sai = NULL; + lli->lli_opendir_pid = 0; + spin_unlock(&lli->lli_sa_lock); + + if (sai->sai_sent > sai->sai_replied) + CDEBUG(D_READA,"statahead for dir "DFID" does not " + "finish: [sent:"LPU64"] [replied:"LPU64"]\n", + PFID(&lli->lli_fid), + sai->sai_sent, sai->sai_replied); + + cfs_list_for_each_entry_safe(entry, next, + &sai->sai_entries, se_link) + do_sa_entry_fini(sai, entry); + + LASSERT(list_empty(&sai->sai_entries)); + LASSERT(sa_received_empty(sai)); + LASSERT(list_empty(&sai->sai_entries_stated)); + + LASSERT(atomic_read(&sai->sai_cache_count) == 0); + LASSERT(agl_list_empty(sai)); + + iput(inode); + OBD_FREE_PTR(sai); + } + + EXIT; +} + +/* Do NOT forget to drop inode refcount when into sai_entries_agl. */ +static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai) +{ + struct ll_inode_info *lli = ll_i2info(inode); + __u64 index = lli->lli_agl_index; + int rc; + ENTRY; + + LASSERT(cfs_list_empty(&lli->lli_agl_list)); + + /* AGL maybe fall behind statahead with one entry */ + if (is_omitted_entry(sai, index + 1)) { + lli->lli_agl_index = 0; + iput(inode); + RETURN_EXIT; } - cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) { - if (se->se_index < entry->se_index) { - cfs_list_add(&entry->se_list, &se->se_list); - cfs_spin_unlock(&lli->lli_sa_lock); - RETURN(1); - } + /* Someone is in glimpse (sync or async), do nothing. */ + rc = down_write_trylock(&lli->lli_glimpse_sem); + if (rc == 0) { + lli->lli_agl_index = 0; + iput(inode); + RETURN_EXIT; } /* - * I am the first entry. + * Someone triggered glimpse within 1 sec before. + * 1) The former glimpse succeeded with glimpse lock granted by OST, and + * if the lock is still cached on client, AGL needs to do nothing. If + * it is cancelled by other client, AGL maybe cannot obtaion new lock + * for no glimpse callback triggered by AGL. 
+ * 2) The former glimpse succeeded, but OST did not grant glimpse lock. + * Under such case, it is quite possible that the OST will not grant + * glimpse lock for AGL also. + * 3) The former glimpse failed, compared with other two cases, it is + * relative rare. AGL can ignore such case, and it will not muchly + * affect the performance. */ - cfs_list_add(&entry->se_list, &sai->sai_entries_stated); - cfs_spin_unlock(&lli->lli_sa_lock); - RETURN(1); + if (lli->lli_glimpse_time != 0 && + cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) { + up_write(&lli->lli_glimpse_sem); + lli->lli_agl_index = 0; + iput(inode); + RETURN_EXIT; + } + + CDEBUG(D_READA, "Handling (init) async glimpse: inode = " + DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index); + + cl_agl(inode); + lli->lli_agl_index = 0; + lli->lli_glimpse_time = cfs_time_current(); + up_write(&lli->lli_glimpse_sem); + + CDEBUG(D_READA, "Handled (init) async glimpse: inode= " + DFID", idx = "LPU64", rc = %d\n", + PFID(&lli->lli_fid), index, rc); + + iput(inode); + + EXIT; } -/** - * finish lookup/revalidate. - */ -static int do_statahead_interpret(struct ll_statahead_info *sai) +static void ll_post_statahead(struct ll_statahead_info *sai) { - struct ll_inode_info *lli = ll_i2info(sai->sai_inode); - struct ll_sai_entry *entry; - struct ptlrpc_request *req; + struct inode *dir = sai->sai_inode; + struct inode *child; + struct ll_inode_info *lli = ll_i2info(dir); + struct ll_sa_entry *entry; struct md_enqueue_info *minfo; struct lookup_intent *it; - struct dentry *dentry; - int rc = 0; + struct ptlrpc_request *req; struct mdt_body *body; + int rc = 0; ENTRY; - cfs_spin_lock(&lli->lli_sa_lock); - LASSERT(!sa_received_empty(sai)); - entry = cfs_list_entry(sai->sai_entries_received.next, - struct ll_sai_entry, se_list); - cfs_list_del_init(&entry->se_list); - cfs_spin_unlock(&lli->lli_sa_lock); - - if (unlikely(entry->se_index < sai->sai_index_next)) { - CWARN("Found stale entry: [index %u] [next %u]\n", - entry->se_index, sai->sai_index_next); - ll_sai_entry_cleanup(entry, 1); - RETURN(0); - } + spin_lock(&lli->lli_sa_lock); + if (unlikely(sa_received_empty(sai))) { + spin_unlock(&lli->lli_sa_lock); + RETURN_EXIT; + } + entry = sa_first_received_entry(sai); + atomic_inc(&entry->se_refcount); + cfs_list_del_init(&entry->se_list); + spin_unlock(&lli->lli_sa_lock); - if (entry->se_stat != SA_ENTRY_STATED) - GOTO(out, rc = entry->se_stat); + LASSERT(entry->se_handle != 0); - req = entry->se_req; minfo = entry->se_minfo; it = &minfo->mi_it; - dentry = minfo->mi_dentry; - + req = entry->se_req; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); if (body == NULL) GOTO(out, rc = -EFAULT); - if (dentry->d_inode == NULL) { + child = entry->se_inode; + if (child == NULL) { /* * lookup. */ - struct dentry *save = dentry; - struct it_cb_data icbd = { - .icbd_parent = minfo->mi_dir, - .icbd_childp = &dentry - }; - LASSERT(fid_is_zero(&minfo->mi_data.op_fid2)); /* XXX: No fid in reply, this is probaly cross-ref case. * SA can't handle it yet. */ if (body->valid & OBD_MD_MDS) GOTO(out, rc = -EAGAIN); - - /* Here dentry->d_inode might be NULL, because the entry may - * have been removed before we start doing stat ahead. */ - rc = ll_lookup_it_finish(req, it, &icbd); - if (!rc) - ll_lookup_finish_locks(it, dentry); - - if (dentry != save) { - minfo->mi_dentry = dentry; - dput(save); - } } else { /* * revalidate. 
*/ - if (!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1)) { - ll_unhash_aliases(dentry->d_inode); - GOTO(out, rc = -EAGAIN); + /* unlinked and re-created with the same name */ + if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))){ + entry->se_inode = NULL; + iput(child); + child = NULL; } + } - rc = ll_revalidate_it_finish(req, it, dentry); - if (rc) { - ll_unhash_aliases(dentry->d_inode); - GOTO(out, rc); - } + it->d.lustre.it_lock_handle = entry->se_handle; + rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL); + if (rc != 1) + GOTO(out, rc = -EAGAIN); + + rc = ll_prep_inode(&child, req, dir->i_sb, it); + if (rc) + GOTO(out, rc); + + CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"(%p)\n", + ll_get_fsname(child->i_sb, NULL, 0), + PFID(ll_inode2fid(child)), child); + ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL); + + entry->se_inode = child; + + if (agl_should_run(sai, child)) + ll_agl_add(sai, child, entry->se_index); - cfs_spin_lock(&ll_lookup_lock); - spin_lock(&dcache_lock); - lock_dentry(dentry); - __d_drop(dentry); - dentry->d_flags &= ~DCACHE_LUSTRE_INVALID; - unlock_dentry(dentry); - d_rehash_cond(dentry, 0); - spin_unlock(&dcache_lock); - cfs_spin_unlock(&ll_lookup_lock); - - ll_lookup_finish_locks(it, dentry); - } EXIT; out: - /* The "ll_sai_entry_to_stated()" will drop related ldlm ibits lock - * reference count with ll_intent_drop_lock() called in spite of the - * above operations failed or not. Do not worry about calling - * "ll_intent_drop_lock()" more than once. */ - if (likely(ll_sai_entry_to_stated(sai, entry))) - cfs_waitq_signal(&sai->sai_waitq); - return rc; + /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock + * reference count by calling "ll_intent_drop_lock()" in spite of the + * above operations failed or not. Do not worry about calling + * "ll_intent_drop_lock()" more than once. */ + rc = ll_sa_entry_to_stated(sai, entry, + rc < 0 ? 
SA_ENTRY_INVA : SA_ENTRY_SUCC); + if (rc == 0 && entry->se_index == sai->sai_index_wait) + wake_up(&sai->sai_waitq); + ll_sa_entry_put(sai, entry); } static int ll_statahead_interpret(struct ptlrpc_request *req, - struct md_enqueue_info *minfo, - int rc) + struct md_enqueue_info *minfo, int rc) { - struct lookup_intent *it = &minfo->mi_it; - struct dentry *dentry = minfo->mi_dentry; + struct lookup_intent *it = &minfo->mi_it; struct inode *dir = minfo->mi_dir; struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai; - struct ll_sai_entry *entry; + struct ll_statahead_info *sai = NULL; + struct ll_sa_entry *entry; + int wakeup; ENTRY; - CDEBUG(D_READA, "interpret statahead %.*s rc %d\n", - dentry->d_name.len, dentry->d_name.name, rc); + if (it_disposition(it, DISP_LOOKUP_NEG)) + rc = -ENOENT; + + spin_lock(&lli->lli_sa_lock); + /* stale entry */ + if (unlikely(lli->lli_sai == NULL || + lli->lli_sai->sai_generation != minfo->mi_generation)) { + spin_unlock(&lli->lli_sa_lock); + GOTO(out, rc = -ESTALE); + } else { + sai = ll_sai_get(lli->lli_sai); + if (unlikely(!thread_is_running(&sai->sai_thread))) { + sai->sai_replied++; + spin_unlock(&lli->lli_sa_lock); + GOTO(out, rc = -EBADFD); + } + + entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata); + if (entry == NULL) { + sai->sai_replied++; + spin_unlock(&lli->lli_sa_lock); + GOTO(out, rc = -EIDRM); + } + + if (rc != 0) { + do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA); + wakeup = (entry->se_index == sai->sai_index_wait); + } else { + entry->se_minfo = minfo; + entry->se_req = ptlrpc_request_addref(req); + /* Release the async ibits lock ASAP to avoid deadlock + * when statahead thread tries to enqueue lock on parent + * for readpage and other tries to enqueue lock on child + * with parent's lock held, for example: unlink. */ + entry->se_handle = it->d.lustre.it_lock_handle; + ll_intent_drop_lock(it); + wakeup = sa_received_empty(sai); + cfs_list_add_tail(&entry->se_list, + &sai->sai_entries_received); + } + sai->sai_replied++; + spin_unlock(&lli->lli_sa_lock); + + ll_sa_entry_put(sai, entry); + if (wakeup) + wake_up(&sai->sai_thread.t_ctl_waitq); + } + + EXIT; - cfs_spin_lock(&lli->lli_sa_lock); - /* stale entry */ - if (unlikely(lli->lli_sai == NULL || - lli->lli_sai->sai_generation != minfo->mi_generation)) { - cfs_spin_unlock(&lli->lli_sa_lock); +out: + if (rc != 0) { ll_intent_release(it); - dput(dentry); iput(dir); OBD_FREE_PTR(minfo); - RETURN(-ESTALE); - } else { - sai = ll_sai_get(lli->lli_sai); - entry = ll_sai_entry_set(sai, - (unsigned int)(long)minfo->mi_cbdata, - rc < 0 ? rc : SA_ENTRY_STATED, req, - minfo); - LASSERT(entry != NULL); - if (likely(sa_is_running(sai))) { - ll_sai_entry_to_received(sai, entry); - sai->sai_replied++; - cfs_spin_unlock(&lli->lli_sa_lock); - cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq); - } else { - if (!cfs_list_empty(&entry->se_list)) - cfs_list_del_init(&entry->se_list); - sai->sai_replied++; - cfs_spin_unlock(&lli->lli_sa_lock); - ll_sai_entry_cleanup(entry, 1); - } - ll_sai_put(sai); - RETURN(rc); } + if (sai != NULL) + ll_sai_put(sai); + return rc; } static void sa_args_fini(struct md_enqueue_info *minfo, @@ -552,12 +797,13 @@ static void sa_args_fini(struct md_enqueue_info *minfo, * "ocapa". So here reserve "op_data.op_capa[1,2]" in "pcapa" before calling * "md_intent_getattr_async". 
*/ -static int sa_args_init(struct inode *dir, struct dentry *dentry, - struct md_enqueue_info **pmi, +static int sa_args_init(struct inode *dir, struct inode *child, + struct ll_sa_entry *entry, struct md_enqueue_info **pmi, struct ldlm_enqueue_info **pei, struct obd_capa **pcapa) { - struct ll_inode_info *lli = ll_i2info(dir); + struct qstr *qstr = &entry->se_qstr; + struct ll_inode_info *lli = ll_i2info(dir); struct md_enqueue_info *minfo; struct ldlm_enqueue_info *einfo; struct md_op_data *op_data; @@ -572,9 +818,8 @@ static int sa_args_init(struct inode *dir, struct dentry *dentry, return -ENOMEM; } - op_data = ll_prep_md_op_data(&minfo->mi_data, dir, dentry->d_inode, - dentry->d_name.name, dentry->d_name.len, - 0, LUSTRE_OPC_ANY, NULL); + op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name, + qstr->len, 0, LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) { OBD_FREE_PTR(einfo); OBD_FREE_PTR(minfo); @@ -582,11 +827,10 @@ static int sa_args_init(struct inode *dir, struct dentry *dentry, } minfo->mi_it.it_op = IT_GETATTR; - minfo->mi_dentry = dentry; minfo->mi_dir = igrab(dir); minfo->mi_cb = ll_statahead_interpret; minfo->mi_generation = lli->lli_sai->sai_generation; - minfo->mi_cbdata = (void *)(long)lli->lli_sai->sai_index; + minfo->mi_cbdata = entry->se_index; einfo->ei_type = LDLM_IBITS; einfo->ei_mode = it_to_lock_mode(&minfo->mi_it); @@ -603,10 +847,7 @@ static int sa_args_init(struct inode *dir, struct dentry *dentry, return 0; } -/** - * similar to ll_lookup_it(). - */ -static int do_sa_lookup(struct inode *dir, struct dentry *dentry) +static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry) { struct md_enqueue_info *minfo; struct ldlm_enqueue_info *einfo; @@ -614,7 +855,7 @@ static int do_sa_lookup(struct inode *dir, struct dentry *dentry) int rc; ENTRY; - rc = sa_args_init(dir, dentry, &minfo, &einfo, capas); + rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas); if (rc) RETURN(rc); @@ -635,10 +876,12 @@ static int do_sa_lookup(struct inode *dir, struct dentry *dentry) * \retval 0 -- will send stat-ahead request * \retval others -- prepare stat-ahead request failed */ -static int do_sa_revalidate(struct inode *dir, struct dentry *dentry) +static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry, + struct dentry *dentry) { struct inode *inode = dentry->d_inode; - struct lookup_intent it = { .it_op = IT_GETATTR }; + struct lookup_intent it = { .it_op = IT_GETATTR, + .d.lustre.it_lock_handle = 0 }; struct md_enqueue_info *minfo; struct ldlm_enqueue_info *einfo; struct obd_capa *capas[2]; @@ -651,259 +894,390 @@ static int do_sa_revalidate(struct inode *dir, struct dentry *dentry) if (d_mountpoint(dentry)) RETURN(1); - if (unlikely(dentry == dentry->d_sb->s_root)) - RETURN(1); - - rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode)); + entry->se_inode = igrab(inode); + rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL); if (rc == 1) { + entry->se_handle = it.d.lustre.it_lock_handle; ll_intent_release(&it); RETURN(1); } - rc = sa_args_init(dir, dentry, &minfo, &einfo, capas); - if (rc) + rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas); + if (rc) { + entry->se_inode = NULL; + iput(inode); RETURN(rc); + } rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo); if (!rc) { capa_put(capas[0]); capa_put(capas[1]); } else { + entry->se_inode = NULL; + iput(inode); sa_args_fini(minfo, einfo); } RETURN(rc); } -static inline void ll_name2qstr(struct qstr *q, const char *name, int namelen) 
+static void ll_statahead_one(struct dentry *parent, const char* entry_name, + int entry_name_len) { - q->name = name; - q->len = namelen; - q->hash = full_name_hash(name, namelen); -} - -static int ll_statahead_one(struct dentry *parent, const char* entry_name, - int entry_name_len) -{ - struct inode *dir = parent->d_inode; - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_statahead_info *sai = lli->lli_sai; - struct qstr name; + struct inode *dir = parent->d_inode; + struct ll_inode_info *lli = ll_i2info(dir); + struct ll_statahead_info *sai = lli->lli_sai; struct dentry *dentry = NULL; - struct ll_sai_entry *se; + struct ll_sa_entry *entry; int rc; + int rc1; ENTRY; - if (parent->d_flags & DCACHE_LUSTRE_INVALID) { - CDEBUG(D_READA, "parent dentry@%p %.*s is " - "invalid, skip statahead\n", - parent, parent->d_name.len, parent->d_name.name); - RETURN(-EINVAL); - } - - se = ll_sai_entry_init(sai, sai->sai_index); - if (IS_ERR(se)) - RETURN(PTR_ERR(se)); + entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name, + entry_name_len); + if (IS_ERR(entry)) + RETURN_EXIT; - ll_name2qstr(&name, entry_name, entry_name_len); - dentry = d_lookup(parent, &name); + dentry = d_lookup(parent, &entry->se_qstr); if (!dentry) { - dentry = d_alloc(parent, &name); - if (dentry) - rc = do_sa_lookup(dir, dentry); - else - GOTO(out, rc = -ENOMEM); + rc = do_sa_lookup(dir, entry); } else { - rc = do_sa_revalidate(dir, dentry); + rc = do_sa_revalidate(dir, entry, dentry); + if (rc == 1 && agl_should_run(sai, dentry->d_inode)) + ll_agl_add(sai, dentry->d_inode, entry->se_index); } - EXIT; + if (dentry != NULL) + dput(dentry); -out: - if (rc) { - if (dentry != NULL) - dput(dentry); - se->se_stat = rc < 0 ? rc : SA_ENTRY_STATED; - CDEBUG(D_READA, "set sai entry %p index %u stat %d rc %d\n", - se, se->se_index, se->se_stat, rc); - if (ll_sai_entry_to_stated(sai, se)) - cfs_waitq_signal(&sai->sai_waitq); - } else { - sai->sai_sent++; - } + if (rc) { + rc1 = ll_sa_entry_to_stated(sai, entry, + rc < 0 ? 
SA_ENTRY_INVA : SA_ENTRY_SUCC); + if (rc1 == 0 && entry->se_index == sai->sai_index_wait) + wake_up(&sai->sai_waitq); + } else { + sai->sai_sent++; + } sai->sai_index++; - return rc; + /* drop one refcount on entry by ll_sa_entry_alloc */ + ll_sa_entry_put(sai, entry); + + EXIT; } -static int ll_statahead_thread(void *arg) +static int ll_agl_thread(void *arg) { struct dentry *parent = (struct dentry *)arg; - struct inode *dir = parent->d_inode; - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_sb_info *sbi = ll_i2sbi(dir); - struct ll_statahead_info *sai = ll_sai_get(lli->lli_sai); - struct ptlrpc_thread *thread = &sai->sai_thread; - struct page *page; - __u64 pos = 0; - int first = 0; - int rc = 0; - struct ll_dir_chain chain; + struct inode *dir = parent->d_inode; + struct ll_inode_info *plli = ll_i2info(dir); + struct ll_inode_info *clli; + struct ll_sb_info *sbi = ll_i2sbi(dir); + struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai); + struct ptlrpc_thread *thread = &sai->sai_agl_thread; + struct l_wait_info lwi = { 0 }; ENTRY; - { - char pname[16]; - snprintf(pname, 15, "ll_sa_%u", lli->lli_opendir_pid); - cfs_daemonize(pname); - } - - atomic_inc(&sbi->ll_sa_total); - cfs_spin_lock(&lli->lli_sa_lock); - thread->t_flags = SVC_RUNNING; - cfs_spin_unlock(&lli->lli_sa_lock); - cfs_waitq_signal(&thread->t_ctl_waitq); - CDEBUG(D_READA, "start doing statahead for %s\n", parent->d_name.name); - - sai->sai_pid = cfs_curproc_pid(); - lli->lli_sa_pos = 0; - ll_dir_chain_init(&chain); - page = ll_get_dir_page(NULL, dir, pos, 0, &chain); + thread->t_pid = current_pid(); + CDEBUG(D_READA, "agl thread started: sai %p, parent %.*s\n", + sai, parent->d_name.len, parent->d_name.name); + + atomic_inc(&sbi->ll_agl_total); + spin_lock(&plli->lli_agl_lock); + sai->sai_agl_valid = 1; + if (thread_is_init(thread)) + /* If someone else has changed the thread state + * (e.g. already changed to SVC_STOPPING), we can't just + * blindly overwrite that setting. */ + thread_set_flags(thread, SVC_RUNNING); + spin_unlock(&plli->lli_agl_lock); + wake_up(&thread->t_ctl_waitq); while (1) { - struct l_wait_info lwi = { 0 }; - struct lu_dirpage *dp; - struct lu_dirent *ent; - - if (IS_ERR(page)) { - rc = PTR_ERR(page); - CDEBUG(D_READA, "error reading dir "DFID" at "LPU64 - "/%u: [rc %d] [parent %u]\n", - PFID(ll_inode2fid(dir)), pos, sai->sai_index, - rc, lli->lli_opendir_pid); - break; - } - - dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL; - ent = lu_dirent_next(ent)) { - __u64 hash; - int namelen; - char *name; - - hash = le64_to_cpu(ent->lde_hash); - if (unlikely(hash < pos)) - /* - * Skip until we find target hash value. - */ - continue; - - namelen = le16_to_cpu(ent->lde_namelen); - if (unlikely(namelen == 0)) - /* - * Skip dummy record. - */ - continue; - - name = ent->lde_name; - if (name[0] == '.') { - if (namelen == 1) { - /* - * skip "." - */ - continue; - } else if (name[1] == '.' && namelen == 2) { - /* - * skip ".." - */ - continue; - } else if (!sai->sai_ls_all) { - /* - * skip hidden files. - */ - sai->sai_skip_hidden++; - continue; - } - } + l_wait_event(thread->t_ctl_waitq, + !agl_list_empty(sai) || + !thread_is_running(thread), + &lwi); - /* - * don't stat-ahead first entry. 
- */ - if (unlikely(!first)) { - first++; - continue; - } - -keep_de: - l_wait_event(thread->t_ctl_waitq, - !sa_is_running(sai) || sa_not_full(sai) || - !sa_received_empty(sai), - &lwi); - - while (!sa_received_empty(sai) && sa_is_running(sai)) - do_statahead_interpret(sai); - - if (unlikely(!sa_is_running(sai))) { - ll_put_page(page); - GOTO(out, rc); - } + if (!thread_is_running(thread)) + break; - if (!sa_not_full(sai)) - /* - * do not skip the current de. - */ - goto keep_de; + spin_lock(&plli->lli_agl_lock); + /* The statahead thread maybe help to process AGL entries, + * so check whether list empty again. */ + if (!agl_list_empty(sai)) { + clli = agl_first_entry(sai); + cfs_list_del_init(&clli->lli_agl_list); + spin_unlock(&plli->lli_agl_lock); + ll_agl_trigger(&clli->lli_vfs_inode, sai); + } else { + spin_unlock(&plli->lli_agl_lock); + } + } + + spin_lock(&plli->lli_agl_lock); + sai->sai_agl_valid = 0; + while (!agl_list_empty(sai)) { + clli = agl_first_entry(sai); + cfs_list_del_init(&clli->lli_agl_list); + spin_unlock(&plli->lli_agl_lock); + clli->lli_agl_index = 0; + iput(&clli->lli_vfs_inode); + spin_lock(&plli->lli_agl_lock); + } + thread_set_flags(thread, SVC_STOPPED); + spin_unlock(&plli->lli_agl_lock); + wake_up(&thread->t_ctl_waitq); + ll_sai_put(sai); + CDEBUG(D_READA, "agl thread stopped: sai %p, parent %.*s\n", + sai, parent->d_name.len, parent->d_name.name); + RETURN(0); +} - rc = ll_statahead_one(parent, name, namelen); - if (rc < 0) { - ll_put_page(page); - GOTO(out, rc); - } - } - pos = le64_to_cpu(dp->ldp_hash_end); - ll_put_page(page); - if (pos == DIR_END_OFF) { - /* - * End of directory reached. - */ - while (1) { - l_wait_event(thread->t_ctl_waitq, - !sa_is_running(sai) || - !sa_received_empty(sai) || - sai->sai_sent == sai->sai_replied, - &lwi); - if (!sa_received_empty(sai) && - sa_is_running(sai)) - do_statahead_interpret(sai); - else - GOTO(out, rc); - } - } else if (1) { - /* - * chain is exhausted. - * Normal case: continue to the next page. - */ - lli->lli_sa_pos = pos; - page = ll_get_dir_page(NULL, dir, pos, 1, &chain); - } else { - /* - * go into overflow page. 
- */ - } - } - EXIT; +static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai) +{ + struct ptlrpc_thread *thread = &sai->sai_agl_thread; + struct l_wait_info lwi = { 0 }; + struct ll_inode_info *plli; + struct task_struct *task; + ENTRY; + + CDEBUG(D_READA, "start agl thread: sai %p, parent %.*s\n", + sai, parent->d_name.len, parent->d_name.name); + + plli = ll_i2info(parent->d_inode); + task = kthread_run(ll_agl_thread, parent, + "ll_agl_%u", plli->lli_opendir_pid); + if (IS_ERR(task)) { + CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task)); + thread_set_flags(thread, SVC_STOPPED); + RETURN_EXIT; + } + + l_wait_event(thread->t_ctl_waitq, + thread_is_running(thread) || thread_is_stopped(thread), + &lwi); + EXIT; +} +static int ll_statahead_thread(void *arg) +{ + struct dentry *parent = (struct dentry *)arg; + struct inode *dir = parent->d_inode; + struct ll_inode_info *plli = ll_i2info(dir); + struct ll_inode_info *clli; + struct ll_sb_info *sbi = ll_i2sbi(dir); + struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai); + struct ptlrpc_thread *thread = &sai->sai_thread; + struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread; + int first = 0; + int rc = 0; + struct md_op_data *op_data; + struct ll_dir_chain chain; + struct l_wait_info lwi = { 0 }; + struct lu_dirent *ent; + struct page *page = NULL; + ENTRY; + + thread->t_pid = current_pid(); + CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n", + sai, parent->d_name.len, parent->d_name.name); + + op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0, + LUSTRE_OPC_ANY, dir); + if (IS_ERR(op_data)) + RETURN(PTR_ERR(op_data)); + + op_data->op_hash_offset = 0; + op_data->op_max_pages = + ll_i2sbi(dir)->ll_md_brw_size >> PAGE_CACHE_SHIFT; + + if (sbi->ll_flags & LL_SBI_AGL_ENABLED) + ll_start_agl(parent, sai); + + atomic_inc(&sbi->ll_sa_total); + spin_lock(&plli->lli_sa_lock); + if (thread_is_init(thread)) + /* If someone else has changed the thread state + * (e.g. already changed to SVC_STOPPING), we can't just + * blindly overwrite that setting. */ + thread_set_flags(thread, SVC_RUNNING); + spin_unlock(&plli->lli_sa_lock); + wake_up(&thread->t_ctl_waitq); + + ll_dir_chain_init(&chain); + for (ent = ll_dir_entry_start(dir, op_data, &page); + ent != NULL && !IS_ERR(ent); + ent = ll_dir_entry_next(dir, op_data, ent, &page)) { + __u64 hash; + int namelen; + char *name; + + hash = le64_to_cpu(ent->lde_hash); + if (unlikely(hash < op_data->op_hash_offset)) + /* + * Skip until we find target hash value. + */ + continue; + + namelen = le16_to_cpu(ent->lde_namelen); + if (unlikely(namelen == 0)) + /* + * Skip dummy record. + */ + continue; + + name = ent->lde_name; + if (name[0] == '.') { + if (namelen == 1) { + /* + * skip "." + */ + continue; + } else if (name[1] == '.' && namelen == 2) { + /* + * skip ".." + */ + continue; + } else if (!sai->sai_ls_all) { + /* + * skip hidden files. + */ + sai->sai_skip_hidden++; + continue; + } + } + + /* + * don't stat-ahead first entry. + */ + if (unlikely(++first == 1)) + continue; + +keep_it: + l_wait_event(thread->t_ctl_waitq, + !sa_sent_full(sai) || + !sa_received_empty(sai) || + !agl_list_empty(sai) || + !thread_is_running(thread), + &lwi); + +interpret_it: + while (!sa_received_empty(sai)) + ll_post_statahead(sai); + + if (unlikely(!thread_is_running(thread))) + GOTO(out, rc = 0); + + /* If no window for metadata statahead, but there are + * some AGL entries to be triggered, then try to help + * to process the AGL entries. 
*/ + if (sa_sent_full(sai)) { + spin_lock(&plli->lli_agl_lock); + while (!agl_list_empty(sai)) { + clli = agl_first_entry(sai); + cfs_list_del_init(&clli->lli_agl_list); + spin_unlock(&plli->lli_agl_lock); + ll_agl_trigger(&clli->lli_vfs_inode, + sai); + + if (!sa_received_empty(sai)) + goto interpret_it; + + if (unlikely( + !thread_is_running(thread))) + GOTO(out, rc = 0); + + if (!sa_sent_full(sai)) + goto do_it; + + spin_lock(&plli->lli_agl_lock); + } + spin_unlock(&plli->lli_agl_lock); + + goto keep_it; + } + +do_it: + ll_statahead_one(parent, name, namelen); + } + + if (page != NULL) { + kunmap(page); + page_cache_release(page); + } + + /* + * End of directory reached. + */ + while (1) { + l_wait_event(thread->t_ctl_waitq, + !sa_received_empty(sai) || + sai->sai_sent == sai->sai_replied || + !thread_is_running(thread), + &lwi); + + while (!sa_received_empty(sai)) + ll_post_statahead(sai); + + if (unlikely(!thread_is_running(thread))) + GOTO(out, rc = 0); + + if (sai->sai_sent == sai->sai_replied && + sa_received_empty(sai)) + break; + } + + spin_lock(&plli->lli_agl_lock); + while (!agl_list_empty(sai) && + thread_is_running(thread)) { + clli = agl_first_entry(sai); + cfs_list_del_init(&clli->lli_agl_list); + spin_unlock(&plli->lli_agl_lock); + ll_agl_trigger(&clli->lli_vfs_inode, sai); + spin_lock(&plli->lli_agl_lock); + } + spin_unlock(&plli->lli_agl_lock); out: + EXIT; + ll_finish_md_op_data(op_data); + if (sai->sai_agl_valid) { + spin_lock(&plli->lli_agl_lock); + thread_set_flags(agl_thread, SVC_STOPPING); + spin_unlock(&plli->lli_agl_lock); + wake_up(&agl_thread->t_ctl_waitq); + + CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n", + sai, (unsigned int)agl_thread->t_pid); + l_wait_event(agl_thread->t_ctl_waitq, + thread_is_stopped(agl_thread), + &lwi); + } else { + /* Set agl_thread flags anyway. */ + thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED); + } ll_dir_chain_fini(&chain); - cfs_spin_lock(&lli->lli_sa_lock); - thread->t_flags = SVC_STOPPED; - cfs_spin_unlock(&lli->lli_sa_lock); - cfs_waitq_signal(&sai->sai_waitq); - cfs_waitq_signal(&thread->t_ctl_waitq); + spin_lock(&plli->lli_sa_lock); + if (!sa_received_empty(sai)) { + thread_set_flags(thread, SVC_STOPPING); + spin_unlock(&plli->lli_sa_lock); + + /* To release the resources held by received entries. 
*/ + while (!sa_received_empty(sai)) + ll_post_statahead(sai); + + spin_lock(&plli->lli_sa_lock); + } + thread_set_flags(thread, SVC_STOPPED); + spin_unlock(&plli->lli_sa_lock); + wake_up(&sai->sai_waitq); + wake_up(&thread->t_ctl_waitq); ll_sai_put(sai); dput(parent); - CDEBUG(D_READA, "statahead thread stopped, pid %d\n", - cfs_curproc_pid()); - return rc; + CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n", + sai, parent->d_name.len, parent->d_name.name); + return rc; } /** @@ -911,14 +1285,14 @@ out: */ void ll_stop_statahead(struct inode *dir, void *key) { - struct ll_inode_info *lli = ll_i2info(dir); + struct ll_inode_info *lli = ll_i2info(dir); - if (unlikely(key == NULL)) - return; + if (unlikely(key == NULL)) + return; - cfs_spin_lock(&lli->lli_sa_lock); - if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) { - cfs_spin_unlock(&lli->lli_sa_lock); + spin_lock(&lli->lli_sa_lock); + if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) { + spin_unlock(&lli->lli_sa_lock); return; } @@ -928,30 +1302,30 @@ void ll_stop_statahead(struct inode *dir, void *key) struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread; - if (!sa_is_stopped(lli->lli_sai)) { - thread->t_flags = SVC_STOPPING; - cfs_spin_unlock(&lli->lli_sa_lock); - cfs_waitq_signal(&thread->t_ctl_waitq); - - CDEBUG(D_READA, "stopping statahead thread, pid %d\n", - cfs_curproc_pid()); - l_wait_event(thread->t_ctl_waitq, - sa_is_stopped(lli->lli_sai), - &lwi); - } else { - cfs_spin_unlock(&lli->lli_sa_lock); - } - - /* - * Put the ref which was held when first statahead_enter. - * It maybe not the last ref for some statahead requests - * maybe inflight. - */ - ll_sai_put(lli->lli_sai); - } else { - lli->lli_opendir_pid = 0; - cfs_spin_unlock(&lli->lli_sa_lock); - } + if (!thread_is_stopped(thread)) { + thread_set_flags(thread, SVC_STOPPING); + spin_unlock(&lli->lli_sa_lock); + wake_up(&thread->t_ctl_waitq); + + CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n", + lli->lli_sai, (unsigned int)thread->t_pid); + l_wait_event(thread->t_ctl_waitq, + thread_is_stopped(thread), + &lwi); + } else { + spin_unlock(&lli->lli_sa_lock); + } + + /* + * Put the ref which was held when first statahead_enter. + * It maybe not the last ref for some statahead requests + * maybe inflight. + */ + ll_sai_put(lli->lli_sai); + } else { + lli->lli_opendir_pid = 0; + spin_unlock(&lli->lli_sa_lock); + } } enum { @@ -964,149 +1338,185 @@ enum { */ LS_FIRST_DE, /** - * the first hidden dirent, that is "." + * the first hidden dirent, that is "." */ LS_FIRST_DOT_DE }; static int is_first_dirent(struct inode *dir, struct dentry *dentry) { - struct ll_inode_info *lli = ll_i2info(dir); - struct ll_dir_chain chain; - struct qstr *target = &dentry->d_name; - struct page *page; - __u64 pos = 0; - int dot_de; - int rc = LS_NONE_FIRST_DE; - ENTRY; - - lli->lli_sa_pos = 0; - ll_dir_chain_init(&chain); - page = ll_get_dir_page(NULL, dir, pos, 0, &chain); - - while (1) { - struct lu_dirpage *dp; - struct lu_dirent *ent; - - if (IS_ERR(page)) { - struct ll_inode_info *lli = ll_i2info(dir); - - rc = PTR_ERR(page); - CERROR("error reading dir "DFID" at "LPU64": " - "[rc %d] [parent %u]\n", - PFID(ll_inode2fid(dir)), pos, - rc, lli->lli_opendir_pid); - break; - } - - dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL; - ent = lu_dirent_next(ent)) { - int namelen; - char *name; - - namelen = le16_to_cpu(ent->lde_namelen); - if (unlikely(namelen == 0)) - /* - * skip dummy record. 
- */ - continue; - - name = ent->lde_name; - if (name[0] == '.') { - if (namelen == 1) - /* - * skip "." - */ - continue; - else if (name[1] == '.' && namelen == 2) - /* - * skip ".." - */ - continue; - else - dot_de = 1; - } else { - dot_de = 0; - } - - if (dot_de && target->name[0] != '.') { - CDEBUG(D_READA, "%.*s skip hidden file %.*s\n", - target->len, target->name, - namelen, name); - continue; - } - - if (target->len != namelen || - memcmp(target->name, name, namelen) != 0) - rc = LS_NONE_FIRST_DE; - else if (!dot_de) - rc = LS_FIRST_DE; - else - rc = LS_FIRST_DOT_DE; - - ll_put_page(page); - GOTO(out, rc); - } - pos = le64_to_cpu(dp->ldp_hash_end); - ll_put_page(page); - if (pos == DIR_END_OFF) { - /* - * End of directory reached. - */ - break; - } else if (1) { - /* - * chain is exhausted - * Normal case: continue to the next page. - */ - lli->lli_sa_pos = pos; - page = ll_get_dir_page(NULL, dir, pos, 1, &chain); - } else { - /* - * go into overflow page. - */ - } - } + struct ll_dir_chain chain; + struct qstr *target = &dentry->d_name; + struct md_op_data *op_data; + int dot_de; + struct lu_dirent *ent; + struct page *page = NULL; + int rc = LS_NONE_FIRST_DE; + ENTRY; + + ll_dir_chain_init(&chain); + + op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0, + LUSTRE_OPC_ANY, dir); + if (IS_ERR(op_data)) + GOTO(out, rc = PTR_ERR(op_data)); + /** + *FIXME choose the start offset of the readdir + */ + op_data->op_stripe_offset = 0; + op_data->op_hash_offset = 0; + op_data->op_max_pages = + ll_i2sbi(dir)->ll_md_brw_size >> PAGE_CACHE_SHIFT; + + for (ent = ll_dir_entry_start(dir, op_data, &page); + ent != NULL && !IS_ERR(ent); + ent = ll_dir_entry_next(dir, op_data, ent, &page)) { + __u64 hash; + int namelen; + char *name; + + hash = le64_to_cpu(ent->lde_hash); + /* The ll_get_dir_page() can return any page containing + * the given hash which may be not the start hash. */ + if (unlikely(hash < op_data->op_hash_offset)) + continue; + + namelen = le16_to_cpu(ent->lde_namelen); + if (unlikely(namelen == 0)) + /* + * skip dummy record. + */ + continue; + + name = ent->lde_name; + if (name[0] == '.') { + if (namelen == 1) + /* + * skip "." + */ + continue; + else if (name[1] == '.' && namelen == 2) + /* + * skip ".." 
+ */ + continue; + else + dot_de = 1; + } else { + dot_de = 0; + } + + if (dot_de && target->name[0] != '.') { + CDEBUG(D_READA, "%.*s skip hidden file %.*s\n", + target->len, target->name, + namelen, name); + continue; + } + + if (target->len != namelen || + memcmp(target->name, name, namelen) != 0) + rc = LS_NONE_FIRST_DE; + else if (!dot_de) + rc = LS_FIRST_DE; + else + rc = LS_FIRST_DOT_DE; + + break; + } EXIT; + if (page != NULL) { + kunmap(page); + page_cache_release(page); + } + ll_finish_md_op_data(op_data); out: - ll_dir_chain_fini(&chain); + ll_dir_chain_fini(&chain); return rc; } +static void +ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry) +{ + struct ptlrpc_thread *thread = &sai->sai_thread; + struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); + int hit; + ENTRY; + + if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC) + hit = 1; + else + hit = 0; + + ll_sa_entry_fini(sai, entry); + if (hit) { + sai->sai_hit++; + sai->sai_consecutive_miss = 0; + sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max); + } else { + struct ll_inode_info *lli = ll_i2info(sai->sai_inode); + + sai->sai_miss++; + sai->sai_consecutive_miss++; + if (sa_low_hit(sai) && thread_is_running(thread)) { + atomic_inc(&sbi->ll_sa_wrong); + CDEBUG(D_READA, "Statahead for dir "DFID" hit " + "ratio too low: hit/miss "LPU64"/"LPU64 + ", sent/replied "LPU64"/"LPU64", stopping " + "statahead thread\n", + PFID(&lli->lli_fid), sai->sai_hit, + sai->sai_miss, sai->sai_sent, + sai->sai_replied); + spin_lock(&lli->lli_sa_lock); + if (!thread_is_stopped(thread)) + thread_set_flags(thread, SVC_STOPPING); + spin_unlock(&lli->lli_sa_lock); + } + } + + if (!thread_is_stopped(thread)) + wake_up(&thread->t_ctl_waitq); + + EXIT; +} + /** * Start statahead thread if this is the first dir entry. * Otherwise if a thread is started already, wait it until it is ahead of me. - * \retval 0 -- stat ahead thread process such dentry, for lookup, it miss - * \retval 1 -- stat ahead thread process such dentry, for lookup, it hit - * \retval -EEXIST -- stat ahead thread started, and this is the first dentry - * \retval -EBADFD -- statahead thread exit and not dentry available - * \retval -EAGAIN -- try to stat by caller - * \retval others -- error + * \retval 1 -- find entry with lock in cache, the caller needs to do + * nothing. + * \retval 0 -- find entry in cache, but without lock, the caller needs + * refresh from MDS. + * \retval others -- the caller need to process as non-statahead. 
*/ -int do_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup) +int do_statahead_enter(struct inode *dir, struct dentry **dentryp, + int only_unplug) { - struct ll_inode_info *lli; - struct ll_statahead_info *sai; - struct dentry *parent; - struct l_wait_info lwi = { 0 }; - int rc = 0; - ENTRY; - - LASSERT(dir != NULL); - lli = ll_i2info(dir); - LASSERT(lli->lli_opendir_pid == cfs_curproc_pid()); - sai = lli->lli_sai; + struct ll_inode_info *lli = ll_i2info(dir); + struct ll_statahead_info *sai = lli->lli_sai; + struct dentry *parent; + struct ll_sa_entry *entry; + struct ptlrpc_thread *thread; + struct l_wait_info lwi = { 0 }; + struct task_struct *task; + int rc = 0; + struct ll_inode_info *plli; + ENTRY; + + LASSERT(lli->lli_opendir_pid == current_pid()); if (sai) { - if (unlikely(sa_is_stopped(sai) && - cfs_list_empty(&sai->sai_entries_stated))) - RETURN(-EBADFD); + thread = &sai->sai_thread; + if (unlikely(thread_is_stopped(thread) && + cfs_list_empty(&sai->sai_entries_stated))) { + /* to release resource */ + ll_stop_statahead(dir, lli->lli_opendir_key); + RETURN(-EAGAIN); + } if ((*dentryp)->d_name.name[0] == '.') { - if (likely(sai->sai_ls_all || - sai->sai_miss_hidden >= sai->sai_skip_hidden)) { + if (sai->sai_ls_all || + sai->sai_miss_hidden >= sai->sai_skip_hidden) { /* * Hidden dentry is the first one, or statahead * thread does not skip so many hidden dentries @@ -1127,40 +1537,79 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup) * "sai_ls_all" enabled as above. */ sai->sai_miss_hidden++; - RETURN(-ENOENT); + RETURN(-EAGAIN); } } - if (!ll_sai_entry_stated(sai)) { - /* - * thread started already, avoid double-stat. - */ - lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); + entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name); + if (entry == NULL || only_unplug) { + ll_sai_unplug(sai, entry); + RETURN(entry ? 1 : -EAGAIN); + } + + if (!ll_sa_entry_stated(entry)) { + sai->sai_index_wait = entry->se_index; + lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL, + LWI_ON_SIGNAL_NOOP, NULL); rc = l_wait_event(sai->sai_waitq, - ll_sai_entry_stated(sai) || - sa_is_stopped(sai), + ll_sa_entry_stated(entry) || + thread_is_stopped(thread), &lwi); - if (unlikely(rc == -EINTR)) - RETURN(rc); + if (rc < 0) { + ll_sai_unplug(sai, entry); + RETURN(-EAGAIN); + } } - if (lookup) { - struct dentry *result; - - result = d_lookup((*dentryp)->d_parent, - &(*dentryp)->d_name); - if (result) { - LASSERT(result != *dentryp); - /* BUG 16303: do not drop reference count for - * "*dentryp", VFS will do that by itself. 
*/ - *dentryp = result; - RETURN(1); + if (entry->se_stat == SA_ENTRY_SUCC && + entry->se_inode != NULL) { + struct inode *inode = entry->se_inode; + struct lookup_intent it = { .it_op = IT_GETATTR, + .d.lustre.it_lock_handle = + entry->se_handle }; + __u64 bits; + + rc = md_revalidate_lock(ll_i2mdexp(dir), &it, + ll_inode2fid(inode), &bits); + if (rc == 1) { + if ((*dentryp)->d_inode == NULL) { + struct dentry *alias; + + alias = ll_splice_alias(inode, + *dentryp); + if (IS_ERR(alias)) { + ll_sai_unplug(sai, entry); + RETURN(PTR_ERR(alias)); + } + *dentryp = alias; + } else if ((*dentryp)->d_inode != inode) { + /* revalidate, but inode is recreated */ + CDEBUG(D_READA, + "%s: stale dentry %.*s inode " + DFID", statahead inode "DFID + "\n", + ll_get_fsname((*dentryp)->d_inode->i_sb, NULL, 0), + (*dentryp)->d_name.len, + (*dentryp)->d_name.name, + PFID(ll_inode2fid((*dentryp)->d_inode)), + PFID(ll_inode2fid(inode))); + ll_intent_release(&it); + ll_sai_unplug(sai, entry); + RETURN(-ESTALE); + } else { + iput(inode); + } + entry->se_inode = NULL; + + if ((bits & MDS_INODELOCK_LOOKUP) && + d_lustre_invalid(*dentryp)) + d_lustre_revalidate(*dentryp); + ll_intent_release(&it); } } - /* - * do nothing for revalidate. - */ - RETURN(0); + + ll_sai_unplug(sai, entry); + RETURN(rc); } /* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */ @@ -1178,7 +1627,6 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup) if (unlikely(sai->sai_inode == NULL)) { CWARN("Do not start stat ahead on dying inode "DFID"\n", PFID(&lli->lli_fid)); - OBD_FREE_PTR(sai); GOTO(out, rc = -ESTALE); } @@ -1193,94 +1641,56 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, int lookup) PFID(&lli->lli_fid), PFID(&nlli->lli_fid)); dput(parent); iput(sai->sai_inode); - OBD_FREE_PTR(sai); - RETURN(-EAGAIN); + GOTO(out, rc = -EAGAIN); } + CDEBUG(D_READA, "start statahead thread: sai %p, parent %.*s\n", + sai, parent->d_name.len, parent->d_name.name); + + /* The sai buffer already has one reference taken at allocation time, + * but as soon as we expose the sai by attaching it to the lli that + * default reference can be dropped by another thread calling + * ll_stop_statahead. We need to take a local reference to protect + * the sai buffer while we intend to access it. */ + ll_sai_get(sai); lli->lli_sai = sai; - rc = cfs_kernel_thread(ll_statahead_thread, parent, 0); - if (rc < 0) { - CERROR("can't start ll_sa thread, rc: %d\n", rc); - dput(parent); + + plli = ll_i2info(parent->d_inode); + task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u", + plli->lli_opendir_pid); + thread = &sai->sai_thread; + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("cannot start ll_sa thread: rc = %d\n", rc); + dput(parent); lli->lli_opendir_key = NULL; - sai->sai_thread.t_flags = SVC_STOPPED; + thread_set_flags(thread, SVC_STOPPED); + thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED); + /* Drop both our own local reference and the default + * reference from allocation time. */ ll_sai_put(sai); + ll_sai_put(sai); LASSERT(lli->lli_sai == NULL); RETURN(-EAGAIN); } - l_wait_event(sai->sai_thread.t_ctl_waitq, - sa_is_running(sai) || sa_is_stopped(sai), + l_wait_event(thread->t_ctl_waitq, + thread_is_running(thread) || thread_is_stopped(thread), &lwi); + ll_sai_put(sai); /* * We don't stat-ahead for the first dirent since we are already in - * lookup, and -EEXIST also indicates that this is the first dirent. + * lookup. 
*/ - RETURN(-EEXIST); + RETURN(-EAGAIN); out: - cfs_spin_lock(&lli->lli_sa_lock); - lli->lli_opendir_key = NULL; - lli->lli_opendir_pid = 0; - cfs_spin_unlock(&lli->lli_sa_lock); - return rc; -} - -/** - * update hit/miss count. - */ -void ll_statahead_exit(struct inode *dir, struct dentry *dentry, int result) -{ - struct ll_inode_info *lli; - struct ll_statahead_info *sai; - struct ll_sb_info *sbi; - struct ll_dentry_data *ldd = ll_d2d(dentry); - int rc; - ENTRY; - - LASSERT(dir != NULL); - lli = ll_i2info(dir); - LASSERT(lli->lli_opendir_pid == cfs_curproc_pid()); - sai = lli->lli_sai; - LASSERT(sai != NULL); - sbi = ll_i2sbi(dir); - - rc = ll_sai_entry_fini(sai); - /* rc == -ENOENT means such dentry was removed just between statahead - * readdir and pre-fetched, count it as hit. - * - * result == -ENOENT has two meanings: - * 1. such dentry was removed just between statahead pre-fetched and - * main process stat such dentry. - * 2. main process stat non-exist dentry. - * We can not distinguish such two cases, just count them as miss. */ - if (result >= 1 || unlikely(rc == -ENOENT)) { - sai->sai_hit++; - sai->sai_consecutive_miss = 0; - sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max); - } else { - sai->sai_miss++; - sai->sai_consecutive_miss++; - if (sa_low_hit(sai) && sa_is_running(sai)) { - atomic_inc(&sbi->ll_sa_wrong); - CDEBUG(D_READA, "Statahead for dir "DFID" hit ratio " - "too low: hit/miss %u/%u, sent/replied %u/%u, " - "stopping statahead thread: pid %d\n", - PFID(&lli->lli_fid), sai->sai_hit, - sai->sai_miss, sai->sai_sent, - sai->sai_replied, cfs_curproc_pid()); - cfs_spin_lock(&lli->lli_sa_lock); - if (!sa_is_stopped(sai)) - sai->sai_thread.t_flags = SVC_STOPPING; - cfs_spin_unlock(&lli->lli_sa_lock); - } - } - - if (!sa_is_stopped(sai)) - cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq); - if (likely(ldd != NULL)) - ldd->lld_sa_generation = sai->sai_generation; - - EXIT; + if (sai != NULL) + OBD_FREE_PTR(sai); + spin_lock(&lli->lli_sa_lock); + lli->lli_opendir_key = NULL; + lli->lli_opendir_pid = 0; + spin_unlock(&lli->lli_sa_lock); + return rc; }
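
Note on the ll_sai_unplug() hunk above: every statahead cache hit doubles the prefetch window sai_max (capped by the per-mount sbi->ll_sa_max), while a poor hit ratio or too many consecutive misses makes the function flag the statahead thread to stop. The stand-alone userspace sketch below models that adaptation; the struct and function names are illustrative only and are not the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct ll_statahead_info; not the kernel type. */
struct sa_model {
        unsigned long long hit;
        unsigned long long miss;
        unsigned int       consecutive_miss;
        unsigned int       max;     /* current statahead window */
        unsigned int       sa_max;  /* per-mount cap (sbi->ll_sa_max) */
        bool               running; /* statahead thread still running */
};

/* Same predicate as sa_low_hit(): ratio under 80% or >8 misses in a row. */
static bool low_hit(const struct sa_model *sai)
{
        return (sai->hit > 7 && sai->hit < 4 * sai->miss) ||
               sai->consecutive_miss > 8;
}

/* Mirrors the hit/miss branches of ll_sai_unplug(). */
static void record_lookup(struct sa_model *sai, bool hit)
{
        if (hit) {
                sai->hit++;
                sai->consecutive_miss = 0;
                sai->max = 2 * sai->max < sai->sa_max ?
                           2 * sai->max : sai->sa_max;
        } else {
                sai->miss++;
                sai->consecutive_miss++;
                if (low_hit(sai) && sai->running) {
                        printf("hit/miss %llu/%llu: stop statahead thread\n",
                               sai->hit, sai->miss);
                        sai->running = false;
                }
        }
}

int main(void)
{
        struct sa_model sai = { .max = 1, .sa_max = 64, .running = true };
        int i;

        for (i = 0; i < 5; i++)
                record_lookup(&sai, true);   /* window grows: 2, 4, 8, 16, 32 */
        for (i = 0; i < 10; i++)
                record_lookup(&sai, false);  /* consecutive misses stop it */

        printf("final window %u, running %d\n", sai.max, (int)sai.running);
        return 0;
}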
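
The new \retval comments on do_statahead_enter() define a three-way contract for the lookup path: 1 means the entry and its ldlm lock were prefetched and the caller has nothing more to do, 0 means the entry is cached but its attributes must be refreshed from the MDS, and any other (negative) value, such as -EAGAIN, means the caller handles the dentry as non-statahead. The sketch below is a hypothetical, self-contained illustration of how a caller consumes those values; the enum and helper functions are invented for the example and do not exist in Lustre.

#include <stdio.h>

/* Invented names for the three documented outcomes; not a Lustre enum. */
enum sa_result {
        SA_CACHED_WITH_LOCK = 1,    /* retval 1: dentry fully prefetched */
        SA_CACHED_NO_LOCK   = 0,    /* retval 0: refresh attributes on MDS */
        SA_NOT_HANDLED      = -11,  /* negative, e.g. -EAGAIN on Linux */
};

/* Hypothetical helpers standing in for the regular lookup paths. */
static int refresh_from_mds(const char *name)
{
        printf("%s: revalidating attributes from the MDS\n", name);
        return 0;
}

static int plain_lookup(const char *name)
{
        printf("%s: falling back to the regular lookup path\n", name);
        return 0;
}

static int consume_statahead(int rc, const char *name)
{
        if (rc == SA_CACHED_WITH_LOCK) {
                printf("%s: inode and ldlm lock prefetched, nothing to do\n",
                       name);
                return 0;
        }
        if (rc == SA_CACHED_NO_LOCK)
                return refresh_from_mds(name);
        return plain_lookup(name);      /* any other value */
}

int main(void)
{
        consume_statahead(SA_CACHED_WITH_LOCK, "a.txt");
        consume_statahead(SA_CACHED_NO_LOCK, "b.txt");
        consume_statahead(SA_NOT_HANDLED, "c.txt");
        return 0;
}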
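
The comment added above the ll_sai_get(sai) call in do_statahead_enter() spells out the reference-counting rule for the sai buffer: one reference comes from allocation, the publishing thread takes a local reference before attaching the sai to lli->lli_sai, and when kthread_run() fails both references are dropped. Below is a minimal single-threaded model of that discipline, assuming nothing beyond the comment itself; a real implementation would use atomic_t and locking, and all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

/* Single-threaded stand-in; the kernel keeps this count in an atomic_t. */
struct sai_model {
        int refcount;
};

static struct sai_model *sai_alloc(void)
{
        struct sai_model *sai = calloc(1, sizeof(*sai));

        if (sai != NULL)
                sai->refcount = 1;      /* default reference from allocation */
        return sai;
}

static void sai_get(struct sai_model *sai)
{
        sai->refcount++;
}

static void sai_put(struct sai_model *sai)
{
        if (--sai->refcount == 0) {
                printf("last reference gone, freeing sai\n");
                free(sai);
        }
}

int main(void)
{
        struct sai_model *sai = sai_alloc();
        int thread_start_failed = 1;    /* pretend kthread_run() failed */

        if (sai == NULL)
                return 1;

        /* Take a local reference before the object is published, so a
         * concurrent stop path cannot free it while we still use it. */
        sai_get(sai);

        if (thread_start_failed) {
                sai_put(sai);   /* drop the local reference */
                sai_put(sai);   /* drop the allocation-time reference */
                return 0;
        }

        /* Otherwise the statahead thread inherits the allocation reference
         * and we only drop our temporary local one. */
        sai_put(sai);
        return 0;
}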