X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Fstatahead.c;h=27c41586b67e07dc09b2115cbbfedd3962e7bbe8;hp=c62b3ea0f379a257033cc8c088bf8793fe860104;hb=5db0e2de00f6b3d687fc4fe01f05a687fc74271c;hpb=83cce524dd47e5433dfdbe530f3b453807ef6976 diff --git a/lustre/llite/statahead.c b/lustre/llite/statahead.c index c62b3ea..27c4158 100644 --- a/lustre/llite/statahead.c +++ b/lustre/llite/statahead.c @@ -27,7 +27,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2015, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -36,6 +36,7 @@ #include #include +#include #include #include #include @@ -57,11 +58,11 @@ typedef enum { /* sa_entry is not refcounted: statahead thread allocates it and do async stat, * and in async stat callback ll_statahead_interpret() will add it into - * sai_cb_entries, later statahead thread will call sa_handle_callback() to + * sai_interim_entries, later statahead thread will call sa_handle_callback() to * instantiate entry and move it into sai_entries, and then only scanner process * can access and free it. */ struct sa_entry { - /* link into sai_cb_entries or sai_entries */ + /* link into sai_interim_entries or sai_entries */ struct list_head se_list; /* link into sai hash table locally */ struct list_head se_hash; @@ -81,6 +82,8 @@ struct sa_entry { struct inode *se_inode; /* entry name */ struct qstr se_qstr; + /* entry fid */ + struct lu_fid se_fid; }; static unsigned int sai_generation = 0; @@ -91,23 +94,20 @@ static inline int sa_unhashed(struct sa_entry *entry) return list_empty(&entry->se_hash); } -/* - * The entry only can be released by the caller, it is necessary to hold lock. - */ +/* sa_entry is ready to use */ static inline int sa_ready(struct sa_entry *entry) { smp_rmb(); return (entry->se_state != SA_ENTRY_INIT); } +/* hash value to put in sai_cache */ static inline int sa_hash(int val) { return val & LL_SA_CACHE_MASK; } -/* - * Insert entry to hash SA table. - */ +/* hash entry into sai_cache */ static inline void sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry) { @@ -118,9 +118,7 @@ sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry) spin_unlock(&sai->sai_cache_lock[i]); } -/* - * Remove entry from SA table. - */ +/* unhash entry from sai_cache */ static inline void sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry) { @@ -144,11 +142,13 @@ agl_first_entry(struct ll_statahead_info *sai) lli_agl_list); } +/* statahead window is full */ static inline int sa_sent_full(struct ll_statahead_info *sai) { return atomic_read(&sai->sai_cache_count) >= sai->sai_max; } +/* got async stat replies */ static inline int sa_has_callback(struct ll_statahead_info *sai) { return !list_empty(&sai->sai_interim_entries); @@ -172,7 +172,7 @@ static inline int sa_low_hit(struct ll_statahead_info *sai) } /* - * If the given index is behind of statahead window more than + * if the given index is behind of statahead window more than * SA_OMITTED_ENTRY_MAX, then it is old. 
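
Earlier in this hunk the comments around the per-directory sa_entry cache are reworded: entries are hashed into sai_cache with sa_rehash()/sa_unhash() so the scanner can find them by name via sa_get(). As a rough userspace sketch of that bucket-cache pattern only (illustrative hash and bucket count, per-bucket locking omitted, not the Lustre code itself):

#include <string.h>

#define SA_CACHE_SIZE	32			/* must be a power of two */
#define SA_CACHE_MASK	(SA_CACHE_SIZE - 1)

struct sa_entry_demo {
	struct sa_entry_demo	*se_next;	/* bucket chain */
	unsigned int		 se_hash;
	const char		*se_name;
};

static struct sa_entry_demo *sa_cache[SA_CACHE_SIZE];

unsigned int name_hash(const char *name)
{
	unsigned int h = 5381;

	while (*name != '\0')
		h = h * 33 + (unsigned char)*name++;
	return h;
}

/* hash an entry into its bucket, like sa_rehash() */
void sa_rehash_demo(struct sa_entry_demo *entry)
{
	unsigned int i = entry->se_hash & SA_CACHE_MASK;

	entry->se_next = sa_cache[i];
	sa_cache[i] = entry;
}

/* find an entry by name, like sa_get() does for the scanner */
struct sa_entry_demo *sa_get_demo(const char *name)
{
	unsigned int h = name_hash(name);
	struct sa_entry_demo *e;

	for (e = sa_cache[h & SA_CACHE_MASK]; e != NULL; e = e->se_next)
		if (e->se_hash == h && strcmp(e->se_name, name) == 0)
			return e;
	return NULL;
}
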
*/ static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index) @@ -181,9 +181,10 @@ static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index) sai->sai_index); } -/* allocate sa_entry and add it into hash to let scanner process to find it */ +/* allocate sa_entry and hash it to allow scanner process to find it */ static struct sa_entry * -sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len) +sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len, + const struct lu_fid *fid) { struct ll_inode_info *lli; struct sa_entry *entry; @@ -209,8 +210,10 @@ sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len) entry->se_qstr.hash = full_name_hash(name, len); entry->se_qstr.len = len; entry->se_qstr.name = dname; + entry->se_fid = *fid; + + lli = ll_i2info(sai->sai_dentry->d_inode); - lli = ll_i2info(sai->sai_inode); spin_lock(&lli->lli_sa_lock); INIT_LIST_HEAD(&entry->se_list); sa_rehash(sai, entry); @@ -221,7 +224,7 @@ sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len) RETURN(entry); } -/* free sa_entry which should have been unhashed and not in any list */ +/* free sa_entry, which should have been unhashed and not in any list */ static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry) { CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n", @@ -235,8 +238,9 @@ static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry) atomic_dec(&sai->sai_cache_count); } -/* find sa_entry by name, used by directory scanner, lock is not needed because - * only scanner can remove the entry from hash. +/* + * find sa_entry by name, used by directory scanner, lock is not needed because + * only scanner can remove the entry from cache. */ static struct sa_entry * sa_get(struct ll_statahead_info *sai, const struct qstr *qstr) @@ -253,10 +257,11 @@ sa_get(struct ll_statahead_info *sai, const struct qstr *qstr) return NULL; } +/* unhash and unlink sa_entry, and then free it */ static inline void sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry) { - struct ll_inode_info *lli = ll_i2info(sai->sai_inode); + struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode); LASSERT(!sa_unhashed(entry)); LASSERT(!list_empty(&entry->se_list)); @@ -281,7 +286,7 @@ sa_put(struct ll_statahead_info *sai, struct sa_entry *entry) struct sa_entry *tmp, *next; if (entry != NULL && entry->se_state == SA_ENTRY_SUCC) { - struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); + struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode); sai->sai_hit++; sai->sai_consecutive_miss = 0; @@ -312,12 +317,11 @@ __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret) { struct sa_entry *se; struct list_head *pos = &sai->sai_entries; + __u64 index = entry->se_index; LASSERT(!sa_ready(entry)); LASSERT(list_empty(&entry->se_list)); - entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC; - list_for_each_entry_reverse(se, &sai->sai_entries, se_list) { if (se->se_index < entry->se_index) { pos = &se->se_list; @@ -325,16 +329,19 @@ __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret) } } list_add(&entry->se_list, pos); + entry->se_state = ret < 0 ? 
SA_ENTRY_INVA : SA_ENTRY_SUCC; - return (entry->se_index == sai->sai_index_wait); + return (index == sai->sai_index_wait); } -/* release resources used in async stat RPC, complete entry information and - * wakeup if necessary */ +/* + * release resources used in async stat RPC, update entry state and wakeup if + * scanner process it waiting on this entry. + */ static void sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret) { - struct ll_inode_info *lli = ll_i2info(sai->sai_inode); + struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode); struct md_enqueue_info *minfo = entry->se_minfo; struct ptlrpc_request *req = entry->se_req; bool wakeup; @@ -360,14 +367,12 @@ sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret) wake_up(&sai->sai_waitq); } -/* - * Insert inode into the list of sai_agls. - */ +/* insert inode into the list of sai_agls */ static void ll_agl_add(struct ll_statahead_info *sai, struct inode *inode, int index) { struct ll_inode_info *child = ll_i2info(inode); - struct ll_inode_info *parent = ll_i2info(sai->sai_inode); + struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode); int added = 0; spin_lock(&child->lli_agl_lock); @@ -391,24 +396,20 @@ static void ll_agl_add(struct ll_statahead_info *sai, wake_up(&sai->sai_agl_thread.t_ctl_waitq); } -static struct ll_statahead_info *ll_sai_alloc(void) +/* allocate sai */ +static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry) { struct ll_statahead_info *sai; - int i; + struct ll_inode_info *lli = ll_i2info(dentry->d_inode); + int i; ENTRY; OBD_ALLOC_PTR(sai); if (!sai) RETURN(NULL); + sai->sai_dentry = dget(dentry); atomic_set(&sai->sai_refcount, 1); - - spin_lock(&sai_generation_lock); - sai->sai_generation = ++sai_generation; - if (unlikely(sai_generation == 0)) - sai->sai_generation = ++sai_generation; - spin_unlock(&sai_generation_lock); - sai->sai_max = LL_SA_RPC_MIN; sai->sai_index = 1; init_waitqueue_head(&sai->sai_waitq); @@ -425,9 +426,27 @@ static struct ll_statahead_info *ll_sai_alloc(void) } atomic_set(&sai->sai_cache_count, 0); + spin_lock(&sai_generation_lock); + lli->lli_sa_generation = ++sai_generation; + if (unlikely(sai_generation == 0)) + lli->lli_sa_generation = ++sai_generation; + spin_unlock(&sai_generation_lock); + RETURN(sai); } +/* free sai */ +static inline void ll_sai_free(struct ll_statahead_info *sai) +{ + LASSERT(sai->sai_dentry != NULL); + dput(sai->sai_dentry); + OBD_FREE_PTR(sai); +} + +/* + * take refcount of sai if sai for @dir exists, which means statahead is on for + * this directory. + */ static inline struct ll_statahead_info *ll_sai_get(struct inode *dir) { struct ll_inode_info *lli = ll_i2info(dir); @@ -442,13 +461,17 @@ static inline struct ll_statahead_info *ll_sai_get(struct inode *dir) return sai; } +/* + * put sai refcount after use, if refcount reaches zero, free sai and sa_entries + * attached to it. 
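
__sa_make_ready() earlier in this hunk keeps sai_entries sorted by se_index by walking the list backwards from the tail and inserting after the last smaller index, so the scanner consumes entries in readdir order. A minimal, self-contained sketch of the same idea with a plain singly linked list (hypothetical types, not the kernel list_head API):

#include <stddef.h>

struct entry_demo {
	unsigned long long	 index;		/* like se_index */
	struct entry_demo	*next;
};

/* insert @e so the list stays ordered by statahead index */
void insert_by_index(struct entry_demo **head, struct entry_demo *e)
{
	struct entry_demo **pos = head;

	while (*pos != NULL && (*pos)->index < e->index)
		pos = &(*pos)->next;
	e->next = *pos;
	*pos = e;
}
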
+ */ static void ll_sai_put(struct ll_statahead_info *sai) { - struct ll_inode_info *lli = ll_i2info(sai->sai_inode); + struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode); if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) { struct sa_entry *entry, *next; - struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); + struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode); lli->lli_sai = NULL; spin_unlock(&lli->lli_sa_lock); @@ -465,8 +488,7 @@ static void ll_sai_put(struct ll_statahead_info *sai) LASSERT(atomic_read(&sai->sai_cache_count) == 0); LASSERT(agl_list_empty(sai)); - iput(sai->sai_inode); - OBD_FREE_PTR(sai); + ll_sai_free(sai); atomic_dec(&sbi->ll_sa_running); } } @@ -534,12 +556,14 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai) EXIT; } -/* prepare inode for sa entry, add it into agl list, now sa_entry is ready - * to be used by scanner process. */ +/* + * prepare inode for sa entry, add it into agl list, now sa_entry is ready + * to be used by scanner process. + */ static void sa_instantiate(struct ll_statahead_info *sai, struct sa_entry *entry) { - struct inode *dir = sai->sai_inode; + struct inode *dir = sai->sai_dentry->d_inode; struct inode *child; struct md_enqueue_info *minfo; struct lookup_intent *it; @@ -557,31 +581,18 @@ static void sa_instantiate(struct ll_statahead_info *sai, if (body == NULL) GOTO(out, rc = -EFAULT); - child = entry->se_inode; - if (child == NULL) { - /* - * lookup. - */ - LASSERT(fid_is_zero(&minfo->mi_data.op_fid2)); - - /* XXX: No fid in reply, this is probaly cross-ref case. - * SA can't handle it yet. */ - if (body->mbo_valid & OBD_MD_MDS) - GOTO(out, rc = -EAGAIN); - } else { - /* - * revalidate. - */ - /* unlinked and re-created with the same name */ + child = entry->se_inode; + if (child != NULL) { + /* revalidate; unlinked and re-created with the same name */ if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->mbo_fid1))) { - entry->se_inode = NULL; - iput(child); - child = NULL; - } - } + entry->se_inode = NULL; + iput(child); + child = NULL; + } + } - it->d.lustre.it_lock_handle = entry->se_handle; + it->it_lock_handle = entry->se_handle; rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL); if (rc != 1) GOTO(out, rc = -EAGAIN); @@ -610,12 +621,12 @@ out: sa_make_ready(sai, entry, rc); } -/* once there are async stat replies, instantiate sa_entry */ +/* once there are async stat replies, instantiate sa_entry from replies */ static void sa_handle_callback(struct ll_statahead_info *sai) { struct ll_inode_info *lli; - lli = ll_i2info(sai->sai_inode); + lli = ll_i2info(sai->sai_dentry->d_inode); while (sa_has_callback(sai)) { struct sa_entry *entry; @@ -632,25 +643,13 @@ static void sa_handle_callback(struct ll_statahead_info *sai) sa_instantiate(sai, entry); } - - spin_lock(&lli->lli_agl_lock); - while (!agl_list_empty(sai)) { - struct ll_inode_info *clli; - - clli = agl_first_entry(sai); - list_del_init(&clli->lli_agl_list); - spin_unlock(&lli->lli_agl_lock); - - ll_agl_trigger(&clli->lli_vfs_inode, sai); - - spin_lock(&lli->lli_agl_lock); - } - spin_unlock(&lli->lli_agl_lock); } -/* callback for async stat, because this is called in ptlrpcd context, we only - * put sa_entry in sai_cb_entries list, and let sa_handle_callback() to really - * prepare inode and instantiate sa_entry later. 
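
ll_sai_put() above follows the dec-and-lock pattern: the final reference detaches the sai from the directory under lli_sa_lock before freeing it, so a racing ll_sai_get() can never hand out a reference to a dying sai. A simplified userspace analogue of that get/put pairing (hypothetical names, pthread locking instead of spinlocks):

#include <pthread.h>
#include <stdlib.h>

struct sai_demo;

struct dir_demo {
	pthread_mutex_t		 sa_lock;	/* like lli->lli_sa_lock */
	struct sai_demo		*sai;		/* like lli->lli_sai */
};

struct sai_demo {
	struct dir_demo		*dir;
	int			 refcount;
};

struct sai_demo *sai_get(struct dir_demo *dir)
{
	struct sai_demo *sai;

	pthread_mutex_lock(&dir->sa_lock);
	sai = dir->sai;
	if (sai != NULL)
		sai->refcount++;
	pthread_mutex_unlock(&dir->sa_lock);
	return sai;
}

void sai_put(struct sai_demo *sai)
{
	struct dir_demo *dir = sai->dir;

	pthread_mutex_lock(&dir->sa_lock);
	if (--sai->refcount == 0) {
		dir->sai = NULL;	/* hide from new lookups first */
		pthread_mutex_unlock(&dir->sa_lock);
		free(sai);		/* free cached entries, then the sai */
		return;
	}
	pthread_mutex_unlock(&dir->sa_lock);
}
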
*/ +/* + * callback for async stat RPC, because this is called in ptlrpcd context, we + * only put sa_entry in sai_interim_entries, and wake up statahead thread to + * really prepare inode and instantiate sa_entry later. + */ static int ll_statahead_interpret(struct ptlrpc_request *req, struct md_enqueue_info *minfo, int rc) { @@ -660,7 +659,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, struct ll_statahead_info *sai = lli->lli_sai; struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata; __u64 handle = 0; - bool wakeup; + wait_queue_head_t *waitq = NULL; ENTRY; if (it_disposition(it, DISP_LOOKUP_NEG)) @@ -684,13 +683,14 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, * thread enqueues lock on parent in readdir and another * process enqueues lock on child with parent lock held, eg. * unlink. */ - handle = it->d.lustre.it_lock_handle; + handle = it->it_lock_handle; ll_intent_drop_lock(it); } spin_lock(&lli->lli_sa_lock); if (rc != 0) { - wakeup = __sa_make_ready(sai, entry, rc); + if (__sa_make_ready(sai, entry, rc)) + waitq = &sai->sai_waitq; } else { entry->se_minfo = minfo; entry->se_req = ptlrpc_request_addref(req); @@ -699,123 +699,98 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, * for readpage and other tries to enqueue lock on child * with parent's lock held, for example: unlink. */ entry->se_handle = handle; - wakeup = !sa_has_callback(sai); + if (!sa_has_callback(sai)) + waitq = &sai->sai_thread.t_ctl_waitq; + list_add_tail(&entry->se_list, &sai->sai_interim_entries); } sai->sai_replied++; - if (wakeup) - wake_up(&sai->sai_thread.t_ctl_waitq); + if (waitq != NULL) + wake_up(waitq); spin_unlock(&lli->lli_sa_lock); RETURN(rc); } -static void sa_fini_data(struct md_enqueue_info *minfo, - struct ldlm_enqueue_info *einfo) +/* finish async stat RPC arguments */ +static void sa_fini_data(struct md_enqueue_info *minfo) { - LASSERT(minfo && einfo); iput(minfo->mi_dir); - capa_put(minfo->mi_data.op_capa1); - capa_put(minfo->mi_data.op_capa2); OBD_FREE_PTR(minfo); - OBD_FREE_PTR(einfo); } -/** - * There is race condition between "capa_put" and "ll_statahead_interpret" for - * accessing "op_data.op_capa[1,2]" as following: - * "capa_put" releases "op_data.op_capa[1,2]"'s reference count after calling - * "md_intent_getattr_async". But "ll_statahead_interpret" maybe run first, and - * fill "op_data.op_capa[1,2]" as POISON, then cause "capa_put" access invalid - * "ocapa". So here reserve "op_data.op_capa[1,2]" in "pcapa" before calling - * "md_intent_getattr_async". +/* + * prepare arguments for async stat RPC. 
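
The rewritten comment above states the key constraint: ll_statahead_interpret() runs in ptlrpcd (reply) context, so it only parks the reply on sai_interim_entries and wakes the statahead thread, which performs the expensive inode instantiation later. A small pthread sketch of that queue-and-wake hand-off (illustrative only, not the Lustre locking or data structures):

#include <pthread.h>
#include <stdlib.h>

struct reply_demo {
	struct reply_demo	*next;
	int			 rc;
};

static struct reply_demo	*interim_head;
static pthread_mutex_t		 interim_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t		 interim_wake = PTHREAD_COND_INITIALIZER;

/* reply context: queue only and wake the worker, do no heavy work here */
void reply_callback(struct reply_demo *r)
{
	pthread_mutex_lock(&interim_lock);
	r->next = interim_head;
	interim_head = r;
	pthread_cond_signal(&interim_wake);
	pthread_mutex_unlock(&interim_lock);
}

/* statahead-thread context: drain the interim list and instantiate entries */
void handle_callbacks(void)
{
	pthread_mutex_lock(&interim_lock);
	while (interim_head == NULL)
		pthread_cond_wait(&interim_wake, &interim_lock);
	while (interim_head != NULL) {
		struct reply_demo *r = interim_head;

		interim_head = r->next;
		pthread_mutex_unlock(&interim_lock);
		free(r);		/* stand-in for sa_instantiate() work */
		pthread_mutex_lock(&interim_lock);
	}
	pthread_mutex_unlock(&interim_lock);
}
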
*/ -static int sa_prep_data(struct inode *dir, struct inode *child, - struct sa_entry *entry, struct md_enqueue_info **pmi, - struct ldlm_enqueue_info **pei, - struct obd_capa **pcapa) +static struct md_enqueue_info * +sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry) { - struct qstr *qstr = &entry->se_qstr; - struct md_enqueue_info *minfo; - struct ldlm_enqueue_info *einfo; - struct md_op_data *op_data; - - OBD_ALLOC_PTR(einfo); - if (einfo == NULL) - return -ENOMEM; - - OBD_ALLOC_PTR(minfo); - if (minfo == NULL) { - OBD_FREE_PTR(einfo); - return -ENOMEM; - } + struct md_enqueue_info *minfo; + struct ldlm_enqueue_info *einfo; + struct md_op_data *op_data; - op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name, - qstr->len, 0, LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) { - OBD_FREE_PTR(einfo); - OBD_FREE_PTR(minfo); - return PTR_ERR(op_data); - } + OBD_ALLOC_PTR(minfo); + if (minfo == NULL) + return ERR_PTR(-ENOMEM); + + op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, NULL, 0, 0, + LUSTRE_OPC_ANY, NULL); + if (IS_ERR(op_data)) { + OBD_FREE_PTR(minfo); + return (struct md_enqueue_info *)op_data; + } + + if (child == NULL) + op_data->op_fid2 = entry->se_fid; minfo->mi_it.it_op = IT_GETATTR; minfo->mi_dir = igrab(dir); minfo->mi_cb = ll_statahead_interpret; minfo->mi_cbdata = entry; - einfo->ei_type = LDLM_IBITS; - einfo->ei_mode = it_to_lock_mode(&minfo->mi_it); - einfo->ei_cb_bl = ll_md_blocking_ast; - einfo->ei_cb_cp = ldlm_completion_ast; - einfo->ei_cb_gl = NULL; - einfo->ei_cbdata = NULL; - - *pmi = minfo; - *pei = einfo; - pcapa[0] = op_data->op_capa1; - pcapa[1] = op_data->op_capa2; + einfo = &minfo->mi_einfo; + einfo->ei_type = LDLM_IBITS; + einfo->ei_mode = it_to_lock_mode(&minfo->mi_it); + einfo->ei_cb_bl = ll_md_blocking_ast; + einfo->ei_cb_cp = ldlm_completion_ast; + einfo->ei_cb_gl = NULL; + einfo->ei_cbdata = NULL; - return 0; + return minfo; } +/* async stat for file not found in dcache */ static int sa_lookup(struct inode *dir, struct sa_entry *entry) { struct md_enqueue_info *minfo; - struct ldlm_enqueue_info *einfo; - struct obd_capa *capas[2]; int rc; ENTRY; - rc = sa_prep_data(dir, NULL, entry, &minfo, &einfo, capas); - if (rc) - RETURN(rc); + minfo = sa_prep_data(dir, NULL, entry); + if (IS_ERR(minfo)) + RETURN(PTR_ERR(minfo)); - rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo); - if (!rc) { - capa_put(capas[0]); - capa_put(capas[1]); - } else { - sa_fini_data(minfo, einfo); - } + rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo); + if (rc < 0) + sa_fini_data(minfo); RETURN(rc); } /** - * similar to ll_revalidate_it(). 
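
sa_prep_data() now returns either a valid md_enqueue_info pointer or an encoded errno, the usual kernel ERR_PTR()/IS_ERR()/PTR_ERR() convention that sa_lookup() checks above. A userspace approximation of that idiom, for readers unfamiliar with it:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

static inline void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static inline long  ptr_err(const void *p) { return (long)(intptr_t)p; }
static inline int   is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

struct minfo_demo { int mi_op; };

/* allocate the request descriptor, or return an encoded error */
struct minfo_demo *prep_data_demo(void)
{
	struct minfo_demo *minfo = malloc(sizeof(*minfo));

	if (minfo == NULL)
		return err_ptr(-ENOMEM);
	minfo->mi_op = 0;
	return minfo;
}

A caller then writes "if (is_err(m)) return ptr_err(m);" instead of needing a separate output parameter for the error code, which is what lets sa_prep_data() drop its old pointer-to-pointer arguments.
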
- * \retval 1 -- dentry valid - * \retval 0 -- will send stat-ahead request - * \retval others -- prepare stat-ahead request failed + * async stat for file found in dcache, similar to .revalidate + * + * \retval 1 dentry valid, no RPC sent + * \retval 0 dentry invalid, will send async stat RPC + * \retval negative number upon error */ static int sa_revalidate(struct inode *dir, struct sa_entry *entry, struct dentry *dentry) { struct inode *inode = dentry->d_inode; struct lookup_intent it = { .it_op = IT_GETATTR, - .d.lustre.it_lock_handle = 0 }; + .it_lock_handle = 0 }; struct md_enqueue_info *minfo; - struct ldlm_enqueue_info *einfo; - struct obd_capa *capas[2]; int rc; ENTRY; @@ -829,32 +804,31 @@ static int sa_revalidate(struct inode *dir, struct sa_entry *entry, rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode), NULL); if (rc == 1) { - entry->se_handle = it.d.lustre.it_lock_handle; + entry->se_handle = it.it_lock_handle; ll_intent_release(&it); RETURN(1); } - rc = sa_prep_data(dir, inode, entry, &minfo, &einfo, capas); - if (rc) { + minfo = sa_prep_data(dir, inode, entry); + if (IS_ERR(minfo)) { entry->se_inode = NULL; iput(inode); - RETURN(rc); + RETURN(PTR_ERR(minfo)); } - rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo); - if (!rc) { - capa_put(capas[0]); - capa_put(capas[1]); - } else { + rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo); + if (rc < 0) { entry->se_inode = NULL; iput(inode); - sa_fini_data(minfo, einfo); + sa_fini_data(minfo); } RETURN(rc); } -static void sa_statahead(struct dentry *parent, const char *name, int len) +/* async stat for file with @name */ +static void sa_statahead(struct dentry *parent, const char *name, int len, + const struct lu_fid *fid) { struct inode *dir = parent->d_inode; struct ll_inode_info *lli = ll_i2info(dir); @@ -864,7 +838,7 @@ static void sa_statahead(struct dentry *parent, const char *name, int len) int rc; ENTRY; - entry = sa_alloc(sai, sai->sai_index, name, len); + entry = sa_alloc(sai, sai->sai_index, name, len, fid); if (IS_ERR(entry)) RETURN_EXIT; @@ -890,6 +864,7 @@ static void sa_statahead(struct dentry *parent, const char *name, int len) EXIT; } +/* async glimpse (agl) thread main function */ static int ll_agl_thread(void *arg) { struct dentry *parent = (struct dentry *)arg; @@ -961,6 +936,7 @@ static int ll_agl_thread(void *arg) RETURN(0); } +/* start agl thread */ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai) { struct ptlrpc_thread *thread = &sai->sai_agl_thread; @@ -987,6 +963,7 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai) EXIT; } +/* statahead thread main function */ static int ll_statahead_thread(void *arg) { struct dentry *parent = (struct dentry *)arg; @@ -994,7 +971,7 @@ static int ll_statahead_thread(void *arg) struct ll_inode_info *lli = ll_i2info(dir); struct ll_sb_info *sbi = ll_i2sbi(dir); struct ll_statahead_info *sai; - struct ptlrpc_thread *thread; + struct ptlrpc_thread *sa_thread; struct ptlrpc_thread *agl_thread; int first = 0; struct md_op_data *op_data; @@ -1006,9 +983,9 @@ static int ll_statahead_thread(void *arg) ENTRY; sai = ll_sai_get(dir); - thread = &sai->sai_thread; + sa_thread = &sai->sai_thread; agl_thread = &sai->sai_agl_thread; - thread->t_pid = current_pid(); + sa_thread->t_pid = current_pid(); CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n", sai, parent->d_name.len, parent->d_name.name); @@ -1024,16 +1001,16 @@ static int ll_statahead_thread(void *arg) 
atomic_inc(&sbi->ll_sa_total); spin_lock(&lli->lli_sa_lock); - if (thread_is_init(thread)) + if (thread_is_init(sa_thread)) /* If someone else has changed the thread state * (e.g. already changed to SVC_STOPPING), we can't just * blindly overwrite that setting. */ - thread_set_flags(thread, SVC_RUNNING); + thread_set_flags(sa_thread, SVC_RUNNING); spin_unlock(&lli->lli_sa_lock); - wake_up(&thread->t_ctl_waitq); + wake_up(&sa_thread->t_ctl_waitq); ll_dir_chain_init(&chain); - while (pos != MDS_DIR_END_OFF && thread_is_running(thread)) { + while (pos != MDS_DIR_END_OFF && thread_is_running(sa_thread)) { struct lu_dirpage *dp; struct lu_dirent *ent; @@ -1051,12 +1028,13 @@ static int ll_statahead_thread(void *arg) dp = page_address(page); for (ent = lu_dirent_start(dp); - ent != NULL && thread_is_running(thread) && + ent != NULL && thread_is_running(sa_thread) && !sa_low_hit(sai); ent = lu_dirent_next(ent)) { __u64 hash; int namelen; char *name; + struct lu_fid fid; hash = le64_to_cpu(ent->lde_hash); if (unlikely(hash < pos)) @@ -1099,20 +1077,38 @@ static int ll_statahead_thread(void *arg) if (unlikely(++first == 1)) continue; + fid_le_to_cpu(&fid, &ent->lde_fid); + /* wait for spare statahead window */ do { - l_wait_event(thread->t_ctl_waitq, + l_wait_event(sa_thread->t_ctl_waitq, !sa_sent_full(sai) || sa_has_callback(sai) || !agl_list_empty(sai) || - !thread_is_running(thread), + !thread_is_running(sa_thread), &lwi); sa_handle_callback(sai); + + spin_lock(&lli->lli_agl_lock); + while (sa_sent_full(sai) && + !agl_list_empty(sai)) { + struct ll_inode_info *clli; + + clli = agl_first_entry(sai); + list_del_init(&clli->lli_agl_list); + spin_unlock(&lli->lli_agl_lock); + + ll_agl_trigger(&clli->lli_vfs_inode, + sai); + + spin_lock(&lli->lli_agl_lock); + } + spin_unlock(&lli->lli_agl_lock); } while (sa_sent_full(sai) && - thread_is_running(thread)); + thread_is_running(sa_thread)); - sa_statahead(parent, name, namelen); + sa_statahead(parent, name, namelen, &fid); } pos = le64_to_cpu(dp->ldp_hash_end); @@ -1137,18 +1133,17 @@ static int ll_statahead_thread(void *arg) if (rc < 0) { spin_lock(&lli->lli_sa_lock); - thread_set_flags(thread, SVC_STOPPING); + thread_set_flags(sa_thread, SVC_STOPPING); lli->lli_sa_enabled = 0; spin_unlock(&lli->lli_sa_lock); } /* statahead is finished, but statahead entries need to be cached, wait * for file release to stop me. 
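
The loop above is the throttling heart of the thread: it keeps at most sai_max un-consumed entries outstanding, and while waiting for the scanner to consume some it also drains AGL work in-line. Stripped of the AGL and shutdown details, the window logic looks roughly like this (hypothetical userspace names, pthreads instead of l_wait_event()):

#include <pthread.h>

static pthread_mutex_t	win_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	win_wake = PTHREAD_COND_INITIALIZER;
static unsigned int	outstanding;
static unsigned int	win_max = 32;	/* stand-in for sai->sai_max */

/* statahead thread side: sleep while the window is full */
void window_get_slot(void)
{
	pthread_mutex_lock(&win_lock);
	while (outstanding >= win_max)
		pthread_cond_wait(&win_wake, &win_lock);
	outstanding++;
	pthread_mutex_unlock(&win_lock);
}

/* scanner side: consuming (or dropping) an entry reopens a slot */
void window_put_slot(void)
{
	pthread_mutex_lock(&win_lock);
	outstanding--;
	pthread_cond_signal(&win_wake);
	pthread_mutex_unlock(&win_lock);
}
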
*/ - while (thread_is_running(thread)) { - l_wait_event(thread->t_ctl_waitq, + while (thread_is_running(sa_thread)) { + l_wait_event(sa_thread->t_ctl_waitq, sa_has_callback(sai) || - !agl_list_empty(sai) || - !thread_is_running(thread), + !thread_is_running(sa_thread), &lwi); sa_handle_callback(sai); @@ -1176,8 +1171,9 @@ out: * safely because statahead RPC will access sai data */ while (sai->sai_sent != sai->sai_replied) { /* in case we're not woken up, timeout wait */ - lwi = LWI_TIMEOUT(HZ >> 3, NULL, NULL); - l_wait_event(thread->t_ctl_waitq, + lwi = LWI_TIMEOUT(msecs_to_jiffies(MSEC_PER_SEC >> 3), + NULL, NULL); + l_wait_event(sa_thread->t_ctl_waitq, sai->sai_sent == sai->sai_replied, &lwi); } @@ -1185,19 +1181,20 @@ out: sa_handle_callback(sai); spin_lock(&lli->lli_sa_lock); - thread_set_flags(thread, SVC_STOPPED); + thread_set_flags(sa_thread, SVC_STOPPED); spin_unlock(&lli->lli_sa_lock); - wake_up(&sai->sai_waitq); - wake_up(&thread->t_ctl_waitq); - ll_sai_put(sai); CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n", sai, parent->d_name.len, parent->d_name.name); - dput(parent); + + wake_up(&sai->sai_waitq); + wake_up(&sa_thread->t_ctl_waitq); + ll_sai_put(sai); + return rc; } -/* authorize opened dir handle @key to statahead later */ +/* authorize opened dir handle @key to statahead */ void ll_authorize_statahead(struct inode *dir, void *key) { struct ll_inode_info *lli = ll_i2info(dir); @@ -1216,8 +1213,10 @@ void ll_authorize_statahead(struct inode *dir, void *key) spin_unlock(&lli->lli_sa_lock); } -/* deauthorize opened dir handle @key to statahead, but statahead thread may - * still be running, notify it to quit. */ +/* + * deauthorize opened dir handle @key to statahead, and notify statahead thread + * to quit if it's running. + */ void ll_deauthorize_statahead(struct inode *dir, void *key) { struct ll_inode_info *lli = ll_i2info(dir); @@ -1246,20 +1245,21 @@ void ll_deauthorize_statahead(struct inode *dir, void *key) } enum { - /** - * not first dirent, or is "." - */ - LS_NONE_FIRST_DE = 0, - /** - * the first non-hidden dirent - */ - LS_FIRST_DE, - /** - * the first hidden dirent, that is "." - */ - LS_FIRST_DOT_DE + /** + * not first dirent, or is "." + */ + LS_NOT_FIRST_DE = 0, + /** + * the first non-hidden dirent + */ + LS_FIRST_DE, + /** + * the first hidden dirent, that is "." 
+ */ + LS_FIRST_DOT_DE }; +/* file is first dirent under @dir */ static int is_first_dirent(struct inode *dir, struct dentry *dentry) { struct ll_dir_chain chain; @@ -1267,7 +1267,7 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry) struct md_op_data *op_data; int dot_de; struct page *page = NULL; - int rc = LS_NONE_FIRST_DE; + int rc = LS_NOT_FIRST_DE; __u64 pos = 0; ENTRY; @@ -1346,7 +1346,7 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry) if (target->len != namelen || memcmp(target->name, name, namelen) != 0) - rc = LS_NONE_FIRST_DE; + rc = LS_NOT_FIRST_DE; else if (!dot_de) rc = LS_FIRST_DE; else @@ -1379,13 +1379,27 @@ out: return rc; } +/** + * revalidate @dentryp from statahead cache + * + * \param[in] dir parent directory + * \param[in] sai sai structure + * \param[out] dentryp pointer to dentry which will be revalidated + * \param[in] unplug unplug statahead window only (normally for negative + * dentry) + * \retval 1 on success, dentry is saved in @dentryp + * \retval 0 if revalidation failed (no proper lock on client) + * \retval negative number upon error + */ static int revalidate_statahead_dentry(struct inode *dir, struct ll_statahead_info *sai, struct dentry **dentryp, - int only_unplug) + bool unplug) { struct sa_entry *entry = NULL; struct l_wait_info lwi = { 0 }; + struct ll_dentry_data *ldd; + struct ll_inode_info *lli = ll_i2info(dir); int rc = 0; ENTRY; @@ -1416,34 +1430,38 @@ static int revalidate_statahead_dentry(struct inode *dir, } } + if (unplug) + GOTO(out, rc = 1); + entry = sa_get(sai, &(*dentryp)->d_name); - if (entry == NULL || only_unplug) { - sa_put(sai, entry); - RETURN(entry ? 1 : -EAGAIN); - } + if (entry == NULL) + GOTO(out, rc = -EAGAIN); /* if statahead is busy in readdir, help it do post-work */ if (!sa_ready(entry) && sai->sai_in_readpage) sa_handle_callback(sai); if (!sa_ready(entry)) { + spin_lock(&lli->lli_sa_lock); sai->sai_index_wait = entry->se_index; + spin_unlock(&lli->lli_sa_lock); lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL, - LWI_ON_SIGNAL_NOOP, NULL); - rc = l_wait_event(sai->sai_waitq, - sa_ready(entry) || - thread_is_stopped(&sai->sai_thread), - &lwi); + LWI_ON_SIGNAL_NOOP, NULL); + rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi); if (rc < 0) { - sa_put(sai, entry); - RETURN(-EAGAIN); + /* + * entry may not be ready, so it may be used by inflight + * statahead RPC, don't free it. 
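
revalidate_statahead_dentry() waits a bounded time for an in-flight entry to become ready, and as the new comment notes, if the wait gives up the entry must be left alone because the RPC may still reference it. A rough userspace equivalent of that bounded wait (the 30 second value and -EAGAIN mirror the hunk, everything else is illustrative):

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t	ready_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	ready_wake = PTHREAD_COND_INITIALIZER;
static int		entry_ready;	/* like sa_ready(entry) */

/* return 0 once ready, -EAGAIN if we gave up; never free the entry here */
int wait_entry_ready(unsigned int timeout_sec)
{
	struct timespec until;
	int ready, rc = 0;

	clock_gettime(CLOCK_REALTIME, &until);
	until.tv_sec += timeout_sec;

	pthread_mutex_lock(&ready_lock);
	while (!entry_ready && rc == 0)
		rc = pthread_cond_timedwait(&ready_wake, &ready_lock, &until);
	ready = entry_ready;
	pthread_mutex_unlock(&ready_lock);

	return ready ? 0 : -EAGAIN;
}
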
+ */ + entry = NULL; + GOTO(out, rc = -EAGAIN); } } if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode != NULL) { struct inode *inode = entry->se_inode; struct lookup_intent it = { .it_op = IT_GETATTR, - .d.lustre.it_lock_handle = + .it_lock_handle = entry->se_handle }; __u64 bits; @@ -1455,10 +1473,13 @@ static int revalidate_statahead_dentry(struct inode *dir, alias = ll_splice_alias(inode, *dentryp); if (IS_ERR(alias)) { - sa_put(sai, entry); - RETURN(PTR_ERR(alias)); + ll_intent_release(&it); + GOTO(out, rc = PTR_ERR(alias)); } *dentryp = alias; + /* statahead prepared this inode, transfer inode + * refcount from sa_entry to dentry */ + entry->se_inode = NULL; } else if ((*dentryp)->d_inode != inode) { /* revalidate, but inode is recreated */ CDEBUG(D_READA, @@ -1471,12 +1492,9 @@ static int revalidate_statahead_dentry(struct inode *dir, (*dentryp)->d_name.name, PFID(ll_inode2fid((*dentryp)->d_inode)), PFID(ll_inode2fid(inode))); - sa_put(sai, entry); - RETURN(-ESTALE); - } else { - iput(inode); + ll_intent_release(&it); + GOTO(out, rc = -ESTALE); } - entry->se_inode = NULL; if ((bits & MDS_INODELOCK_LOOKUP) && d_lustre_invalid(*dentryp)) @@ -1484,16 +1502,40 @@ static int revalidate_statahead_dentry(struct inode *dir, ll_intent_release(&it); } } - +out: + /* + * statahead cached sa_entry can be used only once, and will be killed + * right after use, so if lookup/revalidate accessed statahead cache, + * set dentry ldd_sa_generation to parent lli_sa_generation, later if we + * stat this file again, we know we've done statahead before, see + * dentry_may_statahead(). + */ + ldd = ll_d2d(*dentryp); + /* ldd can be NULL if llite lookup failed. */ + if (ldd != NULL) + ldd->lld_sa_generation = lli->lli_sa_generation; sa_put(sai, entry); + RETURN(rc); } +/** + * start statahead thread + * + * \param[in] dir parent directory + * \param[in] dentry dentry that triggers statahead, normally the first + * dirent under @dir + * \retval -EAGAIN on success, because when this function is + * called, it's already in lookup call, so client should + * do it itself instead of waiting for statahead thread + * to do it asynchronously. + * \retval negative number upon error + */ static int start_statahead_thread(struct inode *dir, struct dentry *dentry) { struct ll_inode_info *lli = ll_i2info(dir); struct ll_statahead_info *sai = NULL; - struct dentry *parent; + struct dentry *parent = dentry->d_parent; struct ptlrpc_thread *thread; struct l_wait_info lwi = { 0 }; struct task_struct *task; @@ -1502,72 +1544,41 @@ static int start_statahead_thread(struct inode *dir, struct dentry *dentry) /* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */ rc = is_first_dirent(dir, dentry); - if (rc == LS_NONE_FIRST_DE) + if (rc == LS_NOT_FIRST_DE) /* It is not "ls -{a}l" operation, no need statahead for it. 
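
The out: path added above stamps the dentry's lld_sa_generation with the parent's lli_sa_generation once the statahead cache has been consulted, so a later stat of the same name knows statahead was already tried for it. The mechanism reduces to a simple generation comparison (hypothetical structs, not the Lustre ones):

struct parent_demo { unsigned int sa_generation; };	/* lli_sa_generation */
struct child_demo  { unsigned int sa_generation; };	/* lld_sa_generation */

/* stamp the child after it has been looked up through the statahead cache */
void stamp_child(struct child_demo *child, const struct parent_demo *parent)
{
	child->sa_generation = parent->sa_generation;
}

/* later: is it worth consulting statahead for this child again? */
int may_statahead(const struct child_demo *child,
		  const struct parent_demo *parent)
{
	return child->sa_generation != parent->sa_generation;
}
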
*/ - GOTO(out, rc = -EAGAIN); + GOTO(out, rc = -EFAULT); - sai = ll_sai_alloc(); + sai = ll_sai_alloc(parent); if (sai == NULL) GOTO(out, rc = -ENOMEM); sai->sai_ls_all = (rc == LS_FIRST_DOT_DE); - sai->sai_inode = igrab(dir); - if (unlikely(sai->sai_inode == NULL)) { - CWARN("Do not start stat ahead on dying inode "DFID"\n", - PFID(&lli->lli_fid)); - GOTO(out, rc = -ESTALE); - } - /* get parent reference count here, and put it in ll_statahead_thread */ - parent = dget(dentry->d_parent); - if (unlikely(sai->sai_inode != parent->d_inode)) { - struct ll_inode_info *nlli = ll_i2info(parent->d_inode); - - CWARN("Race condition, someone changed %.*s just now: " - "old parent "DFID", new parent "DFID"\n", - dentry->d_name.len, dentry->d_name.name, - PFID(&lli->lli_fid), PFID(&nlli->lli_fid)); - dput(parent); - iput(sai->sai_inode); - GOTO(out, rc = -EAGAIN); - } - - CDEBUG(D_READA, "start statahead thread: sai %p, parent %.*s\n", - sai, parent->d_name.len, parent->d_name.name); - - /* if another process started statahead thread, or deauthorized current - * lli_opendir_key, don't start statahead. */ + /* if current lli_opendir_key was deauthorized, or dir re-opened by + * another process, don't start statahead, otherwise the newly spawned + * statahead thread won't be notified to quit. */ spin_lock(&lli->lli_sa_lock); if (unlikely(lli->lli_sai != NULL || lli->lli_opendir_key == NULL || lli->lli_opendir_pid != current->pid)) { spin_unlock(&lli->lli_sa_lock); - - dput(parent); - iput(sai->sai_inode); - GOTO(out, rc = -EAGAIN); + GOTO(out, rc = -EPERM); } lli->lli_sai = sai; spin_unlock(&lli->lli_sa_lock); atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running); + CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n", + current_pid(), parent->d_name.len, parent->d_name.name); + task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u", lli->lli_opendir_pid); thread = &sai->sai_thread; if (IS_ERR(task)) { rc = PTR_ERR(task); - CERROR("cannot start ll_sa thread: rc = %d\n", rc); - dput(parent); - - spin_lock(&lli->lli_sa_lock); - thread_set_flags(thread, SVC_STOPPED); - thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED); - spin_unlock(&lli->lli_sa_lock); - - ll_sai_put(sai); - LASSERT(lli->lli_sai == NULL); - RETURN(-EAGAIN); + CERROR("can't start ll_sa thread, rc: %d\n", rc); + GOTO(out, rc); } l_wait_event(thread->t_ctl_waitq, @@ -1582,29 +1593,35 @@ static int start_statahead_thread(struct inode *dir, struct dentry *dentry) RETURN(-EAGAIN); out: - if (sai != NULL) - OBD_FREE_PTR(sai); - - /* once we start statahead thread failed, disable statahead so - * subsequent won't waste time to try it. */ + /* once we start statahead thread failed, disable statahead so that + * subsequent stat won't waste time to try it. */ spin_lock(&lli->lli_sa_lock); lli->lli_sa_enabled = 0; + lli->lli_sai = NULL; spin_unlock(&lli->lli_sa_lock); + if (sai != NULL) + ll_sai_free(sai); + RETURN(rc); } /** - * Start statahead thread if this is the first dir entry. - * Otherwise if a thread is started already, wait it until it is ahead of me. - * \retval 1 -- find entry with lock in cache, the caller needs to do - * nothing. - * \retval 0 -- find entry in cache, but without lock, the caller needs - * refresh from MDS. - * \retval others -- the caller need to process as non-statahead. + * statahead entry function, this is called when client getattr on a file, it + * will start statahead thread if this is the first dir entry, else revalidate + * dentry from statahead cache. 
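
start_statahead_thread() above publishes lli_sai under lli_sa_lock only while the opener is still authorized, and on any failure it clears the pointer again and disables statahead for the directory so later stats do not keep retrying. A simplified model of that publish-then-rollback flow (pthreads and error values are stand-ins, not the Lustre ones):

#include <pthread.h>
#include <stdlib.h>

struct sdir_demo {
	pthread_mutex_t	 lock;		/* like lli->lli_sa_lock */
	void		*sai;		/* like lli->lli_sai */
	int		 sa_enabled;
	int		 authorized;	/* opendir key/pid still valid */
};

static void *sa_worker(void *arg) { return arg; }

int start_statahead_demo(struct sdir_demo *dir, void *sai)
{
	pthread_t tid;
	int rc;

	pthread_mutex_lock(&dir->lock);
	if (!dir->authorized || dir->sai != NULL) {
		/* someone else owns statahead here: give up quietly */
		pthread_mutex_unlock(&dir->lock);
		free(sai);
		return -1;
	}
	dir->sai = sai;			/* publish under the lock */
	pthread_mutex_unlock(&dir->lock);

	rc = pthread_create(&tid, NULL, sa_worker, dir);
	if (rc != 0) {
		/* roll back the publish and stop trying on this directory */
		pthread_mutex_lock(&dir->lock);
		dir->sai = NULL;
		dir->sa_enabled = 0;
		pthread_mutex_unlock(&dir->lock);
		free(sai);
	}
	return rc;
}
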
+ *
+ * \param[in]  dir	parent directory
+ * \param[out] dentryp	dentry to getattr
+ * \param[in]  unplug	unplug statahead window only (normally for negative
+ *			dentry)
+ * \retval		1 on success
+ * \retval		0 revalidation from statahead cache failed, caller needs
+ *			to getattr from server directly
+ * \retval		negative number on error, caller often ignores this and
+ *			then getattr from server
  */
-int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
-		       int only_unplug)
+int ll_statahead(struct inode *dir, struct dentry **dentryp, bool unplug)
 {
 	struct ll_statahead_info *sai;
@@ -1612,13 +1629,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
 	if (sai != NULL) {
 		int rc;
 
-		rc = revalidate_statahead_dentry(dir, sai, dentryp,
-						 only_unplug);
+		rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
 		CDEBUG(D_READA, "revalidate statahead %.*s: %d.\n",
 		       (*dentryp)->d_name.len, (*dentryp)->d_name.name, rc);
 		ll_sai_put(sai);
 		return rc;
 	}
-
 	return start_statahead_thread(dir, *dentryp);
 }
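
Finally, the renamed entry point ll_statahead() has a simple shape: take a reference on the running sai if the directory has one and revalidate from its cache, otherwise try to start the statahead thread and let this particular getattr be done by the caller. The control flow, reduced to a compile-only sketch with stubbed helpers (all names hypothetical):

#include <errno.h>
#include <stddef.h>

struct sai_stub { int unused; };

static struct sai_stub *sai_lookup_stub(void *dir)
{ (void)dir; return NULL; }
static void sai_release_stub(struct sai_stub *sai)
{ (void)sai; }
static int cache_revalidate_stub(struct sai_stub *sai, void **dentryp,
				 int unplug)
{ (void)sai; (void)dentryp; (void)unplug; return 0; }
static int start_thread_stub(void *dir, void *dentry)
{ (void)dir; (void)dentry; return -EAGAIN; /* caller stats it itself */ }

/* mirrors the new entry point: 1 cache hit, 0 miss, negative error */
int statahead_demo(void *dir, void **dentryp, int unplug)
{
	struct sai_stub *sai = sai_lookup_stub(dir);

	if (sai != NULL) {
		int rc = cache_revalidate_stub(sai, dentryp, unplug);

		sai_release_stub(sai);
		return rc;
	}
	return start_thread_stub(dir, *dentryp);
}
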