4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 #include <linux/sched.h>
40 #include <linux/highmem.h>
41 #include <linux/pagemap.h>
43 #define DEBUG_SUBSYSTEM S_LLITE
45 #include <obd_support.h>
46 #include <lustre_dlm.h>
47 #include "llite_internal.h"
49 #define SA_OMITTED_ENTRY_MAX 8ULL
52 /** negative values are for error cases */
53 SA_ENTRY_INIT = 0, /** init entry */
54 SA_ENTRY_SUCC = 1, /** stat succeeded */
55 SA_ENTRY_INVA = 2, /** invalid entry */
58 /* sa_entry is not refcounted: the statahead thread allocates it and does the
59 * async stat; the async stat callback ll_statahead_interpret() adds it to
60 * sai_interim_entries, then the statahead thread calls sa_handle_callback() to
61 * instantiate the entry and move it into sai_entries, after which only the scanner
62 * process can access and free it. */
64 /* link into sai_interim_entries or sai_entries */
65 struct list_head se_list;
66 /* link into sai hash table locally */
67 struct list_head se_hash;
68 /* entry index in the sai */
70 /* low layer ldlm lock handle */
74 /* entry size, contains name */
76 /* pointer to async getattr enqueue info */
77 struct md_enqueue_info *se_minfo;
78 /* pointer to the async getattr request */
79 struct ptlrpc_request *se_req;
80 /* pointer to the target inode */
81 struct inode *se_inode;
86 static unsigned int sai_generation = 0;
87 static DEFINE_SPINLOCK(sai_generation_lock);
89 static inline int sa_unhashed(struct sa_entry *entry)
91 return list_empty(&entry->se_hash);
95 * The entry can only be released by the caller; it is necessary to hold the lock.
97 static inline int sa_ready(struct sa_entry *entry)
100 return (entry->se_state != SA_ENTRY_INIT);
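/* map a name hash value to a bucket index in the sai cache hash table */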
103 static inline int sa_hash(int val)
105 return val & LL_SA_CACHE_MASK;
109 * Insert entry into the SA hash table.
112 sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
114 int i = sa_hash(entry->se_qstr.hash);
116 spin_lock(&sai->sai_cache_lock[i]);
117 list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
118 spin_unlock(&sai->sai_cache_lock[i]);
122 * Remove entry from SA table.
125 sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
127 int i = sa_hash(entry->se_qstr.hash);
129 spin_lock(&sai->sai_cache_lock[i]);
130 list_del_init(&entry->se_hash);
131 spin_unlock(&sai->sai_cache_lock[i]);
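/* async glimpse (AGL) should only run for regular files while sai_agl_valid is set */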
134 static inline int agl_should_run(struct ll_statahead_info *sai,
137 return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
140 static inline struct ll_inode_info *
141 agl_first_entry(struct ll_statahead_info *sai)
143 return list_entry(sai->sai_agls.next, struct ll_inode_info,
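/* the statahead window is full: the number of cached entries has reached sai_max */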
147 static inline int sa_sent_full(struct ll_statahead_info *sai)
149 return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
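/* async stat replies have arrived and are waiting to be handled by sa_handle_callback() */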
152 static inline int sa_has_callback(struct ll_statahead_info *sai)
154 return !list_empty(&sai->sai_interim_entries);
157 static inline int agl_list_empty(struct ll_statahead_info *sai)
159 return list_empty(&sai->sai_agls);
163 * (1) the hit ratio is less than 80%, or
165 * (2) there are more than 8 consecutive misses,
166 * then the hit rate is considered low.
168 static inline int sa_low_hit(struct ll_statahead_info *sai)
170 return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
171 (sai->sai_consecutive_miss > 8));
175 * If the given index falls behind the statahead window by more than
176 * SA_OMITTED_ENTRY_MAX, then it is old.
178 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
180 return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
184 /* allocate sa_entry and add it into the hash so the scanner process can find it */
185 static struct sa_entry *
186 sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len)
188 struct ll_inode_info *lli;
189 struct sa_entry *entry;
194 entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
195 OBD_ALLOC(entry, entry_size);
196 if (unlikely(entry == NULL))
197 RETURN(ERR_PTR(-ENOMEM));
199 CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n",
200 len, name, entry, index);
202 entry->se_index = index;
204 entry->se_state = SA_ENTRY_INIT;
205 entry->se_size = entry_size;
206 dname = (char *)entry + sizeof(struct sa_entry);
207 memcpy(dname, name, len);
209 entry->se_qstr.hash = full_name_hash(name, len);
210 entry->se_qstr.len = len;
211 entry->se_qstr.name = dname;
213 lli = ll_i2info(sai->sai_inode);
214 spin_lock(&lli->lli_sa_lock);
215 INIT_LIST_HEAD(&entry->se_list);
216 sa_rehash(sai, entry);
217 spin_unlock(&lli->lli_sa_lock);
219 atomic_inc(&sai->sai_cache_count);
224 /* free sa_entry, which should already be unhashed and not on any list */
225 static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
227 CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n",
228 entry->se_qstr.len, entry->se_qstr.name, entry,
231 LASSERT(list_empty(&entry->se_list));
232 LASSERT(sa_unhashed(entry));
234 OBD_FREE(entry, entry->se_size);
235 atomic_dec(&sai->sai_cache_count);
238 /* find sa_entry by name; used by the directory scanner. No lock is needed because
239 * only the scanner can remove the entry from the hash.
241 static struct sa_entry *
242 sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
244 struct sa_entry *entry;
245 int i = sa_hash(qstr->hash);
247 list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
248 if (entry->se_qstr.hash == qstr->hash &&
249 entry->se_qstr.len == qstr->len &&
250 memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
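/* unhash the entry, unlink it from its list, and drop the inode reference it holds */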
257 sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
259 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
261 LASSERT(!sa_unhashed(entry));
262 LASSERT(!list_empty(&entry->se_list));
263 LASSERT(sa_ready(entry));
265 sa_unhash(sai, entry);
267 spin_lock(&lli->lli_sa_lock);
268 list_del_init(&entry->se_list);
269 spin_unlock(&lli->lli_sa_lock);
271 if (entry->se_inode != NULL)
272 iput(entry->se_inode);
277 /* called by the scanner after use; the sa_entry will be killed */
279 sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
281 struct sa_entry *tmp, *next;
283 if (entry != NULL && entry->se_state == SA_ENTRY_SUCC) {
284 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
287 sai->sai_consecutive_miss = 0;
288 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
291 sai->sai_consecutive_miss++;
297 /* kill old completed entries; only the scanner process does this, so no need
299 list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
300 if (!is_omitted_entry(sai, tmp->se_index))
305 wake_up(&sai->sai_thread.t_ctl_waitq);
308 /* update state and insert the entry into sai_entries sorted by index; return true
309 * if the scanner is waiting on this entry. */
311 __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
314 struct list_head *pos = &sai->sai_entries;
316 LASSERT(!sa_ready(entry));
317 LASSERT(list_empty(&entry->se_list));
319 entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC;
321 list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
322 if (se->se_index < entry->se_index) {
327 list_add(&entry->se_list, pos);
329 return (entry->se_index == sai->sai_index_wait);
332 /* release resources used in the async stat RPC, complete the entry information and
333 * wake up the scanner if necessary */
335 sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
337 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
338 struct md_enqueue_info *minfo = entry->se_minfo;
339 struct ptlrpc_request *req = entry->se_req;
342 /* release resources used in RPC */
344 entry->se_minfo = NULL;
345 ll_intent_release(&minfo->mi_it);
351 entry->se_req = NULL;
352 ptlrpc_req_finished(req);
355 spin_lock(&lli->lli_sa_lock);
356 wakeup = __sa_make_ready(sai, entry, ret);
357 spin_unlock(&lli->lli_sa_lock);
360 wake_up(&sai->sai_waitq);
364 * Insert inode into the list of sai_agls.
366 static void ll_agl_add(struct ll_statahead_info *sai,
367 struct inode *inode, int index)
369 struct ll_inode_info *child = ll_i2info(inode);
370 struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
373 spin_lock(&child->lli_agl_lock);
374 if (child->lli_agl_index == 0) {
375 child->lli_agl_index = index;
376 spin_unlock(&child->lli_agl_lock);
378 LASSERT(list_empty(&child->lli_agl_list));
381 spin_lock(&parent->lli_agl_lock);
382 if (agl_list_empty(sai))
384 list_add_tail(&child->lli_agl_list, &sai->sai_agls);
385 spin_unlock(&parent->lli_agl_lock);
387 spin_unlock(&child->lli_agl_lock);
391 wake_up(&sai->sai_agl_thread.t_ctl_waitq);
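/* allocate a statahead info descriptor and initialize its lists, locks and wait queues */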
394 static struct ll_statahead_info *ll_sai_alloc(void)
396 struct ll_statahead_info *sai;
404 atomic_set(&sai->sai_refcount, 1);
406 spin_lock(&sai_generation_lock);
407 sai->sai_generation = ++sai_generation;
408 if (unlikely(sai_generation == 0))
409 sai->sai_generation = ++sai_generation;
410 spin_unlock(&sai_generation_lock);
412 sai->sai_max = LL_SA_RPC_MIN;
414 init_waitqueue_head(&sai->sai_waitq);
415 init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
416 init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
418 INIT_LIST_HEAD(&sai->sai_interim_entries);
419 INIT_LIST_HEAD(&sai->sai_entries);
420 INIT_LIST_HEAD(&sai->sai_agls);
422 for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
423 INIT_LIST_HEAD(&sai->sai_cache[i]);
424 spin_lock_init(&sai->sai_cache_lock[i]);
426 atomic_set(&sai->sai_cache_count, 0);
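/* take a reference on the directory's statahead info, if one exists */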
431 static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
433 struct ll_inode_info *lli = ll_i2info(dir);
434 struct ll_statahead_info *sai = NULL;
436 spin_lock(&lli->lli_sa_lock);
439 atomic_inc(&sai->sai_refcount);
440 spin_unlock(&lli->lli_sa_lock);
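/* drop a reference on sai; the final put cleans up the remaining cached entries */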
445 static void ll_sai_put(struct ll_statahead_info *sai)
447 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
449 if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
450 struct sa_entry *entry, *next;
451 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
454 spin_unlock(&lli->lli_sa_lock);
456 LASSERT(thread_is_stopped(&sai->sai_thread));
457 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
458 LASSERT(sai->sai_sent == sai->sai_replied);
459 LASSERT(!sa_has_callback(sai));
461 list_for_each_entry_safe(entry, next, &sai->sai_entries,
465 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
466 LASSERT(agl_list_empty(sai));
468 iput(sai->sai_inode);
470 atomic_dec(&sbi->ll_sa_running);
474 /* Do NOT forget to drop the inode refcount that was taken when it was added into sai_agls. */
475 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
477 struct ll_inode_info *lli = ll_i2info(inode);
478 __u64 index = lli->lli_agl_index;
482 LASSERT(list_empty(&lli->lli_agl_list));
484 /* AGL may fall behind statahead by one entry */
485 if (is_omitted_entry(sai, index + 1)) {
486 lli->lli_agl_index = 0;
491 /* Someone is in glimpse (sync or async), do nothing. */
492 rc = down_write_trylock(&lli->lli_glimpse_sem);
494 lli->lli_agl_index = 0;
500 * Someone triggered a glimpse within the last second.
501 * 1) The former glimpse succeeded with a glimpse lock granted by the OST; if
502 * the lock is still cached on the client, AGL needs to do nothing. If
503 * it was cancelled by another client, AGL may not be able to obtain a new
504 * lock, since AGL does not trigger a glimpse callback.
505 * 2) The former glimpse succeeded, but the OST did not grant a glimpse lock.
506 * In such a case, it is quite possible that the OST will not grant a
507 * glimpse lock for AGL either.
508 * 3) The former glimpse failed; compared with the other two cases, this is
509 * relatively rare. AGL can ignore such a case, and it will not much
510 * affect performance.
512 if (lli->lli_glimpse_time != 0 &&
513 cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
514 up_write(&lli->lli_glimpse_sem);
515 lli->lli_agl_index = 0;
520 CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
521 DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
524 lli->lli_agl_index = 0;
525 lli->lli_glimpse_time = cfs_time_current();
526 up_write(&lli->lli_glimpse_sem);
528 CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
529 DFID", idx = "LPU64", rc = %d\n",
530 PFID(&lli->lli_fid), index, rc);
537 /* prepare the inode for the sa_entry and add it into the AGL list; the sa_entry is
538 * then ready to be used by the scanner process. */
539 static void sa_instantiate(struct ll_statahead_info *sai,
540 struct sa_entry *entry)
542 struct inode *dir = sai->sai_inode;
544 struct md_enqueue_info *minfo;
545 struct lookup_intent *it;
546 struct ptlrpc_request *req;
547 struct mdt_body *body;
551 LASSERT(entry->se_handle != 0);
553 minfo = entry->se_minfo;
556 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
558 GOTO(out, rc = -EFAULT);
560 child = entry->se_inode;
565 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
567 /* XXX: No fid in reply, this is probably a cross-ref case.
568 * SA can't handle it yet. */
569 if (body->mbo_valid & OBD_MD_MDS)
570 GOTO(out, rc = -EAGAIN);
575 /* unlinked and re-created with the same name */
576 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2,
578 entry->se_inode = NULL;
584 it->d.lustre.it_lock_handle = entry->se_handle;
585 rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
587 GOTO(out, rc = -EAGAIN);
589 rc = ll_prep_inode(&child, req, dir->i_sb, it);
593 CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
594 ll_get_fsname(child->i_sb, NULL, 0),
595 entry->se_qstr.len, entry->se_qstr.name,
596 PFID(ll_inode2fid(child)), child);
597 ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
599 entry->se_inode = child;
601 if (agl_should_run(sai, child))
602 ll_agl_add(sai, child, entry->se_index);
607 /* sa_make_ready() will drop the ldlm ibits lock refcount by calling
608 * ll_intent_drop_lock() even on failure. Do not worry about
609 * calling ll_intent_drop_lock() more than once. */
610 sa_make_ready(sai, entry, rc);
613 /* once there are async stat replies, instantiate sa_entry */
614 static void sa_handle_callback(struct ll_statahead_info *sai)
616 struct ll_inode_info *lli;
618 lli = ll_i2info(sai->sai_inode);
620 while (sa_has_callback(sai)) {
621 struct sa_entry *entry;
623 spin_lock(&lli->lli_sa_lock);
624 if (unlikely(!sa_has_callback(sai))) {
625 spin_unlock(&lli->lli_sa_lock);
628 entry = list_entry(sai->sai_interim_entries.next,
629 struct sa_entry, se_list);
630 list_del_init(&entry->se_list);
631 spin_unlock(&lli->lli_sa_lock);
633 sa_instantiate(sai, entry);
636 spin_lock(&lli->lli_agl_lock);
637 while (!agl_list_empty(sai)) {
638 struct ll_inode_info *clli;
640 clli = agl_first_entry(sai);
641 list_del_init(&clli->lli_agl_list);
642 spin_unlock(&lli->lli_agl_lock);
644 ll_agl_trigger(&clli->lli_vfs_inode, sai);
646 spin_lock(&lli->lli_agl_lock);
648 spin_unlock(&lli->lli_agl_lock);
651 /* callback for async stat RPC. Because this is called in ptlrpcd context, we only
652 * put the sa_entry on the sai_interim_entries list, and let sa_handle_callback()
653 * really prepare the inode and instantiate the sa_entry later. */
654 static int ll_statahead_interpret(struct ptlrpc_request *req,
655 struct md_enqueue_info *minfo, int rc)
657 struct lookup_intent *it = &minfo->mi_it;
658 struct inode *dir = minfo->mi_dir;
659 struct ll_inode_info *lli = ll_i2info(dir);
660 struct ll_statahead_info *sai = lli->lli_sai;
661 struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
666 if (it_disposition(it, DISP_LOOKUP_NEG))
669 /* because the statahead thread waits for all inflight RPCs to finish,
670 * sai is always valid here, so there is no need to take a refcount */
671 LASSERT(sai != NULL);
672 LASSERT(!thread_is_stopped(&sai->sai_thread));
673 LASSERT(entry != NULL);
675 CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
676 entry->se_qstr.len, entry->se_qstr.name, rc);
679 ll_intent_release(it);
683 /* release the ibits lock ASAP to avoid deadlock when the statahead
684 * thread enqueues a lock on the parent in readdir and another
685 * process enqueues a lock on the child with the parent lock held, e.g.
687 handle = it->d.lustre.it_lock_handle;
688 ll_intent_drop_lock(it);
691 spin_lock(&lli->lli_sa_lock);
693 wakeup = __sa_make_ready(sai, entry, rc);
695 entry->se_minfo = minfo;
696 entry->se_req = ptlrpc_request_addref(req);
697 /* Release the async ibits lock ASAP to avoid deadlock
698 * when the statahead thread tries to enqueue a lock on the parent
699 * for readpage and another process tries to enqueue a lock on the
700 * child with the parent's lock held, for example: unlink. */
701 entry->se_handle = handle;
702 wakeup = !sa_has_callback(sai);
703 list_add_tail(&entry->se_list, &sai->sai_interim_entries);
707 wake_up(&sai->sai_thread.t_ctl_waitq);
708 spin_unlock(&lli->lli_sa_lock);
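/* release the resources acquired by sa_prep_data() */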
713 static void sa_fini_data(struct md_enqueue_info *minfo,
714 struct ldlm_enqueue_info *einfo)
716 LASSERT(minfo && einfo);
718 capa_put(minfo->mi_data.op_capa1);
719 capa_put(minfo->mi_data.op_capa2);
725 * There is a race condition between "capa_put" and "ll_statahead_interpret" when
726 * accessing "op_data.op_capa[1,2]", as follows:
727 * "capa_put" releases "op_data.op_capa[1,2]"'s reference count after calling
728 * "md_intent_getattr_async". But "ll_statahead_interpret" may run first and
729 * fill "op_data.op_capa[1,2]" with POISON, causing "capa_put" to access an invalid
730 * "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa" here before calling
731 * "md_intent_getattr_async".
733 static int sa_prep_data(struct inode *dir, struct inode *child,
734 struct sa_entry *entry, struct md_enqueue_info **pmi,
735 struct ldlm_enqueue_info **pei,
736 struct obd_capa **pcapa)
738 struct qstr *qstr = &entry->se_qstr;
739 struct md_enqueue_info *minfo;
740 struct ldlm_enqueue_info *einfo;
741 struct md_op_data *op_data;
743 OBD_ALLOC_PTR(einfo);
747 OBD_ALLOC_PTR(minfo);
753 op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
754 qstr->len, 0, LUSTRE_OPC_ANY, NULL);
755 if (IS_ERR(op_data)) {
758 return PTR_ERR(op_data);
761 minfo->mi_it.it_op = IT_GETATTR;
762 minfo->mi_dir = igrab(dir);
763 minfo->mi_cb = ll_statahead_interpret;
764 minfo->mi_cbdata = entry;
766 einfo->ei_type = LDLM_IBITS;
767 einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
768 einfo->ei_cb_bl = ll_md_blocking_ast;
769 einfo->ei_cb_cp = ldlm_completion_ast;
770 einfo->ei_cb_gl = NULL;
771 einfo->ei_cbdata = NULL;
775 pcapa[0] = op_data->op_capa1;
776 pcapa[1] = op_data->op_capa2;
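/* issue an async getattr by name for an entry that has no cached dentry */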
781 static int sa_lookup(struct inode *dir, struct sa_entry *entry)
783 struct md_enqueue_info *minfo;
784 struct ldlm_enqueue_info *einfo;
785 struct obd_capa *capas[2];
789 rc = sa_prep_data(dir, NULL, entry, &minfo, &einfo, capas);
793 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
798 sa_fini_data(minfo, einfo);
805 * similar to ll_revalidate_it().
806 * \retval 1 -- dentry valid
807 * \retval 0 -- will send stat-ahead request
808 * \retval others -- prepare stat-ahead request failed
810 static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
811 struct dentry *dentry)
813 struct inode *inode = dentry->d_inode;
814 struct lookup_intent it = { .it_op = IT_GETATTR,
815 .d.lustre.it_lock_handle = 0 };
816 struct md_enqueue_info *minfo;
817 struct ldlm_enqueue_info *einfo;
818 struct obd_capa *capas[2];
822 if (unlikely(inode == NULL))
825 if (d_mountpoint(dentry))
828 entry->se_inode = igrab(inode);
829 rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
832 entry->se_handle = it.d.lustre.it_lock_handle;
833 ll_intent_release(&it);
837 rc = sa_prep_data(dir, inode, entry, &minfo, &einfo, capas);
839 entry->se_inode = NULL;
844 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
849 entry->se_inode = NULL;
851 sa_fini_data(minfo, einfo);
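/* do statahead for the named entry: revalidate it if a dentry is cached, otherwise do an async lookup */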
857 static void sa_statahead(struct dentry *parent, const char *name, int len)
859 struct inode *dir = parent->d_inode;
860 struct ll_inode_info *lli = ll_i2info(dir);
861 struct ll_statahead_info *sai = lli->lli_sai;
862 struct dentry *dentry = NULL;
863 struct sa_entry *entry;
867 entry = sa_alloc(sai, sai->sai_index, name, len);
871 dentry = d_lookup(parent, &entry->se_qstr);
873 rc = sa_lookup(dir, entry);
875 rc = sa_revalidate(dir, entry, dentry);
876 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
877 ll_agl_add(sai, dentry->d_inode, entry->se_index);
884 sa_make_ready(sai, entry, rc);
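/* AGL thread main loop: pull inodes off sai_agls and trigger async glimpse on them */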
893 static int ll_agl_thread(void *arg)
895 struct dentry *parent = (struct dentry *)arg;
896 struct inode *dir = parent->d_inode;
897 struct ll_inode_info *plli = ll_i2info(dir);
898 struct ll_inode_info *clli;
899 struct ll_sb_info *sbi = ll_i2sbi(dir);
900 struct ll_statahead_info *sai;
901 struct ptlrpc_thread *thread;
902 struct l_wait_info lwi = { 0 };
906 sai = ll_sai_get(dir);
907 thread = &sai->sai_agl_thread;
908 thread->t_pid = current_pid();
909 CDEBUG(D_READA, "agl thread started: sai %p, parent %.*s\n",
910 sai, parent->d_name.len, parent->d_name.name);
912 atomic_inc(&sbi->ll_agl_total);
913 spin_lock(&plli->lli_agl_lock);
914 sai->sai_agl_valid = 1;
915 if (thread_is_init(thread))
916 /* If someone else has changed the thread state
917 * (e.g. already changed to SVC_STOPPING), we can't just
918 * blindly overwrite that setting. */
919 thread_set_flags(thread, SVC_RUNNING);
920 spin_unlock(&plli->lli_agl_lock);
921 wake_up(&thread->t_ctl_waitq);
924 l_wait_event(thread->t_ctl_waitq,
925 !agl_list_empty(sai) ||
926 !thread_is_running(thread),
929 if (!thread_is_running(thread))
932 spin_lock(&plli->lli_agl_lock);
933 /* The statahead thread may have helped to process AGL entries,
934 * so check again whether the list is empty. */
935 if (!agl_list_empty(sai)) {
936 clli = agl_first_entry(sai);
937 list_del_init(&clli->lli_agl_list);
938 spin_unlock(&plli->lli_agl_lock);
939 ll_agl_trigger(&clli->lli_vfs_inode, sai);
941 spin_unlock(&plli->lli_agl_lock);
945 spin_lock(&plli->lli_agl_lock);
946 sai->sai_agl_valid = 0;
947 while (!agl_list_empty(sai)) {
948 clli = agl_first_entry(sai);
949 list_del_init(&clli->lli_agl_list);
950 spin_unlock(&plli->lli_agl_lock);
951 clli->lli_agl_index = 0;
952 iput(&clli->lli_vfs_inode);
953 spin_lock(&plli->lli_agl_lock);
955 thread_set_flags(thread, SVC_STOPPED);
956 spin_unlock(&plli->lli_agl_lock);
957 wake_up(&thread->t_ctl_waitq);
959 CDEBUG(D_READA, "agl thread stopped: sai %p, parent %.*s\n",
960 sai, parent->d_name.len, parent->d_name.name);
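/* start the AGL thread and wait until it is running or has stopped */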
964 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
966 struct ptlrpc_thread *thread = &sai->sai_agl_thread;
967 struct l_wait_info lwi = { 0 };
968 struct ll_inode_info *plli;
969 struct task_struct *task;
972 CDEBUG(D_READA, "start agl thread: sai %p, parent %.*s\n",
973 sai, parent->d_name.len, parent->d_name.name);
975 plli = ll_i2info(parent->d_inode);
976 task = kthread_run(ll_agl_thread, parent,
977 "ll_agl_%u", plli->lli_opendir_pid);
979 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
980 thread_set_flags(thread, SVC_STOPPED);
984 l_wait_event(thread->t_ctl_waitq,
985 thread_is_running(thread) || thread_is_stopped(thread),
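/* statahead thread main loop: walk the directory pages and issue async stat for each entry */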
990 static int ll_statahead_thread(void *arg)
992 struct dentry *parent = (struct dentry *)arg;
993 struct inode *dir = parent->d_inode;
994 struct ll_inode_info *lli = ll_i2info(dir);
995 struct ll_sb_info *sbi = ll_i2sbi(dir);
996 struct ll_statahead_info *sai;
997 struct ptlrpc_thread *thread;
998 struct ptlrpc_thread *agl_thread;
1000 struct md_op_data *op_data;
1001 struct ll_dir_chain chain;
1002 struct l_wait_info lwi = { 0 };
1003 struct page *page = NULL;
1008 sai = ll_sai_get(dir);
1009 thread = &sai->sai_thread;
1010 agl_thread = &sai->sai_agl_thread;
1011 thread->t_pid = current_pid();
1012 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n",
1013 sai, parent->d_name.len, parent->d_name.name);
1015 op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1016 LUSTRE_OPC_ANY, dir);
1017 if (IS_ERR(op_data))
1018 GOTO(out, rc = PTR_ERR(op_data));
1020 op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1022 if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1023 ll_start_agl(parent, sai);
1025 atomic_inc(&sbi->ll_sa_total);
1026 spin_lock(&lli->lli_sa_lock);
1027 if (thread_is_init(thread))
1028 /* If someone else has changed the thread state
1029 * (e.g. already changed to SVC_STOPPING), we can't just
1030 * blindly overwrite that setting. */
1031 thread_set_flags(thread, SVC_RUNNING);
1032 spin_unlock(&lli->lli_sa_lock);
1033 wake_up(&thread->t_ctl_waitq);
1035 ll_dir_chain_init(&chain);
1036 while (pos != MDS_DIR_END_OFF && thread_is_running(thread)) {
1037 struct lu_dirpage *dp;
1038 struct lu_dirent *ent;
1040 sai->sai_in_readpage = 1;
1041 page = ll_get_dir_page(dir, op_data, pos, &chain);
1042 sai->sai_in_readpage = 0;
1045 CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
1046 "/"LPU64" opendir_pid = %u: rc = %d\n",
1047 PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1048 lli->lli_opendir_pid, rc);
1052 dp = page_address(page);
1053 for (ent = lu_dirent_start(dp);
1054 ent != NULL && thread_is_running(thread) &&
1056 ent = lu_dirent_next(ent)) {
1061 hash = le64_to_cpu(ent->lde_hash);
1062 if (unlikely(hash < pos))
1064 * Skip until we find target hash value.
1068 namelen = le16_to_cpu(ent->lde_namelen);
1069 if (unlikely(namelen == 0))
1071 * Skip dummy record.
1075 name = ent->lde_name;
1076 if (name[0] == '.') {
1082 } else if (name[1] == '.' && namelen == 2) {
1087 } else if (!sai->sai_ls_all) {
1089 * skip hidden files.
1091 sai->sai_skip_hidden++;
1097 * don't stat-ahead first entry.
1099 if (unlikely(++first == 1))
1102 /* wait for spare statahead window */
1104 l_wait_event(thread->t_ctl_waitq,
1105 !sa_sent_full(sai) ||
1106 sa_has_callback(sai) ||
1107 !agl_list_empty(sai) ||
1108 !thread_is_running(thread),
1111 sa_handle_callback(sai);
1112 } while (sa_sent_full(sai) &&
1113 thread_is_running(thread));
1115 sa_statahead(parent, name, namelen);
1118 pos = le64_to_cpu(dp->ldp_hash_end);
1119 ll_release_page(dir, page,
1120 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1122 if (sa_low_hit(sai)) {
1124 atomic_inc(&sbi->ll_sa_wrong);
1125 CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1126 "ratio too low: hit/miss "LPU64"/"LPU64
1127 ", sent/replied "LPU64"/"LPU64", stopping "
1128 "statahead thread: pid %d\n",
1129 PFID(&lli->lli_fid), sai->sai_hit,
1130 sai->sai_miss, sai->sai_sent,
1131 sai->sai_replied, current_pid());
1135 ll_dir_chain_fini(&chain);
1136 ll_finish_md_op_data(op_data);
1139 spin_lock(&lli->lli_sa_lock);
1140 thread_set_flags(thread, SVC_STOPPING);
1141 lli->lli_sa_enabled = 0;
1142 spin_unlock(&lli->lli_sa_lock);
1145 /* statahead is finished, but statahead entries still need to be cached; wait
1146 * for file release to stop this thread. */
1147 while (thread_is_running(thread)) {
1148 l_wait_event(thread->t_ctl_waitq,
1149 sa_has_callback(sai) ||
1150 !agl_list_empty(sai) ||
1151 !thread_is_running(thread),
1154 sa_handle_callback(sai);
1159 if (sai->sai_agl_valid) {
1160 spin_lock(&lli->lli_agl_lock);
1161 thread_set_flags(agl_thread, SVC_STOPPING);
1162 spin_unlock(&lli->lli_agl_lock);
1163 wake_up(&agl_thread->t_ctl_waitq);
1165 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1166 sai, (unsigned int)agl_thread->t_pid);
1167 l_wait_event(agl_thread->t_ctl_waitq,
1168 thread_is_stopped(agl_thread),
1171 /* Set agl_thread flags anyway. */
1172 thread_set_flags(agl_thread, SVC_STOPPED);
1175 /* wait for inflight statahead RPCs to finish; only then can sai be freed
1176 * safely, because statahead RPCs access sai data */
1177 while (sai->sai_sent != sai->sai_replied) {
1178 /* in case we're not woken up, timeout wait */
1179 lwi = LWI_TIMEOUT(HZ >> 3, NULL, NULL);
1180 l_wait_event(thread->t_ctl_waitq,
1181 sai->sai_sent == sai->sai_replied, &lwi);
1184 /* release resources held by statahead RPCs */
1185 sa_handle_callback(sai);
1187 spin_lock(&lli->lli_sa_lock);
1188 thread_set_flags(thread, SVC_STOPPED);
1189 spin_unlock(&lli->lli_sa_lock);
1191 wake_up(&sai->sai_waitq);
1192 wake_up(&thread->t_ctl_waitq);
1194 CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n",
1195 sai, parent->d_name.len, parent->d_name.name);
1200 /* authorize opened dir handle @key to statahead later */
1201 void ll_authorize_statahead(struct inode *dir, void *key)
1203 struct ll_inode_info *lli = ll_i2info(dir);
1205 spin_lock(&lli->lli_sa_lock);
1206 if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL) {
1208 * if lli_sai is not NULL, it means the previous statahead has not
1209 * finished yet; we'd better not start a new statahead for now.
1211 LASSERT(lli->lli_opendir_pid == 0);
1212 lli->lli_opendir_key = key;
1213 lli->lli_opendir_pid = current_pid();
1214 lli->lli_sa_enabled = 1;
1216 spin_unlock(&lli->lli_sa_lock);
1219 /* deauthorize opened dir handle @key for statahead; the statahead thread may
1220 * still be running, so notify it to quit. */
1221 void ll_deauthorize_statahead(struct inode *dir, void *key)
1223 struct ll_inode_info *lli = ll_i2info(dir);
1224 struct ll_statahead_info *sai;
1226 LASSERT(lli->lli_opendir_key == key);
1227 LASSERT(lli->lli_opendir_pid != 0);
1229 CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1230 PFID(&lli->lli_fid));
1232 spin_lock(&lli->lli_sa_lock);
1233 lli->lli_opendir_key = NULL;
1234 lli->lli_opendir_pid = 0;
1235 lli->lli_sa_enabled = 0;
1237 if (sai != NULL && thread_is_running(&sai->sai_thread)) {
1239 * the statahead thread may not have quit yet because it needs to cache
1240 * entries; now it's time to tell it to quit.
1242 thread_set_flags(&sai->sai_thread, SVC_STOPPING);
1243 wake_up(&sai->sai_thread.t_ctl_waitq);
1245 spin_unlock(&lli->lli_sa_lock);
1250 * not first dirent, or is "."
1252 LS_NONE_FIRST_DE = 0,
1254 * the first non-hidden dirent
1258 * the first hidden dirent, that is "."
1263 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1265 struct ll_dir_chain chain;
1266 struct qstr *target = &dentry->d_name;
1267 struct md_op_data *op_data;
1269 struct page *page = NULL;
1270 int rc = LS_NONE_FIRST_DE;
1274 op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1275 LUSTRE_OPC_ANY, dir);
1276 if (IS_ERR(op_data))
1277 RETURN(PTR_ERR(op_data));
1279 *FIXME choose the start offset of the readdir
1281 op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1283 ll_dir_chain_init(&chain);
1284 page = ll_get_dir_page(dir, op_data, 0, &chain);
1287 struct lu_dirpage *dp;
1288 struct lu_dirent *ent;
1291 struct ll_inode_info *lli = ll_i2info(dir);
1294 CERROR("%s: reading dir "DFID" at "LPU64
1295 "opendir_pid = %u : rc = %d\n",
1296 ll_get_fsname(dir->i_sb, NULL, 0),
1297 PFID(ll_inode2fid(dir)), pos,
1298 lli->lli_opendir_pid, rc);
1302 dp = page_address(page);
1303 for (ent = lu_dirent_start(dp); ent != NULL;
1304 ent = lu_dirent_next(ent)) {
1309 hash = le64_to_cpu(ent->lde_hash);
1310 /* ll_get_dir_page() can return any page containing
1311 * the given hash, which may not be the start hash. */
1312 if (unlikely(hash < pos))
1315 namelen = le16_to_cpu(ent->lde_namelen);
1316 if (unlikely(namelen == 0))
1318 * skip dummy record.
1322 name = ent->lde_name;
1323 if (name[0] == '.') {
1329 else if (name[1] == '.' && namelen == 2)
1340 if (dot_de && target->name[0] != '.') {
1341 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1342 target->len, target->name,
1347 if (target->len != namelen ||
1348 memcmp(target->name, name, namelen) != 0)
1349 rc = LS_NONE_FIRST_DE;
1353 rc = LS_FIRST_DOT_DE;
1355 ll_release_page(dir, page, false);
1358 pos = le64_to_cpu(dp->ldp_hash_end);
1359 if (pos == MDS_DIR_END_OFF) {
1361 * End of directory reached.
1363 ll_release_page(dir, page, false);
1367 * chain is exhausted
1368 * Normal case: continue to the next page.
1370 ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1372 page = ll_get_dir_page(dir, op_data, pos, &chain);
1377 ll_dir_chain_fini(&chain);
1378 ll_finish_md_op_data(op_data);
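/* revalidate the dentry from the statahead cache: wait for the entry if the stat is still in flight, splice the prepared inode into the dentry, and revalidate the ibits lock */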
1382 static int revalidate_statahead_dentry(struct inode *dir,
1383 struct ll_statahead_info *sai,
1384 struct dentry **dentryp,
1387 struct sa_entry *entry = NULL;
1388 struct l_wait_info lwi = { 0 };
1392 if ((*dentryp)->d_name.name[0] == '.') {
1393 if (sai->sai_ls_all ||
1394 sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1396 * The hidden dentry is the first one, or the statahead
1397 * thread has not skipped that many hidden dentries
1398 * before "sai_ls_all" was enabled, as below.
1401 if (!sai->sai_ls_all)
1403 * It may be because the hidden dentry is not
1404 * the first one and "sai_ls_all" was not
1405 * set, so "ls -al" missed it. Enable
1406 * "sai_ls_all" for such a case.
1408 sai->sai_ls_all = 1;
1411 * Such a "getattr" has been skipped before
1412 * "sai_ls_all" was enabled, as above.
1414 sai->sai_miss_hidden++;
1419 entry = sa_get(sai, &(*dentryp)->d_name);
1420 if (entry == NULL || only_unplug) {
1422 RETURN(entry ? 1 : -EAGAIN);
1425 /* if statahead is busy in readdir, help it do post-work */
1426 if (!sa_ready(entry) && sai->sai_in_readpage)
1427 sa_handle_callback(sai);
1429 if (!sa_ready(entry)) {
1430 sai->sai_index_wait = entry->se_index;
1431 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1432 LWI_ON_SIGNAL_NOOP, NULL);
1433 rc = l_wait_event(sai->sai_waitq,
1435 thread_is_stopped(&sai->sai_thread),
1443 if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode != NULL) {
1444 struct inode *inode = entry->se_inode;
1445 struct lookup_intent it = { .it_op = IT_GETATTR,
1446 .d.lustre.it_lock_handle =
1450 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1451 ll_inode2fid(inode), &bits);
1453 if ((*dentryp)->d_inode == NULL) {
1454 struct dentry *alias;
1456 alias = ll_splice_alias(inode, *dentryp);
1457 if (IS_ERR(alias)) {
1459 RETURN(PTR_ERR(alias));
1462 } else if ((*dentryp)->d_inode != inode) {
1463 /* revalidate, but inode is recreated */
1465 "%s: stale dentry %.*s inode "
1466 DFID", statahead inode "DFID
1468 ll_get_fsname((*dentryp)->d_inode->i_sb,
1470 (*dentryp)->d_name.len,
1471 (*dentryp)->d_name.name,
1472 PFID(ll_inode2fid((*dentryp)->d_inode)),
1473 PFID(ll_inode2fid(inode)));
1479 entry->se_inode = NULL;
1481 if ((bits & MDS_INODELOCK_LOOKUP) &&
1482 d_lustre_invalid(*dentryp))
1483 d_lustre_revalidate(*dentryp);
1484 ll_intent_release(&it);
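/* set up a statahead info for this directory and start the statahead thread */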
1492 static int start_statahead_thread(struct inode *dir, struct dentry *dentry)
1494 struct ll_inode_info *lli = ll_i2info(dir);
1495 struct ll_statahead_info *sai = NULL;
1496 struct dentry *parent;
1497 struct ptlrpc_thread *thread;
1498 struct l_wait_info lwi = { 0 };
1499 struct task_struct *task;
1503 /* I am the "lli_opendir_pid" owner, so only I can set "lli_sai". */
1504 rc = is_first_dirent(dir, dentry);
1505 if (rc == LS_NONE_FIRST_DE)
1506 /* It is not an "ls -{a}l" operation, so there is no need for statahead. */
1507 GOTO(out, rc = -EAGAIN);
1509 sai = ll_sai_alloc();
1511 GOTO(out, rc = -ENOMEM);
1513 sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1514 sai->sai_inode = igrab(dir);
1515 if (unlikely(sai->sai_inode == NULL)) {
1516 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1517 PFID(&lli->lli_fid));
1518 GOTO(out, rc = -ESTALE);
1521 /* take a reference on the parent here; it is dropped in ll_statahead_thread() */
1522 parent = dget(dentry->d_parent);
1523 if (unlikely(sai->sai_inode != parent->d_inode)) {
1524 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1526 CWARN("Race condition, someone changed %.*s just now: "
1527 "old parent "DFID", new parent "DFID"\n",
1528 dentry->d_name.len, dentry->d_name.name,
1529 PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1531 iput(sai->sai_inode);
1532 GOTO(out, rc = -EAGAIN);
1535 CDEBUG(D_READA, "start statahead thread: sai %p, parent %.*s\n",
1536 sai, parent->d_name.len, parent->d_name.name);
1538 /* if another process has started the statahead thread, or has deauthorized the
1539 * current lli_opendir_key, don't start statahead. */
1540 spin_lock(&lli->lli_sa_lock);
1541 if (unlikely(lli->lli_sai != NULL ||
1542 lli->lli_opendir_key == NULL ||
1543 lli->lli_opendir_pid != current->pid)) {
1544 spin_unlock(&lli->lli_sa_lock);
1547 iput(sai->sai_inode);
1548 GOTO(out, rc = -EAGAIN);
1551 spin_unlock(&lli->lli_sa_lock);
1553 atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running);
1555 task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
1556 lli->lli_opendir_pid);
1557 thread = &sai->sai_thread;
1560 CERROR("cannot start ll_sa thread: rc = %d\n", rc);
1563 spin_lock(&lli->lli_sa_lock);
1564 thread_set_flags(thread, SVC_STOPPED);
1565 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1566 spin_unlock(&lli->lli_sa_lock);
1569 LASSERT(lli->lli_sai == NULL);
1573 l_wait_event(thread->t_ctl_waitq,
1574 thread_is_running(thread) || thread_is_stopped(thread),
1579 * We don't stat-ahead for the first dirent since we are already in
1588 /* once starting the statahead thread has failed, disable statahead so
1589 * subsequent calls won't waste time trying it. */
1590 spin_lock(&lli->lli_sa_lock);
1591 lli->lli_sa_enabled = 0;
1592 spin_unlock(&lli->lli_sa_lock);
1598 * Start the statahead thread if this is the first dir entry.
1599 * Otherwise, if a thread has already been started, wait until it is ahead of me.
1600 * \retval 1 -- found entry with lock in cache, the caller needs to do
1602 * \retval 0 -- found entry in cache, but without lock, the caller needs
1604 * \retval others -- the caller needs to process it as non-statahead.
1606 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1609 struct ll_statahead_info *sai;
1611 sai = ll_sai_get(dir);
1615 rc = revalidate_statahead_dentry(dir, sai, dentryp,
1617 CDEBUG(D_READA, "revalidate statahead %.*s: %d.\n",
1618 (*dentryp)->d_name.len, (*dentryp)->d_name.name, rc);
1623 return start_statahead_thread(dir, *dentryp);