1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
29 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011 Whamcloud, Inc.
36 * This file is part of Lustre, http://www.lustre.org/
37 * Lustre is a trademark of Sun Microsystems, Inc.
41 #include <linux/sched.h>
43 #include <linux/smp_lock.h>
44 #include <linux/highmem.h>
45 #include <linux/pagemap.h>
47 #define DEBUG_SUBSYSTEM S_LLITE
49 #include <obd_support.h>
50 #include <lustre_lite.h>
51 #include <lustre_dlm.h>
52 #include <linux/lustre_version.h>
53 #include "llite_internal.h"
55 #define SA_OMITTED_ENTRY_MAX 8ULL
        /** negative values are for error cases */
        SA_ENTRY_INIT = 0,      /**< initial entry */
        SA_ENTRY_SUCC = 1,      /**< stat succeeded */
        SA_ENTRY_INVA = 2,      /**< invalid entry */
        SA_ENTRY_DEST = 3,      /**< entry to be destroyed */
66 /* link into sai->sai_entries_{sent,received,stated} */
        /* link into the sai's local hash table */
70 /* entry reference count */
71 cfs_atomic_t se_refcount;
72 /* entry index in the sai */
        /* lower-layer ldlm lock handle */
        /* entry size, including the name */
80 /* pointer to async getattr enqueue info */
81 struct md_enqueue_info *se_minfo;
82 /* pointer to the async getattr request */
83 struct ptlrpc_request *se_req;
84 /* pointer to the target inode */
85 struct inode *se_inode;
90 static unsigned int sai_generation = 0;
91 static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED;
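/*
 * Helper predicates: an entry is "unlinked" once it is on none of the sai
 * lists, and "unhashed" once it is off its sai hash chain.
 */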
93 static inline int ll_sa_entry_unlinked(struct ll_sa_entry *entry)
95 return cfs_list_empty(&entry->se_list);
98 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
100 return cfs_list_empty(&entry->se_hash);
 * The entry can only be released by the caller; it is necessary to hold
 * the lock.
106 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
109 return (entry->se_stat != SA_ENTRY_INIT);
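/*
 * Map a name hash to one of the LL_SA_CACHE_SIZE hash buckets.
 */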
112 static inline int ll_sa_entry_hash(int val)
114 return val & LL_SA_CACHE_MASK;
 * Insert the entry into the SA hash table.
121 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
123 int i = ll_sa_entry_hash(entry->se_qstr.hash);
125 cfs_spin_lock(&sai->sai_cache_lock[i]);
126 cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
127 cfs_spin_unlock(&sai->sai_cache_lock[i]);
 * Remove the entry from the SA hash table.
134 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
136 int i = ll_sa_entry_hash(entry->se_qstr.hash);
138 cfs_spin_lock(&sai->sai_cache_lock[i]);
139 cfs_list_del_init(&entry->se_hash);
140 cfs_spin_unlock(&sai->sai_cache_lock[i]);
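/*
 * Async glimpse (AGL) is only worthwhile for a regular file that already
 * has its stripe metadata (lli_smd), and only while AGL is valid for this
 * sai.
 */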
143 static inline int agl_should_run(struct ll_statahead_info *sai,
146 if (inode != NULL && S_ISREG(inode->i_mode) &&
            ll_i2info(inode)->lli_smd != NULL && sai->sai_agl_valid)
                return 1;

        return 0;
152 static inline struct ll_sa_entry *
153 sa_first_received_entry(struct ll_statahead_info *sai)
155 return cfs_list_entry(sai->sai_entries_received.next,
156 struct ll_sa_entry, se_list);
159 static inline struct ll_inode_info *
160 agl_first_entry(struct ll_statahead_info *sai)
162 return cfs_list_entry(sai->sai_entries_agl.next,
163 struct ll_inode_info, lli_agl_list);
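/*
 * The statahead window is full once the cache holds sai_max entries.
 */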
166 static inline int sa_sent_full(struct ll_statahead_info *sai)
168 return cfs_atomic_read(&sai->sai_cache_count) >= sai->sai_max;
171 static inline int sa_received_empty(struct ll_statahead_info *sai)
173 return cfs_list_empty(&sai->sai_entries_received);
176 static inline int agl_list_empty(struct ll_statahead_info *sai)
178 return cfs_list_empty(&sai->sai_entries_agl);
 * The hit rate is considered low if either:
 * (1) the hit ratio is less than 80%, or
 * (2) there are more than 8 consecutive misses.
 * For example, hit = 8 and miss = 3 is a hit ratio of about 73%
 * (8 < 4 * 3), which counts as a low hit rate.
187 static inline int sa_low_hit(struct ll_statahead_info *sai)
189 return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
190 (sai->sai_consecutive_miss > 8));
 * If the given index falls behind the statahead window by more than
 * SA_OMITTED_ENTRY_MAX entries, it is considered stale.
197 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
        return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
                sai->sai_index);
 * Allocate a new entry and insert it at the tail of sai_entries_sent.
206 static struct ll_sa_entry *
207 ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
208 const char *name, int len)
210 struct ll_inode_info *lli;
211 struct ll_sa_entry *entry;
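        /* Reserve the name length rounded up to a 4-byte boundary; this
         * always leaves room for the terminating NUL. */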
216 entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
217 OBD_ALLOC(entry, entry_size);
218 if (unlikely(entry == NULL))
219 RETURN(ERR_PTR(-ENOMEM));
221 CDEBUG(D_READA, "alloc sai entry %.*s(%p) index "LPU64"\n",
222 len, name, entry, index);
224 entry->se_index = index;
         * Statahead entry reference rules:
         *
         * 1) When a statahead entry is initialized, its refcount is set to 2.
         *    One reference is used by the directory scanner: when the scanner
         *    searches the statahead cache for the given name, it can perform
         *    a lockless hash lookup (only the scanner removes entries from
         *    the hash list), and once the entry is found it need not call
         *    "atomic_inc()" on it, which improves performance. After using
         *    the entry, the scanner calls "atomic_dec()" to drop the
         *    reference taken at initialization; if that is the last
         *    reference, the entry is freed.
         *
         * 2) All other threads, including the statahead thread and the
         *    ptlrpcd thread, must hold a reference on the entry while
         *    processing it, to guarantee that it is not released by the
         *    directory scanner. After processing the entry, these threads
         *    drop their reference; if that is the last reference, the entry
         *    is freed.
         *
         *    The second reference taken at initialization is used by the
         *    statahead thread, following rule 2).
249 cfs_atomic_set(&entry->se_refcount, 2);
250 entry->se_stat = SA_ENTRY_INIT;
251 entry->se_size = entry_size;
252 dname = (char *)entry + sizeof(struct ll_sa_entry);
253 memcpy(dname, name, len);
255 entry->se_qstr.hash = full_name_hash(name, len);
256 entry->se_qstr.len = len;
257 entry->se_qstr.name = dname;
259 lli = ll_i2info(sai->sai_inode);
260 cfs_spin_lock(&lli->lli_sa_lock);
261 cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
262 cfs_spin_unlock(&lli->lli_sa_lock);
264 cfs_atomic_inc(&sai->sai_cache_count);
265 ll_sa_entry_enhash(sai, entry);
 * Used by the directory scanner to look up an entry by name.
 *
 * Only the caller can remove the entry from the hash, so it is unnecessary
 * to hold the hash lock. It is the caller's duty to release the initial
 * refcount on the entry, so it is also unnecessary to take an extra
 * refcount here.
277 static struct ll_sa_entry *
278 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
280 struct ll_sa_entry *entry;
281 int i = ll_sa_entry_hash(qstr->hash);
283 cfs_list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
284 if (entry->se_qstr.hash == qstr->hash &&
285 entry->se_qstr.len == qstr->len &&
286 memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
 * Used by the async getattr request callback to find an entry by index.
 *
 * Called with lli_sa_lock held, to prevent others from changing the list
 * during the search. The entry refcount is increased before returning, to
 * guarantee that the entry cannot be freed by others.
299 static struct ll_sa_entry *
300 ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
302 struct ll_sa_entry *entry;
304 cfs_list_for_each_entry(entry, &sai->sai_entries_sent, se_list) {
305 if (entry->se_index == index) {
306 cfs_atomic_inc(&entry->se_refcount);
                if (entry->se_index > index)
                        break;
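/*
 * Release the entry's async getattr resources: the intent held in
 * "se_minfo" and the cached request in "se_req".
 */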
315 static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
316 struct ll_sa_entry *entry)
318 struct md_enqueue_info *minfo = entry->se_minfo;
319 struct ptlrpc_request *req = entry->se_req;
322 entry->se_minfo = NULL;
323 ll_intent_release(&minfo->mi_it);
329 entry->se_req = NULL;
330 ptlrpc_req_finished(req);
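/*
 * Drop one reference on the entry; the last put releases the entry's
 * request resources, its inode reference, and the entry itself.
 */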
334 static void ll_sa_entry_put(struct ll_statahead_info *sai,
335 struct ll_sa_entry *entry)
337 if (cfs_atomic_dec_and_test(&entry->se_refcount)) {
338 CDEBUG(D_READA, "free sai entry %.*s(%p) index "LPU64"\n",
339 entry->se_qstr.len, entry->se_qstr.name, entry,
342 LASSERT(ll_sa_entry_unhashed(entry));
343 LASSERT(ll_sa_entry_unlinked(entry));
345 ll_sa_entry_cleanup(sai, entry);
347 iput(entry->se_inode);
349 OBD_FREE(entry, entry->se_size);
350 cfs_atomic_dec(&sai->sai_cache_count);
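/*
 * Unhash the entry, mark it as being destroyed, unlink it from whichever
 * sai list it is on, and drop one reference.
 */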
355 do_sai_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
357 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
359 ll_sa_entry_unhash(sai, entry);
361 cfs_spin_lock(&lli->lli_sa_lock);
362 entry->se_stat = SA_ENTRY_DEST;
363 if (likely(!ll_sa_entry_unlinked(entry)))
364 cfs_list_del_init(&entry->se_list);
365 cfs_spin_unlock(&lli->lli_sa_lock);
367 ll_sa_entry_put(sai, entry);
 * Finalize the given entry, and also drop any entries on the sent and
 * stated lists that have fallen out of the statahead window.
374 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
376 struct ll_sa_entry *pos, *next;
379 do_sai_entry_fini(sai, entry);
381 /* drop old entry from sent list */
382 cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries_sent,
384 if (is_omitted_entry(sai, pos->se_index))
385 do_sai_entry_fini(sai, pos);
390 /* drop old entry from stated list */
391 cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries_stated,
393 if (is_omitted_entry(sai, pos->se_index))
394 do_sai_entry_fini(sai, pos);
 * Called with lli_sa_lock held.
404 do_sai_entry_to_stated(struct ll_statahead_info *sai,
405 struct ll_sa_entry *entry, int rc)
407 struct ll_sa_entry *se;
408 cfs_list_t *pos = &sai->sai_entries_stated;
410 if (!ll_sa_entry_unlinked(entry))
411 cfs_list_del_init(&entry->se_list);
413 cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
414 if (se->se_index < entry->se_index) {
420 cfs_list_add(&entry->se_list, pos);
 * Move the entry to sai_entries_stated, keeping the list sorted by index.
 * \retval 1 -- the entry is to be destroyed.
 * \retval 0 -- the entry was inserted into the stated list.
430 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
431 struct ll_sa_entry *entry, int rc)
433 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
436 ll_sa_entry_cleanup(sai, entry);
438 cfs_spin_lock(&lli->lli_sa_lock);
439 if (likely(entry->se_stat != SA_ENTRY_DEST)) {
440 do_sai_entry_to_stated(sai, entry, rc);
443 cfs_spin_unlock(&lli->lli_sa_lock);
449 * Insert inode into the list of sai_entries_agl.
451 static void ll_agl_add(struct ll_statahead_info *sai,
452 struct inode *inode, int index)
454 struct ll_inode_info *child = ll_i2info(inode);
455 struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
458 cfs_spin_lock(&child->lli_agl_lock);
459 if (child->lli_agl_index == 0) {
460 child->lli_agl_index = index;
461 cfs_spin_unlock(&child->lli_agl_lock);
463 LASSERT(cfs_list_empty(&child->lli_agl_list));
466 cfs_spin_lock(&parent->lli_agl_lock);
467 if (agl_list_empty(sai))
469 cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
470 cfs_spin_unlock(&parent->lli_agl_lock);
472 cfs_spin_unlock(&child->lli_agl_lock);
476 cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
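/*
 * Allocate a statahead info structure with one reference held, a fresh
 * generation number, and all lists and hash buckets initialized.
 */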
479 static struct ll_statahead_info *ll_sai_alloc(void)
481 struct ll_statahead_info *sai;
489 cfs_atomic_set(&sai->sai_refcount, 1);
491 cfs_spin_lock(&sai_generation_lock);
492 sai->sai_generation = ++sai_generation;
493 if (unlikely(sai_generation == 0))
494 sai->sai_generation = ++sai_generation;
495 cfs_spin_unlock(&sai_generation_lock);
497 sai->sai_max = LL_SA_RPC_MIN;
499 cfs_waitq_init(&sai->sai_waitq);
500 cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
501 cfs_waitq_init(&sai->sai_agl_thread.t_ctl_waitq);
503 CFS_INIT_LIST_HEAD(&sai->sai_entries_sent);
504 CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
505 CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
506 CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
508 for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
509 CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
510 cfs_spin_lock_init(&sai->sai_cache_lock[i]);
512 cfs_atomic_set(&sai->sai_cache_count, 0);
517 static inline struct ll_statahead_info *
518 ll_sai_get(struct ll_statahead_info *sai)
520 cfs_atomic_inc(&sai->sai_refcount);
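/*
 * Drop one sai reference. The final put, serialized by lli_sa_lock,
 * finalizes all remaining entries and resets the directory's statahead
 * state.
 */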
524 static void ll_sai_put(struct ll_statahead_info *sai)
526 struct inode *inode = sai->sai_inode;
527 struct ll_inode_info *lli = ll_i2info(inode);
530 if (cfs_atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
531 struct ll_sa_entry *entry, *next;
533 if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
                        /* This is a race case: the interpret callback has
                         * just taken a reference. */
536 cfs_spin_unlock(&lli->lli_sa_lock);
540 LASSERT(lli->lli_opendir_key == NULL);
541 LASSERT(thread_is_stopped(&sai->sai_thread));
542 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
545 lli->lli_opendir_pid = 0;
546 cfs_spin_unlock(&lli->lli_sa_lock);
548 if (sai->sai_sent > sai->sai_replied)
                        CDEBUG(D_READA, "statahead for dir "DFID" does not "
                               "finish: [sent:"LPU64"] [replied:"LPU64"]\n",
                               PFID(&lli->lli_fid),
                               sai->sai_sent, sai->sai_replied);
554 cfs_list_for_each_entry_safe(entry, next,
555 &sai->sai_entries_sent, se_list)
556 do_sai_entry_fini(sai, entry);
558 LASSERT(sa_received_empty(sai));
560 cfs_list_for_each_entry_safe(entry, next,
561 &sai->sai_entries_stated, se_list)
562 do_sai_entry_fini(sai, entry);
564 LASSERT(cfs_atomic_read(&sai->sai_cache_count) == 0);
565 LASSERT(agl_list_empty(sai));
/* Do NOT forget to drop the inode refcount that was taken when the inode
 * was put into sai_entries_agl. */
575 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
577 struct ll_inode_info *lli = ll_i2info(inode);
578 __u64 index = lli->lli_agl_index;
582 LASSERT(cfs_list_empty(&lli->lli_agl_list));
        /* AGL may fall behind statahead by one entry */
585 if (is_omitted_entry(sai, index + 1)) {
586 lli->lli_agl_index = 0;
591 /* Someone is in glimpse (sync or async), do nothing. */
592 rc = cfs_down_write_trylock(&lli->lli_glimpse_sem);
594 lli->lli_agl_index = 0;
         * Someone triggered a glimpse within the last second.
         * 1) The former glimpse succeeded with a glimpse lock granted by the
         *    OST. If the lock is still cached on the client, AGL needs to do
         *    nothing. If it has been cancelled by another client, AGL may be
         *    unable to obtain a new lock, since AGL triggers no glimpse
         *    callback.
         * 2) The former glimpse succeeded, but the OST did not grant a
         *    glimpse lock. In that case it is quite possible that the OST
         *    will not grant one to AGL either.
         * 3) The former glimpse failed. Compared with the other two cases
         *    this is relatively rare; AGL can ignore it without much effect
         *    on performance.
612 if (lli->lli_glimpse_time != 0 &&
613 cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
614 cfs_up_write(&lli->lli_glimpse_sem);
615 lli->lli_agl_index = 0;
620 CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
621 DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
624 lli->lli_agl_index = 0;
625 lli->lli_glimpse_time = cfs_time_current();
626 cfs_up_write(&lli->lli_glimpse_sem);
        CDEBUG(D_READA, "Handled (init) async glimpse: inode = "
629 DFID", idx = "LPU64", rc = %d\n",
630 PFID(&lli->lli_fid), index, rc);
637 static void do_statahead_interpret(struct ll_statahead_info *sai,
638 struct ll_sa_entry *target)
640 struct inode *dir = sai->sai_inode;
642 struct ll_inode_info *lli = ll_i2info(dir);
643 struct ll_sa_entry *entry;
644 struct md_enqueue_info *minfo;
645 struct lookup_intent *it;
646 struct ptlrpc_request *req;
647 struct mdt_body *body;
651 cfs_spin_lock(&lli->lli_sa_lock);
652 if (target != NULL && target->se_req != NULL &&
            !cfs_list_empty(&target->se_list)) {
                entry = target;
655 } else if (unlikely(sa_received_empty(sai))) {
656 cfs_spin_unlock(&lli->lli_sa_lock);
659 entry = sa_first_received_entry(sai);
662 cfs_atomic_inc(&entry->se_refcount);
663 cfs_list_del_init(&entry->se_list);
664 cfs_spin_unlock(&lli->lli_sa_lock);
666 LASSERT(entry->se_handle != 0);
668 minfo = entry->se_minfo;
671 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
673 GOTO(out, rc = -EFAULT);
675 child = entry->se_inode;
680 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
                /* XXX: No fid in the reply; this is probably a cross-ref
                 * case, which SA can't handle yet. */
684 if (body->valid & OBD_MD_MDS)
685 GOTO(out, rc = -EAGAIN);
690 /* unlinked and re-created with the same name */
        if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
692 entry->se_inode = NULL;
698 it->d.lustre.it_lock_handle = entry->se_handle;
699 rc = md_revalidate_lock(ll_i2mdexp(dir), it, NULL, NULL);
701 GOTO(out, rc = -EAGAIN);
703 rc = ll_prep_inode(&child, req, dir->i_sb);
707 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
708 child, child->i_ino, child->i_generation);
709 ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
711 entry->se_inode = child;
713 if (agl_should_run(sai, child))
714 ll_agl_add(sai, child, entry->se_index);
        /* "ll_sa_entry_to_stated()" drops the related ldlm ibits lock
         * reference by calling "ll_intent_drop_lock()", regardless of whether
         * the above operations succeeded or failed. Calling
         * "ll_intent_drop_lock()" more than once is harmless. */
723 rc = ll_sa_entry_to_stated(sai, entry, rc < 0 ? rc : SA_ENTRY_SUCC);
724 if (rc == 0 && entry->se_index == sai->sai_index_wait && target == NULL)
725 cfs_waitq_signal(&sai->sai_waitq);
726 ll_sa_entry_put(sai, entry);
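/*
 * Callback of the async getattr RPC. On failure the entry is moved
 * straight to the stated list; on success the request and intent are
 * stashed on the entry, the entry is queued on the received list, and the
 * statahead thread is woken up to process it.
 */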
729 static int ll_statahead_interpret(struct ptlrpc_request *req,
730 struct md_enqueue_info *minfo, int rc)
732 struct lookup_intent *it = &minfo->mi_it;
733 struct inode *dir = minfo->mi_dir;
734 struct ll_inode_info *lli = ll_i2info(dir);
735 struct ll_statahead_info *sai = NULL;
736 struct ll_sa_entry *entry;
        if (it_disposition(it, DISP_LOOKUP_NEG))
                rc = -ENOENT;
743 cfs_spin_lock(&lli->lli_sa_lock);
745 if (unlikely(lli->lli_sai == NULL ||
746 lli->lli_sai->sai_generation != minfo->mi_generation)) {
747 cfs_spin_unlock(&lli->lli_sa_lock);
748 GOTO(out, rc = -ESTALE);
750 sai = ll_sai_get(lli->lli_sai);
751 if (unlikely(!thread_is_running(&sai->sai_thread))) {
753 cfs_spin_unlock(&lli->lli_sa_lock);
754 GOTO(out, rc = -EBADFD);
757 entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
760 cfs_spin_unlock(&lli->lli_sa_lock);
761 GOTO(out, rc = -EIDRM);
764 cfs_list_del_init(&entry->se_list);
767 do_sai_entry_to_stated(sai, entry, rc);
768 cfs_spin_unlock(&lli->lli_sa_lock);
769 if (entry->se_index == sai->sai_index_wait)
770 cfs_waitq_signal(&sai->sai_waitq);
772 entry->se_minfo = minfo;
773 entry->se_req = ptlrpc_request_addref(req);
                /* Release the async ibits lock ASAP, to avoid a deadlock
                 * between the statahead thread enqueuing a lock on the parent
                 * for readpage and another thread enqueuing a lock on the
                 * child while holding the parent's lock, for example during
                 * unlink. */
778 entry->se_handle = it->d.lustre.it_lock_handle;
779 ll_intent_drop_lock(it);
780 wakeup = sa_received_empty(sai);
781 cfs_list_add_tail(&entry->se_list,
782 &sai->sai_entries_received);
784 cfs_spin_unlock(&lli->lli_sa_lock);
786 cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
788 ll_sa_entry_put(sai, entry);
795 ll_intent_release(it);
804 static void sa_args_fini(struct md_enqueue_info *minfo,
805 struct ldlm_enqueue_info *einfo)
807 LASSERT(minfo && einfo);
809 capa_put(minfo->mi_data.op_capa1);
810 capa_put(minfo->mi_data.op_capa2);
 * There is a race between "capa_put" and "ll_statahead_interpret" over
 * access to "op_data.op_capa[1,2]": "capa_put" drops the references on
 * "op_data.op_capa[1,2]" after "md_intent_getattr_async" has been called,
 * but "ll_statahead_interpret" may run first and fill "op_data.op_capa[1,2]"
 * with POISON, causing "capa_put" to access an invalid "ocapa". So the capas
 * are saved in "pcapa" here before calling "md_intent_getattr_async".
824 static int sa_args_init(struct inode *dir, struct inode *child,
825 struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
826 struct ldlm_enqueue_info **pei,
827 struct obd_capa **pcapa)
829 struct qstr *qstr = &entry->se_qstr;
830 struct ll_inode_info *lli = ll_i2info(dir);
831 struct md_enqueue_info *minfo;
832 struct ldlm_enqueue_info *einfo;
833 struct md_op_data *op_data;
835 OBD_ALLOC_PTR(einfo);
839 OBD_ALLOC_PTR(minfo);
845 op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
846 qstr->len, 0, LUSTRE_OPC_ANY, NULL);
847 if (IS_ERR(op_data)) {
850 return PTR_ERR(op_data);
853 minfo->mi_it.it_op = IT_GETATTR;
854 minfo->mi_dir = igrab(dir);
855 minfo->mi_cb = ll_statahead_interpret;
856 minfo->mi_generation = lli->lli_sai->sai_generation;
857 minfo->mi_cbdata = entry->se_index;
859 einfo->ei_type = LDLM_IBITS;
860 einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
861 einfo->ei_cb_bl = ll_md_blocking_ast;
862 einfo->ei_cb_cp = ldlm_completion_ast;
863 einfo->ei_cb_gl = NULL;
864 einfo->ei_cbdata = NULL;
868 pcapa[0] = op_data->op_capa1;
869 pcapa[1] = op_data->op_capa2;
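/*
 * Fire an async getattr for an entry that has no dentry in the dcache.
 */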
874 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
876 struct md_enqueue_info *minfo;
877 struct ldlm_enqueue_info *einfo;
878 struct obd_capa *capas[2];
882 rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
886 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
891 sa_args_fini(minfo, einfo);
 * Similar to ll_revalidate_it().
 * \retval 1 -- the dentry is valid
 * \retval 0 -- a stat-ahead request will be sent
 * \retval others -- preparing the stat-ahead request failed
903 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
904 struct dentry *dentry)
906 struct inode *inode = dentry->d_inode;
907 struct lookup_intent it = { .it_op = IT_GETATTR,
908 .d.lustre.it_lock_handle = 0 };
909 struct md_enqueue_info *minfo;
910 struct ldlm_enqueue_info *einfo;
911 struct obd_capa *capas[2];
915 if (unlikely(inode == NULL))
918 if (d_mountpoint(dentry))
921 if (unlikely(dentry == dentry->d_sb->s_root))
924 entry->se_inode = igrab(inode);
        rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
                                NULL);
927 entry->se_handle = it.d.lustre.it_lock_handle;
928 ll_intent_release(&it);
932 rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
934 entry->se_inode = NULL;
939 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
944 entry->se_inode = NULL;
946 sa_args_fini(minfo, einfo);
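/*
 * Prefetch the attributes of one directory entry: allocate a statahead
 * entry, then either send an async lookup (no cached dentry) or
 * revalidate the cached dentry.
 */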
static void ll_statahead_one(struct dentry *parent, const char *entry_name,
                             int entry_name_len)
955 struct inode *dir = parent->d_inode;
956 struct ll_inode_info *lli = ll_i2info(dir);
957 struct ll_statahead_info *sai = lli->lli_sai;
958 struct dentry *dentry = NULL;
959 struct ll_sa_entry *entry;
        entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
                                  entry_name_len);
        dentry = d_lookup(parent, &entry->se_qstr);
        if (dentry == NULL) {
                rc = do_sa_lookup(dir, entry);
        } else {
                rc = do_sa_revalidate(dir, entry, dentry);
974 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
975 ll_agl_add(sai, dentry->d_inode, entry->se_index);
982 rc1 = ll_sa_entry_to_stated(sai, entry,
983 rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
984 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
985 cfs_waitq_signal(&sai->sai_waitq);
        /* drop one of the two references taken in ll_sa_entry_alloc() */
992 ll_sa_entry_put(sai, entry);
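/*
 * The AGL thread: pull inodes off sai_entries_agl and trigger an async
 * glimpse on each until told to stop; on exit, drop the references of any
 * inodes still queued.
 */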
997 static int ll_agl_thread(void *arg)
999 struct dentry *parent = (struct dentry *)arg;
1000 struct inode *dir = parent->d_inode;
1001 struct ll_inode_info *plli = ll_i2info(dir);
1002 struct ll_inode_info *clli;
1003 struct ll_sb_info *sbi = ll_i2sbi(dir);
1004 struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
1005 struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1006 struct l_wait_info lwi = { 0 };
1011 snprintf(pname, 15, "ll_agl_%u", plli->lli_opendir_pid);
1012 cfs_daemonize(pname);
1015 CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
1016 cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1018 atomic_inc(&sbi->ll_agl_total);
1019 cfs_spin_lock(&plli->lli_agl_lock);
1020 sai->sai_agl_valid = 1;
1021 thread_set_flags(thread, SVC_RUNNING);
1022 cfs_spin_unlock(&plli->lli_agl_lock);
1023 cfs_waitq_signal(&thread->t_ctl_waitq);
1026 l_wait_event(thread->t_ctl_waitq,
1027 !agl_list_empty(sai) ||
                             !thread_is_running(thread),
                             &lwi);
1031 if (!thread_is_running(thread))
1034 cfs_spin_lock(&plli->lli_agl_lock);
                /* The statahead thread may help process AGL entries,
                 * so check again whether the list is empty. */
1037 if (!agl_list_empty(sai)) {
1038 clli = agl_first_entry(sai);
1039 cfs_list_del_init(&clli->lli_agl_list);
1040 cfs_spin_unlock(&plli->lli_agl_lock);
1041 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1043 cfs_spin_unlock(&plli->lli_agl_lock);
1047 cfs_spin_lock(&plli->lli_agl_lock);
1048 sai->sai_agl_valid = 0;
1049 while (!agl_list_empty(sai)) {
1050 clli = agl_first_entry(sai);
1051 cfs_list_del_init(&clli->lli_agl_list);
1052 cfs_spin_unlock(&plli->lli_agl_lock);
1053 clli->lli_agl_index = 0;
1054 iput(&clli->lli_vfs_inode);
1055 cfs_spin_lock(&plli->lli_agl_lock);
1057 thread_set_flags(thread, SVC_STOPPED);
1058 cfs_spin_unlock(&plli->lli_agl_lock);
1059 cfs_waitq_signal(&thread->t_ctl_waitq);
1061 CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
1062 cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1066 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1068 struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1069 struct l_wait_info lwi = { 0 };
1073 CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
1074 cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1076 rc = cfs_create_thread(ll_agl_thread, parent, 0);
1078 CERROR("can't start ll_agl thread, rc: %d\n", rc);
1082 l_wait_event(thread->t_ctl_waitq,
                     thread_is_running(thread) || thread_is_stopped(thread),
                     &lwi);
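/*
 * The statahead thread: walk the directory pages, fire an async getattr
 * for each entry ahead of the scanner, consume the replies, and help
 * trigger AGL entries whenever the statahead window is full.
 */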
1088 static int ll_statahead_thread(void *arg)
1090 struct dentry *parent = (struct dentry *)arg;
1091 struct inode *dir = parent->d_inode;
1092 struct ll_inode_info *plli = ll_i2info(dir);
1093 struct ll_inode_info *clli;
1094 struct ll_sb_info *sbi = ll_i2sbi(dir);
1095 struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
1096 struct ptlrpc_thread *thread = &sai->sai_thread;
1101 struct ll_dir_chain chain;
1102 struct l_wait_info lwi = { 0 };
1107 snprintf(pname, 15, "ll_sa_%u", plli->lli_opendir_pid);
1108 cfs_daemonize(pname);
1111 CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
1112 cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1114 if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1115 ll_start_agl(parent, sai);
1117 atomic_inc(&sbi->ll_sa_total);
1118 cfs_spin_lock(&plli->lli_sa_lock);
1119 thread_set_flags(thread, SVC_RUNNING);
1120 cfs_spin_unlock(&plli->lli_sa_lock);
1121 cfs_waitq_signal(&thread->t_ctl_waitq);
1123 plli->lli_sa_pos = 0;
1124 ll_dir_chain_init(&chain);
1125 page = ll_get_dir_page(NULL, dir, pos, &chain);
1128 struct lu_dirpage *dp;
1129 struct lu_dirent *ent;
1133 CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
1134 "/"LPU64": [rc %d] [parent %u]\n",
1135 PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1136 rc, plli->lli_opendir_pid);
1140 dp = page_address(page);
1141 for (ent = lu_dirent_start(dp); ent != NULL;
1142 ent = lu_dirent_next(ent)) {
1147 hash = le64_to_cpu(ent->lde_hash);
                        if (unlikely(hash < pos))
                                /* Skip until we find the target hash value. */
                                continue;
1154 namelen = le16_to_cpu(ent->lde_namelen);
                        if (unlikely(namelen == 0))
                                /* Skip dummy record. */
                                continue;
1161 name = ent->lde_name;
1162 if (name[0] == '.') {
1168 } else if (name[1] == '.' && namelen == 2) {
1173 } else if (!sai->sai_ls_all) {
                                /* skip hidden files. */
1177 sai->sai_skip_hidden++;
                        /* don't stat-ahead the first entry. */
1185 if (unlikely(++first == 1))
1189 l_wait_event(thread->t_ctl_waitq,
1190 !sa_sent_full(sai) ||
1191 !sa_received_empty(sai) ||
1192 !agl_list_empty(sai) ||
                                     !thread_is_running(thread),
                                     &lwi);
1197 while (!sa_received_empty(sai))
1198 do_statahead_interpret(sai, NULL);
1200 if (unlikely(!thread_is_running(thread))) {
1201 ll_release_page(page, 0);
                        /* If there is no window for metadata statahead, but
                         * there are some AGL entries to be triggered, then
                         * try to help process the AGL entries. */
1208 if (sa_sent_full(sai)) {
1209 cfs_spin_lock(&plli->lli_agl_lock);
1210 while (!agl_list_empty(sai)) {
1211 clli = agl_first_entry(sai);
1212 cfs_list_del_init(&clli->lli_agl_list);
1213 cfs_spin_unlock(&plli->lli_agl_lock);
1214 ll_agl_trigger(&clli->lli_vfs_inode,
1217 if (!sa_received_empty(sai))
1221 !thread_is_running(thread))) {
1222 ll_release_page(page, 0);
1226 if (!sa_sent_full(sai))
1229 cfs_spin_lock(&plli->lli_agl_lock);
1231 cfs_spin_unlock(&plli->lli_agl_lock);
1237 ll_statahead_one(parent, name, namelen);
1239 pos = le64_to_cpu(dp->ldp_hash_end);
1240 if (pos == MDS_DIR_END_OFF) {
                        /* End of directory reached. */
1244 ll_release_page(page, 0);
1246 l_wait_event(thread->t_ctl_waitq,
1247 !sa_received_empty(sai) ||
                                     sai->sai_sent == sai->sai_replied ||
                                     !thread_is_running(thread),
                                     &lwi);
1252 while (!sa_received_empty(sai))
1253 do_statahead_interpret(sai, NULL);
1255 if (unlikely(!thread_is_running(thread)))
1258 if (sai->sai_sent == sai->sai_replied &&
1259 sa_received_empty(sai))
1263 cfs_spin_lock(&plli->lli_agl_lock);
1264 while (!agl_list_empty(sai) &&
1265 thread_is_running(thread)) {
1266 clli = agl_first_entry(sai);
1267 cfs_list_del_init(&clli->lli_agl_list);
1268 cfs_spin_unlock(&plli->lli_agl_lock);
1269 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1270 cfs_spin_lock(&plli->lli_agl_lock);
1272 cfs_spin_unlock(&plli->lli_agl_lock);
                        /*
                         * Chain is exhausted.
                         * Normal case: continue to the next page.
                         */
1280 ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1282 plli->lli_sa_pos = pos;
1283 sai->sai_in_readpage = 1;
1284 page = ll_get_dir_page(NULL, dir, pos, &chain);
1285 sai->sai_in_readpage = 0;
1287 LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1288 ll_release_page(page, 1);
                        /* go into the overflow page. */
1297 if (sai->sai_agl_valid) {
1298 struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
1300 cfs_spin_lock(&plli->lli_agl_lock);
1301 thread_set_flags(agl_thread, SVC_STOPPING);
1302 cfs_spin_unlock(&plli->lli_agl_lock);
1303 cfs_waitq_signal(&agl_thread->t_ctl_waitq);
1305 CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
1307 l_wait_event(agl_thread->t_ctl_waitq,
                             thread_is_stopped(agl_thread),
                             &lwi);
1312 ll_dir_chain_fini(&chain);
1313 cfs_spin_lock(&plli->lli_sa_lock);
1314 if (!sa_received_empty(sai)) {
1315 thread_set_flags(thread, SVC_STOPPING);
1316 cfs_spin_unlock(&plli->lli_sa_lock);
1318 /* To release the resources held by received entries. */
1319 while (!sa_received_empty(sai))
1320 do_statahead_interpret(sai, NULL);
1322 cfs_spin_lock(&plli->lli_sa_lock);
1324 thread_set_flags(thread, SVC_STOPPED);
1325 cfs_spin_unlock(&plli->lli_sa_lock);
1326 cfs_waitq_signal(&sai->sai_waitq);
1327 cfs_waitq_signal(&thread->t_ctl_waitq);
1330 CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
1331 cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
 * Called in ll_file_release().
1338 void ll_stop_statahead(struct inode *dir, void *key)
1340 struct ll_inode_info *lli = ll_i2info(dir);
1342 if (unlikely(key == NULL))
1345 cfs_spin_lock(&lli->lli_sa_lock);
1346 if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
1347 cfs_spin_unlock(&lli->lli_sa_lock);
1351 lli->lli_opendir_key = NULL;
1354 struct l_wait_info lwi = { 0 };
1355 struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
1357 if (!thread_is_stopped(thread)) {
1358 thread_set_flags(thread, SVC_STOPPING);
1359 cfs_spin_unlock(&lli->lli_sa_lock);
1360 cfs_waitq_signal(&thread->t_ctl_waitq);
1362 CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
1364 l_wait_event(thread->t_ctl_waitq,
                                     thread_is_stopped(thread),
                                     &lwi);
1368 cfs_spin_unlock(&lli->lli_sa_lock);
         * Put the reference taken at the first statahead_enter. It may not
         * be the last reference if some statahead requests are still in
         * flight.
1376 ll_sai_put(lli->lli_sai);
1378 lli->lli_opendir_pid = 0;
1379 cfs_spin_unlock(&lli->lli_sa_lock);
enum {
        /** not first dirent, or is "." */
        LS_NONE_FIRST_DE = 0,
        /** the first non-hidden dirent */
        LS_FIRST_DE,
        /** the first hidden dirent, that is "." */
        LS_FIRST_DOT_DE
};
1398 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1400 struct ll_inode_info *lli = ll_i2info(dir);
1401 struct ll_dir_chain chain;
1402 struct qstr *target = &dentry->d_name;
1406 int rc = LS_NONE_FIRST_DE;
1409 lli->lli_sa_pos = 0;
1410 ll_dir_chain_init(&chain);
1411 page = ll_get_dir_page(NULL, dir, pos, &chain);
1414 struct lu_dirpage *dp;
1415 struct lu_dirent *ent;
1421 CERROR("error reading dir "DFID" at "LPU64": "
1422 "[rc %d] [parent %u]\n",
1423 PFID(ll_inode2fid(dir)), pos,
1424 rc, lli->lli_opendir_pid);
1428 dp = page_address(page);
1429 for (ent = lu_dirent_start(dp); ent != NULL;
1430 ent = lu_dirent_next(ent)) {
1435 hash = le64_to_cpu(ent->lde_hash);
                        /* ll_get_dir_page() can return any page containing
                         * the given hash, which may not be the start hash. */
1438 if (unlikely(hash < pos))
1441 namelen = le16_to_cpu(ent->lde_namelen);
                        if (unlikely(namelen == 0))
                                /* skip dummy record. */
                                continue;
1448 name = ent->lde_name;
1449 if (name[0] == '.') {
1455 else if (name[1] == '.' && namelen == 2)
1466 if (dot_de && target->name[0] != '.') {
                                CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
                                       target->len, target->name,
                                       namelen, name);
                        if (target->len != namelen ||
                            memcmp(target->name, name, namelen) != 0)
                                rc = LS_NONE_FIRST_DE;
                        else if (!dot_de)
                                rc = LS_FIRST_DE;
                        else
                                rc = LS_FIRST_DOT_DE;
1481 ll_release_page(page, 0);
1484 pos = le64_to_cpu(dp->ldp_hash_end);
1485 if (pos == MDS_DIR_END_OFF) {
                        /* End of directory reached. */
1489 ll_release_page(page, 0);
                        /*
                         * Chain is exhausted.
                         * Normal case: continue to the next page.
                         */
1496 ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1498 lli->lli_sa_pos = pos;
1499 page = ll_get_dir_page(NULL, dir, pos, &chain);
                        /* go into the overflow page. */
1504 LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1505 ll_release_page(page, 1);
1511 ll_dir_chain_fini(&chain);
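/*
 * Called by the scanner after consuming an entry: update the hit/miss
 * statistics, enlarge the statahead window on a hit, and stop the
 * statahead thread if the hit ratio drops too low.
 */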
1516 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1518 struct ptlrpc_thread *thread = &sai->sai_thread;
1519 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
1523 if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
1528 ll_sa_entry_fini(sai, entry);
1531 sai->sai_consecutive_miss = 0;
1532 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1534 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
1537 sai->sai_consecutive_miss++;
1538 if (sa_low_hit(sai) && thread_is_running(thread)) {
1539 atomic_inc(&sbi->ll_sa_wrong);
1540 CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1541 "ratio too low: hit/miss "LPU64"/"LPU64
1542 ", sent/replied "LPU64"/"LPU64", stopping "
1543 "statahead thread: pid %d\n",
1544 PFID(&lli->lli_fid), sai->sai_hit,
1545 sai->sai_miss, sai->sai_sent,
1546 sai->sai_replied, cfs_curproc_pid());
1547 cfs_spin_lock(&lli->lli_sa_lock);
1548 if (!thread_is_stopped(thread))
1549 thread_set_flags(thread, SVC_STOPPING);
1550 cfs_spin_unlock(&lli->lli_sa_lock);
1554 if (!thread_is_stopped(thread))
1555 cfs_waitq_signal(&thread->t_ctl_waitq);
 * Start the statahead thread if this is the first dir entry. Otherwise,
 * if a thread is already started, wait until it is ahead of me.
 * \retval 1 -- the entry was found in cache together with its lock; the
 *              caller needs to do nothing.
 * \retval 0 -- the entry was found in cache but without the lock; the
 *              caller needs to refresh it from the MDS.
 * \retval others -- the caller needs to proceed as non-statahead.
1569 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1572 struct ll_inode_info *lli = ll_i2info(dir);
1573 struct ll_statahead_info *sai = lli->lli_sai;
1574 struct dentry *parent;
1575 struct ll_sa_entry *entry;
1576 struct ptlrpc_thread *thread;
1577 struct l_wait_info lwi = { 0 };
1581 LASSERT(lli->lli_opendir_pid == cfs_curproc_pid());
1584 thread = &sai->sai_thread;
1585 if (unlikely(thread_is_stopped(thread) &&
1586 cfs_list_empty(&sai->sai_entries_stated))) {
1587 /* to release resource */
1588 ll_stop_statahead(dir, lli->lli_opendir_key);
1592 if ((*dentryp)->d_name.name[0] == '.') {
1593 if (sai->sai_ls_all ||
1594 sai->sai_miss_hidden >= sai->sai_skip_hidden) {
                        /*
                         * The hidden dentry is the first one, or the
                         * statahead thread did not skip so many hidden
                         * dentries before "sai_ls_all" was enabled below.
                         */
1601 if (!sai->sai_ls_all)
                                /*
                                 * Maybe the hidden dentry is not the first
                                 * one and "sai_ls_all" was not set, so
                                 * "ls -al" missed it. Enable "sai_ls_all"
                                 * for this case.
                                 */
1608 sai->sai_ls_all = 1;
                                /*
                                 * Such a "getattr" was skipped before
                                 * "sai_ls_all" was enabled above.
                                 */
1614 sai->sai_miss_hidden++;
1619 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1620 if (entry == NULL || only_unplug) {
1621 ll_sai_unplug(sai, entry);
1622 RETURN(entry ? 1 : -EAGAIN);
1625 while (!ll_sa_entry_stated(entry) &&
1626 sai->sai_in_readpage &&
1627 !sa_received_empty(sai))
1628 do_statahead_interpret(sai, entry);
1630 if (!ll_sa_entry_stated(entry)) {
1631 sai->sai_index_wait = entry->se_index;
1632 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1633 LWI_ON_SIGNAL_NOOP, NULL);
1634 rc = l_wait_event(sai->sai_waitq,
1635 ll_sa_entry_stated(entry) ||
                                  thread_is_stopped(thread),
                                  &lwi);
1639 ll_sai_unplug(sai, entry);
1644 if (entry->se_stat == SA_ENTRY_SUCC &&
1645 entry->se_inode != NULL) {
1646 struct inode *inode = entry->se_inode;
1647 struct lookup_intent it = { .it_op = IT_GETATTR,
1648 .d.lustre.it_lock_handle =
1650 struct ll_dentry_data *lld;
1653 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1654 ll_inode2fid(inode), &bits);
1656 if ((*dentryp)->d_inode == NULL) {
1657 *dentryp = ll_find_alias(inode,
1659 lld = ll_d2d(*dentryp);
1660 if (unlikely(lld == NULL))
1661 ll_dops_init(*dentryp, 1, 1);
1663 LASSERT((*dentryp)->d_inode == inode);
1665 ll_dentry_rehash(*dentryp, 0);
1668 entry->se_inode = NULL;
1670 ll_dentry_reset_flags(*dentryp, bits);
1671 ll_intent_release(&it);
1675 ll_sai_unplug(sai, entry);
        /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
1680 rc = is_first_dirent(dir, *dentryp);
1681 if (rc == LS_NONE_FIRST_DE)
                /* Not an "ls -{a}l" operation; no statahead needed for it. */
1683 GOTO(out, rc = -EAGAIN);
        sai = ll_sai_alloc();
        if (sai == NULL)
                GOTO(out, rc = -ENOMEM);
1689 sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1690 sai->sai_inode = igrab(dir);
1691 if (unlikely(sai->sai_inode == NULL)) {
1692 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1693 PFID(&lli->lli_fid));
1694 GOTO(out, rc = -ESTALE);
        /* Grab a parent reference here; it is put in ll_statahead_thread. */
1698 parent = dget((*dentryp)->d_parent);
1699 if (unlikely(sai->sai_inode != parent->d_inode)) {
1700 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1702 CWARN("Race condition, someone changed %.*s just now: "
1703 "old parent "DFID", new parent "DFID"\n",
1704 (*dentryp)->d_name.len, (*dentryp)->d_name.name,
1705 PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1707 iput(sai->sai_inode);
1708 GOTO(out, rc = -EAGAIN);
1711 CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
1712 cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1715 rc = cfs_create_thread(ll_statahead_thread, parent, 0);
1716 thread = &sai->sai_thread;
1718 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1720 lli->lli_opendir_key = NULL;
1721 thread_set_flags(thread, SVC_STOPPED);
1723 LASSERT(lli->lli_sai == NULL);
1727 l_wait_event(thread->t_ctl_waitq,
                     thread_is_running(thread) || thread_is_stopped(thread),
                     &lwi);
1732 * We don't stat-ahead for the first dirent since we are already in
1733 * lookup, and -EEXIST also indicates that this is the first dirent.
1740 cfs_spin_lock(&lli->lli_sa_lock);
1741 lli->lli_opendir_key = NULL;
1742 lli->lli_opendir_pid = 0;
1743 cfs_spin_unlock(&lli->lli_sa_lock);