4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
34 #include <linux/sched.h>
35 #include <linux/kthread.h>
37 #include <linux/highmem.h>
38 #include <linux/pagemap.h>
40 #define DEBUG_SUBSYSTEM S_LLITE
42 #include <obd_support.h>
43 #include <lustre_dlm.h>
44 #include "llite_internal.h"
46 #define SA_OMITTED_ENTRY_MAX 8ULL
49 /** negative values are for error cases */
50 SA_ENTRY_INIT = 0, /** init entry */
51 SA_ENTRY_SUCC = 1, /** stat succeed */
52 SA_ENTRY_INVA = 2, /** invalid entry */
55 /* sa_entry is not refcounted: the statahead thread allocates it and issues the
56 * async stat; the async stat callback ll_statahead_interpret() adds it to
57 * sai_interim_entries; later the statahead thread calls sa_handle_callback() to
58 * instantiate the entry and move it to sai_entries, after which only the scanner
59 * process can access and free it. */
61 /* link into sai_interim_entries or sai_entries */
62 struct list_head se_list;
63 /* link into sai hash table locally */
64 struct list_head se_hash;
65 /* entry index in the sai */
67 /* low layer ldlm lock handle */
71 /* entry size, contains name */
73 /* pointer to async getattr enqueue info */
74 struct md_enqueue_info *se_minfo;
75 /* pointer to the async getattr request */
76 struct ptlrpc_request *se_req;
77 /* pointer to the target inode */
78 struct inode *se_inode;
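/* per-filesystem statahead generation counter, protected by
 * sai_generation_lock; a fresh value is copied into lli_sa_generation each
 * time a statahead instance is allocated in ll_sai_alloc() */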
85 static unsigned int sai_generation = 0;
86 static DEFINE_SPINLOCK(sai_generation_lock);
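/* whether the entry has been unhashed from sai_cache */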
88 static inline int sa_unhashed(struct sa_entry *entry)
90 return list_empty(&entry->se_hash);
93 /* sa_entry is ready to use */
94 static inline int sa_ready(struct sa_entry *entry)
97 return (entry->se_state != SA_ENTRY_INIT);
100 /* hash value to put in sai_cache */
101 static inline int sa_hash(int val)
103 return val & LL_SA_CACHE_MASK;
106 /* hash entry into sai_cache */
108 sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
110 int i = sa_hash(entry->se_qstr.hash);
112 spin_lock(&sai->sai_cache_lock[i]);
113 list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
114 spin_unlock(&sai->sai_cache_lock[i]);
117 /* unhash entry from sai_cache */
119 sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
121 int i = sa_hash(entry->se_qstr.hash);
123 spin_lock(&sai->sai_cache_lock[i]);
124 list_del_init(&entry->se_hash);
125 spin_unlock(&sai->sai_cache_lock[i]);
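/* async glimpse lock (AGL) is only sent for regular files, and only while AGL
 * is still valid for this statahead instance */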
128 static inline int agl_should_run(struct ll_statahead_info *sai,
131 return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
134 static inline struct ll_inode_info *
135 agl_first_entry(struct ll_statahead_info *sai)
137 return list_entry(sai->sai_agls.next, struct ll_inode_info,
141 /* statahead window is full */
142 static inline int sa_sent_full(struct ll_statahead_info *sai)
144 return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
147 /* got async stat replies */
148 static inline int sa_has_callback(struct ll_statahead_info *sai)
150 return !list_empty(&sai->sai_interim_entries);
153 static inline int agl_list_empty(struct ll_statahead_info *sai)
155 return list_empty(&sai->sai_agls);
159 * (1) hit ratio is less than 80%, or
161 * (2) consecutive misses exceed 8,
162 * which means the hit rate is low.
164 static inline int sa_low_hit(struct ll_statahead_info *sai)
166 return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
167 (sai->sai_consecutive_miss > 8));
171 * if the given index is behind the statahead window by more than
172 * SA_OMITTED_ENTRY_MAX, then it is old.
174 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
176 return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
180 /* allocate sa_entry and hash it to allow scanner process to find it */
181 static struct sa_entry *
182 sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index,
183 const char *name, int len, const struct lu_fid *fid)
185 struct ll_inode_info *lli;
186 struct sa_entry *entry;
191 entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
192 OBD_ALLOC(entry, entry_size);
193 if (unlikely(entry == NULL))
194 RETURN(ERR_PTR(-ENOMEM));
196 CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
197 len, name, entry, index);
199 entry->se_index = index;
201 entry->se_state = SA_ENTRY_INIT;
202 entry->se_size = entry_size;
203 dname = (char *)entry + sizeof(struct sa_entry);
204 memcpy(dname, name, len);
206 entry->se_qstr.hash = ll_full_name_hash(parent, name, len);
207 entry->se_qstr.len = len;
208 entry->se_qstr.name = dname;
209 entry->se_fid = *fid;
211 lli = ll_i2info(sai->sai_dentry->d_inode);
213 spin_lock(&lli->lli_sa_lock);
214 INIT_LIST_HEAD(&entry->se_list);
215 sa_rehash(sai, entry);
216 spin_unlock(&lli->lli_sa_lock);
218 atomic_inc(&sai->sai_cache_count);
223 /* free sa_entry, which should have been unhashed and not in any list */
224 static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
226 CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
227 entry->se_qstr.len, entry->se_qstr.name, entry,
230 LASSERT(list_empty(&entry->se_list));
231 LASSERT(sa_unhashed(entry));
233 OBD_FREE(entry, entry->se_size);
234 atomic_dec(&sai->sai_cache_count);
238 * find sa_entry by name; used by the directory scanner, no lock is needed
239 * because only the scanner can remove the entry from the cache.
241 static struct sa_entry *
242 sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
244 struct sa_entry *entry;
245 int i = sa_hash(qstr->hash);
247 list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
248 if (entry->se_qstr.hash == qstr->hash &&
249 entry->se_qstr.len == qstr->len &&
250 memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
256 /* unhash and unlink sa_entry, and then free it */
258 sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
260 struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
262 LASSERT(!sa_unhashed(entry));
263 LASSERT(!list_empty(&entry->se_list));
264 LASSERT(sa_ready(entry));
266 sa_unhash(sai, entry);
268 spin_lock(&lli->lli_sa_lock);
269 list_del_init(&entry->se_list);
270 spin_unlock(&lli->lli_sa_lock);
272 if (entry->se_inode != NULL)
273 iput(entry->se_inode);
278 /* called by scanner after use, sa_entry will be killed */
280 sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
282 struct sa_entry *tmp, *next;
284 if (entry != NULL && entry->se_state == SA_ENTRY_SUCC) {
285 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
288 sai->sai_consecutive_miss = 0;
289 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
292 sai->sai_consecutive_miss++;
298 /* kill old completed entries; only the scanner process does this, no lock needed. */
300 list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
301 if (!is_omitted_entry(sai, tmp->se_index))
306 wake_up(&sai->sai_thread.t_ctl_waitq);
309 /* update state and insert the entry into sai_entries sorted by index; return
310 * true if the scanner is waiting on this entry. */
312 __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
315 struct list_head *pos = &sai->sai_entries;
316 __u64 index = entry->se_index;
318 LASSERT(!sa_ready(entry));
319 LASSERT(list_empty(&entry->se_list));
321 list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
322 if (se->se_index < entry->se_index) {
327 list_add(&entry->se_list, pos);
328 entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC;
330 return (index == sai->sai_index_wait);
334 * release resources used in the async stat RPC, update entry state and wake up
335 * the scanner process if it is waiting on this entry.
338 sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
340 struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
341 struct md_enqueue_info *minfo = entry->se_minfo;
342 struct ptlrpc_request *req = entry->se_req;
345 /* release resources used in RPC */
347 entry->se_minfo = NULL;
348 ll_intent_release(&minfo->mi_it);
354 entry->se_req = NULL;
355 ptlrpc_req_finished(req);
358 spin_lock(&lli->lli_sa_lock);
359 wakeup = __sa_make_ready(sai, entry, ret);
360 spin_unlock(&lli->lli_sa_lock);
363 wake_up(&sai->sai_waitq);
366 /* insert inode into the list of sai_agls */
367 static void ll_agl_add(struct ll_statahead_info *sai,
368 struct inode *inode, int index)
370 struct ll_inode_info *child = ll_i2info(inode);
371 struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);
374 spin_lock(&child->lli_agl_lock);
375 if (child->lli_agl_index == 0) {
376 child->lli_agl_index = index;
377 spin_unlock(&child->lli_agl_lock);
379 LASSERT(list_empty(&child->lli_agl_list));
382 spin_lock(&parent->lli_agl_lock);
383 if (agl_list_empty(sai))
385 list_add_tail(&child->lli_agl_list, &sai->sai_agls);
386 spin_unlock(&parent->lli_agl_lock);
388 spin_unlock(&child->lli_agl_lock);
392 wake_up(&sai->sai_agl_thread.t_ctl_waitq);
396 static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
398 struct ll_statahead_info *sai;
399 struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
407 sai->sai_dentry = dget(dentry);
408 atomic_set(&sai->sai_refcount, 1);
409 sai->sai_max = LL_SA_RPC_MIN;
411 init_waitqueue_head(&sai->sai_waitq);
412 init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
413 init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
415 INIT_LIST_HEAD(&sai->sai_interim_entries);
416 INIT_LIST_HEAD(&sai->sai_entries);
417 INIT_LIST_HEAD(&sai->sai_agls);
419 for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
420 INIT_LIST_HEAD(&sai->sai_cache[i]);
421 spin_lock_init(&sai->sai_cache_lock[i]);
423 atomic_set(&sai->sai_cache_count, 0);
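/* assign a fresh, non-zero statahead generation to the parent directory */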
425 spin_lock(&sai_generation_lock);
426 lli->lli_sa_generation = ++sai_generation;
427 if (unlikely(sai_generation == 0))
428 lli->lli_sa_generation = ++sai_generation;
429 spin_unlock(&sai_generation_lock);
435 static inline void ll_sai_free(struct ll_statahead_info *sai)
437 LASSERT(sai->sai_dentry != NULL);
438 dput(sai->sai_dentry);
443 * take a refcount on the sai if one exists for @dir, i.e. statahead is running for this directory.
446 static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
448 struct ll_inode_info *lli = ll_i2info(dir);
449 struct ll_statahead_info *sai = NULL;
451 spin_lock(&lli->lli_sa_lock);
454 atomic_inc(&sai->sai_refcount);
455 spin_unlock(&lli->lli_sa_lock);
461 * put sai refcount after use, if refcount reaches zero, free sai and sa_entries
464 static void ll_sai_put(struct ll_statahead_info *sai)
466 struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
468 if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
469 struct sa_entry *entry, *next;
470 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
473 spin_unlock(&lli->lli_sa_lock);
475 LASSERT(thread_is_stopped(&sai->sai_thread));
476 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
477 LASSERT(sai->sai_sent == sai->sai_replied);
478 LASSERT(!sa_has_callback(sai));
480 list_for_each_entry_safe(entry, next, &sai->sai_entries,
484 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
485 LASSERT(agl_list_empty(sai));
488 atomic_dec(&sbi->ll_sa_running);
492 /* Do NOT forget to drop the inode refcount taken when it was put onto sai_agls. */
493 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
495 struct ll_inode_info *lli = ll_i2info(inode);
496 u64 index = lli->lli_agl_index;
501 LASSERT(list_empty(&lli->lli_agl_list));
503 /* AGL may fall behind statahead by one entry */
504 if (is_omitted_entry(sai, index + 1)) {
505 lli->lli_agl_index = 0;
510 /* In case of restore, the MDT has the right size and has already
511 * sent it back without granting the layout lock; the inode is up-to-date,
512 * so AGL (async glimpse lock) is useless.
513 * Also, to glimpse we need the layout; while a restore is running
514 * the MDT holds the layout lock, so the glimpse would block until the
515 * end of restore (statahead/agl would block). */
516 if (ll_file_test_flag(lli, LLIF_FILE_RESTORING)) {
517 lli->lli_agl_index = 0;
522 /* Someone is in glimpse (sync or async), do nothing. */
523 rc = down_write_trylock(&lli->lli_glimpse_sem);
525 lli->lli_agl_index = 0;
531 * Someone triggered a glimpse within the last second.
532 * 1) The former glimpse succeeded with a glimpse lock granted by the OST; if
533 * the lock is still cached on the client, AGL needs to do nothing. If it
534 * was cancelled by another client, AGL may be unable to obtain a new lock
535 * because no glimpse callback is triggered by AGL.
536 * 2) The former glimpse succeeded, but the OST did not grant a glimpse lock.
537 * In that case it is quite possible that the OST will not grant a
538 * glimpse lock for AGL either.
539 * 3) The former glimpse failed; compared with the other two cases, this is
540 * relatively rare. AGL can ignore such a case, and it will not much
541 * affect performance.
543 expire = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
544 if (ktime_to_ns(lli->lli_glimpse_time) &&
545 ktime_before(expire, lli->lli_glimpse_time)) {
546 up_write(&lli->lli_glimpse_sem);
547 lli->lli_agl_index = 0;
552 CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
553 DFID", idx = %llu\n", PFID(&lli->lli_fid), index);
556 lli->lli_agl_index = 0;
557 lli->lli_glimpse_time = ktime_get();
558 up_write(&lli->lli_glimpse_sem);
560 CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
561 DFID", idx = %llu, rc = %d\n",
562 PFID(&lli->lli_fid), index, rc);
570 * prepare the inode for the sa entry and add it to the agl list; the sa_entry
571 * is then ready to be used by the scanner process.
573 static void sa_instantiate(struct ll_statahead_info *sai,
574 struct sa_entry *entry)
576 struct inode *dir = sai->sai_dentry->d_inode;
578 struct md_enqueue_info *minfo;
579 struct lookup_intent *it;
580 struct ptlrpc_request *req;
581 struct mdt_body *body;
585 LASSERT(entry->se_handle != 0);
587 minfo = entry->se_minfo;
590 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
592 GOTO(out, rc = -EFAULT);
594 child = entry->se_inode;
596 /* revalidate; unlinked and re-created with the same name */
597 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2,
599 entry->se_inode = NULL;
605 it->it_lock_handle = entry->se_handle;
606 rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
608 GOTO(out, rc = -EAGAIN);
610 rc = ll_prep_inode(&child, req, dir->i_sb, it);
614 CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
615 ll_get_fsname(child->i_sb, NULL, 0),
616 entry->se_qstr.len, entry->se_qstr.name,
617 PFID(ll_inode2fid(child)), child);
618 ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
620 entry->se_inode = child;
622 if (agl_should_run(sai, child))
623 ll_agl_add(sai, child, entry->se_index);
628 /* sa_make_ready() will drop ldlm ibits lock refcount by calling
629 * ll_intent_drop_lock() in spite of failures. Do not worry about
630 * calling ll_intent_drop_lock() more than once. */
631 sa_make_ready(sai, entry, rc);
634 /* once there are async stat replies, instantiate sa_entry from replies */
635 static void sa_handle_callback(struct ll_statahead_info *sai)
637 struct ll_inode_info *lli;
639 lli = ll_i2info(sai->sai_dentry->d_inode);
641 while (sa_has_callback(sai)) {
642 struct sa_entry *entry;
644 spin_lock(&lli->lli_sa_lock);
645 if (unlikely(!sa_has_callback(sai))) {
646 spin_unlock(&lli->lli_sa_lock);
649 entry = list_entry(sai->sai_interim_entries.next,
650 struct sa_entry, se_list);
651 list_del_init(&entry->se_list);
652 spin_unlock(&lli->lli_sa_lock);
654 sa_instantiate(sai, entry);
659 * callback for the async stat RPC; because this is called in ptlrpcd context,
660 * we only put the sa_entry on sai_interim_entries and wake up the statahead
661 * thread to actually prepare the inode and instantiate the sa_entry later.
663 static int ll_statahead_interpret(struct ptlrpc_request *req,
664 struct md_enqueue_info *minfo, int rc)
666 struct lookup_intent *it = &minfo->mi_it;
667 struct inode *dir = minfo->mi_dir;
668 struct ll_inode_info *lli = ll_i2info(dir);
669 struct ll_statahead_info *sai = lli->lli_sai;
670 struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
672 wait_queue_head_t *waitq = NULL;
675 if (it_disposition(it, DISP_LOOKUP_NEG))
678 /* because the statahead thread will wait for all inflight RPCs to finish,
679 * sai should always be valid, no need to refcount */
680 LASSERT(sai != NULL);
681 LASSERT(!thread_is_stopped(&sai->sai_thread));
682 LASSERT(entry != NULL);
684 CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
685 entry->se_qstr.len, entry->se_qstr.name, rc);
688 ll_intent_release(it);
692 /* release the ibits lock ASAP to avoid deadlock when the statahead
693 * thread enqueues a lock on the parent in readdir and another
694 * process enqueues a lock on the child with the parent lock held, e.g. unlink. */
696 handle = it->it_lock_handle;
697 ll_intent_drop_lock(it);
700 spin_lock(&lli->lli_sa_lock);
702 if (__sa_make_ready(sai, entry, rc))
703 waitq = &sai->sai_waitq;
705 entry->se_minfo = minfo;
706 entry->se_req = ptlrpc_request_addref(req);
707 /* Release the async ibits lock ASAP to avoid deadlock
708 * when the statahead thread tries to enqueue a lock on the parent
709 * for readpage and another tries to enqueue a lock on the child
710 * with the parent's lock held, for example: unlink. */
711 entry->se_handle = handle;
712 if (!sa_has_callback(sai))
713 waitq = &sai->sai_thread.t_ctl_waitq;
715 list_add_tail(&entry->se_list, &sai->sai_interim_entries);
722 spin_unlock(&lli->lli_sa_lock);
727 /* finish async stat RPC arguments */
728 static void sa_fini_data(struct md_enqueue_info *minfo)
735 * prepare arguments for async stat RPC.
737 static struct md_enqueue_info *
738 sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry)
740 struct md_enqueue_info *minfo;
741 struct ldlm_enqueue_info *einfo;
742 struct md_op_data *op_data;
744 OBD_ALLOC_PTR(minfo);
746 return ERR_PTR(-ENOMEM);
748 op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child,
749 entry->se_qstr.name, entry->se_qstr.len, 0,
750 LUSTRE_OPC_ANY, NULL);
751 if (IS_ERR(op_data)) {
753 return (struct md_enqueue_info *)op_data;
757 op_data->op_fid2 = entry->se_fid;
759 minfo->mi_it.it_op = IT_GETATTR;
760 minfo->mi_dir = igrab(dir);
761 minfo->mi_cb = ll_statahead_interpret;
762 minfo->mi_cbdata = entry;
764 einfo = &minfo->mi_einfo;
765 einfo->ei_type = LDLM_IBITS;
766 einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
767 einfo->ei_cb_bl = ll_md_blocking_ast;
768 einfo->ei_cb_cp = ldlm_completion_ast;
769 einfo->ei_cb_gl = NULL;
770 einfo->ei_cbdata = NULL;
775 /* async stat for file not found in dcache */
776 static int sa_lookup(struct inode *dir, struct sa_entry *entry)
778 struct md_enqueue_info *minfo;
782 minfo = sa_prep_data(dir, NULL, entry);
784 RETURN(PTR_ERR(minfo));
786 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
794 * async stat for file found in dcache, similar to .revalidate
796 * \retval 1 dentry valid, no RPC sent
797 * \retval 0 dentry invalid, will send async stat RPC
798 * \retval negative number upon error
800 static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
801 struct dentry *dentry)
803 struct inode *inode = dentry->d_inode;
804 struct lookup_intent it = { .it_op = IT_GETATTR,
805 .it_lock_handle = 0 };
806 struct md_enqueue_info *minfo;
810 if (unlikely(inode == NULL))
813 if (d_mountpoint(dentry))
816 entry->se_inode = igrab(inode);
817 rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
820 entry->se_handle = it.it_lock_handle;
821 ll_intent_release(&it);
825 minfo = sa_prep_data(dir, inode, entry);
827 entry->se_inode = NULL;
829 RETURN(PTR_ERR(minfo));
832 rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
834 entry->se_inode = NULL;
842 /* async stat for file with @name */
843 static void sa_statahead(struct dentry *parent, const char *name, int len,
844 const struct lu_fid *fid)
846 struct inode *dir = parent->d_inode;
847 struct ll_inode_info *lli = ll_i2info(dir);
848 struct ll_statahead_info *sai = lli->lli_sai;
849 struct dentry *dentry = NULL;
850 struct sa_entry *entry;
854 entry = sa_alloc(parent, sai, sai->sai_index, name, len, fid);
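/* send the async stat: a plain lookup if the dentry is not in the dcache,
 * otherwise revalidate the cached dentry */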
858 dentry = d_lookup(parent, &entry->se_qstr);
860 rc = sa_lookup(dir, entry);
862 rc = sa_revalidate(dir, entry, dentry);
863 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
864 ll_agl_add(sai, dentry->d_inode, entry->se_index);
871 sa_make_ready(sai, entry, rc);
880 /* async glimpse (agl) thread main function */
881 static int ll_agl_thread(void *arg)
883 struct dentry *parent = (struct dentry *)arg;
884 struct inode *dir = parent->d_inode;
885 struct ll_inode_info *plli = ll_i2info(dir);
886 struct ll_inode_info *clli;
887 struct ll_sb_info *sbi = ll_i2sbi(dir);
888 struct ll_statahead_info *sai;
889 struct ptlrpc_thread *thread;
890 struct l_wait_info lwi = { 0 };
894 sai = ll_sai_get(dir);
895 thread = &sai->sai_agl_thread;
896 thread->t_pid = current_pid();
897 CDEBUG(D_READA, "agl thread started: sai %p, parent %.*s\n",
898 sai, parent->d_name.len, parent->d_name.name);
900 atomic_inc(&sbi->ll_agl_total);
901 spin_lock(&plli->lli_agl_lock);
902 sai->sai_agl_valid = 1;
903 if (thread_is_init(thread))
904 /* If someone else has changed the thread state
905 * (e.g. already changed to SVC_STOPPING), we can't just
906 * blindly overwrite that setting. */
907 thread_set_flags(thread, SVC_RUNNING);
908 spin_unlock(&plli->lli_agl_lock);
909 wake_up(&thread->t_ctl_waitq);
912 l_wait_event(thread->t_ctl_waitq,
913 !agl_list_empty(sai) ||
914 !thread_is_running(thread),
917 if (!thread_is_running(thread))
920 spin_lock(&plli->lli_agl_lock);
921 /* The statahead thread may have helped to process AGL entries,
922 * so check again whether the list is empty. */
923 if (!agl_list_empty(sai)) {
924 clli = agl_first_entry(sai);
925 list_del_init(&clli->lli_agl_list);
926 spin_unlock(&plli->lli_agl_lock);
927 ll_agl_trigger(&clli->lli_vfs_inode, sai);
930 spin_unlock(&plli->lli_agl_lock);
934 spin_lock(&plli->lli_agl_lock);
935 sai->sai_agl_valid = 0;
936 while (!agl_list_empty(sai)) {
937 clli = agl_first_entry(sai);
938 list_del_init(&clli->lli_agl_list);
939 spin_unlock(&plli->lli_agl_lock);
940 clli->lli_agl_index = 0;
941 iput(&clli->lli_vfs_inode);
942 spin_lock(&plli->lli_agl_lock);
944 thread_set_flags(thread, SVC_STOPPED);
945 spin_unlock(&plli->lli_agl_lock);
946 wake_up(&thread->t_ctl_waitq);
948 CDEBUG(D_READA, "agl thread stopped: sai %p, parent %.*s\n",
949 sai, parent->d_name.len, parent->d_name.name);
953 /* start agl thread */
954 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
956 struct ptlrpc_thread *thread = &sai->sai_agl_thread;
957 struct l_wait_info lwi = { 0 };
958 struct ll_inode_info *plli;
959 struct task_struct *task;
962 CDEBUG(D_READA, "start agl thread: sai %p, parent %.*s\n",
963 sai, parent->d_name.len, parent->d_name.name);
965 plli = ll_i2info(parent->d_inode);
966 task = kthread_run(ll_agl_thread, parent,
967 "ll_agl_%u", plli->lli_opendir_pid);
969 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
970 thread_set_flags(thread, SVC_STOPPED);
974 l_wait_event(thread->t_ctl_waitq,
975 thread_is_running(thread) || thread_is_stopped(thread),
980 /* statahead thread main function */
981 static int ll_statahead_thread(void *arg)
983 struct dentry *parent = (struct dentry *)arg;
984 struct inode *dir = parent->d_inode;
985 struct ll_inode_info *lli = ll_i2info(dir);
986 struct ll_sb_info *sbi = ll_i2sbi(dir);
987 struct ll_statahead_info *sai;
988 struct ptlrpc_thread *sa_thread;
989 struct ptlrpc_thread *agl_thread;
991 struct md_op_data *op_data;
992 struct ll_dir_chain chain;
993 struct l_wait_info lwi = { 0 };
994 struct page *page = NULL;
999 sai = ll_sai_get(dir);
1000 sa_thread = &sai->sai_thread;
1001 agl_thread = &sai->sai_agl_thread;
1002 sa_thread->t_pid = current_pid();
1003 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n",
1004 sai, parent->d_name.len, parent->d_name.name);
1006 op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1007 LUSTRE_OPC_ANY, dir);
1008 if (IS_ERR(op_data))
1009 GOTO(out, rc = PTR_ERR(op_data));
1011 if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1012 ll_start_agl(parent, sai);
1014 atomic_inc(&sbi->ll_sa_total);
1015 spin_lock(&lli->lli_sa_lock);
1016 if (thread_is_init(sa_thread))
1017 /* If someone else has changed the thread state
1018 * (e.g. already changed to SVC_STOPPING), we can't just
1019 * blindly overwrite that setting. */
1020 thread_set_flags(sa_thread, SVC_RUNNING);
1021 spin_unlock(&lli->lli_sa_lock);
1022 wake_up(&sa_thread->t_ctl_waitq);
1024 ll_dir_chain_init(&chain);
1025 while (pos != MDS_DIR_END_OFF && thread_is_running(sa_thread)) {
1026 struct lu_dirpage *dp;
1027 struct lu_dirent *ent;
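/* flag that we are blocked reading a dir page so the scanner can help handle
 * interim entries, see revalidate_statahead_dentry() */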
1029 sai->sai_in_readpage = 1;
1030 page = ll_get_dir_page(dir, op_data, pos, &chain);
1031 sai->sai_in_readpage = 0;
1034 CDEBUG(D_READA, "error reading dir "DFID" at %llu"
1035 "/%llu opendir_pid = %u: rc = %d\n",
1036 PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1037 lli->lli_opendir_pid, rc);
1041 dp = page_address(page);
1042 for (ent = lu_dirent_start(dp);
1043 ent != NULL && thread_is_running(sa_thread) &&
1045 ent = lu_dirent_next(ent)) {
1051 hash = le64_to_cpu(ent->lde_hash);
1052 if (unlikely(hash < pos))
1054 * Skip until we find target hash value.
1058 namelen = le16_to_cpu(ent->lde_namelen);
1059 if (unlikely(namelen == 0))
1061 * Skip dummy record.
1065 name = ent->lde_name;
1066 if (name[0] == '.') {
1072 } else if (name[1] == '.' && namelen == 2) {
1077 } else if (!sai->sai_ls_all) {
1079 * skip hidden files.
1081 sai->sai_skip_hidden++;
1087 * don't stat-ahead first entry.
1089 if (unlikely(++first == 1))
1092 fid_le_to_cpu(&fid, &ent->lde_fid);
1094 /* wait for spare statahead window */
1096 l_wait_event(sa_thread->t_ctl_waitq,
1097 !sa_sent_full(sai) ||
1098 sa_has_callback(sai) ||
1099 !agl_list_empty(sai) ||
1100 !thread_is_running(sa_thread),
1103 sa_handle_callback(sai);
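/* while the statahead window is full, help the AGL thread drain its queue
 * instead of sitting idle */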
1105 spin_lock(&lli->lli_agl_lock);
1106 while (sa_sent_full(sai) &&
1107 !agl_list_empty(sai)) {
1108 struct ll_inode_info *clli;
1110 clli = agl_first_entry(sai);
1111 list_del_init(&clli->lli_agl_list);
1112 spin_unlock(&lli->lli_agl_lock);
1114 ll_agl_trigger(&clli->lli_vfs_inode,
1117 spin_lock(&lli->lli_agl_lock);
1119 spin_unlock(&lli->lli_agl_lock);
1120 } while (sa_sent_full(sai) &&
1121 thread_is_running(sa_thread));
1123 sa_statahead(parent, name, namelen, &fid);
1126 pos = le64_to_cpu(dp->ldp_hash_end);
1127 ll_release_page(dir, page,
1128 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1130 if (sa_low_hit(sai)) {
1132 atomic_inc(&sbi->ll_sa_wrong);
1133 CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1134 "ratio too low: hit/miss %llu/%llu"
1135 ", sent/replied %llu/%llu, stopping "
1136 "statahead thread: pid %d\n",
1137 PFID(&lli->lli_fid), sai->sai_hit,
1138 sai->sai_miss, sai->sai_sent,
1139 sai->sai_replied, current_pid());
1143 ll_dir_chain_fini(&chain);
1144 ll_finish_md_op_data(op_data);
1147 spin_lock(&lli->lli_sa_lock);
1148 thread_set_flags(sa_thread, SVC_STOPPING);
1149 lli->lli_sa_enabled = 0;
1150 spin_unlock(&lli->lli_sa_lock);
1153 /* statahead is finished, but statahead entries still need to be cached;
1154 * wait for file release to stop this thread. */
1155 while (thread_is_running(sa_thread)) {
1156 l_wait_event(sa_thread->t_ctl_waitq,
1157 sa_has_callback(sai) ||
1158 !thread_is_running(sa_thread),
1161 sa_handle_callback(sai);
1166 if (sai->sai_agl_valid) {
1167 spin_lock(&lli->lli_agl_lock);
1168 thread_set_flags(agl_thread, SVC_STOPPING);
1169 spin_unlock(&lli->lli_agl_lock);
1170 wake_up(&agl_thread->t_ctl_waitq);
1172 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1173 sai, (unsigned int)agl_thread->t_pid);
1174 l_wait_event(agl_thread->t_ctl_waitq,
1175 thread_is_stopped(agl_thread),
1178 /* Set agl_thread flags anyway. */
1179 thread_set_flags(agl_thread, SVC_STOPPED);
1182 /* wait for inflight statahead RPCs to finish, and then we can free sai
1183 * safely because the statahead RPCs access sai data */
1184 while (sai->sai_sent != sai->sai_replied) {
1185 /* in case we're not woken up, timeout wait */
1186 lwi = LWI_TIMEOUT(msecs_to_jiffies(MSEC_PER_SEC >> 3),
1188 l_wait_event(sa_thread->t_ctl_waitq,
1189 sai->sai_sent == sai->sai_replied, &lwi);
1192 /* release resources held by statahead RPCs */
1193 sa_handle_callback(sai);
1195 spin_lock(&lli->lli_sa_lock);
1196 thread_set_flags(sa_thread, SVC_STOPPED);
1197 spin_unlock(&lli->lli_sa_lock);
1199 CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n",
1200 sai, parent->d_name.len, parent->d_name.name);
1202 wake_up(&sai->sai_waitq);
1203 wake_up(&sa_thread->t_ctl_waitq);
1209 /* authorize opened dir handle @key to statahead */
1210 void ll_authorize_statahead(struct inode *dir, void *key)
1212 struct ll_inode_info *lli = ll_i2info(dir);
1214 spin_lock(&lli->lli_sa_lock);
1215 if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL) {
1217 * if lli_sai is not NULL, it means previous statahead is not
1218 * finished yet, we'd better not start a new statahead for now.
1220 LASSERT(lli->lli_opendir_pid == 0);
1221 lli->lli_opendir_key = key;
1222 lli->lli_opendir_pid = current_pid();
1223 lli->lli_sa_enabled = 1;
1225 spin_unlock(&lli->lli_sa_lock);
1229 * deauthorize opened dir handle @key to statahead, and notify statahead thread
1230 * to quit if it's running.
1232 void ll_deauthorize_statahead(struct inode *dir, void *key)
1234 struct ll_inode_info *lli = ll_i2info(dir);
1235 struct ll_statahead_info *sai;
1237 LASSERT(lli->lli_opendir_key == key);
1238 LASSERT(lli->lli_opendir_pid != 0);
1240 CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1241 PFID(&lli->lli_fid));
1243 spin_lock(&lli->lli_sa_lock);
1244 lli->lli_opendir_key = NULL;
1245 lli->lli_opendir_pid = 0;
1246 lli->lli_sa_enabled = 0;
1248 if (sai != NULL && thread_is_running(&sai->sai_thread)) {
1250 * the statahead thread may not have quit yet because it needs to cache
1251 * entries; now it's time to tell it to quit.
1253 * In case sai is released, wake_up() is called inside the spinlock,
1254 * so we have to call smp_mb() explicitly to serialize operations.
1256 thread_set_flags(&sai->sai_thread, SVC_STOPPING);
1258 wake_up(&sai->sai_thread.t_ctl_waitq);
1260 spin_unlock(&lli->lli_sa_lock);
1265 * not first dirent, or is "."
1267 LS_NOT_FIRST_DE = 0,
1269 * the first non-hidden dirent
1273 * the first hidden dirent, that is "dotxxx"
1278 /* file is first dirent under @dir */
1279 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1281 struct ll_dir_chain chain;
1282 struct qstr *target = &dentry->d_name;
1283 struct md_op_data *op_data;
1285 struct page *page = NULL;
1286 int rc = LS_NOT_FIRST_DE;
1290 op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1291 LUSTRE_OPC_ANY, dir);
1292 if (IS_ERR(op_data))
1293 RETURN(PTR_ERR(op_data));
1295 *FIXME choose the start offset of the readdir
1298 ll_dir_chain_init(&chain);
1299 page = ll_get_dir_page(dir, op_data, 0, &chain);
1302 struct lu_dirpage *dp;
1303 struct lu_dirent *ent;
1306 struct ll_inode_info *lli = ll_i2info(dir);
1309 CERROR("%s: reading dir "DFID" at %llu"
1310 "opendir_pid = %u : rc = %d\n",
1311 ll_get_fsname(dir->i_sb, NULL, 0),
1312 PFID(ll_inode2fid(dir)), pos,
1313 lli->lli_opendir_pid, rc);
1317 dp = page_address(page);
1318 for (ent = lu_dirent_start(dp); ent != NULL;
1319 ent = lu_dirent_next(ent)) {
1324 hash = le64_to_cpu(ent->lde_hash);
1325 /* ll_get_dir_page() can return any page containing
1326 * the given hash, which may not be the start hash. */
1327 if (unlikely(hash < pos))
1330 namelen = le16_to_cpu(ent->lde_namelen);
1331 if (unlikely(namelen == 0))
1333 * skip dummy record.
1337 name = ent->lde_name;
1338 if (name[0] == '.') {
1344 else if (name[1] == '.' && namelen == 2)
1355 if (dot_de && target->name[0] != '.') {
1356 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1357 target->len, target->name,
1362 if (target->len != namelen ||
1363 memcmp(target->name, name, namelen) != 0)
1364 rc = LS_NOT_FIRST_DE;
1368 rc = LS_FIRST_DOT_DE;
1370 ll_release_page(dir, page, false);
1373 pos = le64_to_cpu(dp->ldp_hash_end);
1374 if (pos == MDS_DIR_END_OFF) {
1376 * End of directory reached.
1378 ll_release_page(dir, page, false);
1382 * chain is exhausted
1383 * Normal case: continue to the next page.
1385 ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1387 page = ll_get_dir_page(dir, op_data, pos, &chain);
1392 ll_dir_chain_fini(&chain);
1393 ll_finish_md_op_data(op_data);
1398 * revalidate @dentryp from statahead cache
1400 * \param[in] dir parent directory
1401 * \param[in] sai sai structure
1402 * \param[out] dentryp pointer to dentry which will be revalidated
1403 * \param[in] unplug unplug statahead window only (normally for negative dentry)
1405 * \retval 1 on success, dentry is saved in @dentryp
1406 * \retval 0 if revalidation failed (no proper lock on client)
1407 * \retval negative number upon error
1409 static int revalidate_statahead_dentry(struct inode *dir,
1410 struct ll_statahead_info *sai,
1411 struct dentry **dentryp,
1414 struct sa_entry *entry = NULL;
1415 struct l_wait_info lwi = { 0 };
1416 struct ll_dentry_data *ldd;
1417 struct ll_inode_info *lli = ll_i2info(dir);
1421 if ((*dentryp)->d_name.name[0] == '.') {
1422 if (sai->sai_ls_all ||
1423 sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1425 * Hidden dentry is the first one, or statahead
1426 * thread does not skip so many hidden dentries
1427 * before "sai_ls_all" enabled as below.
1430 if (!sai->sai_ls_all)
1432 * It may be because the hidden dentry is not
1433 * the first one and "sai_ls_all" was not
1434 * set, so "ls -al" missed it. Enable
1435 * "sai_ls_all" for such a case.
1437 sai->sai_ls_all = 1;
1440 * Such "getattr" has been skipped before
1441 * "sai_ls_all" enabled as above.
1443 sai->sai_miss_hidden++;
1451 entry = sa_get(sai, &(*dentryp)->d_name);
1453 GOTO(out, rc = -EAGAIN);
1455 /* if statahead is busy in readdir, help it do post-work */
1456 if (!sa_ready(entry) && sai->sai_in_readpage)
1457 sa_handle_callback(sai);
1459 if (!sa_ready(entry)) {
1460 spin_lock(&lli->lli_sa_lock);
1461 sai->sai_index_wait = entry->se_index;
1462 spin_unlock(&lli->lli_sa_lock);
1463 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1464 LWI_ON_SIGNAL_NOOP, NULL);
1465 rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
1468 * the entry may not be ready, so it may be in use by an inflight
1469 * statahead RPC; don't free it.
1472 GOTO(out, rc = -EAGAIN);
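/* async stat succeeded and prepared an inode; revalidate the cached lock
 * before reusing the result for this dentry */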
1476 if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode != NULL) {
1477 struct inode *inode = entry->se_inode;
1478 struct lookup_intent it = { .it_op = IT_GETATTR,
1483 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1484 ll_inode2fid(inode), &bits);
1486 if ((*dentryp)->d_inode == NULL) {
1487 struct dentry *alias;
1489 alias = ll_splice_alias(inode, *dentryp);
1490 if (IS_ERR(alias)) {
1491 ll_intent_release(&it);
1492 GOTO(out, rc = PTR_ERR(alias));
1495 /* statahead prepared this inode, transfer inode
1496 * refcount from sa_entry to dentry */
1497 entry->se_inode = NULL;
1498 } else if ((*dentryp)->d_inode != inode) {
1499 /* revalidate, but inode is recreated */
1501 "%s: stale dentry %.*s inode "
1502 DFID", statahead inode "DFID
1504 ll_get_fsname((*dentryp)->d_inode->i_sb,
1506 (*dentryp)->d_name.len,
1507 (*dentryp)->d_name.name,
1508 PFID(ll_inode2fid((*dentryp)->d_inode)),
1509 PFID(ll_inode2fid(inode)));
1510 ll_intent_release(&it);
1511 GOTO(out, rc = -ESTALE);
1514 if ((bits & MDS_INODELOCK_LOOKUP) &&
1515 d_lustre_invalid(*dentryp))
1516 d_lustre_revalidate(*dentryp);
1517 ll_intent_release(&it);
1522 * statahead cached sa_entry can be used only once, and will be killed
1523 * right after use, so if lookup/revalidate accessed statahead cache,
1524 * set dentry ldd_sa_generation to parent lli_sa_generation, later if we
1525 * stat this file again, we know we've done statahead before, see
1526 * dentry_may_statahead().
1528 ldd = ll_d2d(*dentryp);
1529 /* ldd can be NULL if llite lookup failed. */
1531 ldd->lld_sa_generation = lli->lli_sa_generation;
1538 * start statahead thread
1540 * \param[in] dir parent directory
1541 * \param[in] dentry dentry that triggers statahead, normally the first
1543 * \retval -EAGAIN on success, because when this function is
1544 * called we are already in a lookup call, so the client should
1545 * do the getattr itself instead of waiting for the statahead
1546 * thread to do it asynchronously.
1547 * \retval negative number upon error
1549 static int start_statahead_thread(struct inode *dir, struct dentry *dentry)
1551 struct ll_inode_info *lli = ll_i2info(dir);
1552 struct ll_statahead_info *sai = NULL;
1553 struct dentry *parent = dentry->d_parent;
1554 struct ptlrpc_thread *thread;
1555 struct l_wait_info lwi = { 0 };
1556 struct task_struct *task;
1557 struct ll_sb_info *sbi = ll_i2sbi(parent->d_inode);
1558 int first = LS_FIRST_DE;
1562 /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
1563 first = is_first_dirent(dir, dentry);
1564 if (first == LS_NOT_FIRST_DE)
1565 /* It is not an "ls -{a}l" operation, so no need for statahead. */
1566 GOTO(out, rc = -EFAULT);
1568 if (unlikely(atomic_inc_return(&sbi->ll_sa_running) >
1569 sbi->ll_sa_running_max)) {
1571 "Too many concurrent statahead instances, "
1572 "avoid new statahead instance temporarily.\n");
1573 GOTO(out, rc = -EMFILE);
1576 sai = ll_sai_alloc(parent);
1578 GOTO(out, rc = -ENOMEM);
1580 sai->sai_ls_all = (first == LS_FIRST_DOT_DE);
1582 /* if current lli_opendir_key was deauthorized, or dir re-opened by
1583 * another process, don't start statahead, otherwise the newly spawned
1584 * statahead thread won't be notified to quit. */
1585 spin_lock(&lli->lli_sa_lock);
1586 if (unlikely(lli->lli_sai != NULL ||
1587 lli->lli_opendir_key == NULL ||
1588 lli->lli_opendir_pid != current->pid)) {
1589 spin_unlock(&lli->lli_sa_lock);
1590 GOTO(out, rc = -EPERM);
1593 spin_unlock(&lli->lli_sa_lock);
1595 CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
1596 current_pid(), parent->d_name.len, parent->d_name.name);
1598 task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
1599 lli->lli_opendir_pid);
1600 thread = &sai->sai_thread;
1602 spin_lock(&lli->lli_sa_lock);
1603 lli->lli_sai = NULL;
1604 spin_unlock(&lli->lli_sa_lock);
1606 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1610 l_wait_event(thread->t_ctl_waitq,
1611 thread_is_running(thread) || thread_is_stopped(thread),
1616 * We don't stat-ahead for the first dirent since we are already in lookup.
1622 /* if starting the statahead thread failed, disable statahead so that
1623 * subsequent stats won't waste time trying it. */
1624 spin_lock(&lli->lli_sa_lock);
1625 if (lli->lli_opendir_pid == current->pid)
1626 lli->lli_sa_enabled = 0;
1627 spin_unlock(&lli->lli_sa_lock);
1631 if (first != LS_NOT_FIRST_DE)
1632 atomic_dec(&sbi->ll_sa_running);
1638 * statahead entry function; this is called when the client does getattr on a
1639 * file. It starts the statahead thread if this is the first dir entry, otherwise
1640 * it revalidates the dentry from the statahead cache.
1642 * \param[in] dir parent directory
1643 * \param[out] dentryp dentry to getattr
1644 * \param[in] unplug unplug statahead window only (normally for negative dentry)
1646 * \retval 1 on success
1647 * \retval 0 revalidation from statahead cache failed, caller needs
1648 * to getattr from server directly
1649 * \retval negative number on error, caller often ignores this and
1650 * then getattr from server
1652 int ll_statahead(struct inode *dir, struct dentry **dentryp, bool unplug)
1654 struct ll_statahead_info *sai;
1656 sai = ll_sai_get(dir);
1660 rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
1661 CDEBUG(D_READA, "revalidate statahead %.*s: %d.\n",
1662 (*dentryp)->d_name.len, (*dentryp)->d_name.name, rc);
1666 return start_statahead_thread(dir, *dentryp);