4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
33 #include <linux/sched.h>
34 #include <linux/kthread.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/delay.h>
40 #define DEBUG_SUBSYSTEM S_LLITE
42 #include <obd_support.h>
43 #include <lustre_dlm.h>
44 #include "llite_internal.h"
46 #define SA_OMITTED_ENTRY_MAX 8ULL
49 /** negative values are for error cases */
50 SA_ENTRY_INIT = 0, /** init entry */
51 SA_ENTRY_SUCC = 1, /** stat succeeded */
52 SA_ENTRY_INVA = 2, /** invalid entry */
56 * sa_entry is not refcounted: the statahead thread allocates it and does the
57 * async stat, and the async stat callback ll_statahead_interpret() prepares the
58 * inode and sets the lock data in the ptlrpcd context. Then the scanner process
59 * is woken up if this entry is the one it waits on, and can access and free it.
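*
* A rough lifecycle sketch, derived from the code below (illustrative only):
*   sa_alloc()                   - state SA_ENTRY_INIT, hashed into sax_cache
*   sa_lookup()/sa_revalidate()  - async GETATTR sent (or ibits lock reused)
*   ll_statahead_interpret()     - __sa_make_ready() moves the entry to
*                                  SA_ENTRY_SUCC or SA_ENTRY_INVA and wakes
*                                  the scanner
*   sa_put() -> sa_kill()        - the scanner consumes the entry once, then
*                                  unhashes and frees it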
62 /* link into sai_entries */
63 struct list_head se_list;
64 /* link into sax_cache hash table */
65 struct list_head se_hash;
66 /* entry index in the sai */
68 /* low layer ldlm lock handle */
72 /* entry size, contains name */
74 /* pointer to the target inode */
75 struct inode *se_inode;
76 /* pointer to @sai per process struct */
77 struct ll_statahead_info *se_sai;
84 static unsigned int sai_generation;
85 static DEFINE_SPINLOCK(sai_generation_lock);
87 static inline int sa_unhashed(struct sa_entry *entry)
89 return list_empty(&entry->se_hash);
92 /* sa_entry is ready to use */
93 static inline int sa_ready(struct sa_entry *entry)
95 /* Make sure sa_entry is updated and ready to use */
97 return (entry->se_state != SA_ENTRY_INIT);
100 /* hash value to put in sax_cache */
101 static inline int sa_hash(int val)
103 return val & LL_SA_CACHE_MASK;
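/*
* Illustrative note: the qstr hash produced by ll_full_name_hash() is folded
* into one of the LL_SA_CACHE_SIZE buckets of ctx->sax_cache[] by this mask;
* this assumes LL_SA_CACHE_MASK == LL_SA_CACHE_SIZE - 1, as the names suggest.
*/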
106 /* hash entry into sax_cache */
108 sa_rehash(struct ll_statahead_context *ctx, struct sa_entry *entry)
110 int i = sa_hash(entry->se_qstr.hash);
112 spin_lock(&ctx->sax_cache_lock[i]);
113 list_add_tail(&entry->se_hash, &ctx->sax_cache[i]);
114 spin_unlock(&ctx->sax_cache_lock[i]);
117 /* unhash entry from sax_cache */
119 sa_unhash(struct ll_statahead_context *ctx, struct sa_entry *entry)
121 int i = sa_hash(entry->se_qstr.hash);
123 spin_lock(&ctx->sax_cache_lock[i]);
124 list_del_init(&entry->se_hash);
125 spin_unlock(&ctx->sax_cache_lock[i]);
128 static inline int agl_should_run(struct ll_statahead_info *sai,
131 return inode && S_ISREG(inode->i_mode) && sai->sai_agl_task;
134 static inline struct ll_inode_info *
135 agl_first_entry(struct ll_statahead_info *sai)
137 return list_first_entry(&sai->sai_agls, struct ll_inode_info,
141 /* statahead window is full */
142 static inline int sa_sent_full(struct ll_statahead_info *sai)
144 return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
147 /* Batch metadata handle */
148 static inline bool sa_has_batch_handle(struct ll_statahead_info *sai)
150 return sai->sai_bh != NULL;
153 static inline void ll_statahead_flush_nowait(struct ll_statahead_info *sai)
155 if (sa_has_batch_handle(sai)) {
156 sai->sai_index_end = sai->sai_index - 1;
157 (void) md_batch_flush(ll_i2mdexp(sai->sai_dentry->d_inode),
162 static inline int agl_list_empty(struct ll_statahead_info *sai)
164 return list_empty(&sai->sai_agls);
168 * (1) hit ratio is less than 80%
170 * (2) more than 8 consecutive misses
171 * either condition means the hit rate is low.
173 static inline int sa_low_hit(struct ll_statahead_info *sai)
175 return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
176 (sai->sai_consecutive_miss > 8));
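/*
* Worked example (illustrative): with sai_hit = 10 and sai_miss = 3 the first
* clause fires, since 10 > 7 and 10 < 4 * 3, i.e. the hit ratio is 10/13,
* roughly 77% and below the 80% threshold described above. Independently,
* more than 8 consecutive misses trips the second clause.
*/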
180 * if the given index falls behind the statahead window by more than
181 * SA_OMITTED_ENTRY_MAX, then it is old.
183 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
185 return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
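/*
* Illustrative example: with sai_max = 32, an index that trails the statahead
* window by more than 32 + SA_OMITTED_ENTRY_MAX = 40 entries is treated as
* old and may be kicked out (see sa_put() below).
*/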
189 /* allocate sa_entry and hash it to allow scanner process to find it */
190 static struct sa_entry *
191 sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index,
192 const char *name, int len, const struct lu_fid *fid)
194 struct ll_inode_info *lli;
195 struct sa_entry *entry;
201 entry_size = sizeof(struct sa_entry) +
202 round_up(len + 1 /* for trailing NUL */, 4);
203 OBD_ALLOC(entry, entry_size);
204 if (unlikely(!entry))
205 RETURN(ERR_PTR(-ENOMEM));
207 CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
208 len, name, entry, index);
210 entry->se_index = index;
213 entry->se_state = SA_ENTRY_INIT;
214 entry->se_size = entry_size;
215 dname = (char *)entry + sizeof(struct sa_entry);
216 memcpy(dname, name, len);
218 entry->se_qstr.hash = ll_full_name_hash(parent, name, len);
219 entry->se_qstr.len = len;
220 entry->se_qstr.name = dname;
223 entry->se_fid = *fid;
225 lli = ll_i2info(sai->sai_dentry->d_inode);
226 spin_lock(&lli->lli_sa_lock);
227 INIT_LIST_HEAD(&entry->se_list);
228 sa_rehash(lli->lli_sax, entry);
229 spin_unlock(&lli->lli_sa_lock);
231 atomic_inc(&sai->sai_cache_count);
236 /* free sa_entry, which should have been unhashed and not in any list */
237 static void sa_free(struct ll_statahead_context *ctx, struct sa_entry *entry)
239 CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
240 entry->se_qstr.len, entry->se_qstr.name, entry,
243 LASSERT(list_empty(&entry->se_list));
244 LASSERT(sa_unhashed(entry));
246 OBD_FREE(entry, entry->se_size);
250 * find sa_entry by name, used by the directory scanner; no lock is needed
251 * because only the scanner can remove the entry from the cache.
253 static struct sa_entry *
254 sa_get(struct ll_statahead_context *ctx, const struct qstr *qstr)
256 struct sa_entry *entry;
257 int i = sa_hash(qstr->hash);
259 spin_lock(&ctx->sax_cache_lock[i]);
260 list_for_each_entry(entry, &ctx->sax_cache[i], se_hash) {
261 if (entry->se_qstr.hash == qstr->hash &&
262 entry->se_qstr.len == qstr->len &&
263 memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0) {
264 spin_unlock(&ctx->sax_cache_lock[i]);
268 spin_unlock(&ctx->sax_cache_lock[i]);
272 /* unhash and unlink sa_entry, and then free it */
274 sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry, bool locked)
276 struct inode *dir = sai->sai_dentry->d_inode;
277 struct ll_inode_info *lli = ll_i2info(dir);
278 struct ll_statahead_context *ctx = lli->lli_sax;
280 LASSERT(!sa_unhashed(entry));
281 LASSERT(!list_empty(&entry->se_list));
282 LASSERT(sa_ready(entry));
284 sa_unhash(ctx, entry);
287 spin_lock(&lli->lli_sa_lock);
288 list_del_init(&entry->se_list);
289 spin_unlock(&lli->lli_sa_lock);
291 iput(entry->se_inode);
292 atomic_dec(&sai->sai_cache_count);
295 spin_lock(&lli->lli_sa_lock);
298 /* called by scanner after use, sa_entry will be killed */
300 sa_put(struct inode *dir, struct ll_statahead_info *sai, struct sa_entry *entry)
302 struct ll_inode_info *lli = ll_i2info(dir);
303 struct sa_entry *tmp;
306 if (entry && entry->se_state == SA_ENTRY_SUCC) {
307 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
310 sai->sai_consecutive_miss = 0;
311 if (sai->sai_max < sbi->ll_sa_max) {
312 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
314 } else if (sai->sai_max_batch_count > 0) {
315 if (sai->sai_max >= sai->sai_max_batch_count &&
316 (sai->sai_index_end - entry->se_index) %
317 sai->sai_max_batch_count == 0) {
319 } else if (entry->se_index == sai->sai_index_end) {
327 sai->sai_consecutive_miss++;
332 sa_kill(sai, entry, false);
336 * kill old completed entries. Maybe kicking old entries can
339 spin_lock(&lli->lli_sa_lock);
340 while ((tmp = list_first_entry_or_null(&sai->sai_entries,
341 struct sa_entry, se_list))) {
342 if (!is_omitted_entry(sai, tmp->se_index))
345 /* ll_sa_lock is dropped by sa_kill(), restart list */
346 sa_kill(sai, tmp, true);
348 spin_unlock(&lli->lli_sa_lock);
351 spin_lock(&lli->lli_sa_lock);
352 if (wakeup && sai->sai_task)
353 wake_up_process(sai->sai_task);
354 spin_unlock(&lli->lli_sa_lock);
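/*
* Window sizing note (sketch based on the code above): a hit resets the
* consecutive-miss counter and doubles sai_max, capped at sbi->ll_sa_max;
* ll_sai_alloc() starts the window at LL_SA_RPC_MIN, so the in-flight window
* ramps up while the hit rate stays good, and sa_low_hit() lets the statahead
* thread stop itself once the ratio degrades.
*/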
358 * update state and insert the entry into sai_entries sorted by index; return
359 * true if the scanner is waiting on this entry.
362 __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
365 struct list_head *pos = &sai->sai_entries;
366 __u64 index = entry->se_index;
368 LASSERT(!sa_ready(entry));
369 LASSERT(list_empty(&entry->se_list));
371 list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
372 if (se->se_index < entry->se_index) {
377 list_add(&entry->se_list, pos);
379 * LU-9210: ll_statahead_interpret must be able to see this before
382 smp_store_release(&entry->se_state,
383 ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
385 return (index == sai->sai_index_wait);
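/*
* Memory-ordering note (informal): the smp_store_release() of se_state above
* pairs with the barrier noted in sa_ready() and with the smp_load_acquire()
* of se_state in revalidate_statahead_dentry(), so a waiter that observes
* SA_ENTRY_SUCC/SA_ENTRY_INVA also observes se_inode, se_handle and the list
* insertion performed before the state change (see the LU-9210 comment above).
*/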
388 /* finish async stat RPC arguments */
389 static void sa_fini_data(struct md_op_item *item)
391 struct md_op_data *op_data = &item->mop_data;
393 if (op_data->op_flags & MF_OPNAME_KMALLOCED)
394 /* allocated via ll_setup_filename called from sa_prep_data */
395 kfree(op_data->op_name);
396 ll_unlock_md_op_lsm(&item->mop_data);
398 if (item->mop_subpill_allocated)
399 OBD_FREE_PTR(item->mop_pill);
403 static int ll_statahead_interpret(struct md_op_item *item, int rc);
406 * prepare arguments for async stat RPC.
408 static struct md_op_item *
409 sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry)
411 struct md_op_item *item;
412 struct ldlm_enqueue_info *einfo;
413 struct md_op_data *op_data;
417 return ERR_PTR(-ENOMEM);
419 op_data = ll_prep_md_op_data(&item->mop_data, dir, child,
420 entry->se_qstr.name, entry->se_qstr.len, 0,
421 LUSTRE_OPC_ANY, NULL);
422 if (IS_ERR(op_data)) {
424 return (struct md_op_item *)op_data;
428 op_data->op_fid2 = entry->se_fid;
430 item->mop_opc = MD_OP_GETATTR;
431 item->mop_it.it_op = IT_GETATTR;
432 item->mop_dir = igrab(dir);
433 item->mop_cb = ll_statahead_interpret;
434 item->mop_cbdata = entry;
436 einfo = &item->mop_einfo;
437 einfo->ei_type = LDLM_IBITS;
438 einfo->ei_mode = it_to_lock_mode(&item->mop_it);
439 einfo->ei_cb_bl = ll_md_blocking_ast;
440 einfo->ei_cb_cp = ldlm_completion_ast;
441 einfo->ei_cb_gl = NULL;
442 einfo->ei_cbdata = NULL;
443 einfo->ei_req_slot = 1;
449 * release resources used in the async stat RPC, update the entry state, and
450 * wake up the scanner process if it is waiting on this entry.
453 sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
455 struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
458 spin_lock(&lli->lli_sa_lock);
459 wakeup = __sa_make_ready(sai, entry, ret);
460 spin_unlock(&lli->lli_sa_lock);
463 wake_up(&sai->sai_waitq);
466 /* insert inode into the list of sai_agls */
467 static void ll_agl_add(struct ll_statahead_info *sai,
468 struct inode *inode, int index)
470 struct ll_inode_info *child = ll_i2info(inode);
471 struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);
473 spin_lock(&child->lli_agl_lock);
474 if (child->lli_agl_index == 0) {
475 child->lli_agl_index = index;
476 spin_unlock(&child->lli_agl_lock);
478 LASSERT(list_empty(&child->lli_agl_list));
480 spin_lock(&parent->lli_agl_lock);
481 /* Re-check under the lock */
482 if (agl_should_run(sai, inode)) {
483 if (agl_list_empty(sai))
484 wake_up_process(sai->sai_agl_task);
486 list_add_tail(&child->lli_agl_list, &sai->sai_agls);
488 child->lli_agl_index = 0;
489 spin_unlock(&parent->lli_agl_lock);
491 spin_unlock(&child->lli_agl_lock);
496 static struct ll_statahead_context *ll_sax_alloc(struct inode *dir)
498 struct ll_statahead_context *ctx;
507 ctx->sax_inode = igrab(dir);
508 atomic_set(&ctx->sax_refcount, 1);
509 for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
510 INIT_LIST_HEAD(&ctx->sax_cache[i]);
511 spin_lock_init(&ctx->sax_cache_lock[i]);
517 static inline void ll_sax_free(struct ll_statahead_context *ctx)
519 LASSERT(ctx->sax_inode != NULL);
520 iput(ctx->sax_inode);
524 static inline void __ll_sax_get(struct ll_statahead_context *ctx)
526 atomic_inc(&ctx->sax_refcount);
529 static inline struct ll_statahead_context *ll_sax_get(struct inode *dir)
531 struct ll_inode_info *lli = ll_i2info(dir);
532 struct ll_statahead_context *ctx = NULL;
534 spin_lock(&lli->lli_sa_lock);
538 spin_unlock(&lli->lli_sa_lock);
543 static inline void ll_sax_put(struct inode *dir,
544 struct ll_statahead_context *ctx)
546 struct ll_inode_info *lli = ll_i2info(dir);
548 if (atomic_dec_and_lock(&ctx->sax_refcount, &lli->lli_sa_lock)) {
551 if (lli->lli_sa_pattern == LSA_PATTERN_FNAME) {
552 lli->lli_opendir_key = NULL;
553 lli->lli_opendir_pid = 0;
554 lli->lli_sa_enabled = 0;
556 lli->lli_sa_pattern = LSA_PATTERN_NONE;
557 spin_unlock(&lli->lli_sa_lock);
564 static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
566 struct ll_statahead_info *sai;
567 struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
575 sai->sai_dentry = dget(dentry);
576 atomic_set(&sai->sai_refcount, 1);
577 sai->sai_max = LL_SA_RPC_MIN;
579 init_waitqueue_head(&sai->sai_waitq);
581 INIT_LIST_HEAD(&sai->sai_entries);
582 INIT_LIST_HEAD(&sai->sai_agls);
584 atomic_set(&sai->sai_cache_count, 0);
586 spin_lock(&sai_generation_lock);
587 lli->lli_sa_generation = ++sai_generation;
588 if (unlikely(sai_generation == 0))
589 lli->lli_sa_generation = ++sai_generation;
590 spin_unlock(&sai_generation_lock);
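/*
* Note (inferred from the code above): sai_generation is a global counter
* under sai_generation_lock; it is bumped again when it wraps to 0,
* presumably so that generation 0 remains distinguishable from any real
* generation recorded in a dentry's ldd_sa_generation (see
* revalidate_statahead_dentry() and dentry_may_statahead()).
*/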
596 static inline void ll_sai_free(struct ll_statahead_info *sai)
598 LASSERT(sai->sai_dentry != NULL);
599 dput(sai->sai_dentry);
603 static inline struct ll_statahead_info *
604 __ll_sai_get(struct ll_statahead_info *sai)
606 atomic_inc(&sai->sai_refcount);
611 * put sai refcount after use, if refcount reaches zero, free sai and sa_entries
614 static void ll_sai_put(struct ll_statahead_info *sai)
616 struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
618 if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
619 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
620 struct sa_entry *entry, *next;
623 spin_unlock(&lli->lli_sa_lock);
625 LASSERT(!sai->sai_task);
626 LASSERT(!sai->sai_agl_task);
627 LASSERT(sai->sai_sent == sai->sai_replied);
629 list_for_each_entry_safe(entry, next, &sai->sai_entries,
631 sa_kill(sai, entry, false);
633 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
634 LASSERT(agl_list_empty(sai));
637 atomic_dec(&sbi->ll_sa_running);
641 /* Do NOT forget to drop the inode refcount once it is put onto sai_agls. */
642 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
644 struct ll_inode_info *lli = ll_i2info(inode);
645 u64 index = lli->lli_agl_index;
651 LASSERT(list_empty(&lli->lli_agl_list));
653 /* AGL may fall behind statahead by one entry */
654 if (is_omitted_entry(sai, index + 1)) {
655 lli->lli_agl_index = 0;
661 * In case of restore, the MDT has the right size and has already
662 * sent it back without granting the layout lock, so the inode is up-to-date.
663 * Then AGL (async glimpse lock) is useless.
664 * Also, to glimpse we need the layout; while a restore is running
665 * the MDT holds the layout lock, so the glimpse will block up to the
666 * end of the restore (statahead/agl will block).
668 if (test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
669 lli->lli_agl_index = 0;
674 /* Someone is in glimpse (sync or async), do nothing. */
675 rc = down_write_trylock(&lli->lli_glimpse_sem);
677 lli->lli_agl_index = 0;
683 * Someone triggered a glimpse within the last second.
684 * 1) The former glimpse succeeded with a glimpse lock granted by the OST;
685 * if the lock is still cached on the client, AGL needs to do nothing. If
686 * it has been cancelled by another client, AGL may not be able to obtain
687 * a new lock, since AGL triggers no glimpse callback.
688 * 2) The former glimpse succeeded, but the OST did not grant a glimpse lock.
689 * In that case, it is quite possible that the OST will not grant a
690 * glimpse lock for AGL either.
691 * 3) The former glimpse failed; compared with the other two cases this is
692 * relatively rare. AGL can ignore such a case, and it will not much
693 * affect performance.
695 expire = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
696 if (ktime_to_ns(lli->lli_glimpse_time) &&
697 ktime_before(expire, lli->lli_glimpse_time)) {
698 up_write(&lli->lli_glimpse_sem);
699 lli->lli_agl_index = 0;
705 "Handling (init) async glimpse: inode = " DFID", idx = %llu\n",
706 PFID(&lli->lli_fid), index);
709 lli->lli_agl_index = 0;
710 lli->lli_glimpse_time = ktime_get();
711 up_write(&lli->lli_glimpse_sem);
714 "Handled (init) async glimpse: inode= " DFID", idx = %llu, rc = %d\n",
715 PFID(&lli->lli_fid), index, rc);
722 static void ll_statahead_interpret_fini(struct ll_inode_info *lli,
723 struct ll_statahead_info *sai,
724 struct md_op_item *item,
725 struct sa_entry *entry,
726 struct ptlrpc_request *req,
730 * First it drops the ldlm ibits lock refcount by calling
731 * ll_intent_drop_lock(), regardless of failures. It is safe to call
732 * ll_intent_drop_lock() more than once.
734 ll_intent_release(&item->mop_it);
737 ptlrpc_req_finished(req);
738 sa_make_ready(sai, entry, rc);
740 spin_lock(&lli->lli_sa_lock);
742 spin_unlock(&lli->lli_sa_lock);
745 static void ll_statahead_interpret_work(struct work_struct *work)
747 struct md_op_item *item = container_of(work, struct md_op_item,
749 struct req_capsule *pill = item->mop_pill;
750 struct inode *dir = item->mop_dir;
751 struct ll_inode_info *lli = ll_i2info(dir);
752 struct ll_statahead_info *sai;
753 struct lookup_intent *it;
754 struct sa_entry *entry;
755 struct mdt_body *body;
761 entry = (struct sa_entry *)item->mop_cbdata;
762 LASSERT(entry->se_handle != 0);
766 body = req_capsule_server_get(pill, &RMF_MDT_BODY);
768 GOTO(out, rc = -EFAULT);
770 child = entry->se_inode;
771 /* revalidate; unlinked and re-created with the same name */
772 if (unlikely(!fid_is_zero(&item->mop_data.op_fid2) &&
773 !lu_fid_eq(&item->mop_data.op_fid2, &body->mbo_fid1))) {
775 entry->se_inode = NULL;
778 /* The mdt_body is invalid. Skip this entry */
779 GOTO(out, rc = -EAGAIN);
782 it->it_lock_handle = entry->se_handle;
783 rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
785 GOTO(out, rc = -EAGAIN);
787 rc = ll_prep_inode(&child, pill, dir->i_sb, it);
789 CERROR("%s: getattr callback for %.*s "DFID": rc = %d\n",
790 ll_i2sbi(dir)->ll_fsname, entry->se_qstr.len,
791 entry->se_qstr.name, PFID(&entry->se_fid), rc);
795 /* If encryption context was returned by MDT, put it in
796 * inode now to save an extra getxattr.
798 if (body->mbo_valid & OBD_MD_ENCCTX) {
799 void *encctx = req_capsule_server_get(pill, &RMF_FILE_ENCCTX);
800 __u32 encctxlen = req_capsule_get_size(pill, &RMF_FILE_ENCCTX,
805 "server returned encryption ctx for "DFID"\n",
806 PFID(ll_inode2fid(child)));
807 rc = ll_xattr_cache_insert(child,
808 xattr_for_enc(child),
811 CWARN("%s: cannot set enc ctx for "DFID": rc = %d\n",
812 ll_i2sbi(child)->ll_fsname,
813 PFID(ll_inode2fid(child)), rc);
817 CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
818 ll_i2sbi(dir)->ll_fsname, entry->se_qstr.len,
819 entry->se_qstr.name, PFID(ll_inode2fid(child)), child);
820 ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
822 entry->se_inode = child;
824 if (agl_should_run(sai, child))
825 ll_agl_add(sai, child, entry->se_index);
827 ll_statahead_interpret_fini(lli, sai, item, entry, pill->rc_req, rc);
831 * Callback for the async stat RPC; this is called in ptlrpcd context. It prepares
832 * the inode and sets the lock data directly in the ptlrpcd context. It will wake
833 * up the directory listing process if the dentry is the one being waited on.
835 static int ll_statahead_interpret(struct md_op_item *item, int rc)
837 struct req_capsule *pill = item->mop_pill;
838 struct lookup_intent *it = &item->mop_it;
839 struct inode *dir = item->mop_dir;
840 struct ll_inode_info *lli = ll_i2info(dir);
841 struct sa_entry *entry = (struct sa_entry *)item->mop_cbdata;
842 struct work_struct *work = &item->mop_work;
843 struct ll_statahead_info *sai;
844 struct mdt_body *body;
850 if (it_disposition(it, DISP_LOOKUP_NEG))
854 * because the statahead thread waits for all inflight RPCs to finish,
855 * sai is always valid here; no need to take a refcount
857 LASSERT(entry != NULL);
859 LASSERT(sai != NULL);
861 CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
862 entry->se_qstr.len, entry->se_qstr.name, rc);
867 body = req_capsule_server_get(pill, &RMF_MDT_BODY);
869 GOTO(out, rc = -EFAULT);
871 child = entry->se_inode;
873 * revalidate; unlinked and re-created with the same name.
874 * exclude the case where the FID is zero, as the entry came from statahead
875 * with a regularized file name pattern and did not know the FID of the
878 if (unlikely(!fid_is_zero(&item->mop_data.op_fid2) &&
879 !lu_fid_eq(&item->mop_data.op_fid2, &body->mbo_fid1))) {
881 entry->se_inode = NULL;
884 /* The mdt_body is invalid. Skip this entry */
885 GOTO(out, rc = -EAGAIN);
888 entry->se_handle = it->it_lock_handle;
890 * In ptlrpcd context, it is not allowed to generate new RPCs
891 * especially for striped directories or regular files with layout
895 * release ibits lock ASAP to avoid deadlock when statahead
896 * thread enqueues lock on parent in readdir and another
897 * process enqueues lock on child with parent lock held, eg.
900 handle = it->it_lock_handle;
901 ll_intent_drop_lock(it);
902 ll_unlock_md_op_lsm(&item->mop_data);
905 * If the statahead entry is a striped directory or a regular file with a
906 * layout change, it will generate a new RPC and a long wait in the
908 * However, it is dangerous to block in a ptlrpcd thread.
909 * Here we use a work queue or the separate statahead thread to handle
910 * the extra RPC and the long wait:
911 * (@ll_prep_inode->@lmv_revalidate_slaves);
912 * (@ll_prep_inode->@lov_layout_change->osc_cache_wait_range);
914 INIT_WORK(work, ll_statahead_interpret_work);
915 ptlrpc_request_addref(pill->rc_req);
919 ll_statahead_interpret_fini(lli, sai, item, entry, NULL, rc);
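/*
* Summary sketch of the interpret path above: the callback records the lock
* handle, drops the ibits lock early to avoid the readdir/child deadlock
* described above, and defers inode preparation that may need extra RPCs
* (lmv_revalidate_slaves for striped directories, osc_cache_wait_range for
* layout changes) to ll_statahead_interpret_work() instead of blocking the
* ptlrpcd thread.
*/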
923 static inline int sa_getattr(struct ll_statahead_info *sai, struct inode *dir,
924 struct md_op_item *item)
928 if (sa_has_batch_handle(sai))
929 rc = md_batch_add(ll_i2mdexp(dir), sai->sai_bh, item);
931 rc = md_intent_getattr_async(ll_i2mdexp(dir), item);
936 /* async stat for file not found in dcache */
937 static int sa_lookup(struct inode *dir, struct sa_entry *entry)
939 struct md_op_item *item;
944 item = sa_prep_data(dir, NULL, entry);
946 RETURN(PTR_ERR(item));
948 rc = sa_getattr(entry->se_sai, dir, item);
956 * async stat for file found in dcache, similar to .revalidate
958 * \retval 1 dentry valid, no RPC sent
959 * \retval 0 dentry invalid, will send async stat RPC
960 * \retval negative number upon error
962 static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
963 struct dentry *dentry)
965 struct inode *inode = dentry->d_inode;
966 struct lookup_intent it = { .it_op = IT_GETATTR,
967 .it_lock_handle = 0 };
968 struct md_op_item *item;
973 if (unlikely(!inode))
976 if (d_mountpoint(dentry))
979 item = sa_prep_data(dir, inode, entry);
981 RETURN(PTR_ERR(item));
983 entry->se_inode = igrab(inode);
984 rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
987 entry->se_handle = it.it_lock_handle;
988 ll_intent_release(&it);
993 rc = sa_getattr(entry->se_sai, dir, item);
995 entry->se_inode = NULL;
1003 /* async stat for file with @name */
1004 static void sa_statahead(struct ll_statahead_info *sai, struct dentry *parent,
1005 const char *name, int len, const struct lu_fid *fid)
1007 struct inode *dir = parent->d_inode;
1008 struct dentry *dentry = NULL;
1009 struct sa_entry *entry;
1014 entry = sa_alloc(parent, sai, sai->sai_index, name, len, fid);
1018 dentry = d_lookup(parent, &entry->se_qstr);
1020 rc = sa_lookup(dir, entry);
1022 rc = sa_revalidate(dir, entry, dentry);
1023 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
1024 ll_agl_add(sai, dentry->d_inode, entry->se_index);
1031 sa_make_ready(sai, entry, rc);
1037 if (sa_sent_full(sai))
1038 ll_statahead_flush_nowait(sai);
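/*
* Flow sketch for sa_statahead(), from the code above: allocate and hash an
* sa_entry, then either send an async GETATTR for a name missing from the
* dcache (sa_lookup()) or revalidate a cached dentry (sa_revalidate(), which
* can skip the RPC when the ibits lock is still valid and then hands the
* inode to AGL); once the in-flight window is full, batched requests are
* flushed via ll_statahead_flush_nowait().
*/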
1043 /* async glimpse (agl) thread main function */
1044 static int ll_agl_thread(void *arg)
1047 * We already own this reference, so it is safe to take it
1050 struct ll_statahead_info *sai = (struct ll_statahead_info *)arg;
1051 struct dentry *parent = sai->sai_dentry;
1052 struct inode *dir = parent->d_inode;
1053 struct ll_inode_info *plli = ll_i2info(dir);
1054 struct ll_inode_info *clli;
1058 CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
1061 while (({set_current_state(TASK_IDLE);
1062 !kthread_should_stop(); })) {
1063 spin_lock(&plli->lli_agl_lock);
1064 clli = list_first_entry_or_null(&sai->sai_agls,
1065 struct ll_inode_info,
1068 __set_current_state(TASK_RUNNING);
1069 list_del_init(&clli->lli_agl_list);
1070 spin_unlock(&plli->lli_agl_lock);
1071 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1074 spin_unlock(&plli->lli_agl_lock);
1078 __set_current_state(TASK_RUNNING);
1082 static void ll_stop_agl(struct ll_statahead_info *sai)
1084 struct dentry *parent = sai->sai_dentry;
1085 struct ll_inode_info *plli = ll_i2info(parent->d_inode);
1086 struct ll_inode_info *clli;
1087 struct task_struct *agl_task;
1089 spin_lock(&plli->lli_agl_lock);
1090 agl_task = sai->sai_agl_task;
1091 sai->sai_agl_task = NULL;
1092 spin_unlock(&plli->lli_agl_lock);
1096 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1097 sai, (unsigned int)agl_task->pid);
1098 kthread_stop(agl_task);
1100 spin_lock(&plli->lli_agl_lock);
1101 while ((clli = list_first_entry_or_null(&sai->sai_agls,
1102 struct ll_inode_info,
1103 lli_agl_list)) != NULL) {
1104 list_del_init(&clli->lli_agl_list);
1105 spin_unlock(&plli->lli_agl_lock);
1106 clli->lli_agl_index = 0;
1107 iput(&clli->lli_vfs_inode);
1108 spin_lock(&plli->lli_agl_lock);
1110 spin_unlock(&plli->lli_agl_lock);
1111 CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
1116 /* start agl thread */
1117 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1119 int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
1120 struct ll_inode_info *plli;
1121 struct task_struct *task;
1125 CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
1128 plli = ll_i2info(parent->d_inode);
1129 task = kthread_create_on_node(ll_agl_thread, sai, node, "ll_agl_%d",
1130 plli->lli_opendir_pid);
1132 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
1135 sai->sai_agl_task = task;
1136 atomic_inc(&ll_i2sbi(d_inode(parent))->ll_agl_total);
1137 /* Get an extra reference that the thread holds */
1140 wake_up_process(task);
1145 static int ll_statahead_by_list(struct dentry *parent)
1147 struct inode *dir = parent->d_inode;
1148 struct ll_inode_info *lli = ll_i2info(dir);
1149 struct ll_statahead_info *sai = lli->lli_sai;
1150 struct ll_sb_info *sbi = ll_i2sbi(dir);
1151 struct md_op_data *op_data;
1152 struct page *page = NULL;
1159 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
1162 OBD_ALLOC_PTR(op_data);
1166 /* matches smp_store_release() in ll_deauthorize_statahead() */
1167 while (pos != MDS_DIR_END_OFF && smp_load_acquire(&sai->sai_task)) {
1168 struct lu_dirpage *dp;
1169 struct lu_dirent *ent;
1171 op_data = ll_prep_md_op_data(op_data, dir, dir, NULL, 0, 0,
1172 LUSTRE_OPC_ANY, dir);
1173 if (IS_ERR(op_data)) {
1174 rc = PTR_ERR(op_data);
1178 page = ll_get_dir_page(dir, op_data, pos, NULL);
1179 ll_unlock_md_op_lsm(op_data);
1183 "error reading dir "DFID" at %llu /%llu opendir_pid = %u: rc = %d\n",
1184 PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1185 lli->lli_opendir_pid, rc);
1189 dp = page_address(page);
1190 for (ent = lu_dirent_start(dp);
1191 /* matches smp_store_release() in ll_deauthorize_statahead() */
1192 ent != NULL && smp_load_acquire(&sai->sai_task) &&
1194 ent = lu_dirent_next(ent)) {
1199 struct llcrypt_str lltr = LLTR_INIT(NULL, 0);
1201 hash = le64_to_cpu(ent->lde_hash);
1202 if (unlikely(hash < pos))
1204 * Skip until we find target hash value.
1208 namelen = le16_to_cpu(ent->lde_namelen);
1209 if (unlikely(namelen == 0))
1211 * Skip dummy record.
1215 name = ent->lde_name;
1216 if (name[0] == '.') {
1222 } else if (name[1] == '.' && namelen == 2) {
1227 } else if (!sai->sai_ls_all) {
1229 * skip hidden files.
1231 sai->sai_skip_hidden++;
1237 * don't stat-ahead first entry.
1239 if (unlikely(++first == 1))
1242 fid_le_to_cpu(&fid, &ent->lde_fid);
1244 while (({set_current_state(TASK_IDLE);
1245 /* matches smp_store_release() in
1246 * ll_deauthorize_statahead() */
1247 smp_load_acquire(&sai->sai_task); })) {
1248 spin_lock(&lli->lli_agl_lock);
1249 while (sa_sent_full(sai) &&
1250 !agl_list_empty(sai)) {
1251 struct ll_inode_info *clli;
1253 __set_current_state(TASK_RUNNING);
1254 clli = agl_first_entry(sai);
1255 list_del_init(&clli->lli_agl_list);
1256 spin_unlock(&lli->lli_agl_lock);
1258 ll_agl_trigger(&clli->lli_vfs_inode,
1261 spin_lock(&lli->lli_agl_lock);
1263 spin_unlock(&lli->lli_agl_lock);
1265 if (!sa_sent_full(sai))
1269 __set_current_state(TASK_RUNNING);
1271 if (IS_ENCRYPTED(dir)) {
1272 struct llcrypt_str de_name =
1273 LLTR_INIT(ent->lde_name, namelen);
1276 rc = llcrypt_fname_alloc_buffer(dir, NAME_MAX,
1281 fid_le_to_cpu(&fid, &ent->lde_fid);
1282 if (ll_fname_disk_to_usr(dir, 0, 0, &de_name,
1284 llcrypt_fname_free_buffer(&lltr);
1292 sa_statahead(sai, parent, name, namelen, &fid);
1293 llcrypt_fname_free_buffer(&lltr);
1296 pos = le64_to_cpu(dp->ldp_hash_end);
1297 ll_release_page(dir, page,
1298 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1300 if (sa_low_hit(sai)) {
1302 atomic_inc(&sbi->ll_sa_wrong);
1304 "Statahead for dir "DFID" hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stoppingstatahead thread: pid %d\n",
1305 PFID(&lli->lli_fid), sai->sai_hit,
1306 sai->sai_miss, sai->sai_sent,
1307 sai->sai_replied, current->pid);
1311 ll_finish_md_op_data(op_data);
1316 static int ll_statahead_by_fname(struct ll_statahead_info *sai,
1317 struct dentry *parent)
1319 struct inode *dir = parent->d_inode;
1320 struct ll_inode_info *lli = ll_i2info(dir);
1321 struct ll_sb_info *sbi = ll_i2sbi(dir);
1331 CDEBUG(D_READA, "%s: FNAME statahead: parent %pd fname prefix %s\n",
1332 sbi->ll_fsname, parent, sai->sai_fname);
1334 OBD_ALLOC(fname, NAME_MAX);
1338 len = strlen(sai->sai_fname);
1339 memcpy(fname, sai->sai_fname, len);
1340 max_len = sizeof(sai->sai_fname) - len;
1343 /* matches smp_store_release() in ll_deauthorize_statahead() */
1344 while (smp_load_acquire(&sai->sai_task)) {
1347 numlen = snprintf(ptr, max_len, "%llu",
1348 sai->sai_fstart + i);
1350 while (({set_current_state(TASK_IDLE);
1352 * matches smp_store_release() in
1353 * ll_deauthorize_statahead()
1355 smp_load_acquire(&sai->sai_task); })) {
1356 spin_lock(&lli->lli_agl_lock);
1357 while (sa_sent_full(sai) && !agl_list_empty(sai)) {
1358 struct ll_inode_info *clli;
1360 __set_current_state(TASK_RUNNING);
1361 clli = agl_first_entry(sai);
1362 list_del_init(&clli->lli_agl_list);
1363 spin_unlock(&lli->lli_agl_lock);
1365 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1367 spin_lock(&lli->lli_agl_lock);
1369 spin_unlock(&lli->lli_agl_lock);
1371 if (!sa_sent_full(sai))
1375 __set_current_state(TASK_RUNNING);
1377 sa_statahead(sai, parent, fname, len + numlen, NULL);
1378 if (++i >= sai->sai_fend)
1382 OBD_FREE(fname, NAME_MAX);
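/*
* Note on the fname pattern above (sketch): names are generated as the fixed
* prefix sai_fname followed by an increasing integer suffix, from sai_fstart
* up to sai_fend (the range supplied through the llapi_lu_ladvise2 hint in
* ll_ioctl_ahead()), and each generated name is passed to sa_statahead() with
* a NULL FID so the stat is resolved purely by name on the MDT.
*/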
1386 /* statahead thread main function */
1387 static int ll_statahead_thread(void *arg)
1389 struct ll_statahead_info *sai = (struct ll_statahead_info *)arg;
1390 struct dentry *parent = sai->sai_dentry;
1391 struct inode *dir = parent->d_inode;
1392 struct ll_inode_info *lli = ll_i2info(dir);
1393 struct ll_sb_info *sbi = ll_i2sbi(dir);
1394 struct lu_batch *bh = NULL;
1399 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
1402 sai->sai_max_batch_count = sbi->ll_sa_batch_max;
1403 if (sai->sai_max_batch_count) {
1404 bh = md_batch_create(ll_i2mdexp(dir), BATCH_FL_RDONLY,
1405 sai->sai_max_batch_count);
1407 GOTO(out_stop_agl, rc = PTR_ERR(bh));
1412 switch (lli->lli_sa_pattern) {
1413 case LSA_PATTERN_LIST:
1414 rc = ll_statahead_by_list(parent);
1416 case LSA_PATTERN_FNAME:
1417 rc = ll_statahead_by_fname(sai, parent);
1425 spin_lock(&lli->lli_sa_lock);
1426 sai->sai_task = NULL;
1427 lli->lli_sa_enabled = 0;
1428 spin_unlock(&lli->lli_sa_lock);
1431 ll_statahead_flush_nowait(sai);
1434 * statahead is finished, but statahead entries still need to be cached;
1435 * wait for the closedir() call at file release to stop this thread.
1437 while (({set_current_state(TASK_IDLE);
1438 /* matches smp_store_release() in ll_deauthorize_statahead() */
1439 smp_load_acquire(&sai->sai_task); })) {
1442 __set_current_state(TASK_RUNNING);
1447 rc = md_batch_stop(ll_i2mdexp(dir), sai->sai_bh);
1455 * wait for inflight statahead RPCs to finish, and then we can free sai
1456 * safely because statahead RPCs access sai data
1458 while (sai->sai_sent != sai->sai_replied)
1459 /* in case we're not woken up, timeout wait */
1462 CDEBUG(D_READA, "%s: statahead thread stopped: sai %p, parent %pd hit %llu miss %llu\n",
1463 sbi->ll_fsname, sai, parent, sai->sai_hit, sai->sai_miss);
1465 spin_lock(&lli->lli_sa_lock);
1466 sai->sai_task = NULL;
1467 spin_unlock(&lli->lli_sa_lock);
1468 wake_up(&sai->sai_waitq);
1470 atomic_add(sai->sai_hit, &sbi->ll_sa_hit_total);
1471 atomic_add(sai->sai_miss, &sbi->ll_sa_miss_total);
1474 ll_sax_put(dir, lli->lli_sax);
1479 /* authorize opened dir handle @key to statahead */
1480 void ll_authorize_statahead(struct inode *dir, void *key)
1482 struct ll_inode_info *lli = ll_i2info(dir);
1484 spin_lock(&lli->lli_sa_lock);
1485 if (!lli->lli_opendir_key && !lli->lli_sai) {
1487 * if lli_sai is not NULL, it means previous statahead is not
1488 * finished yet, we'd better not start a new statahead for now.
1490 lli->lli_opendir_key = key;
1491 lli->lli_opendir_pid = current->pid;
1492 lli->lli_sa_enabled = 1;
1494 spin_unlock(&lli->lli_sa_lock);
1497 static void ll_deauthorize_statahead_fname(struct inode *dir, void *key)
1499 struct ll_inode_info *lli = ll_i2info(dir);
1500 struct ll_file_data *fd = (struct ll_file_data *)key;
1501 struct ll_statahead_info *sai = fd->fd_sai;
1506 spin_lock(&lli->lli_sa_lock);
1507 if (sai->sai_task) {
1508 struct task_struct *task = sai->sai_task;
1510 sai->sai_task = NULL;
1511 wake_up_process(task);
1514 spin_unlock(&lli->lli_sa_lock);
1516 LASSERT(lli->lli_sax != NULL);
1517 ll_sax_put(dir, lli->lli_sax);
1521 * deauthorize opened dir handle @key to statahead, and notify statahead thread
1522 * to quit if it's running.
1524 void ll_deauthorize_statahead(struct inode *dir, void *key)
1526 struct ll_inode_info *lli = ll_i2info(dir);
1527 struct ll_statahead_info *sai;
1529 LASSERT(lli->lli_opendir_pid != 0);
1531 CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1532 PFID(&lli->lli_fid));
1534 if (lli->lli_sa_pattern == LSA_PATTERN_FNAME) {
1535 ll_deauthorize_statahead_fname(dir, key);
1539 LASSERT(lli->lli_opendir_key == key);
1540 spin_lock(&lli->lli_sa_lock);
1541 lli->lli_opendir_key = NULL;
1542 lli->lli_opendir_pid = 0;
1543 lli->lli_sa_enabled = 0;
1545 if (sai && sai->sai_task) {
1547 * statahead thread may not have quit yet because it needs to
1548 * cache entries, now it's time to tell it to quit.
1550 * wake_up_process() provides the necessary barriers
1551 * to pair with set_current_state().
1553 struct task_struct *task = sai->sai_task;
1555 /* matches smp_load_acquire() in ll_statahead_thread() */
1556 smp_store_release(&sai->sai_task, NULL);
1557 wake_up_process(task);
1559 spin_unlock(&lli->lli_sa_lock);
1564 * not first dirent, or is "."
1566 LS_NOT_FIRST_DE = 0,
1568 * the first non-hidden dirent
1572 * the first hidden dirent, that is "."
1577 /* file is first dirent under @dir */
1578 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1580 struct qstr *target = &dentry->d_name;
1581 struct md_op_data *op_data;
1583 struct page *page = NULL;
1584 int rc = LS_NOT_FIRST_DE;
1586 struct llcrypt_str lltr = LLTR_INIT(NULL, 0);
1590 op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1591 LUSTRE_OPC_ANY, dir);
1592 if (IS_ERR(op_data))
1593 RETURN(PTR_ERR(op_data));
1595 if (IS_ENCRYPTED(dir)) {
1596 int rc2 = llcrypt_fname_alloc_buffer(dir, NAME_MAX, &lltr);
1603 * FIXME: choose the start offset of the readdir
1606 page = ll_get_dir_page(dir, op_data, 0, NULL);
1609 struct lu_dirpage *dp;
1610 struct lu_dirent *ent;
1613 struct ll_inode_info *lli = ll_i2info(dir);
1616 CERROR("%s: reading dir "DFID" at %llu opendir_pid = %u : rc = %d\n",
1617 ll_i2sbi(dir)->ll_fsname,
1618 PFID(ll_inode2fid(dir)), pos,
1619 lli->lli_opendir_pid, rc);
1623 dp = page_address(page);
1624 for (ent = lu_dirent_start(dp); ent != NULL;
1625 ent = lu_dirent_next(ent)) {
1630 hash = le64_to_cpu(ent->lde_hash);
1632 * ll_get_dir_page() can return any page containing
1633 * the given hash, which may not be the start hash.
1635 if (unlikely(hash < pos))
1638 namelen = le16_to_cpu(ent->lde_namelen);
1639 if (unlikely(namelen == 0))
1641 * skip dummy record.
1645 name = ent->lde_name;
1646 if (name[0] == '.') {
1652 else if (name[1] == '.' && namelen == 2)
1663 if (dot_de && target->name[0] != '.') {
1664 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1665 target->len, target->name,
1670 if (IS_ENCRYPTED(dir)) {
1671 struct llcrypt_str de_name =
1672 LLTR_INIT(ent->lde_name, namelen);
1675 fid_le_to_cpu(&fid, &ent->lde_fid);
1676 if (ll_fname_disk_to_usr(dir, 0, 0, &de_name,
1683 if (target->len != namelen ||
1684 memcmp(target->name, name, namelen) != 0)
1685 rc = LS_NOT_FIRST_DE;
1689 rc = LS_FIRST_DOT_DE;
1691 ll_release_page(dir, page, false);
1694 pos = le64_to_cpu(dp->ldp_hash_end);
1695 if (pos == MDS_DIR_END_OFF) {
1697 * End of directory reached.
1699 ll_release_page(dir, page, false);
1703 * chain is exhausted
1704 * Normal case: continue to the next page.
1706 ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1708 page = ll_get_dir_page(dir, op_data, pos, NULL);
1713 llcrypt_fname_free_buffer(&lltr);
1714 ll_finish_md_op_data(op_data);
1720 * revalidate @dentryp from statahead cache
1722 * \param[in] dir parent directory
1723 * \param[in] ctx statahead context structure
1724 * \param[out] dentryp pointer to dentry which will be revalidated
1725 * \param[in] unplug unplug statahead window only (normally for negative
1727 * \retval 1 on success, dentry is saved in @dentryp
1728 * \retval 0 if revalidation failed (no proper lock on client)
1729 * \retval negative number upon error
1731 static int revalidate_statahead_dentry(struct inode *dir,
1732 struct ll_statahead_context *ctx,
1733 struct dentry **dentryp,
1736 struct sa_entry *entry = NULL;
1737 struct ll_inode_info *lli = ll_i2info(dir);
1738 struct ll_statahead_info *sai = lli->lli_sai;
1743 if (sai && (*dentryp)->d_name.name[0] == '.') {
1744 if (sai->sai_ls_all ||
1745 sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1747 * The hidden dentry is the first one, or the statahead
1748 * thread has not skipped that many hidden dentries
1749 * before "sai_ls_all" was enabled as below.
1752 if (!sai->sai_ls_all)
1754 * It may be because the hidden dentry is not
1755 * the first one and "sai_ls_all" was not
1756 * set, so "ls -al" missed it. Enable
1757 * "sai_ls_all" for such a case.
1759 sai->sai_ls_all = 1;
1762 * Such "getattr" has been skipped before
1763 * "sai_ls_all" enabled as above.
1765 sai->sai_miss_hidden++;
1773 entry = sa_get(ctx, &(*dentryp)->d_name);
1775 GOTO(out, rc = -EAGAIN);
1777 if (lli->lli_sa_pattern == LSA_PATTERN_LIST)
1778 LASSERT(sai == entry->se_sai);
1779 else if (lli->lli_sa_pattern == LSA_PATTERN_FNAME)
1780 sai = entry->se_sai;
1782 LASSERT(sai != NULL);
1783 if (!sa_ready(entry)) {
1784 spin_lock(&lli->lli_sa_lock);
1785 sai->sai_index_wait = entry->se_index;
1786 spin_unlock(&lli->lli_sa_lock);
1787 rc = wait_event_idle_timeout(sai->sai_waitq, sa_ready(entry),
1788 cfs_time_seconds(30));
1791 * the entry may not be ready, so it may still be in use by an inflight
1792 * statahead RPC; don't free it.
1795 GOTO(out, rc = -EAGAIN);
1800 * We need to see the value that was set immediately before we
1803 if (smp_load_acquire(&entry->se_state) == SA_ENTRY_SUCC &&
1805 struct inode *inode = entry->se_inode;
1806 struct lookup_intent it = { .it_op = IT_GETATTR,
1811 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1812 ll_inode2fid(inode), &bits);
1814 if (!(*dentryp)->d_inode) {
1815 struct dentry *alias;
1817 alias = ll_splice_alias(inode, *dentryp);
1818 if (IS_ERR(alias)) {
1819 ll_intent_release(&it);
1820 GOTO(out, rc = PTR_ERR(alias));
1824 * statahead prepared this inode, transfer inode
1825 * refcount from sa_entry to dentry
1827 entry->se_inode = NULL;
1828 } else if ((*dentryp)->d_inode != inode) {
1829 /* revalidate, but inode is recreated */
1831 "%s: stale dentry %pd inode " DFID", statahead inode "DFID "\n",
1832 ll_i2sbi(inode)->ll_fsname, *dentryp,
1833 PFID(ll_inode2fid((*dentryp)->d_inode)),
1834 PFID(ll_inode2fid(inode)));
1835 ll_intent_release(&it);
1836 GOTO(out, rc = -ESTALE);
1839 if (bits & MDS_INODELOCK_LOOKUP) {
1840 d_lustre_revalidate(*dentryp);
1841 if (S_ISDIR(inode->i_mode))
1842 ll_update_dir_depth_dmv(dir, *dentryp);
1845 ll_intent_release(&it);
1850 * a statahead-cached sa_entry can be used only once and is killed
1851 * right after use, so if lookup/revalidate accessed the statahead cache,
1852 * set the dentry's ldd_sa_generation to the parent's lli_sa_generation;
1853 * if we stat this file again later, we know statahead was done before, see
1854 * dentry_may_statahead().
1856 if (lld_is_init(*dentryp))
1857 ll_d2d(*dentryp)->lld_sa_generation = lli->lli_sa_generation;
1858 sa_put(dir, sai, entry);
1864 * start statahead thread
1866 * \param[in] dir parent directory
1867 * \param[in] dentry dentry that triggers statahead, normally the first
1869 * \param[in] agl indicate whether AGL is needed
1870 * \retval -EAGAIN on success, because by the time this function
1871 * is called we are already inside the lookup call, so the client
1872 * should do the stat itself instead of waiting for the statahead
1873 * thread to do it asynchronously.
1874 * \retval negative number upon error
1876 static int start_statahead_thread(struct inode *dir, struct dentry *dentry,
1879 int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
1880 struct ll_inode_info *lli = ll_i2info(dir);
1881 struct ll_statahead_info *sai = NULL;
1882 struct ll_statahead_context *ctx = NULL;
1883 struct dentry *parent = dentry->d_parent;
1884 struct task_struct *task;
1885 struct ll_sb_info *sbi = ll_i2sbi(parent->d_inode);
1886 int first = LS_FIRST_DE;
1891 /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
1892 first = is_first_dirent(dir, dentry);
1893 if (first == LS_NOT_FIRST_DE)
1894 /* It is not an "ls -{a}l" operation, no need for statahead. */
1895 GOTO(out, rc = -EFAULT);
1897 if (unlikely(atomic_inc_return(&sbi->ll_sa_running) >
1898 sbi->ll_sa_running_max)) {
1900 "Too many concurrent statahead instances, avoid new statahead instance temporarily.\n");
1901 GOTO(out, rc = -EMFILE);
1904 sai = ll_sai_alloc(parent);
1906 GOTO(out, rc = -ENOMEM);
1908 ctx = ll_sax_alloc(dir);
1910 GOTO(out, rc = -ENOMEM);
1912 sai->sai_ls_all = (first == LS_FIRST_DOT_DE);
1915 * if the current lli_opendir_key was deauthorized, or the dir was re-opened
1916 * by another process, don't start statahead; otherwise the newly spawned
1917 * statahead thread won't be notified to quit.
1919 spin_lock(&lli->lli_sa_lock);
1920 if (unlikely(lli->lli_sai || !lli->lli_opendir_key ||
1921 lli->lli_opendir_pid != current->pid ||
1922 lli->lli_sa_pattern != LSA_PATTERN_NONE)) {
1923 spin_unlock(&lli->lli_sa_lock);
1924 GOTO(out, rc = -EPERM);
1928 lli->lli_sa_pattern = LSA_PATTERN_LIST;
1929 spin_unlock(&lli->lli_sa_lock);
1931 CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %pd]\n",
1932 current->pid, parent);
1934 task = kthread_create_on_node(ll_statahead_thread, sai, node,
1935 "ll_sa_%u", lli->lli_opendir_pid);
1937 spin_lock(&lli->lli_sa_lock);
1938 lli->lli_sai = NULL;
1939 spin_unlock(&lli->lli_sa_lock);
1941 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1945 if (test_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags) && agl)
1946 ll_start_agl(parent, sai);
1948 atomic_inc(&sbi->ll_sa_total);
1949 sai->sai_task = task;
1951 wake_up_process(task);
1953 * We don't stat-ahead for the first dirent since we are already in
1960 * once starting the statahead thread has failed, disable statahead so
1961 * that subsequent stats won't waste time trying it.
1963 spin_lock(&lli->lli_sa_lock);
1964 if (lli->lli_opendir_pid == current->pid)
1965 lli->lli_sa_enabled = 0;
1966 spin_unlock(&lli->lli_sa_lock);
1974 if (first != LS_NOT_FIRST_DE)
1975 atomic_dec(&sbi->ll_sa_running);
1981 * Check whether statahead for @dir was started.
1983 static inline bool ll_statahead_started(struct inode *dir, bool agl)
1985 struct ll_inode_info *lli = ll_i2info(dir);
1986 struct ll_statahead_context *ctx;
1987 struct ll_statahead_info *sai;
1989 spin_lock(&lli->lli_sa_lock);
1992 if (sai && (sai->sai_agl_task != NULL) != agl)
1994 "%s: Statahead AGL hint changed from %d to %d\n",
1995 ll_i2sbi(dir)->ll_fsname,
1996 sai->sai_agl_task != NULL, agl);
1997 spin_unlock(&lli->lli_sa_lock);
2003 * statahead entry function; this is called when the client does getattr on a
2004 * file. It starts the statahead thread if this is the first dir entry, else it
2005 * revalidates the dentry from the statahead cache.
2007 * \param[in] dir parent directory
2008 * \param[out] dentryp dentry to getattr
2009 * \param[in] agl whether start the agl thread
2011 * \retval 1 on success
2012 * \retval 0 revalidation from statahead cache failed, caller needs
2013 * to getattr from server directly
2014 * \retval negative number on error, caller often ignores this and
2015 * then getattr from server
2017 int ll_start_statahead(struct inode *dir, struct dentry *dentry, bool agl)
2019 if (!ll_statahead_started(dir, agl))
2020 return start_statahead_thread(dir, dentry, agl);
2025 * revalidate dentry from statahead cache.
2027 * \param[in] dir parent directory
2028 * \param[out] dentryp dentry to getattr
2029 * \param[in] unplug unplug statahead window only (normally for negative
2031 * \retval 1 on success
2032 * \retval 0 revalidation from statahead cache failed, caller needs
2033 * to getattr from server directly
2034 * \retval negative number on error, caller often ignores this and
2035 * then getattr from server
2037 int ll_revalidate_statahead(struct inode *dir, struct dentry **dentryp,
2040 struct ll_inode_info *lli = ll_i2info(dir);
2041 struct ll_statahead_context *ctx;
2042 struct ll_statahead_info *sai = NULL;
2045 spin_lock(&lli->lli_sa_lock);
2050 atomic_inc(&sai->sai_refcount);
2051 } else if (lli->lli_sa_pattern & LSA_PATTERN_LIST) {
2052 spin_unlock(&lli->lli_sa_lock);
2057 spin_unlock(&lli->lli_sa_lock);
2059 rc = revalidate_statahead_dentry(dir, ctx, dentryp, unplug);
2060 CDEBUG(D_READA, "revalidate statahead %pd: rc = %d.\n",
2064 ll_sax_put(dir, ctx);
2069 int ll_ioctl_ahead(struct file *file, struct llapi_lu_ladvise2 *ladvise)
2071 int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
2072 struct ll_file_data *fd = file->private_data;
2073 struct dentry *dentry = file_dentry(file);
2074 struct inode *dir = dentry->d_inode;
2075 struct ll_inode_info *lli = ll_i2info(dir);
2076 struct ll_sb_info *sbi = ll_i2sbi(dir);
2077 struct ll_statahead_info *sai = NULL;
2078 struct ll_statahead_context *ctx = NULL;
2079 struct task_struct *task;
2085 if (sbi->ll_sa_max == 0)
2088 if (!S_ISDIR(dir->i_mode))
2093 CWARN("%s: already set statahead hint for dir %pd: rc = %d\n",
2094 sbi->ll_fsname, dentry, rc);
2098 if (unlikely(atomic_inc_return(&sbi->ll_sa_running) >
2099 sbi->ll_sa_running_max)) {
2101 "Too many concurrent statahead instances, avoid new statahead instance temporarily.\n");
2102 GOTO(out, rc = -EMFILE);
2105 sai = ll_sai_alloc(dentry);
2107 GOTO(out, rc = -ENOMEM);
2109 sai->sai_fstart = ladvise->lla_start;
2110 sai->sai_fend = ladvise->lla_end;
2111 sai->sai_ls_all = 0;
2112 sai->sai_max = sbi->ll_sa_max;
2113 strncpy(sai->sai_fname, ladvise->lla_fname, sizeof(sai->sai_fname));
2115 ctx = ll_sax_get(dir);
2117 ctx = ll_sax_alloc(dir);
2119 GOTO(out, rc = -ENOMEM);
2121 spin_lock(&lli->lli_sa_lock);
2122 if (unlikely(lli->lli_sax)) {
2123 struct ll_statahead_context *tmp = ctx;
2125 if (lli->lli_sa_pattern == LSA_PATTERN_NONE ||
2126 lli->lli_sa_pattern == LSA_PATTERN_FNAME) {
2127 lli->lli_sa_pattern = LSA_PATTERN_FNAME;
2130 fd->fd_sai = __ll_sai_get(sai);
2134 CWARN("%s: pattern %X is not FNAME: rc = %d\n",
2135 sbi->ll_fsname, lli->lli_sa_pattern, rc);
2138 spin_unlock(&lli->lli_sa_lock);
2143 lli->lli_sa_pattern = LSA_PATTERN_FNAME;
2145 fd->fd_sai = __ll_sai_get(sai);
2146 spin_unlock(&lli->lli_sa_lock);
2149 spin_lock(&lli->lli_sa_lock);
2150 if (!(lli->lli_sa_pattern == LSA_PATTERN_FNAME ||
2151 lli->lli_sa_pattern == LSA_PATTERN_NONE)) {
2152 spin_unlock(&lli->lli_sa_lock);
2153 GOTO(out, rc = -EINVAL);
2156 lli->lli_sa_pattern = LSA_PATTERN_FNAME;
2157 fd->fd_sai = __ll_sai_get(sai);
2158 spin_unlock(&lli->lli_sa_lock);
2163 "start statahead thread: [pid %d] [parent %pd] sai %p ctx %p\n",
2164 current->pid, dentry, sai, ctx);
2166 task = kthread_create_on_node(ll_statahead_thread, sai, node,
2167 "ll_sa_%u", current->pid);
2170 CERROR("%s: cannot start ll_sa thread: rc = %d\n",
2171 sbi->ll_fsname, rc);
2175 if (test_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags) && agl)
2176 ll_start_agl(dentry, sai);
2178 atomic_inc(&sbi->ll_sa_total);
2179 sai->sai_task = task;
2180 wake_up_process(task);
2186 ll_sax_put(dir, ctx);
2196 atomic_dec(&sbi->ll_sa_running);