/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/delay.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <obd_support.h>
#include <lustre_dlm.h>
#include "llite_internal.h"

#define SA_OMITTED_ENTRY_MAX 8ULL

typedef enum {
        /** negative values are for error cases */
        SA_ENTRY_INIT = 0,      /** init entry */
        SA_ENTRY_SUCC = 1,      /** stat succeeded */
        SA_ENTRY_INVA = 2,      /** invalid entry */
} se_stat_t;

/*
 * sa_entry is not refcounted: the statahead thread allocates it and does the
 * async stat; in the async stat callback ll_statahead_interpret() will add it
 * into sai_interim_entries, later the statahead thread will call
 * sa_handle_callback() to instantiate the entry and move it into sai_entries,
 * and then only the scanner process can access and free it.
 */
struct sa_entry {
        /* link into sai_interim_entries or sai_entries */
        struct list_head         se_list;
        /* link into sai hash table locally */
        struct list_head         se_hash;
        /* entry index in the sai */
        __u64                    se_index;
        /* low layer ldlm lock handle */
        __u64                    se_handle;
        /* entry state */
        se_stat_t                se_state;
        /* entry size, contains name */
        int                      se_size;
        /* pointer to async getattr enqueue info */
        struct md_enqueue_info  *se_minfo;
        /* pointer to the async getattr request */
        struct ptlrpc_request   *se_req;
        /* pointer to the target inode */
        struct inode            *se_inode;
        /* entry name */
        struct qstr              se_qstr;
        /* entry fid */
        struct lu_fid            se_fid;
};
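
/*
 * Editor's sketch of the lifecycle described above, inferred from the
 * functions in this file (not authoritative upstream documentation):
 *
 *   statahead thread:  entry = sa_alloc(); sa_lookup()/sa_revalidate()
 *                      sends the async getattr RPC with entry as cbdata;
 *   ptlrpcd:           ll_statahead_interpret() links the reply into
 *                      sai_interim_entries and wakes the statahead thread;
 *   statahead thread:  sa_handle_callback() -> sa_instantiate() readies
 *                      the entry and moves it onto sai_entries;
 *   scanner process:   sa_get() finds it, uses it, then
 *                      sa_put() -> sa_kill() -> sa_free() releases it.
 */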

static unsigned int sai_generation;
static DEFINE_SPINLOCK(sai_generation_lock);
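
/*
 * Editor's note (assumption): sai_generation is advanced under
 * sai_generation_lock in ll_sai_alloc(); the double increment there
 * presumably exists so that 0 is skipped on wraparound and can never
 * match a dentry's recorded lld_sa_generation.
 */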

static inline int sa_unhashed(struct sa_entry *entry)
{
        return list_empty(&entry->se_hash);
}

/* sa_entry is ready to use */
static inline int sa_ready(struct sa_entry *entry)
{
        /* Make sure sa_entry is updated and ready to use */
        smp_rmb();

        return (entry->se_state != SA_ENTRY_INIT);
}

/* hash value to put in sai_cache */
static inline int sa_hash(int val)
{
        return val & LL_SA_CACHE_MASK;
}
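
/*
 * Worked example (editor's note, assuming LL_SA_CACHE_MASK is 2^n - 1,
 * e.g. 31 for a 32-bucket cache): sa_hash(0x1234abcd) == 0xcd & 0x1f == 13.
 */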

/* hash entry into sai_cache */
static inline void
sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
{
        int i = sa_hash(entry->se_qstr.hash);

        spin_lock(&sai->sai_cache_lock[i]);
        list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
        spin_unlock(&sai->sai_cache_lock[i]);
}

/* unhash entry from sai_cache */
static inline void
sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
{
        int i = sa_hash(entry->se_qstr.hash);

        spin_lock(&sai->sai_cache_lock[i]);
        list_del_init(&entry->se_hash);
        spin_unlock(&sai->sai_cache_lock[i]);
}

static inline int agl_should_run(struct ll_statahead_info *sai,
                                 struct inode *inode)
{
        return inode && S_ISREG(inode->i_mode) && sai->sai_agl_task;
}

static inline struct ll_inode_info *
agl_first_entry(struct ll_statahead_info *sai)
{
        return list_entry(sai->sai_agls.next, struct ll_inode_info,
                          lli_agl_list);
}

/* statahead window is full */
static inline int sa_sent_full(struct ll_statahead_info *sai)
{
        return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
}

/* got async stat replies */
static inline int sa_has_callback(struct ll_statahead_info *sai)
{
        return !list_empty(&sai->sai_interim_entries);
}

static inline int agl_list_empty(struct ll_statahead_info *sai)
{
        return list_empty(&sai->sai_agls);
}

/*
 * A low hit rate means either
 * (1) a hit ratio of less than 80%, or
 * (2) more than 8 consecutive misses.
 */
static inline int sa_low_hit(struct ll_statahead_info *sai)
{
        return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
                (sai->sai_consecutive_miss > 8));
}
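
/*
 * Worked example (editor's note): sai_hit < 4 * sai_miss is equivalent to
 * hit / (hit + miss) < 80%. With sai_hit == 8 and sai_miss == 3, 8 > 7 and
 * 8 < 12, so the ~73% hit ratio counts as low; with sai_hit == 16 and
 * sai_miss == 3 (~84%), it does not.
 */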

/*
 * If the given index is behind the statahead window by more than
 * SA_OMITTED_ENTRY_MAX entries, then it is old.
 */
static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
{
        return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
                sai->sai_index);
}
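
/*
 * Worked example (editor's note): with sai_max == 32, sai_index == 100 and
 * SA_OMITTED_ENTRY_MAX == 8, an entry is old when index + 32 + 8 < 100,
 * i.e. for any index below 60.
 */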

/* allocate sa_entry and hash it to allow scanner process to find it */
static struct sa_entry *
sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index,
         const char *name, int len, const struct lu_fid *fid)
{
        struct ll_inode_info *lli;
        struct sa_entry *entry;
        int entry_size;
        char *dname;

        ENTRY;

        entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
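        /*
         * Editor's note: (len & ~3) + 4 reserves room for the name plus its
         * NUL terminator, rounded up to a 4-byte boundary; e.g. len == 5
         * reserves 8 bytes.
         */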
        OBD_ALLOC(entry, entry_size);
        if (unlikely(!entry))
                RETURN(ERR_PTR(-ENOMEM));

        CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
               len, name, entry, index);

        entry->se_index = index;
        entry->se_state = SA_ENTRY_INIT;
        entry->se_size = entry_size;
        dname = (char *)entry + sizeof(struct sa_entry);
        memcpy(dname, name, len);
        dname[len] = 0;
        entry->se_qstr.hash = ll_full_name_hash(parent, name, len);
        entry->se_qstr.len = len;
        entry->se_qstr.name = dname;
        entry->se_fid = *fid;

        lli = ll_i2info(sai->sai_dentry->d_inode);

        spin_lock(&lli->lli_sa_lock);
        INIT_LIST_HEAD(&entry->se_list);
        sa_rehash(sai, entry);
        spin_unlock(&lli->lli_sa_lock);

        atomic_inc(&sai->sai_cache_count);

        RETURN(entry);
}

/* free sa_entry, which should have been unhashed and not in any list */
static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
{
        CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
               entry->se_qstr.len, entry->se_qstr.name, entry,
               entry->se_index);

        LASSERT(list_empty(&entry->se_list));
        LASSERT(sa_unhashed(entry));

        OBD_FREE(entry, entry->se_size);
        atomic_dec(&sai->sai_cache_count);
}

/*
 * find sa_entry by name, used by directory scanner; a lock is not needed
 * because only the scanner can remove the entry from cache.
 */
static struct sa_entry *
sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
{
        struct sa_entry *entry;
        int i = sa_hash(qstr->hash);

        list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
                if (entry->se_qstr.hash == qstr->hash &&
                    entry->se_qstr.len == qstr->len &&
                    memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
                        return entry;
        }
        return NULL;
}

/* unhash and unlink sa_entry, and then free it */
static inline void
sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
{
        struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);

        LASSERT(!sa_unhashed(entry));
        LASSERT(!list_empty(&entry->se_list));
        LASSERT(sa_ready(entry));

        sa_unhash(sai, entry);

        spin_lock(&lli->lli_sa_lock);
        list_del_init(&entry->se_list);
        spin_unlock(&lli->lli_sa_lock);

        iput(entry->se_inode);

        sa_free(sai, entry);
}

/* called by scanner after use, sa_entry will be killed */
static void
sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
{
        struct sa_entry *tmp, *next;

        if (entry && entry->se_state == SA_ENTRY_SUCC) {
                struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);

                sai->sai_hit++;
                sai->sai_consecutive_miss = 0;
                sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
        } else {
                sai->sai_miss++;
                sai->sai_consecutive_miss++;
        }

        if (entry)
                sa_kill(sai, entry);

        /*
         * kill old completed entries; only the scanner process does this, so
         * there is no need to lock
         */
        list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
                if (!is_omitted_entry(sai, tmp->se_index))
                        break;
                sa_kill(sai, tmp);
        }
}

/*
 * update state and sort-add entry into sai_entries by index; return true if
 * the scanner is waiting on this entry.
 */
static bool
__sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
{
        struct sa_entry *se;
        struct list_head *pos = &sai->sai_entries;
        __u64 index = entry->se_index;

        LASSERT(!sa_ready(entry));
        LASSERT(list_empty(&entry->se_list));

        list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
                if (se->se_index < entry->se_index) {
                        pos = &se->se_list;
                        break;
                }
        }
        list_add(&entry->se_list, pos);
        /*
         * LU-9210: ll_statahead_interpret must be able to see this before
         * we wake it up
         */
        smp_store_release(&entry->se_state,
                          ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);

        return (index == sai->sai_index_wait);
}
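
/*
 * Editor's note: the smp_store_release() above pairs with the
 * smp_load_acquire(&entry->se_state) in revalidate_statahead_dentry()
 * (and with the smp_rmb() in sa_ready()), so a waiter that observes
 * SA_ENTRY_SUCC also observes the entry fields written before the release.
 */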

/* finish async stat RPC arguments */
static void sa_fini_data(struct md_enqueue_info *minfo)
{
        ll_unlock_md_op_lsm(&minfo->mi_data);
        iput(minfo->mi_dir);
        OBD_FREE_PTR(minfo);
}

static int ll_statahead_interpret(struct ptlrpc_request *req,
                                  struct md_enqueue_info *minfo, int rc);

/*
 * prepare arguments for async stat RPC.
 */
static struct md_enqueue_info *
sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry)
{
        struct md_enqueue_info *minfo;
        struct ldlm_enqueue_info *einfo;
        struct md_op_data *op_data;

        OBD_ALLOC_PTR(minfo);
        if (!minfo)
                return ERR_PTR(-ENOMEM);

        op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child,
                                     entry->se_qstr.name, entry->se_qstr.len,
                                     0, LUSTRE_OPC_ANY, NULL);
        if (IS_ERR(op_data)) {
                OBD_FREE_PTR(minfo);
                return (struct md_enqueue_info *)op_data;
        }

        if (!child)
                op_data->op_fid2 = entry->se_fid;

        minfo->mi_it.it_op = IT_GETATTR;
        minfo->mi_dir = igrab(dir);
        minfo->mi_cb = ll_statahead_interpret;
        minfo->mi_cbdata = entry;

        einfo = &minfo->mi_einfo;
        einfo->ei_type = LDLM_IBITS;
        einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
        einfo->ei_cb_bl = ll_md_blocking_ast;
        einfo->ei_cb_cp = ldlm_completion_ast;
        einfo->ei_cb_gl = NULL;
        einfo->ei_cbdata = NULL;

        return minfo;
}

/*
 * release resources used in async stat RPC, update entry state and wake up
 * the scanner process if it is waiting on this entry.
 */
static void
sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
{
        struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
        struct md_enqueue_info *minfo = entry->se_minfo;
        struct ptlrpc_request *req = entry->se_req;
        bool wakeup;

        /* release resources used in RPC */
        if (minfo) {
                entry->se_minfo = NULL;
                ll_intent_release(&minfo->mi_it);
                sa_fini_data(minfo);
        }

        if (req) {
                entry->se_req = NULL;
                ptlrpc_req_finished(req);
        }

        spin_lock(&lli->lli_sa_lock);
        wakeup = __sa_make_ready(sai, entry, ret);
        spin_unlock(&lli->lli_sa_lock);

        if (wakeup)
                wake_up(&sai->sai_waitq);
}

/* insert inode into the list of sai_agls */
static void ll_agl_add(struct ll_statahead_info *sai,
                       struct inode *inode, int index)
{
        struct ll_inode_info *child = ll_i2info(inode);
        struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);

        spin_lock(&child->lli_agl_lock);
        if (child->lli_agl_index == 0) {
                child->lli_agl_index = index;
                spin_unlock(&child->lli_agl_lock);

                LASSERT(list_empty(&child->lli_agl_list));

                spin_lock(&parent->lli_agl_lock);
                /* Re-check under the lock */
                if (agl_should_run(sai, inode)) {
                        if (agl_list_empty(sai))
                                wake_up_process(sai->sai_agl_task);
                        igrab(inode);
                        list_add_tail(&child->lli_agl_list, &sai->sai_agls);
                } else {
                        child->lli_agl_index = 0;
                }
                spin_unlock(&parent->lli_agl_lock);
        } else {
                spin_unlock(&child->lli_agl_lock);
        }
}

static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
{
        struct ll_statahead_info *sai;
        struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
        int i;

        ENTRY;

        OBD_ALLOC_PTR(sai);
        if (!sai)
                RETURN(NULL);

        sai->sai_dentry = dget(dentry);
        atomic_set(&sai->sai_refcount, 1);
        sai->sai_max = LL_SA_RPC_MIN;
        sai->sai_index = 1;
        init_waitqueue_head(&sai->sai_waitq);

        INIT_LIST_HEAD(&sai->sai_interim_entries);
        INIT_LIST_HEAD(&sai->sai_entries);
        INIT_LIST_HEAD(&sai->sai_agls);

        for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
                INIT_LIST_HEAD(&sai->sai_cache[i]);
                spin_lock_init(&sai->sai_cache_lock[i]);
        }
        atomic_set(&sai->sai_cache_count, 0);

        spin_lock(&sai_generation_lock);
        lli->lli_sa_generation = ++sai_generation;
        if (unlikely(sai_generation == 0))
                lli->lli_sa_generation = ++sai_generation;
        spin_unlock(&sai_generation_lock);

        RETURN(sai);
}

static inline void ll_sai_free(struct ll_statahead_info *sai)
{
        LASSERT(sai->sai_dentry != NULL);
        dput(sai->sai_dentry);
        OBD_FREE_PTR(sai);
}

/*
 * take a refcount of sai if sai for @dir exists, which means statahead is on
 * for this directory.
 */
static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
{
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_statahead_info *sai = NULL;

        spin_lock(&lli->lli_sa_lock);
        sai = lli->lli_sai;
        if (sai)
                atomic_inc(&sai->sai_refcount);
        spin_unlock(&lli->lli_sa_lock);

        return sai;
}

/*
 * put sai refcount after use; if refcount reaches zero, free sai and the
 * sa_entries inside it.
 */
static void ll_sai_put(struct ll_statahead_info *sai)
{
        struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);

        if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
                struct sa_entry *entry, *next;
                struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);

                lli->lli_sai = NULL;
                spin_unlock(&lli->lli_sa_lock);

                LASSERT(!sai->sai_task);
                LASSERT(!sai->sai_agl_task);
                LASSERT(sai->sai_sent == sai->sai_replied);
                LASSERT(!sa_has_callback(sai));

                list_for_each_entry_safe(entry, next, &sai->sai_entries,
                                         se_list)
                        sa_kill(sai, entry);

                LASSERT(atomic_read(&sai->sai_cache_count) == 0);
                LASSERT(agl_list_empty(sai));

                ll_sai_free(sai);
                atomic_dec(&sbi->ll_sa_running);
        }
}

/* Do NOT forget to drop inode refcount when into sai_agls. */
static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        u64 index = lli->lli_agl_index;
        ktime_t expire;
        int rc;

        ENTRY;

        LASSERT(list_empty(&lli->lli_agl_list));

        /* AGL may fall behind statahead by one entry */
        if (is_omitted_entry(sai, index + 1)) {
                lli->lli_agl_index = 0;
                iput(inode);
                RETURN_EXIT;
        }

        /*
         * In case of restore, the MDT has the right size and has already
         * sent it back without granting the layout lock, inode is up-to-date.
         * Then AGL (async glimpse lock) is useless.
         * Also to glimpse we need the layout; in case of a running restore
         * the MDT holds the layout lock so the glimpse will block up to the
         * end of restore (statahead/agl will block).
         */
        if (test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
                lli->lli_agl_index = 0;
                iput(inode);
                RETURN_EXIT;
        }

        /* Someone is in glimpse (sync or async), do nothing. */
        rc = down_write_trylock(&lli->lli_glimpse_sem);
        if (rc == 0) {
                lli->lli_agl_index = 0;
                iput(inode);
                RETURN_EXIT;
        }

        /*
         * Someone triggered a glimpse within the last second.
         * 1) The former glimpse succeeded with glimpse lock granted by OST,
         *    and if the lock is still cached on the client, AGL needs to do
         *    nothing. If it is cancelled by another client, AGL maybe cannot
         *    obtain a new lock, as no glimpse callback is triggered by AGL.
         * 2) The former glimpse succeeded, but the OST did not grant a
         *    glimpse lock. In such a case, it is quite possible that the OST
         *    will not grant a glimpse lock for AGL either.
         * 3) The former glimpse failed; compared with the other two cases,
         *    this is relatively rare. AGL can ignore such a case, and it
         *    will not much affect the performance.
         */
        expire = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
        if (ktime_to_ns(lli->lli_glimpse_time) &&
            ktime_before(expire, lli->lli_glimpse_time)) {
                up_write(&lli->lli_glimpse_sem);
                lli->lli_agl_index = 0;
                iput(inode);
                RETURN_EXIT;
        }

        CDEBUG(D_READA,
               "Handling (init) async glimpse: inode = " DFID", idx = %llu\n",
               PFID(&lli->lli_fid), index);

        cl_agl(inode);
        lli->lli_agl_index = 0;
        lli->lli_glimpse_time = ktime_get();
        up_write(&lli->lli_glimpse_sem);

        CDEBUG(D_READA,
               "Handled (init) async glimpse: inode= " DFID", idx = %llu, rc = %d\n",
               PFID(&lli->lli_fid), index, rc);

        iput(inode);

        EXIT;
}

/*
 * prepare inode for sa entry and add it into the agl list; after that the
 * sa_entry is ready to be used by the scanner process.
 */
static void sa_instantiate(struct ll_statahead_info *sai,
                           struct sa_entry *entry)
{
        struct inode *dir = sai->sai_dentry->d_inode;
        struct inode *child;
        struct md_enqueue_info *minfo;
        struct lookup_intent *it;
        struct ptlrpc_request *req;
        struct mdt_body *body;
        int rc = 0;

        ENTRY;

        LASSERT(entry->se_handle != 0);

        minfo = entry->se_minfo;
        it = &minfo->mi_it;
        req = entry->se_req;
        body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
        if (!body)
                GOTO(out, rc = -EFAULT);

        child = entry->se_inode;
        /* revalidate; unlinked and re-created with the same name */
        if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->mbo_fid1))) {
                if (child) {
                        entry->se_inode = NULL;
                        iput(child);
                }
                /* The mdt_body is invalid. Skip this entry */
                GOTO(out, rc = -EAGAIN);
        }

        it->it_lock_handle = entry->se_handle;
        rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
        if (rc != 1)
                GOTO(out, rc = -EAGAIN);

        rc = ll_prep_inode(&child, &req->rq_pill, dir->i_sb, it);
        if (rc)
                GOTO(out, rc);

        CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
               ll_i2sbi(dir)->ll_fsname, entry->se_qstr.len,
               entry->se_qstr.name, PFID(ll_inode2fid(child)), child);
        ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);

        entry->se_inode = child;

        if (agl_should_run(sai, child))
                ll_agl_add(sai, child, entry->se_index);

        EXIT;

out:
        /*
         * sa_make_ready() will drop ldlm ibits lock refcount by calling
         * ll_intent_drop_lock() in spite of failures. Do not worry about
         * calling ll_intent_drop_lock() more than once.
         */
        sa_make_ready(sai, entry, rc);
}

/* once there are async stat replies, instantiate sa_entry from replies */
static void sa_handle_callback(struct ll_statahead_info *sai)
{
        struct ll_inode_info *lli;

        lli = ll_i2info(sai->sai_dentry->d_inode);

        spin_lock(&lli->lli_sa_lock);
        while (sa_has_callback(sai)) {
                struct sa_entry *entry;

                entry = list_entry(sai->sai_interim_entries.next,
                                   struct sa_entry, se_list);
                list_del_init(&entry->se_list);
                spin_unlock(&lli->lli_sa_lock);

                sa_instantiate(sai, entry);
                spin_lock(&lli->lli_sa_lock);
        }
        spin_unlock(&lli->lli_sa_lock);
}

/*
 * callback for async stat RPC; because this is called in ptlrpcd context, we
 * only put the sa_entry in sai_interim_entries, and wake up the statahead
 * thread to really prepare the inode and instantiate the sa_entry later.
 */
static int ll_statahead_interpret(struct ptlrpc_request *req,
                                  struct md_enqueue_info *minfo, int rc)
{
        struct lookup_intent *it = &minfo->mi_it;
        struct inode *dir = minfo->mi_dir;
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_statahead_info *sai = lli->lli_sai;
        struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
        __u64 handle = 0;

        ENTRY;

        if (it_disposition(it, DISP_LOOKUP_NEG))
                rc = -ENOENT;

        /*
         * because the statahead thread will wait for all inflight RPCs to
         * finish, sai should always be valid, no need to refcount
         */
        LASSERT(sai != NULL);
        LASSERT(entry != NULL);

        CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
               entry->se_qstr.len, entry->se_qstr.name, rc);

        if (rc != 0) {
                ll_intent_release(it);
                sa_fini_data(minfo);
        } else {
                /*
                 * release ibits lock ASAP to avoid deadlock when statahead
                 * thread enqueues lock on parent in readdir and another
                 * process enqueues lock on child with parent lock held, eg.
                 * unlink.
                 */
                handle = it->it_lock_handle;
                ll_intent_drop_lock(it);
                ll_unlock_md_op_lsm(&minfo->mi_data);
        }

        spin_lock(&lli->lli_sa_lock);
        if (rc != 0) {
                if (__sa_make_ready(sai, entry, rc))
                        wake_up(&sai->sai_waitq);
        } else {
                int first = 0;

                entry->se_minfo = minfo;
                entry->se_req = ptlrpc_request_addref(req);
                /*
                 * Release the async ibits lock ASAP to avoid deadlock
                 * when statahead thread tries to enqueue lock on parent
                 * for readpage and another tries to enqueue lock on child
                 * with parent's lock held, for example: unlink.
                 */
                entry->se_handle = handle;
                if (!sa_has_callback(sai))
                        first = 1;

                list_add_tail(&entry->se_list, &sai->sai_interim_entries);
                if (first && sai->sai_task)
                        wake_up_process(sai->sai_task);
        }
        sai->sai_replied++;

        spin_unlock(&lli->lli_sa_lock);

        RETURN(rc);
}

/* async stat for file not found in dcache */
static int sa_lookup(struct inode *dir, struct sa_entry *entry)
{
        struct md_enqueue_info *minfo;
        int rc;

        ENTRY;

        minfo = sa_prep_data(dir, NULL, entry);
        if (IS_ERR(minfo))
                RETURN(PTR_ERR(minfo));

        rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
        if (rc < 0)
                sa_fini_data(minfo);

        RETURN(rc);
}

/*
 * async stat for file found in dcache, similar to .revalidate
 *
 * \retval      1 dentry valid, no RPC sent
 * \retval      0 dentry invalid, will send async stat RPC
 * \retval      negative number upon error
 */
static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
                         struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;
        struct lookup_intent it = { .it_op = IT_GETATTR,
                                    .it_lock_handle = 0 };
        struct md_enqueue_info *minfo;
        int rc;

        ENTRY;

        if (unlikely(!inode))
                RETURN(1);

        if (d_mountpoint(dentry))
                RETURN(1);

        minfo = sa_prep_data(dir, inode, entry);
        if (IS_ERR(minfo))
                RETURN(PTR_ERR(minfo));

        entry->se_inode = igrab(inode);
        rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
                                NULL);
        if (rc == 1) {
                entry->se_handle = it.it_lock_handle;
                ll_intent_release(&it);
                sa_fini_data(minfo);
                RETURN(1);
        }

        rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
        if (rc < 0) {
                entry->se_inode = NULL;
                iput(inode);
                sa_fini_data(minfo);
        }

        RETURN(rc);
}

/* async stat for file with @name */
static void sa_statahead(struct dentry *parent, const char *name, int len,
                         const struct lu_fid *fid)
{
        struct inode *dir = parent->d_inode;
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_statahead_info *sai = lli->lli_sai;
        struct dentry *dentry = NULL;
        struct sa_entry *entry;
        int rc;

        ENTRY;

        entry = sa_alloc(parent, sai, sai->sai_index, name, len, fid);
        if (IS_ERR(entry))
                RETURN_EXIT;

        dentry = d_lookup(parent, &entry->se_qstr);
        if (!dentry) {
                rc = sa_lookup(dir, entry);
        } else {
                rc = sa_revalidate(dir, entry, dentry);
                if (rc == 1 && agl_should_run(sai, dentry->d_inode))
                        ll_agl_add(sai, dentry->d_inode, entry->se_index);
        }

        if (dentry)
                dput(dentry);

        if (rc != 0)
                sa_make_ready(sai, entry, rc);
        else
                sai->sai_sent++;

        sai->sai_index++;

        EXIT;
}

/* async glimpse (agl) thread main function */
static int ll_agl_thread(void *arg)
{
        struct dentry *parent = (struct dentry *)arg;
        struct inode *dir = parent->d_inode;
        struct ll_inode_info *plli = ll_i2info(dir);
        struct ll_inode_info *clli;
        /*
         * We already own this reference, so it is safe to take it
         * without a lock.
         */
        struct ll_statahead_info *sai = plli->lli_sai;

        ENTRY;

        CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
               sai, parent);

        while (({set_current_state(TASK_IDLE);
                 !kthread_should_stop(); })) {
                spin_lock(&plli->lli_agl_lock);
                if (!agl_list_empty(sai)) {
                        __set_current_state(TASK_RUNNING);
                        clli = agl_first_entry(sai);
                        list_del_init(&clli->lli_agl_list);
                        spin_unlock(&plli->lli_agl_lock);
                        ll_agl_trigger(&clli->lli_vfs_inode, sai);
                        cond_resched();
                } else {
                        spin_unlock(&plli->lli_agl_lock);
                        schedule();
                }
        }
        __set_current_state(TASK_RUNNING);
        RETURN(0);
}

static void ll_stop_agl(struct ll_statahead_info *sai)
{
        struct dentry *parent = sai->sai_dentry;
        struct ll_inode_info *plli = ll_i2info(parent->d_inode);
        struct ll_inode_info *clli;
        struct task_struct *agl_task;

        spin_lock(&plli->lli_agl_lock);
        agl_task = sai->sai_agl_task;
        sai->sai_agl_task = NULL;
        spin_unlock(&plli->lli_agl_lock);
        if (!agl_task)
                return;

        CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
               sai, (unsigned int)agl_task->pid);
        kthread_stop(agl_task);

        spin_lock(&plli->lli_agl_lock);
        while (!agl_list_empty(sai)) {
                clli = agl_first_entry(sai);
                list_del_init(&clli->lli_agl_list);
                spin_unlock(&plli->lli_agl_lock);
                clli->lli_agl_index = 0;
                iput(&clli->lli_vfs_inode);
                spin_lock(&plli->lli_agl_lock);
        }
        spin_unlock(&plli->lli_agl_lock);
        CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
               sai, parent);

        ll_sai_put(sai);
}

/* start agl thread */
static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
{
        int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
        struct ll_inode_info *plli;
        struct task_struct *task;

        ENTRY;

        CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
               sai, parent);

        plli = ll_i2info(parent->d_inode);
        task = kthread_create_on_node(ll_agl_thread, parent, node, "ll_agl_%d",
                                      plli->lli_opendir_pid);
        if (IS_ERR(task)) {
                CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
                RETURN_EXIT;
        }
        sai->sai_agl_task = task;
        atomic_inc(&ll_i2sbi(d_inode(parent))->ll_agl_total);
        /* Get an extra reference that the thread holds */
        ll_sai_get(d_inode(parent));

        wake_up_process(task);

        EXIT;
}

/* statahead thread main function */
static int ll_statahead_thread(void *arg)
{
        struct dentry *parent = (struct dentry *)arg;
        struct inode *dir = parent->d_inode;
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_sb_info *sbi = ll_i2sbi(dir);
        struct ll_statahead_info *sai = lli->lli_sai;
        int first = 0;
        struct md_op_data *op_data;
        struct page *page = NULL;
        __u64 pos = 0;
        int rc = 0;

        ENTRY;

        CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
               sai, parent);

        OBD_ALLOC_PTR(op_data);
        if (!op_data)
                GOTO(out, rc = -ENOMEM);

        while (pos != MDS_DIR_END_OFF && sai->sai_task) {
                struct lu_dirpage *dp;
                struct lu_dirent *ent;

                op_data = ll_prep_md_op_data(op_data, dir, dir, NULL, 0, 0,
                                             LUSTRE_OPC_ANY, dir);
                if (IS_ERR(op_data)) {
                        rc = PTR_ERR(op_data);
                        break;
                }

                sai->sai_in_readpage = 1;
                page = ll_get_dir_page(dir, op_data, pos);
                ll_unlock_md_op_lsm(op_data);
                sai->sai_in_readpage = 0;
                if (IS_ERR(page)) {
                        rc = PTR_ERR(page);
                        CDEBUG(D_READA,
                               "error reading dir "DFID" at %llu /%llu opendir_pid = %u: rc = %d\n",
                               PFID(ll_inode2fid(dir)), pos, sai->sai_index,
                               lli->lli_opendir_pid, rc);
                        break;
                }

                dp = page_address(page);
                for (ent = lu_dirent_start(dp);
                     ent != NULL && sai->sai_task &&
                     !sa_low_hit(sai);
                     ent = lu_dirent_next(ent)) {
                        __u64 hash;
                        int namelen;
                        char *name;
                        struct lu_fid fid;

                        hash = le64_to_cpu(ent->lde_hash);
                        if (unlikely(hash < pos))
                                /*
                                 * Skip until we find target hash value.
                                 */
                                continue;

                        namelen = le16_to_cpu(ent->lde_namelen);
                        if (unlikely(namelen == 0))
                                /*
                                 * Skip dummy record.
                                 */
                                continue;

                        name = ent->lde_name;
                        if (name[0] == '.') {
                                if (namelen == 1) {
                                        /*
                                         * skip "."
                                         */
                                        continue;
                                } else if (name[1] == '.' && namelen == 2) {
                                        /*
                                         * skip ".."
                                         */
                                        continue;
                                } else if (!sai->sai_ls_all) {
                                        /*
                                         * skip hidden files.
                                         */
                                        sai->sai_skip_hidden++;
                                        continue;
                                }
                        }

                        /*
                         * don't stat-ahead the first entry.
                         */
                        if (unlikely(++first == 1))
                                continue;

                        fid_le_to_cpu(&fid, &ent->lde_fid);

                        while (({set_current_state(TASK_IDLE);
                                 sai->sai_task; })) {
                                if (sa_has_callback(sai)) {
                                        __set_current_state(TASK_RUNNING);
                                        sa_handle_callback(sai);
                                }

                                spin_lock(&lli->lli_agl_lock);
                                while (sa_sent_full(sai) &&
                                       !agl_list_empty(sai)) {
                                        struct ll_inode_info *clli;

                                        __set_current_state(TASK_RUNNING);
                                        clli = agl_first_entry(sai);
                                        list_del_init(&clli->lli_agl_list);
                                        spin_unlock(&lli->lli_agl_lock);

                                        ll_agl_trigger(&clli->lli_vfs_inode,
                                                       sai);
                                        cond_resched();
                                        spin_lock(&lli->lli_agl_lock);
                                }
                                spin_unlock(&lli->lli_agl_lock);

                                if (!sa_sent_full(sai))
                                        break;
                                schedule();
                        }
                        __set_current_state(TASK_RUNNING);

                        sa_statahead(parent, name, namelen, &fid);
                }

                pos = le64_to_cpu(dp->ldp_hash_end);
                ll_release_page(dir, page,
                                le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);

                if (sa_low_hit(sai)) {
                        rc = -EFAULT;
                        atomic_inc(&sbi->ll_sa_wrong);
                        CDEBUG(D_READA,
                               "Statahead for dir "DFID" hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n",
                               PFID(&lli->lli_fid), sai->sai_hit,
                               sai->sai_miss, sai->sai_sent,
                               sai->sai_replied, current->pid);
                        break;
                }
        }
        ll_finish_md_op_data(op_data);

        if (rc < 0) {
                spin_lock(&lli->lli_sa_lock);
                sai->sai_task = NULL;
                lli->lli_sa_enabled = 0;
                spin_unlock(&lli->lli_sa_lock);
        }

        /*
         * statahead is finished, but statahead entries need to be cached, wait
         * for file release to stop me.
         */
        while (({set_current_state(TASK_IDLE);
                 sai->sai_task; })) {
                if (sa_has_callback(sai)) {
                        __set_current_state(TASK_RUNNING);
                        sa_handle_callback(sai);
                } else {
                        schedule();
                }
        }
        __set_current_state(TASK_RUNNING);

        EXIT;
out:
        ll_stop_agl(sai);

        /*
         * wait for inflight statahead RPCs to finish, and then we can free sai
         * safely because statahead RPCs will access sai data
         */
        while (sai->sai_sent != sai->sai_replied)
                /* in case we're not woken up, timeout wait */
                msleep(125);

        /* release resources held by statahead RPCs */
        sa_handle_callback(sai);

        CDEBUG(D_READA, "%s: statahead thread stopped: sai %p, parent %pd\n",
               sbi->ll_fsname, sai, parent);

        spin_lock(&lli->lli_sa_lock);
        sai->sai_task = NULL;
        spin_unlock(&lli->lli_sa_lock);
        wake_up(&sai->sai_waitq);

        ll_sai_put(sai);

        return rc;
}

/* authorize opened dir handle @key to statahead */
void ll_authorize_statahead(struct inode *dir, void *key)
{
        struct ll_inode_info *lli = ll_i2info(dir);

        spin_lock(&lli->lli_sa_lock);
        if (!lli->lli_opendir_key && !lli->lli_sai) {
                /*
                 * if lli_sai is not NULL, it means the previous statahead is
                 * not finished yet; we'd better not start a new statahead
                 * for now.
                 */
                LASSERT(lli->lli_opendir_pid == 0);
                lli->lli_opendir_key = key;
                lli->lli_opendir_pid = current->pid;
                lli->lli_sa_enabled = 1;
        }
        spin_unlock(&lli->lli_sa_lock);
}

/*
 * deauthorize opened dir handle @key to statahead, and notify the statahead
 * thread to quit if it's running.
 */
void ll_deauthorize_statahead(struct inode *dir, void *key)
{
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_statahead_info *sai;

        LASSERT(lli->lli_opendir_key == key);
        LASSERT(lli->lli_opendir_pid != 0);

        CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
               PFID(&lli->lli_fid));

        spin_lock(&lli->lli_sa_lock);
        lli->lli_opendir_key = NULL;
        lli->lli_opendir_pid = 0;
        lli->lli_sa_enabled = 0;
        sai = lli->lli_sai;
        if (sai && sai->sai_task) {
                /*
                 * statahead thread may not have quit yet because it needs to
                 * cache entries, now it's time to tell it to quit.
                 *
                 * wake_up_process() provides the necessary barriers
                 * to pair with set_current_state().
                 */
                struct task_struct *task = sai->sai_task;

                sai->sai_task = NULL;
                wake_up_process(task);
        }
        spin_unlock(&lli->lli_sa_lock);
}

enum {
        /**
         * not first dirent, or is "."
         */
        LS_NOT_FIRST_DE = 0,
        /**
         * the first non-hidden dirent
         */
        LS_FIRST_DE,
        /**
         * the first hidden dirent, that is "."
         */
        LS_FIRST_DOT_DE
};

/* file is first dirent under @dir */
static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
        struct qstr *target = &dentry->d_name;
        struct md_op_data *op_data;
        int dot_de;
        struct page *page = NULL;
        int rc = LS_NOT_FIRST_DE;
        __u64 pos = 0;

        ENTRY;

        op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
                                     LUSTRE_OPC_ANY, dir);
        if (IS_ERR(op_data))
                RETURN(PTR_ERR(op_data));
        /**
         * FIXME: choose the start offset of the readdir
         */

        page = ll_get_dir_page(dir, op_data, 0);

        while (1) {
                struct lu_dirpage *dp;
                struct lu_dirent *ent;

                if (IS_ERR(page)) {
                        struct ll_inode_info *lli = ll_i2info(dir);

                        rc = PTR_ERR(page);
                        CERROR("%s: reading dir "DFID" at %llu opendir_pid = %u : rc = %d\n",
                               ll_i2sbi(dir)->ll_fsname,
                               PFID(ll_inode2fid(dir)), pos,
                               lli->lli_opendir_pid, rc);
                        break;
                }

                dp = page_address(page);
                for (ent = lu_dirent_start(dp); ent != NULL;
                     ent = lu_dirent_next(ent)) {
                        __u64 hash;
                        int namelen;
                        char *name;

                        hash = le64_to_cpu(ent->lde_hash);
                        /*
                         * The ll_get_dir_page() can return any page containing
                         * the given hash which may be not the start hash.
                         */
                        if (unlikely(hash < pos))
                                continue;

                        namelen = le16_to_cpu(ent->lde_namelen);
                        if (unlikely(namelen == 0))
                                /*
                                 * skip dummy record.
                                 */
                                continue;

                        name = ent->lde_name;
                        if (name[0] == '.') {
                                if (namelen == 1)
                                        /*
                                         * skip "."
                                         */
                                        continue;
                                else if (name[1] == '.' && namelen == 2)
                                        /*
                                         * skip ".."
                                         */
                                        continue;
                                else
                                        dot_de = 1;
                        } else {
                                dot_de = 0;
                        }

                        if (dot_de && target->name[0] != '.') {
                                CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
                                       target->len, target->name,
                                       namelen, name);
                                continue;
                        }

                        if (target->len != namelen ||
                            memcmp(target->name, name, namelen) != 0)
                                rc = LS_NOT_FIRST_DE;
                        else if (!dot_de)
                                rc = LS_FIRST_DE;
                        else
                                rc = LS_FIRST_DOT_DE;

                        ll_release_page(dir, page, false);
                        GOTO(out, rc);
                }
                pos = le64_to_cpu(dp->ldp_hash_end);
                if (pos == MDS_DIR_END_OFF) {
                        /*
                         * End of directory reached.
                         */
                        ll_release_page(dir, page, false);
                        GOTO(out, rc);
                } else {
                        /*
                         * chain is exhausted
                         * Normal case: continue to the next page.
                         */
                        ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
                                        LDF_COLLIDE);
                        page = ll_get_dir_page(dir, op_data, pos);
                }
        }
        EXIT;
out:
        ll_finish_md_op_data(op_data);

        return rc;
}

/**
 * revalidate @dentryp from statahead cache
 *
 * \param[in]  dir      parent directory
 * \param[in]  sai      sai structure
 * \param[out] dentryp  pointer to dentry which will be revalidated
 * \param[in]  unplug   unplug statahead window only (normally for negative
 *                      dentry)
 * \retval              1 on success, dentry is saved in @dentryp
 * \retval              0 if revalidation failed (no proper lock on client)
 * \retval              negative number upon error
 */
static int revalidate_statahead_dentry(struct inode *dir,
                                       struct ll_statahead_info *sai,
                                       struct dentry **dentryp,
                                       bool unplug)
{
        struct sa_entry *entry = NULL;
        struct ll_dentry_data *ldd;
        struct ll_inode_info *lli = ll_i2info(dir);
        int rc = 0;

        ENTRY;

        if ((*dentryp)->d_name.name[0] == '.') {
                if (sai->sai_ls_all ||
                    sai->sai_miss_hidden >= sai->sai_skip_hidden) {
                        /*
                         * Hidden dentry is the first one, or statahead
                         * thread does not skip so many hidden dentries
                         * before "sai_ls_all" enabled as below.
                         */
                } else {
                        if (!sai->sai_ls_all)
                                /*
                                 * It may be because the hidden dentry is
                                 * not the first one, "sai_ls_all" was not
                                 * set, then "ls -al" missed. Enable
                                 * "sai_ls_all" for such a case.
                                 */
                                sai->sai_ls_all = 1;

                        /*
                         * Such "getattr" has been skipped before
                         * "sai_ls_all" enabled as above.
                         */
                        sai->sai_miss_hidden++;
                        RETURN(-EAGAIN);
                }
        }

        if (unplug)
                GOTO(out, rc = 1);

        entry = sa_get(sai, &(*dentryp)->d_name);
        if (!entry)
                GOTO(out, rc = -EAGAIN);

        /* if statahead is busy in readdir, help it do post-work */
        if (!sa_ready(entry) && sai->sai_in_readpage)
                sa_handle_callback(sai);

        if (!sa_ready(entry)) {
                spin_lock(&lli->lli_sa_lock);
                sai->sai_index_wait = entry->se_index;
                spin_unlock(&lli->lli_sa_lock);
                rc = wait_event_idle_timeout(sai->sai_waitq, sa_ready(entry),
                                             cfs_time_seconds(30));
                if (rc == 0) {
                        /*
                         * entry may not be ready, so it may be used by
                         * inflight statahead RPC, don't free it.
                         */
                        entry = NULL;
                        GOTO(out, rc = -EAGAIN);
                }
        }

        /*
         * We need to see the value that was set immediately before we
         * were woken up.
         */
        if (smp_load_acquire(&entry->se_state) == SA_ENTRY_SUCC &&
            entry->se_inode) {
                struct inode *inode = entry->se_inode;
                struct lookup_intent it = { .it_op = IT_GETATTR,
                                            .it_lock_handle =
                                                entry->se_handle };
                __u64 bits;

                rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
                                        ll_inode2fid(inode), &bits);
                if (rc == 1) {
                        if (!(*dentryp)->d_inode) {
                                struct dentry *alias;

                                alias = ll_splice_alias(inode, *dentryp);
                                if (IS_ERR(alias)) {
                                        ll_intent_release(&it);
                                        GOTO(out, rc = PTR_ERR(alias));
                                }
                                *dentryp = alias;
                                /*
                                 * statahead prepared this inode, transfer
                                 * inode refcount from sa_entry to dentry
                                 */
                                entry->se_inode = NULL;
                        } else if ((*dentryp)->d_inode != inode) {
                                /* revalidate, but inode is recreated */
                                CDEBUG(D_READA,
                                       "%s: stale dentry %pd inode " DFID", statahead inode "DFID "\n",
                                       ll_i2sbi(inode)->ll_fsname, *dentryp,
                                       PFID(ll_inode2fid((*dentryp)->d_inode)),
                                       PFID(ll_inode2fid(inode)));
                                ll_intent_release(&it);
                                GOTO(out, rc = -ESTALE);
                        }

                        if ((bits & MDS_INODELOCK_LOOKUP) &&
                            d_lustre_invalid(*dentryp))
                                d_lustre_revalidate(*dentryp);
                        ll_intent_release(&it);
                }
        }
out:
        /*
         * statahead cached sa_entry can be used only once, and will be killed
         * right after use, so if lookup/revalidate accessed the statahead
         * cache, set dentry ldd_sa_generation to parent lli_sa_generation;
         * later if we stat this file again, we know we've done statahead
         * before, see dentry_may_statahead().
         */
        ldd = ll_d2d(*dentryp);
        /* ldd can be NULL if llite lookup failed. */
        if (ldd)
                ldd->lld_sa_generation = lli->lli_sa_generation;
        sa_put(sai, entry);
        spin_lock(&lli->lli_sa_lock);
        if (sai->sai_task)
                wake_up_process(sai->sai_task);
        spin_unlock(&lli->lli_sa_lock);

        RETURN(rc);
}

/**
 * start statahead thread
 *
 * \param[in] dir       parent directory
 * \param[in] dentry    dentry that triggers statahead, normally the first
 *                      dirent under @dir
 * \param[in] agl       indicate whether AGL is needed
 * \retval              -EAGAIN on success, because when this function is
 *                      called, it's already in lookup call, so client should
 *                      do it itself instead of waiting for statahead thread
 *                      to do it asynchronously.
 * \retval              negative number upon error
 */
static int start_statahead_thread(struct inode *dir, struct dentry *dentry,
                                  bool agl)
{
        int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_statahead_info *sai = NULL;
        struct dentry *parent = dentry->d_parent;
        struct task_struct *task;
        struct ll_sb_info *sbi = ll_i2sbi(parent->d_inode);
        int first = LS_FIRST_DE;
        int rc = 0;

        ENTRY;

        /* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
        first = is_first_dirent(dir, dentry);
        if (first == LS_NOT_FIRST_DE)
                /* It is not "ls -{a}l" operation, no need statahead for it. */
                GOTO(out, rc = -EFAULT);

        if (unlikely(atomic_inc_return(&sbi->ll_sa_running) >
                     sbi->ll_sa_running_max)) {
                CDEBUG(D_READA,
                       "Too many concurrent statahead instances, avoid new statahead instance temporarily.\n");
                GOTO(out, rc = -EMFILE);
        }

        sai = ll_sai_alloc(parent);
        if (!sai)
                GOTO(out, rc = -ENOMEM);

        sai->sai_ls_all = (first == LS_FIRST_DOT_DE);

        /*
         * if current lli_opendir_key was deauthorized, or dir re-opened by
         * another process, don't start statahead, otherwise the newly spawned
         * statahead thread won't be notified to quit.
         */
        spin_lock(&lli->lli_sa_lock);
        if (unlikely(lli->lli_sai || !lli->lli_opendir_key ||
                     lli->lli_opendir_pid != current->pid)) {
                spin_unlock(&lli->lli_sa_lock);
                GOTO(out, rc = -EPERM);
        }
        lli->lli_sai = sai;
        spin_unlock(&lli->lli_sa_lock);

        CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %pd]\n",
               current->pid, parent);

        task = kthread_create_on_node(ll_statahead_thread, parent, node,
                                      "ll_sa_%u", lli->lli_opendir_pid);
        if (IS_ERR(task)) {
                spin_lock(&lli->lli_sa_lock);
                lli->lli_sai = NULL;
                spin_unlock(&lli->lli_sa_lock);
                rc = PTR_ERR(task);
                CERROR("can't start ll_sa thread, rc: %d\n", rc);
                GOTO(out, rc);
        }

        if (ll_i2sbi(parent->d_inode)->ll_flags & LL_SBI_AGL_ENABLED && agl)
                ll_start_agl(parent, sai);

        atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_total);
        sai->sai_task = task;

        wake_up_process(task);
        /*
         * We don't stat-ahead for the first dirent since we are already in
         * lookup.
         */
        RETURN(-EAGAIN);

out:
        /*
         * once starting the statahead thread has failed, disable statahead
         * so that subsequent stats won't waste time trying it again.
         */
        spin_lock(&lli->lli_sa_lock);
        if (lli->lli_opendir_pid == current->pid)
                lli->lli_sa_enabled = 0;
        spin_unlock(&lli->lli_sa_lock);

        if (sai)
                ll_sai_put(sai);
        if (first != LS_NOT_FIRST_DE)
                atomic_dec(&sbi->ll_sa_running);

        RETURN(rc);
}

/*
 * Check whether statahead for @dir was started.
 */
static inline bool ll_statahead_started(struct inode *dir, bool agl)
{
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_statahead_info *sai;

        spin_lock(&lli->lli_sa_lock);
        sai = lli->lli_sai;
        if (sai && (sai->sai_agl_task != NULL) != agl)
                CDEBUG(D_READA,
                       "%s: Statahead AGL hint changed from %d to %d\n",
                       ll_i2sbi(dir)->ll_fsname,
                       sai->sai_agl_task != NULL, agl);
        spin_unlock(&lli->lli_sa_lock);

        return !!sai;
}

/**
 * statahead entry function; this is called when the client does getattr on a
 * file: it will start the statahead thread if this is the first dir entry,
 * otherwise revalidate the dentry from the statahead cache.
 *
 * \param[in] dir       parent directory
 * \param[in] dentry    dentry to getattr
 * \param[in] agl       whether to start the agl thread
 *
 * \retval              1 on success
 * \retval              0 revalidation from statahead cache failed, caller
 *                      needs to getattr from server directly
 * \retval              negative number on error, caller often ignores this
 *                      and then getattrs from server
 */
int ll_start_statahead(struct inode *dir, struct dentry *dentry, bool agl)
{
        if (!ll_statahead_started(dir, agl))
                return start_statahead_thread(dir, dentry, agl);
        return 0;
}

/**
 * revalidate dentry from statahead cache.
 *
 * \param[in]  dir      parent directory
 * \param[out] dentryp  dentry to getattr
 * \param[in]  unplug   unplug statahead window only (normally for negative
 *                      dentry)
 * \retval              1 on success
 * \retval              0 revalidation from statahead cache failed, caller
 *                      needs to getattr from server directly
 * \retval              negative number on error, caller often ignores this
 *                      and then getattrs from server
 */
int ll_revalidate_statahead(struct inode *dir, struct dentry **dentryp,
                            bool unplug)
{
        struct ll_statahead_info *sai;
        int rc = 0;

        sai = ll_sai_get(dir);
        if (sai) {
                rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
                CDEBUG(D_READA, "revalidate statahead %pd: rc = %d.\n",