LU-9441 llite: bind kthread thread to accepted node set
[fs/lustre-release.git] / lustre / llite / statahead.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 #include <linux/fs.h>
34 #include <linux/sched.h>
35 #include <linux/kthread.h>
36 #include <linux/mm.h>
37 #include <linux/highmem.h>
38 #include <linux/pagemap.h>
39 #include <linux/delay.h>
40
41 #define DEBUG_SUBSYSTEM S_LLITE
42
43 #include <obd_support.h>
44 #include <lustre_dlm.h>
45 #include "llite_internal.h"
46
47 #define SA_OMITTED_ENTRY_MAX 8ULL
48
49 typedef enum {
50         /** negative values are for error cases */
51         SA_ENTRY_INIT = 0,      /** init entry */
52         SA_ENTRY_SUCC = 1,      /** stat succeed */
53         SA_ENTRY_INVA = 2,      /** invalid entry */
54 } se_state_t;
55
56 /*
57  * sa_entry is not refcounted: the statahead thread allocates it and issues
58  * the async stat; the async stat callback ll_statahead_interpret() adds it
59  * to sai_interim_entries; later the statahead thread calls sa_handle_callback()
60  * to instantiate the entry and move it to sai_entries, after which only the
61  * scanner process can access and free it.
62  */
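/*
 * Summary of the flow as implemented by the functions below: sa_alloc()
 * creates and hashes the entry, sa_statahead() issues the async stat,
 * ll_statahead_interpret() queues the reply on sai_interim_entries,
 * sa_handle_callback()/sa_instantiate() move it to sai_entries, and the
 * scanner finally drops it via sa_put() -> sa_kill() -> sa_free().
 */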
63 struct sa_entry {
64         /* link into sai_interim_entries or sai_entries */
65         struct list_head        se_list;
66         /* link into sai hash table locally */
67         struct list_head        se_hash;
68         /* entry index in the sai */
69         __u64                   se_index;
70         /* low layer ldlm lock handle */
71         __u64                   se_handle;
72         /* entry status */
73         se_state_t              se_state;
74         /* entry size, contains name */
75         int                     se_size;
76         /* pointer to async getattr enqueue info */
77         struct md_enqueue_info *se_minfo;
78         /* pointer to the async getattr request */
79         struct ptlrpc_request  *se_req;
80         /* pointer to the target inode */
81         struct inode           *se_inode;
82         /* entry name */
83         struct qstr             se_qstr;
84         /* entry fid */
85         struct lu_fid           se_fid;
86 };
87
88 static unsigned int sai_generation;
89 static DEFINE_SPINLOCK(sai_generation_lock);
90
91 static inline int sa_unhashed(struct sa_entry *entry)
92 {
93         return list_empty(&entry->se_hash);
94 }
95
96 /* sa_entry is ready to use */
97 static inline int sa_ready(struct sa_entry *entry)
98 {
99         /* Make sure sa_entry is updated and ready to use */
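        /*
         * Note: this read barrier pairs with the smp_store_release() in
         * __sa_make_ready(); revalidate_statahead_dentry() uses
         * smp_load_acquire() on se_state for the same reason.
         */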
100         smp_rmb();
101         return (entry->se_state != SA_ENTRY_INIT);
102 }
103
104 /* hash value to put in sai_cache */
105 static inline int sa_hash(int val)
106 {
107         return val & LL_SA_CACHE_MASK;
108 }
109
110 /* hash entry into sai_cache */
111 static inline void
112 sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
113 {
114         int i = sa_hash(entry->se_qstr.hash);
115
116         spin_lock(&sai->sai_cache_lock[i]);
117         list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
118         spin_unlock(&sai->sai_cache_lock[i]);
119 }
120
121 /* unhash entry from sai_cache */
122 static inline void
123 sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
124 {
125         int i = sa_hash(entry->se_qstr.hash);
126
127         spin_lock(&sai->sai_cache_lock[i]);
128         list_del_init(&entry->se_hash);
129         spin_unlock(&sai->sai_cache_lock[i]);
130 }
131
132 static inline int agl_should_run(struct ll_statahead_info *sai,
133                                  struct inode *inode)
134 {
135         return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
136 }
137
138 static inline struct ll_inode_info *
139 agl_first_entry(struct ll_statahead_info *sai)
140 {
141         return list_entry(sai->sai_agls.next, struct ll_inode_info,
142                           lli_agl_list);
143 }
144
145 /* statahead window is full */
146 static inline int sa_sent_full(struct ll_statahead_info *sai)
147 {
148         return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
149 }
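/*
 * Note: sai_max starts at LL_SA_RPC_MIN (see ll_sai_alloc()) and is doubled on
 * each hit up to sbi->ll_sa_max (see sa_put()), so the window tested above
 * grows while the scanner keeps hitting prefetched entries.
 */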
150
151 /* got async stat replies */
152 static inline int sa_has_callback(struct ll_statahead_info *sai)
153 {
154         return !list_empty(&sai->sai_interim_entries);
155 }
156
157 static inline int agl_list_empty(struct ll_statahead_info *sai)
158 {
159         return list_empty(&sai->sai_agls);
160 }
161
162 /**
163  * Statahead is considered a low hit when either
164  * (1) the hit ratio is less than 80%, or
165  * (2) there have been more than 8 consecutive misses;
166  * either condition means statahead is not paying off.
167  */
168 static inline int sa_low_hit(struct ll_statahead_info *sai)
169 {
170         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
171                 (sai->sai_consecutive_miss > 8));
172 }
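/*
 * Worked example: 10 hits and 3 misses is a hit ratio of roughly 77%; since
 * 10 > 7 and 10 < 4 * 3, the first condition fires and the hit rate is
 * considered low.
 */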
173
174 /*
175  * If the given index lags behind the statahead window by more than
176  * SA_OMITTED_ENTRY_MAX entries, it is considered old.
177  */
178 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
179 {
180         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
181                 sai->sai_index);
182 }
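/*
 * Worked example: with sai_max == 32, an entry at index 100 is treated as
 * omitted once sai_index exceeds 32 + 100 + SA_OMITTED_ENTRY_MAX == 140.
 */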
183
184 /* allocate sa_entry and hash it to allow scanner process to find it */
185 static struct sa_entry *
186 sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index,
187          const char *name, int len, const struct lu_fid *fid)
188 {
189         struct ll_inode_info *lli;
190         struct sa_entry *entry;
191         int entry_size;
192         char *dname;
193
194         ENTRY;
195
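        /*
         * (len & ~3) + 4 rounds the name buffer, including the trailing NUL
         * stored below, up to the next 4-byte boundary (e.g. len == 5 reserves
         * 8 bytes).
         */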
196         entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
197         OBD_ALLOC(entry, entry_size);
198         if (unlikely(!entry))
199                 RETURN(ERR_PTR(-ENOMEM));
200
201         CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
202                len, name, entry, index);
203
204         entry->se_index = index;
205
206         entry->se_state = SA_ENTRY_INIT;
207         entry->se_size = entry_size;
208         dname = (char *)entry + sizeof(struct sa_entry);
209         memcpy(dname, name, len);
210         dname[len] = 0;
211         entry->se_qstr.hash = ll_full_name_hash(parent, name, len);
212         entry->se_qstr.len = len;
213         entry->se_qstr.name = dname;
214         entry->se_fid = *fid;
215
216         lli = ll_i2info(sai->sai_dentry->d_inode);
217
218         spin_lock(&lli->lli_sa_lock);
219         INIT_LIST_HEAD(&entry->se_list);
220         sa_rehash(sai, entry);
221         spin_unlock(&lli->lli_sa_lock);
222
223         atomic_inc(&sai->sai_cache_count);
224
225         RETURN(entry);
226 }
227
228 /* free sa_entry, which should have been unhashed and not in any list */
229 static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
230 {
231         CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
232                entry->se_qstr.len, entry->se_qstr.name, entry,
233                entry->se_index);
234
235         LASSERT(list_empty(&entry->se_list));
236         LASSERT(sa_unhashed(entry));
237
238         OBD_FREE(entry, entry->se_size);
239         atomic_dec(&sai->sai_cache_count);
240 }
241
242 /*
243  * find sa_entry by name, used by directory scanner, lock is not needed because
244  * only scanner can remove the entry from cache.
245  */
246 static struct sa_entry *
247 sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
248 {
249         struct sa_entry *entry;
250         int i = sa_hash(qstr->hash);
251
252         list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
253                 if (entry->se_qstr.hash == qstr->hash &&
254                     entry->se_qstr.len == qstr->len &&
255                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
256                         return entry;
257         }
258         return NULL;
259 }
260
261 /* unhash and unlink sa_entry, and then free it */
262 static inline void
263 sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
264 {
265         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
266
267         LASSERT(!sa_unhashed(entry));
268         LASSERT(!list_empty(&entry->se_list));
269         LASSERT(sa_ready(entry));
270
271         sa_unhash(sai, entry);
272
273         spin_lock(&lli->lli_sa_lock);
274         list_del_init(&entry->se_list);
275         spin_unlock(&lli->lli_sa_lock);
276
277         if (entry->se_inode)
278                 iput(entry->se_inode);
279
280         sa_free(sai, entry);
281 }
282
283 /* called by scanner after use, sa_entry will be killed */
284 static void
285 sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
286 {
287         struct sa_entry *tmp, *next;
288
289         if (entry && entry->se_state == SA_ENTRY_SUCC) {
290                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
291
292                 sai->sai_hit++;
293                 sai->sai_consecutive_miss = 0;
294                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
295         } else {
296                 sai->sai_miss++;
297                 sai->sai_consecutive_miss++;
298         }
299
300         if (entry)
301                 sa_kill(sai, entry);
302
303         /*
304          * kill old completed entries, only scanner process does this, no need
305          * to lock
306          */
307         list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
308                 if (!is_omitted_entry(sai, tmp->se_index))
309                         break;
310                 sa_kill(sai, tmp);
311         }
312 }
313
314 /*
315  * Update the entry state and insert it into sai_entries sorted by index;
316  * return true if the scanner is waiting on this entry.
317  */
318 static bool
319 __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
320 {
321         struct sa_entry *se;
322         struct list_head *pos = &sai->sai_entries;
323         __u64 index = entry->se_index;
324
325         LASSERT(!sa_ready(entry));
326         LASSERT(list_empty(&entry->se_list));
327
328         list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
329                 if (se->se_index < entry->se_index) {
330                         pos = &se->se_list;
331                         break;
332                 }
333         }
334         list_add(&entry->se_list, pos);
335         /*
336          * LU-9210: ll_statahead_interpret must be able to see this before
337          * we wake it up
338          */
339         smp_store_release(&entry->se_state,
340                           ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
341
342         return (index == sai->sai_index_wait);
343 }
344
345 /* finish async stat RPC arguments */
346 static void sa_fini_data(struct md_enqueue_info *minfo)
347 {
348         ll_unlock_md_op_lsm(&minfo->mi_data);
349         iput(minfo->mi_dir);
350         OBD_FREE_PTR(minfo);
351 }
352
353 static int ll_statahead_interpret(struct ptlrpc_request *req,
354                                   struct md_enqueue_info *minfo, int rc);
355
356 /*
357  * prepare arguments for async stat RPC.
358  */
359 static struct md_enqueue_info *
360 sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry)
361 {
362         struct md_enqueue_info   *minfo;
363         struct ldlm_enqueue_info *einfo;
364         struct md_op_data        *op_data;
365
366         OBD_ALLOC_PTR(minfo);
367         if (!minfo)
368                 return ERR_PTR(-ENOMEM);
369
370         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child,
371                                      entry->se_qstr.name, entry->se_qstr.len, 0,
372                                      LUSTRE_OPC_ANY, NULL);
373         if (IS_ERR(op_data)) {
374                 OBD_FREE_PTR(minfo);
375                 return (struct md_enqueue_info *)op_data;
376         }
377
378         if (!child)
379                 op_data->op_fid2 = entry->se_fid;
380
381         minfo->mi_it.it_op = IT_GETATTR;
382         minfo->mi_dir = igrab(dir);
383         minfo->mi_cb = ll_statahead_interpret;
384         minfo->mi_cbdata = entry;
385
386         einfo = &minfo->mi_einfo;
387         einfo->ei_type   = LDLM_IBITS;
388         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
389         einfo->ei_cb_bl  = ll_md_blocking_ast;
390         einfo->ei_cb_cp  = ldlm_completion_ast;
391         einfo->ei_cb_gl  = NULL;
392         einfo->ei_cbdata = NULL;
393
394         return minfo;
395 }
396
397 /*
398  * Release resources used by the async stat RPC, update the entry state, and
399  * wake up the scanner process if it is waiting on this entry.
400  */
401 static void
402 sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
403 {
404         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
405         struct md_enqueue_info *minfo = entry->se_minfo;
406         struct ptlrpc_request *req = entry->se_req;
407         bool wakeup;
408
409         /* release resources used in RPC */
410         if (minfo) {
411                 entry->se_minfo = NULL;
412                 ll_intent_release(&minfo->mi_it);
413                 sa_fini_data(minfo);
414         }
415
416         if (req) {
417                 entry->se_req = NULL;
418                 ptlrpc_req_finished(req);
419         }
420
421         spin_lock(&lli->lli_sa_lock);
422         wakeup = __sa_make_ready(sai, entry, ret);
423         spin_unlock(&lli->lli_sa_lock);
424
425         if (wakeup)
426                 wake_up(&sai->sai_waitq);
427 }
428
429 /* insert inode into the list of sai_agls */
430 static void ll_agl_add(struct ll_statahead_info *sai,
431                        struct inode *inode, int index)
432 {
433         struct ll_inode_info *child  = ll_i2info(inode);
434         struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);
435         int                   added  = 0;
436
437         spin_lock(&child->lli_agl_lock);
438         if (child->lli_agl_index == 0) {
439                 child->lli_agl_index = index;
440                 spin_unlock(&child->lli_agl_lock);
441
442                 LASSERT(list_empty(&child->lli_agl_list));
443
444                 igrab(inode);
445                 spin_lock(&parent->lli_agl_lock);
446                 if (agl_list_empty(sai))
447                         added = 1;
448                 list_add_tail(&child->lli_agl_list, &sai->sai_agls);
449                 if (added && sai->sai_agl_task)
450                         wake_up_process(sai->sai_agl_task);
451                 spin_unlock(&parent->lli_agl_lock);
452         } else {
453                 spin_unlock(&child->lli_agl_lock);
454         }
455 }
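/*
 * Note: only the transition of sai_agls from empty to non-empty wakes the AGL
 * thread above; if entries are already queued, the thread is presumed awake or
 * about to re-check the list in ll_agl_thread().
 */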
456
457 /* allocate sai */
458 static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
459 {
460         struct ll_statahead_info *sai;
461         struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
462         int i;
463
464         ENTRY;
465
466         OBD_ALLOC_PTR(sai);
467         if (!sai)
468                 RETURN(NULL);
469
470         sai->sai_dentry = dget(dentry);
471         atomic_set(&sai->sai_refcount, 1);
472         sai->sai_max = LL_SA_RPC_MIN;
473         sai->sai_index = 1;
474         init_waitqueue_head(&sai->sai_waitq);
475
476         INIT_LIST_HEAD(&sai->sai_interim_entries);
477         INIT_LIST_HEAD(&sai->sai_entries);
478         INIT_LIST_HEAD(&sai->sai_agls);
479
480         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
481                 INIT_LIST_HEAD(&sai->sai_cache[i]);
482                 spin_lock_init(&sai->sai_cache_lock[i]);
483         }
484         atomic_set(&sai->sai_cache_count, 0);
485
486         spin_lock(&sai_generation_lock);
487         lli->lli_sa_generation = ++sai_generation;
488         if (unlikely(sai_generation == 0))
489                 lli->lli_sa_generation = ++sai_generation;
490         spin_unlock(&sai_generation_lock);
491
492         RETURN(sai);
493 }
494
495 /* free sai */
496 static inline void ll_sai_free(struct ll_statahead_info *sai)
497 {
498         LASSERT(sai->sai_dentry != NULL);
499         dput(sai->sai_dentry);
500         OBD_FREE_PTR(sai);
501 }
502
503 /*
504  * take refcount of sai if sai for @dir exists, which means statahead is on for
505  * this directory.
506  */
507 static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
508 {
509         struct ll_inode_info *lli = ll_i2info(dir);
510         struct ll_statahead_info *sai = NULL;
511
512         spin_lock(&lli->lli_sa_lock);
513         sai = lli->lli_sai;
514         if (sai)
515                 atomic_inc(&sai->sai_refcount);
516         spin_unlock(&lli->lli_sa_lock);
517
518         return sai;
519 }
520
521 /*
522  * put sai refcount after use, if refcount reaches zero, free sai and sa_entries
523  * attached to it.
524  */
525 static void ll_sai_put(struct ll_statahead_info *sai)
526 {
527         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
528
529         if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
530                 struct sa_entry *entry, *next;
531                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
532
533                 lli->lli_sai = NULL;
534                 spin_unlock(&lli->lli_sa_lock);
535
536                 LASSERT(!sai->sai_task);
537                 LASSERT(!sai->sai_agl_task);
538                 LASSERT(sai->sai_sent == sai->sai_replied);
539                 LASSERT(!sa_has_callback(sai));
540
541                 list_for_each_entry_safe(entry, next, &sai->sai_entries,
542                                          se_list)
543                         sa_kill(sai, entry);
544
545                 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
546                 LASSERT(agl_list_empty(sai));
547
548                 ll_sai_free(sai);
549                 atomic_dec(&sbi->ll_sa_running);
550         }
551 }
552
553 /* Do NOT forget to drop the inode refcount taken when it was added to sai_agls. */
554 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
555 {
556         struct ll_inode_info *lli = ll_i2info(inode);
557         u64 index = lli->lli_agl_index;
558         ktime_t expire;
559         int rc;
560
561         ENTRY;
562
563         LASSERT(list_empty(&lli->lli_agl_list));
564
565         /* AGL may fall behind statahead by one entry */
566         if (is_omitted_entry(sai, index + 1)) {
567                 lli->lli_agl_index = 0;
568                 iput(inode);
569                 RETURN_EXIT;
570         }
571
572         /*
573          * In case of restore, the MDT has the right size and has already
574          * sent it back without granting the layout lock, so the inode is
575          * up-to-date and AGL (async glimpse lock) is useless.
576          * Also, a glimpse needs the layout; while a restore is running the
577          * MDT holds the layout lock, so the glimpse would block until the
578          * end of the restore (statahead/agl would block).
579          */
580         if (ll_file_test_flag(lli, LLIF_FILE_RESTORING)) {
581                 lli->lli_agl_index = 0;
582                 iput(inode);
583                 RETURN_EXIT;
584         }
585
586         /* Someone is in glimpse (sync or async), do nothing. */
587         rc = down_write_trylock(&lli->lli_glimpse_sem);
588         if (rc == 0) {
589                 lli->lli_agl_index = 0;
590                 iput(inode);
591                 RETURN_EXIT;
592         }
593
594         /*
595          * Someone triggered a glimpse within the last second.
596          * 1) The former glimpse succeeded with a glimpse lock granted by the
597          *    OST; if the lock is still cached on the client, AGL has nothing
598          *    to do. If it was cancelled by another client, AGL may be unable
599          *    to obtain a new lock since no glimpse callback is triggered by AGL.
600          * 2) The former glimpse succeeded, but the OST did not grant a glimpse
601          *    lock. In that case it is quite possible the OST will not grant a
602          *    glimpse lock for AGL either.
603          * 3) The former glimpse failed; compared with the other two cases this
604          *    is relatively rare. AGL can ignore it without much effect on
605          *    performance.
606          */
607         expire = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
608         if (ktime_to_ns(lli->lli_glimpse_time) &&
609             ktime_before(expire, lli->lli_glimpse_time)) {
610                 up_write(&lli->lli_glimpse_sem);
611                 lli->lli_agl_index = 0;
612                 iput(inode);
613                 RETURN_EXIT;
614         }
615
616         CDEBUG(D_READA,
617                "Handling (init) async glimpse: inode = " DFID", idx = %llu\n",
618                PFID(&lli->lli_fid), index);
619
620         cl_agl(inode);
621         lli->lli_agl_index = 0;
622         lli->lli_glimpse_time = ktime_get();
623         up_write(&lli->lli_glimpse_sem);
624
625         CDEBUG(D_READA,
626                "Handled (init) async glimpse: inode= " DFID", idx = %llu, rc = %d\n",
627                PFID(&lli->lli_fid), index, rc);
628
629         iput(inode);
630
631         EXIT;
632 }
633
634 /*
635  * Prepare the inode for the sa_entry and add it to the AGL list; after this
636  * the sa_entry is ready to be used by the scanner process.
637  */
638 static void sa_instantiate(struct ll_statahead_info *sai,
639                            struct sa_entry *entry)
640 {
641         struct inode *dir = sai->sai_dentry->d_inode;
642         struct inode *child;
643         struct md_enqueue_info *minfo;
644         struct lookup_intent *it;
645         struct ptlrpc_request *req;
646         struct mdt_body *body;
647         int rc = 0;
648
649         ENTRY;
650
651         LASSERT(entry->se_handle != 0);
652
653         minfo = entry->se_minfo;
654         it = &minfo->mi_it;
655         req = entry->se_req;
656         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
657         if (!body)
658                 GOTO(out, rc = -EFAULT);
659
660         child = entry->se_inode;
661         /* revalidate; unlinked and re-created with the same name */
662         if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->mbo_fid1))) {
663                 if (child) {
664                         entry->se_inode = NULL;
665                         iput(child);
666                 }
667                 /* The mdt_body is invalid. Skip this entry */
668                 GOTO(out, rc = -EAGAIN);
669         }
670
671         it->it_lock_handle = entry->se_handle;
672         rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
673         if (rc != 1)
674                 GOTO(out, rc = -EAGAIN);
675
676         rc = ll_prep_inode(&child, req, dir->i_sb, it);
677         if (rc)
678                 GOTO(out, rc);
679
680         CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
681                ll_i2sbi(dir)->ll_fsname, entry->se_qstr.len,
682                entry->se_qstr.name, PFID(ll_inode2fid(child)), child);
683         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
684
685         entry->se_inode = child;
686
687         if (agl_should_run(sai, child))
688                 ll_agl_add(sai, child, entry->se_index);
689
690         EXIT;
691
692 out:
693         /*
694          * sa_make_ready() will drop ldlm ibits lock refcount by calling
695          * ll_intent_drop_lock() in spite of failures. Do not worry about
696          * calling ll_intent_drop_lock() more than once.
697          */
698         sa_make_ready(sai, entry, rc);
699 }
700
701 /* once there are async stat replies, instantiate sa_entry from replies */
702 static void sa_handle_callback(struct ll_statahead_info *sai)
703 {
704         struct ll_inode_info *lli;
705
706         lli = ll_i2info(sai->sai_dentry->d_inode);
707
708         spin_lock(&lli->lli_sa_lock);
709         while (sa_has_callback(sai)) {
710                 struct sa_entry *entry;
711
712                 entry = list_entry(sai->sai_interim_entries.next,
713                                    struct sa_entry, se_list);
714                 list_del_init(&entry->se_list);
715                 spin_unlock(&lli->lli_sa_lock);
716
717                 sa_instantiate(sai, entry);
718                 spin_lock(&lli->lli_sa_lock);
719         }
720         spin_unlock(&lli->lli_sa_lock);
721 }
722
723 /*
724  * Callback for the async stat RPC. Because this runs in ptlrpcd context, we
725  * only queue the sa_entry on sai_interim_entries and wake up the statahead
726  * thread, which prepares the inode and instantiates the sa_entry later.
727  */
728 static int ll_statahead_interpret(struct ptlrpc_request *req,
729                                   struct md_enqueue_info *minfo, int rc)
730 {
731         struct lookup_intent *it = &minfo->mi_it;
732         struct inode *dir = minfo->mi_dir;
733         struct ll_inode_info *lli = ll_i2info(dir);
734         struct ll_statahead_info *sai = lli->lli_sai;
735         struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
736         __u64 handle = 0;
737
738         ENTRY;
739
740         if (it_disposition(it, DISP_LOOKUP_NEG))
741                 rc = -ENOENT;
742
743         /*
744          * because the statahead thread waits for all inflight RPCs to finish,
745          * sai is always valid here, so no refcount is needed
746          */
747         LASSERT(sai != NULL);
748         LASSERT(entry != NULL);
749
750         CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
751                entry->se_qstr.len, entry->se_qstr.name, rc);
752
753         if (rc != 0) {
754                 ll_intent_release(it);
755                 sa_fini_data(minfo);
756         } else {
757                 /*
758                  * release ibits lock ASAP to avoid deadlock when statahead
759                  * thread enqueues lock on parent in readdir and another
760                  * process enqueues lock on child with parent lock held, e.g.
761                  * unlink.
762                  */
763                 handle = it->it_lock_handle;
764                 ll_intent_drop_lock(it);
765                 ll_unlock_md_op_lsm(&minfo->mi_data);
766         }
767
768         spin_lock(&lli->lli_sa_lock);
769         if (rc != 0) {
770                 if (__sa_make_ready(sai, entry, rc))
771                         wake_up(&sai->sai_waitq);
772         } else {
773                 int first = 0;
774
775                 entry->se_minfo = minfo;
776                 entry->se_req = ptlrpc_request_addref(req);
777                 /*
778                  * Release the async ibits lock ASAP to avoid deadlock
779                  * when statahead thread tries to enqueue lock on parent
780                  * for readpage and another process tries to enqueue lock on child
781                  * with parent's lock held, for example: unlink.
782                  */
783                 entry->se_handle = handle;
784                 if (!sa_has_callback(sai))
785                         first = 1;
786
787                 list_add_tail(&entry->se_list, &sai->sai_interim_entries);
788                 if (first && sai->sai_task)
789                         wake_up_process(sai->sai_task);
790         }
791         sai->sai_replied++;
792
793         spin_unlock(&lli->lli_sa_lock);
794
795         RETURN(rc);
796 }
797
798 /* async stat for file not found in dcache */
799 static int sa_lookup(struct inode *dir, struct sa_entry *entry)
800 {
801         struct md_enqueue_info   *minfo;
802         int                       rc;
803
804         ENTRY;
805
806         minfo = sa_prep_data(dir, NULL, entry);
807         if (IS_ERR(minfo))
808                 RETURN(PTR_ERR(minfo));
809
810         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
811         if (rc < 0)
812                 sa_fini_data(minfo);
813
814         RETURN(rc);
815 }
816
817 /**
818  * async stat for file found in dcache, similar to .revalidate
819  *
820  * \retval      1 dentry valid, no RPC sent
821  * \retval      0 dentry invalid, will send async stat RPC
822  * \retval      negative number upon error
823  */
824 static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
825                          struct dentry *dentry)
826 {
827         struct inode *inode = dentry->d_inode;
828         struct lookup_intent it = { .it_op = IT_GETATTR,
829                                     .it_lock_handle = 0 };
830         struct md_enqueue_info *minfo;
831         int rc;
832
833         ENTRY;
834
835         if (unlikely(!inode))
836                 RETURN(1);
837
838         if (d_mountpoint(dentry))
839                 RETURN(1);
840
841         minfo = sa_prep_data(dir, inode, entry);
842         if (IS_ERR(minfo))
843                 RETURN(PTR_ERR(minfo));
844
845         entry->se_inode = igrab(inode);
846         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
847                                 NULL);
848         if (rc == 1) {
849                 entry->se_handle = it.it_lock_handle;
850                 ll_intent_release(&it);
851                 sa_fini_data(minfo);
852                 RETURN(1);
853         }
854
855         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
856         if (rc < 0) {
857                 entry->se_inode = NULL;
858                 iput(inode);
859                 sa_fini_data(minfo);
860         }
861
862         RETURN(rc);
863 }
864
865 /* async stat for file with @name */
866 static void sa_statahead(struct dentry *parent, const char *name, int len,
867                          const struct lu_fid *fid)
868 {
869         struct inode *dir = parent->d_inode;
870         struct ll_inode_info *lli = ll_i2info(dir);
871         struct ll_statahead_info *sai = lli->lli_sai;
872         struct dentry *dentry = NULL;
873         struct sa_entry *entry;
874         int rc;
875
876         ENTRY;
877
878         entry = sa_alloc(parent, sai, sai->sai_index, name, len, fid);
879         if (IS_ERR(entry))
880                 RETURN_EXIT;
881
882         dentry = d_lookup(parent, &entry->se_qstr);
883         if (!dentry) {
884                 rc = sa_lookup(dir, entry);
885         } else {
886                 rc = sa_revalidate(dir, entry, dentry);
887                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
888                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
889         }
890
891         if (dentry)
892                 dput(dentry);
893
894         if (rc != 0)
895                 sa_make_ready(sai, entry, rc);
896         else
897                 sai->sai_sent++;
898
899         sai->sai_index++;
900
901         EXIT;
902 }
903
904 /* async glimpse (agl) thread main function */
905 static int ll_agl_thread(void *arg)
906 {
907         struct dentry *parent = (struct dentry *)arg;
908         struct inode *dir = parent->d_inode;
909         struct ll_inode_info *plli = ll_i2info(dir);
910         struct ll_inode_info *clli;
911         /*
912          * We already own this reference, so it is safe to take it
913          * without a lock.
914          */
915         struct ll_statahead_info *sai = plli->lli_sai;
916
917         ENTRY;
918
919         CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
920                sai, parent);
921
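        /*
         * Setting TASK_IDLE before testing kthread_should_stop() and the AGL
         * list means a wake_up_process() from ll_agl_add() or kthread_stop()
         * cannot be lost between the check and schedule() below.
         */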
922         while (({set_current_state(TASK_IDLE);
923                  !kthread_should_stop(); })) {
924                 spin_lock(&plli->lli_agl_lock);
925                 if (!agl_list_empty(sai)) {
926                         __set_current_state(TASK_RUNNING);
927                         clli = agl_first_entry(sai);
928                         list_del_init(&clli->lli_agl_list);
929                         spin_unlock(&plli->lli_agl_lock);
930                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
931                         cond_resched();
932                 } else {
933                         spin_unlock(&plli->lli_agl_lock);
934                         schedule();
935                 }
936         }
937         __set_current_state(TASK_RUNNING);
938         RETURN(0);
939 }
940
941 static void ll_stop_agl(struct ll_statahead_info *sai)
942 {
943         struct dentry *parent = sai->sai_dentry;
944         struct ll_inode_info *plli = ll_i2info(parent->d_inode);
945         struct ll_inode_info *clli;
946         struct task_struct *agl_task;
947
948         spin_lock(&plli->lli_agl_lock);
949         agl_task = sai->sai_agl_task;
950         sai->sai_agl_task = NULL;
951         spin_unlock(&plli->lli_agl_lock);
952         if (!agl_task)
953                 return;
954
955         CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
956                sai, (unsigned int)agl_task->pid);
957         kthread_stop(agl_task);
958
959         spin_lock(&plli->lli_agl_lock);
960         sai->sai_agl_valid = 0;
961         while (!agl_list_empty(sai)) {
962                 clli = agl_first_entry(sai);
963                 list_del_init(&clli->lli_agl_list);
964                 spin_unlock(&plli->lli_agl_lock);
965                 clli->lli_agl_index = 0;
966                 iput(&clli->lli_vfs_inode);
967                 spin_lock(&plli->lli_agl_lock);
968         }
969         spin_unlock(&plli->lli_agl_lock);
970         CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
971                sai, parent);
972         ll_sai_put(sai);
973 }
974
975 /* start agl thread */
976 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
977 {
978         int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
979         struct ll_inode_info *plli;
980         struct task_struct *task;
981
982         ENTRY;
983
984         CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
985                sai, parent);
986
987         plli = ll_i2info(parent->d_inode);
988         task = kthread_create_on_node(ll_agl_thread, parent, node, "ll_agl_%d",
989                                       plli->lli_opendir_pid);
990         if (IS_ERR(task)) {
991                 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
992                 sai->sai_agl_valid = 0;
993                 RETURN_EXIT;
994         }
995         sai->sai_agl_task = task;
996         LASSERT(sai->sai_agl_valid == 1);
997         atomic_inc(&ll_i2sbi(d_inode(parent))->ll_agl_total);
998         /* Get an extra reference that the thread holds */
999         ll_sai_get(d_inode(parent));
1000
1001         wake_up_process(task);
1002
1003         EXIT;
1004 }
1005
1006 /* statahead thread main function */
1007 static int ll_statahead_thread(void *arg)
1008 {
1009         struct dentry *parent = (struct dentry *)arg;
1010         struct inode *dir = parent->d_inode;
1011         struct ll_inode_info *lli = ll_i2info(dir);
1012         struct ll_sb_info *sbi = ll_i2sbi(dir);
1013         struct ll_statahead_info *sai = lli->lli_sai;
1014         int first = 0;
1015         struct md_op_data *op_data;
1016         struct ll_dir_chain chain;
1017         struct page *page = NULL;
1018         __u64 pos = 0;
1019         int rc = 0;
1020
1021         ENTRY;
1022
1023         CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
1024                sai, parent);
1025
1026         OBD_ALLOC_PTR(op_data);
1027         if (!op_data)
1028                 GOTO(out, rc = -ENOMEM);
1029
1030         ll_dir_chain_init(&chain);
1031         while (pos != MDS_DIR_END_OFF && sai->sai_task) {
1032                 struct lu_dirpage *dp;
1033                 struct lu_dirent  *ent;
1034
1035                 op_data = ll_prep_md_op_data(op_data, dir, dir, NULL, 0, 0,
1036                                              LUSTRE_OPC_ANY, dir);
1037                 if (IS_ERR(op_data)) {
1038                         rc = PTR_ERR(op_data);
1039                         break;
1040                 }
1041
1042                 sai->sai_in_readpage = 1;
1043                 page = ll_get_dir_page(dir, op_data, pos, &chain);
1044                 ll_unlock_md_op_lsm(op_data);
1045                 sai->sai_in_readpage = 0;
1046                 if (IS_ERR(page)) {
1047                         rc = PTR_ERR(page);
1048                         CDEBUG(D_READA,
1049                                "error reading dir "DFID" at %llu /%llu opendir_pid = %u: rc = %d\n",
1050                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1051                                lli->lli_opendir_pid, rc);
1052                         break;
1053                 }
1054
1055                 dp = page_address(page);
1056                 for (ent = lu_dirent_start(dp);
1057                      ent != NULL && sai->sai_task &&
1058                      !sa_low_hit(sai);
1059                      ent = lu_dirent_next(ent)) {
1060                         __u64 hash;
1061                         int namelen;
1062                         char *name;
1063                         struct lu_fid fid;
1064
1065                         hash = le64_to_cpu(ent->lde_hash);
1066                         if (unlikely(hash < pos))
1067                                 /*
1068                                  * Skip until we find target hash value.
1069                                  */
1070                                 continue;
1071
1072                         namelen = le16_to_cpu(ent->lde_namelen);
1073                         if (unlikely(namelen == 0))
1074                                 /*
1075                                  * Skip dummy record.
1076                                  */
1077                                 continue;
1078
1079                         name = ent->lde_name;
1080                         if (name[0] == '.') {
1081                                 if (namelen == 1) {
1082                                         /*
1083                                          * skip "."
1084                                          */
1085                                         continue;
1086                                 } else if (name[1] == '.' && namelen == 2) {
1087                                         /*
1088                                          * skip ".."
1089                                          */
1090                                         continue;
1091                                 } else if (!sai->sai_ls_all) {
1092                                         /*
1093                                          * skip hidden files.
1094                                          */
1095                                         sai->sai_skip_hidden++;
1096                                         continue;
1097                                 }
1098                         }
1099
1100                         /*
1101                          * don't stat-ahead first entry.
1102                          */
1103                         if (unlikely(++first == 1))
1104                                 continue;
1105
1106                         fid_le_to_cpu(&fid, &ent->lde_fid);
1107
1108                         while (({set_current_state(TASK_IDLE);
1109                                  sai->sai_task; })) {
1110                                 if (sa_has_callback(sai)) {
1111                                         __set_current_state(TASK_RUNNING);
1112                                         sa_handle_callback(sai);
1113                                 }
1114
1115                                 spin_lock(&lli->lli_agl_lock);
1116                                 while (sa_sent_full(sai) &&
1117                                        !agl_list_empty(sai)) {
1118                                         struct ll_inode_info *clli;
1119
1120                                         __set_current_state(TASK_RUNNING);
1121                                         clli = agl_first_entry(sai);
1122                                         list_del_init(&clli->lli_agl_list);
1123                                         spin_unlock(&lli->lli_agl_lock);
1124
1125                                         ll_agl_trigger(&clli->lli_vfs_inode,
1126                                                        sai);
1127                                         cond_resched();
1128                                         spin_lock(&lli->lli_agl_lock);
1129                                 }
1130                                 spin_unlock(&lli->lli_agl_lock);
1131
1132                                 if (!sa_sent_full(sai))
1133                                         break;
1134                                 schedule();
1135                         }
1136                         __set_current_state(TASK_RUNNING);
1137
1138                         sa_statahead(parent, name, namelen, &fid);
1139                 }
1140
1141                 pos = le64_to_cpu(dp->ldp_hash_end);
1142                 ll_release_page(dir, page,
1143                                 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1144
1145                 if (sa_low_hit(sai)) {
1146                         rc = -EFAULT;
1147                         atomic_inc(&sbi->ll_sa_wrong);
1148                         CDEBUG(D_READA,
1149                                "Statahead for dir "DFID" hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n",
1150                                PFID(&lli->lli_fid), sai->sai_hit,
1151                                sai->sai_miss, sai->sai_sent,
1152                                sai->sai_replied, current->pid);
1153                         break;
1154                 }
1155         }
1156         ll_dir_chain_fini(&chain);
1157         ll_finish_md_op_data(op_data);
1158
1159         if (rc < 0) {
1160                 spin_lock(&lli->lli_sa_lock);
1161                 sai->sai_task = NULL;
1162                 lli->lli_sa_enabled = 0;
1163                 spin_unlock(&lli->lli_sa_lock);
1164         }
1165
1166         /*
1167          * statahead is finished, but statahead entries still need to be cached;
1168          * wait for the file release to stop this thread.
1169          */
1170         while (({set_current_state(TASK_IDLE);
1171                  sai->sai_task; })) {
1172                 if (sa_has_callback(sai)) {
1173                         __set_current_state(TASK_RUNNING);
1174                         sa_handle_callback(sai);
1175                 } else {
1176                         schedule();
1177                 }
1178         }
1179         __set_current_state(TASK_RUNNING);
1180
1181         EXIT;
1182 out:
1183         ll_stop_agl(sai);
1184
1185         /*
1186          * wait for inflight statahead RPCs to finish; only then can sai be freed
1187          * safely, because the statahead RPC callbacks access sai data
1188          */
1189         while (sai->sai_sent != sai->sai_replied)
1190                 /* in case we're not woken up, timeout wait */
1191                 msleep(125);
1192
1193         /* release resources held by statahead RPCs */
1194         sa_handle_callback(sai);
1195
1196         CDEBUG(D_READA, "%s: statahead thread stopped: sai %p, parent %pd\n",
1197                sbi->ll_fsname, sai, parent);
1198
1199         spin_lock(&lli->lli_sa_lock);
1200         sai->sai_task = NULL;
1201         spin_unlock(&lli->lli_sa_lock);
1202         wake_up(&sai->sai_waitq);
1203
1204         ll_sai_put(sai);
1205
1206         return rc;
1207 }
1208
1209 /* authorize opened dir handle @key to statahead */
1210 void ll_authorize_statahead(struct inode *dir, void *key)
1211 {
1212         struct ll_inode_info *lli = ll_i2info(dir);
1213
1214         spin_lock(&lli->lli_sa_lock);
1215         if (!lli->lli_opendir_key && !lli->lli_sai) {
1216                 /*
1217                  * if lli_sai is not NULL, it means previous statahead is not
1218                  * finished yet, we'd better not start a new statahead for now.
1219                  */
1220                 LASSERT(lli->lli_opendir_pid == 0);
1221                 lli->lli_opendir_key = key;
1222                 lli->lli_opendir_pid = current->pid;
1223                 lli->lli_sa_enabled = 1;
1224         }
1225         spin_unlock(&lli->lli_sa_lock);
1226 }
1227
1228 /*
1229  * deauthorize opened dir handle @key to statahead, and notify statahead thread
1230  * to quit if it's running.
1231  */
1232 void ll_deauthorize_statahead(struct inode *dir, void *key)
1233 {
1234         struct ll_inode_info *lli = ll_i2info(dir);
1235         struct ll_statahead_info *sai;
1236
1237         LASSERT(lli->lli_opendir_key == key);
1238         LASSERT(lli->lli_opendir_pid != 0);
1239
1240         CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1241                PFID(&lli->lli_fid));
1242
1243         spin_lock(&lli->lli_sa_lock);
1244         lli->lli_opendir_key = NULL;
1245         lli->lli_opendir_pid = 0;
1246         lli->lli_sa_enabled = 0;
1247         sai = lli->lli_sai;
1248         if (sai && sai->sai_task) {
1249                 /*
1250                  * statahead thread may not have quit yet because it needs to
1251                  * cache entries, now it's time to tell it to quit.
1252                  *
1253                  * wake_up_process() provides the necessary barriers
1254                  * to pair with set_current_state().
1255                  */
1256                 struct task_struct *task = sai->sai_task;
1257
1258                 sai->sai_task = NULL;
1259                 wake_up_process(task);
1260         }
1261         spin_unlock(&lli->lli_sa_lock);
1262 }
1263
1264 enum {
1265         /**
1266          * not first dirent, or is "."
1267          */
1268         LS_NOT_FIRST_DE = 0,
1269         /**
1270          * the first non-hidden dirent
1271          */
1272         LS_FIRST_DE,
1273         /**
1274          * the first hidden dirent, that is "."
1275          */
1276         LS_FIRST_DOT_DE
1277 };
1278
1279 /* file is first dirent under @dir */
1280 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1281 {
1282         struct ll_dir_chain   chain;
1283         struct qstr          *target = &dentry->d_name;
1284         struct md_op_data    *op_data;
1285         int                   dot_de;
1286         struct page          *page = NULL;
1287         int                   rc = LS_NOT_FIRST_DE;
1288         __u64                 pos = 0;
1289
1290         ENTRY;
1291
1292         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1293                                      LUSTRE_OPC_ANY, dir);
1294         if (IS_ERR(op_data))
1295                 RETURN(PTR_ERR(op_data));
1296         /**
1297          * FIXME: choose the start offset of the readdir
1298          */
1299
1300         ll_dir_chain_init(&chain);
1301         page = ll_get_dir_page(dir, op_data, 0, &chain);
1302
1303         while (1) {
1304                 struct lu_dirpage *dp;
1305                 struct lu_dirent  *ent;
1306
1307                 if (IS_ERR(page)) {
1308                         struct ll_inode_info *lli = ll_i2info(dir);
1309
1310                         rc = PTR_ERR(page);
1311                         CERROR("%s: reading dir "DFID" at %llu opendir_pid = %u : rc = %d\n",
1312                                ll_i2sbi(dir)->ll_fsname,
1313                                PFID(ll_inode2fid(dir)), pos,
1314                                lli->lli_opendir_pid, rc);
1315                         break;
1316                 }
1317
1318                 dp = page_address(page);
1319                 for (ent = lu_dirent_start(dp); ent != NULL;
1320                      ent = lu_dirent_next(ent)) {
1321                         __u64 hash;
1322                         int namelen;
1323                         char *name;
1324
1325                         hash = le64_to_cpu(ent->lde_hash);
1326                         /*
1327                          * The ll_get_dir_page() can return any page containing
1328                          * the given hash which may be not the start hash.
1329                          */
1330                         if (unlikely(hash < pos))
1331                                 continue;
1332
1333                         namelen = le16_to_cpu(ent->lde_namelen);
1334                         if (unlikely(namelen == 0))
1335                                 /*
1336                                  * skip dummy record.
1337                                  */
1338                                 continue;
1339
1340                         name = ent->lde_name;
1341                         if (name[0] == '.') {
1342                                 if (namelen == 1)
1343                                         /*
1344                                          * skip "."
1345                                          */
1346                                         continue;
1347                                 else if (name[1] == '.' && namelen == 2)
1348                                         /*
1349                                          * skip ".."
1350                                          */
1351                                         continue;
1352                                 else
1353                                         dot_de = 1;
1354                         } else {
1355                                 dot_de = 0;
1356                         }
1357
1358                         if (dot_de && target->name[0] != '.') {
1359                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1360                                        target->len, target->name,
1361                                        namelen, name);
1362                                 continue;
1363                         }
1364
1365                         if (target->len != namelen ||
1366                             memcmp(target->name, name, namelen) != 0)
1367                                 rc = LS_NOT_FIRST_DE;
1368                         else if (!dot_de)
1369                                 rc = LS_FIRST_DE;
1370                         else
1371                                 rc = LS_FIRST_DOT_DE;
1372
1373                         ll_release_page(dir, page, false);
1374                         GOTO(out, rc);
1375                 }
1376                 pos = le64_to_cpu(dp->ldp_hash_end);
1377                 if (pos == MDS_DIR_END_OFF) {
1378                         /*
1379                          * End of directory reached.
1380                          */
1381                         ll_release_page(dir, page, false);
1382                         GOTO(out, rc);
1383                 } else {
1384                         /*
1385                          * chain is exhausted
1386                          * Normal case: continue to the next page.
1387                          */
1388                         ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1389                                               LDF_COLLIDE);
1390                         page = ll_get_dir_page(dir, op_data, pos, &chain);
1391                 }
1392         }
1393         EXIT;
1394 out:
1395         ll_dir_chain_fini(&chain);
1396         ll_finish_md_op_data(op_data);
1397
1398         return rc;
1399 }
1400
1401 /**
1402  * revalidate @dentryp from statahead cache
1403  *
1404  * \param[in] dir       parent directory
1405  * \param[in] sai       sai structure
1406  * \param[out] dentryp  pointer to dentry which will be revalidated
1407  * \param[in] unplug    unplug statahead window only (normally for negative
1408  *                      dentry)
1409  * \retval              1 on success, dentry is saved in @dentryp
1410  * \retval              0 if revalidation failed (no proper lock on client)
1411  * \retval              negative number upon error
1412  */
1413 static int revalidate_statahead_dentry(struct inode *dir,
1414                                        struct ll_statahead_info *sai,
1415                                        struct dentry **dentryp,
1416                                        bool unplug)
1417 {
1418         struct sa_entry *entry = NULL;
1419         struct ll_dentry_data *ldd;
1420         struct ll_inode_info *lli = ll_i2info(dir);
1421         int rc = 0;
1422
1423         ENTRY;
1424
1425         if ((*dentryp)->d_name.name[0] == '.') {
1426                 if (sai->sai_ls_all ||
1427                     sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1428                         /*
1429                          * The hidden dentry is the first one, or the
1430                          * statahead thread has not skipped this many hidden
1431                          * dentries before "sai_ls_all" was enabled as below.
1432                          */
1433                 } else {
1434                         if (!sai->sai_ls_all)
1435                                 /*
1436                                  * It may be because the hidden dentry is
1437                                  * not the first one, so "sai_ls_all" was
1438                                  * not set and "ls -al" missed it. Enable
1439                                  * "sai_ls_all" for such a case.
1440                                  */
1441                                 sai->sai_ls_all = 1;
1442
1443                         /*
1444                          * Such "getattr" has been skipped before
1445                          * "sai_ls_all" enabled as above.
1446                          */
1447                         sai->sai_miss_hidden++;
1448                         RETURN(-EAGAIN);
1449                 }
1450         }
1451
1452         if (unplug)
1453                 GOTO(out, rc = 1);
1454
1455         entry = sa_get(sai, &(*dentryp)->d_name);
1456         if (!entry)
1457                 GOTO(out, rc = -EAGAIN);
1458
1459         /* if statahead is busy in readdir, help it do post-work */
1460         if (!sa_ready(entry) && sai->sai_in_readpage)
1461                 sa_handle_callback(sai);
1462
1463         if (!sa_ready(entry)) {
1464                 spin_lock(&lli->lli_sa_lock);
1465                 sai->sai_index_wait = entry->se_index;
1466                 spin_unlock(&lli->lli_sa_lock);
1467                 rc = wait_event_idle_timeout(sai->sai_waitq, sa_ready(entry),
1468                                              cfs_time_seconds(30));
1469                 if (rc == 0) {
1470                         /*
1471                          * entry may not be ready, so it may be used by inflight
1472                          * statahead RPC, don't free it.
1473                          */
1474                         entry = NULL;
1475                         GOTO(out, rc = -EAGAIN);
1476                 }
1477         }
1478
1479         /*
1480          * We need to see the value that was set immediately before we
1481          * were woken up.
1482          */
1483         if (smp_load_acquire(&entry->se_state) == SA_ENTRY_SUCC &&
1484             entry->se_inode) {
1485                 struct inode *inode = entry->se_inode;
1486                 struct lookup_intent it = { .it_op = IT_GETATTR,
1487                                             .it_lock_handle =
1488                                                 entry->se_handle };
1489                 __u64 bits;
1490
1491                 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1492                                         ll_inode2fid(inode), &bits);
1493                 if (rc == 1) {
1494                         if (!(*dentryp)->d_inode) {
1495                                 struct dentry *alias;
1496
1497                                 alias = ll_splice_alias(inode, *dentryp);
1498                                 if (IS_ERR(alias)) {
1499                                         ll_intent_release(&it);
1500                                         GOTO(out, rc = PTR_ERR(alias));
1501                                 }
1502                                 *dentryp = alias;
1503                                 /*
1504                                  * statahead prepared this inode, transfer inode
1505                                  * refcount from sa_entry to dentry
1506                                  */
1507                                 entry->se_inode = NULL;
1508                         } else if ((*dentryp)->d_inode != inode) {
1509                                 /* revalidate, but inode is recreated */
1510                                 CDEBUG(D_READA,
1511                                        "%s: stale dentry %pd inode " DFID", statahead inode "DFID "\n",
1512                                        ll_i2sbi(inode)->ll_fsname, *dentryp,
1513                                        PFID(ll_inode2fid((*dentryp)->d_inode)),
1514                                        PFID(ll_inode2fid(inode)));
1515                                 ll_intent_release(&it);
1516                                 GOTO(out, rc = -ESTALE);
1517                         }
1518
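                        /*
                         * The LOOKUP lock bit covers the name->inode mapping,
                         * so the dentry can be marked valid again without
                         * another lookup RPC.
                         */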
1519                         if ((bits & MDS_INODELOCK_LOOKUP) &&
1520                             d_lustre_invalid(*dentryp))
1521                                 d_lustre_revalidate(*dentryp);
1522                         ll_intent_release(&it);
1523                 }
1524         }
1525 out:
1526         /*
1527          * A cached sa_entry can be used only once and is freed right after
1528          * use, so if lookup/revalidate accessed the statahead cache, set the
1529          * dentry's ldd_sa_generation to the parent's lli_sa_generation. If we
1530          * stat this file again later, we then know statahead was already done
1531          * for it, see dentry_may_statahead().
1532          */
1533         ldd = ll_d2d(*dentryp);
1534         /* ldd can be NULL if llite lookup failed. */
1535         if (ldd)
1536                 ldd->lld_sa_generation = lli->lli_sa_generation;
1537         sa_put(sai, entry);
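        /*
         * This entry has been consumed (or abandoned); wake the statahead
         * thread in case it is throttled waiting for the scanner to catch up.
         */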
1538         spin_lock(&lli->lli_sa_lock);
1539         if (sai->sai_task)
1540                 wake_up_process(sai->sai_task);
1541         spin_unlock(&lli->lli_sa_lock);
1542
1543         RETURN(rc);
1544 }
1545
1546 /**
1547  * start statahead thread
1548  *
1549  * \param[in] dir       parent directory
1550  * \param[in] dentry    dentry that triggers statahead, normally the first
1551  *                      dirent under @dir
1552  * \param[in] agl       whether AGL is needed
1553  * \retval              -EAGAIN on success; this function is called from
1554  *                      within a lookup, so the client should stat the
1555  *                      current entry itself instead of waiting for the
1556  *                      statahead thread to do it asynchronously.
1557  * \retval              any other negative number upon error
1558  */
1559 static int start_statahead_thread(struct inode *dir, struct dentry *dentry,
1560                                   bool agl)
1561 {
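        /*
         * Spread statahead kthreads across the NUMA nodes allowed by the CPT
         * table, so the thread is bound to an accepted node (LU-9441).
         */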
1562         int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
1563         struct ll_inode_info *lli = ll_i2info(dir);
1564         struct ll_statahead_info *sai = NULL;
1565         struct dentry *parent = dentry->d_parent;
1566         struct task_struct *task;
1567         struct ll_sb_info *sbi = ll_i2sbi(parent->d_inode);
1568         int first = LS_FIRST_DE;
1569         int rc = 0;
1570
1571         ENTRY;
1572
1573         /* We are the "lli_opendir_pid" owner, so only we can set "lli_sai". */
1574         first = is_first_dirent(dir, dentry);
1575         if (first == LS_NOT_FIRST_DE)
1576                 /* Not an "ls -{a}l" style operation, no need to statahead for it. */
1577                 GOTO(out, rc = -EFAULT);
1578
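        /*
         * Throttle: allow at most ll_sa_running_max concurrent statahead
         * instances per mount; the counter is dropped in the error path below
         * if this attempt does not end up with a running thread.
         */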
1579         if (unlikely(atomic_inc_return(&sbi->ll_sa_running) >
1580                                        sbi->ll_sa_running_max)) {
1581                 CDEBUG(D_READA,
1582                        "Too many concurrent statahead instances, avoid new statahead instance temporarily.\n");
1583                 GOTO(out, rc = -EMFILE);
1584         }
1585
1586         sai = ll_sai_alloc(parent);
1587         if (!sai)
1588                 GOTO(out, rc = -ENOMEM);
1589
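        /*
         * If the first dirent is a hidden one, assume an "ls -a"-style listing
         * and stat hidden entries as well.
         */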
1590         sai->sai_ls_all = (first == LS_FIRST_DOT_DE);
1591         sai->sai_agl_valid = agl;
1592
1593         /*
1594          * If the current lli_opendir_key was deauthorized, or the dir was
1595          * re-opened by another process, don't start statahead; otherwise the
1596          * newly spawned statahead thread won't be notified to quit.
1597          */
1598         spin_lock(&lli->lli_sa_lock);
1599         if (unlikely(lli->lli_sai || !lli->lli_opendir_key ||
1600                      lli->lli_opendir_pid != current->pid)) {
1601                 spin_unlock(&lli->lli_sa_lock);
1602                 GOTO(out, rc = -EPERM);
1603         }
1604         lli->lli_sai = sai;
1605         spin_unlock(&lli->lli_sa_lock);
1606
1607         CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %pd]\n",
1608                current->pid, parent);
1609
1610         task = kthread_create_on_node(ll_statahead_thread, parent, node,
1611                                       "ll_sa_%u", lli->lli_opendir_pid);
1612         if (IS_ERR(task)) {
1613                 spin_lock(&lli->lli_sa_lock);
1614                 lli->lli_sai = NULL;
1615                 spin_unlock(&lli->lli_sa_lock);
1616                 rc = PTR_ERR(task);
1617                 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1618                 GOTO(out, rc);
1619         }
1620
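        /*
         * Also start the asynchronous glimpse lock (AGL) thread when the mount
         * enables it and the caller asked for it.
         */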
1621         if (ll_i2sbi(parent->d_inode)->ll_flags & LL_SBI_AGL_ENABLED && agl)
1622                 ll_start_agl(parent, sai);
1623
1624         atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_total);
1625         sai->sai_task = task;
1626
1627         wake_up_process(task);
1628         /*
1629          * We don't stat-ahead for the first dirent since we are already in
1630          * lookup.
1631          */
1632         RETURN(-EAGAIN);
1633
1634 out:
1635         /*
1636          * If starting the statahead thread failed, disable statahead so that
1637          * subsequent stats won't waste time trying it again.
1638          */
1639         spin_lock(&lli->lli_sa_lock);
1640         if (lli->lli_opendir_pid == current->pid)
1641                 lli->lli_sa_enabled = 0;
1642         spin_unlock(&lli->lli_sa_lock);
1643
1644         if (sai)
1645                 ll_sai_free(sai);
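        /*
         * ll_sa_running was only incremented after the first-dirent check
         * passed, so only drop it in that case.
         */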
1646         if (first != LS_NOT_FIRST_DE)
1647                 atomic_dec(&sbi->ll_sa_running);
1648
1649         RETURN(rc);
1650 }
1651
1652 /*
1653  * Check whether statahead for @dir was started.
1654  */
1655 static inline bool ll_statahead_started(struct inode *dir, bool agl)
1656 {
1657         struct ll_inode_info *lli = ll_i2info(dir);
1658         struct ll_statahead_info *sai;
1659
1660         spin_lock(&lli->lli_sa_lock);
1661         sai = lli->lli_sai;
1662         if (sai && sai->sai_agl_valid != agl)
1663                 CDEBUG(D_READA,
1664                        "%s: Statahead AGL hint changed from %d to %d\n",
1665                        ll_i2sbi(dir)->ll_fsname, sai->sai_agl_valid, agl);
1666         spin_unlock(&lli->lli_sa_lock);
1667
1668         return !!sai;
1669 }
1670
1671 /**
1672  * statahead entry function: called when the client does a getattr on a file.
1673  * It starts the statahead thread if this is the first dir entry; otherwise
1674  * the dentry should be revalidated from the statahead cache.
1675  *
1676  * \param[in]  dir      parent directory
1677  * \param[in]  dentry   dentry to getattr
1678  * \param[in]  agl      whether to start the AGL thread
1679  *
1680  * \retval              0 statahead was already started for @dir
1681  * \retval              -EAGAIN statahead thread was started; the caller should
1682  *                      do the getattr for this first entry itself
1683  * \retval              other negative number on error, caller often ignores
1684  *                      this and then does getattr from the server
1685  */
1686 int ll_start_statahead(struct inode *dir, struct dentry *dentry, bool agl)
1687 {
1688         if (!ll_statahead_started(dir, agl))
1689                 return start_statahead_thread(dir, dentry, agl);
1690         return 0;
1691 }
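
/*
 * Illustrative call pattern only -- a sketch, not lifted from the real callers
 * elsewhere in llite:
 *
 *	if (dentry_may_statahead(dir, dentry)) {
 *		rc = ll_start_statahead(dir, dentry, true);
 *		if (rc == -EAGAIN)
 *			fall back to a regular getattr for this first entry;
 *	}
 */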
1692
1693 /**
1694  * revalidate dentry from statahead cache.
1695  *
1696  * \param[in]  dir      parent directory
1697  * \param[out] dentryp  dentry to getattr
1698  * \param[in]  unplug   unplug statahead window only (normally for negative
1699  *                      dentry)
1700  * \retval              1 on success
1701  * \retval              0 revalidation from statahead cache failed, caller needs
1702  *                      to getattr from server directly
1703  * \retval              negative number on error; the caller often ignores this
1704  *                      and then does getattr from the server
1705  */
1706 int ll_revalidate_statahead(struct inode *dir, struct dentry **dentryp,
1707                             bool unplug)
1708 {
1709         struct ll_statahead_info *sai;
1710         int rc = 0;
1711
1712         sai = ll_sai_get(dir);
1713         if (sai) {
1714                 rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
1715                 CDEBUG(D_READA, "revalidate statahead %pd: rc = %d.\n",
1716                        *dentryp, rc);
1717                 ll_sai_put(sai);
1718         }
1719         return rc;
1720 }