LU-6245 client: remove types abstraction from client code
[fs/lustre-release.git] / lustre / llite / statahead.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #include <linux/fs.h>
38 #include <linux/sched.h>
39 #include <linux/kthread.h>
40 #include <linux/mm.h>
41 #include <linux/highmem.h>
42 #include <linux/pagemap.h>
43
44 #define DEBUG_SUBSYSTEM S_LLITE
45
46 #include <obd_support.h>
47 #include <lustre_dlm.h>
48 #include "llite_internal.h"
49
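/* extra slack beyond the statahead window before an entry index is
 * considered stale, see is_omitted_entry() */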
50 #define SA_OMITTED_ENTRY_MAX 8ULL
51
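/* state of a sa_entry */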
52 typedef enum {
53         /** negative values are for error cases */
54         SA_ENTRY_INIT = 0,      /** init entry */
55         SA_ENTRY_SUCC = 1,      /** stat succeed */
56         SA_ENTRY_INVA = 2,      /** invalid entry */
57 } se_state_t;
58
59 /* sa_entry is not refcounted: the statahead thread allocates it and does the
60  * async stat, the async stat callback ll_statahead_interpret() adds it to
61  * sai_interim_entries, later the statahead thread calls sa_handle_callback() to
62  * instantiate the entry and move it to sai_entries, and from then on only the
63  * scanner process can access and free it. */
64 struct sa_entry {
65         /* link into sai_interim_entries or sai_entries */
66         struct list_head        se_list;
67         /* link into sai hash table locally */
68         struct list_head        se_hash;
69         /* entry index in the sai */
70         __u64                   se_index;
71         /* low layer ldlm lock handle */
72         __u64                   se_handle;
73         /* entry status */
74         se_state_t              se_state;
75         /* entry size, contains name */
76         int                     se_size;
77         /* pointer to async getattr enqueue info */
78         struct md_enqueue_info *se_minfo;
79         /* pointer to the async getattr request */
80         struct ptlrpc_request  *se_req;
81         /* pointer to the target inode */
82         struct inode           *se_inode;
83         /* entry name */
84         struct qstr             se_qstr;
85         /* entry fid */
86         struct lu_fid           se_fid;
87 };
88
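/* global statahead generation counter; a fresh value is copied into
 * lli_sa_generation each time a new sai is allocated for a directory */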
89 static unsigned int sai_generation = 0;
90 static DEFINE_SPINLOCK(sai_generation_lock);
91
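/* sa_entry is not (or no longer) linked into the sai hash table */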
92 static inline int sa_unhashed(struct sa_entry *entry)
93 {
94         return list_empty(&entry->se_hash);
95 }
96
97 /* sa_entry is ready to use */
98 static inline int sa_ready(struct sa_entry *entry)
99 {
100         smp_rmb();
101         return (entry->se_state != SA_ENTRY_INIT);
102 }
103
104 /* hash value to put in sai_cache */
105 static inline int sa_hash(int val)
106 {
107         return val & LL_SA_CACHE_MASK;
108 }
109
110 /* hash entry into sai_cache */
111 static inline void
112 sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
113 {
114         int i = sa_hash(entry->se_qstr.hash);
115
116         spin_lock(&sai->sai_cache_lock[i]);
117         list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
118         spin_unlock(&sai->sai_cache_lock[i]);
119 }
120
121 /* unhash entry from sai_cache */
122 static inline void
123 sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
124 {
125         int i = sa_hash(entry->se_qstr.hash);
126
127         spin_lock(&sai->sai_cache_lock[i]);
128         list_del_init(&entry->se_hash);
129         spin_unlock(&sai->sai_cache_lock[i]);
130 }
131
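/* async glimpse (AGL) is only triggered for regular files, and only while
 * AGL is still valid for this sai */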
132 static inline int agl_should_run(struct ll_statahead_info *sai,
133                                  struct inode *inode)
134 {
135         return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
136 }
137
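/* first inode queued on sai_agls for async glimpse */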
138 static inline struct ll_inode_info *
139 agl_first_entry(struct ll_statahead_info *sai)
140 {
141         return list_entry(sai->sai_agls.next, struct ll_inode_info,
142                           lli_agl_list);
143 }
144
145 /* statahead window is full */
146 static inline int sa_sent_full(struct ll_statahead_info *sai)
147 {
148         return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
149 }
150
151 /* got async stat replies */
152 static inline int sa_has_callback(struct ll_statahead_info *sai)
153 {
154         return !list_empty(&sai->sai_interim_entries);
155 }
156
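/* no inodes are queued for async glimpse */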
157 static inline int agl_list_empty(struct ll_statahead_info *sai)
158 {
159         return list_empty(&sai->sai_agls);
160 }
161
162 /**
163  * The hit rate is considered low if either:
164  * (1) the hit ratio is less than 80%,
165  * or
166  * (2) there are more than 8 consecutive misses.
167  */
168 static inline int sa_low_hit(struct ll_statahead_info *sai)
169 {
170         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
171                 (sai->sai_consecutive_miss > 8));
172 }
173
174 /*
175  * if the given index is behind the statahead window by more than
176  * SA_OMITTED_ENTRY_MAX, then it is old.
177  */
178 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
179 {
180         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
181                  sai->sai_index);
182 }
183
184 /* allocate sa_entry and hash it to allow scanner process to find it */
185 static struct sa_entry *
186 sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len,
187          const struct lu_fid *fid)
188 {
189         struct ll_inode_info *lli;
190         struct sa_entry *entry;
191         int entry_size;
192         char *dname;
193         ENTRY;
194
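        /* name storage is rounded up to a 4-byte boundary and includes room
         * for the trailing NUL */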
195         entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
196         OBD_ALLOC(entry, entry_size);
197         if (unlikely(entry == NULL))
198                 RETURN(ERR_PTR(-ENOMEM));
199
200         CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
201                len, name, entry, index);
202
203         entry->se_index = index;
204
205         entry->se_state = SA_ENTRY_INIT;
206         entry->se_size = entry_size;
207         dname = (char *)entry + sizeof(struct sa_entry);
208         memcpy(dname, name, len);
209         dname[len] = 0;
210         entry->se_qstr.hash = full_name_hash(name, len);
211         entry->se_qstr.len = len;
212         entry->se_qstr.name = dname;
213         entry->se_fid = *fid;
214
215         lli = ll_i2info(sai->sai_dentry->d_inode);
216
217         spin_lock(&lli->lli_sa_lock);
218         INIT_LIST_HEAD(&entry->se_list);
219         sa_rehash(sai, entry);
220         spin_unlock(&lli->lli_sa_lock);
221
222         atomic_inc(&sai->sai_cache_count);
223
224         RETURN(entry);
225 }
226
227 /* free sa_entry, which should have been unhashed and not in any list */
228 static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
229 {
230         CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
231                entry->se_qstr.len, entry->se_qstr.name, entry,
232                entry->se_index);
233
234         LASSERT(list_empty(&entry->se_list));
235         LASSERT(sa_unhashed(entry));
236
237         OBD_FREE(entry, entry->se_size);
238         atomic_dec(&sai->sai_cache_count);
239 }
240
241 /*
242  * find sa_entry by name; used by the directory scanner. No lock is needed
243  * because only the scanner can remove the entry from the cache.
244  */
245 static struct sa_entry *
246 sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
247 {
248         struct sa_entry *entry;
249         int i = sa_hash(qstr->hash);
250
251         list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
252                 if (entry->se_qstr.hash == qstr->hash &&
253                     entry->se_qstr.len == qstr->len &&
254                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
255                         return entry;
256         }
257         return NULL;
258 }
259
260 /* unhash and unlink sa_entry, and then free it */
261 static inline void
262 sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
263 {
264         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
265
266         LASSERT(!sa_unhashed(entry));
267         LASSERT(!list_empty(&entry->se_list));
268         LASSERT(sa_ready(entry));
269
270         sa_unhash(sai, entry);
271
272         spin_lock(&lli->lli_sa_lock);
273         list_del_init(&entry->se_list);
274         spin_unlock(&lli->lli_sa_lock);
275
276         if (entry->se_inode != NULL)
277                 iput(entry->se_inode);
278
279         sa_free(sai, entry);
280 }
281
282 /* called by scanner after use, sa_entry will be killed */
283 static void
284 sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
285 {
286         struct sa_entry *tmp, *next;
287
288         if (entry != NULL && entry->se_state == SA_ENTRY_SUCC) {
289                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
290
291                 sai->sai_hit++;
292                 sai->sai_consecutive_miss = 0;
293                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
294         } else {
295                 sai->sai_miss++;
296                 sai->sai_consecutive_miss++;
297         }
298
299         if (entry != NULL)
300                 sa_kill(sai, entry);
301
302         /* kill old completed entries; only the scanner process does this,
303          * so no locking is needed */
304         list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
305                 if (!is_omitted_entry(sai, tmp->se_index))
306                         break;
307                 sa_kill(sai, tmp);
308         }
309
310         wake_up(&sai->sai_thread.t_ctl_waitq);
311 }
312
313 /* update state and insert the entry into sai_entries sorted by index; return
314  * true if the scanner is waiting on this entry. */
315 static bool
316 __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
317 {
318         struct sa_entry *se;
319         struct list_head *pos = &sai->sai_entries;
320         __u64 index = entry->se_index;
321
322         LASSERT(!sa_ready(entry));
323         LASSERT(list_empty(&entry->se_list));
324
325         list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
326                 if (se->se_index < entry->se_index) {
327                         pos = &se->se_list;
328                         break;
329                 }
330         }
331         list_add(&entry->se_list, pos);
332         entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC;
333
334         return (index == sai->sai_index_wait);
335 }
336
337 /*
338  * release resources used in the async stat RPC, update the entry state, and
339  * wake up the scanner process if it is waiting on this entry.
340  */
341 static void
342 sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
343 {
344         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
345         struct md_enqueue_info *minfo = entry->se_minfo;
346         struct ptlrpc_request *req = entry->se_req;
347         bool wakeup;
348
349         /* release resources used in RPC */
350         if (minfo) {
351                 entry->se_minfo = NULL;
352                 ll_intent_release(&minfo->mi_it);
353                 iput(minfo->mi_dir);
354                 OBD_FREE_PTR(minfo);
355         }
356
357         if (req) {
358                 entry->se_req = NULL;
359                 ptlrpc_req_finished(req);
360         }
361
362         spin_lock(&lli->lli_sa_lock);
363         wakeup = __sa_make_ready(sai, entry, ret);
364         spin_unlock(&lli->lli_sa_lock);
365
366         if (wakeup)
367                 wake_up(&sai->sai_waitq);
368 }
369
370 /* insert inode into the list of sai_agls */
371 static void ll_agl_add(struct ll_statahead_info *sai,
372                        struct inode *inode, int index)
373 {
374         struct ll_inode_info *child  = ll_i2info(inode);
375         struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);
376         int                   added  = 0;
377
378         spin_lock(&child->lli_agl_lock);
379         if (child->lli_agl_index == 0) {
380                 child->lli_agl_index = index;
381                 spin_unlock(&child->lli_agl_lock);
382
383                 LASSERT(list_empty(&child->lli_agl_list));
384
385                 igrab(inode);
386                 spin_lock(&parent->lli_agl_lock);
387                 if (agl_list_empty(sai))
388                         added = 1;
389                 list_add_tail(&child->lli_agl_list, &sai->sai_agls);
390                 spin_unlock(&parent->lli_agl_lock);
391         } else {
392                 spin_unlock(&child->lli_agl_lock);
393         }
394
395         if (added > 0)
396                 wake_up(&sai->sai_agl_thread.t_ctl_waitq);
397 }
398
399 /* allocate sai */
400 static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
401 {
402         struct ll_statahead_info *sai;
403         struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
404         int i;
405         ENTRY;
406
407         OBD_ALLOC_PTR(sai);
408         if (!sai)
409                 RETURN(NULL);
410
411         sai->sai_dentry = dget(dentry);
412         atomic_set(&sai->sai_refcount, 1);
413         sai->sai_max = LL_SA_RPC_MIN;
414         sai->sai_index = 1;
415         init_waitqueue_head(&sai->sai_waitq);
416         init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
417         init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
418
419         INIT_LIST_HEAD(&sai->sai_interim_entries);
420         INIT_LIST_HEAD(&sai->sai_entries);
421         INIT_LIST_HEAD(&sai->sai_agls);
422
423         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
424                 INIT_LIST_HEAD(&sai->sai_cache[i]);
425                 spin_lock_init(&sai->sai_cache_lock[i]);
426         }
427         atomic_set(&sai->sai_cache_count, 0);
428
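        /* assign a fresh, non-zero statahead generation to this directory */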
429         spin_lock(&sai_generation_lock);
430         lli->lli_sa_generation = ++sai_generation;
431         if (unlikely(sai_generation == 0))
432                 lli->lli_sa_generation = ++sai_generation;
433         spin_unlock(&sai_generation_lock);
434
435         RETURN(sai);
436 }
437
438 /* free sai */
439 static inline void ll_sai_free(struct ll_statahead_info *sai)
440 {
441         LASSERT(sai->sai_dentry != NULL);
442         dput(sai->sai_dentry);
443         OBD_FREE_PTR(sai);
444 }
445
446 /*
447  * take a refcount on the sai if one exists for @dir, which means statahead
448  * is running for this directory.
449  */
450 static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
451 {
452         struct ll_inode_info *lli = ll_i2info(dir);
453         struct ll_statahead_info *sai = NULL;
454
455         spin_lock(&lli->lli_sa_lock);
456         sai = lli->lli_sai;
457         if (sai != NULL)
458                 atomic_inc(&sai->sai_refcount);
459         spin_unlock(&lli->lli_sa_lock);
460
461         return sai;
462 }
463
464 /*
465  * put the sai refcount after use; if the refcount reaches zero, free the sai
466  * and the sa_entries attached to it.
467  */
468 static void ll_sai_put(struct ll_statahead_info *sai)
469 {
470         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
471
472         if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
473                 struct sa_entry *entry, *next;
474                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
475
476                 lli->lli_sai = NULL;
477                 spin_unlock(&lli->lli_sa_lock);
478
479                 LASSERT(thread_is_stopped(&sai->sai_thread));
480                 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
481                 LASSERT(sai->sai_sent == sai->sai_replied);
482                 LASSERT(!sa_has_callback(sai));
483
484                 list_for_each_entry_safe(entry, next, &sai->sai_entries,
485                                          se_list)
486                         sa_kill(sai, entry);
487
488                 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
489                 LASSERT(agl_list_empty(sai));
490
491                 ll_sai_free(sai);
492                 atomic_dec(&sbi->ll_sa_running);
493         }
494 }
495
496 /* Do NOT forget to drop the inode refcount taken when adding to sai_agls. */
497 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
498 {
499         struct ll_inode_info *lli = ll_i2info(inode);
500         __u64 index = lli->lli_agl_index;
501         int rc;
502         ENTRY;
503
504         LASSERT(list_empty(&lli->lli_agl_list));
505
506         /* AGL may fall behind statahead by one entry */
507         if (is_omitted_entry(sai, index + 1)) {
508                 lli->lli_agl_index = 0;
509                 iput(inode);
510                 RETURN_EXIT;
511         }
512
513         /* Someone is in glimpse (sync or async), do nothing. */
514         rc = down_write_trylock(&lli->lli_glimpse_sem);
515         if (rc == 0) {
516                 lli->lli_agl_index = 0;
517                 iput(inode);
518                 RETURN_EXIT;
519         }
520
521         /*
522          * Someone triggered a glimpse within the last second.
523          * 1) The former glimpse succeeded with a glimpse lock granted by the
524          *    OST; if that lock is still cached on the client, AGL needs to do
525          *    nothing. If it was cancelled by another client, AGL may fail to
526          *    obtain a new lock, since AGL triggers no glimpse callback.
527          * 2) The former glimpse succeeded, but the OST did not grant a glimpse
528          *    lock. In that case it is quite likely that the OST will not grant
529          *    a glimpse lock for AGL either.
530          * 3) The former glimpse failed. Compared with the other two cases this
531          *    is relatively rare; AGL can ignore it without much impact on
532          *    performance.
533          */
534         if (lli->lli_glimpse_time != 0 &&
535             cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
536                 up_write(&lli->lli_glimpse_sem);
537                 lli->lli_agl_index = 0;
538                 iput(inode);
539                 RETURN_EXIT;
540         }
541
542         CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
543                DFID", idx = %llu\n", PFID(&lli->lli_fid), index);
544
545         cl_agl(inode);
546         lli->lli_agl_index = 0;
547         lli->lli_glimpse_time = cfs_time_current();
548         up_write(&lli->lli_glimpse_sem);
549
550         CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
551                DFID", idx = %llu, rc = %d\n",
552                PFID(&lli->lli_fid), index, rc);
553
554         iput(inode);
555
556         EXIT;
557 }
558
559 /*
560  * prepare the inode for the sa_entry and add it to the AGL list; the sa_entry
561  * is then ready to be used by the scanner process.
562  */
563 static void sa_instantiate(struct ll_statahead_info *sai,
564                                  struct sa_entry *entry)
565 {
566         struct inode *dir = sai->sai_dentry->d_inode;
567         struct inode *child;
568         struct md_enqueue_info *minfo;
569         struct lookup_intent *it;
570         struct ptlrpc_request *req;
571         struct mdt_body *body;
572         int rc = 0;
573         ENTRY;
574
575         LASSERT(entry->se_handle != 0);
576
577         minfo = entry->se_minfo;
578         it = &minfo->mi_it;
579         req = entry->se_req;
580         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
581         if (body == NULL)
582                 GOTO(out, rc = -EFAULT);
583
584         child = entry->se_inode;
585         if (child != NULL) {
586                 /* revalidate; unlinked and re-created with the same name */
587                 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2,
588                                         &body->mbo_fid1))) {
589                         entry->se_inode = NULL;
590                         iput(child);
591                         child = NULL;
592                 }
593         }
594
595         it->it_lock_handle = entry->se_handle;
596         rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
597         if (rc != 1)
598                 GOTO(out, rc = -EAGAIN);
599
600         rc = ll_prep_inode(&child, req, dir->i_sb, it);
601         if (rc)
602                 GOTO(out, rc);
603
604         CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
605                ll_get_fsname(child->i_sb, NULL, 0),
606                entry->se_qstr.len, entry->se_qstr.name,
607                PFID(ll_inode2fid(child)), child);
608         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
609
610         entry->se_inode = child;
611
612         if (agl_should_run(sai, child))
613                 ll_agl_add(sai, child, entry->se_index);
614
615         EXIT;
616
617 out:
618         /* sa_make_ready() will drop ldlm ibits lock refcount by calling
619          * ll_intent_drop_lock() in spite of failures. Do not worry about
620          * calling ll_intent_drop_lock() more than once. */
621         sa_make_ready(sai, entry, rc);
622 }
623
624 /* once there are async stat replies, instantiate sa_entry from replies */
625 static void sa_handle_callback(struct ll_statahead_info *sai)
626 {
627         struct ll_inode_info *lli;
628
629         lli = ll_i2info(sai->sai_dentry->d_inode);
630
631         while (sa_has_callback(sai)) {
632                 struct sa_entry *entry;
633
634                 spin_lock(&lli->lli_sa_lock);
635                 if (unlikely(!sa_has_callback(sai))) {
636                         spin_unlock(&lli->lli_sa_lock);
637                         break;
638                 }
639                 entry = list_entry(sai->sai_interim_entries.next,
640                                    struct sa_entry, se_list);
641                 list_del_init(&entry->se_list);
642                 spin_unlock(&lli->lli_sa_lock);
643
644                 sa_instantiate(sai, entry);
645         }
646 }
647
648 /*
649  * callback for the async stat RPC. Because this is called in ptlrpcd context,
650  * we only put the sa_entry on sai_interim_entries and wake up the statahead
651  * thread, which will prepare the inode and instantiate the sa_entry later.
652  */
653 static int ll_statahead_interpret(struct ptlrpc_request *req,
654                                   struct md_enqueue_info *minfo, int rc)
655 {
656         struct lookup_intent *it = &minfo->mi_it;
657         struct inode *dir = minfo->mi_dir;
658         struct ll_inode_info *lli = ll_i2info(dir);
659         struct ll_statahead_info *sai = lli->lli_sai;
660         struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
661         __u64 handle = 0;
662         wait_queue_head_t *waitq = NULL;
663         ENTRY;
664
665         if (it_disposition(it, DISP_LOOKUP_NEG))
666                 rc = -ENOENT;
667
668         /* because the statahead thread waits for all inflight RPCs to finish,
669          * sai is always valid here and no refcount is needed */
670         LASSERT(sai != NULL);
671         LASSERT(!thread_is_stopped(&sai->sai_thread));
672         LASSERT(entry != NULL);
673
674         CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
675                entry->se_qstr.len, entry->se_qstr.name, rc);
676
677         if (rc != 0) {
678                 ll_intent_release(it);
679                 iput(dir);
680                 OBD_FREE_PTR(minfo);
681         } else {
682                 /* release ibits lock ASAP to avoid deadlock when statahead
683                  * thread enqueues lock on parent in readdir and another
684                  * process enqueues lock on child with parent lock held, e.g.
685                  * unlink. */
686                 handle = it->it_lock_handle;
687                 ll_intent_drop_lock(it);
688         }
689
690         spin_lock(&lli->lli_sa_lock);
691         if (rc != 0) {
692                 if (__sa_make_ready(sai, entry, rc))
693                         waitq = &sai->sai_waitq;
694         } else {
695                 entry->se_minfo = minfo;
696                 entry->se_req = ptlrpc_request_addref(req);
697                 /* Release the async ibits lock ASAP to avoid deadlock
698                  * when statahead thread tries to enqueue lock on parent
699                  * for readpage and another process tries to enqueue lock on child
700                  * with parent's lock held, for example: unlink. */
701                 entry->se_handle = handle;
702                 if (!sa_has_callback(sai))
703                         waitq = &sai->sai_thread.t_ctl_waitq;
704
705                 list_add_tail(&entry->se_list, &sai->sai_interim_entries);
706         }
707         sai->sai_replied++;
708         if (waitq != NULL)
709                 wake_up(waitq);
710         spin_unlock(&lli->lli_sa_lock);
711
712         RETURN(rc);
713 }
714
715 /* finish async stat RPC arguments */
716 static void sa_fini_data(struct md_enqueue_info *minfo)
717 {
718         iput(minfo->mi_dir);
719         OBD_FREE_PTR(minfo);
720 }
721
722 /*
723  * prepare arguments for async stat RPC.
724  */
725 static struct md_enqueue_info *
726 sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry)
727 {
728         struct md_enqueue_info   *minfo;
729         struct ldlm_enqueue_info *einfo;
730         struct md_op_data        *op_data;
731
732         OBD_ALLOC_PTR(minfo);
733         if (minfo == NULL)
734                 return ERR_PTR(-ENOMEM);
735
736         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, NULL, 0, 0,
737                                      LUSTRE_OPC_ANY, NULL);
738         if (IS_ERR(op_data)) {
739                 OBD_FREE_PTR(minfo);
740                 return (struct md_enqueue_info *)op_data;
741         }
742
743         if (child == NULL)
744                 op_data->op_fid2 = entry->se_fid;
745
746         minfo->mi_it.it_op = IT_GETATTR;
747         minfo->mi_dir = igrab(dir);
748         minfo->mi_cb = ll_statahead_interpret;
749         minfo->mi_cbdata = entry;
750
751         einfo = &minfo->mi_einfo;
752         einfo->ei_type   = LDLM_IBITS;
753         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
754         einfo->ei_cb_bl  = ll_md_blocking_ast;
755         einfo->ei_cb_cp  = ldlm_completion_ast;
756         einfo->ei_cb_gl  = NULL;
757         einfo->ei_cbdata = NULL;
758
759         return minfo;
760 }
761
762 /* async stat for file not found in dcache */
763 static int sa_lookup(struct inode *dir, struct sa_entry *entry)
764 {
765         struct md_enqueue_info   *minfo;
766         int                       rc;
767         ENTRY;
768
769         minfo = sa_prep_data(dir, NULL, entry);
770         if (IS_ERR(minfo))
771                 RETURN(PTR_ERR(minfo));
772
773         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
774         if (rc < 0)
775                 sa_fini_data(minfo);
776
777         RETURN(rc);
778 }
779
780 /**
781  * async stat for file found in dcache, similar to .revalidate
782  *
783  * \retval      1 dentry valid, no RPC sent
784  * \retval      0 dentry invalid, will send async stat RPC
785  * \retval      negative number upon error
786  */
787 static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
788                          struct dentry *dentry)
789 {
790         struct inode *inode = dentry->d_inode;
791         struct lookup_intent it = { .it_op = IT_GETATTR,
792                                     .it_lock_handle = 0 };
793         struct md_enqueue_info *minfo;
794         int rc;
795         ENTRY;
796
797         if (unlikely(inode == NULL))
798                 RETURN(1);
799
800         if (d_mountpoint(dentry))
801                 RETURN(1);
802
803         entry->se_inode = igrab(inode);
804         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
805                                 NULL);
806         if (rc == 1) {
807                 entry->se_handle = it.it_lock_handle;
808                 ll_intent_release(&it);
809                 RETURN(1);
810         }
811
812         minfo = sa_prep_data(dir, inode, entry);
813         if (IS_ERR(minfo)) {
814                 entry->se_inode = NULL;
815                 iput(inode);
816                 RETURN(PTR_ERR(minfo));
817         }
818
819         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
820         if (rc < 0) {
821                 entry->se_inode = NULL;
822                 iput(inode);
823                 sa_fini_data(minfo);
824         }
825
826         RETURN(rc);
827 }
828
829 /* async stat for file with @name */
830 static void sa_statahead(struct dentry *parent, const char *name, int len,
831                          const struct lu_fid *fid)
832 {
833         struct inode *dir = parent->d_inode;
834         struct ll_inode_info *lli = ll_i2info(dir);
835         struct ll_statahead_info *sai = lli->lli_sai;
836         struct dentry *dentry = NULL;
837         struct sa_entry *entry;
838         int rc;
839         ENTRY;
840
841         entry = sa_alloc(sai, sai->sai_index, name, len, fid);
842         if (IS_ERR(entry))
843                 RETURN_EXIT;
844
845         dentry = d_lookup(parent, &entry->se_qstr);
846         if (!dentry) {
847                 rc = sa_lookup(dir, entry);
848         } else {
849                 rc = sa_revalidate(dir, entry, dentry);
850                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
851                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
852         }
853
854         if (dentry != NULL)
855                 dput(dentry);
856
857         if (rc != 0)
858                 sa_make_ready(sai, entry, rc);
859         else
860                 sai->sai_sent++;
861
862         sai->sai_index++;
863
864         EXIT;
865 }
866
867 /* async glimpse (agl) thread main function */
868 static int ll_agl_thread(void *arg)
869 {
870         struct dentry *parent = (struct dentry *)arg;
871         struct inode *dir = parent->d_inode;
872         struct ll_inode_info *plli = ll_i2info(dir);
873         struct ll_inode_info *clli;
874         struct ll_sb_info *sbi = ll_i2sbi(dir);
875         struct ll_statahead_info *sai;
876         struct ptlrpc_thread *thread;
877         struct l_wait_info lwi = { 0 };
878         ENTRY;
879
880
881         sai = ll_sai_get(dir);
882         thread = &sai->sai_agl_thread;
883         thread->t_pid = current_pid();
884         CDEBUG(D_READA, "agl thread started: sai %p, parent %.*s\n",
885                sai, parent->d_name.len, parent->d_name.name);
886
887         atomic_inc(&sbi->ll_agl_total);
888         spin_lock(&plli->lli_agl_lock);
889         sai->sai_agl_valid = 1;
890         if (thread_is_init(thread))
891                 /* If someone else has changed the thread state
892                  * (e.g. already changed to SVC_STOPPING), we can't just
893                  * blindly overwrite that setting. */
894                 thread_set_flags(thread, SVC_RUNNING);
895         spin_unlock(&plli->lli_agl_lock);
896         wake_up(&thread->t_ctl_waitq);
897
898         while (1) {
899                 l_wait_event(thread->t_ctl_waitq,
900                              !agl_list_empty(sai) ||
901                              !thread_is_running(thread),
902                              &lwi);
903
904                 if (!thread_is_running(thread))
905                         break;
906
907                 spin_lock(&plli->lli_agl_lock);
908                 /* The statahead thread may help process AGL entries,
909                  * so check whether the list is empty again. */
910                 if (!agl_list_empty(sai)) {
911                         clli = agl_first_entry(sai);
912                         list_del_init(&clli->lli_agl_list);
913                         spin_unlock(&plli->lli_agl_lock);
914                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
915                 } else {
916                         spin_unlock(&plli->lli_agl_lock);
917                 }
918         }
919
920         spin_lock(&plli->lli_agl_lock);
921         sai->sai_agl_valid = 0;
922         while (!agl_list_empty(sai)) {
923                 clli = agl_first_entry(sai);
924                 list_del_init(&clli->lli_agl_list);
925                 spin_unlock(&plli->lli_agl_lock);
926                 clli->lli_agl_index = 0;
927                 iput(&clli->lli_vfs_inode);
928                 spin_lock(&plli->lli_agl_lock);
929         }
930         thread_set_flags(thread, SVC_STOPPED);
931         spin_unlock(&plli->lli_agl_lock);
932         wake_up(&thread->t_ctl_waitq);
933         ll_sai_put(sai);
934         CDEBUG(D_READA, "agl thread stopped: sai %p, parent %.*s\n",
935                sai, parent->d_name.len, parent->d_name.name);
936         RETURN(0);
937 }
938
939 /* start agl thread */
940 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
941 {
942         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
943         struct l_wait_info    lwi    = { 0 };
944         struct ll_inode_info  *plli;
945         struct task_struct            *task;
946         ENTRY;
947
948         CDEBUG(D_READA, "start agl thread: sai %p, parent %.*s\n",
949                sai, parent->d_name.len, parent->d_name.name);
950
951         plli = ll_i2info(parent->d_inode);
952         task = kthread_run(ll_agl_thread, parent,
953                                "ll_agl_%u", plli->lli_opendir_pid);
954         if (IS_ERR(task)) {
955                 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
956                 thread_set_flags(thread, SVC_STOPPED);
957                 RETURN_EXIT;
958         }
959
960         l_wait_event(thread->t_ctl_waitq,
961                      thread_is_running(thread) || thread_is_stopped(thread),
962                      &lwi);
963         EXIT;
964 }
965
966 /* statahead thread main function */
967 static int ll_statahead_thread(void *arg)
968 {
969         struct dentry *parent = (struct dentry *)arg;
970         struct inode *dir = parent->d_inode;
971         struct ll_inode_info *lli = ll_i2info(dir);
972         struct ll_sb_info *sbi = ll_i2sbi(dir);
973         struct ll_statahead_info *sai;
974         struct ptlrpc_thread *sa_thread;
975         struct ptlrpc_thread *agl_thread;
976         int first = 0;
977         struct md_op_data *op_data;
978         struct ll_dir_chain chain;
979         struct l_wait_info lwi = { 0 };
980         struct page *page = NULL;
981         __u64 pos = 0;
982         int rc = 0;
983         ENTRY;
984
985         sai = ll_sai_get(dir);
986         sa_thread = &sai->sai_thread;
987         agl_thread = &sai->sai_agl_thread;
988         sa_thread->t_pid = current_pid();
989         CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n",
990                sai, parent->d_name.len, parent->d_name.name);
991
992         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
993                                      LUSTRE_OPC_ANY, dir);
994         if (IS_ERR(op_data))
995                 GOTO(out, rc = PTR_ERR(op_data));
996
997         op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
998
999         if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1000                 ll_start_agl(parent, sai);
1001
1002         atomic_inc(&sbi->ll_sa_total);
1003         spin_lock(&lli->lli_sa_lock);
1004         if (thread_is_init(sa_thread))
1005                 /* If someone else has changed the thread state
1006                  * (e.g. already changed to SVC_STOPPING), we can't just
1007                  * blindly overwrite that setting. */
1008                 thread_set_flags(sa_thread, SVC_RUNNING);
1009         spin_unlock(&lli->lli_sa_lock);
1010         wake_up(&sa_thread->t_ctl_waitq);
1011
1012         ll_dir_chain_init(&chain);
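        /* walk the directory pages and submit an async stat for each entry,
         * until the directory ends or the thread is told to stop */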
1013         while (pos != MDS_DIR_END_OFF && thread_is_running(sa_thread)) {
1014                 struct lu_dirpage *dp;
1015                 struct lu_dirent  *ent;
1016
1017                 sai->sai_in_readpage = 1;
1018                 page = ll_get_dir_page(dir, op_data, pos, &chain);
1019                 sai->sai_in_readpage = 0;
1020                 if (IS_ERR(page)) {
1021                         rc = PTR_ERR(page);
1022                         CDEBUG(D_READA, "error reading dir "DFID" at %llu"
1023                                "/%llu opendir_pid = %u: rc = %d\n",
1024                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1025                                lli->lli_opendir_pid, rc);
1026                         break;
1027                 }
1028
1029                 dp = page_address(page);
1030                 for (ent = lu_dirent_start(dp);
1031                      ent != NULL && thread_is_running(sa_thread) &&
1032                      !sa_low_hit(sai);
1033                      ent = lu_dirent_next(ent)) {
1034                         __u64 hash;
1035                         int namelen;
1036                         char *name;
1037                         struct lu_fid fid;
1038
1039                         hash = le64_to_cpu(ent->lde_hash);
1040                         if (unlikely(hash < pos))
1041                                 /*
1042                                  * Skip until we find target hash value.
1043                                  */
1044                                 continue;
1045
1046                         namelen = le16_to_cpu(ent->lde_namelen);
1047                         if (unlikely(namelen == 0))
1048                                 /*
1049                                  * Skip dummy record.
1050                                  */
1051                                 continue;
1052
1053                         name = ent->lde_name;
1054                         if (name[0] == '.') {
1055                                 if (namelen == 1) {
1056                                         /*
1057                                          * skip "."
1058                                          */
1059                                         continue;
1060                                 } else if (name[1] == '.' && namelen == 2) {
1061                                         /*
1062                                          * skip ".."
1063                                          */
1064                                         continue;
1065                                 } else if (!sai->sai_ls_all) {
1066                                         /*
1067                                          * skip hidden files.
1068                                          */
1069                                         sai->sai_skip_hidden++;
1070                                         continue;
1071                                 }
1072                         }
1073
1074                         /*
1075                          * don't stat-ahead first entry.
1076                          */
1077                         if (unlikely(++first == 1))
1078                                 continue;
1079
1080                         fid_le_to_cpu(&fid, &ent->lde_fid);
1081
1082                         /* wait for spare statahead window */
1083                         do {
1084                                 l_wait_event(sa_thread->t_ctl_waitq,
1085                                              !sa_sent_full(sai) ||
1086                                              sa_has_callback(sai) ||
1087                                              !agl_list_empty(sai) ||
1088                                              !thread_is_running(sa_thread),
1089                                              &lwi);
1090
1091                                 sa_handle_callback(sai);
1092
1093                                 spin_lock(&lli->lli_agl_lock);
1094                                 while (sa_sent_full(sai) &&
1095                                        !agl_list_empty(sai)) {
1096                                         struct ll_inode_info *clli;
1097
1098                                         clli = agl_first_entry(sai);
1099                                         list_del_init(&clli->lli_agl_list);
1100                                         spin_unlock(&lli->lli_agl_lock);
1101
1102                                         ll_agl_trigger(&clli->lli_vfs_inode,
1103                                                         sai);
1104
1105                                         spin_lock(&lli->lli_agl_lock);
1106                                 }
1107                                 spin_unlock(&lli->lli_agl_lock);
1108                         } while (sa_sent_full(sai) &&
1109                                  thread_is_running(sa_thread));
1110
1111                         sa_statahead(parent, name, namelen, &fid);
1112                 }
1113
1114                 pos = le64_to_cpu(dp->ldp_hash_end);
1115                 ll_release_page(dir, page,
1116                                 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1117
1118                 if (sa_low_hit(sai)) {
1119                         rc = -EFAULT;
1120                         atomic_inc(&sbi->ll_sa_wrong);
1121                         CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1122                                "ratio too low: hit/miss %llu/%llu"
1123                                ", sent/replied %llu/%llu, stopping "
1124                                "statahead thread: pid %d\n",
1125                                PFID(&lli->lli_fid), sai->sai_hit,
1126                                sai->sai_miss, sai->sai_sent,
1127                                sai->sai_replied, current_pid());
1128                         break;
1129                 }
1130         }
1131         ll_dir_chain_fini(&chain);
1132         ll_finish_md_op_data(op_data);
1133
1134         if (rc < 0) {
1135                 spin_lock(&lli->lli_sa_lock);
1136                 thread_set_flags(sa_thread, SVC_STOPPING);
1137                 lli->lli_sa_enabled = 0;
1138                 spin_unlock(&lli->lli_sa_lock);
1139         }
1140
1141         /* statahead is finished, but statahead entries still need to be cached;
1142          * wait for file release to stop this thread. */
1143         while (thread_is_running(sa_thread)) {
1144                 l_wait_event(sa_thread->t_ctl_waitq,
1145                              sa_has_callback(sai) ||
1146                              !thread_is_running(sa_thread),
1147                              &lwi);
1148
1149                 sa_handle_callback(sai);
1150         }
1151
1152         EXIT;
1153 out:
1154         if (sai->sai_agl_valid) {
1155                 spin_lock(&lli->lli_agl_lock);
1156                 thread_set_flags(agl_thread, SVC_STOPPING);
1157                 spin_unlock(&lli->lli_agl_lock);
1158                 wake_up(&agl_thread->t_ctl_waitq);
1159
1160                 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1161                        sai, (unsigned int)agl_thread->t_pid);
1162                 l_wait_event(agl_thread->t_ctl_waitq,
1163                              thread_is_stopped(agl_thread),
1164                              &lwi);
1165         } else {
1166                 /* Set agl_thread flags anyway. */
1167                 thread_set_flags(agl_thread, SVC_STOPPED);
1168         }
1169
1170         /* wait for inflight statahead RPCs to finish, and then sai can be freed
1171          * safely, because statahead RPCs access sai data */
1172         while (sai->sai_sent != sai->sai_replied) {
1173                 /* in case we're not woken up, timeout wait */
1174                 lwi = LWI_TIMEOUT(msecs_to_jiffies(MSEC_PER_SEC >> 3),
1175                                   NULL, NULL);
1176                 l_wait_event(sa_thread->t_ctl_waitq,
1177                         sai->sai_sent == sai->sai_replied, &lwi);
1178         }
1179
1180         /* release resources held by statahead RPCs */
1181         sa_handle_callback(sai);
1182
1183         spin_lock(&lli->lli_sa_lock);
1184         thread_set_flags(sa_thread, SVC_STOPPED);
1185         spin_unlock(&lli->lli_sa_lock);
1186
1187         CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n",
1188                sai, parent->d_name.len, parent->d_name.name);
1189
1190         wake_up(&sai->sai_waitq);
1191         wake_up(&sa_thread->t_ctl_waitq);
1192         ll_sai_put(sai);
1193
1194         return rc;
1195 }
1196
1197 /* authorize opened dir handle @key to statahead */
1198 void ll_authorize_statahead(struct inode *dir, void *key)
1199 {
1200         struct ll_inode_info *lli = ll_i2info(dir);
1201
1202         spin_lock(&lli->lli_sa_lock);
1203         if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL) {
1204                 /*
1205                  * if lli_sai is not NULL, it means previous statahead is not
1206                  * finished yet, we'd better not start a new statahead for now.
1207                  */
1208                 LASSERT(lli->lli_opendir_pid == 0);
1209                 lli->lli_opendir_key = key;
1210                 lli->lli_opendir_pid = current_pid();
1211                 lli->lli_sa_enabled = 1;
1212         }
1213         spin_unlock(&lli->lli_sa_lock);
1214 }
1215
1216 /*
1217  * deauthorize opened dir handle @key to statahead, and notify statahead thread
1218  * to quit if it's running.
1219  */
1220 void ll_deauthorize_statahead(struct inode *dir, void *key)
1221 {
1222         struct ll_inode_info *lli = ll_i2info(dir);
1223         struct ll_statahead_info *sai;
1224
1225         LASSERT(lli->lli_opendir_key == key);
1226         LASSERT(lli->lli_opendir_pid != 0);
1227
1228         CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1229                 PFID(&lli->lli_fid));
1230
1231         spin_lock(&lli->lli_sa_lock);
1232         lli->lli_opendir_key = NULL;
1233         lli->lli_opendir_pid = 0;
1234         lli->lli_sa_enabled = 0;
1235         sai = lli->lli_sai;
1236         if (sai != NULL && thread_is_running(&sai->sai_thread)) {
1237                 /*
1238                  * statahead thread may not have quit yet because it needs to
1239                  * cache entries; now it is time to tell it to quit.
1240                  */
1241                 thread_set_flags(&sai->sai_thread, SVC_STOPPING);
1242                 wake_up(&sai->sai_thread.t_ctl_waitq);
1243         }
1244         spin_unlock(&lli->lli_sa_lock);
1245 }
1246
1247 enum {
1248         /**
1249          * not first dirent, or is "."
1250          */
1251         LS_NOT_FIRST_DE = 0,
1252         /**
1253          * the first non-hidden dirent
1254          */
1255         LS_FIRST_DE,
1256         /**
1257          * the first hidden dirent, that is "."
1258          */
1259         LS_FIRST_DOT_DE
1260 };
1261
1262 /* file is first dirent under @dir */
1263 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1264 {
1265         struct ll_dir_chain   chain;
1266         struct qstr          *target = &dentry->d_name;
1267         struct md_op_data    *op_data;
1268         int                   dot_de;
1269         struct page          *page = NULL;
1270         int                   rc = LS_NOT_FIRST_DE;
1271         __u64                 pos = 0;
1272         ENTRY;
1273
1274         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1275                                      LUSTRE_OPC_ANY, dir);
1276         if (IS_ERR(op_data))
1277                 RETURN(PTR_ERR(op_data));
1278         /**
1279          * FIXME: choose the start offset of the readdir
1280          */
1281         op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1282
1283         ll_dir_chain_init(&chain);
1284         page = ll_get_dir_page(dir, op_data, 0, &chain);
1285
1286         while (1) {
1287                 struct lu_dirpage *dp;
1288                 struct lu_dirent  *ent;
1289
1290                 if (IS_ERR(page)) {
1291                         struct ll_inode_info *lli = ll_i2info(dir);
1292
1293                         rc = PTR_ERR(page);
1294                         CERROR("%s: reading dir "DFID" at %llu"
1295                                "opendir_pid = %u : rc = %d\n",
1296                                ll_get_fsname(dir->i_sb, NULL, 0),
1297                                PFID(ll_inode2fid(dir)), pos,
1298                                lli->lli_opendir_pid, rc);
1299                         break;
1300                 }
1301
1302                 dp = page_address(page);
1303                 for (ent = lu_dirent_start(dp); ent != NULL;
1304                      ent = lu_dirent_next(ent)) {
1305                         __u64 hash;
1306                         int namelen;
1307                         char *name;
1308
1309                         hash = le64_to_cpu(ent->lde_hash);
1310                         /* The ll_get_dir_page() can return any page containing
1311                          * the given hash, which may not be the start hash. */
1312                         if (unlikely(hash < pos))
1313                                 continue;
1314
1315                         namelen = le16_to_cpu(ent->lde_namelen);
1316                         if (unlikely(namelen == 0))
1317                                 /*
1318                                  * skip dummy record.
1319                                  */
1320                                 continue;
1321
1322                         name = ent->lde_name;
1323                         if (name[0] == '.') {
1324                                 if (namelen == 1)
1325                                         /*
1326                                          * skip "."
1327                                          */
1328                                         continue;
1329                                 else if (name[1] == '.' && namelen == 2)
1330                                         /*
1331                                          * skip ".."
1332                                          */
1333                                         continue;
1334                                 else
1335                                         dot_de = 1;
1336                         } else {
1337                                 dot_de = 0;
1338                         }
1339
1340                         if (dot_de && target->name[0] != '.') {
1341                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1342                                        target->len, target->name,
1343                                        namelen, name);
1344                                 continue;
1345                         }
1346
1347                         if (target->len != namelen ||
1348                             memcmp(target->name, name, namelen) != 0)
1349                                 rc = LS_NOT_FIRST_DE;
1350                         else if (!dot_de)
1351                                 rc = LS_FIRST_DE;
1352                         else
1353                                 rc = LS_FIRST_DOT_DE;
1354
1355                         ll_release_page(dir, page, false);
1356                         GOTO(out, rc);
1357                 }
1358                 pos = le64_to_cpu(dp->ldp_hash_end);
1359                 if (pos == MDS_DIR_END_OFF) {
1360                         /*
1361                          * End of directory reached.
1362                          */
1363                         ll_release_page(dir, page, false);
1364                         GOTO(out, rc);
1365                 } else {
1366                         /*
1367                          * chain is exhausted
1368                          * Normal case: continue to the next page.
1369                          */
1370                         ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1371                                               LDF_COLLIDE);
1372                         page = ll_get_dir_page(dir, op_data, pos, &chain);
1373                 }
1374         }
1375         EXIT;
1376 out:
1377         ll_dir_chain_fini(&chain);
1378         ll_finish_md_op_data(op_data);
1379         return rc;
1380 }
1381
1382 /**
1383  * revalidate @dentryp from statahead cache
1384  *
1385  * \param[in] dir       parent directory
1386  * \param[in] sai       sai structure
1387  * \param[out] dentryp  pointer to dentry which will be revalidated
1388  * \param[in] unplug    unplug statahead window only (normally for negative
1389  *                      dentry)
1390  * \retval              1 on success, dentry is saved in @dentryp
1391  * \retval              0 if revalidation failed (no proper lock on client)
1392  * \retval              negative number upon error
1393  */
1394 static int revalidate_statahead_dentry(struct inode *dir,
1395                                         struct ll_statahead_info *sai,
1396                                         struct dentry **dentryp,
1397                                         bool unplug)
1398 {
1399         struct sa_entry *entry = NULL;
1400         struct l_wait_info lwi = { 0 };
1401         struct ll_dentry_data *ldd;
1402         struct ll_inode_info *lli = ll_i2info(dir);
1403         int rc = 0;
1404         ENTRY;
1405
1406         if ((*dentryp)->d_name.name[0] == '.') {
1407                 if (sai->sai_ls_all ||
1408                     sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1409                         /*
1410                          * The hidden dentry is the first one, or the
1411                          * statahead thread did not skip that many hidden
1412                          * dentries before "sai_ls_all" was enabled below.
1413                          */
1414                 } else {
1415                         if (!sai->sai_ls_all)
1416                                 /*
1417                                  * It may be because the hidden dentry is
1418                                  * not the first one and "sai_ls_all" was
1419                                  * not set, so "ls -al" missed it. Enable
1420                                  * "sai_ls_all" for such a case.
1421                                  */
1422                                 sai->sai_ls_all = 1;
1423
1424                         /*
1425                          * Such a "getattr" was skipped before
1426                          * "sai_ls_all" was enabled as above.
1427                          */
1428                         sai->sai_miss_hidden++;
1429                         RETURN(-EAGAIN);
1430                 }
1431         }
1432
1433         if (unplug)
1434                 GOTO(out, rc = 1);
1435
1436         entry = sa_get(sai, &(*dentryp)->d_name);
1437         if (entry == NULL)
1438                 GOTO(out, rc = -EAGAIN);
1439
1440         /* if statahead is busy in readdir, help it do post-work */
1441         if (!sa_ready(entry) && sai->sai_in_readpage)
1442                 sa_handle_callback(sai);
1443
1444         if (!sa_ready(entry)) {
1445                 spin_lock(&lli->lli_sa_lock);
1446                 sai->sai_index_wait = entry->se_index;
1447                 spin_unlock(&lli->lli_sa_lock);
1448                 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1449                                        LWI_ON_SIGNAL_NOOP, NULL);
1450                 rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
1451                 if (rc < 0) {
1452                         /*
1453                          * entry may not be ready, so it may be used by inflight
1454                          * statahead RPC, don't free it.
1455                          */
1456                         entry = NULL;
1457                         GOTO(out, rc = -EAGAIN);
1458                 }
1459         }
1460
1461         if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode != NULL) {
1462                 struct inode *inode = entry->se_inode;
1463                 struct lookup_intent it = { .it_op = IT_GETATTR,
1464                                             .it_lock_handle =
1465                                                 entry->se_handle };
1466                 __u64 bits;
1467
1468                 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1469                                         ll_inode2fid(inode), &bits);
1470                 if (rc == 1) {
1471                         if ((*dentryp)->d_inode == NULL) {
1472                                 struct dentry *alias;
1473
1474                                 alias = ll_splice_alias(inode, *dentryp);
1475                                 if (IS_ERR(alias)) {
1476                                         ll_intent_release(&it);
1477                                         GOTO(out, rc = PTR_ERR(alias));
1478                                 }
1479                                 *dentryp = alias;
1480                                 /* statahead prepared this inode, transfer inode
1481                                  * refcount from sa_entry to dentry */
1482                                 entry->se_inode = NULL;
1483                         } else if ((*dentryp)->d_inode != inode) {
1484                                 /* revalidate, but inode is recreated */
1485                                 CDEBUG(D_READA,
1486                                         "%s: stale dentry %.*s inode "
1487                                         DFID", statahead inode "DFID
1488                                         "\n",
1489                                         ll_get_fsname((*dentryp)->d_inode->i_sb,
1490                                                       NULL, 0),
1491                                         (*dentryp)->d_name.len,
1492                                         (*dentryp)->d_name.name,
1493                                         PFID(ll_inode2fid((*dentryp)->d_inode)),
1494                                         PFID(ll_inode2fid(inode)));
1495                                 ll_intent_release(&it);
1496                                 GOTO(out, rc = -ESTALE);
1497                         }
1498
1499                         if ((bits & MDS_INODELOCK_LOOKUP) &&
1500                             d_lustre_invalid(*dentryp))
1501                                 d_lustre_revalidate(*dentryp);
1502                         ll_intent_release(&it);
1503                 }
1504         }
1505 out:
1506         /*
1507          * A statahead-cached sa_entry can be used only once and is killed
1508          * right after use, so if lookup/revalidate accessed the statahead
1509          * cache, set the dentry's ldd_sa_generation to the parent's
1510          * lli_sa_generation; if we stat this file again later, we know we
1511          * have already done statahead for it, see dentry_may_statahead().
1512          */
1513         ldd = ll_d2d(*dentryp);
1514         /* ldd can be NULL if llite lookup failed. */
1515         if (ldd != NULL)
1516                 ldd->lld_sa_generation = lli->lli_sa_generation;
1517         sa_put(sai, entry);
1518
1519         RETURN(rc);
1520 }
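/*
 * Illustrative sketch (not part of the original source, compiled out): how the
 * generation numbers set under the "out:" label above are consumed later.
 * dentry_may_statahead() is the real consumer of lld_sa_generation; the
 * hypothetical helper below only mirrors that comparison to document the
 * use-once semantics of the statahead cache.
 */
#if 0
static bool example_already_statahead(struct inode *dir, struct dentry *dentry)
{
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_dentry_data *ldd = ll_d2d(dentry);

        /* A matching generation means revalidate_statahead_dentry() already
         * consumed (and killed) the cached sa_entry for this dentry, so a
         * second statahead lookup for it would be pointless. */
        return ldd != NULL && ldd->lld_sa_generation == lli->lli_sa_generation;
}
#endif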
1521
1522 /**
1523  * start statahead thread
1524  *
1525  * \param[in] dir       parent directory
1526  * \param[in] dentry    dentry that triggers statahead, normally the first
1527  *                      dirent under @dir
1528  * \retval              -EAGAIN on success: when this function is called we
1529  *                      are already inside a lookup, so the client should
1530  *                      stat this entry itself rather than wait for the
1531  *                      statahead thread to do it asynchronously.
1532  * \retval              negative number upon error
1533  */
1534 static int start_statahead_thread(struct inode *dir, struct dentry *dentry)
1535 {
1536         struct ll_inode_info *lli = ll_i2info(dir);
1537         struct ll_statahead_info *sai = NULL;
1538         struct dentry *parent = dentry->d_parent;
1539         struct ptlrpc_thread *thread;
1540         struct l_wait_info lwi = { 0 };
1541         struct task_struct *task;
1542         int rc;
1543         ENTRY;
1544
1545         /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
1546         rc = is_first_dirent(dir, dentry);
1547         if (rc == LS_NOT_FIRST_DE)
1548                 /* It is not an "ls -{a}l" operation, so no need to statahead for it. */
1549                 GOTO(out, rc = -EFAULT);
1550
1551         sai = ll_sai_alloc(parent);
1552         if (sai == NULL)
1553                 GOTO(out, rc = -ENOMEM);
1554
1555         sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1556
1557         /* if the current lli_opendir_key was deauthorized, or the dir was
1558          * re-opened by another process, don't start statahead; otherwise the
1559          * newly spawned statahead thread won't be notified to quit. */
1560         spin_lock(&lli->lli_sa_lock);
1561         if (unlikely(lli->lli_sai != NULL ||
1562                      lli->lli_opendir_key == NULL ||
1563                      lli->lli_opendir_pid != current->pid)) {
1564                 spin_unlock(&lli->lli_sa_lock);
1565                 GOTO(out, rc = -EPERM);
1566         }
1567         lli->lli_sai = sai;
1568         spin_unlock(&lli->lli_sa_lock);
1569
1570         atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running);
1571
1572         CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
1573                current_pid(), parent->d_name.len, parent->d_name.name);
1574
1575         task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
1576                            lli->lli_opendir_pid);
1577         thread = &sai->sai_thread;
1578         if (IS_ERR(task)) {
1579                 rc = PTR_ERR(task);
1580                 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1581                 GOTO(out, rc);
1582         }
1583
1584         l_wait_event(thread->t_ctl_waitq,
1585                      thread_is_running(thread) || thread_is_stopped(thread),
1586                      &lwi);
1587         ll_sai_put(sai);
1588
1589         /*
1590          * We don't stat-ahead for the first dirent since we are already in
1591          * lookup.
1592          */
1593         RETURN(-EAGAIN);
1594
1595 out:
1596         /* once starting the statahead thread has failed, disable statahead so
1597          * that subsequent stats won't waste time trying it. */
1598         spin_lock(&lli->lli_sa_lock);
1599         lli->lli_sa_enabled = 0;
1600         lli->lli_sai = NULL;
1601         spin_unlock(&lli->lli_sa_lock);
1602
1603         if (sai != NULL)
1604                 ll_sai_free(sai);
1605
1606         RETURN(rc);
1607 }
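/*
 * Illustrative sketch (hypothetical caller, compiled out): the "-EAGAIN on
 * success" convention above means a caller never waits for the new thread;
 * it stats the triggering entry inline, just as ll_statahead() below does.
 */
#if 0
static int example_trigger_statahead(struct inode *dir, struct dentry *dentry)
{
        int rc = start_statahead_thread(dir, dentry);

        /* -EAGAIN means the thread was started successfully; any other value
         * is a real error.  Either way the caller performs this first getattr
         * itself rather than waiting for the statahead thread. */
        return rc == -EAGAIN ? 0 : rc;
}
#endif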
1608
1609 /**
1610  * statahead entry function, called when the client does getattr on a file.
1611  * It starts a statahead thread if this is the first dir entry, otherwise it
1612  * revalidates the dentry from the statahead cache.
1613  *
1614  * \param[in]  dir      parent directory
1615  * \param[out] dentryp  dentry to getattr
1616  * \param[in]  unplug   unplug statahead window only (normally for negative
1617  *                      dentry)
1618  * \retval              1 on success
1619  * \retval              0 if revalidation from the statahead cache failed;
1620  *                      the caller needs to getattr from the server directly
1621  * \retval              negative number on error; the caller often ignores
1622  *                      this and then does a getattr from the server
1623  */
1624 int ll_statahead(struct inode *dir, struct dentry **dentryp, bool unplug)
1625 {
1626         struct ll_statahead_info *sai;
1627
1628         sai = ll_sai_get(dir);
1629         if (sai != NULL) {
1630                 int rc;
1631
1632                 rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
1633                 CDEBUG(D_READA, "revalidate statahead %.*s: %d.\n",
1634                         (*dentryp)->d_name.len, (*dentryp)->d_name.name, rc);
1635                 ll_sai_put(sai);
1636                 return rc;
1637         }
1638         return start_statahead_thread(dir, *dentryp);
1639 }
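/*
 * Illustrative sketch (hypothetical call site, compiled out): how a lookup
 * path might consume ll_statahead()'s return codes.  do_regular_getattr() is
 * a made-up placeholder for the normal getattr RPC path that the real llite
 * lookup/revalidate code performs itself.
 */
#if 0
static int example_getattr_with_statahead(struct inode *dir,
                                           struct dentry **dentryp)
{
        int rc = ll_statahead(dir, dentryp, false);

        if (rc == 1)
                return 0;       /* dentry served from the statahead cache */

        /* rc == 0 or a negative value: statahead could not help, so fall back
         * to a regular getattr from the MDS. */
        return do_regular_getattr(dir, *dentryp);
}
#endif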