LU-5443 lustre: replace direct HZ access with kernel APIs
lustre/llite/statahead.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #include <linux/fs.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/highmem.h>
41 #include <linux/pagemap.h>
42
43 #define DEBUG_SUBSYSTEM S_LLITE
44
45 #include <obd_support.h>
46 #include <lustre_dlm.h>
47 #include "llite_internal.h"
48
49 #define SA_OMITTED_ENTRY_MAX 8ULL
50
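/*
 * Overview (summary of the code below): when a process scans a directory
 * (e.g. "ls -l"), llite starts a statahead thread that walks the directory
 * pages and issues async getattr RPCs ahead of the scanning process.  Replies
 * are parked on sai_interim_entries by the RPC callback, instantiated into
 * sai_entries by sa_handle_callback(), and consumed by the scanner through
 * revalidate_statahead_dentry().  An optional AGL thread additionally
 * prefetches glimpse (size) locks from the OSTs for regular files.
 */
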
51 typedef enum {
52         /** negative values are for error cases */
53         SA_ENTRY_INIT = 0,      /** init entry */
54         SA_ENTRY_SUCC = 1,      /** stat succeeded */
55         SA_ENTRY_INVA = 2,      /** invalid entry */
56 } se_state_t;
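
/* se_state starts as SA_ENTRY_INIT and is switched to SA_ENTRY_SUCC or
 * SA_ENTRY_INVA by __sa_make_ready(); sa_ready() only checks that the entry
 * has left the INIT state. */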
57
58 /* sa_entry is not refcounted: the statahead thread allocates it and issues an
59  * async stat; the async stat callback ll_statahead_interpret() adds it to
60  * sai_interim_entries, later the statahead thread calls sa_handle_callback() to
61  * instantiate the entry and move it into sai_entries, and from then on only the
62  * scanner process can access and free it. */
63 struct sa_entry {
64         /* link into sai_interim_entries or sai_entries */
65         struct list_head        se_list;
66         /* link into sai hash table locally */
67         struct list_head        se_hash;
68         /* entry index in the sai */
69         __u64                   se_index;
70         /* low layer ldlm lock handle */
71         __u64                   se_handle;
72         /* entry status */
73         se_state_t              se_state;
74         /* entry size, contains name */
75         int                     se_size;
76         /* pointer to async getattr enqueue info */
77         struct md_enqueue_info *se_minfo;
78         /* pointer to the async getattr request */
79         struct ptlrpc_request  *se_req;
80         /* pointer to the target inode */
81         struct inode           *se_inode;
82         /* entry name */
83         struct qstr             se_qstr;
84 };
85
86 static unsigned int sai_generation = 0;
87 static DEFINE_SPINLOCK(sai_generation_lock);
88
89 static inline int sa_unhashed(struct sa_entry *entry)
90 {
91         return list_empty(&entry->se_hash);
92 }
93
94 /*
95  * The entry can only be released by the caller; it is necessary to hold the lock.
96  */
97 static inline int sa_ready(struct sa_entry *entry)
98 {
99         smp_rmb();
100         return (entry->se_state != SA_ENTRY_INIT);
101 }
102
103 static inline int sa_hash(int val)
104 {
105         return val & LL_SA_CACHE_MASK;
106 }
107
108 /*
109  * Insert an entry into the SA hash table.
110  */
111 static inline void
112 sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
113 {
114         int i = sa_hash(entry->se_qstr.hash);
115
116         spin_lock(&sai->sai_cache_lock[i]);
117         list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
118         spin_unlock(&sai->sai_cache_lock[i]);
119 }
120
121 /*
122  * Remove an entry from the SA hash table.
123  */
124 static inline void
125 sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
126 {
127         int i = sa_hash(entry->se_qstr.hash);
128
129         spin_lock(&sai->sai_cache_lock[i]);
130         list_del_init(&entry->se_hash);
131         spin_unlock(&sai->sai_cache_lock[i]);
132 }
133
134 static inline int agl_should_run(struct ll_statahead_info *sai,
135                                  struct inode *inode)
136 {
137         return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
138 }
139
140 static inline struct ll_inode_info *
141 agl_first_entry(struct ll_statahead_info *sai)
142 {
143         return list_entry(sai->sai_agls.next, struct ll_inode_info,
144                           lli_agl_list);
145 }
146
147 static inline int sa_sent_full(struct ll_statahead_info *sai)
148 {
149         return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
150 }
151
152 static inline int sa_has_callback(struct ll_statahead_info *sai)
153 {
154         return !list_empty(&sai->sai_interim_entries);
155 }
156
157 static inline int agl_list_empty(struct ll_statahead_info *sai)
158 {
159         return list_empty(&sai->sai_agls);
160 }
161
162 /**
163  * Statahead is considered to have a low hit rate when either
164  * (1) the hit ratio is less than 80%, or
165  * (2) there have been more than 8 consecutive misses;
166  * either condition means statahead is not being effective.
167  */
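/* Worked example: with sai_hit == 20 and sai_miss == 6, sai_hit > 7 and
 * 20 < 4 * 6, so the hit ratio is 20/26 (about 77%, below 80%) and
 * sa_low_hit() returns true even without 8 consecutive misses. */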
168 static inline int sa_low_hit(struct ll_statahead_info *sai)
169 {
170         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
171                 (sai->sai_consecutive_miss > 8));
172 }
173
174 /*
175  * If the given index is behind the statahead window by more than
176  * SA_OMITTED_ENTRY_MAX, then it is old.
177  */
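/* Worked example: with sai_max == 32 and sai_index == 100, an index of 59
 * gives 32 + 59 + 8 == 99 < 100, so any index at or below 59 is omitted. */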
178 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
179 {
180         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
181                  sai->sai_index);
182 }
183
184 /* allocate a sa_entry and hash it so that the scanner process can find it */
185 static struct sa_entry *
186 sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len)
187 {
188         struct ll_inode_info *lli;
189         struct sa_entry *entry;
190         int entry_size;
191         char *dname;
192         ENTRY;
193
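        /* room for the entry plus the name and its trailing NUL, with the
         * name portion rounded up to a 4-byte boundary */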
194         entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
195         OBD_ALLOC(entry, entry_size);
196         if (unlikely(entry == NULL))
197                 RETURN(ERR_PTR(-ENOMEM));
198
199         CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n",
200                len, name, entry, index);
201
202         entry->se_index = index;
203
204         entry->se_state = SA_ENTRY_INIT;
205         entry->se_size = entry_size;
206         dname = (char *)entry + sizeof(struct sa_entry);
207         memcpy(dname, name, len);
208         dname[len] = 0;
209         entry->se_qstr.hash = full_name_hash(name, len);
210         entry->se_qstr.len = len;
211         entry->se_qstr.name = dname;
212
213         lli = ll_i2info(sai->sai_inode);
214         spin_lock(&lli->lli_sa_lock);
215         INIT_LIST_HEAD(&entry->se_list);
216         sa_rehash(sai, entry);
217         spin_unlock(&lli->lli_sa_lock);
218
219         atomic_inc(&sai->sai_cache_count);
220
221         RETURN(entry);
222 }
223
224 /* free sa_entry which should have been unhashed and not in any list */
225 static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
226 {
227         CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n",
228                entry->se_qstr.len, entry->se_qstr.name, entry,
229                entry->se_index);
230
231         LASSERT(list_empty(&entry->se_list));
232         LASSERT(sa_unhashed(entry));
233
234         OBD_FREE(entry, entry->se_size);
235         atomic_dec(&sai->sai_cache_count);
236 }
237
238 /* find a sa_entry by name; used by the directory scanner; no lock is needed
239  * because only the scanner can remove the entry from the hash.
240  */
241 static struct sa_entry *
242 sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
243 {
244         struct sa_entry *entry;
245         int i = sa_hash(qstr->hash);
246
247         list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
248                 if (entry->se_qstr.hash == qstr->hash &&
249                     entry->se_qstr.len == qstr->len &&
250                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
251                         return entry;
252         }
253         return NULL;
254 }
255
256 static inline void
257 sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
258 {
259         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
260
261         LASSERT(!sa_unhashed(entry));
262         LASSERT(!list_empty(&entry->se_list));
263         LASSERT(sa_ready(entry));
264
265         sa_unhash(sai, entry);
266
267         spin_lock(&lli->lli_sa_lock);
268         list_del_init(&entry->se_list);
269         spin_unlock(&lli->lli_sa_lock);
270
271         if (entry->se_inode != NULL)
272                 iput(entry->se_inode);
273
274         sa_free(sai, entry);
275 }
276
277 /* called by scanner after use, sa_entry will be killed */
278 static void
279 sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
280 {
281         struct sa_entry *tmp, *next;
282
283         if (entry != NULL && entry->se_state == SA_ENTRY_SUCC) {
284                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
285
286                 sai->sai_hit++;
287                 sai->sai_consecutive_miss = 0;
288                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
289         } else {
290                 sai->sai_miss++;
291                 sai->sai_consecutive_miss++;
292         }
293
294         if (entry != NULL)
295                 sa_kill(sai, entry);
296
297         /* kill old completed entries, only scanner process does this, no need
298          * to lock */
299         list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
300                 if (!is_omitted_entry(sai, tmp->se_index))
301                         break;
302                 sa_kill(sai, tmp);
303         }
304
305         wake_up(&sai->sai_thread.t_ctl_waitq);
306 }
307
308 /* update the entry state and insert it into sai_entries sorted by index;
309  * return true if the scanner is waiting on this entry. */
310 static bool
311 __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
312 {
313         struct sa_entry *se;
314         struct list_head *pos = &sai->sai_entries;
315
316         LASSERT(!sa_ready(entry));
317         LASSERT(list_empty(&entry->se_list));
318
319         entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC;
320
321         list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
322                 if (se->se_index < entry->se_index) {
323                         pos = &se->se_list;
324                         break;
325                 }
326         }
327         list_add(&entry->se_list, pos);
328
329         return (entry->se_index == sai->sai_index_wait);
330 }
331
332 /* release resources used by the async stat RPC, mark the entry ready, and
333  * wake up the scanner if it is waiting on this entry */
334 static void
335 sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
336 {
337         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
338         struct md_enqueue_info *minfo = entry->se_minfo;
339         struct ptlrpc_request *req = entry->se_req;
340         bool wakeup;
341
342         /* release resources used in RPC */
343         if (minfo) {
344                 entry->se_minfo = NULL;
345                 ll_intent_release(&minfo->mi_it);
346                 iput(minfo->mi_dir);
347                 OBD_FREE_PTR(minfo);
348         }
349
350         if (req) {
351                 entry->se_req = NULL;
352                 ptlrpc_req_finished(req);
353         }
354
355         spin_lock(&lli->lli_sa_lock);
356         wakeup = __sa_make_ready(sai, entry, ret);
357         spin_unlock(&lli->lli_sa_lock);
358
359         if (wakeup)
360                 wake_up(&sai->sai_waitq);
361 }
362
363 /*
364  * Insert inode into the list of sai_agls.
365  */
366 static void ll_agl_add(struct ll_statahead_info *sai,
367                        struct inode *inode, int index)
368 {
369         struct ll_inode_info *child  = ll_i2info(inode);
370         struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
371         int                   added  = 0;
372
373         spin_lock(&child->lli_agl_lock);
374         if (child->lli_agl_index == 0) {
375                 child->lli_agl_index = index;
376                 spin_unlock(&child->lli_agl_lock);
377
378                 LASSERT(list_empty(&child->lli_agl_list));
379
380                 igrab(inode);
381                 spin_lock(&parent->lli_agl_lock);
382                 if (agl_list_empty(sai))
383                         added = 1;
384                 list_add_tail(&child->lli_agl_list, &sai->sai_agls);
385                 spin_unlock(&parent->lli_agl_lock);
386         } else {
387                 spin_unlock(&child->lli_agl_lock);
388         }
389
390         if (added > 0)
391                 wake_up(&sai->sai_agl_thread.t_ctl_waitq);
392 }
393
394 static struct ll_statahead_info *ll_sai_alloc(void)
395 {
396         struct ll_statahead_info *sai;
397         int                       i;
398         ENTRY;
399
400         OBD_ALLOC_PTR(sai);
401         if (!sai)
402                 RETURN(NULL);
403
404         atomic_set(&sai->sai_refcount, 1);
405
406         spin_lock(&sai_generation_lock);
407         sai->sai_generation = ++sai_generation;
408         if (unlikely(sai_generation == 0))
409                 sai->sai_generation = ++sai_generation;
410         spin_unlock(&sai_generation_lock);
411
412         sai->sai_max = LL_SA_RPC_MIN;
413         sai->sai_index = 1;
414         init_waitqueue_head(&sai->sai_waitq);
415         init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
416         init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
417
418         INIT_LIST_HEAD(&sai->sai_interim_entries);
419         INIT_LIST_HEAD(&sai->sai_entries);
420         INIT_LIST_HEAD(&sai->sai_agls);
421
422         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
423                 INIT_LIST_HEAD(&sai->sai_cache[i]);
424                 spin_lock_init(&sai->sai_cache_lock[i]);
425         }
426         atomic_set(&sai->sai_cache_count, 0);
427
428         RETURN(sai);
429 }
430
431 static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
432 {
433         struct ll_inode_info *lli = ll_i2info(dir);
434         struct ll_statahead_info *sai = NULL;
435
436         spin_lock(&lli->lli_sa_lock);
437         sai = lli->lli_sai;
438         if (sai != NULL)
439                 atomic_inc(&sai->sai_refcount);
440         spin_unlock(&lli->lli_sa_lock);
441
442         return sai;
443 }
444
445 static void ll_sai_put(struct ll_statahead_info *sai)
446 {
447         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
448
449         if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
450                 struct sa_entry *entry, *next;
451                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
452
453                 lli->lli_sai = NULL;
454                 spin_unlock(&lli->lli_sa_lock);
455
456                 LASSERT(thread_is_stopped(&sai->sai_thread));
457                 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
458                 LASSERT(sai->sai_sent == sai->sai_replied);
459                 LASSERT(!sa_has_callback(sai));
460
461                 list_for_each_entry_safe(entry, next, &sai->sai_entries,
462                                          se_list)
463                         sa_kill(sai, entry);
464
465                 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
466                 LASSERT(agl_list_empty(sai));
467
468                 iput(sai->sai_inode);
469                 OBD_FREE_PTR(sai);
470                 atomic_dec(&sbi->ll_sa_running);
471         }
472 }
473
474 /* The inode reference taken when it was added to sai_agls must be dropped here. */
475 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
476 {
477         struct ll_inode_info *lli = ll_i2info(inode);
478         __u64 index = lli->lli_agl_index;
479         int rc;
480         ENTRY;
481
482         LASSERT(list_empty(&lli->lli_agl_list));
483
484         /* AGL may fall behind statahead by one entry */
485         if (is_omitted_entry(sai, index + 1)) {
486                 lli->lli_agl_index = 0;
487                 iput(inode);
488                 RETURN_EXIT;
489         }
490
491         /* Someone is in glimpse (sync or async), do nothing. */
492         rc = down_write_trylock(&lli->lli_glimpse_sem);
493         if (rc == 0) {
494                 lli->lli_agl_index = 0;
495                 iput(inode);
496                 RETURN_EXIT;
497         }
498
499         /*
500          * Someone triggered a glimpse within the last second.
501          * 1) The former glimpse succeeded with a glimpse lock granted by the
502          *    OST; if the lock is still cached on the client, AGL needs to do
503          *    nothing. If it was cancelled by another client, AGL may be unable
504          *    to obtain a new lock since AGL triggers no glimpse callback.
505          * 2) The former glimpse succeeded, but the OST did not grant a glimpse
506          *    lock. In that case it is quite possible the OST will not grant a
507          *    glimpse lock for AGL either.
508          * 3) The former glimpse failed; compared with the other two cases it
509          *    is relatively rare. AGL can ignore it without much performance
510          *    impact.
511          */
512         if (lli->lli_glimpse_time != 0 &&
513             cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
514                 up_write(&lli->lli_glimpse_sem);
515                 lli->lli_agl_index = 0;
516                 iput(inode);
517                 RETURN_EXIT;
518         }
519
520         CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
521                DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
522
523         cl_agl(inode);
524         lli->lli_agl_index = 0;
525         lli->lli_glimpse_time = cfs_time_current();
526         up_write(&lli->lli_glimpse_sem);
527
528         CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
529                DFID", idx = "LPU64", rc = %d\n",
530                PFID(&lli->lli_fid), index, rc);
531
532         iput(inode);
533
534         EXIT;
535 }
536
537 /* prepare the inode for a sa_entry and add it to the AGL list; after this the
538  * sa_entry is ready to be used by the scanner process. */
539 static void sa_instantiate(struct ll_statahead_info *sai,
540                                  struct sa_entry *entry)
541 {
542         struct inode *dir = sai->sai_inode;
543         struct inode *child;
544         struct md_enqueue_info *minfo;
545         struct lookup_intent *it;
546         struct ptlrpc_request *req;
547         struct mdt_body *body;
548         int rc = 0;
549         ENTRY;
550
551         LASSERT(entry->se_handle != 0);
552
553         minfo = entry->se_minfo;
554         it = &minfo->mi_it;
555         req = entry->se_req;
556         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
557         if (body == NULL)
558                 GOTO(out, rc = -EFAULT);
559
560         child = entry->se_inode;
561         if (child == NULL) {
562                 /*
563                  * lookup.
564                  */
565                 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
566
567                 /* XXX: No fid in reply, this is probably a cross-ref case.
568                  * SA can't handle it yet. */
569                 if (body->mbo_valid & OBD_MD_MDS)
570                         GOTO(out, rc = -EAGAIN);
571         } else {
572                 /*
573                  * revalidate.
574                  */
575                 /* unlinked and re-created with the same name */
576                 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2,
577                                         &body->mbo_fid1))) {
578                         entry->se_inode = NULL;
579                         iput(child);
580                         child = NULL;
581                 }
582         }
583
584         it->d.lustre.it_lock_handle = entry->se_handle;
585         rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
586         if (rc != 1)
587                 GOTO(out, rc = -EAGAIN);
588
589         rc = ll_prep_inode(&child, req, dir->i_sb, it);
590         if (rc)
591                 GOTO(out, rc);
592
593         CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
594                ll_get_fsname(child->i_sb, NULL, 0),
595                entry->se_qstr.len, entry->se_qstr.name,
596                PFID(ll_inode2fid(child)), child);
597         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
598
599         entry->se_inode = child;
600
601         if (agl_should_run(sai, child))
602                 ll_agl_add(sai, child, entry->se_index);
603
604         EXIT;
605
606 out:
607         /* sa_make_ready() will drop ldlm ibits lock refcount by calling
608          * ll_intent_drop_lock() in spite of failures. Do not worry about
609          * calling ll_intent_drop_lock() more than once. */
610         sa_make_ready(sai, entry, rc);
611 }
612
613 /* once there are async stat replies, instantiate sa_entry */
614 static void sa_handle_callback(struct ll_statahead_info *sai)
615 {
616         struct ll_inode_info *lli;
617
618         lli = ll_i2info(sai->sai_inode);
619
620         while (sa_has_callback(sai)) {
621                 struct sa_entry *entry;
622
623                 spin_lock(&lli->lli_sa_lock);
624                 if (unlikely(!sa_has_callback(sai))) {
625                         spin_unlock(&lli->lli_sa_lock);
626                         break;
627                 }
628                 entry = list_entry(sai->sai_interim_entries.next,
629                                    struct sa_entry, se_list);
630                 list_del_init(&entry->se_list);
631                 spin_unlock(&lli->lli_sa_lock);
632
633                 sa_instantiate(sai, entry);
634         }
635
636         spin_lock(&lli->lli_agl_lock);
637         while (!agl_list_empty(sai)) {
638                 struct ll_inode_info *clli;
639
640                 clli = agl_first_entry(sai);
641                 list_del_init(&clli->lli_agl_list);
642                 spin_unlock(&lli->lli_agl_lock);
643
644                 ll_agl_trigger(&clli->lli_vfs_inode, sai);
645
646                 spin_lock(&lli->lli_agl_lock);
647         }
648         spin_unlock(&lli->lli_agl_lock);
649 }
650
651 /* callback for async stat RPC: because this is called in ptlrpcd context, only
652  * put the sa_entry on the sai_interim_entries list and let sa_handle_callback()
653  * prepare the inode and instantiate the sa_entry later. */
654 static int ll_statahead_interpret(struct ptlrpc_request *req,
655                                   struct md_enqueue_info *minfo, int rc)
656 {
657         struct lookup_intent *it = &minfo->mi_it;
658         struct inode *dir = minfo->mi_dir;
659         struct ll_inode_info *lli = ll_i2info(dir);
660         struct ll_statahead_info *sai = lli->lli_sai;
661         struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
662         __u64 handle = 0;
663         bool wakeup;
664         ENTRY;
665
666         if (it_disposition(it, DISP_LOOKUP_NEG))
667                 rc = -ENOENT;
668
669         /* because the statahead thread waits for all inflight RPCs to finish,
670          * sai is always valid here and needs no refcount */
671         LASSERT(sai != NULL);
672         LASSERT(!thread_is_stopped(&sai->sai_thread));
673         LASSERT(entry != NULL);
674
675         CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
676                entry->se_qstr.len, entry->se_qstr.name, rc);
677
678         if (rc != 0) {
679                 ll_intent_release(it);
680                 iput(dir);
681                 OBD_FREE_PTR(minfo);
682         } else {
683                 /* release ibits lock ASAP to avoid deadlock when statahead
684                  * thread enqueues lock on parent in readdir and another
685                  * process enqueues lock on child with parent lock held, eg.
686                  * unlink. */
687                 handle = it->d.lustre.it_lock_handle;
688                 ll_intent_drop_lock(it);
689         }
690
691         spin_lock(&lli->lli_sa_lock);
692         if (rc != 0) {
693                 wakeup = __sa_make_ready(sai, entry, rc);
694         } else {
695                 entry->se_minfo = minfo;
696                 entry->se_req = ptlrpc_request_addref(req);
697                 /* Release the async ibits lock ASAP to avoid deadlock
698                  * when the statahead thread tries to enqueue a lock on the
699                  * parent for readpage while another thread enqueues a lock
700                  * on a child with the parent's lock held, e.g. unlink. */
701                 entry->se_handle = handle;
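                /* only wake the statahead thread when the interim list goes
                 * from empty to non-empty; if it is already non-empty the
                 * thread has already been notified of pending callbacks */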
702                 wakeup = !sa_has_callback(sai);
703                 list_add_tail(&entry->se_list, &sai->sai_interim_entries);
704         }
705         sai->sai_replied++;
706         if (wakeup)
707                 wake_up(&sai->sai_thread.t_ctl_waitq);
708         spin_unlock(&lli->lli_sa_lock);
709
710         RETURN(rc);
711 }
712
713 static void sa_fini_data(struct md_enqueue_info *minfo,
714                          struct ldlm_enqueue_info *einfo)
715 {
716         LASSERT(minfo && einfo);
717         iput(minfo->mi_dir);
718         capa_put(minfo->mi_data.op_capa1);
719         capa_put(minfo->mi_data.op_capa2);
720         OBD_FREE_PTR(minfo);
721         OBD_FREE_PTR(einfo);
722 }
723
724 /**
725  * There is a race condition between "capa_put" and "ll_statahead_interpret"
726  * when accessing "op_data.op_capa[1,2]":
727  * "capa_put" drops the reference on "op_data.op_capa[1,2]" after calling
728  * "md_intent_getattr_async", but "ll_statahead_interpret" may run first and
729  * fill "op_data.op_capa[1,2]" with POISON, causing "capa_put" to access an
730  * invalid "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa" here before
731  * calling "md_intent_getattr_async".
732  */
733 static int sa_prep_data(struct inode *dir, struct inode *child,
734                         struct sa_entry *entry, struct md_enqueue_info **pmi,
735                         struct ldlm_enqueue_info **pei,
736                         struct obd_capa **pcapa)
737 {
738         struct qstr              *qstr = &entry->se_qstr;
739         struct md_enqueue_info   *minfo;
740         struct ldlm_enqueue_info *einfo;
741         struct md_op_data        *op_data;
742
743         OBD_ALLOC_PTR(einfo);
744         if (einfo == NULL)
745                 return -ENOMEM;
746
747         OBD_ALLOC_PTR(minfo);
748         if (minfo == NULL) {
749                 OBD_FREE_PTR(einfo);
750                 return -ENOMEM;
751         }
752
753         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
754                                      qstr->len, 0, LUSTRE_OPC_ANY, NULL);
755         if (IS_ERR(op_data)) {
756                 OBD_FREE_PTR(einfo);
757                 OBD_FREE_PTR(minfo);
758                 return PTR_ERR(op_data);
759         }
760
761         minfo->mi_it.it_op = IT_GETATTR;
762         minfo->mi_dir = igrab(dir);
763         minfo->mi_cb = ll_statahead_interpret;
764         minfo->mi_cbdata = entry;
765
766         einfo->ei_type   = LDLM_IBITS;
767         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
768         einfo->ei_cb_bl  = ll_md_blocking_ast;
769         einfo->ei_cb_cp  = ldlm_completion_ast;
770         einfo->ei_cb_gl  = NULL;
771         einfo->ei_cbdata = NULL;
772
773         *pmi = minfo;
774         *pei = einfo;
775         pcapa[0] = op_data->op_capa1;
776         pcapa[1] = op_data->op_capa2;
777
778         return 0;
779 }
780
781 static int sa_lookup(struct inode *dir, struct sa_entry *entry)
782 {
783         struct md_enqueue_info   *minfo;
784         struct ldlm_enqueue_info *einfo;
785         struct obd_capa          *capas[2];
786         int                       rc;
787         ENTRY;
788
789         rc = sa_prep_data(dir, NULL, entry, &minfo, &einfo, capas);
790         if (rc)
791                 RETURN(rc);
792
793         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
794         if (!rc) {
795                 capa_put(capas[0]);
796                 capa_put(capas[1]);
797         } else {
798                 sa_fini_data(minfo, einfo);
799         }
800
801         RETURN(rc);
802 }
803
804 /**
805  * similar to ll_revalidate_it().
806  * \retval      1 -- dentry valid
807  * \retval      0 -- will send stat-ahead request
808  * \retval others -- prepare stat-ahead request failed
809  */
810 static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
811                          struct dentry *dentry)
812 {
813         struct inode *inode = dentry->d_inode;
814         struct lookup_intent it = { .it_op = IT_GETATTR,
815                                     .d.lustre.it_lock_handle = 0 };
816         struct md_enqueue_info *minfo;
817         struct ldlm_enqueue_info *einfo;
818         struct obd_capa *capas[2];
819         int rc;
820         ENTRY;
821
822         if (unlikely(inode == NULL))
823                 RETURN(1);
824
825         if (d_mountpoint(dentry))
826                 RETURN(1);
827
828         entry->se_inode = igrab(inode);
829         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
830                                 NULL);
831         if (rc == 1) {
832                 entry->se_handle = it.d.lustre.it_lock_handle;
833                 ll_intent_release(&it);
834                 RETURN(1);
835         }
836
837         rc = sa_prep_data(dir, inode, entry, &minfo, &einfo, capas);
838         if (rc) {
839                 entry->se_inode = NULL;
840                 iput(inode);
841                 RETURN(rc);
842         }
843
844         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
845         if (!rc) {
846                 capa_put(capas[0]);
847                 capa_put(capas[1]);
848         } else {
849                 entry->se_inode = NULL;
850                 iput(inode);
851                 sa_fini_data(minfo, einfo);
852         }
853
854         RETURN(rc);
855 }
856
857 static void sa_statahead(struct dentry *parent, const char *name, int len)
858 {
859         struct inode *dir = parent->d_inode;
860         struct ll_inode_info *lli = ll_i2info(dir);
861         struct ll_statahead_info *sai = lli->lli_sai;
862         struct dentry *dentry = NULL;
863         struct sa_entry *entry;
864         int rc;
865         ENTRY;
866
867         entry = sa_alloc(sai, sai->sai_index, name, len);
868         if (IS_ERR(entry))
869                 RETURN_EXIT;
870
871         dentry = d_lookup(parent, &entry->se_qstr);
872         if (!dentry) {
873                 rc = sa_lookup(dir, entry);
874         } else {
875                 rc = sa_revalidate(dir, entry, dentry);
876                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
877                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
878         }
879
880         if (dentry != NULL)
881                 dput(dentry);
882
883         if (rc != 0)
884                 sa_make_ready(sai, entry, rc);
885         else
886                 sai->sai_sent++;
887
888         sai->sai_index++;
889
890         EXIT;
891 }
892
893 static int ll_agl_thread(void *arg)
894 {
895         struct dentry *parent = (struct dentry *)arg;
896         struct inode *dir = parent->d_inode;
897         struct ll_inode_info *plli = ll_i2info(dir);
898         struct ll_inode_info *clli;
899         struct ll_sb_info *sbi = ll_i2sbi(dir);
900         struct ll_statahead_info *sai;
901         struct ptlrpc_thread *thread;
902         struct l_wait_info lwi = { 0 };
903         ENTRY;
904
905
906         sai = ll_sai_get(dir);
907         thread = &sai->sai_agl_thread;
908         thread->t_pid = current_pid();
909         CDEBUG(D_READA, "agl thread started: sai %p, parent %.*s\n",
910                sai, parent->d_name.len, parent->d_name.name);
911
912         atomic_inc(&sbi->ll_agl_total);
913         spin_lock(&plli->lli_agl_lock);
914         sai->sai_agl_valid = 1;
915         if (thread_is_init(thread))
916                 /* If someone else has changed the thread state
917                  * (e.g. already changed to SVC_STOPPING), we can't just
918                  * blindly overwrite that setting. */
919                 thread_set_flags(thread, SVC_RUNNING);
920         spin_unlock(&plli->lli_agl_lock);
921         wake_up(&thread->t_ctl_waitq);
922
923         while (1) {
924                 l_wait_event(thread->t_ctl_waitq,
925                              !agl_list_empty(sai) ||
926                              !thread_is_running(thread),
927                              &lwi);
928
929                 if (!thread_is_running(thread))
930                         break;
931
932                 spin_lock(&plli->lli_agl_lock);
933                 /* The statahead thread may also process AGL entries, so
934                  * check whether the list is empty again under the lock. */
935                 if (!agl_list_empty(sai)) {
936                         clli = agl_first_entry(sai);
937                         list_del_init(&clli->lli_agl_list);
938                         spin_unlock(&plli->lli_agl_lock);
939                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
940                 } else {
941                         spin_unlock(&plli->lli_agl_lock);
942                 }
943         }
944
945         spin_lock(&plli->lli_agl_lock);
946         sai->sai_agl_valid = 0;
947         while (!agl_list_empty(sai)) {
948                 clli = agl_first_entry(sai);
949                 list_del_init(&clli->lli_agl_list);
950                 spin_unlock(&plli->lli_agl_lock);
951                 clli->lli_agl_index = 0;
952                 iput(&clli->lli_vfs_inode);
953                 spin_lock(&plli->lli_agl_lock);
954         }
955         thread_set_flags(thread, SVC_STOPPED);
956         spin_unlock(&plli->lli_agl_lock);
957         wake_up(&thread->t_ctl_waitq);
958         ll_sai_put(sai);
959         CDEBUG(D_READA, "agl thread stopped: sai %p, parent %.*s\n",
960                sai, parent->d_name.len, parent->d_name.name);
961         RETURN(0);
962 }
963
964 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
965 {
966         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
967         struct l_wait_info    lwi    = { 0 };
968         struct ll_inode_info  *plli;
969         struct task_struct            *task;
970         ENTRY;
971
972         CDEBUG(D_READA, "start agl thread: sai %p, parent %.*s\n",
973                sai, parent->d_name.len, parent->d_name.name);
974
975         plli = ll_i2info(parent->d_inode);
976         task = kthread_run(ll_agl_thread, parent,
977                                "ll_agl_%u", plli->lli_opendir_pid);
978         if (IS_ERR(task)) {
979                 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
980                 thread_set_flags(thread, SVC_STOPPED);
981                 RETURN_EXIT;
982         }
983
984         l_wait_event(thread->t_ctl_waitq,
985                      thread_is_running(thread) || thread_is_stopped(thread),
986                      &lwi);
987         EXIT;
988 }
989
990 static int ll_statahead_thread(void *arg)
991 {
992         struct dentry *parent = (struct dentry *)arg;
993         struct inode *dir = parent->d_inode;
994         struct ll_inode_info *lli = ll_i2info(dir);
995         struct ll_sb_info *sbi = ll_i2sbi(dir);
996         struct ll_statahead_info *sai;
997         struct ptlrpc_thread *thread;
998         struct ptlrpc_thread *agl_thread;
999         int first = 0;
1000         struct md_op_data *op_data;
1001         struct ll_dir_chain chain;
1002         struct l_wait_info lwi = { 0 };
1003         struct page *page = NULL;
1004         __u64 pos = 0;
1005         int rc = 0;
1006         ENTRY;
1007
1008         sai = ll_sai_get(dir);
1009         thread = &sai->sai_thread;
1010         agl_thread = &sai->sai_agl_thread;
1011         thread->t_pid = current_pid();
1012         CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n",
1013                sai, parent->d_name.len, parent->d_name.name);
1014
1015         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1016                                      LUSTRE_OPC_ANY, dir);
1017         if (IS_ERR(op_data))
1018                 GOTO(out, rc = PTR_ERR(op_data));
1019
1020         op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1021
1022         if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1023                 ll_start_agl(parent, sai);
1024
1025         atomic_inc(&sbi->ll_sa_total);
1026         spin_lock(&lli->lli_sa_lock);
1027         if (thread_is_init(thread))
1028                 /* If someone else has changed the thread state
1029                  * (e.g. already changed to SVC_STOPPING), we can't just
1030                  * blindly overwrite that setting. */
1031                 thread_set_flags(thread, SVC_RUNNING);
1032         spin_unlock(&lli->lli_sa_lock);
1033         wake_up(&thread->t_ctl_waitq);
1034
1035         ll_dir_chain_init(&chain);
1036         while (pos != MDS_DIR_END_OFF && thread_is_running(thread)) {
1037                 struct lu_dirpage *dp;
1038                 struct lu_dirent  *ent;
1039
1040                 sai->sai_in_readpage = 1;
1041                 page = ll_get_dir_page(dir, op_data, pos, &chain);
1042                 sai->sai_in_readpage = 0;
1043                 if (IS_ERR(page)) {
1044                         rc = PTR_ERR(page);
1045                         CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
1046                                "/"LPU64" opendir_pid = %u: rc = %d\n",
1047                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1048                                lli->lli_opendir_pid, rc);
1049                         break;
1050                 }
1051
1052                 dp = page_address(page);
1053                 for (ent = lu_dirent_start(dp);
1054                      ent != NULL && thread_is_running(thread) &&
1055                      !sa_low_hit(sai);
1056                      ent = lu_dirent_next(ent)) {
1057                         __u64 hash;
1058                         int namelen;
1059                         char *name;
1060
1061                         hash = le64_to_cpu(ent->lde_hash);
1062                         if (unlikely(hash < pos))
1063                                 /*
1064                                  * Skip until we find target hash value.
1065                                  */
1066                                 continue;
1067
1068                         namelen = le16_to_cpu(ent->lde_namelen);
1069                         if (unlikely(namelen == 0))
1070                                 /*
1071                                  * Skip dummy record.
1072                                  */
1073                                 continue;
1074
1075                         name = ent->lde_name;
1076                         if (name[0] == '.') {
1077                                 if (namelen == 1) {
1078                                         /*
1079                                          * skip "."
1080                                          */
1081                                         continue;
1082                                 } else if (name[1] == '.' && namelen == 2) {
1083                                         /*
1084                                          * skip ".."
1085                                          */
1086                                         continue;
1087                                 } else if (!sai->sai_ls_all) {
1088                                         /*
1089                                          * skip hidden files.
1090                                          */
1091                                         sai->sai_skip_hidden++;
1092                                         continue;
1093                                 }
1094                         }
1095
1096                         /*
1097                          * don't stat-ahead first entry.
1098                          */
1099                         if (unlikely(++first == 1))
1100                                 continue;
1101
1102                         /* wait for spare statahead window */
1103                         do {
1104                                 l_wait_event(thread->t_ctl_waitq,
1105                                              !sa_sent_full(sai) ||
1106                                              sa_has_callback(sai) ||
1107                                              !agl_list_empty(sai) ||
1108                                              !thread_is_running(thread),
1109                                              &lwi);
1110
1111                                 sa_handle_callback(sai);
1112                         } while (sa_sent_full(sai) &&
1113                                  thread_is_running(thread));
1114
1115                         sa_statahead(parent, name, namelen);
1116                 }
1117
1118                 pos = le64_to_cpu(dp->ldp_hash_end);
1119                 ll_release_page(dir, page,
1120                                 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1121
1122                 if (sa_low_hit(sai)) {
1123                         rc = -EFAULT;
1124                         atomic_inc(&sbi->ll_sa_wrong);
1125                         CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1126                                "ratio too low: hit/miss "LPU64"/"LPU64
1127                                ", sent/replied "LPU64"/"LPU64", stopping "
1128                                "statahead thread: pid %d\n",
1129                                PFID(&lli->lli_fid), sai->sai_hit,
1130                                sai->sai_miss, sai->sai_sent,
1131                                sai->sai_replied, current_pid());
1132                         break;
1133                 }
1134         }
1135         ll_dir_chain_fini(&chain);
1136         ll_finish_md_op_data(op_data);
1137
1138         if (rc < 0) {
1139                 spin_lock(&lli->lli_sa_lock);
1140                 thread_set_flags(thread, SVC_STOPPING);
1141                 lli->lli_sa_enabled = 0;
1142                 spin_unlock(&lli->lli_sa_lock);
1143         }
1144
1145         /* statahead is finished, but the statahead entries still need to be
1146          * cached; wait for file release to stop this thread. */
1147         while (thread_is_running(thread)) {
1148                 l_wait_event(thread->t_ctl_waitq,
1149                              sa_has_callback(sai) ||
1150                              !agl_list_empty(sai) ||
1151                              !thread_is_running(thread),
1152                              &lwi);
1153
1154                 sa_handle_callback(sai);
1155         }
1156
1157         EXIT;
1158 out:
1159         if (sai->sai_agl_valid) {
1160                 spin_lock(&lli->lli_agl_lock);
1161                 thread_set_flags(agl_thread, SVC_STOPPING);
1162                 spin_unlock(&lli->lli_agl_lock);
1163                 wake_up(&agl_thread->t_ctl_waitq);
1164
1165                 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1166                        sai, (unsigned int)agl_thread->t_pid);
1167                 l_wait_event(agl_thread->t_ctl_waitq,
1168                              thread_is_stopped(agl_thread),
1169                              &lwi);
1170         } else {
1171                 /* Set agl_thread flags anyway. */
1172                 thread_set_flags(agl_thread, SVC_STOPPED);
1173         }
1174
1175         /* wait for inflight statahead RPCs to finish, then sai can be freed
1176          * safely because the RPC callbacks access sai data */
1177         while (sai->sai_sent != sai->sai_replied) {
1178                 /* in case we're not woken up, timeout wait */
1179                 lwi = LWI_TIMEOUT(msecs_to_jiffies(MSEC_PER_SEC >> 3),
1180                                   NULL, NULL);
1181                 l_wait_event(thread->t_ctl_waitq,
1182                         sai->sai_sent == sai->sai_replied, &lwi);
1183         }
1184
1185         /* release resources held by statahead RPCs */
1186         sa_handle_callback(sai);
1187
1188         spin_lock(&lli->lli_sa_lock);
1189         thread_set_flags(thread, SVC_STOPPED);
1190         spin_unlock(&lli->lli_sa_lock);
1191
1192         wake_up(&sai->sai_waitq);
1193         wake_up(&thread->t_ctl_waitq);
1194         ll_sai_put(sai);
1195         CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n",
1196                sai, parent->d_name.len, parent->d_name.name);
1197         dput(parent);
1198         return rc;
1199 }
1200
1201 /* authorize opened dir handle @key to statahead later */
1202 void ll_authorize_statahead(struct inode *dir, void *key)
1203 {
1204         struct ll_inode_info *lli = ll_i2info(dir);
1205
1206         spin_lock(&lli->lli_sa_lock);
1207         if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL) {
1208                 /*
1209                  * if lli_sai is not NULL, it means previous statahead is not
1210                  * finished yet, we'd better not start a new statahead for now.
1211                  */
1212                 LASSERT(lli->lli_opendir_pid == 0);
1213                 lli->lli_opendir_key = key;
1214                 lli->lli_opendir_pid = current_pid();
1215                 lli->lli_sa_enabled = 1;
1216         }
1217         spin_unlock(&lli->lli_sa_lock);
1218 }
1219
1220 /* deauthorize the opened dir handle @key from statahead; the statahead thread
1221  * may still be running, so notify it to quit. */
1222 void ll_deauthorize_statahead(struct inode *dir, void *key)
1223 {
1224         struct ll_inode_info *lli = ll_i2info(dir);
1225         struct ll_statahead_info *sai;
1226
1227         LASSERT(lli->lli_opendir_key == key);
1228         LASSERT(lli->lli_opendir_pid != 0);
1229
1230         CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1231                 PFID(&lli->lli_fid));
1232
1233         spin_lock(&lli->lli_sa_lock);
1234         lli->lli_opendir_key = NULL;
1235         lli->lli_opendir_pid = 0;
1236         lli->lli_sa_enabled = 0;
1237         sai = lli->lli_sai;
1238         if (sai != NULL && thread_is_running(&sai->sai_thread)) {
1239                 /*
1240                  * statahead thread may not quit yet because it needs to cache
1241                  * entries, now it's time to tell it to quit.
1242                  */
1243                 thread_set_flags(&sai->sai_thread, SVC_STOPPING);
1244                 wake_up(&sai->sai_thread.t_ctl_waitq);
1245         }
1246         spin_unlock(&lli->lli_sa_lock);
1247 }
1248
1249 enum {
1250         /**
1251          * not first dirent, or is "."
1252          */
1253         LS_NONE_FIRST_DE = 0,
1254         /**
1255          * the first non-hidden dirent
1256          */
1257         LS_FIRST_DE,
1258         /**
1259          * the first hidden dirent, that is "."
1260          */
1261         LS_FIRST_DOT_DE
1262 };
1263
1264 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1265 {
1266         struct ll_dir_chain   chain;
1267         struct qstr          *target = &dentry->d_name;
1268         struct md_op_data    *op_data;
1269         int                   dot_de;
1270         struct page          *page = NULL;
1271         int                   rc     = LS_NONE_FIRST_DE;
1272         __u64                 pos = 0;
1273         ENTRY;
1274
1275         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1276                                      LUSTRE_OPC_ANY, dir);
1277         if (IS_ERR(op_data))
1278                 RETURN(PTR_ERR(op_data));
1279         /**
1280          *FIXME choose the start offset of the readdir
1281          */
1282         op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1283
1284         ll_dir_chain_init(&chain);
1285         page = ll_get_dir_page(dir, op_data, 0, &chain);
1286
1287         while (1) {
1288                 struct lu_dirpage *dp;
1289                 struct lu_dirent  *ent;
1290
1291                 if (IS_ERR(page)) {
1292                         struct ll_inode_info *lli = ll_i2info(dir);
1293
1294                         rc = PTR_ERR(page);
1295                         CERROR("%s: reading dir "DFID" at "LPU64
1296                                "opendir_pid = %u : rc = %d\n",
1297                                ll_get_fsname(dir->i_sb, NULL, 0),
1298                                PFID(ll_inode2fid(dir)), pos,
1299                                lli->lli_opendir_pid, rc);
1300                         break;
1301                 }
1302
1303                 dp = page_address(page);
1304                 for (ent = lu_dirent_start(dp); ent != NULL;
1305                      ent = lu_dirent_next(ent)) {
1306                         __u64 hash;
1307                         int namelen;
1308                         char *name;
1309
1310                         hash = le64_to_cpu(ent->lde_hash);
1311                         /* ll_get_dir_page() can return any page containing
1312                          * the given hash, which may not be the start hash. */
1313                         if (unlikely(hash < pos))
1314                                 continue;
1315
1316                         namelen = le16_to_cpu(ent->lde_namelen);
1317                         if (unlikely(namelen == 0))
1318                                 /*
1319                                  * skip dummy record.
1320                                  */
1321                                 continue;
1322
1323                         name = ent->lde_name;
1324                         if (name[0] == '.') {
1325                                 if (namelen == 1)
1326                                         /*
1327                                          * skip "."
1328                                          */
1329                                         continue;
1330                                 else if (name[1] == '.' && namelen == 2)
1331                                         /*
1332                                          * skip ".."
1333                                          */
1334                                         continue;
1335                                 else
1336                                         dot_de = 1;
1337                         } else {
1338                                 dot_de = 0;
1339                         }
1340
1341                         if (dot_de && target->name[0] != '.') {
1342                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1343                                        target->len, target->name,
1344                                        namelen, name);
1345                                 continue;
1346                         }
1347
1348                         if (target->len != namelen ||
1349                             memcmp(target->name, name, namelen) != 0)
1350                                 rc = LS_NONE_FIRST_DE;
1351                         else if (!dot_de)
1352                                 rc = LS_FIRST_DE;
1353                         else
1354                                 rc = LS_FIRST_DOT_DE;
1355
1356                         ll_release_page(dir, page, false);
1357                         GOTO(out, rc);
1358                 }
1359                 pos = le64_to_cpu(dp->ldp_hash_end);
1360                 if (pos == MDS_DIR_END_OFF) {
1361                         /*
1362                          * End of directory reached.
1363                          */
1364                         ll_release_page(dir, page, false);
1365                         GOTO(out, rc);
1366                 } else {
1367                         /*
1368                          * chain is exhausted
1369                          * Normal case: continue to the next page.
1370                          */
1371                         ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1372                                               LDF_COLLIDE);
1373                         page = ll_get_dir_page(dir, op_data, pos, &chain);
1374                 }
1375         }
1376         EXIT;
1377 out:
1378         ll_dir_chain_fini(&chain);
1379         ll_finish_md_op_data(op_data);
1380         return rc;
1381 }
1382
1383 static int revalidate_statahead_dentry(struct inode *dir,
1384                                         struct ll_statahead_info *sai,
1385                                         struct dentry **dentryp,
1386                                         int only_unplug)
1387 {
1388         struct sa_entry *entry = NULL;
1389         struct l_wait_info lwi = { 0 };
1390         int rc = 0;
1391         ENTRY;
1392
1393         if ((*dentryp)->d_name.name[0] == '.') {
1394                 if (sai->sai_ls_all ||
1395                     sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1396                         /*
1397                          * The hidden dentry is the first one, or the
1398                          * statahead thread did not skip this many hidden
1399                          * dentries before "sai_ls_all" was enabled below.
1400                          */
1401                 } else {
1402                         if (!sai->sai_ls_all)
1403                                 /*
1404                                  * It may be that the hidden dentry is not
1405                                  * the first one, so "sai_ls_all" was not
1406                                  * set and "ls -al" was missed. Enable
1407                                  * "sai_ls_all" for such a case.
1408                                  */
1409                                 sai->sai_ls_all = 1;
1410
1411                         /*
1412                          * Such a "getattr" was skipped before "sai_ls_all"
1413                          * was enabled above.
1414                          */
1415                         sai->sai_miss_hidden++;
1416                         RETURN(-EAGAIN);
1417                 }
1418         }
1419
1420         entry = sa_get(sai, &(*dentryp)->d_name);
1421         if (entry == NULL || only_unplug) {
1422                 sa_put(sai, entry);
1423                 RETURN(entry ? 1 : -EAGAIN);
1424         }
1425
1426         /* if statahead is busy in readdir, help it do post-work */
1427         if (!sa_ready(entry) && sai->sai_in_readpage)
1428                 sa_handle_callback(sai);
1429
1430         if (!sa_ready(entry)) {
1431                 sai->sai_index_wait = entry->se_index;
1432                 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1433                                         LWI_ON_SIGNAL_NOOP, NULL);
1434                 rc = l_wait_event(sai->sai_waitq,
1435                                 sa_ready(entry) ||
1436                                 thread_is_stopped(&sai->sai_thread),
1437                                 &lwi);
1438                 if (rc < 0) {
1439                         sa_put(sai, entry);
1440                         RETURN(-EAGAIN);
1441                 }
1442         }
1443
1444         if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode != NULL) {
1445                 struct inode *inode = entry->se_inode;
1446                 struct lookup_intent it = { .it_op = IT_GETATTR,
1447                                             .d.lustre.it_lock_handle =
1448                                                 entry->se_handle };
1449                 __u64 bits;
1450
1451                 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1452                                         ll_inode2fid(inode), &bits);
1453                 if (rc == 1) {
1454                         if ((*dentryp)->d_inode == NULL) {
1455                                 struct dentry *alias;
1456
1457                                 alias = ll_splice_alias(inode, *dentryp);
1458                                 if (IS_ERR(alias)) {
1459                                         sa_put(sai, entry);
1460                                         RETURN(PTR_ERR(alias));
1461                                 }
1462                                 *dentryp = alias;
1463                         } else if ((*dentryp)->d_inode != inode) {
1464                                 /* revalidated, but the inode was re-created */
1465                                 CDEBUG(D_READA,
1466                                         "%s: stale dentry %.*s inode "
1467                                         DFID", statahead inode "DFID
1468                                         "\n",
1469                                         ll_get_fsname((*dentryp)->d_inode->i_sb,
1470                                                       NULL, 0),
1471                                         (*dentryp)->d_name.len,
1472                                         (*dentryp)->d_name.name,
1473                                         PFID(ll_inode2fid((*dentryp)->d_inode)),
1474                                         PFID(ll_inode2fid(inode)));
1475                                 sa_put(sai, entry);
1476                                 RETURN(-ESTALE);
1477                         } else {
1478                                 iput(inode);
1479                         }
1480                         entry->se_inode = NULL;
1481
1482                         if ((bits & MDS_INODELOCK_LOOKUP) &&
1483                             d_lustre_invalid(*dentryp))
1484                                 d_lustre_revalidate(*dentryp);
1485                         ll_intent_release(&it);
1486                 }
1487         }
1488
1489         sa_put(sai, entry);
1490         RETURN(rc);
1491 }
1492
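/**
 * Allocate a statahead info for the directory and start the "ll_sa_%u"
 * statahead thread for the process that opened it.
 *
 * Always returns a negative value so the caller falls back to a normal
 * lookup for this first dirent: -EAGAIN in the common cases, -ENOMEM or
 * -ESTALE on setup failures.  On the failure paths that go through "out",
 * statahead is also disabled for this directory.
 */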
1493 static int start_statahead_thread(struct inode *dir, struct dentry *dentry)
1494 {
1495         struct ll_inode_info *lli = ll_i2info(dir);
1496         struct ll_statahead_info *sai = NULL;
1497         struct dentry *parent;
1498         struct ptlrpc_thread *thread;
1499         struct l_wait_info lwi = { 0 };
1500         struct task_struct *task;
1501         int rc;
1502         ENTRY;
1503
1504         /* we are the "lli_opendir_pid" owner, so only we can set "lli_sai". */
1505         rc = is_first_dirent(dir, dentry);
1506         if (rc == LS_NONE_FIRST_DE)
1507                 /* not an "ls -{a}l" operation, no need to statahead for it. */
1508                 GOTO(out, rc = -EAGAIN);
1509
1510         sai = ll_sai_alloc();
1511         if (sai == NULL)
1512                 GOTO(out, rc = -ENOMEM);
1513
1514         sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1515         sai->sai_inode = igrab(dir);
1516         if (unlikely(sai->sai_inode == NULL)) {
1517                 CWARN("Not starting statahead on dying inode "DFID"\n",
1518                         PFID(&lli->lli_fid));
1519                 GOTO(out, rc = -ESTALE);
1520         }
1521
1522         /* take a reference on the parent here; it is dropped in ll_statahead_thread() */
1523         parent = dget(dentry->d_parent);
1524         if (unlikely(sai->sai_inode != parent->d_inode)) {
1525                 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1526
1527                 CWARN("Race condition, someone changed %.*s just now: "
1528                         "old parent "DFID", new parent "DFID"\n",
1529                         dentry->d_name.len, dentry->d_name.name,
1530                         PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1531                 dput(parent);
1532                 iput(sai->sai_inode);
1533                 GOTO(out, rc = -EAGAIN);
1534         }
1535
1536         CDEBUG(D_READA, "start statahead thread: sai %p, parent %.*s\n",
1537                sai, parent->d_name.len, parent->d_name.name);
1538
1539         /* if another process has started the statahead thread, or the current
1540          * lli_opendir_key has been deauthorized, don't start statahead. */
1541         spin_lock(&lli->lli_sa_lock);
1542         if (unlikely(lli->lli_sai != NULL ||
1543                      lli->lli_opendir_key == NULL ||
1544                      lli->lli_opendir_pid != current->pid)) {
1545                 spin_unlock(&lli->lli_sa_lock);
1546
1547                 dput(parent);
1548                 iput(sai->sai_inode);
1549                 GOTO(out, rc = -EAGAIN);
1550         }
1551         lli->lli_sai = sai;
1552         spin_unlock(&lli->lli_sa_lock);
1553
1554         atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running);
1555
1556         task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
1557                            lli->lli_opendir_pid);
1558         thread = &sai->sai_thread;
1559         if (IS_ERR(task)) {
1560                 rc = PTR_ERR(task);
1561                 CERROR("cannot start ll_sa thread: rc = %d\n", rc);
1562                 dput(parent);
1563
1564                 spin_lock(&lli->lli_sa_lock);
1565                 thread_set_flags(thread, SVC_STOPPED);
1566                 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1567                 spin_unlock(&lli->lli_sa_lock);
1568
1569                 ll_sai_put(sai);
1570                 LASSERT(lli->lli_sai == NULL);
1571                 RETURN(-EAGAIN);
1572         }
1573
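        /* wait until the new statahead thread is up and running (or has
         * already stopped) before dropping our sai reference */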
1574         l_wait_event(thread->t_ctl_waitq,
1575                      thread_is_running(thread) || thread_is_stopped(thread),
1576                      &lwi);
1577         ll_sai_put(sai);
1578
1579         /*
1580          * We don't stat-ahead for the first dirent since we are already in
1581          * lookup.
1582          */
1583         RETURN(-EAGAIN);
1584
1585 out:
1586         if (sai != NULL)
1587                 OBD_FREE_PTR(sai);
1588
1589         /* once starting the statahead thread has failed, disable statahead so
1590          * that subsequent calls do not waste time trying it again. */
1591         spin_lock(&lli->lli_sa_lock);
1592         lli->lli_sa_enabled = 0;
1593         spin_unlock(&lli->lli_sa_lock);
1594
1595         RETURN(rc);
1596 }
1597
1598 /**
1599  * Start a statahead thread if this is the first dir entry.
1600  * Otherwise, if a thread is already started, wait until it is ahead of us.
1601  * \retval 1       -- found the entry with lock in cache, the caller needs to
1602  *                    do nothing.
1603  * \retval 0       -- found the entry in cache, but without lock, the caller
1604  *                    needs to refresh it from the MDS.
1605  * \retval others  -- the caller needs to process it as a non-statahead case.
1606  */
1607 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1608                        int only_unplug)
1609 {
1610         struct ll_statahead_info *sai;
1611
1612         sai = ll_sai_get(dir);
1613         if (sai != NULL) {
1614                 int rc;
1615
1616                 rc = revalidate_statahead_dentry(dir, sai, dentryp,
1617                                                  only_unplug);
1618                 CDEBUG(D_READA, "revalidate statahead %.*s: %d.\n",
1619                         (*dentryp)->d_name.len, (*dentryp)->d_name.name, rc);
1620                 ll_sai_put(sai);
1621                 return rc;
1622         }
1623
1624         return start_statahead_thread(dir, *dentryp);
1625 }
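
/*
 * A minimal caller sketch (hypothetical, not the actual llite lookup code)
 * showing how the do_statahead_enter() return values documented above are
 * meant to be consumed.  example_statahead_lookup() is an illustrative name
 * only, so the block is compiled out.
 */
#if 0
static int example_statahead_lookup(struct inode *dir, struct dentry **de)
{
        int rc = do_statahead_enter(dir, de, 0);

        if (rc == 1)
                return 0;       /* dentry and DLM lock found in cache, done */
        if (rc == 0)
                return 1;       /* dentry cached without lock, refresh from MDS */

        return rc;              /* negative: process as a non-statahead lookup */
}
#endif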