1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2011, 2012, Whamcloud, Inc.
33  */
34 /*
35  * This file is part of Lustre, http://www.lustre.org/
36  * Lustre is a trademark of Sun Microsystems, Inc.
37  */
38
39 #include <linux/fs.h>
40 #include <linux/sched.h>
41 #include <linux/mm.h>
42 #include <linux/smp_lock.h>
43 #include <linux/highmem.h>
44 #include <linux/pagemap.h>
45
46 #define DEBUG_SUBSYSTEM S_LLITE
47
48 #include <obd_support.h>
49 #include <lustre_lite.h>
50 #include <lustre_dlm.h>
51 #include <linux/lustre_version.h>
52 #include "llite_internal.h"
53
54 #define SA_OMITTED_ENTRY_MAX 8ULL
55
56 typedef enum {
57         /** negative values are for error cases */
58         SA_ENTRY_INIT = 0,      /** init entry */
59         SA_ENTRY_SUCC = 1,      /** stat succeeded */
60         SA_ENTRY_INVA = 2,      /** invalid entry */
61         SA_ENTRY_DEST = 3,      /** entry to be destroyed */
62 } se_stat_t;
63
64 struct ll_sa_entry {
65         /* link into sai->sai_entries_{sent,received,stated} */
66         cfs_list_t              se_list;
67         /* link into sai hash table locally */
68         cfs_list_t              se_hash;
69         /* entry reference count */
70         cfs_atomic_t            se_refcount;
71         /* entry index in the sai */
72         __u64                   se_index;
73         /* low layer ldlm lock handle */
74         __u64                   se_handle;
75         /* entry status */
76         se_stat_t               se_stat;
77         /* entry size, contains name */
78         int                     se_size;
79         /* pointer to async getattr enqueue info */
80         struct md_enqueue_info *se_minfo;
81         /* pointer to the async getattr request */
82         struct ptlrpc_request  *se_req;
83         /* pointer to the target inode */
84         struct inode           *se_inode;
85         /* entry name */
86         struct qstr             se_qstr;
87 };
88
89 static unsigned int sai_generation = 0;
90 static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED;
91
92 static inline int ll_sa_entry_unlinked(struct ll_sa_entry *entry)
93 {
94         return cfs_list_empty(&entry->se_list);
95 }
96
97 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
98 {
99         return cfs_list_empty(&entry->se_hash);
100 }
101
102 /*
103  * The entry can only be released by the caller; it is necessary to hold the lock.
104  */
105 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
106 {
107         smp_rmb();
108         return (entry->se_stat != SA_ENTRY_INIT);
109 }
110
111 static inline int ll_sa_entry_hash(int val)
112 {
113         return val & LL_SA_CACHE_MASK;
114 }
115
116 /*
117  * Insert an entry into the SA hash table.
118  */
119 static inline void
120 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
121 {
122         int i = ll_sa_entry_hash(entry->se_qstr.hash);
123
124         cfs_spin_lock(&sai->sai_cache_lock[i]);
125         cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
126         cfs_spin_unlock(&sai->sai_cache_lock[i]);
127 }
128
129 /*
130  * Remove an entry from the SA hash table.
131  */
132 static inline void
133 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
134 {
135         int i = ll_sa_entry_hash(entry->se_qstr.hash);
136
137         cfs_spin_lock(&sai->sai_cache_lock[i]);
138         cfs_list_del_init(&entry->se_hash);
139         cfs_spin_unlock(&sai->sai_cache_lock[i]);
140 }
141
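/*
 * AGL (async glimpse lock) is only run for a regular file that has stripe
 * metadata (lli_smd), and only while AGL is valid for this sai.
 */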
142 static inline int agl_should_run(struct ll_statahead_info *sai,
143                                  struct inode *inode)
144 {
145         if (inode != NULL && S_ISREG(inode->i_mode) &&
146             ll_i2info(inode)->lli_smd != NULL && sai->sai_agl_valid)
147                 return 1;
148         return 0;
149 }
150
151 static inline struct ll_sa_entry *
152 sa_first_received_entry(struct ll_statahead_info *sai)
153 {
154         return cfs_list_entry(sai->sai_entries_received.next,
155                               struct ll_sa_entry, se_list);
156 }
157
158 static inline struct ll_inode_info *
159 agl_first_entry(struct ll_statahead_info *sai)
160 {
161         return cfs_list_entry(sai->sai_entries_agl.next,
162                               struct ll_inode_info, lli_agl_list);
163 }
164
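/* The statahead window is full once sai_cache_count reaches sai_max. */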
165 static inline int sa_sent_full(struct ll_statahead_info *sai)
166 {
167         return cfs_atomic_read(&sai->sai_cache_count) >= sai->sai_max;
168 }
169
170 static inline int sa_received_empty(struct ll_statahead_info *sai)
171 {
172         return cfs_list_empty(&sai->sai_entries_received);
173 }
174
175 static inline int agl_list_empty(struct ll_statahead_info *sai)
176 {
177         return cfs_list_empty(&sai->sai_entries_agl);
178 }
179
180 /**
181  * The hit rate is considered low if either:
182  * (1) the hit ratio is less than 80%,
183  * or
184  * (2) there are more than 8 consecutive misses.
185  */
186 static inline int sa_low_hit(struct ll_statahead_info *sai)
187 {
188         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
189                 (sai->sai_consecutive_miss > 8));
190 }
191
192 /*
193  * If the given index falls behind the statahead window by more than
194  * SA_OMITTED_ENTRY_MAX, then it is old.
195  */
196 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
197 {
198         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
199                  sai->sai_index);
200 }
201
202 /*
203  * Insert the new entry at the tail of sai_entries_sent when initialized.
204  */
205 static struct ll_sa_entry *
206 ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
207                   const char *name, int len)
208 {
209         struct ll_inode_info *lli;
210         struct ll_sa_entry   *entry;
211         int                   entry_size;
212         char                 *dname;
213         ENTRY;
214
215         entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
216         OBD_ALLOC(entry, entry_size);
217         if (unlikely(entry == NULL))
218                 RETURN(ERR_PTR(-ENOMEM));
219
220         CDEBUG(D_READA, "alloc sai entry %.*s(%p) index "LPU64"\n",
221                len, name, entry, index);
222
223         entry->se_index = index;
224
225         /*
226          * Statahead entry reference rules:
227          *
228          * 1) When a statahead entry is initialized, its reference count is
229          *    set to 2. One reference is used by the directory scanner. When
230          *    the scanner searches the statahead cache for a given name, it
231          *    can perform a lockless hash lookup (only the scanner can remove
232          *    an entry from the hash list), and once found, it need not call
233          *    "atomic_inc()" on the entry reference, which improves
234          *    performance. After using the statahead entry, the scanner calls
235          *    "atomic_dec()" to drop the reference held since initialization.
236          *    If it is the last reference, the statahead entry is freed.
237          *
238          * 2) All other threads, including the statahead thread and ptlrpcd
239          *    threads, must hold a reference on the entry while processing it
240          *    to guarantee that the entry will not be released by the
241          *    directory scanner. After processing the entry, these threads
242          *    drop the entry reference. If it is the last reference, the
243          *    entry is freed.
244          *
245          *    The second reference taken at initialization is used by the
246          *    statahead thread, following rule 2).
247          */
248         cfs_atomic_set(&entry->se_refcount, 2);
249         entry->se_stat = SA_ENTRY_INIT;
250         entry->se_size = entry_size;
251         dname = (char *)entry + sizeof(struct ll_sa_entry);
252         memcpy(dname, name, len);
253         dname[len] = 0;
254         entry->se_qstr.hash = full_name_hash(name, len);
255         entry->se_qstr.len = len;
256         entry->se_qstr.name = dname;
257
258         lli = ll_i2info(sai->sai_inode);
259         cfs_spin_lock(&lli->lli_sa_lock);
260         cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
261         cfs_spin_unlock(&lli->lli_sa_lock);
262
263         cfs_atomic_inc(&sai->sai_cache_count);
264         ll_sa_entry_enhash(sai, entry);
265
266         RETURN(entry);
267 }
268
269 /*
270  * Used by the directory scanner to search for an entry by name.
271  *
272  * Only the caller can remove the entry from the hash, so holding the hash
273  * lock is unnecessary. It is the caller's duty to release the initial
274  * refcount on the entry, so increasing the refcount is also unnecessary.
275  */
276 static struct ll_sa_entry *
277 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
278 {
279         struct ll_sa_entry *entry;
280         int i = ll_sa_entry_hash(qstr->hash);
281
282         cfs_list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
283                 if (entry->se_qstr.hash == qstr->hash &&
284                     entry->se_qstr.len == qstr->len &&
285                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
286                         return entry;
287         }
288         return NULL;
289 }
290
291 /*
292  * Used by the async getattr request callback to find an entry by index.
293  *
294  * Called inside lli_sa_lock to prevent others from changing the list during
295  * the search. It increases the entry refcount before returning to guarantee
296  * that the entry cannot be freed by others.
297  */
298 static struct ll_sa_entry *
299 ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
300 {
301         struct ll_sa_entry *entry;
302
303         cfs_list_for_each_entry(entry, &sai->sai_entries_sent, se_list) {
304                 if (entry->se_index == index) {
305                         cfs_atomic_inc(&entry->se_refcount);
306                         return entry;
307                 }
308                 if (entry->se_index > index)
309                         break;
310         }
311         return NULL;
312 }
313
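/*
 * Release the async getattr enqueue info and the reply request (if any)
 * attached to the entry.
 */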
314 static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
315                                  struct ll_sa_entry *entry)
316 {
317         struct md_enqueue_info *minfo = entry->se_minfo;
318         struct ptlrpc_request  *req   = entry->se_req;
319
320         if (minfo) {
321                 entry->se_minfo = NULL;
322                 ll_intent_release(&minfo->mi_it);
323                 iput(minfo->mi_dir);
324                 OBD_FREE_PTR(minfo);
325         }
326
327         if (req) {
328                 entry->se_req = NULL;
329                 ptlrpc_req_finished(req);
330         }
331 }
332
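/*
 * Drop one reference on the entry; the last reference frees it.
 */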
333 static void ll_sa_entry_put(struct ll_statahead_info *sai,
334                              struct ll_sa_entry *entry)
335 {
336         if (cfs_atomic_dec_and_test(&entry->se_refcount)) {
337                 CDEBUG(D_READA, "free sai entry %.*s(%p) index "LPU64"\n",
338                        entry->se_qstr.len, entry->se_qstr.name, entry,
339                        entry->se_index);
340
341                 LASSERT(ll_sa_entry_unhashed(entry));
342                 LASSERT(ll_sa_entry_unlinked(entry));
343
344                 ll_sa_entry_cleanup(sai, entry);
345                 if (entry->se_inode)
346                         iput(entry->se_inode);
347
348                 OBD_FREE(entry, entry->se_size);
349                 cfs_atomic_dec(&sai->sai_cache_count);
350         }
351 }
352
353 static inline void
354 do_sai_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
355 {
356         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
357
358         ll_sa_entry_unhash(sai, entry);
359
360         cfs_spin_lock(&lli->lli_sa_lock);
361         entry->se_stat = SA_ENTRY_DEST;
362         if (likely(!ll_sa_entry_unlinked(entry)))
363                 cfs_list_del_init(&entry->se_list);
364         cfs_spin_unlock(&lli->lli_sa_lock);
365
366         ll_sa_entry_put(sai, entry);
367 }
368
369 /*
370  * Delete the entry from the sai_entries_stated list when it is finished.
371  */
372 static void
373 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
374 {
375         struct ll_sa_entry *pos, *next;
376
377         if (entry)
378                 do_sai_entry_fini(sai, entry);
379
380         /* drop old entry from sent list */
381         cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries_sent,
382                                      se_list) {
383                 if (is_omitted_entry(sai, pos->se_index))
384                         do_sai_entry_fini(sai, pos);
385                 else
386                         break;
387         }
388
389         /* drop old entry from stated list */
390         cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries_stated,
391                                      se_list) {
392                 if (is_omitted_entry(sai, pos->se_index))
393                         do_sai_entry_fini(sai, pos);
394                 else
395                         break;
396         }
397 }
398
399 /*
400  * Inside lli_sa_lock.
401  */
402 static void
403 do_sai_entry_to_stated(struct ll_statahead_info *sai,
404                        struct ll_sa_entry *entry, int rc)
405 {
406         struct ll_sa_entry *se;
407         cfs_list_t         *pos = &sai->sai_entries_stated;
408
409         if (!ll_sa_entry_unlinked(entry))
410                 cfs_list_del_init(&entry->se_list);
411
412         cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
413                 if (se->se_index < entry->se_index) {
414                         pos = &se->se_list;
415                         break;
416                 }
417         }
418
419         cfs_list_add(&entry->se_list, pos);
420         entry->se_stat = rc;
421 }
422
423 /*
424  * Move the entry to sai_entries_stated, keeping the list sorted by index.
425  * \retval 1    -- entry to be destroyed.
426  * \retval 0    -- entry is inserted into stated list.
427  */
428 static int
429 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
430                        struct ll_sa_entry *entry, int rc)
431 {
432         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
433         int                   ret = 1;
434
435         ll_sa_entry_cleanup(sai, entry);
436
437         cfs_spin_lock(&lli->lli_sa_lock);
438         if (likely(entry->se_stat != SA_ENTRY_DEST)) {
439                 do_sai_entry_to_stated(sai, entry, rc);
440                 ret = 0;
441         }
442         cfs_spin_unlock(&lli->lli_sa_lock);
443
444         return ret;
445 }
446
447 /*
448  * Insert inode into the list of sai_entries_agl.
449  */
450 static void ll_agl_add(struct ll_statahead_info *sai,
451                        struct inode *inode, int index)
452 {
453         struct ll_inode_info *child  = ll_i2info(inode);
454         struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
455         int                   added  = 0;
456
457         cfs_spin_lock(&child->lli_agl_lock);
458         if (child->lli_agl_index == 0) {
459                 child->lli_agl_index = index;
460                 cfs_spin_unlock(&child->lli_agl_lock);
461
462                 LASSERT(cfs_list_empty(&child->lli_agl_list));
463
464                 igrab(inode);
465                 cfs_spin_lock(&parent->lli_agl_lock);
466                 if (agl_list_empty(sai))
467                         added = 1;
468                 cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
469                 cfs_spin_unlock(&parent->lli_agl_lock);
470         } else {
471                 cfs_spin_unlock(&child->lli_agl_lock);
472         }
473
474         if (added > 0)
475                 cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
476 }
477
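/*
 * Allocate and initialize a statahead info structure with one reference held.
 */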
478 static struct ll_statahead_info *ll_sai_alloc(void)
479 {
480         struct ll_statahead_info *sai;
481         int                       i;
482         ENTRY;
483
484         OBD_ALLOC_PTR(sai);
485         if (!sai)
486                 RETURN(NULL);
487
488         cfs_atomic_set(&sai->sai_refcount, 1);
489
490         cfs_spin_lock(&sai_generation_lock);
491         sai->sai_generation = ++sai_generation;
492         if (unlikely(sai_generation == 0))
493                 sai->sai_generation = ++sai_generation;
494         cfs_spin_unlock(&sai_generation_lock);
495
496         sai->sai_max = LL_SA_RPC_MIN;
497         sai->sai_index = 1;
498         cfs_waitq_init(&sai->sai_waitq);
499         cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
500         cfs_waitq_init(&sai->sai_agl_thread.t_ctl_waitq);
501
502         CFS_INIT_LIST_HEAD(&sai->sai_entries_sent);
503         CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
504         CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
505         CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
506
507         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
508                 CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
509                 cfs_spin_lock_init(&sai->sai_cache_lock[i]);
510         }
511         cfs_atomic_set(&sai->sai_cache_count, 0);
512
513         RETURN(sai);
514 }
515
516 static inline struct ll_statahead_info *
517 ll_sai_get(struct ll_statahead_info *sai)
518 {
519         cfs_atomic_inc(&sai->sai_refcount);
520         return sai;
521 }
522
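/*
 * Drop one reference on the sai; the last reference detaches it from the
 * directory inode and frees all remaining entries.
 */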
523 static void ll_sai_put(struct ll_statahead_info *sai)
524 {
525         struct inode         *inode = sai->sai_inode;
526         struct ll_inode_info *lli   = ll_i2info(inode);
527         ENTRY;
528
529         if (cfs_atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
530                 struct ll_sa_entry *entry, *next;
531
532                 if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
533                         /* This is a race case: the interpret callback just
534                          * holds a reference count. */
535                         cfs_spin_unlock(&lli->lli_sa_lock);
536                         RETURN_EXIT;
537                 }
538
539                 LASSERT(lli->lli_opendir_key == NULL);
540                 LASSERT(thread_is_stopped(&sai->sai_thread));
541                 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
542
543                 lli->lli_sai = NULL;
544                 lli->lli_opendir_pid = 0;
545                 cfs_spin_unlock(&lli->lli_sa_lock);
546
547                 if (sai->sai_sent > sai->sai_replied)
548                         CDEBUG(D_READA,"statahead for dir "DFID" does not "
549                               "finish: [sent:"LPU64"] [replied:"LPU64"]\n",
550                               PFID(&lli->lli_fid),
551                               sai->sai_sent, sai->sai_replied);
552
553                 cfs_list_for_each_entry_safe(entry, next,
554                                              &sai->sai_entries_sent, se_list)
555                         do_sai_entry_fini(sai, entry);
556
557                 LASSERT(sa_received_empty(sai));
558
559                 cfs_list_for_each_entry_safe(entry, next,
560                                              &sai->sai_entries_stated, se_list)
561                         do_sai_entry_fini(sai, entry);
562
563                 LASSERT(cfs_atomic_read(&sai->sai_cache_count) == 0);
564                 LASSERT(agl_list_empty(sai));
565
566                 iput(inode);
567                 OBD_FREE_PTR(sai);
568         }
569
570         EXIT;
571 }
572
573 /* Do NOT forget to drop the inode refcount taken when added into sai_entries_agl. */
574 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
575 {
576         struct ll_inode_info *lli   = ll_i2info(inode);
577         __u64                 index = lli->lli_agl_index;
578         int                   rc;
579         ENTRY;
580
581         LASSERT(cfs_list_empty(&lli->lli_agl_list));
582
583         /* AGL may fall behind statahead by one entry */
584         if (is_omitted_entry(sai, index + 1)) {
585                 lli->lli_agl_index = 0;
586                 iput(inode);
587                 RETURN_EXIT;
588         }
589
590         /* Someone is in glimpse (sync or async), do nothing. */
591         rc = cfs_down_write_trylock(&lli->lli_glimpse_sem);
592         if (rc == 0) {
593                 lli->lli_agl_index = 0;
594                 iput(inode);
595                 RETURN_EXIT;
596         }
597
598         /*
599          * Someone triggered a glimpse within the last second.
600          * 1) The former glimpse succeeded with a glimpse lock granted by the
601          *    OST; if the lock is still cached on the client, AGL needs to do
602          *    nothing. If it was cancelled by another client, AGL may not be
603          *    able to obtain a new lock, as AGL triggers no glimpse callback.
604          * 2) The former glimpse succeeded, but the OST did not grant a glimpse
605          *    lock. In such a case, it is quite possible that the OST will not
606          *    grant a glimpse lock for AGL either.
607          * 3) The former glimpse failed. Compared with the other two cases, it
608          *    is relatively rare; AGL can ignore it without much effect on
609          *    performance.
610          */
611         if (lli->lli_glimpse_time != 0 &&
612             cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
613                 cfs_up_write(&lli->lli_glimpse_sem);
614                 lli->lli_agl_index = 0;
615                 iput(inode);
616                 RETURN_EXIT;
617         }
618
619         CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
620                DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
621
622         cl_agl(inode);
623         lli->lli_agl_index = 0;
624         lli->lli_glimpse_time = cfs_time_current();
625         cfs_up_write(&lli->lli_glimpse_sem);
626
627         CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
628                DFID", idx = "LPU64", rc = %d\n",
629                PFID(&lli->lli_fid), index, rc);
630
631         iput(inode);
632
633         EXIT;
634 }
635
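/*
 * Process one received statahead reply: revalidate the ibits lock, set up the
 * child inode, and move the entry to the stated list.
 */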
636 static void do_statahead_interpret(struct ll_statahead_info *sai,
637                                    struct ll_sa_entry *target)
638 {
639         struct inode           *dir   = sai->sai_inode;
640         struct inode           *child;
641         struct ll_inode_info   *lli   = ll_i2info(dir);
642         struct ll_sa_entry     *entry;
643         struct md_enqueue_info *minfo;
644         struct lookup_intent   *it;
645         struct ptlrpc_request  *req;
646         struct mdt_body        *body;
647         int                     rc    = 0;
648         ENTRY;
649
650         cfs_spin_lock(&lli->lli_sa_lock);
651         if (target != NULL && target->se_req != NULL &&
652             !cfs_list_empty(&target->se_list)) {
653                 entry = target;
654         } else if (unlikely(sa_received_empty(sai))) {
655                 cfs_spin_unlock(&lli->lli_sa_lock);
656                 RETURN_EXIT;
657         } else {
658                 entry = sa_first_received_entry(sai);
659         }
660
661         cfs_atomic_inc(&entry->se_refcount);
662         cfs_list_del_init(&entry->se_list);
663         cfs_spin_unlock(&lli->lli_sa_lock);
664
665         LASSERT(entry->se_handle != 0);
666
667         minfo = entry->se_minfo;
668         it = &minfo->mi_it;
669         req = entry->se_req;
670         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
671         if (body == NULL)
672                 GOTO(out, rc = -EFAULT);
673
674         child = entry->se_inode;
675         if (child == NULL) {
676                 /*
677                  * lookup.
678                  */
679                 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
680
681                 /* XXX: No fid in reply, this is probably a cross-ref case.
682                  * SA can't handle it yet. */
683                 if (body->valid & OBD_MD_MDS)
684                         GOTO(out, rc = -EAGAIN);
685         } else {
686                 /*
687                  * revalidate.
688                  */
689                 /* unlinked and re-created with the same name */
690                 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))){
691                         entry->se_inode = NULL;
692                         iput(child);
693                         child = NULL;
694                 }
695         }
696
697         it->d.lustre.it_lock_handle = entry->se_handle;
698         rc = md_revalidate_lock(ll_i2mdexp(dir), it, NULL, NULL);
699         if (rc != 1)
700                 GOTO(out, rc = -EAGAIN);
701
702         rc = ll_prep_inode(&child, req, dir->i_sb);
703         if (rc)
704                 GOTO(out, rc);
705
706         CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
707                child, child->i_ino, child->i_generation);
708         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
709
710         entry->se_inode = child;
711
712         if (agl_should_run(sai, child))
713                 ll_agl_add(sai, child, entry->se_index);
714
715         EXIT;
716
717 out:
718         /* The "ll_sa_entry_to_stated()" call will drop the related ldlm
719          * ibits lock reference count by calling "ll_intent_drop_lock()",
720          * whether or not the above operations failed. Do not worry about
721          * calling "ll_intent_drop_lock()" more than once. */
722         rc = ll_sa_entry_to_stated(sai, entry, rc < 0 ? rc : SA_ENTRY_SUCC);
723         if (rc == 0 && entry->se_index == sai->sai_index_wait && target == NULL)
724                 cfs_waitq_signal(&sai->sai_waitq);
725         ll_sa_entry_put(sai, entry);
726 }
727
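/*
 * Callback of the async getattr RPC: on success queue the entry on
 * sai_entries_received and wake up the statahead thread, otherwise move the
 * entry directly to the stated list.
 */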
728 static int ll_statahead_interpret(struct ptlrpc_request *req,
729                                   struct md_enqueue_info *minfo, int rc)
730 {
731         struct lookup_intent     *it  = &minfo->mi_it;
732         struct inode             *dir = minfo->mi_dir;
733         struct ll_inode_info     *lli = ll_i2info(dir);
734         struct ll_statahead_info *sai = NULL;
735         struct ll_sa_entry       *entry;
736         int                       wakeup;
737         ENTRY;
738
739         if (it_disposition(it, DISP_LOOKUP_NEG))
740                 rc = -ENOENT;
741
742         cfs_spin_lock(&lli->lli_sa_lock);
743         /* stale entry */
744         if (unlikely(lli->lli_sai == NULL ||
745                      lli->lli_sai->sai_generation != minfo->mi_generation)) {
746                 cfs_spin_unlock(&lli->lli_sa_lock);
747                 GOTO(out, rc = -ESTALE);
748         } else {
749                 sai = ll_sai_get(lli->lli_sai);
750                 if (unlikely(!thread_is_running(&sai->sai_thread))) {
751                         sai->sai_replied++;
752                         cfs_spin_unlock(&lli->lli_sa_lock);
753                         GOTO(out, rc = -EBADFD);
754                 }
755
756                 entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
757                 if (entry == NULL) {
758                         sai->sai_replied++;
759                         cfs_spin_unlock(&lli->lli_sa_lock);
760                         GOTO(out, rc = -EIDRM);
761                 }
762
763                 cfs_list_del_init(&entry->se_list);
764                 if (rc != 0) {
765                         sai->sai_replied++;
766                         do_sai_entry_to_stated(sai, entry, rc);
767                         cfs_spin_unlock(&lli->lli_sa_lock);
768                         if (entry->se_index == sai->sai_index_wait)
769                                 cfs_waitq_signal(&sai->sai_waitq);
770                 } else {
771                         entry->se_minfo = minfo;
772                         entry->se_req = ptlrpc_request_addref(req);
773                         /* Release the async ibits lock ASAP to avoid deadlock
774                          * when the statahead thread enqueues a lock on the parent
775                          * for readpage and another thread enqueues a lock on the
776                          * child with the parent's lock held, e.g. unlink. */
777                         entry->se_handle = it->d.lustre.it_lock_handle;
778                         ll_intent_drop_lock(it);
779                         wakeup = sa_received_empty(sai);
780                         cfs_list_add_tail(&entry->se_list,
781                                           &sai->sai_entries_received);
782                         sai->sai_replied++;
783                         cfs_spin_unlock(&lli->lli_sa_lock);
784                         if (wakeup)
785                                 cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
786                 }
787                 ll_sa_entry_put(sai, entry);
788         }
789
790         EXIT;
791
792 out:
793         if (rc != 0) {
794                 ll_intent_release(it);
795                 iput(dir);
796                 OBD_FREE_PTR(minfo);
797         }
798         if (sai != NULL)
799                 ll_sai_put(sai);
800         return rc;
801 }
802
803 static void sa_args_fini(struct md_enqueue_info *minfo,
804                          struct ldlm_enqueue_info *einfo)
805 {
806         LASSERT(minfo && einfo);
807         iput(minfo->mi_dir);
808         capa_put(minfo->mi_data.op_capa1);
809         capa_put(minfo->mi_data.op_capa2);
810         OBD_FREE_PTR(minfo);
811         OBD_FREE_PTR(einfo);
812 }
813
814 /**
815  * There is a race condition between "capa_put" and "ll_statahead_interpret"
816  * when accessing "op_data.op_capa[1,2]", as follows:
817  * "capa_put" releases the "op_data.op_capa[1,2]" reference count after
818  * calling "md_intent_getattr_async". But "ll_statahead_interpret" may run
819  * first and fill "op_data.op_capa[1,2]" with POISON, causing "capa_put" to
820  * access an invalid "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa"
821  * here before calling "md_intent_getattr_async".
822  */
823 static int sa_args_init(struct inode *dir, struct inode *child,
824                         struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
825                         struct ldlm_enqueue_info **pei,
826                         struct obd_capa **pcapa)
827 {
828         struct qstr              *qstr = &entry->se_qstr;
829         struct ll_inode_info     *lli  = ll_i2info(dir);
830         struct md_enqueue_info   *minfo;
831         struct ldlm_enqueue_info *einfo;
832         struct md_op_data        *op_data;
833
834         OBD_ALLOC_PTR(einfo);
835         if (einfo == NULL)
836                 return -ENOMEM;
837
838         OBD_ALLOC_PTR(minfo);
839         if (minfo == NULL) {
840                 OBD_FREE_PTR(einfo);
841                 return -ENOMEM;
842         }
843
844         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
845                                      qstr->len, 0, LUSTRE_OPC_ANY, NULL);
846         if (IS_ERR(op_data)) {
847                 OBD_FREE_PTR(einfo);
848                 OBD_FREE_PTR(minfo);
849                 return PTR_ERR(op_data);
850         }
851
852         minfo->mi_it.it_op = IT_GETATTR;
853         minfo->mi_dir = igrab(dir);
854         minfo->mi_cb = ll_statahead_interpret;
855         minfo->mi_generation = lli->lli_sai->sai_generation;
856         minfo->mi_cbdata = entry->se_index;
857
858         einfo->ei_type   = LDLM_IBITS;
859         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
860         einfo->ei_cb_bl  = ll_md_blocking_ast;
861         einfo->ei_cb_cp  = ldlm_completion_ast;
862         einfo->ei_cb_gl  = NULL;
863         einfo->ei_cbdata = NULL;
864
865         *pmi = minfo;
866         *pei = einfo;
867         pcapa[0] = op_data->op_capa1;
868         pcapa[1] = op_data->op_capa2;
869
870         return 0;
871 }
872
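/*
 * Send an async getattr (lookup) request for an entry with no cached dentry.
 */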
873 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
874 {
875         struct md_enqueue_info   *minfo;
876         struct ldlm_enqueue_info *einfo;
877         struct obd_capa          *capas[2];
878         int                       rc;
879         ENTRY;
880
881         rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
882         if (rc)
883                 RETURN(rc);
884
885         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
886         if (!rc) {
887                 capa_put(capas[0]);
888                 capa_put(capas[1]);
889         } else {
890                 sa_args_fini(minfo, einfo);
891         }
892
893         RETURN(rc);
894 }
895
896 /**
897  * Similar to ll_revalidate_it().
898  * \retval      1 -- dentry valid
899  * \retval      0 -- will send stat-ahead request
900  * \retval others -- prepare stat-ahead request failed
901  */
902 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
903                             struct dentry *dentry)
904 {
905         struct inode             *inode = dentry->d_inode;
906         struct lookup_intent      it = { .it_op = IT_GETATTR,
907                                          .d.lustre.it_lock_handle = 0 };
908         struct md_enqueue_info   *minfo;
909         struct ldlm_enqueue_info *einfo;
910         struct obd_capa          *capas[2];
911         int rc;
912         ENTRY;
913
914         if (unlikely(inode == NULL))
915                 RETURN(1);
916
917         if (d_mountpoint(dentry))
918                 RETURN(1);
919
920         if (unlikely(dentry == dentry->d_sb->s_root))
921                 RETURN(1);
922
923         entry->se_inode = igrab(inode);
924         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL);
925         if (rc == 1) {
926                 entry->se_handle = it.d.lustre.it_lock_handle;
927                 ll_intent_release(&it);
928                 RETURN(1);
929         }
930
931         rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
932         if (rc) {
933                 entry->se_inode = NULL;
934                 iput(inode);
935                 RETURN(rc);
936         }
937
938         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
939         if (!rc) {
940                 capa_put(capas[0]);
941                 capa_put(capas[1]);
942         } else {
943                 entry->se_inode = NULL;
944                 iput(inode);
945                 sa_args_fini(minfo, einfo);
946         }
947
948         RETURN(rc);
949 }
950
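/*
 * Stat-ahead one directory entry: revalidate the cached dentry if there is
 * one, otherwise issue an async lookup.
 */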
951 static void ll_statahead_one(struct dentry *parent, const char* entry_name,
952                              int entry_name_len)
953 {
954         struct inode             *dir    = parent->d_inode;
955         struct ll_inode_info     *lli    = ll_i2info(dir);
956         struct ll_statahead_info *sai    = lli->lli_sai;
957         struct dentry            *dentry = NULL;
958         struct ll_sa_entry       *entry;
959         int                       rc;
960         int                       rc1;
961         ENTRY;
962
963         entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
964                                   entry_name_len);
965         if (IS_ERR(entry))
966                 RETURN_EXIT;
967
968         dentry = d_lookup(parent, &entry->se_qstr);
969         if (!dentry) {
970                 rc = do_sa_lookup(dir, entry);
971         } else {
972                 rc = do_sa_revalidate(dir, entry, dentry);
973                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
974                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
975         }
976
977         if (dentry != NULL)
978                 dput(dentry);
979
980         if (rc) {
981                 rc1 = ll_sa_entry_to_stated(sai, entry,
982                                         rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
983                 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
984                         cfs_waitq_signal(&sai->sai_waitq);
985         } else {
986                 sai->sai_sent++;
987         }
988
989         sai->sai_index++;
990         /* drop one refcount on entry by ll_sa_entry_alloc */
991         ll_sa_entry_put(sai, entry);
992
993         EXIT;
994 }
995
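/*
 * The AGL thread: takes inodes from sai_entries_agl and triggers async
 * glimpse locks on them until it is told to stop.
 */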
996 static int ll_agl_thread(void *arg)
997 {
998         struct dentry            *parent = (struct dentry *)arg;
999         struct inode             *dir    = parent->d_inode;
1000         struct ll_inode_info     *plli   = ll_i2info(dir);
1001         struct ll_inode_info     *clli;
1002         struct ll_sb_info        *sbi    = ll_i2sbi(dir);
1003         struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
1004         struct ptlrpc_thread     *thread = &sai->sai_agl_thread;
1005         struct l_wait_info        lwi    = { 0 };
1006         ENTRY;
1007
1008         {
1009                 char pname[16];
1010                 snprintf(pname, 15, "ll_agl_%u", plli->lli_opendir_pid);
1011                 cfs_daemonize(pname);
1012         }
1013
1014         CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
1015                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1016
1017         atomic_inc(&sbi->ll_agl_total);
1018         cfs_spin_lock(&plli->lli_agl_lock);
1019         sai->sai_agl_valid = 1;
1020         thread_set_flags(thread, SVC_RUNNING);
1021         cfs_spin_unlock(&plli->lli_agl_lock);
1022         cfs_waitq_signal(&thread->t_ctl_waitq);
1023
1024         while (1) {
1025                 l_wait_event(thread->t_ctl_waitq,
1026                              !agl_list_empty(sai) ||
1027                              !thread_is_running(thread),
1028                              &lwi);
1029
1030                 if (!thread_is_running(thread))
1031                         break;
1032
1033                 cfs_spin_lock(&plli->lli_agl_lock);
1034                 /* The statahead thread may help to process AGL entries,
1035                  * so check again whether the list is empty. */
1036                 if (!agl_list_empty(sai)) {
1037                         clli = agl_first_entry(sai);
1038                         cfs_list_del_init(&clli->lli_agl_list);
1039                         cfs_spin_unlock(&plli->lli_agl_lock);
1040                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
1041                 } else {
1042                         cfs_spin_unlock(&plli->lli_agl_lock);
1043                 }
1044         }
1045
1046         cfs_spin_lock(&plli->lli_agl_lock);
1047         sai->sai_agl_valid = 0;
1048         while (!agl_list_empty(sai)) {
1049                 clli = agl_first_entry(sai);
1050                 cfs_list_del_init(&clli->lli_agl_list);
1051                 cfs_spin_unlock(&plli->lli_agl_lock);
1052                 clli->lli_agl_index = 0;
1053                 iput(&clli->lli_vfs_inode);
1054                 cfs_spin_lock(&plli->lli_agl_lock);
1055         }
1056         thread_set_flags(thread, SVC_STOPPED);
1057         cfs_spin_unlock(&plli->lli_agl_lock);
1058         cfs_waitq_signal(&thread->t_ctl_waitq);
1059         ll_sai_put(sai);
1060         CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
1061                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1062         RETURN(0);
1063 }
1064
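/*
 * Start the AGL thread for the given directory and wait until it is running
 * (or has stopped).
 */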
1065 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1066 {
1067         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1068         struct l_wait_info    lwi    = { 0 };
1069         int                   rc;
1070         ENTRY;
1071
1072         CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
1073                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1074
1075         rc = cfs_create_thread(ll_agl_thread, parent, 0);
1076         if (rc < 0) {
1077                 CERROR("can't start ll_agl thread, rc: %d\n", rc);
1078                 RETURN_EXIT;
1079         }
1080
1081         l_wait_event(thread->t_ctl_waitq,
1082                      thread_is_running(thread) || thread_is_stopped(thread),
1083                      &lwi);
1084         EXIT;
1085 }
1086
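/*
 * The statahead thread: walks the directory pages and issues async getattr
 * requests ahead of the scanning process, handling replies as they arrive.
 */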
1087 static int ll_statahead_thread(void *arg)
1088 {
1089         struct dentry            *parent = (struct dentry *)arg;
1090         struct inode             *dir    = parent->d_inode;
1091         struct ll_inode_info     *plli   = ll_i2info(dir);
1092         struct ll_inode_info     *clli;
1093         struct ll_sb_info        *sbi    = ll_i2sbi(dir);
1094         struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
1095         struct ptlrpc_thread     *thread = &sai->sai_thread;
1096         struct page              *page;
1097         __u64                     pos    = 0;
1098         int                       first  = 0;
1099         int                       rc     = 0;
1100         struct ll_dir_chain       chain;
1101         struct l_wait_info        lwi    = { 0 };
1102         ENTRY;
1103
1104         {
1105                 char pname[16];
1106                 snprintf(pname, 15, "ll_sa_%u", plli->lli_opendir_pid);
1107                 cfs_daemonize(pname);
1108         }
1109
1110         CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
1111                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1112
1113         if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1114                 ll_start_agl(parent, sai);
1115
1116         atomic_inc(&sbi->ll_sa_total);
1117         cfs_spin_lock(&plli->lli_sa_lock);
1118         thread_set_flags(thread, SVC_RUNNING);
1119         cfs_spin_unlock(&plli->lli_sa_lock);
1120         cfs_waitq_signal(&thread->t_ctl_waitq);
1121
1122         plli->lli_sa_pos = 0;
1123         ll_dir_chain_init(&chain);
1124         page = ll_get_dir_page(NULL, dir, pos, &chain);
1125
1126         while (1) {
1127                 struct lu_dirpage *dp;
1128                 struct lu_dirent  *ent;
1129
1130                 if (IS_ERR(page)) {
1131                         rc = PTR_ERR(page);
1132                         CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
1133                                "/"LPU64": [rc %d] [parent %u]\n",
1134                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1135                                rc, plli->lli_opendir_pid);
1136                         GOTO(out, rc);
1137                 }
1138
1139                 dp = page_address(page);
1140                 for (ent = lu_dirent_start(dp); ent != NULL;
1141                      ent = lu_dirent_next(ent)) {
1142                         __u64 hash;
1143                         int namelen;
1144                         char *name;
1145
1146                         hash = le64_to_cpu(ent->lde_hash);
1147                         if (unlikely(hash < pos))
1148                                 /*
1149                                  * Skip until we find target hash value.
1150                                  */
1151                                 continue;
1152
1153                         namelen = le16_to_cpu(ent->lde_namelen);
1154                         if (unlikely(namelen == 0))
1155                                 /*
1156                                  * Skip dummy record.
1157                                  */
1158                                 continue;
1159
1160                         name = ent->lde_name;
1161                         if (name[0] == '.') {
1162                                 if (namelen == 1) {
1163                                         /*
1164                                          * skip "."
1165                                          */
1166                                         continue;
1167                                 } else if (name[1] == '.' && namelen == 2) {
1168                                         /*
1169                                          * skip ".."
1170                                          */
1171                                         continue;
1172                                 } else if (!sai->sai_ls_all) {
1173                                         /*
1174                                          * skip hidden files.
1175                                          */
1176                                         sai->sai_skip_hidden++;
1177                                         continue;
1178                                 }
1179                         }
1180
1181                         /*
1182                          * don't stat-ahead first entry.
1183                          */
1184                         if (unlikely(++first == 1))
1185                                 continue;
1186
1187 keep_it:
1188                         l_wait_event(thread->t_ctl_waitq,
1189                                      !sa_sent_full(sai) ||
1190                                      !sa_received_empty(sai) ||
1191                                      !agl_list_empty(sai) ||
1192                                      !thread_is_running(thread),
1193                                      &lwi);
1194
1195 interpret_it:
1196                         while (!sa_received_empty(sai))
1197                                 do_statahead_interpret(sai, NULL);
1198
1199                         if (unlikely(!thread_is_running(thread))) {
1200                                 ll_release_page(page, 0);
1201                                 GOTO(out, rc = 0);
1202                         }
1203
1204                         /* If there is no window for metadata statahead, but
1205                          * there are some AGL entries to be triggered, then
1206                          * try to help process the AGL entries. */
1207                         if (sa_sent_full(sai)) {
1208                                 cfs_spin_lock(&plli->lli_agl_lock);
1209                                 while (!agl_list_empty(sai)) {
1210                                         clli = agl_first_entry(sai);
1211                                         cfs_list_del_init(&clli->lli_agl_list);
1212                                         cfs_spin_unlock(&plli->lli_agl_lock);
1213                                         ll_agl_trigger(&clli->lli_vfs_inode,
1214                                                        sai);
1215
1216                                         if (!sa_received_empty(sai))
1217                                                 goto interpret_it;
1218
1219                                         if (unlikely(
1220                                                 !thread_is_running(thread))) {
1221                                                 ll_release_page(page, 0);
1222                                                 GOTO(out, rc = 0);
1223                                         }
1224
1225                                         if (!sa_sent_full(sai))
1226                                                 goto do_it;
1227
1228                                         cfs_spin_lock(&plli->lli_agl_lock);
1229                                 }
1230                                 cfs_spin_unlock(&plli->lli_agl_lock);
1231
1232                                 goto keep_it;
1233                         }
1234
1235 do_it:
1236                         ll_statahead_one(parent, name, namelen);
1237                 }
1238                 pos = le64_to_cpu(dp->ldp_hash_end);
1239                 if (pos == MDS_DIR_END_OFF) {
1240                         /*
1241                          * End of directory reached.
1242                          */
1243                         ll_release_page(page, 0);
1244                         while (1) {
1245                                 l_wait_event(thread->t_ctl_waitq,
1246                                              !sa_received_empty(sai) ||
1247                                              sai->sai_sent == sai->sai_replied||
1248                                              !thread_is_running(thread),
1249                                              &lwi);
1250
1251                                 while (!sa_received_empty(sai))
1252                                         do_statahead_interpret(sai, NULL);
1253
1254                                 if (unlikely(!thread_is_running(thread)))
1255                                         GOTO(out, rc = 0);
1256
1257                                 if (sai->sai_sent == sai->sai_replied &&
1258                                     sa_received_empty(sai))
1259                                         break;
1260                         }
1261
1262                         cfs_spin_lock(&plli->lli_agl_lock);
1263                         while (!agl_list_empty(sai) &&
1264                                thread_is_running(thread)) {
1265                                 clli = agl_first_entry(sai);
1266                                 cfs_list_del_init(&clli->lli_agl_list);
1267                                 cfs_spin_unlock(&plli->lli_agl_lock);
1268                                 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1269                                 cfs_spin_lock(&plli->lli_agl_lock);
1270                         }
1271                         cfs_spin_unlock(&plli->lli_agl_lock);
1272
1273                         GOTO(out, rc = 0);
1274                 } else if (1) {
1275                         /*
1276                          * chain is exhausted.
1277                          * Normal case: continue to the next page.
1278                          */
1279                         ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1280                                               LDF_COLLIDE);
1281                         plli->lli_sa_pos = pos;
1282                         sai->sai_in_readpage = 1;
1283                         page = ll_get_dir_page(NULL, dir, pos, &chain);
1284                         sai->sai_in_readpage = 0;
1285                 } else {
1286                         LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1287                         ll_release_page(page, 1);
1288                         /*
1289                          * go into overflow page.
1290                          */
1291                 }
1292         }
1293         EXIT;
1294
1295 out:
1296         if (sai->sai_agl_valid) {
1297                 struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
1298
1299                 cfs_spin_lock(&plli->lli_agl_lock);
1300                 thread_set_flags(agl_thread, SVC_STOPPING);
1301                 cfs_spin_unlock(&plli->lli_agl_lock);
1302                 cfs_waitq_signal(&agl_thread->t_ctl_waitq);
1303
1304                 CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
1305                        cfs_curproc_pid());
1306                 l_wait_event(agl_thread->t_ctl_waitq,
1307                              thread_is_stopped(agl_thread),
1308                              &lwi);
1309         }
1310
1311         ll_dir_chain_fini(&chain);
1312         cfs_spin_lock(&plli->lli_sa_lock);
1313         if (!sa_received_empty(sai)) {
1314                 thread_set_flags(thread, SVC_STOPPING);
1315                 cfs_spin_unlock(&plli->lli_sa_lock);
1316
1317                 /* To release the resources held by received entries. */
1318                 while (!sa_received_empty(sai))
1319                         do_statahead_interpret(sai, NULL);
1320
1321                 cfs_spin_lock(&plli->lli_sa_lock);
1322         }
1323         thread_set_flags(thread, SVC_STOPPED);
1324         cfs_spin_unlock(&plli->lli_sa_lock);
1325         cfs_waitq_signal(&sai->sai_waitq);
1326         cfs_waitq_signal(&thread->t_ctl_waitq);
1327         ll_sai_put(sai);
1328         CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
1329                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1330         dput(parent);
1331         return rc;
1332 }
1333
1334 /**
1335  * called in ll_file_release().
1336  */
1337 void ll_stop_statahead(struct inode *dir, void *key)
1338 {
1339         struct ll_inode_info *lli = ll_i2info(dir);
1340
1341         if (unlikely(key == NULL))
1342                 return;
1343
1344         cfs_spin_lock(&lli->lli_sa_lock);
1345         if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
1346                 cfs_spin_unlock(&lli->lli_sa_lock);
1347                 return;
1348         }
1349
1350         lli->lli_opendir_key = NULL;
1351
1352         if (lli->lli_sai) {
1353                 struct l_wait_info lwi = { 0 };
1354                 struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
1355
1356                 if (!thread_is_stopped(thread)) {
1357                         thread_set_flags(thread, SVC_STOPPING);
1358                         cfs_spin_unlock(&lli->lli_sa_lock);
1359                         cfs_waitq_signal(&thread->t_ctl_waitq);
1360
1361                         CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
1362                                cfs_curproc_pid());
1363                         l_wait_event(thread->t_ctl_waitq,
1364                                      thread_is_stopped(thread),
1365                                      &lwi);
1366                 } else {
1367                         cfs_spin_unlock(&lli->lli_sa_lock);
1368                 }
1369
1370                 /*
1371                  * Put the ref which was held at the first statahead_enter.
1372                  * It may not be the last ref, since some statahead requests
1373                  * may still be in flight.
1374                  */
1375                 ll_sai_put(lli->lli_sai);
1376         } else {
1377                 lli->lli_opendir_pid = 0;
1378                 cfs_spin_unlock(&lli->lli_sa_lock);
1379         }
1380 }
1381
1382 enum {
1383         /**
1384          * not first dirent, or is "."
1385          */
1386         LS_NONE_FIRST_DE = 0,
1387         /**
1388          * the first non-hidden dirent
1389          */
1390         LS_FIRST_DE,
1391         /**
1392          * the first hidden dirent, that is "."
1393          */
1394         LS_FIRST_DOT_DE
1395 };
1396
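/*
 * Scan the directory to check whether the target dentry is the first dirent
 * (LS_FIRST_DE), the first hidden dirent (LS_FIRST_DOT_DE), or neither
 * (LS_NONE_FIRST_DE).
 */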
1397 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1398 {
1399         struct ll_inode_info *lli    = ll_i2info(dir);
1400         struct ll_dir_chain   chain;
1401         struct qstr          *target = &dentry->d_name;
1402         struct page          *page;
1403         __u64                 pos    = 0;
1404         int                   dot_de;
1405         int                   rc     = LS_NONE_FIRST_DE;
1406         ENTRY;
1407
1408         lli->lli_sa_pos = 0;
1409         ll_dir_chain_init(&chain);
1410         page = ll_get_dir_page(NULL, dir, pos, &chain);
1411
1412         while (1) {
1413                 struct lu_dirpage *dp;
1414                 struct lu_dirent  *ent;
1415
1416                 if (IS_ERR(page)) {
1417                         struct ll_inode_info *lli = ll_i2info(dir);
1418
1419                         rc = PTR_ERR(page);
1420                         CERROR("error reading dir "DFID" at "LPU64": "
1421                                "[rc %d] [parent %u]\n",
1422                                PFID(ll_inode2fid(dir)), pos,
1423                                rc, lli->lli_opendir_pid);
1424                         break;
1425                 }
1426
1427                 dp = page_address(page);
1428                 for (ent = lu_dirent_start(dp); ent != NULL;
1429                      ent = lu_dirent_next(ent)) {
1430                         __u64 hash;
1431                         int namelen;
1432                         char *name;
1433
1434                         hash = le64_to_cpu(ent->lde_hash);
1435                         /* ll_get_dir_page() can return any page containing
1436                          * the given hash, which may not be the start hash. */
1437                         if (unlikely(hash < pos))
1438                                 continue;
1439
1440                         namelen = le16_to_cpu(ent->lde_namelen);
1441                         if (unlikely(namelen == 0))
1442                                 /*
1443                                  * skip dummy record.
1444                                  */
1445                                 continue;
1446
1447                         name = ent->lde_name;
1448                         if (name[0] == '.') {
1449                                 if (namelen == 1)
1450                                         /*
1451                                          * skip "."
1452                                          */
1453                                         continue;
1454                                 else if (name[1] == '.' && namelen == 2)
1455                                         /*
1456                                          * skip ".."
1457                                          */
1458                                         continue;
1459                                 else
1460                                         dot_de = 1;
1461                         } else {
1462                                 dot_de = 0;
1463                         }
1464
1465                         if (dot_de && target->name[0] != '.') {
1466                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1467                                        target->len, target->name,
1468                                        namelen, name);
1469                                 continue;
1470                         }
1471
1472                         if (target->len != namelen ||
1473                             memcmp(target->name, name, namelen) != 0)
1474                                 rc = LS_NONE_FIRST_DE;
1475                         else if (!dot_de)
1476                                 rc = LS_FIRST_DE;
1477                         else
1478                                 rc = LS_FIRST_DOT_DE;
1479
1480                         ll_release_page(page, 0);
1481                         GOTO(out, rc);
1482                 }
1483                 pos = le64_to_cpu(dp->ldp_hash_end);
1484                 if (pos == MDS_DIR_END_OFF) {
1485                         /*
1486                          * End of directory reached.
1487                          */
1488                         ll_release_page(page, 0);
1489                         break;
1490                 } else {
1491                         /*
1492                          * Normal case: continue to the next page.  A page
1493                          * flagged LDF_COLLIDE is released for removal.
1494                          */
1495                         ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1496                                               LDF_COLLIDE);
1497                         lli->lli_sa_pos = pos;
1498                         page = ll_get_dir_page(NULL, dir, pos, &chain);
1505                 }
1506         }
1507         EXIT;
1508
1509 out:
1510         ll_dir_chain_fini(&chain);
1511         return rc;
1512 }
1513
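/*
 * Release the given statahead entry and update the hit/miss statistics.  On a
 * hit the statahead window is doubled, bounded by sbi->ll_sa_max; after too
 * many consecutive misses (sa_low_hit()) the statahead thread is told to stop,
 * since the access pattern apparently does not benefit from statahead.
 */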
1514 static void
1515 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1516 {
1517         struct ptlrpc_thread *thread = &sai->sai_thread;
1518         struct ll_sb_info    *sbi    = ll_i2sbi(sai->sai_inode);
1519         int                   hit;
1520         ENTRY;
1521
1522         if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
1523                 hit = 1;
1524         else
1525                 hit = 0;
1526
1527         ll_sa_entry_fini(sai, entry);
1528         if (hit) {
1529                 sai->sai_hit++;
1530                 sai->sai_consecutive_miss = 0;
1531                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1532         } else {
1533                 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
1534
1535                 sai->sai_miss++;
1536                 sai->sai_consecutive_miss++;
1537                 if (sa_low_hit(sai) && thread_is_running(thread)) {
1538                         atomic_inc(&sbi->ll_sa_wrong);
1539                         CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1540                                "ratio too low: hit/miss "LPU64"/"LPU64
1541                                ", sent/replied "LPU64"/"LPU64", stopping "
1542                                "statahead thread: pid %d\n",
1543                                PFID(&lli->lli_fid), sai->sai_hit,
1544                                sai->sai_miss, sai->sai_sent,
1545                                sai->sai_replied, cfs_curproc_pid());
1546                         cfs_spin_lock(&lli->lli_sa_lock);
1547                         if (!thread_is_stopped(thread))
1548                                 thread_set_flags(thread, SVC_STOPPING);
1549                         cfs_spin_unlock(&lli->lli_sa_lock);
1550                 }
1551         }
1552
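        /*
         * Wake up the statahead thread so it can re-check its window, which
         * may have grown, or notice that it has been asked to stop.
         */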
1553         if (!thread_is_stopped(thread))
1554                 cfs_waitq_signal(&thread->t_ctl_waitq);
1555
1556         EXIT;
1557 }
1558
1559 /**
1560  * Start the statahead thread if this is the first dir entry.  Otherwise,
1561  * if the thread is already running, wait until it gets ahead of this lookup.
1562  * \retval 1       -- entry found in cache with its lock held; the caller
1563  *                    needs to do nothing more.
1564  * \retval 0       -- entry found in cache but without a lock; the caller
1565  *                    needs to refresh it from the MDS.
1566  * \retval others  -- the caller must do a normal (non-statahead) lookup.
1567  */
1568 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1569                        int only_unplug)
1570 {
1571         struct ll_inode_info     *lli   = ll_i2info(dir);
1572         struct ll_statahead_info *sai   = lli->lli_sai;
1573         struct dentry            *parent;
1574         struct ll_sa_entry       *entry;
1575         struct ptlrpc_thread     *thread;
1576         struct l_wait_info        lwi   = { 0 };
1577         int                       rc    = 0;
1578         ENTRY;
1579
1580         LASSERT(lli->lli_opendir_pid == cfs_curproc_pid());
1581
1582         if (sai) {
1583                 thread = &sai->sai_thread;
1584                 if (unlikely(thread_is_stopped(thread) &&
1585                              cfs_list_empty(&sai->sai_entries_stated))) {
1586                         /* release resources held by the stopped thread */
1587                         ll_stop_statahead(dir, lli->lli_opendir_key);
1588                         RETURN(-EAGAIN);
1589                 }
1590
1591                 if ((*dentryp)->d_name.name[0] == '.') {
1592                         if (sai->sai_ls_all ||
1593                             sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1594                                 /*
1595                                  * The hidden dentry is the first one, or the
1596                                  * statahead thread did not skip this many hidden
1597                                  * dentries before "sai_ls_all" was enabled below.
1598                                  */
1599                         } else {
1600                                 if (!sai->sai_ls_all)
1601                                         /*
1602                                          * The hidden dentry is not the first
1603                                          * one, so "sai_ls_all" was not set
1604                                          * and this "ls -al" was missed.
1605                                          * Enable "sai_ls_all" for this case.
1606                                          */
1607                                         sai->sai_ls_all = 1;
1608
1609                                 /*
1610                                  * This "getattr" was skipped before
1611                                  * "sai_ls_all" was enabled above.
1612                                  */
1613                                 sai->sai_miss_hidden++;
1614                                 RETURN(-EAGAIN);
1615                         }
1616                 }
1617
1618                 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1619                 if (entry == NULL || only_unplug) {
1620                         ll_sai_unplug(sai, entry);
1621                         RETURN(entry ? 1 : -EAGAIN);
1622                 }
1623
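                /*
                 * While the statahead thread is busy reading directory pages
                 * (sai_in_readpage), help it by interpreting any replies that
                 * have already arrived instead of just waiting for it.
                 */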
1624                 while (!ll_sa_entry_stated(entry) &&
1625                        sai->sai_in_readpage &&
1626                        !sa_received_empty(sai))
1627                         do_statahead_interpret(sai, entry);
1628
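                /*
                 * The entry has not been stated yet: wait up to 30 seconds
                 * for the statahead thread to finish the async getattr or to
                 * stop; on timeout or signal fall back to a normal lookup.
                 */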
1629                 if (!ll_sa_entry_stated(entry)) {
1630                         sai->sai_index_wait = entry->se_index;
1631                         lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1632                                                LWI_ON_SIGNAL_NOOP, NULL);
1633                         rc = l_wait_event(sai->sai_waitq,
1634                                           ll_sa_entry_stated(entry) ||
1635                                           thread_is_stopped(thread),
1636                                           &lwi);
1637                         if (rc < 0) {
1638                                 ll_sai_unplug(sai, entry);
1639                                 RETURN(-EAGAIN);
1640                         }
1641                 }
1642
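                /*
                 * The async getattr succeeded: try to revalidate the lock
                 * saved in se_handle and, if it is still valid, attach the
                 * prefetched inode to the dentry so the caller can avoid a
                 * synchronous getattr to the MDS.
                 */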
1643                 if (entry->se_stat == SA_ENTRY_SUCC &&
1644                     entry->se_inode != NULL) {
1645                         struct inode *inode = entry->se_inode;
1646                         struct lookup_intent it = { .it_op = IT_GETATTR,
1647                                                     .d.lustre.it_lock_handle =
1648                                                      entry->se_handle };
1649                         struct ll_dentry_data *lld;
1650                         __u64 bits;
1651
1652                         rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1653                                                 ll_inode2fid(inode), &bits);
1654                         if (rc == 1) {
1655                                 if ((*dentryp)->d_inode == NULL) {
1656                                         *dentryp = ll_find_alias(inode,
1657                                                                  *dentryp);
1658                                         lld = ll_d2d(*dentryp);
1659                                         if (unlikely(lld == NULL))
1660                                                 ll_dops_init(*dentryp, 1, 1);
1661                                 } else {
1662                                         LASSERT((*dentryp)->d_inode == inode);
1663
1664                                         ll_dentry_rehash(*dentryp, 0);
1665                                         iput(inode);
1666                                 }
1667                                 entry->se_inode = NULL;
1668
1669                                 ll_dentry_reset_flags(*dentryp, bits);
1670                                 ll_intent_release(&it);
1671                         }
1672                 }
1673
1674                 ll_sai_unplug(sai, entry);
1675                 RETURN(rc);
1676         }
1677
1678         /* The current process owns "lli_opendir_pid"; only it may set "lli_sai". */
1679         rc = is_first_dirent(dir, *dentryp);
1680         if (rc == LS_NONE_FIRST_DE)
1681                 /* Not an "ls -{a}l" style access; no need to statahead for it. */
1682                 GOTO(out, rc = -EAGAIN);
1683
1684         sai = ll_sai_alloc();
1685         if (sai == NULL)
1686                 GOTO(out, rc = -ENOMEM);
1687
1688         sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1689         sai->sai_inode = igrab(dir);
1690         if (unlikely(sai->sai_inode == NULL)) {
1691                 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1692                       PFID(&lli->lli_fid));
1693                 GOTO(out, rc = -ESTALE);
1694         }
1695
1696         /* take a parent reference here; it is dropped in ll_statahead_thread() */
1697         parent = dget((*dentryp)->d_parent);
1698         if (unlikely(sai->sai_inode != parent->d_inode)) {
1699                 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1700
1701                 CWARN("Race condition, someone changed %.*s just now: "
1702                       "old parent "DFID", new parent "DFID"\n",
1703                       (*dentryp)->d_name.len, (*dentryp)->d_name.name,
1704                       PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1705                 dput(parent);
1706                 iput(sai->sai_inode);
1707                 GOTO(out, rc = -EAGAIN);
1708         }
1709
1710         CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
1711                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1712
1713         lli->lli_sai = sai;
1714         rc = cfs_create_thread(ll_statahead_thread, parent, 0);
1715         thread = &sai->sai_thread;
1716         if (rc < 0) {
1717                 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1718                 dput(parent);
1719                 lli->lli_opendir_key = NULL;
1720                 thread_set_flags(thread, SVC_STOPPED);
1721                 ll_sai_put(sai);
1722                 LASSERT(lli->lli_sai == NULL);
1723                 RETURN(-EAGAIN);
1724         }
1725
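        /*
         * Wait until the new statahead thread reports that it is running, or
         * that it has already stopped, before returning to the caller.
         */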
1726         l_wait_event(thread->t_ctl_waitq,
1727                      thread_is_running(thread) || thread_is_stopped(thread),
1728                      &lwi);
1729
1730         /*
1731          * We don't stat-ahead for the first dirent since we are already in
1732          * lookup, and -EEXIST also indicates that this is the first dirent.
1733          */
1734         RETURN(-EEXIST);
1735
1736 out:
1737         if (sai != NULL)
1738                 OBD_FREE_PTR(sai);
1739         cfs_spin_lock(&lli->lli_sa_lock);
1740         lli->lli_opendir_key = NULL;
1741         lli->lli_opendir_pid = 0;
1742         cfs_spin_unlock(&lli->lli_sa_lock);
1743         return rc;
1744 }
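
/*
 * Illustrative sketch only, not code from this tree: a lookup-path caller is
 * expected to consume the return value of do_statahead_enter() roughly as
 * follows, where the helper names are placeholders for the caller's own logic:
 *
 *      rc = do_statahead_enter(dir, &dentry, only_unplug);
 *      if (rc == 1)
 *              use_cached_entry_and_lock(dentry);
 *      else if (rc == 0)
 *              refresh_entry_from_mds(dentry);
 *      else
 *              fall_back_to_normal_lookup(dentry);
 */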