LU-3030 build: Update Master Copyrights pre 2.4 split
[fs/lustre-release.git] / lustre / llite / statahead.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #include <linux/fs.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/highmem.h>
41 #include <linux/pagemap.h>
42
43 #define DEBUG_SUBSYSTEM S_LLITE
44
45 #include <obd_support.h>
46 #include <lustre_lite.h>
47 #include <lustre_dlm.h>
48 #include "llite_internal.h"
49
50 #define SA_OMITTED_ENTRY_MAX 8ULL
51
52 typedef enum {
53         /** negative values are for error cases */
54         SA_ENTRY_INIT = 0,      /** init entry */
55         SA_ENTRY_SUCC = 1,      /** stat succeeded */
56         SA_ENTRY_INVA = 2,      /** invalid entry */
57         SA_ENTRY_DEST = 3,      /** entry to be destroyed */
58 } se_stat_t;
59
60 struct ll_sa_entry {
61         /* link into sai->sai_entries */
62         cfs_list_t              se_link;
63         /* link into sai->sai_entries_{received,stated} */
64         cfs_list_t              se_list;
65         /* link into sai hash table locally */
66         cfs_list_t              se_hash;
67         /* entry reference count */
68         cfs_atomic_t            se_refcount;
69         /* entry index in the sai */
70         __u64                   se_index;
71         /* low layer ldlm lock handle */
72         __u64                   se_handle;
73         /* entry status */
74         se_stat_t               se_stat;
75         /* entry size, contains name */
76         int                     se_size;
77         /* pointer to async getattr enqueue info */
78         struct md_enqueue_info *se_minfo;
79         /* pointer to the async getattr request */
80         struct ptlrpc_request  *se_req;
81         /* pointer to the target inode */
82         struct inode           *se_inode;
83         /* entry name */
84         struct qstr             se_qstr;
85 };
86
87 static unsigned int sai_generation = 0;
88 static DEFINE_SPINLOCK(sai_generation_lock);
89
90 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
91 {
92         return cfs_list_empty(&entry->se_hash);
93 }
94
95 /*
96  * The entry can only be released by the caller; it is necessary to hold lock.
97  */
98 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
99 {
100         smp_rmb();
101         return (entry->se_stat != SA_ENTRY_INIT);
102 }
103
104 static inline int ll_sa_entry_hash(int val)
105 {
106         return val & LL_SA_CACHE_MASK;
107 }
108
109 /*
110  * Insert the entry into the SA hash table.
111  */
112 static inline void
113 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
114 {
115         int i = ll_sa_entry_hash(entry->se_qstr.hash);
116
117         spin_lock(&sai->sai_cache_lock[i]);
118         cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
119         spin_unlock(&sai->sai_cache_lock[i]);
120 }
121
122 /*
123  * Remove the entry from the SA hash table.
124  */
125 static inline void
126 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
127 {
128         int i = ll_sa_entry_hash(entry->se_qstr.hash);
129
130         spin_lock(&sai->sai_cache_lock[i]);
131         cfs_list_del_init(&entry->se_hash);
132         spin_unlock(&sai->sai_cache_lock[i]);
133 }
134
135 static inline int agl_should_run(struct ll_statahead_info *sai,
136                                  struct inode *inode)
137 {
138         return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
139 }
140
141 static inline struct ll_sa_entry *
142 sa_first_received_entry(struct ll_statahead_info *sai)
143 {
144         return cfs_list_entry(sai->sai_entries_received.next,
145                               struct ll_sa_entry, se_list);
146 }
147
148 static inline struct ll_inode_info *
149 agl_first_entry(struct ll_statahead_info *sai)
150 {
151         return cfs_list_entry(sai->sai_entries_agl.next,
152                               struct ll_inode_info, lli_agl_list);
153 }
154
155 static inline int sa_sent_full(struct ll_statahead_info *sai)
156 {
157         return cfs_atomic_read(&sai->sai_cache_count) >= sai->sai_max;
158 }
159
160 static inline int sa_received_empty(struct ll_statahead_info *sai)
161 {
162         return cfs_list_empty(&sai->sai_entries_received);
163 }
164
165 static inline int agl_list_empty(struct ll_statahead_info *sai)
166 {
167         return cfs_list_empty(&sai->sai_entries_agl);
168 }
169
170 /**
171  * The hit rate is considered low when either:
172  * (1) the hit ratio is less than 80%, or
173  * (2) there have been more than 8 consecutive misses
174  * (see the worked example after this function).
175  */
176 static inline int sa_low_hit(struct ll_statahead_info *sai)
177 {
178         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
179                 (sai->sai_consecutive_miss > 8));
180 }
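/*
 * Worked example for the thresholds above (illustrative numbers, not taken
 * from the original sources): with sai_hit = 8 and sai_miss = 3 the first
 * condition holds, because 8 > 7 and 8 < 4 * 3 = 12, i.e. the hit ratio is
 * 8 / (8 + 3), roughly 73%, which is below 80%.  With sai_hit = 100 and
 * sai_miss = 10 the ratio is about 91%, so only a run of more than 8
 * consecutive misses (the second condition) would report a low hit rate.
 */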
181
182 /*
183  * If the given index falls behind the statahead window by more than
184  * SA_OMITTED_ENTRY_MAX entries, then it is old (worked example below).
185  */
186 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
187 {
188         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
189                  sai->sai_index);
190 }
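/*
 * Worked example for is_omitted_entry() (illustrative numbers, assuming
 * sai_max = 32): with SA_OMITTED_ENTRY_MAX = 8, an entry at index 100 is
 * treated as old once sai_index reaches 141, because 32 + 100 + 8 = 140 is
 * then less than sai_index.
 */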
191
192 /*
193  * Insert the entry at the tail of sai_entries when it is initialized.
194  */
195 static struct ll_sa_entry *
196 ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
197                   const char *name, int len)
198 {
199         struct ll_inode_info *lli;
200         struct ll_sa_entry   *entry;
201         int                   entry_size;
202         char                 *dname;
203         ENTRY;
204
205         entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
206         OBD_ALLOC(entry, entry_size);
207         if (unlikely(entry == NULL))
208                 RETURN(ERR_PTR(-ENOMEM));
209
210         CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n",
211                len, name, entry, index);
212
213         entry->se_index = index;
214
215         /*
216          * Statahead entry reference rules:
217          *
218          * 1) When a statahead entry is initialized, its reference count is
219          *    set to 2. One reference is used by the directory scanner: when
220          *    the scanner searches the statahead cache for a given name, it
221          *    can perform a lockless hash lookup (only the scanner removes
222          *    entries from the hash list), and once the entry is found it
223          *    need not call "atomic_inc()" on the entry reference, which
224          *    improves performance. After using the statahead entry, the
225          *    scanner calls "atomic_dec()" to drop the reference held since
226          *    initialization. If it is the last reference, the statahead
227          *    entry is freed.
228          *
229          * 2) All other threads, including the statahead thread and ptlrpcd
230          *    threads, must hold a reference on the entry while processing it
231          *    to guarantee that it is not released by the directory scanner.
232          *    After processing the entry, these threads drop their reference;
233          *    if it is the last reference, the entry is freed.
234          *
235          *    The second reference taken at initialization is used by the
236          *    statahead thread, following rule 2); see the sketch below.
237          */
238         cfs_atomic_set(&entry->se_refcount, 2);
239         entry->se_stat = SA_ENTRY_INIT;
240         entry->se_size = entry_size;
241         dname = (char *)entry + sizeof(struct ll_sa_entry);
242         memcpy(dname, name, len);
243         dname[len] = 0;
244         entry->se_qstr.hash = full_name_hash(name, len);
245         entry->se_qstr.len = len;
246         entry->se_qstr.name = dname;
247
248         lli = ll_i2info(sai->sai_inode);
249         spin_lock(&lli->lli_sa_lock);
250         cfs_list_add_tail(&entry->se_link, &sai->sai_entries);
251         CFS_INIT_LIST_HEAD(&entry->se_list);
252         ll_sa_entry_enhash(sai, entry);
253         spin_unlock(&lli->lli_sa_lock);
254
255         cfs_atomic_inc(&sai->sai_cache_count);
256
257         RETURN(entry);
258 }
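/*
 * Illustrative sketch of the reference protocol described above (a hedged
 * summary of how this file uses the entry, not additional functional code):
 *
 *   - ll_sa_entry_alloc() starts the entry with a refcount of 2: one
 *     reference for the directory scanner, one for the statahead thread.
 *   - The scanner looks the entry up with ll_sa_entry_get_byname() (lockless,
 *     no extra reference taken) and eventually drops its reference through
 *     ll_sa_entry_fini().
 *   - The statahead thread drops its reference with ll_sa_entry_put() at the
 *     end of ll_statahead_one(); the ptlrpcd callback takes and drops an
 *     extra reference via ll_sa_entry_get_byindex()/ll_sa_entry_put().
 *
 * Whichever ll_sa_entry_put() call drops the last reference frees the entry.
 */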
259
260 /*
261  * Used by the directory scanner to search for an entry by name.
262  *
263  * Only the caller can remove the entry from the hash, so it is unnecessary
264  * to hold the hash lock. It is the caller's duty to release the init refcount
265  * on the entry, so it is also unnecessary to increase the entry's refcount.
266  */
267 static struct ll_sa_entry *
268 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
269 {
270         struct ll_sa_entry *entry;
271         int i = ll_sa_entry_hash(qstr->hash);
272
273         cfs_list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
274                 if (entry->se_qstr.hash == qstr->hash &&
275                     entry->se_qstr.len == qstr->len &&
276                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
277                         return entry;
278         }
279         return NULL;
280 }
281
282 /*
283  * Used by the async getattr request callback to find an entry by index.
284  *
285  * Called with lli_sa_lock held to prevent others from changing the list
286  * during the search. The entry refcount is increased before returning to
287  * guarantee that the entry cannot be freed by others.
288  */
289 static struct ll_sa_entry *
290 ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
291 {
292         struct ll_sa_entry *entry;
293
294         cfs_list_for_each_entry(entry, &sai->sai_entries, se_link) {
295                 if (entry->se_index == index) {
296                         LASSERT(atomic_read(&entry->se_refcount) > 0);
297                         cfs_atomic_inc(&entry->se_refcount);
298                         return entry;
299                 }
300                 if (entry->se_index > index)
301                         break;
302         }
303         return NULL;
304 }
305
306 static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
307                                  struct ll_sa_entry *entry)
308 {
309         struct md_enqueue_info *minfo = entry->se_minfo;
310         struct ptlrpc_request  *req   = entry->se_req;
311
312         if (minfo) {
313                 entry->se_minfo = NULL;
314                 ll_intent_release(&minfo->mi_it);
315                 iput(minfo->mi_dir);
316                 OBD_FREE_PTR(minfo);
317         }
318
319         if (req) {
320                 entry->se_req = NULL;
321                 ptlrpc_req_finished(req);
322         }
323 }
324
325 static void ll_sa_entry_put(struct ll_statahead_info *sai,
326                              struct ll_sa_entry *entry)
327 {
328         if (cfs_atomic_dec_and_test(&entry->se_refcount)) {
329                 CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n",
330                        entry->se_qstr.len, entry->se_qstr.name, entry,
331                        entry->se_index);
332
333                 LASSERT(cfs_list_empty(&entry->se_link));
334                 LASSERT(cfs_list_empty(&entry->se_list));
335                 LASSERT(ll_sa_entry_unhashed(entry));
336
337                 ll_sa_entry_cleanup(sai, entry);
338                 if (entry->se_inode)
339                         iput(entry->se_inode);
340
341                 OBD_FREE(entry, entry->se_size);
342                 cfs_atomic_dec(&sai->sai_cache_count);
343         }
344 }
345
346 static inline void
347 do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
348 {
349         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
350
351         LASSERT(!ll_sa_entry_unhashed(entry));
352         LASSERT(!cfs_list_empty(&entry->se_link));
353
354         ll_sa_entry_unhash(sai, entry);
355
356         spin_lock(&lli->lli_sa_lock);
357         entry->se_stat = SA_ENTRY_DEST;
358         cfs_list_del_init(&entry->se_link);
359         if (likely(!cfs_list_empty(&entry->se_list)))
360                 cfs_list_del_init(&entry->se_list);
361         spin_unlock(&lli->lli_sa_lock);
362
363         ll_sa_entry_put(sai, entry);
364 }
365
366 /*
367  * Delete the entry from the sai_entries_stated list at fini time.
368  */
369 static void
370 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
371 {
372         struct ll_sa_entry *pos, *next;
373
374         if (entry)
375                 do_sa_entry_fini(sai, entry);
376
377         /* drop old entries; only the 'scanner' process does this, no need to lock */
378         cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
379                 if (!is_omitted_entry(sai, pos->se_index))
380                         break;
381                 do_sa_entry_fini(sai, pos);
382         }
383 }
384
385 /*
386  * Called with lli_sa_lock held.
387  */
388 static void
389 do_sa_entry_to_stated(struct ll_statahead_info *sai,
390                       struct ll_sa_entry *entry, se_stat_t stat)
391 {
392         struct ll_sa_entry *se;
393         cfs_list_t         *pos = &sai->sai_entries_stated;
394
395         if (!cfs_list_empty(&entry->se_list))
396                 cfs_list_del_init(&entry->se_list);
397
398         cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
399                 if (se->se_index < entry->se_index) {
400                         pos = &se->se_list;
401                         break;
402                 }
403         }
404
405         cfs_list_add(&entry->se_list, pos);
406         entry->se_stat = stat;
407 }
408
409 /*
410  * Move the entry to sai_entries_stated, keeping the list sorted by index.
411  * \retval 1    -- entry to be destroyed.
412  * \retval 0    -- entry is inserted into stated list.
413  */
414 static int
415 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
416                       struct ll_sa_entry *entry, se_stat_t stat)
417 {
418         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
419         int                   ret = 1;
420
421         ll_sa_entry_cleanup(sai, entry);
422
423         spin_lock(&lli->lli_sa_lock);
424         if (likely(entry->se_stat != SA_ENTRY_DEST)) {
425                 do_sa_entry_to_stated(sai, entry, stat);
426                 ret = 0;
427         }
428         spin_unlock(&lli->lli_sa_lock);
429
430         return ret;
431 }
432
433 /*
434  * Insert inode into the list of sai_entries_agl.
435  */
436 static void ll_agl_add(struct ll_statahead_info *sai,
437                        struct inode *inode, int index)
438 {
439         struct ll_inode_info *child  = ll_i2info(inode);
440         struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
441         int                   added  = 0;
442
443         spin_lock(&child->lli_agl_lock);
444         if (child->lli_agl_index == 0) {
445                 child->lli_agl_index = index;
446                 spin_unlock(&child->lli_agl_lock);
447
448                 LASSERT(cfs_list_empty(&child->lli_agl_list));
449
450                 igrab(inode);
451                 spin_lock(&parent->lli_agl_lock);
452                 if (agl_list_empty(sai))
453                         added = 1;
454                 cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
455                 spin_unlock(&parent->lli_agl_lock);
456         } else {
457                 spin_unlock(&child->lli_agl_lock);
458         }
459
460         if (added > 0)
461                 cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
462 }
463
464 static struct ll_statahead_info *ll_sai_alloc(void)
465 {
466         struct ll_statahead_info *sai;
467         int                       i;
468         ENTRY;
469
470         OBD_ALLOC_PTR(sai);
471         if (!sai)
472                 RETURN(NULL);
473
474         cfs_atomic_set(&sai->sai_refcount, 1);
475
476         spin_lock(&sai_generation_lock);
477         sai->sai_generation = ++sai_generation;
478         if (unlikely(sai_generation == 0))
479                 sai->sai_generation = ++sai_generation;
480         spin_unlock(&sai_generation_lock);
481
482         sai->sai_max = LL_SA_RPC_MIN;
483         sai->sai_index = 1;
484         cfs_waitq_init(&sai->sai_waitq);
485         cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
486         cfs_waitq_init(&sai->sai_agl_thread.t_ctl_waitq);
487
488         CFS_INIT_LIST_HEAD(&sai->sai_entries);
489         CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
490         CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
491         CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
492
493         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
494                 CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
495                 spin_lock_init(&sai->sai_cache_lock[i]);
496         }
497         cfs_atomic_set(&sai->sai_cache_count, 0);
498
499         RETURN(sai);
500 }
501
502 static inline struct ll_statahead_info *
503 ll_sai_get(struct ll_statahead_info *sai)
504 {
505         cfs_atomic_inc(&sai->sai_refcount);
506         return sai;
507 }
508
509 static void ll_sai_put(struct ll_statahead_info *sai)
510 {
511         struct inode         *inode = sai->sai_inode;
512         struct ll_inode_info *lli   = ll_i2info(inode);
513         ENTRY;
514
515         if (cfs_atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
516                 struct ll_sa_entry *entry, *next;
517
518                 if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
519                         /* It is a race case; the interpret callback just
520                          * holds a reference count. */
521                         spin_unlock(&lli->lli_sa_lock);
522                         RETURN_EXIT;
523                 }
524
525                 LASSERT(lli->lli_opendir_key == NULL);
526                 LASSERT(thread_is_stopped(&sai->sai_thread));
527                 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
528
529                 lli->lli_sai = NULL;
530                 lli->lli_opendir_pid = 0;
531                 spin_unlock(&lli->lli_sa_lock);
532
533                 if (sai->sai_sent > sai->sai_replied)
534                         CDEBUG(D_READA,"statahead for dir "DFID" does not "
535                               "finish: [sent:"LPU64"] [replied:"LPU64"]\n",
536                               PFID(&lli->lli_fid),
537                               sai->sai_sent, sai->sai_replied);
538
539                 cfs_list_for_each_entry_safe(entry, next,
540                                              &sai->sai_entries, se_link)
541                         do_sa_entry_fini(sai, entry);
542
543                 LASSERT(list_empty(&sai->sai_entries));
544                 LASSERT(sa_received_empty(sai));
545                 LASSERT(list_empty(&sai->sai_entries_stated));
546
547                 LASSERT(cfs_atomic_read(&sai->sai_cache_count) == 0);
548                 LASSERT(agl_list_empty(sai));
549
550                 iput(inode);
551                 OBD_FREE_PTR(sai);
552         }
553
554         EXIT;
555 }
556
557 /* Do NOT forget to drop the inode refcount held while on sai_entries_agl. */
558 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
559 {
560         struct ll_inode_info *lli   = ll_i2info(inode);
561         __u64                 index = lli->lli_agl_index;
562         int                   rc;
563         ENTRY;
564
565         LASSERT(cfs_list_empty(&lli->lli_agl_list));
566
567         /* AGL may fall behind statahead by one entry */
568         if (is_omitted_entry(sai, index + 1)) {
569                 lli->lli_agl_index = 0;
570                 iput(inode);
571                 RETURN_EXIT;
572         }
573
574         /* Someone is in glimpse (sync or async), do nothing. */
575         rc = down_write_trylock(&lli->lli_glimpse_sem);
576         if (rc == 0) {
577                 lli->lli_agl_index = 0;
578                 iput(inode);
579                 RETURN_EXIT;
580         }
581
582         /*
583          * Someone triggered a glimpse within the last second.
584          * 1) The former glimpse succeeded with a glimpse lock granted by the
585          *    OST; if the lock is still cached on the client, AGL needs to do
586          *    nothing. If it was cancelled by another client, AGL may be unable
587          *    to obtain a new lock, since no glimpse callback is triggered by AGL.
588          * 2) The former glimpse succeeded, but the OST did not grant a glimpse
589          *    lock. In that case, it is quite possible that the OST will not
590          *    grant a glimpse lock for AGL either.
591          * 3) The former glimpse failed; compared with the other two cases it is
592          *    relatively rare. AGL can ignore such a case, and it will not much
593          *    affect performance.
594          */
595         if (lli->lli_glimpse_time != 0 &&
596             cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
597                 up_write(&lli->lli_glimpse_sem);
598                 lli->lli_agl_index = 0;
599                 iput(inode);
600                 RETURN_EXIT;
601         }
602
603         CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
604                DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
605
606         cl_agl(inode);
607         lli->lli_agl_index = 0;
608         lli->lli_glimpse_time = cfs_time_current();
609         up_write(&lli->lli_glimpse_sem);
610
611         CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
612                DFID", idx = "LPU64", rc = %d\n",
613                PFID(&lli->lli_fid), index, rc);
614
615         iput(inode);
616
617         EXIT;
618 }
619
620 static void ll_post_statahead(struct ll_statahead_info *sai)
621 {
622         struct inode           *dir   = sai->sai_inode;
623         struct inode           *child;
624         struct ll_inode_info   *lli   = ll_i2info(dir);
625         struct ll_sa_entry     *entry;
626         struct md_enqueue_info *minfo;
627         struct lookup_intent   *it;
628         struct ptlrpc_request  *req;
629         struct mdt_body        *body;
630         int                     rc    = 0;
631         ENTRY;
632
633         spin_lock(&lli->lli_sa_lock);
634         if (unlikely(sa_received_empty(sai))) {
635                 spin_unlock(&lli->lli_sa_lock);
636                 RETURN_EXIT;
637         }
638         entry = sa_first_received_entry(sai);
639         cfs_atomic_inc(&entry->se_refcount);
640         cfs_list_del_init(&entry->se_list);
641         spin_unlock(&lli->lli_sa_lock);
642
643         LASSERT(entry->se_handle != 0);
644
645         minfo = entry->se_minfo;
646         it = &minfo->mi_it;
647         req = entry->se_req;
648         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
649         if (body == NULL)
650                 GOTO(out, rc = -EFAULT);
651
652         child = entry->se_inode;
653         if (child == NULL) {
654                 /*
655                  * lookup.
656                  */
657                 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
658
659                 /* XXX: No fid in reply, this is probably a cross-ref case.
660                  * SA can't handle it yet. */
661                 if (body->valid & OBD_MD_MDS)
662                         GOTO(out, rc = -EAGAIN);
663         } else {
664                 /*
665                  * revalidate.
666                  */
667                 /* unlinked and re-created with the same name */
668                 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))){
669                         entry->se_inode = NULL;
670                         iput(child);
671                         child = NULL;
672                 }
673         }
674
675         it->d.lustre.it_lock_handle = entry->se_handle;
676         rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
677         if (rc != 1)
678                 GOTO(out, rc = -EAGAIN);
679
680         rc = ll_prep_inode(&child, req, dir->i_sb, it);
681         if (rc)
682                 GOTO(out, rc);
683
684         CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
685                child, child->i_ino, child->i_generation);
686         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
687
688         entry->se_inode = child;
689
690         if (agl_should_run(sai, child))
691                 ll_agl_add(sai, child, entry->se_index);
692
693         EXIT;
694
695 out:
696         /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
697          * reference count by calling "ll_intent_drop_lock()" in spite of the
698          * above operations failed or not. Do not worry about calling
699          * "ll_intent_drop_lock()" more than once. */
700         rc = ll_sa_entry_to_stated(sai, entry,
701                                    rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
702         if (rc == 0 && entry->se_index == sai->sai_index_wait)
703                 cfs_waitq_signal(&sai->sai_waitq);
704         ll_sa_entry_put(sai, entry);
705 }
706
707 static int ll_statahead_interpret(struct ptlrpc_request *req,
708                                   struct md_enqueue_info *minfo, int rc)
709 {
710         struct lookup_intent     *it  = &minfo->mi_it;
711         struct inode             *dir = minfo->mi_dir;
712         struct ll_inode_info     *lli = ll_i2info(dir);
713         struct ll_statahead_info *sai = NULL;
714         struct ll_sa_entry       *entry;
715         int                       wakeup;
716         ENTRY;
717
718         if (it_disposition(it, DISP_LOOKUP_NEG))
719                 rc = -ENOENT;
720
721         spin_lock(&lli->lli_sa_lock);
722         /* stale entry */
723         if (unlikely(lli->lli_sai == NULL ||
724                      lli->lli_sai->sai_generation != minfo->mi_generation)) {
725                 spin_unlock(&lli->lli_sa_lock);
726                 GOTO(out, rc = -ESTALE);
727         } else {
728                 sai = ll_sai_get(lli->lli_sai);
729                 if (unlikely(!thread_is_running(&sai->sai_thread))) {
730                         sai->sai_replied++;
731                         spin_unlock(&lli->lli_sa_lock);
732                         GOTO(out, rc = -EBADFD);
733                 }
734
735                 entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
736                 if (entry == NULL) {
737                         sai->sai_replied++;
738                         spin_unlock(&lli->lli_sa_lock);
739                         GOTO(out, rc = -EIDRM);
740                 }
741
742                 if (rc != 0) {
743                         do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
744                         wakeup = (entry->se_index == sai->sai_index_wait);
745                 } else {
746                         entry->se_minfo = minfo;
747                         entry->se_req = ptlrpc_request_addref(req);
748                         /* Release the async ibits lock ASAP to avoid deadlock
749                          * when the statahead thread enqueues a lock on parent
750                          * for readpage and another thread enqueues a lock on
751                          * child with parent's lock held, for example: unlink. */
752                         entry->se_handle = it->d.lustre.it_lock_handle;
753                         ll_intent_drop_lock(it);
754                         wakeup = sa_received_empty(sai);
755                         cfs_list_add_tail(&entry->se_list,
756                                           &sai->sai_entries_received);
757                 }
758                 sai->sai_replied++;
759                 spin_unlock(&lli->lli_sa_lock);
760
761                 ll_sa_entry_put(sai, entry);
762                 if (wakeup)
763                         cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
764         }
765
766         EXIT;
767
768 out:
769         if (rc != 0) {
770                 ll_intent_release(it);
771                 iput(dir);
772                 OBD_FREE_PTR(minfo);
773         }
774         if (sai != NULL)
775                 ll_sai_put(sai);
776         return rc;
777 }
778
779 static void sa_args_fini(struct md_enqueue_info *minfo,
780                          struct ldlm_enqueue_info *einfo)
781 {
782         LASSERT(minfo && einfo);
783         iput(minfo->mi_dir);
784         capa_put(minfo->mi_data.op_capa1);
785         capa_put(minfo->mi_data.op_capa2);
786         OBD_FREE_PTR(minfo);
787         OBD_FREE_PTR(einfo);
788 }
789
790 /**
791  * There is a race condition between "capa_put" and "ll_statahead_interpret"
792  * when accessing "op_data.op_capa[1,2]":
793  * "capa_put" releases the reference count on "op_data.op_capa[1,2]" after
794  * calling "md_intent_getattr_async", but "ll_statahead_interpret" may run
795  * first and fill "op_data.op_capa[1,2]" with POISON, causing "capa_put" to
796  * access an invalid "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa"
797  * before calling "md_intent_getattr_async".
798  */
799 static int sa_args_init(struct inode *dir, struct inode *child,
800                         struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
801                         struct ldlm_enqueue_info **pei,
802                         struct obd_capa **pcapa)
803 {
804         struct qstr              *qstr = &entry->se_qstr;
805         struct ll_inode_info     *lli  = ll_i2info(dir);
806         struct md_enqueue_info   *minfo;
807         struct ldlm_enqueue_info *einfo;
808         struct md_op_data        *op_data;
809
810         OBD_ALLOC_PTR(einfo);
811         if (einfo == NULL)
812                 return -ENOMEM;
813
814         OBD_ALLOC_PTR(minfo);
815         if (minfo == NULL) {
816                 OBD_FREE_PTR(einfo);
817                 return -ENOMEM;
818         }
819
820         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
821                                      qstr->len, 0, LUSTRE_OPC_ANY, NULL);
822         if (IS_ERR(op_data)) {
823                 OBD_FREE_PTR(einfo);
824                 OBD_FREE_PTR(minfo);
825                 return PTR_ERR(op_data);
826         }
827
828         minfo->mi_it.it_op = IT_GETATTR;
829         minfo->mi_dir = igrab(dir);
830         minfo->mi_cb = ll_statahead_interpret;
831         minfo->mi_generation = lli->lli_sai->sai_generation;
832         minfo->mi_cbdata = entry->se_index;
833
834         einfo->ei_type   = LDLM_IBITS;
835         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
836         einfo->ei_cb_bl  = ll_md_blocking_ast;
837         einfo->ei_cb_cp  = ldlm_completion_ast;
838         einfo->ei_cb_gl  = NULL;
839         einfo->ei_cbdata = NULL;
840
841         *pmi = minfo;
842         *pei = einfo;
843         pcapa[0] = op_data->op_capa1;
844         pcapa[1] = op_data->op_capa2;
845
846         return 0;
847 }
848
849 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
850 {
851         struct md_enqueue_info   *minfo;
852         struct ldlm_enqueue_info *einfo;
853         struct obd_capa          *capas[2];
854         int                       rc;
855         ENTRY;
856
857         rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
858         if (rc)
859                 RETURN(rc);
860
861         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
862         if (!rc) {
863                 capa_put(capas[0]);
864                 capa_put(capas[1]);
865         } else {
866                 sa_args_fini(minfo, einfo);
867         }
868
869         RETURN(rc);
870 }
871
872 /**
873  * Similar to ll_revalidate_it().
874  * \retval      1 -- dentry valid
875  * \retval      0 -- will send stat-ahead request
876  * \retval others -- prepare stat-ahead request failed
877  */
878 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
879                             struct dentry *dentry)
880 {
881         struct inode             *inode = dentry->d_inode;
882         struct lookup_intent      it = { .it_op = IT_GETATTR,
883                                          .d.lustre.it_lock_handle = 0 };
884         struct md_enqueue_info   *minfo;
885         struct ldlm_enqueue_info *einfo;
886         struct obd_capa          *capas[2];
887         int rc;
888         ENTRY;
889
890         if (unlikely(inode == NULL))
891                 RETURN(1);
892
893         if (d_mountpoint(dentry))
894                 RETURN(1);
895
896         if (unlikely(dentry == dentry->d_sb->s_root))
897                 RETURN(1);
898
899         entry->se_inode = igrab(inode);
900         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL);
901         if (rc == 1) {
902                 entry->se_handle = it.d.lustre.it_lock_handle;
903                 ll_intent_release(&it);
904                 RETURN(1);
905         }
906
907         rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
908         if (rc) {
909                 entry->se_inode = NULL;
910                 iput(inode);
911                 RETURN(rc);
912         }
913
914         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
915         if (!rc) {
916                 capa_put(capas[0]);
917                 capa_put(capas[1]);
918         } else {
919                 entry->se_inode = NULL;
920                 iput(inode);
921                 sa_args_fini(minfo, einfo);
922         }
923
924         RETURN(rc);
925 }
926
927 static void ll_statahead_one(struct dentry *parent, const char* entry_name,
928                              int entry_name_len)
929 {
930         struct inode             *dir    = parent->d_inode;
931         struct ll_inode_info     *lli    = ll_i2info(dir);
932         struct ll_statahead_info *sai    = lli->lli_sai;
933         struct dentry            *dentry = NULL;
934         struct ll_sa_entry       *entry;
935         int                       rc;
936         int                       rc1;
937         ENTRY;
938
939         entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
940                                   entry_name_len);
941         if (IS_ERR(entry))
942                 RETURN_EXIT;
943
944         dentry = d_lookup(parent, &entry->se_qstr);
945         if (!dentry) {
946                 rc = do_sa_lookup(dir, entry);
947         } else {
948                 rc = do_sa_revalidate(dir, entry, dentry);
949                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
950                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
951         }
952
953         if (dentry != NULL)
954                 dput(dentry);
955
956         if (rc) {
957                 rc1 = ll_sa_entry_to_stated(sai, entry,
958                                         rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
959                 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
960                         cfs_waitq_signal(&sai->sai_waitq);
961         } else {
962                 sai->sai_sent++;
963         }
964
965         sai->sai_index++;
966         /* drop one refcount on entry by ll_sa_entry_alloc */
967         ll_sa_entry_put(sai, entry);
968
969         EXIT;
970 }
971
972 static int ll_agl_thread(void *arg)
973 {
974         struct dentry            *parent = (struct dentry *)arg;
975         struct inode             *dir    = parent->d_inode;
976         struct ll_inode_info     *plli   = ll_i2info(dir);
977         struct ll_inode_info     *clli;
978         struct ll_sb_info        *sbi    = ll_i2sbi(dir);
979         struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
980         struct ptlrpc_thread     *thread = &sai->sai_agl_thread;
981         struct l_wait_info        lwi    = { 0 };
982         ENTRY;
983
984         {
985                 char pname[16];
986                 snprintf(pname, 15, "ll_agl_%u", plli->lli_opendir_pid);
987                 cfs_daemonize(pname);
988         }
989
990         CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
991                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
992
993         atomic_inc(&sbi->ll_agl_total);
994         spin_lock(&plli->lli_agl_lock);
995         sai->sai_agl_valid = 1;
996         thread_set_flags(thread, SVC_RUNNING);
997         spin_unlock(&plli->lli_agl_lock);
998         cfs_waitq_signal(&thread->t_ctl_waitq);
999
1000         while (1) {
1001                 l_wait_event(thread->t_ctl_waitq,
1002                              !agl_list_empty(sai) ||
1003                              !thread_is_running(thread),
1004                              &lwi);
1005
1006                 if (!thread_is_running(thread))
1007                         break;
1008
1009                 spin_lock(&plli->lli_agl_lock);
1010                 /* The statahead thread may have helped to process AGL
1011                  * entries, so check whether the list is empty again. */
1012                 if (!agl_list_empty(sai)) {
1013                         clli = agl_first_entry(sai);
1014                         cfs_list_del_init(&clli->lli_agl_list);
1015                         spin_unlock(&plli->lli_agl_lock);
1016                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
1017                 } else {
1018                         spin_unlock(&plli->lli_agl_lock);
1019                 }
1020         }
1021
1022         spin_lock(&plli->lli_agl_lock);
1023         sai->sai_agl_valid = 0;
1024         while (!agl_list_empty(sai)) {
1025                 clli = agl_first_entry(sai);
1026                 cfs_list_del_init(&clli->lli_agl_list);
1027                 spin_unlock(&plli->lli_agl_lock);
1028                 clli->lli_agl_index = 0;
1029                 iput(&clli->lli_vfs_inode);
1030                 spin_lock(&plli->lli_agl_lock);
1031         }
1032         thread_set_flags(thread, SVC_STOPPED);
1033         spin_unlock(&plli->lli_agl_lock);
1034         cfs_waitq_signal(&thread->t_ctl_waitq);
1035         ll_sai_put(sai);
1036         CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
1037                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1038         RETURN(0);
1039 }
1040
1041 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1042 {
1043         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1044         struct l_wait_info    lwi    = { 0 };
1045         int                   rc;
1046         ENTRY;
1047
1048         CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
1049                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1050
1051         rc = cfs_create_thread(ll_agl_thread, parent, 0);
1052         if (rc < 0) {
1053                 CERROR("can't start ll_agl thread, rc: %d\n", rc);
1054                 thread_set_flags(thread, SVC_STOPPED);
1055                 RETURN_EXIT;
1056         }
1057
1058         l_wait_event(thread->t_ctl_waitq,
1059                      thread_is_running(thread) || thread_is_stopped(thread),
1060                      &lwi);
1061         EXIT;
1062 }
1063
1064 static int ll_statahead_thread(void *arg)
1065 {
1066         struct dentry            *parent = (struct dentry *)arg;
1067         struct inode             *dir    = parent->d_inode;
1068         struct ll_inode_info     *plli   = ll_i2info(dir);
1069         struct ll_inode_info     *clli;
1070         struct ll_sb_info        *sbi    = ll_i2sbi(dir);
1071         struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
1072         struct ptlrpc_thread     *thread = &sai->sai_thread;
1073         struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
1074         struct page              *page;
1075         __u64                     pos    = 0;
1076         int                       first  = 0;
1077         int                       rc     = 0;
1078         struct ll_dir_chain       chain;
1079         struct l_wait_info        lwi    = { 0 };
1080         ENTRY;
1081
1082         {
1083                 char pname[16];
1084                 snprintf(pname, 15, "ll_sa_%u", plli->lli_opendir_pid);
1085                 cfs_daemonize(pname);
1086         }
1087
1088         CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
1089                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1090
1091         if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1092                 ll_start_agl(parent, sai);
1093
1094         atomic_inc(&sbi->ll_sa_total);
1095         spin_lock(&plli->lli_sa_lock);
1096         thread_set_flags(thread, SVC_RUNNING);
1097         spin_unlock(&plli->lli_sa_lock);
1098         cfs_waitq_signal(&thread->t_ctl_waitq);
1099
1100         ll_dir_chain_init(&chain);
1101         page = ll_get_dir_page(dir, pos, &chain);
1102
1103         while (1) {
1104                 struct lu_dirpage *dp;
1105                 struct lu_dirent  *ent;
1106
1107                 if (IS_ERR(page)) {
1108                         rc = PTR_ERR(page);
1109                         CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
1110                                "/"LPU64": [rc %d] [parent %u]\n",
1111                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1112                                rc, plli->lli_opendir_pid);
1113                         GOTO(out, rc);
1114                 }
1115
1116                 dp = page_address(page);
1117                 for (ent = lu_dirent_start(dp); ent != NULL;
1118                      ent = lu_dirent_next(ent)) {
1119                         __u64 hash;
1120                         int namelen;
1121                         char *name;
1122
1123                         hash = le64_to_cpu(ent->lde_hash);
1124                         if (unlikely(hash < pos))
1125                                 /*
1126                                  * Skip until we find target hash value.
1127                                  */
1128                                 continue;
1129
1130                         namelen = le16_to_cpu(ent->lde_namelen);
1131                         if (unlikely(namelen == 0))
1132                                 /*
1133                                  * Skip dummy record.
1134                                  */
1135                                 continue;
1136
1137                         name = ent->lde_name;
1138                         if (name[0] == '.') {
1139                                 if (namelen == 1) {
1140                                         /*
1141                                          * skip "."
1142                                          */
1143                                         continue;
1144                                 } else if (name[1] == '.' && namelen == 2) {
1145                                         /*
1146                                          * skip ".."
1147                                          */
1148                                         continue;
1149                                 } else if (!sai->sai_ls_all) {
1150                                         /*
1151                                          * skip hidden files.
1152                                          */
1153                                         sai->sai_skip_hidden++;
1154                                         continue;
1155                                 }
1156                         }
1157
1158                         /*
1159                          * don't stat-ahead first entry.
1160                          */
1161                         if (unlikely(++first == 1))
1162                                 continue;
1163
1164 keep_it:
1165                         l_wait_event(thread->t_ctl_waitq,
1166                                      !sa_sent_full(sai) ||
1167                                      !sa_received_empty(sai) ||
1168                                      !agl_list_empty(sai) ||
1169                                      !thread_is_running(thread),
1170                                      &lwi);
1171
1172 interpret_it:
1173                         while (!sa_received_empty(sai))
1174                                 ll_post_statahead(sai);
1175
1176                         if (unlikely(!thread_is_running(thread))) {
1177                                 ll_release_page(page, 0);
1178                                 GOTO(out, rc = 0);
1179                         }
1180
1181                         /* If there is no window for metadata statahead, but
1182                          * there are some AGL entries to be triggered, then
1183                          * try to help process the AGL entries. */
1184                         if (sa_sent_full(sai)) {
1185                                 spin_lock(&plli->lli_agl_lock);
1186                                 while (!agl_list_empty(sai)) {
1187                                         clli = agl_first_entry(sai);
1188                                         cfs_list_del_init(&clli->lli_agl_list);
1189                                         spin_unlock(&plli->lli_agl_lock);
1190                                         ll_agl_trigger(&clli->lli_vfs_inode,
1191                                                        sai);
1192
1193                                         if (!sa_received_empty(sai))
1194                                                 goto interpret_it;
1195
1196                                         if (unlikely(
1197                                                 !thread_is_running(thread))) {
1198                                                 ll_release_page(page, 0);
1199                                                 GOTO(out, rc = 0);
1200                                         }
1201
1202                                         if (!sa_sent_full(sai))
1203                                                 goto do_it;
1204
1205                                         spin_lock(&plli->lli_agl_lock);
1206                                 }
1207                                 spin_unlock(&plli->lli_agl_lock);
1208
1209                                 goto keep_it;
1210                         }
1211
1212 do_it:
1213                         ll_statahead_one(parent, name, namelen);
1214                 }
1215                 pos = le64_to_cpu(dp->ldp_hash_end);
1216                 if (pos == MDS_DIR_END_OFF) {
1217                         /*
1218                          * End of directory reached.
1219                          */
1220                         ll_release_page(page, 0);
1221                         while (1) {
1222                                 l_wait_event(thread->t_ctl_waitq,
1223                                              !sa_received_empty(sai) ||
1224                                              sai->sai_sent == sai->sai_replied||
1225                                              !thread_is_running(thread),
1226                                              &lwi);
1227
1228                                 while (!sa_received_empty(sai))
1229                                         ll_post_statahead(sai);
1230
1231                                 if (unlikely(!thread_is_running(thread)))
1232                                         GOTO(out, rc = 0);
1233
1234                                 if (sai->sai_sent == sai->sai_replied &&
1235                                     sa_received_empty(sai))
1236                                         break;
1237                         }
1238
1239                         spin_lock(&plli->lli_agl_lock);
1240                         while (!agl_list_empty(sai) &&
1241                                thread_is_running(thread)) {
1242                                 clli = agl_first_entry(sai);
1243                                 cfs_list_del_init(&clli->lli_agl_list);
1244                                 spin_unlock(&plli->lli_agl_lock);
1245                                 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1246                                 spin_lock(&plli->lli_agl_lock);
1247                         }
1248                         spin_unlock(&plli->lli_agl_lock);
1249
1250                         GOTO(out, rc = 0);
1251                 } else if (1) {
1252                         /*
1253                          * chain is exhausted.
1254                          * Normal case: continue to the next page.
1255                          */
1256                         ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1257                                               LDF_COLLIDE);
1258                         sai->sai_in_readpage = 1;
1259                         page = ll_get_dir_page(dir, pos, &chain);
1260                         sai->sai_in_readpage = 0;
1261                 } else {
1262                         LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1263                         ll_release_page(page, 1);
1264                         /*
1265                          * go into overflow page.
1266                          */
1267                 }
1268         }
1269         EXIT;
1270
1271 out:
1272         if (sai->sai_agl_valid) {
1273                 spin_lock(&plli->lli_agl_lock);
1274                 thread_set_flags(agl_thread, SVC_STOPPING);
1275                 spin_unlock(&plli->lli_agl_lock);
1276                 cfs_waitq_signal(&agl_thread->t_ctl_waitq);
1277
1278                 CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
1279                        cfs_curproc_pid());
1280                 l_wait_event(agl_thread->t_ctl_waitq,
1281                              thread_is_stopped(agl_thread),
1282                              &lwi);
1283         } else {
1284                 /* Set agl_thread flags anyway. */
1285                 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1286         }
1287         ll_dir_chain_fini(&chain);
1288         spin_lock(&plli->lli_sa_lock);
1289         if (!sa_received_empty(sai)) {
1290                 thread_set_flags(thread, SVC_STOPPING);
1291                 spin_unlock(&plli->lli_sa_lock);
1292
1293                 /* To release the resources held by received entries. */
1294                 while (!sa_received_empty(sai))
1295                         ll_post_statahead(sai);
1296
1297                 spin_lock(&plli->lli_sa_lock);
1298         }
1299         thread_set_flags(thread, SVC_STOPPED);
1300         spin_unlock(&plli->lli_sa_lock);
1301         cfs_waitq_signal(&sai->sai_waitq);
1302         cfs_waitq_signal(&thread->t_ctl_waitq);
1303         ll_sai_put(sai);
1304         CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
1305                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1306         dput(parent); /* 'parent' must not be dereferenced after this */
1307         return rc;
1308 }
1309
1310 /**
1311  * called in ll_file_release().
1312  */
1313 void ll_stop_statahead(struct inode *dir, void *key)
1314 {
1315         struct ll_inode_info *lli = ll_i2info(dir);
1316
1317         if (unlikely(key == NULL))
1318                 return;
1319
1320         spin_lock(&lli->lli_sa_lock);
1321         if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
1322                 spin_unlock(&lli->lli_sa_lock);
1323                 return;
1324         }
1325
1326         lli->lli_opendir_key = NULL;
1327
1328         if (lli->lli_sai) {
1329                 struct l_wait_info lwi = { 0 };
1330                 struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
1331
1332                 if (!thread_is_stopped(thread)) {
1333                         thread_set_flags(thread, SVC_STOPPING);
1334                         spin_unlock(&lli->lli_sa_lock);
1335                         cfs_waitq_signal(&thread->t_ctl_waitq);
1336
1337                         CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
1338                                cfs_curproc_pid());
1339                         l_wait_event(thread->t_ctl_waitq,
1340                                      thread_is_stopped(thread),
1341                                      &lwi);
1342                 } else {
1343                         spin_unlock(&lli->lli_sa_lock);
1344                 }
1345
1346                 /*
1347                  * Put the ref which was held at the first statahead_enter.
1348                  * It may not be the last ref, since some statahead requests
1349                  * may still be in flight.
1350                  */
1351                 ll_sai_put(lli->lli_sai);
1352         } else {
1353                 lli->lli_opendir_pid = 0;
1354                 spin_unlock(&lli->lli_sa_lock);
1355         }
1356 }
1357
1358 enum {
1359         /**
1360          * not first dirent, or is "."
1361          */
1362         LS_NONE_FIRST_DE = 0,
1363         /**
1364          * the first non-hidden dirent
1365          */
1366         LS_FIRST_DE,
1367         /**
1368          * the first hidden dirent, that is "."
1369          */
1370         LS_FIRST_DOT_DE
1371 };
1372
1373 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1374 {
1375         struct ll_dir_chain   chain;
1376         struct qstr          *target = &dentry->d_name;
1377         struct page          *page;
1378         __u64                 pos    = 0;
1379         int                   dot_de;
1380         int                   rc     = LS_NONE_FIRST_DE;
1381         ENTRY;
1382
1383         ll_dir_chain_init(&chain);
1384         page = ll_get_dir_page(dir, pos, &chain);
1385
1386         while (1) {
1387                 struct lu_dirpage *dp;
1388                 struct lu_dirent  *ent;
1389
1390                 if (IS_ERR(page)) {
1391                         struct ll_inode_info *lli = ll_i2info(dir);
1392
1393                         rc = PTR_ERR(page);
1394                         CERROR("error reading dir "DFID" at "LPU64": "
1395                                "[rc %d] [parent %u]\n",
1396                                PFID(ll_inode2fid(dir)), pos,
1397                                rc, lli->lli_opendir_pid);
1398                         break;
1399                 }
1400
1401                 dp = page_address(page);
1402                 for (ent = lu_dirent_start(dp); ent != NULL;
1403                      ent = lu_dirent_next(ent)) {
1404                         __u64 hash;
1405                         int namelen;
1406                         char *name;
1407
1408                         hash = le64_to_cpu(ent->lde_hash);
1409                         /* The ll_get_dir_page() can return any page containing
1410                          * the given hash, which may not be the start hash. */
1411                         if (unlikely(hash < pos))
1412                                 continue;
1413
1414                         namelen = le16_to_cpu(ent->lde_namelen);
1415                         if (unlikely(namelen == 0))
1416                                 /*
1417                                  * skip dummy record.
1418                                  */
1419                                 continue;
1420
1421                         name = ent->lde_name;
1422                         if (name[0] == '.') {
1423                                 if (namelen == 1)
1424                                         /*
1425                                          * skip "."
1426                                          */
1427                                         continue;
1428                                 else if (name[1] == '.' && namelen == 2)
1429                                         /*
1430                                          * skip ".."
1431                                          */
1432                                         continue;
1433                                 else
1434                                         dot_de = 1;
1435                         } else {
1436                                 dot_de = 0;
1437                         }
1438
1439                         if (dot_de && target->name[0] != '.') {
1440                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1441                                        target->len, target->name,
1442                                        namelen, name);
1443                                 continue;
1444                         }
1445
1446                         if (target->len != namelen ||
1447                             memcmp(target->name, name, namelen) != 0)
1448                                 rc = LS_NONE_FIRST_DE;
1449                         else if (!dot_de)
1450                                 rc = LS_FIRST_DE;
1451                         else
1452                                 rc = LS_FIRST_DOT_DE;
1453
1454                         ll_release_page(page, 0);
1455                         GOTO(out, rc);
1456                 }
1457                 pos = le64_to_cpu(dp->ldp_hash_end);
1458                 if (pos == MDS_DIR_END_OFF) {
1459                         /*
1460                          * End of directory reached.
1461                          */
1462                         ll_release_page(page, 0);
1463                         break;
1464                 } else if (1) {
1465                         /*
1466                          * Hash chain is exhausted.
1467                          * Normal case: continue to the next page.
1468                          */
1469                         ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1470                                               LDF_COLLIDE);
1471                         page = ll_get_dir_page(dir, pos, &chain);
1472                 } else {
1473                         /*
1474                          * go into overflow page.
1475                          */
1476                         LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1477                         ll_release_page(page, 1);
1478                 }
1479         }
1480         EXIT;
1481
1482 out:
1483         ll_dir_chain_fini(&chain);
1484         return rc;
1485 }
1486
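     /*
      * Account a statahead hit or miss for @entry and release it.  On a hit
      * the statahead window is doubled, bounded by sbi->ll_sa_max; on misses
      * that drive the hit ratio too low, the statahead thread is flagged
      * SVC_STOPPING so that it does not keep issuing useless RPCs.
      */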
1487 static void
1488 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1489 {
1490         struct ptlrpc_thread *thread = &sai->sai_thread;
1491         struct ll_sb_info    *sbi    = ll_i2sbi(sai->sai_inode);
1492         int                   hit;
1493         ENTRY;
1494
1495         if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
1496                 hit = 1;
1497         else
1498                 hit = 0;
1499
1500         ll_sa_entry_fini(sai, entry);
1501         if (hit) {
1502                 sai->sai_hit++;
1503                 sai->sai_consecutive_miss = 0;
1504                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1505         } else {
1506                 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
1507
1508                 sai->sai_miss++;
1509                 sai->sai_consecutive_miss++;
1510                 if (sa_low_hit(sai) && thread_is_running(thread)) {
1511                         atomic_inc(&sbi->ll_sa_wrong);
1512                         CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1513                                "ratio too low: hit/miss "LPU64"/"LPU64
1514                                ", sent/replied "LPU64"/"LPU64", stopping "
1515                                "statahead thread: pid %d\n",
1516                                PFID(&lli->lli_fid), sai->sai_hit,
1517                                sai->sai_miss, sai->sai_sent,
1518                                sai->sai_replied, cfs_curproc_pid());
1519                         spin_lock(&lli->lli_sa_lock);
1520                         if (!thread_is_stopped(thread))
1521                                 thread_set_flags(thread, SVC_STOPPING);
1522                         spin_unlock(&lli->lli_sa_lock);
1523                 }
1524         }
1525
1526         if (!thread_is_stopped(thread))
1527                 cfs_waitq_signal(&thread->t_ctl_waitq);
1528
1529         EXIT;
1530 }
1531
1532 /**
1533  * Start the statahead thread if this is the first dir entry.
1534  * Otherwise, if a thread has already been started, wait until it is ahead of me.
1535  * \retval 1       -- found the entry in cache together with its lock; the
1536  *                    caller needs to do nothing.
1537  * \retval 0       -- found the entry in cache, but without a lock; the caller
1538  *                    needs to refresh it from the MDS.
1539  * \retval others  -- the caller needs to process it as non-statahead; a
      *                    caller sketch follows this function.
1540  */
1541 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1542                        int only_unplug)
1543 {
1544         struct ll_inode_info     *lli   = ll_i2info(dir);
1545         struct ll_statahead_info *sai   = lli->lli_sai;
1546         struct dentry            *parent;
1547         struct ll_sa_entry       *entry;
1548         struct ptlrpc_thread     *thread;
1549         struct l_wait_info        lwi   = { 0 };
1550         int                       rc    = 0;
1551         ENTRY;
1552
1553         LASSERT(lli->lli_opendir_pid == cfs_curproc_pid());
1554
1555         if (sai) {
1556                 thread = &sai->sai_thread;
1557                 if (unlikely(thread_is_stopped(thread) &&
1558                              cfs_list_empty(&sai->sai_entries_stated))) {
1559                         /* to release resource */
1560                         ll_stop_statahead(dir, lli->lli_opendir_key);
1561                         RETURN(-EAGAIN);
1562                 }
1563
1564                 if ((*dentryp)->d_name.name[0] == '.') {
1565                         if (sai->sai_ls_all ||
1566                             sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1567                                 /*
1568                                  * Either the hidden dentry is the first one, or the
1569                                  * statahead thread had not skipped this many hidden
1570                                  * dentries before "sai_ls_all" was enabled as below.
1571                                  */
1572                         } else {
1573                                 if (!sai->sai_ls_all)
1574                                         /*
1575                                          * Maybe the hidden dentry is not the
1576                                          * first one, so "sai_ls_all" was not
1577                                          * set and "ls -al" missed it.  Enable
1578                                          * "sai_ls_all" for such a case.
1579                                          */
1580                                         sai->sai_ls_all = 1;
1581
1582                                 /*
1583                                  * Such a "getattr" was skipped before
1584                                  * "sai_ls_all" was enabled as above.
1585                                  */
1586                                 sai->sai_miss_hidden++;
1587                                 RETURN(-EAGAIN);
1588                         }
1589                 }
1590
1591                 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1592                 if (entry == NULL || only_unplug) {
1593                         ll_sai_unplug(sai, entry);
1594                         RETURN(entry ? 1 : -EAGAIN);
1595                 }
1596
1597                 /* if statahead is busy in readdir, help it do the post-work:
                      * interpret the getattr replies it has received so far */
1598                 while (!ll_sa_entry_stated(entry) &&
1599                        sai->sai_in_readpage &&
1600                        !sa_received_empty(sai))
1601                         ll_post_statahead(sai);
1602
1603                 if (!ll_sa_entry_stated(entry)) {
1604                         sai->sai_index_wait = entry->se_index;
1605                         lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1606                                                LWI_ON_SIGNAL_NOOP, NULL);
1607                         rc = l_wait_event(sai->sai_waitq,
1608                                           ll_sa_entry_stated(entry) ||
1609                                           thread_is_stopped(thread),
1610                                           &lwi);
1611                         if (rc < 0) {
1612                                 ll_sai_unplug(sai, entry);
1613                                 RETURN(-EAGAIN);
1614                         }
1615                 }
1616
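                     /*
                      * The entry was statted successfully: try to reuse the
                      * ldlm lock saved in se_handle by the statahead thread to
                      * revalidate the dentry locally, so that no extra getattr
                      * RPC is needed while the lock is still valid.
                      */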
1617                 if (entry->se_stat == SA_ENTRY_SUCC &&
1618                     entry->se_inode != NULL) {
1619                         struct inode *inode = entry->se_inode;
1620                         struct lookup_intent it = { .it_op = IT_GETATTR,
1621                                                     .d.lustre.it_lock_handle =
1622                                                      entry->se_handle };
1623                         __u64 bits;
1624
1625                         rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1626                                                 ll_inode2fid(inode), &bits);
1627                         if (rc == 1) {
1628                                 if ((*dentryp)->d_inode == NULL) {
1629                                         *dentryp = ll_splice_alias(inode,
1630                                                                    *dentryp);
1631                                 } else if ((*dentryp)->d_inode != inode) {
1632                                         /* revalidate, but inode is recreated */
1633                                         CDEBUG(D_READA,
1634                                               "stale dentry %.*s inode %lu/%u, "
1635                                               "statahead inode %lu/%u\n",
1636                                               (*dentryp)->d_name.len,
1637                                               (*dentryp)->d_name.name,
1638                                               (*dentryp)->d_inode->i_ino,
1639                                               (*dentryp)->d_inode->i_generation,
1640                                               inode->i_ino,
1641                                               inode->i_generation);
1642                                         ll_sai_unplug(sai, entry);
1643                                         RETURN(-ESTALE);
1644                                 } else {
1645                                         iput(inode);
1646                                 }
1647                                 entry->se_inode = NULL;
1648
1649                                 if ((bits & MDS_INODELOCK_LOOKUP) &&
1650                                     d_lustre_invalid(*dentryp))
1651                                         d_lustre_revalidate(*dentryp);
1652                                 ll_intent_release(&it);
1653                         }
1654                 }
1655
1656                 ll_sai_unplug(sai, entry);
1657                 RETURN(rc);
1658         }
1659
1660         /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
1661         rc = is_first_dirent(dir, *dentryp);
1662         if (rc == LS_NONE_FIRST_DE)
1663                 /* It is not an "ls -{a}l" operation, so no need to statahead for it. */
1664                 GOTO(out, rc = -EAGAIN);
1665
1666         sai = ll_sai_alloc();
1667         if (sai == NULL)
1668                 GOTO(out, rc = -ENOMEM);
1669
1670         sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1671         sai->sai_inode = igrab(dir);
1672         if (unlikely(sai->sai_inode == NULL)) {
1673                 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1674                       PFID(&lli->lli_fid));
1675                 GOTO(out, rc = -ESTALE);
1676         }
1677
1678         /* take a reference on the parent here; it is put in ll_statahead_thread() */
1679         parent = dget((*dentryp)->d_parent);
1680         if (unlikely(sai->sai_inode != parent->d_inode)) {
1681                 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1682
1683                 CWARN("Race condition, someone changed %.*s just now: "
1684                       "old parent "DFID", new parent "DFID"\n",
1685                       (*dentryp)->d_name.len, (*dentryp)->d_name.name,
1686                       PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1687                 dput(parent);
1688                 iput(sai->sai_inode);
1689                 GOTO(out, rc = -EAGAIN);
1690         }
1691
1692         CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
1693                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1694
1695         lli->lli_sai = sai;
1696         rc = cfs_create_thread(ll_statahead_thread, parent, 0);
1697         thread = &sai->sai_thread;
1698         if (rc < 0) {
1699                 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1700                 dput(parent);
1701                 lli->lli_opendir_key = NULL;
1702                 thread_set_flags(thread, SVC_STOPPED);
1703                 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1704                 ll_sai_put(sai);
1705                 LASSERT(lli->lli_sai == NULL);
1706                 RETURN(-EAGAIN);
1707         }
1708
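             /*
              * Wait for the new statahead thread to reach the running (or
              * already stopped) state before returning for this first dirent.
              */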
1709         l_wait_event(thread->t_ctl_waitq,
1710                      thread_is_running(thread) || thread_is_stopped(thread),
1711                      &lwi);
1712
1713         /*
1714          * We don't stat-ahead for the first dirent since we are already in
1715          * lookup.
1716          */
1717         RETURN(-EAGAIN);
1718
1719 out:
1720         if (sai != NULL)
1721                 OBD_FREE_PTR(sai);
1722         spin_lock(&lli->lli_sa_lock);
1723         lli->lli_opendir_key = NULL;
1724         lli->lli_opendir_pid = 0;
1725         spin_unlock(&lli->lli_sa_lock);
1726         return rc;
1727 }
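     /*
      * Illustrative caller sketch (an assumption for documentation purposes,
      * not code from this file): a lookup or revalidate path in llite would
      * use do_statahead_enter() roughly as below, mapping the return values
      * as documented above the function.  Note that *dentryp may be replaced
      * via ll_splice_alias(), and that the caller must be the
      * "lli_opendir_pid" owner (see the LASSERT at the top of the function).
      * The helpers use_cached_dentry(), refresh_from_mds() and
      * do_normal_lookup() are placeholders, not real llite functions.
      *
      *    rc = do_statahead_enter(dir, &dentry, 0);
      *    if (rc == 1)
      *            return use_cached_dentry(dentry);      (entry and lock cached)
      *    if (rc == 0)
      *            return refresh_from_mds(dir, dentry);  (entry cached, no lock)
      *    return do_normal_lookup(dir, dentry);  (e.g. rc == -EAGAIN or -ESTALE)
      */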