LU-925 agl: async glimpse lock process in CLIO stack
fs/lustre-release.git: lustre/llite/statahead.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2011 Whamcloud, Inc.
33  *
34  */
35 /*
36  * This file is part of Lustre, http://www.lustre.org/
37  * Lustre is a trademark of Sun Microsystems, Inc.
38  */
39
40 #include <linux/fs.h>
41 #include <linux/sched.h>
42 #include <linux/mm.h>
43 #include <linux/smp_lock.h>
44 #include <linux/highmem.h>
45 #include <linux/pagemap.h>
46
47 #define DEBUG_SUBSYSTEM S_LLITE
48
49 #include <obd_support.h>
50 #include <lustre_lite.h>
51 #include <lustre_dlm.h>
52 #include <linux/lustre_version.h>
53 #include "llite_internal.h"
54
55 #define SA_OMITTED_ENTRY_MAX 8ULL
56
57 typedef enum {
58         /** negative values are for error cases */
59         SA_ENTRY_INIT = 0,      /** init entry */
60         SA_ENTRY_SUCC = 1,      /** stat succeed */
61         SA_ENTRY_INVA = 2,      /** invalid entry */
62         SA_ENTRY_DEST = 3,      /** entry to be destroyed */
63 } se_stat_t;
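/*
 * Note (illustrative, derived from the code below): se_stat may also hold a
 * negative errno -- do_sai_entry_to_stated() stores the interpret rc directly
 * when an async getattr request fails.
 */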
64
65 struct ll_sa_entry {
66         /* link into sai->sai_entries_{sent,received,stated} */
67         cfs_list_t              se_list;
68         /* link into sai hash table locally */
69         cfs_list_t              se_hash;
70         /* entry reference count */
71         cfs_atomic_t            se_refcount;
72         /* entry index in the sai */
73         __u64                   se_index;
74         /* low layer ldlm lock handle */
75         __u64                   se_handle;
76         /* entry status */
77         se_stat_t               se_stat;
78         /* entry size, contains name */
79         int                     se_size;
80         /* pointer to async getattr enqueue info */
81         struct md_enqueue_info *se_minfo;
82         /* pointer to the async getattr request */
83         struct ptlrpc_request  *se_req;
84         /* pointer to the target inode */
85         struct inode           *se_inode;
86         /* entry name */
87         struct qstr             se_qstr;
88 };
89
90 static unsigned int sai_generation = 0;
91 static cfs_spinlock_t sai_generation_lock = CFS_SPIN_LOCK_UNLOCKED;
92
93 static inline int ll_sa_entry_unlinked(struct ll_sa_entry *entry)
94 {
95         return cfs_list_empty(&entry->se_list);
96 }
97
98 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
99 {
100         return cfs_list_empty(&entry->se_hash);
101 }
102
103 /*
104  * The entry can only be released by the caller; it is necessary to hold the lock.
105  */
106 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
107 {
108         smp_rmb();
109         return (entry->se_stat != SA_ENTRY_INIT);
110 }
111
112 static inline int ll_sa_entry_hash(int val)
113 {
114         return val & LL_SA_CACHE_MASK;
115 }
116
117 /*
118  * Insert entry into the SA hash table.
119  */
120 static inline void
121 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
122 {
123         int i = ll_sa_entry_hash(entry->se_qstr.hash);
124
125         cfs_spin_lock(&sai->sai_cache_lock[i]);
126         cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
127         cfs_spin_unlock(&sai->sai_cache_lock[i]);
128 }
129
130 /*
131  * Remove entry from SA table.
132  */
133 static inline void
134 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
135 {
136         int i = ll_sa_entry_hash(entry->se_qstr.hash);
137
138         cfs_spin_lock(&sai->sai_cache_lock[i]);
139         cfs_list_del_init(&entry->se_hash);
140         cfs_spin_unlock(&sai->sai_cache_lock[i]);
141 }
142
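/*
 * Note (descriptive, from the check below): AGL (async glimpse lock) is only
 * attempted for a regular file that already has striping information
 * (lli_smd != NULL), and only while sai_agl_valid is set by the AGL thread.
 */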
143 static inline int agl_should_run(struct ll_statahead_info *sai,
144                                  struct inode *inode)
145 {
146         if (inode != NULL && S_ISREG(inode->i_mode) &&
147             ll_i2info(inode)->lli_smd != NULL && sai->sai_agl_valid)
148                 return 1;
149         return 0;
150 }
151
152 static inline struct ll_sa_entry *
153 sa_first_received_entry(struct ll_statahead_info *sai)
154 {
155         return cfs_list_entry(sai->sai_entries_received.next,
156                               struct ll_sa_entry, se_list);
157 }
158
159 static inline struct ll_inode_info *
160 agl_first_entry(struct ll_statahead_info *sai)
161 {
162         return cfs_list_entry(sai->sai_entries_agl.next,
163                               struct ll_inode_info, lli_agl_list);
164 }
165
166 static inline int sa_sent_full(struct ll_statahead_info *sai)
167 {
168         return cfs_atomic_read(&sai->sai_cache_count) >= sai->sai_max;
169 }
170
171 static inline int sa_received_empty(struct ll_statahead_info *sai)
172 {
173         return cfs_list_empty(&sai->sai_entries_received);
174 }
175
176 static inline int agl_list_empty(struct ll_statahead_info *sai)
177 {
178         return cfs_list_empty(&sai->sai_entries_agl);
179 }
180
181 /**
182  * The hit rate is considered low if either:
183  * (1) the hit ratio is less than 80%,
184  * or
185  * (2) there have been more than 8 consecutive misses.
186  */
187 static inline int sa_low_hit(struct ll_statahead_info *sai)
188 {
189         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
190                 (sai->sai_consecutive_miss > 8));
191 }
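/*
 * Worked example for sa_low_hit() above: with sai_hit = 8 and sai_miss = 3,
 * 8 < 4 * 3 holds (a hit ratio of 8/11 ~= 73%, below 80%), so it returns 1.
 * The ratio check only applies once sai_hit > 7; more than 8 consecutive
 * misses triggers it regardless of the ratio.
 */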
192
193 /*
194  * If the given index falls behind the statahead window by more than
195  * SA_OMITTED_ENTRY_MAX, then it is old.
196  */
197 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
198 {
199         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
200                  sai->sai_index);
201 }
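/*
 * Worked example for is_omitted_entry() above: with sai_max = 32 and
 * sai_index = 100, an index is "omitted" when 32 + index + 8 < 100,
 * i.e. index <= 59 -- anything trailing the current statahead index by
 * more than sai_max + SA_OMITTED_ENTRY_MAX entries.
 */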
202
203 /*
204  * Insert the new entry at the tail of sai_entries_sent when it is initialized.
205  */
206 static struct ll_sa_entry *
207 ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
208                   const char *name, int len)
209 {
210         struct ll_inode_info *lli;
211         struct ll_sa_entry   *entry;
212         int                   entry_size;
213         char                 *dname;
214         ENTRY;
215
216         entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
217         OBD_ALLOC(entry, entry_size);
218         if (unlikely(entry == NULL))
219                 RETURN(ERR_PTR(-ENOMEM));
220
221         CDEBUG(D_READA, "alloc sai entry %.*s(%p) index "LPU64"\n",
222                len, name, entry, index);
223
224         entry->se_index = index;
225
226         /*
227          * Statahead entry reference rules:
228          *
229          * 1) When a statahead entry is initialized, its reference count is set
230          *    to 2. One reference is used by the directory scanner. When the
231          *    scanner searches the statahead cache for a given name, it can do a
232          *    lockless hash lookup (only the scanner can remove an entry from the
233          *    hash list), and once the entry is found, it does not need to call
234          *    "atomic_inc()" on the entry reference, which improves performance.
235          *    After using the statahead entry, the scanner calls "atomic_dec()"
236          *    to drop the reference taken at initialization. If it is the last
237          *    reference, the statahead entry is freed.
238          *
239          * 2) All other threads, including the statahead thread and the ptlrpcd
240          *    thread, must hold a reference on the target entry while they are
241          *    processing it, to guarantee that the entry will not be released by
242          *    the directory scanner. After processing the entry, these threads
243          *    drop their reference. If it is the last reference, the entry is
244          *    freed.
245          *
246          *    The second reference taken at initialization is used by the
247          *    statahead thread, following rule 2) above.
248          */
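        /*
         * Illustrative lifetime of the two initial references (a sketch of
         * the rules above): ll_sa_entry_alloc() starts the entry at
         * refcount 2; the statahead thread drops its reference with
         * ll_sa_entry_put() at the end of ll_statahead_one(), and the
         * scanner's reference is dropped via do_sai_entry_fini() /
         * ll_sa_entry_put() once the entry is consumed or ages out of the
         * window, the last put freeing the entry.
         */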
249         cfs_atomic_set(&entry->se_refcount, 2);
250         entry->se_stat = SA_ENTRY_INIT;
251         entry->se_size = entry_size;
252         dname = (char *)entry + sizeof(struct ll_sa_entry);
253         memcpy(dname, name, len);
254         dname[len] = 0;
255         entry->se_qstr.hash = full_name_hash(name, len);
256         entry->se_qstr.len = len;
257         entry->se_qstr.name = dname;
258
259         lli = ll_i2info(sai->sai_inode);
260         cfs_spin_lock(&lli->lli_sa_lock);
261         cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
262         cfs_spin_unlock(&lli->lli_sa_lock);
263
264         cfs_atomic_inc(&sai->sai_cache_count);
265         ll_sa_entry_enhash(sai, entry);
266
267         RETURN(entry);
268 }
269
270 /*
271  * Used by the directory scanner to search for an entry by name.
272  *
273  * Only the caller can remove the entry from the hash, so it is unnecessary to
274  * hold the hash lock. It is the caller's duty to release the initial refcount
275  * on the entry, so it is also unnecessary to increase the refcount on it.
276  */
277 static struct ll_sa_entry *
278 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
279 {
280         struct ll_sa_entry *entry;
281         int i = ll_sa_entry_hash(qstr->hash);
282
283         cfs_list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
284                 if (entry->se_qstr.hash == qstr->hash &&
285                     entry->se_qstr.len == qstr->len &&
286                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
287                         return entry;
288         }
289         return NULL;
290 }
291
292 /*
293  * Used by the async getattr request callback to find an entry by index.
294  *
295  * Called inside lli_sa_lock to prevent others from changing the list during
296  * the search. It increases the entry refcount before returning to guarantee
297  * that the entry cannot be freed by others.
298  */
299 static struct ll_sa_entry *
300 ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
301 {
302         struct ll_sa_entry *entry;
303
304         cfs_list_for_each_entry(entry, &sai->sai_entries_sent, se_list) {
305                 if (entry->se_index == index) {
306                         cfs_atomic_inc(&entry->se_refcount);
307                         return entry;
308                 }
309                 if (entry->se_index > index)
310                         break;
311         }
312         return NULL;
313 }
314
315 static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
316                                  struct ll_sa_entry *entry)
317 {
318         struct md_enqueue_info *minfo = entry->se_minfo;
319         struct ptlrpc_request  *req   = entry->se_req;
320
321         if (minfo) {
322                 entry->se_minfo = NULL;
323                 ll_intent_release(&minfo->mi_it);
324                 iput(minfo->mi_dir);
325                 OBD_FREE_PTR(minfo);
326         }
327
328         if (req) {
329                 entry->se_req = NULL;
330                 ptlrpc_req_finished(req);
331         }
332 }
333
334 static void ll_sa_entry_put(struct ll_statahead_info *sai,
335                              struct ll_sa_entry *entry)
336 {
337         if (cfs_atomic_dec_and_test(&entry->se_refcount)) {
338                 CDEBUG(D_READA, "free sai entry %.*s(%p) index "LPU64"\n",
339                        entry->se_qstr.len, entry->se_qstr.name, entry,
340                        entry->se_index);
341
342                 LASSERT(ll_sa_entry_unhashed(entry));
343                 LASSERT(ll_sa_entry_unlinked(entry));
344
345                 ll_sa_entry_cleanup(sai, entry);
346                 if (entry->se_inode)
347                         iput(entry->se_inode);
348
349                 OBD_FREE(entry, entry->se_size);
350                 cfs_atomic_dec(&sai->sai_cache_count);
351         }
352 }
353
354 static inline void
355 do_sai_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
356 {
357         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
358
359         ll_sa_entry_unhash(sai, entry);
360
361         cfs_spin_lock(&lli->lli_sa_lock);
362         entry->se_stat = SA_ENTRY_DEST;
363         if (likely(!ll_sa_entry_unlinked(entry)))
364                 cfs_list_del_init(&entry->se_list);
365         cfs_spin_unlock(&lli->lli_sa_lock);
366
367         ll_sa_entry_put(sai, entry);
368 }
369
370 /*
371  * Delete the given entry at fini time, and drop aged-out entries from the
372  * sent and stated lists.
372  */
373 static void
374 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
375 {
376         struct ll_sa_entry *pos, *next;
377
378         if (entry)
379                 do_sai_entry_fini(sai, entry);
380
381         /* drop old entry from sent list */
382         cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries_sent,
383                                      se_list) {
384                 if (is_omitted_entry(sai, pos->se_index))
385                         do_sai_entry_fini(sai, pos);
386                 else
387                         break;
388         }
389
390         /* drop old entry from stated list */
391         cfs_list_for_each_entry_safe(pos, next, &sai->sai_entries_stated,
392                                      se_list) {
393                 if (is_omitted_entry(sai, pos->se_index))
394                         do_sai_entry_fini(sai, pos);
395                 else
396                         break;
397         }
398 }
399
400 /*
401  * Called with lli_sa_lock held.
402  */
403 static void
404 do_sai_entry_to_stated(struct ll_statahead_info *sai,
405                        struct ll_sa_entry *entry, int rc)
406 {
407         struct ll_sa_entry *se;
408         cfs_list_t         *pos = &sai->sai_entries_stated;
409
410         if (!ll_sa_entry_unlinked(entry))
411                 cfs_list_del_init(&entry->se_list);
412
413         cfs_list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
414                 if (se->se_index < entry->se_index) {
415                         pos = &se->se_list;
416                         break;
417                 }
418         }
419
420         cfs_list_add(&entry->se_list, pos);
421         entry->se_stat = rc;
422 }
423
424 /*
425  * Move the entry to sai_entries_stated, keeping the list sorted by index.
426  * \retval 1    -- entry to be destroyed.
427  * \retval 0    -- entry is inserted into stated list.
428  */
429 static int
430 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
431                        struct ll_sa_entry *entry, int rc)
432 {
433         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
434         int                   ret = 1;
435
436         ll_sa_entry_cleanup(sai, entry);
437
438         cfs_spin_lock(&lli->lli_sa_lock);
439         if (likely(entry->se_stat != SA_ENTRY_DEST)) {
440                 do_sai_entry_to_stated(sai, entry, rc);
441                 ret = 0;
442         }
443         cfs_spin_unlock(&lli->lli_sa_lock);
444
445         return ret;
446 }
447
448 /*
449  * Insert inode into the list of sai_entries_agl.
450  */
451 static void ll_agl_add(struct ll_statahead_info *sai,
452                        struct inode *inode, int index)
453 {
454         struct ll_inode_info *child  = ll_i2info(inode);
455         struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
456         int                   added  = 0;
457
458         cfs_spin_lock(&child->lli_agl_lock);
459         if (child->lli_agl_index == 0) {
460                 child->lli_agl_index = index;
461                 cfs_spin_unlock(&child->lli_agl_lock);
462
463                 LASSERT(cfs_list_empty(&child->lli_agl_list));
464
465                 igrab(inode);
466                 cfs_spin_lock(&parent->lli_agl_lock);
467                 if (agl_list_empty(sai))
468                         added = 1;
469                 cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
470                 cfs_spin_unlock(&parent->lli_agl_lock);
471         } else {
472                 cfs_spin_unlock(&child->lli_agl_lock);
473         }
474
475         if (added > 0)
476                 cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
477 }
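/*
 * Note on ll_agl_add() above: the child inode is pinned with igrab() and its
 * lli_agl_index records the statahead index that queued it; the AGL thread is
 * only signalled when the list transitions from empty to non-empty, since a
 * non-empty list means the thread has already been woken.
 */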
478
479 static struct ll_statahead_info *ll_sai_alloc(void)
480 {
481         struct ll_statahead_info *sai;
482         int                       i;
483         ENTRY;
484
485         OBD_ALLOC_PTR(sai);
486         if (!sai)
487                 RETURN(NULL);
488
489         cfs_atomic_set(&sai->sai_refcount, 1);
490
491         cfs_spin_lock(&sai_generation_lock);
492         sai->sai_generation = ++sai_generation;
493         if (unlikely(sai_generation == 0))
494                 sai->sai_generation = ++sai_generation;
495         cfs_spin_unlock(&sai_generation_lock);
496
497         sai->sai_max = LL_SA_RPC_MIN;
498         sai->sai_index = 1;
499         cfs_waitq_init(&sai->sai_waitq);
500         cfs_waitq_init(&sai->sai_thread.t_ctl_waitq);
501         cfs_waitq_init(&sai->sai_agl_thread.t_ctl_waitq);
502
503         CFS_INIT_LIST_HEAD(&sai->sai_entries_sent);
504         CFS_INIT_LIST_HEAD(&sai->sai_entries_received);
505         CFS_INIT_LIST_HEAD(&sai->sai_entries_stated);
506         CFS_INIT_LIST_HEAD(&sai->sai_entries_agl);
507
508         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
509                 CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
510                 cfs_spin_lock_init(&sai->sai_cache_lock[i]);
511         }
512         cfs_atomic_set(&sai->sai_cache_count, 0);
513
514         RETURN(sai);
515 }
516
517 static inline struct ll_statahead_info *
518 ll_sai_get(struct ll_statahead_info *sai)
519 {
520         cfs_atomic_inc(&sai->sai_refcount);
521         return sai;
522 }
523
524 static void ll_sai_put(struct ll_statahead_info *sai)
525 {
526         struct inode         *inode = sai->sai_inode;
527         struct ll_inode_info *lli   = ll_i2info(inode);
528         ENTRY;
529
530         if (cfs_atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
531                 struct ll_sa_entry *entry, *next;
532
533                 if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
534                         /* This is a race case: the interpret callback just
535                          * holds a reference count */
536                         cfs_spin_unlock(&lli->lli_sa_lock);
537                         RETURN_EXIT;
538                 }
539
540                 LASSERT(lli->lli_opendir_key == NULL);
541                 LASSERT(thread_is_stopped(&sai->sai_thread));
542                 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
543
544                 lli->lli_sai = NULL;
545                 lli->lli_opendir_pid = 0;
546                 cfs_spin_unlock(&lli->lli_sa_lock);
547
548                 if (sai->sai_sent > sai->sai_replied)
549                         CDEBUG(D_READA,"statahead for dir "DFID" does not "
550                               "finish: [sent:"LPU64"] [replied:"LPU64"]\n",
551                               PFID(&lli->lli_fid),
552                               sai->sai_sent, sai->sai_replied);
553
554                 cfs_list_for_each_entry_safe(entry, next,
555                                              &sai->sai_entries_sent, se_list)
556                         do_sai_entry_fini(sai, entry);
557
558                 LASSERT(sa_received_empty(sai));
559
560                 cfs_list_for_each_entry_safe(entry, next,
561                                              &sai->sai_entries_stated, se_list)
562                         do_sai_entry_fini(sai, entry);
563
564                 LASSERT(cfs_atomic_read(&sai->sai_cache_count) == 0);
565                 LASSERT(agl_list_empty(sai));
566
567                 iput(inode);
568                 OBD_FREE_PTR(sai);
569         }
570
571         EXIT;
572 }
573
574 /* Inodes on sai_entries_agl hold an extra refcount (igrab); do NOT forget to drop it (iput) once processed. */
575 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
576 {
577         struct ll_inode_info *lli   = ll_i2info(inode);
578         __u64                 index = lli->lli_agl_index;
579         int                   rc;
580         ENTRY;
581
582         LASSERT(cfs_list_empty(&lli->lli_agl_list));
583
584         /* AGL may fall behind statahead by one entry */
585         if (is_omitted_entry(sai, index + 1)) {
586                 lli->lli_agl_index = 0;
587                 iput(inode);
588                 RETURN_EXIT;
589         }
590
591         /* Someone is in glimpse (sync or async), do nothing. */
592         rc = cfs_down_write_trylock(&lli->lli_glimpse_sem);
593         if (rc == 0) {
594                 lli->lli_agl_index = 0;
595                 iput(inode);
596                 RETURN_EXIT;
597         }
598
599         /*
600          * Someone triggered a glimpse within the last second.
601          * 1) The former glimpse succeeded with a glimpse lock granted by the OST.
602          *    If the lock is still cached on the client, AGL needs to do nothing.
603          *    If it was cancelled by another client, AGL may not be able to obtain
604          *    a new lock, since no glimpse callback is triggered by AGL itself.
605          * 2) The former glimpse succeeded, but the OST did not grant the glimpse
606          *    lock. In that case, it is quite possible that the OST will not grant
607          *    a glimpse lock for AGL either.
608          * 3) The former glimpse failed. Compared with the other two cases, this is
609          *    relatively rare. AGL can ignore such a case; it will not significantly
610          *    affect performance.
611          */
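        /*
         * The check below implements the 1-second throttle described above:
         * cfs_time_before(cfs_time_shift(-1), lli_glimpse_time) is true when
         * the last glimpse happened within the last second, in which case the
         * async glimpse is skipped and the inode reference is dropped.
         */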
612         if (lli->lli_glimpse_time != 0 &&
613             cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
614                 cfs_up_write(&lli->lli_glimpse_sem);
615                 lli->lli_agl_index = 0;
616                 iput(inode);
617                 RETURN_EXIT;
618         }
619
620         CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
621                DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
622
623         cl_agl(inode);
624         lli->lli_agl_index = 0;
625         lli->lli_glimpse_time = cfs_time_current();
626         cfs_up_write(&lli->lli_glimpse_sem);
627
628         CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
629                DFID", idx = "LPU64", rc = %d\n",
630                PFID(&lli->lli_fid), index, rc);
631
632         iput(inode);
633
634         EXIT;
635 }
636
637 static void do_statahead_interpret(struct ll_statahead_info *sai,
638                                    struct ll_sa_entry *target)
639 {
640         struct inode           *dir   = sai->sai_inode;
641         struct inode           *child;
642         struct ll_inode_info   *lli   = ll_i2info(dir);
643         struct ll_sa_entry     *entry;
644         struct md_enqueue_info *minfo;
645         struct lookup_intent   *it;
646         struct ptlrpc_request  *req;
647         struct mdt_body        *body;
648         int                     rc    = 0;
649         ENTRY;
650
651         cfs_spin_lock(&lli->lli_sa_lock);
652         if (target != NULL && target->se_req != NULL &&
653             !cfs_list_empty(&target->se_list)) {
654                 entry = target;
655         } else if (unlikely(sa_received_empty(sai))) {
656                 cfs_spin_unlock(&lli->lli_sa_lock);
657                 RETURN_EXIT;
658         } else {
659                 entry = sa_first_received_entry(sai);
660         }
661
662         cfs_atomic_inc(&entry->se_refcount);
663         cfs_list_del_init(&entry->se_list);
664         cfs_spin_unlock(&lli->lli_sa_lock);
665
666         LASSERT(entry->se_handle != 0);
667
668         minfo = entry->se_minfo;
669         it = &minfo->mi_it;
670         req = entry->se_req;
671         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
672         if (body == NULL)
673                 GOTO(out, rc = -EFAULT);
674
675         child = entry->se_inode;
676         if (child == NULL) {
677                 /*
678                  * lookup.
679                  */
680                 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
681
682                 /* XXX: No fid in reply; this is probably a cross-ref case.
683                  * SA can't handle it yet. */
684                 if (body->valid & OBD_MD_MDS)
685                         GOTO(out, rc = -EAGAIN);
686         } else {
687                 /*
688                  * revalidate.
689                  */
690                 /* unlinked and re-created with the same name */
691                 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))){
692                         entry->se_inode = NULL;
693                         iput(child);
694                         child = NULL;
695                 }
696         }
697
698         it->d.lustre.it_lock_handle = entry->se_handle;
699         rc = md_revalidate_lock(ll_i2mdexp(dir), it, NULL, NULL);
700         if (rc != 1)
701                 GOTO(out, rc = -EAGAIN);
702
703         rc = ll_prep_inode(&child, req, dir->i_sb);
704         if (rc)
705                 GOTO(out, rc);
706
707         CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
708                child, child->i_ino, child->i_generation);
709         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
710
711         entry->se_inode = child;
712
713         if (agl_should_run(sai, child))
714                 ll_agl_add(sai, child, entry->se_index);
715
716         EXIT;
717
718 out:
719         /* "ll_sa_entry_to_stated()" will drop the related ldlm ibits lock
720          * reference by calling "ll_intent_drop_lock()", regardless of whether
721          * the above operations failed or not. Do not worry about calling
722          * "ll_intent_drop_lock()" more than once. */
723         rc = ll_sa_entry_to_stated(sai, entry, rc < 0 ? rc : SA_ENTRY_SUCC);
724         if (rc == 0 && entry->se_index == sai->sai_index_wait && target == NULL)
725                 cfs_waitq_signal(&sai->sai_waitq);
726         ll_sa_entry_put(sai, entry);
727 }
728
729 static int ll_statahead_interpret(struct ptlrpc_request *req,
730                                   struct md_enqueue_info *minfo, int rc)
731 {
732         struct lookup_intent     *it  = &minfo->mi_it;
733         struct inode             *dir = minfo->mi_dir;
734         struct ll_inode_info     *lli = ll_i2info(dir);
735         struct ll_statahead_info *sai = NULL;
736         struct ll_sa_entry       *entry;
737         int                       wakeup;
738         ENTRY;
739
740         if (it_disposition(it, DISP_LOOKUP_NEG))
741                 rc = -ENOENT;
742
743         cfs_spin_lock(&lli->lli_sa_lock);
744         /* stale entry */
745         if (unlikely(lli->lli_sai == NULL ||
746                      lli->lli_sai->sai_generation != minfo->mi_generation)) {
747                 cfs_spin_unlock(&lli->lli_sa_lock);
748                 GOTO(out, rc = -ESTALE);
749         } else {
750                 sai = ll_sai_get(lli->lli_sai);
751                 if (unlikely(!thread_is_running(&sai->sai_thread))) {
752                         sai->sai_replied++;
753                         cfs_spin_unlock(&lli->lli_sa_lock);
754                         GOTO(out, rc = -EBADFD);
755                 }
756
757                 entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
758                 if (entry == NULL) {
759                         sai->sai_replied++;
760                         cfs_spin_unlock(&lli->lli_sa_lock);
761                         GOTO(out, rc = -EIDRM);
762                 }
763
764                 cfs_list_del_init(&entry->se_list);
765                 if (rc != 0) {
766                         sai->sai_replied++;
767                         do_sai_entry_to_stated(sai, entry, rc);
768                         cfs_spin_unlock(&lli->lli_sa_lock);
769                         if (entry->se_index == sai->sai_index_wait)
770                                 cfs_waitq_signal(&sai->sai_waitq);
771                 } else {
772                         entry->se_minfo = minfo;
773                         entry->se_req = ptlrpc_request_addref(req);
774                         /* Release the async ibits lock ASAP to avoid deadlock:
775                          * the statahead thread may enqueue a lock on the parent
776                          * for readpage while another thread enqueues a lock on a
777                          * child with the parent's lock held, e.g. unlink. */
778                         entry->se_handle = it->d.lustre.it_lock_handle;
779                         ll_intent_drop_lock(it);
780                         wakeup = sa_received_empty(sai);
781                         cfs_list_add_tail(&entry->se_list,
782                                           &sai->sai_entries_received);
783                         sai->sai_replied++;
784                         cfs_spin_unlock(&lli->lli_sa_lock);
785                         if (wakeup)
786                                 cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
787                 }
788                 ll_sa_entry_put(sai, entry);
789         }
790
791         EXIT;
792
793 out:
794         if (rc != 0) {
795                 ll_intent_release(it);
796                 iput(dir);
797                 OBD_FREE_PTR(minfo);
798         }
799         if (sai != NULL)
800                 ll_sai_put(sai);
801         return rc;
802 }
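/*
 * Summary of ll_statahead_interpret() above: a stale sai generation returns
 * -ESTALE, a stopped statahead thread -EBADFD, and a missing entry -EIDRM;
 * on error the entry is moved straight to the stated list, while on success
 * the reply and intent are stashed in the entry, the lock handle is saved and
 * the ibits lock dropped early (per the comment in the success branch), and
 * the entry is queued on sai_entries_received for the statahead thread.
 */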
803
804 static void sa_args_fini(struct md_enqueue_info *minfo,
805                          struct ldlm_enqueue_info *einfo)
806 {
807         LASSERT(minfo && einfo);
808         iput(minfo->mi_dir);
809         capa_put(minfo->mi_data.op_capa1);
810         capa_put(minfo->mi_data.op_capa2);
811         OBD_FREE_PTR(minfo);
812         OBD_FREE_PTR(einfo);
813 }
814
815 /**
816  * There is a race condition between "capa_put" and "ll_statahead_interpret"
817  * when accessing "op_data.op_capa[1,2]": "capa_put" releases the reference
818  * count on "op_data.op_capa[1,2]" after calling "md_intent_getattr_async",
819  * but "ll_statahead_interpret" may run first and fill "op_data.op_capa[1,2]"
820  * with POISON, causing "capa_put" to access an invalid "ocapa". So reserve
821  * "op_data.op_capa[1,2]" in "pcapa" here before calling
822  * "md_intent_getattr_async".
823  */
824 static int sa_args_init(struct inode *dir, struct inode *child,
825                         struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
826                         struct ldlm_enqueue_info **pei,
827                         struct obd_capa **pcapa)
828 {
829         struct qstr              *qstr = &entry->se_qstr;
830         struct ll_inode_info     *lli  = ll_i2info(dir);
831         struct md_enqueue_info   *minfo;
832         struct ldlm_enqueue_info *einfo;
833         struct md_op_data        *op_data;
834
835         OBD_ALLOC_PTR(einfo);
836         if (einfo == NULL)
837                 return -ENOMEM;
838
839         OBD_ALLOC_PTR(minfo);
840         if (minfo == NULL) {
841                 OBD_FREE_PTR(einfo);
842                 return -ENOMEM;
843         }
844
845         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
846                                      qstr->len, 0, LUSTRE_OPC_ANY, NULL);
847         if (IS_ERR(op_data)) {
848                 OBD_FREE_PTR(einfo);
849                 OBD_FREE_PTR(minfo);
850                 return PTR_ERR(op_data);
851         }
852
853         minfo->mi_it.it_op = IT_GETATTR;
854         minfo->mi_dir = igrab(dir);
855         minfo->mi_cb = ll_statahead_interpret;
856         minfo->mi_generation = lli->lli_sai->sai_generation;
857         minfo->mi_cbdata = entry->se_index;
858
859         einfo->ei_type   = LDLM_IBITS;
860         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
861         einfo->ei_cb_bl  = ll_md_blocking_ast;
862         einfo->ei_cb_cp  = ldlm_completion_ast;
863         einfo->ei_cb_gl  = NULL;
864         einfo->ei_cbdata = NULL;
865
866         *pmi = minfo;
867         *pei = einfo;
868         pcapa[0] = op_data->op_capa1;
869         pcapa[1] = op_data->op_capa2;
870
871         return 0;
872 }
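/*
 * Usage sketch for sa_args_init() above: do_sa_lookup() and do_sa_revalidate()
 * below call it to build the enqueue arguments, then issue
 * md_intent_getattr_async(); on success they release the reserved capas with
 * capa_put(), otherwise sa_args_fini() tears everything down again.
 */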
873
874 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
875 {
876         struct md_enqueue_info   *minfo;
877         struct ldlm_enqueue_info *einfo;
878         struct obd_capa          *capas[2];
879         int                       rc;
880         ENTRY;
881
882         rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
883         if (rc)
884                 RETURN(rc);
885
886         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
887         if (!rc) {
888                 capa_put(capas[0]);
889                 capa_put(capas[1]);
890         } else {
891                 sa_args_fini(minfo, einfo);
892         }
893
894         RETURN(rc);
895 }
896
897 /**
898  * Similar to ll_revalidate_it().
899  * \retval      1 -- dentry valid
900  * \retval      0 -- will send stat-ahead request
901  * \retval others -- prepare stat-ahead request failed
902  */
903 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
904                             struct dentry *dentry)
905 {
906         struct inode             *inode = dentry->d_inode;
907         struct lookup_intent      it = { .it_op = IT_GETATTR,
908                                          .d.lustre.it_lock_handle = 0 };
909         struct md_enqueue_info   *minfo;
910         struct ldlm_enqueue_info *einfo;
911         struct obd_capa          *capas[2];
912         int rc;
913         ENTRY;
914
915         if (unlikely(inode == NULL))
916                 RETURN(1);
917
918         if (d_mountpoint(dentry))
919                 RETURN(1);
920
921         if (unlikely(dentry == dentry->d_sb->s_root))
922                 RETURN(1);
923
924         entry->se_inode = igrab(inode);
925         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL);
926         if (rc == 1) {
927                 entry->se_handle = it.d.lustre.it_lock_handle;
928                 ll_intent_release(&it);
929                 RETURN(1);
930         }
931
932         rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
933         if (rc) {
934                 entry->se_inode = NULL;
935                 iput(inode);
936                 RETURN(rc);
937         }
938
939         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
940         if (!rc) {
941                 capa_put(capas[0]);
942                 capa_put(capas[1]);
943         } else {
944                 entry->se_inode = NULL;
945                 iput(inode);
946                 sa_args_fini(minfo, einfo);
947         }
948
949         RETURN(rc);
950 }
951
952 static void ll_statahead_one(struct dentry *parent, const char* entry_name,
953                              int entry_name_len)
954 {
955         struct inode             *dir    = parent->d_inode;
956         struct ll_inode_info     *lli    = ll_i2info(dir);
957         struct ll_statahead_info *sai    = lli->lli_sai;
958         struct dentry            *dentry = NULL;
959         struct ll_sa_entry       *entry;
960         int                       rc;
961         int                       rc1;
962         ENTRY;
963
964         entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
965                                   entry_name_len);
966         if (IS_ERR(entry))
967                 RETURN_EXIT;
968
969         dentry = d_lookup(parent, &entry->se_qstr);
970         if (!dentry) {
971                 rc = do_sa_lookup(dir, entry);
972         } else {
973                 rc = do_sa_revalidate(dir, entry, dentry);
974                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
975                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
976         }
977
978         if (dentry != NULL)
979                 dput(dentry);
980
981         if (rc) {
982                 rc1 = ll_sa_entry_to_stated(sai, entry,
983                                         rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
984                 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
985                         cfs_waitq_signal(&sai->sai_waitq);
986         } else {
987                 sai->sai_sent++;
988         }
989
990         sai->sai_index++;
991         /* drop the refcount on the entry taken in ll_sa_entry_alloc() */
992         ll_sa_entry_put(sai, entry);
993
994         EXIT;
995 }
996
997 static int ll_agl_thread(void *arg)
998 {
999         struct dentry            *parent = (struct dentry *)arg;
1000         struct inode             *dir    = parent->d_inode;
1001         struct ll_inode_info     *plli   = ll_i2info(dir);
1002         struct ll_inode_info     *clli;
1003         struct ll_sb_info        *sbi    = ll_i2sbi(dir);
1004         struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
1005         struct ptlrpc_thread     *thread = &sai->sai_agl_thread;
1006         struct l_wait_info        lwi    = { 0 };
1007         ENTRY;
1008
1009         {
1010                 char pname[16];
1011                 snprintf(pname, 15, "ll_agl_%u", plli->lli_opendir_pid);
1012                 cfs_daemonize(pname);
1013         }
1014
1015         CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
1016                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1017
1018         atomic_inc(&sbi->ll_agl_total);
1019         cfs_spin_lock(&plli->lli_agl_lock);
1020         sai->sai_agl_valid = 1;
1021         thread_set_flags(thread, SVC_RUNNING);
1022         cfs_spin_unlock(&plli->lli_agl_lock);
1023         cfs_waitq_signal(&thread->t_ctl_waitq);
1024
1025         while (1) {
1026                 l_wait_event(thread->t_ctl_waitq,
1027                              !agl_list_empty(sai) ||
1028                              !thread_is_running(thread),
1029                              &lwi);
1030
1031                 if (!thread_is_running(thread))
1032                         break;
1033
1034                 cfs_spin_lock(&plli->lli_agl_lock);
1035                 /* The statahead thread may have helped to process AGL
1036                  * entries, so check whether the list is empty again. */
1037                 if (!agl_list_empty(sai)) {
1038                         clli = agl_first_entry(sai);
1039                         cfs_list_del_init(&clli->lli_agl_list);
1040                         cfs_spin_unlock(&plli->lli_agl_lock);
1041                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
1042                 } else {
1043                         cfs_spin_unlock(&plli->lli_agl_lock);
1044                 }
1045         }
1046
1047         cfs_spin_lock(&plli->lli_agl_lock);
1048         sai->sai_agl_valid = 0;
1049         while (!agl_list_empty(sai)) {
1050                 clli = agl_first_entry(sai);
1051                 cfs_list_del_init(&clli->lli_agl_list);
1052                 cfs_spin_unlock(&plli->lli_agl_lock);
1053                 clli->lli_agl_index = 0;
1054                 iput(&clli->lli_vfs_inode);
1055                 cfs_spin_lock(&plli->lli_agl_lock);
1056         }
1057         thread_set_flags(thread, SVC_STOPPED);
1058         cfs_spin_unlock(&plli->lli_agl_lock);
1059         cfs_waitq_signal(&thread->t_ctl_waitq);
1060         ll_sai_put(sai);
1061         CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
1062                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1063         RETURN(0);
1064 }
1065
1066 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1067 {
1068         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1069         struct l_wait_info    lwi    = { 0 };
1070         int                   rc;
1071         ENTRY;
1072
1073         CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
1074                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1075
1076         rc = cfs_create_thread(ll_agl_thread, parent, 0);
1077         if (rc < 0) {
1078                 CERROR("can't start ll_agl thread, rc: %d\n", rc);
1079                 RETURN_EXIT;
1080         }
1081
1082         l_wait_event(thread->t_ctl_waitq,
1083                      thread_is_running(thread) || thread_is_stopped(thread),
1084                      &lwi);
1085         EXIT;
1086 }
1087
1088 static int ll_statahead_thread(void *arg)
1089 {
1090         struct dentry            *parent = (struct dentry *)arg;
1091         struct inode             *dir    = parent->d_inode;
1092         struct ll_inode_info     *plli   = ll_i2info(dir);
1093         struct ll_inode_info     *clli;
1094         struct ll_sb_info        *sbi    = ll_i2sbi(dir);
1095         struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
1096         struct ptlrpc_thread     *thread = &sai->sai_thread;
1097         struct page              *page;
1098         __u64                     pos    = 0;
1099         int                       first  = 0;
1100         int                       rc     = 0;
1101         struct ll_dir_chain       chain;
1102         struct l_wait_info        lwi    = { 0 };
1103         ENTRY;
1104
1105         {
1106                 char pname[16];
1107                 snprintf(pname, 15, "ll_sa_%u", plli->lli_opendir_pid);
1108                 cfs_daemonize(pname);
1109         }
1110
1111         CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
1112                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1113
1114         if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1115                 ll_start_agl(parent, sai);
1116
1117         atomic_inc(&sbi->ll_sa_total);
1118         cfs_spin_lock(&plli->lli_sa_lock);
1119         thread_set_flags(thread, SVC_RUNNING);
1120         cfs_spin_unlock(&plli->lli_sa_lock);
1121         cfs_waitq_signal(&thread->t_ctl_waitq);
1122
1123         plli->lli_sa_pos = 0;
1124         ll_dir_chain_init(&chain);
1125         page = ll_get_dir_page(NULL, dir, pos, &chain);
1126
1127         while (1) {
1128                 struct lu_dirpage *dp;
1129                 struct lu_dirent  *ent;
1130
1131                 if (IS_ERR(page)) {
1132                         rc = PTR_ERR(page);
1133                         CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
1134                                "/"LPU64": [rc %d] [parent %u]\n",
1135                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1136                                rc, plli->lli_opendir_pid);
1137                         GOTO(out, rc);
1138                 }
1139
1140                 dp = page_address(page);
1141                 for (ent = lu_dirent_start(dp); ent != NULL;
1142                      ent = lu_dirent_next(ent)) {
1143                         __u64 hash;
1144                         int namelen;
1145                         char *name;
1146
1147                         hash = le64_to_cpu(ent->lde_hash);
1148                         if (unlikely(hash < pos))
1149                                 /*
1150                                  * Skip until we find target hash value.
1151                                  */
1152                                 continue;
1153
1154                         namelen = le16_to_cpu(ent->lde_namelen);
1155                         if (unlikely(namelen == 0))
1156                                 /*
1157                                  * Skip dummy record.
1158                                  */
1159                                 continue;
1160
1161                         name = ent->lde_name;
1162                         if (name[0] == '.') {
1163                                 if (namelen == 1) {
1164                                         /*
1165                                          * skip "."
1166                                          */
1167                                         continue;
1168                                 } else if (name[1] == '.' && namelen == 2) {
1169                                         /*
1170                                          * skip ".."
1171                                          */
1172                                         continue;
1173                                 } else if (!sai->sai_ls_all) {
1174                                         /*
1175                                          * skip hidden files.
1176                                          */
1177                                         sai->sai_skip_hidden++;
1178                                         continue;
1179                                 }
1180                         }
1181
1182                         /*
1183                          * don't stat-ahead first entry.
1184                          */
1185                         if (unlikely(++first == 1))
1186                                 continue;
1187
1188 keep_it:
1189                         l_wait_event(thread->t_ctl_waitq,
1190                                      !sa_sent_full(sai) ||
1191                                      !sa_received_empty(sai) ||
1192                                      !agl_list_empty(sai) ||
1193                                      !thread_is_running(thread),
1194                                      &lwi);
1195
1196 interpret_it:
1197                         while (!sa_received_empty(sai))
1198                                 do_statahead_interpret(sai, NULL);
1199
1200                         if (unlikely(!thread_is_running(thread))) {
1201                                 ll_release_page(page, 0);
1202                                 GOTO(out, rc = 0);
1203                         }
1204
1205                         /* If there is no window for metadata statahead, but
1206                          * there are some AGL entries to be triggered, then try
1207                          * to help process the AGL entries. */
1208                         if (sa_sent_full(sai)) {
1209                                 cfs_spin_lock(&plli->lli_agl_lock);
1210                                 while (!agl_list_empty(sai)) {
1211                                         clli = agl_first_entry(sai);
1212                                         cfs_list_del_init(&clli->lli_agl_list);
1213                                         cfs_spin_unlock(&plli->lli_agl_lock);
1214                                         ll_agl_trigger(&clli->lli_vfs_inode,
1215                                                        sai);
1216
1217                                         if (!sa_received_empty(sai))
1218                                                 goto interpret_it;
1219
1220                                         if (unlikely(
1221                                                 !thread_is_running(thread))) {
1222                                                 ll_release_page(page, 0);
1223                                                 GOTO(out, rc = 0);
1224                                         }
1225
1226                                         if (!sa_sent_full(sai))
1227                                                 goto do_it;
1228
1229                                         cfs_spin_lock(&plli->lli_agl_lock);
1230                                 }
1231                                 cfs_spin_unlock(&plli->lli_agl_lock);
1232
1233                                 goto keep_it;
1234                         }
1235
1236 do_it:
1237                         ll_statahead_one(parent, name, namelen);
1238                 }
1239                 pos = le64_to_cpu(dp->ldp_hash_end);
1240                 if (pos == MDS_DIR_END_OFF) {
1241                         /*
1242                          * End of directory reached.
1243                          */
1244                         ll_release_page(page, 0);
1245                         while (1) {
1246                                 l_wait_event(thread->t_ctl_waitq,
1247                                              !sa_received_empty(sai) ||
1248                                              sai->sai_sent == sai->sai_replied||
1249                                              !thread_is_running(thread),
1250                                              &lwi);
1251
1252                                 while (!sa_received_empty(sai))
1253                                         do_statahead_interpret(sai, NULL);
1254
1255                                 if (unlikely(!thread_is_running(thread)))
1256                                         GOTO(out, rc = 0);
1257
1258                                 if (sai->sai_sent == sai->sai_replied &&
1259                                     sa_received_empty(sai))
1260                                         break;
1261                         }
1262
1263                         cfs_spin_lock(&plli->lli_agl_lock);
1264                         while (!agl_list_empty(sai) &&
1265                                thread_is_running(thread)) {
1266                                 clli = agl_first_entry(sai);
1267                                 cfs_list_del_init(&clli->lli_agl_list);
1268                                 cfs_spin_unlock(&plli->lli_agl_lock);
1269                                 ll_agl_trigger(&clli->lli_vfs_inode, sai);
1270                                 cfs_spin_lock(&plli->lli_agl_lock);
1271                         }
1272                         cfs_spin_unlock(&plli->lli_agl_lock);
1273
1274                         GOTO(out, rc = 0);
1275                 } else if (1) {
1276                         /*
1277                          * chain is exhausted.
1278                          * Normal case: continue to the next page.
1279                          */
1280                         ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1281                                               LDF_COLLIDE);
1282                         plli->lli_sa_pos = pos;
1283                         sai->sai_in_readpage = 1;
1284                         page = ll_get_dir_page(NULL, dir, pos, &chain);
1285                         sai->sai_in_readpage = 0;
1286                 } else {
1287                         LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1288                         ll_release_page(page, 1);
1289                         /*
1290                          * go into overflow page.
1291                          */
1292                 }
1293         }
1294         EXIT;
1295
1296 out:
1297         if (sai->sai_agl_valid) {
1298                 struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
1299
1300                 cfs_spin_lock(&plli->lli_agl_lock);
1301                 thread_set_flags(agl_thread, SVC_STOPPING);
1302                 cfs_spin_unlock(&plli->lli_agl_lock);
1303                 cfs_waitq_signal(&agl_thread->t_ctl_waitq);
1304
1305                 CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
1306                        cfs_curproc_pid());
1307                 l_wait_event(agl_thread->t_ctl_waitq,
1308                              thread_is_stopped(agl_thread),
1309                              &lwi);
1310         }
1311
1312         ll_dir_chain_fini(&chain);
1313         cfs_spin_lock(&plli->lli_sa_lock);
1314         if (!sa_received_empty(sai)) {
1315                 thread_set_flags(thread, SVC_STOPPING);
1316                 cfs_spin_unlock(&plli->lli_sa_lock);
1317
1318                 /* Release the resources held by received entries. */
1319                 while (!sa_received_empty(sai))
1320                         do_statahead_interpret(sai, NULL);
1321
1322                 cfs_spin_lock(&plli->lli_sa_lock);
1323         }
1324         thread_set_flags(thread, SVC_STOPPED);
1325         cfs_spin_unlock(&plli->lli_sa_lock);
1326         cfs_waitq_signal(&sai->sai_waitq);
1327         cfs_waitq_signal(&thread->t_ctl_waitq);
1328         ll_sai_put(sai);
1329         CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
1330                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1331         dput(parent);
1332         return rc;
1333 }
1334
1335 /**
1336  * called in ll_file_release().
1337  */
1338 void ll_stop_statahead(struct inode *dir, void *key)
1339 {
1340         struct ll_inode_info *lli = ll_i2info(dir);
1341
1342         if (unlikely(key == NULL))
1343                 return;
1344
1345         cfs_spin_lock(&lli->lli_sa_lock);
1346         if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
1347                 cfs_spin_unlock(&lli->lli_sa_lock);
1348                 return;
1349         }
1350
1351         lli->lli_opendir_key = NULL;
1352
1353         if (lli->lli_sai) {
1354                 struct l_wait_info lwi = { 0 };
1355                 struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
1356
1357                 if (!thread_is_stopped(thread)) {
1358                         thread_set_flags(thread, SVC_STOPPING);
1359                         cfs_spin_unlock(&lli->lli_sa_lock);
1360                         cfs_waitq_signal(&thread->t_ctl_waitq);
1361
1362                         CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
1363                                cfs_curproc_pid());
1364                         l_wait_event(thread->t_ctl_waitq,
1365                                      thread_is_stopped(thread),
1366                                      &lwi);
1367                 } else {
1368                         cfs_spin_unlock(&lli->lli_sa_lock);
1369                 }
1370
1371                 /*
1372                  * Put the ref which was held at the first statahead_enter.
1373                  * It may not be the last ref, since some statahead requests
1374                  * may still be in flight.
1375                  */
1376                 ll_sai_put(lli->lli_sai);
1377         } else {
1378                 lli->lli_opendir_pid = 0;
1379                 cfs_spin_unlock(&lli->lli_sa_lock);
1380         }
1381 }
1382
1383 enum {
1384         /**
1385          * not the first dirent, or it is "."
1386          */
1387         LS_NONE_FIRST_DE = 0,
1388         /**
1389          * the first non-hidden dirent
1390          */
1391         LS_FIRST_DE,
1392         /**
1393          * the first hidden dirent, i.e. its name starts with "."
1394          */
1395         LS_FIRST_DOT_DE
1396 };
1397
1398 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1399 {
1400         struct ll_inode_info *lli    = ll_i2info(dir);
1401         struct ll_dir_chain   chain;
1402         struct qstr          *target = &dentry->d_name;
1403         struct page          *page;
1404         __u64                 pos    = 0;
1405         int                   dot_de;
1406         int                   rc     = LS_NONE_FIRST_DE;
1407         ENTRY;
1408
1409         lli->lli_sa_pos = 0;
1410         ll_dir_chain_init(&chain);
1411         page = ll_get_dir_page(NULL, dir, pos, &chain);
1412
1413         while (1) {
1414                 struct lu_dirpage *dp;
1415                 struct lu_dirent  *ent;
1416
1417                 if (IS_ERR(page)) {
1418                         struct ll_inode_info *lli = ll_i2info(dir);
1419
1420                         rc = PTR_ERR(page);
1421                         CERROR("error reading dir "DFID" at "LPU64": "
1422                                "[rc %d] [parent %u]\n",
1423                                PFID(ll_inode2fid(dir)), pos,
1424                                rc, lli->lli_opendir_pid);
1425                         break;
1426                 }
1427
1428                 dp = page_address(page);
1429                 for (ent = lu_dirent_start(dp); ent != NULL;
1430                      ent = lu_dirent_next(ent)) {
1431                         __u64 hash;
1432                         int namelen;
1433                         char *name;
1434
1435                         hash = le64_to_cpu(ent->lde_hash);
1436                         /* ll_get_dir_page() can return any page containing
1437                          * the given hash; it need not start at that hash. */
1438                         if (unlikely(hash < pos))
1439                                 continue;
1440
1441                         namelen = le16_to_cpu(ent->lde_namelen);
1442                         if (unlikely(namelen == 0))
1443                                 /*
1444                                  * skip dummy record.
1445                                  */
1446                                 continue;
1447
1448                         name = ent->lde_name;
1449                         if (name[0] == '.') {
1450                                 if (namelen == 1)
1451                                         /*
1452                                          * skip "."
1453                                          */
1454                                         continue;
1455                                 else if (name[1] == '.' && namelen == 2)
1456                                         /*
1457                                          * skip ".."
1458                                          */
1459                                         continue;
1460                                 else
1461                                         dot_de = 1;
1462                         } else {
1463                                 dot_de = 0;
1464                         }
1465
1466                         if (dot_de && target->name[0] != '.') {
1467                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1468                                        target->len, target->name,
1469                                        namelen, name);
1470                                 continue;
1471                         }
1472
1473                         if (target->len != namelen ||
1474                             memcmp(target->name, name, namelen) != 0)
1475                                 rc = LS_NONE_FIRST_DE;
1476                         else if (!dot_de)
1477                                 rc = LS_FIRST_DE;
1478                         else
1479                                 rc = LS_FIRST_DOT_DE;
1480
1481                         ll_release_page(page, 0);
1482                         GOTO(out, rc);
1483                 }
1484                 pos = le64_to_cpu(dp->ldp_hash_end);
1485                 if (pos == MDS_DIR_END_OFF) {
1486                         /*
1487                          * End of directory reached.
1488                          */
1489                         ll_release_page(page, 0);
1490                         break;
1491                 } else if (1) {
1492                         /*
1493                          * Normal case: continue to the next page.  Note that
1494                          * "else if (1)" leaves the overflow branch below unused.
1495                          */
1496                         ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
1497                                               LDF_COLLIDE);
1498                         lli->lli_sa_pos = pos;
1499                         page = ll_get_dir_page(NULL, dir, pos, &chain);
1500                 } else {
1501                         /*
1502                          * go into overflow page.
1503                          */
1504                         LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1505                         ll_release_page(page, 1);
1506                 }
1507         }
1508         EXIT;
1509
1510 out:
1511         ll_dir_chain_fini(&chain);
1512         return rc;
1513 }
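
/*
 * Minimal sketch (not built) of the page walk used by is_first_dirent()
 * above, kept only to highlight the hash bookkeeping: ll_get_dir_page()
 * may return a page that starts before the requested position, so smaller
 * hashes are skipped, and the walk resumes from ldp_hash_end of each page
 * until MDS_DIR_END_OFF.  Page release is omitted for brevity.
 *
 *	__u64 pos = 0;
 *	struct page *page = ll_get_dir_page(NULL, dir, pos, &chain);
 *
 *	while (!IS_ERR(page)) {
 *		struct lu_dirpage *dp = page_address(page);
 *		struct lu_dirent  *ent;
 *
 *		for (ent = lu_dirent_start(dp); ent != NULL;
 *		     ent = lu_dirent_next(ent)) {
 *			if (le64_to_cpu(ent->lde_hash) < pos)
 *				continue;	// page began before "pos"
 *			// ... examine the entry ...
 *		}
 *
 *		pos = le64_to_cpu(dp->ldp_hash_end);
 *		if (pos == MDS_DIR_END_OFF)
 *			break;			// end of directory
 *		page = ll_get_dir_page(NULL, dir, pos, &chain);
 *	}
 */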
1514
1515 static void
1516 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1517 {
1518         struct ptlrpc_thread *thread = &sai->sai_thread;
1519         struct ll_sb_info    *sbi    = ll_i2sbi(sai->sai_inode);
1520         int                   hit;
1521         ENTRY;
1522
1523         if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
1524                 hit = 1;
1525         else
1526                 hit = 0;
1527
1528         ll_sa_entry_fini(sai, entry);
1529         if (hit) {
1530                 sai->sai_hit++;
1531                 sai->sai_consecutive_miss = 0;
1532                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1533         } else {
1534                 struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
1535
1536                 sai->sai_miss++;
1537                 sai->sai_consecutive_miss++;
1538                 if (sa_low_hit(sai) && thread_is_running(thread)) {
1539                         atomic_inc(&sbi->ll_sa_wrong);
1540                         CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1541                                "ratio too low: hit/miss "LPU64"/"LPU64
1542                                ", sent/replied "LPU64"/"LPU64", stopping "
1543                                "statahead thread: pid %d\n",
1544                                PFID(&lli->lli_fid), sai->sai_hit,
1545                                sai->sai_miss, sai->sai_sent,
1546                                sai->sai_replied, cfs_curproc_pid());
1547                         cfs_spin_lock(&lli->lli_sa_lock);
1548                         if (!thread_is_stopped(thread))
1549                                 thread_set_flags(thread, SVC_STOPPING);
1550                         cfs_spin_unlock(&lli->lli_sa_lock);
1551                 }
1552         }
1553
1554         if (!thread_is_stopped(thread))
1555                 cfs_waitq_signal(&thread->t_ctl_waitq);
1556
1557         EXIT;
1558 }
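
/*
 * Illustrative sketch (not built) of the window accounting performed by
 * ll_sai_unplug() above.  The starting window of 1 and the "ll_sa_max"
 * cap of 32 are assumed values, not taken from this file.
 *
 *	unsigned int sai_max = 1, ll_sa_max = 32;
 *
 *	// every cache hit doubles the window, capped at ll_sa_max:
 *	// 2, 4, 8, 16, 32, 32, ...
 *	sai_max = min(2 * sai_max, ll_sa_max);
 *
 *	// every miss bumps sai_miss/sai_consecutive_miss; once sa_low_hit()
 *	// reports a poor ratio, the statahead thread is set SVC_STOPPING.
 */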
1559
1560 /**
1561  * Start the statahead thread if this is the first dir entry.
1562  * Otherwise, if a thread is already running, wait until it is ahead of us.
1563  * \retval 1       -- the entry was found in cache with its lock; the caller
1564  *                    needs to do nothing.
1565  * \retval 0       -- the entry was found in cache without a lock; the caller
1566  *                    needs to refresh it from the MDS.
1567  * \retval others  -- the caller must process the lookup as non-statahead.
1568  */
1569 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1570                        int only_unplug)
1571 {
1572         struct ll_inode_info     *lli   = ll_i2info(dir);
1573         struct ll_statahead_info *sai   = lli->lli_sai;
1574         struct dentry            *parent;
1575         struct ll_sa_entry       *entry;
1576         struct ptlrpc_thread     *thread;
1577         struct l_wait_info        lwi   = { 0 };
1578         int                       rc    = 0;
1579         ENTRY;
1580
1581         LASSERT(lli->lli_opendir_pid == cfs_curproc_pid());
1582
1583         if (sai) {
1584                 thread = &sai->sai_thread;
1585                 if (unlikely(thread_is_stopped(thread) &&
1586                              cfs_list_empty(&sai->sai_entries_stated))) {
1587                         /* release the statahead resources */
1588                         ll_stop_statahead(dir, lli->lli_opendir_key);
1589                         RETURN(-EAGAIN);
1590                 }
1591
1592                 if ((*dentryp)->d_name.name[0] == '.') {
1593                         if (sai->sai_ls_all ||
1594                             sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1595                                 /*
1596                                  * Either the hidden dentry is the first one, or the
1597                                  * statahead thread skipped no more hidden dentries
1598                                  * than we have missed before "sai_ls_all" was enabled.
1599                                  */
1600                         } else {
1601                                 if (!sai->sai_ls_all)
1602                                         /*
1603                                          * The hidden dentry was not the first
1604                                          * one, so "sai_ls_all" was not set and
1605                                          * an "ls -al" would miss it.  Enable
1606                                          * "sai_ls_all" for this case.
1607                                          */
1608                                         sai->sai_ls_all = 1;
1609
1610                                 /*
1611                                  * This "getattr" was skipped before
1612                                  * "sai_ls_all" was enabled above.
1613                                  */
1614                                 sai->sai_miss_hidden++;
1615                                 RETURN(-EAGAIN);
1616                         }
1617                 }
1618
1619                 entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1620                 if (entry == NULL || only_unplug) {
1621                         ll_sai_unplug(sai, entry);
1622                         RETURN(entry ? 1 : -EAGAIN);
1623                 }
1624
1625                 while (!ll_sa_entry_stated(entry) &&
1626                        sai->sai_in_readpage &&
1627                        !sa_received_empty(sai))
1628                         do_statahead_interpret(sai, entry);
1629
1630                 if (!ll_sa_entry_stated(entry)) {
1631                         sai->sai_index_wait = entry->se_index;
1632                         lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1633                                                LWI_ON_SIGNAL_NOOP, NULL);
1634                         rc = l_wait_event(sai->sai_waitq,
1635                                           ll_sa_entry_stated(entry) ||
1636                                           thread_is_stopped(thread),
1637                                           &lwi);
1638                         if (rc < 0) {
1639                                 ll_sai_unplug(sai, entry);
1640                                 RETURN(-EAGAIN);
1641                         }
1642                 }
1643
1644                 if (entry->se_stat == SA_ENTRY_SUCC &&
1645                     entry->se_inode != NULL) {
1646                         struct inode *inode = entry->se_inode;
1647                         struct lookup_intent it = { .it_op = IT_GETATTR,
1648                                                     .d.lustre.it_lock_handle =
1649                                                      entry->se_handle };
1650                         struct ll_dentry_data *lld;
1651                         __u64 bits;
1652
1653                         rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1654                                                 ll_inode2fid(inode), &bits);
1655                         if (rc == 1) {
1656                                 if ((*dentryp)->d_inode == NULL) {
1657                                         *dentryp = ll_find_alias(inode,
1658                                                                  *dentryp);
1659                                         lld = ll_d2d(*dentryp);
1660                                         if (unlikely(lld == NULL))
1661                                                 ll_dops_init(*dentryp, 1, 1);
1662                                 } else {
1663                                         LASSERT((*dentryp)->d_inode == inode);
1664
1665                                         ll_dentry_rehash(*dentryp, 0);
1666                                         iput(inode);
1667                                 }
1668                                 entry->se_inode = NULL;
1669
1670                                 ll_dentry_reset_flags(*dentryp, bits);
1671                                 ll_intent_release(&it);
1672                         }
1673                 }
1674
1675                 ll_sai_unplug(sai, entry);
1676                 RETURN(rc);
1677         }
1678
1679         /* We are the "lli_opendir_pid" owner; only we can set "lli_sai". */
1680         rc = is_first_dirent(dir, *dentryp);
1681         if (rc == LS_NONE_FIRST_DE)
1682                 /* Not an "ls -{a}l" operation; no need to statahead for it. */
1683                 GOTO(out, rc = -EAGAIN);
1684
1685         sai = ll_sai_alloc();
1686         if (sai == NULL)
1687                 GOTO(out, rc = -ENOMEM);
1688
1689         sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1690         sai->sai_inode = igrab(dir);
1691         if (unlikely(sai->sai_inode == NULL)) {
1692                 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1693                       PFID(&lli->lli_fid));
1694                 GOTO(out, rc = -ESTALE);
1695         }
1696
1697         /* take a reference on the parent here; it is dropped (dput) in ll_statahead_thread */
1698         parent = dget((*dentryp)->d_parent);
1699         if (unlikely(sai->sai_inode != parent->d_inode)) {
1700                 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1701
1702                 CWARN("Race condition, someone changed %.*s just now: "
1703                       "old parent "DFID", new parent "DFID"\n",
1704                       (*dentryp)->d_name.len, (*dentryp)->d_name.name,
1705                       PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1706                 dput(parent);
1707                 iput(sai->sai_inode);
1708                 GOTO(out, rc = -EAGAIN);
1709         }
1710
1711         CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
1712                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
1713
1714         lli->lli_sai = sai;
1715         rc = cfs_create_thread(ll_statahead_thread, parent, 0);
1716         thread = &sai->sai_thread;
1717         if (rc < 0) {
1718                 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1719                 dput(parent);
1720                 lli->lli_opendir_key = NULL;
1721                 thread_set_flags(thread, SVC_STOPPED);
1722                 ll_sai_put(sai);
1723                 LASSERT(lli->lli_sai == NULL);
1724                 RETURN(-EAGAIN);
1725         }
1726
1727         l_wait_event(thread->t_ctl_waitq,
1728                      thread_is_running(thread) || thread_is_stopped(thread),
1729                      &lwi);
1730
1731         /*
1732          * We don't stat-ahead for the first dirent since we are already in
1733          * lookup, and -EEXIST also indicates that this is the first dirent.
1734          */
1735         RETURN(-EEXIST);
1736
1737 out:
1738         if (sai != NULL)
1739                 OBD_FREE_PTR(sai);
1740         cfs_spin_lock(&lli->lli_sa_lock);
1741         lli->lli_opendir_key = NULL;
1742         lli->lli_opendir_pid = 0;
1743         cfs_spin_unlock(&lli->lli_sa_lock);
1744         return rc;
1745 }
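
/*
 * Illustrative sketch only (not built): how a lookup path might consume the
 * return values documented in the comment above do_statahead_enter().  The
 * wrapper name my_lookup_statahead() and its own return convention are
 * assumptions for the example.
 *
 *	static int my_lookup_statahead(struct inode *dir, struct dentry **de,
 *				       int only_unplug)
 *	{
 *		int rc = do_statahead_enter(dir, de, only_unplug);
 *
 *		if (rc == 1)
 *			return 0;	// entry and lock cached: nothing to do
 *		if (rc == 0)
 *			return 1;	// cached but unlocked: refresh from MDS
 *		// -EEXIST: first dirent, already being looked up;
 *		// -EAGAIN and other errors: fall back to the normal
 *		// (non-statahead) lookup path.
 *		return rc;
 *	}
 */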