1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #include <linux/fs.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/highmem.h>
41 #include <linux/pagemap.h>
42
43 #define DEBUG_SUBSYSTEM S_LLITE
44
45 #include <obd_support.h>
46 #include <lustre_lite.h>
47 #include <lustre_dlm.h>
48 #include "llite_internal.h"
49
50 #define SA_OMITTED_ENTRY_MAX 8ULL
51
52 typedef enum {
53         /** negative values are for error cases */
54         SA_ENTRY_INIT = 0,      /** init entry */
55         SA_ENTRY_SUCC = 1,      /** stat succeeded */
56         SA_ENTRY_INVA = 2,      /** invalid entry */
57         SA_ENTRY_DEST = 3,      /** entry to be destroyed */
58 } se_stat_t;
59
60 struct ll_sa_entry {
61         /* link into sai->sai_entries */
62         struct list_head        se_link;
63         /* link into sai->sai_entries_{received,stated} */
64         struct list_head        se_list;
65         /* link into sai hash table locally */
66         struct list_head        se_hash;
67         /* entry reference count */
68         atomic_t                se_refcount;
69         /* entry index in the sai */
70         __u64                   se_index;
71         /* low layer ldlm lock handle */
72         __u64                   se_handle;
73         /* entry status */
74         se_stat_t               se_stat;
75         /* entry size, contains name */
76         int                     se_size;
77         /* pointer to async getattr enqueue info */
78         struct md_enqueue_info *se_minfo;
79         /* pointer to the async getattr request */
80         struct ptlrpc_request  *se_req;
81         /* pointer to the target inode */
82         struct inode           *se_inode;
83         /* entry name */
84         struct qstr             se_qstr;
85 };
86
87 static unsigned int sai_generation = 0;
88 static DEFINE_SPINLOCK(sai_generation_lock);
89
90 static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
91 {
92         return list_empty(&entry->se_hash);
93 }
94
95 /*
96  * Only the caller can release the entry; it is necessary to hold the lock.
97  */
98 static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
99 {
100         smp_rmb();
101         return (entry->se_stat != SA_ENTRY_INIT);
102 }
103
104 static inline int ll_sa_entry_hash(int val)
105 {
106         return val & LL_SA_CACHE_MASK;
107 }
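
/*
 * Illustrative sketch (not part of the original source): how a name maps to
 * a statahead cache bucket.  The qstr hash computed by full_name_hash() is
 * masked down to one of the LL_SA_CACHE_SIZE buckets (the size/mask values
 * are defined in llite_internal.h), e.g.:
 *
 *        hash   = full_name_hash("foo", 3);
 *        bucket = ll_sa_entry_hash(hash);      /* hash & LL_SA_CACHE_MASK */
 *        list_add_tail(&entry->se_hash, &sai->sai_cache[bucket]);
 *
 * ll_sa_entry_enhash()/ll_sa_entry_unhash() below use the same mask, so
 * insertion and lookup always agree on the bucket.
 */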
108
109 /*
110  * Insert entry to hash SA table.
111  */
112 static inline void
113 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
114 {
115         int i = ll_sa_entry_hash(entry->se_qstr.hash);
116
117         spin_lock(&sai->sai_cache_lock[i]);
118         list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
119         spin_unlock(&sai->sai_cache_lock[i]);
120 }
121
122 /*
123  * Remove entry from SA table.
124  */
125 static inline void
126 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
127 {
128         int i = ll_sa_entry_hash(entry->se_qstr.hash);
129
130         spin_lock(&sai->sai_cache_lock[i]);
131         list_del_init(&entry->se_hash);
132         spin_unlock(&sai->sai_cache_lock[i]);
133 }
134
135 static inline int agl_should_run(struct ll_statahead_info *sai,
136                                  struct inode *inode)
137 {
138         return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
139 }
140
141 static inline struct ll_sa_entry *
142 sa_first_received_entry(struct ll_statahead_info *sai)
143 {
144         return list_entry(sai->sai_entries_received.next,
145                           struct ll_sa_entry, se_list);
146 }
147
148 static inline struct ll_inode_info *
149 agl_first_entry(struct ll_statahead_info *sai)
150 {
151         return list_entry(sai->sai_entries_agl.next,
152                           struct ll_inode_info, lli_agl_list);
153 }
154
155 static inline int sa_sent_full(struct ll_statahead_info *sai)
156 {
157         return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
158 }
159
160 static inline int sa_received_empty(struct ll_statahead_info *sai)
161 {
162         return list_empty(&sai->sai_entries_received);
163 }
164
165 static inline int agl_list_empty(struct ll_statahead_info *sai)
166 {
167         return list_empty(&sai->sai_entries_agl);
168 }
169
170 /**
171  * Low hit means either:
172  * (1) the hit ratio is less than 80%,
173  * or
174  * (2) there are more than 8 consecutive misses.
175  */
176 static inline int sa_low_hit(struct ll_statahead_info *sai)
177 {
178         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
179                 (sai->sai_consecutive_miss > 8));
180 }
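
/*
 * Worked example (illustrative only): with sai_hit = 10 and sai_miss = 3 the
 * hit ratio is 10/13 (about 77%), and 10 < 4 * 3, so sa_low_hit() is true
 * and the statahead thread will stop.  With sai_hit = 16 and sai_miss = 3
 * the ratio is about 84%, 16 >= 4 * 3, so statahead keeps running unless
 * sai_consecutive_miss has exceeded 8.
 */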
181
182 /*
183  * If the given index is behind the statahead window by more than
184  * SA_OMITTED_ENTRY_MAX entries, then it is old.
185  */
186 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
187 {
188         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
189                  sai->sai_index);
190 }
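
/*
 * Worked example (illustrative only): the check above is equivalent to
 * "index < sai_index - sai_max - SA_OMITTED_ENTRY_MAX", written as an
 * addition to avoid unsigned underflow.  With sai_max = 32,
 * SA_OMITTED_ENTRY_MAX = 8 and sai_index = 100, any entry whose index is
 * below 60 is considered old and may be dropped by ll_sa_entry_fini().
 */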
191
192 /*
193  * Insert the entry at the tail of sai_entries when it is initialized.
194  */
195 static struct ll_sa_entry *
196 ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index, const char *name,
197                  int len)
198 {
199         struct ll_inode_info *lli;
200         struct ll_sa_entry   *entry;
201         int                   entry_size;
202         char                 *dname;
203         ENTRY;
204
205         entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
206         OBD_ALLOC(entry, entry_size);
207         if (unlikely(entry == NULL))
208                 RETURN(ERR_PTR(-ENOMEM));
209
210         CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n",
211                len, name, entry, index);
212
213         entry->se_index = index;
214
215         /*
216          * Statahead entry reference rules:
217          *
218          * 1) When statahead entry is initialized, its reference is set as 2.
219          *    One reference is used by the directory scanner. When the scanner
220          *    searches the statahead cache for the given name, it can perform
221          *    lockless hash lookup (only the scanner can remove entry from hash
222  *    list), and once found, it need not call "atomic_inc()" on the
223  *    entry reference, which improves performance. After using the
224  *    statahead entry, the scanner calls "atomic_dec()" to drop the
225  *    reference held since initialization. If it is the last reference,
226          *    the statahead entry will be freed.
227          *
228          * 2) All other threads, including statahead thread and ptlrpcd thread,
229  *    must hold a reference on the entry while processing it, to
230  *    guarantee that the entry will not be released by the
231          *    directory scanner. After processing the entry, these threads will
232          *    drop the entry reference. If it is the last reference, the entry
233          *    will be freed.
234          *
235  *    The second reference taken when the statahead entry is initialized
236  *    is used by the statahead thread, following rule 2).
237          */
238         atomic_set(&entry->se_refcount, 2);
239         entry->se_stat = SA_ENTRY_INIT;
240         entry->se_size = entry_size;
241         dname = (char *)entry + sizeof(struct ll_sa_entry);
242         memcpy(dname, name, len);
243         dname[len] = 0;
244         entry->se_qstr.hash = full_name_hash(name, len);
245         entry->se_qstr.len = len;
246         entry->se_qstr.name = dname;
247
248         lli = ll_i2info(sai->sai_inode);
249         spin_lock(&lli->lli_sa_lock);
250         list_add_tail(&entry->se_link, &sai->sai_entries);
251         INIT_LIST_HEAD(&entry->se_list);
252         ll_sa_entry_enhash(sai, entry);
253         spin_unlock(&lli->lli_sa_lock);
254
255         atomic_inc(&sai->sai_cache_count);
256
257         RETURN(entry);
258 }
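
/*
 * Illustrative sketch (not part of the original source) of where the two
 * initial references are dropped on the common path:
 *
 *        ll_sa_entry_alloc()                     se_refcount == 2
 *        ll_statahead_one()
 *            ll_sa_entry_put()                   statahead thread drops its ref
 *        ... directory scanner hits the entry ...
 *        ll_sai_unplug() -> ll_sa_entry_fini()
 *            do_sa_entry_fini() -> ll_sa_entry_put()   scanner ref dropped;
 *                                                      freed if it is the last
 *
 * Other threads (e.g. the ptlrpcd callback via ll_sa_entry_get_byindex())
 * take and drop their own temporary references around their work.
 */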
259
260 /*
261  * Used by the directory scanner to look up an entry by name.
262  *
263  * Only the caller can remove the entry from hash, so it is unnecessary to hold
264  * the hash lock. It is the caller's duty to release the initial refcount on
265  * the entry, so it is also unnecessary to increase the refcount here.
266  */
267 static struct ll_sa_entry *
268 ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
269 {
270         struct ll_sa_entry *entry;
271         int i = ll_sa_entry_hash(qstr->hash);
272
273         list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
274                 if (entry->se_qstr.hash == qstr->hash &&
275                     entry->se_qstr.len == qstr->len &&
276                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
277                         return entry;
278         }
279         return NULL;
280 }
281
282 /*
283  * Used by the async getattr request callback to find an entry by index.
284  *
285  * Called under lli_sa_lock to prevent others from changing the list during
286  * the search. The entry refcount is increased before returning to guarantee
287  * that the entry cannot be freed by others.
288  */
289 static struct ll_sa_entry *
290 ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
291 {
292         struct ll_sa_entry *entry;
293
294         list_for_each_entry(entry, &sai->sai_entries, se_link) {
295                 if (entry->se_index == index) {
296                         LASSERT(atomic_read(&entry->se_refcount) > 0);
297                         atomic_inc(&entry->se_refcount);
298                         return entry;
299                 }
300                 if (entry->se_index > index)
301                         break;
302         }
303         return NULL;
304 }
305
306 static void ll_sa_entry_put(struct ll_statahead_info *sai,
307                              struct ll_sa_entry *entry)
308 {
309         if (atomic_dec_and_test(&entry->se_refcount)) {
310                 CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n",
311                        entry->se_qstr.len, entry->se_qstr.name, entry,
312                        entry->se_index);
313
314                 LASSERT(list_empty(&entry->se_link));
315                 LASSERT(list_empty(&entry->se_list));
316                 LASSERT(ll_sa_entry_unhashed(entry));
317
318                 if (entry->se_inode)
319                         iput(entry->se_inode);
320
321                 OBD_FREE(entry, entry->se_size);
322                 atomic_dec(&sai->sai_cache_count);
323         }
324 }
325
326 static inline void
327 do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
328 {
329         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
330
331         LASSERT(!ll_sa_entry_unhashed(entry));
332         LASSERT(!list_empty(&entry->se_link));
333
334         ll_sa_entry_unhash(sai, entry);
335
336         spin_lock(&lli->lli_sa_lock);
337         entry->se_stat = SA_ENTRY_DEST;
338         list_del_init(&entry->se_link);
339         if (likely(!list_empty(&entry->se_list)))
340                 list_del_init(&entry->se_list);
341         spin_unlock(&lli->lli_sa_lock);
342
343         ll_sa_entry_put(sai, entry);
344 }
345
346 /*
347  * Delete the entry from the sai_entries_stated list at fini time.
348  */
349 static void
350 ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
351 {
352         struct ll_sa_entry *pos, *next;
353
354         if (entry)
355                 do_sa_entry_fini(sai, entry);
356
357         /* drop old entries; only the 'scanner' does this, no lock needed */
358         list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
359                 if (!is_omitted_entry(sai, pos->se_index))
360                         break;
361                 /* keep those whose statahead RPC not finished */
362                 if (pos->se_stat == SA_ENTRY_SUCC ||
363                     pos->se_stat == SA_ENTRY_INVA)
364                         do_sa_entry_fini(sai, pos);
365         }
366 }
367
368 /*
369  * Called under lli_sa_lock.
370  */
371 static void
372 __sa_entry_post_stat(struct ll_statahead_info *sai, struct ll_sa_entry *entry,
373                      se_stat_t stat)
374 {
375         struct ll_sa_entry *se;
376         struct list_head *pos = &sai->sai_entries_stated;
377
378         LASSERT(entry->se_stat == SA_ENTRY_INIT);
379
380         if (!list_empty(&entry->se_list))
381                 list_del_init(&entry->se_list);
382
383         list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
384                 if (se->se_index < entry->se_index) {
385                         pos = &se->se_list;
386                         break;
387                 }
388         }
389
390         list_add(&entry->se_list, pos);
391         entry->se_stat = stat;
392 }
393
394 /*
395  * Release the resources used by the entry's RPC (intent and request),
396  * then move the entry to sai_entries_stated, keeping that list sorted
397  * by index.
398  */
399 static void
400 sa_entry_post_stat(struct ll_statahead_info *sai, struct ll_sa_entry *entry,
401                    se_stat_t stat)
402 {
403         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
404         struct md_enqueue_info *minfo = entry->se_minfo;
405         struct ptlrpc_request *req = entry->se_req;
406
407         /* release resources used in RPC */
408         if (minfo) {
409                 entry->se_minfo = NULL;
410                 ll_intent_release(&minfo->mi_it);
411                 iput(minfo->mi_dir);
412                 OBD_FREE_PTR(minfo);
413         }
414
415         if (req) {
416                 entry->se_req = NULL;
417                 ptlrpc_req_finished(req);
418         }
419
420         spin_lock(&lli->lli_sa_lock);
421         __sa_entry_post_stat(sai, entry, stat);
422         spin_unlock(&lli->lli_sa_lock);
423 }
424
425 /*
426  * Insert inode into the list of sai_entries_agl.
427  */
428 static void ll_agl_add(struct ll_statahead_info *sai,
429                        struct inode *inode, int index)
430 {
431         struct ll_inode_info *child  = ll_i2info(inode);
432         struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
433         int                   added  = 0;
434
435         spin_lock(&child->lli_agl_lock);
436         if (child->lli_agl_index == 0) {
437                 child->lli_agl_index = index;
438                 spin_unlock(&child->lli_agl_lock);
439
440                 LASSERT(list_empty(&child->lli_agl_list));
441
442                 igrab(inode);
443                 spin_lock(&parent->lli_agl_lock);
444                 if (agl_list_empty(sai))
445                         added = 1;
446                 list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
447                 spin_unlock(&parent->lli_agl_lock);
448         } else {
449                 spin_unlock(&child->lli_agl_lock);
450         }
451
452         if (added > 0)
453                 wake_up(&sai->sai_agl_thread.t_ctl_waitq);
454 }
455
456 static struct ll_statahead_info *ll_sai_alloc(void)
457 {
458         struct ll_statahead_info *sai;
459         int                       i;
460         ENTRY;
461
462         OBD_ALLOC_PTR(sai);
463         if (!sai)
464                 RETURN(NULL);
465
466         atomic_set(&sai->sai_refcount, 1);
467
468         spin_lock(&sai_generation_lock);
469         sai->sai_generation = ++sai_generation;
470         if (unlikely(sai_generation == 0))
471                 sai->sai_generation = ++sai_generation;
472         spin_unlock(&sai_generation_lock);
473
474         sai->sai_max = LL_SA_RPC_MIN;
475         sai->sai_index = 1;
476         init_waitqueue_head(&sai->sai_waitq);
477         init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
478         init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
479
480         INIT_LIST_HEAD(&sai->sai_entries);
481         INIT_LIST_HEAD(&sai->sai_entries_received);
482         INIT_LIST_HEAD(&sai->sai_entries_stated);
483         INIT_LIST_HEAD(&sai->sai_entries_agl);
484
485         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
486                 INIT_LIST_HEAD(&sai->sai_cache[i]);
487                 spin_lock_init(&sai->sai_cache_lock[i]);
488         }
489         atomic_set(&sai->sai_cache_count, 0);
490
491         RETURN(sai);
492 }
493
494 static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
495 {
496         struct ll_inode_info *lli = ll_i2info(dir);
497         struct ll_statahead_info *sai = NULL;
498
499         spin_lock(&lli->lli_sa_lock);
500         sai = lli->lli_sai;
501         if (sai != NULL)
502                 atomic_inc(&sai->sai_refcount);
503         spin_unlock(&lli->lli_sa_lock);
504
505         return sai;
506 }
507
508 static void ll_sai_put(struct ll_statahead_info *sai)
509 {
510         struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
511
512         if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
513                 struct ll_sa_entry *entry, *next;
514                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
515
516                 lli->lli_sai = NULL;
517                 spin_unlock(&lli->lli_sa_lock);
518
519                 LASSERT(thread_is_stopped(&sai->sai_thread));
520                 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
521                 LASSERT(sai->sai_sent == sai->sai_replied);
522
523                 list_for_each_entry_safe(entry, next, &sai->sai_entries,
524                                          se_link)
525                         do_sa_entry_fini(sai, entry);
526
527                 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
528                 LASSERT(agl_list_empty(sai));
529                 LASSERT(atomic_read(&sai->sai_refcount) == 0);
530
531                 iput(sai->sai_inode);
532                 OBD_FREE_PTR(sai);
533                 atomic_dec(&sbi->ll_sa_running);
534         }
535 }
536
537 /* Do NOT forget to drop the inode refcount taken for sai_entries_agl. */
538 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
539 {
540         struct ll_inode_info *lli   = ll_i2info(inode);
541         __u64                 index = lli->lli_agl_index;
542         int                   rc;
543         ENTRY;
544
545         LASSERT(list_empty(&lli->lli_agl_list));
546
547         /* AGL may fall behind statahead by one entry */
548         if (is_omitted_entry(sai, index + 1)) {
549                 lli->lli_agl_index = 0;
550                 iput(inode);
551                 RETURN_EXIT;
552         }
553
554         /* Someone is in glimpse (sync or async), do nothing. */
555         rc = down_write_trylock(&lli->lli_glimpse_sem);
556         if (rc == 0) {
557                 lli->lli_agl_index = 0;
558                 iput(inode);
559                 RETURN_EXIT;
560         }
561
562         /*
563          * Someone triggered a glimpse within the last second.
564          * 1) The former glimpse succeeded with a glimpse lock granted by the
565          *    OST. If the lock is still cached on the client, AGL needs to do
566          *    nothing. If it has been cancelled by another client, AGL may be
567          *    unable to obtain a new lock, since AGL triggers no glimpse callback.
568          * 2) The former glimpse succeeded, but the OST did not grant a glimpse
569          *    lock. In that case it is quite possible that the OST will not grant
570          *    a glimpse lock for AGL either.
571          * 3) The former glimpse failed. Compared with the other two cases this
572          *    is relatively rare; AGL can ignore it without much effect on
573          *    performance.
574          */
575         if (lli->lli_glimpse_time != 0 &&
576             cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
577                 up_write(&lli->lli_glimpse_sem);
578                 lli->lli_agl_index = 0;
579                 iput(inode);
580                 RETURN_EXIT;
581         }
582
583         CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
584                DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
585
586         cl_agl(inode);
587         lli->lli_agl_index = 0;
588         lli->lli_glimpse_time = cfs_time_current();
589         up_write(&lli->lli_glimpse_sem);
590
591         CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
592                DFID", idx = "LPU64", rc = %d\n",
593                PFID(&lli->lli_fid), index, rc);
594
595         iput(inode);
596
597         EXIT;
598 }
599
600 /* prepare inode for received statahead entry, and add it into agl list */
601 static void sa_post_one(struct ll_statahead_info *sai,
602                         struct ll_sa_entry *entry)
603 {
604         struct inode           *dir   = sai->sai_inode;
605         struct inode           *child;
606         struct md_enqueue_info *minfo;
607         struct lookup_intent   *it;
608         struct ptlrpc_request  *req;
609         struct mdt_body        *body;
610         int                     rc    = 0;
611         ENTRY;
612
613         LASSERT(entry->se_handle != 0);
614
615         minfo = entry->se_minfo;
616         it = &minfo->mi_it;
617         req = entry->se_req;
618         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
619         if (body == NULL)
620                 GOTO(out, rc = -EFAULT);
621
622         child = entry->se_inode;
623         if (child == NULL) {
624                 /*
625                  * lookup.
626                  */
627                 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
628
629                 /* XXX: No fid in reply, this is probably a cross-ref case.
630                  * SA can't handle it yet. */
631                 if (body->mbo_valid & OBD_MD_MDS)
632                         GOTO(out, rc = -EAGAIN);
633         } else {
634                 /*
635                  * revalidate.
636                  */
637                 /* unlinked and re-created with the same name */
638                 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2,
639                                         &body->mbo_fid1))) {
640                         entry->se_inode = NULL;
641                         iput(child);
642                         child = NULL;
643                 }
644         }
645
646         it->d.lustre.it_lock_handle = entry->se_handle;
647         rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
648         if (rc != 1)
649                 GOTO(out, rc = -EAGAIN);
650
651         rc = ll_prep_inode(&child, req, dir->i_sb, it);
652         if (rc)
653                 GOTO(out, rc);
654
655         CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"(%p)\n",
656                ll_get_fsname(child->i_sb, NULL, 0),
657                PFID(ll_inode2fid(child)), child);
658         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
659
660         entry->se_inode = child;
661
662         if (agl_should_run(sai, child))
663                 ll_agl_add(sai, child, entry->se_index);
664
665         EXIT;
666
667 out:
668         /* The "sa_entry_post_stat()" will drop related ldlm ibits lock
669          * reference count by calling "ll_intent_drop_lock()" regardless of
670          * whether the above operations failed or not. Do not worry about calling
671          * "ll_intent_drop_lock()" more than once. */
672         sa_entry_post_stat(sai, entry, rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
673         if (entry->se_index == sai->sai_index_wait)
674                 wake_up(&sai->sai_waitq);
675         ll_sa_entry_put(sai, entry);
676 }
677
678 static void ll_post_statahead(struct ll_statahead_info *sai)
679 {
680         struct ll_inode_info *lli;
681
682         lli = ll_i2info(sai->sai_inode);
683
684         while (!sa_received_empty(sai)) {
685                 struct ll_sa_entry *entry;
686
687                 spin_lock(&lli->lli_sa_lock);
688                 if (unlikely(sa_received_empty(sai))) {
689                         spin_unlock(&lli->lli_sa_lock);
690                         break;
691                 }
692                 entry = sa_first_received_entry(sai);
693                 atomic_inc(&entry->se_refcount);
694                 list_del_init(&entry->se_list);
695                 spin_unlock(&lli->lli_sa_lock);
696
697                 sa_post_one(sai, entry);
698         }
699
700         spin_lock(&lli->lli_agl_lock);
701         while (!agl_list_empty(sai)) {
702                 struct ll_inode_info *clli;
703
704                 clli = agl_first_entry(sai);
705                 list_del_init(&clli->lli_agl_list);
706                 spin_unlock(&lli->lli_agl_lock);
707
708                 ll_agl_trigger(&clli->lli_vfs_inode, sai);
709
710                 spin_lock(&lli->lli_agl_lock);
711         }
712         spin_unlock(&lli->lli_agl_lock);
713 }
714
715 static int ll_statahead_interpret(struct ptlrpc_request *req,
716                                   struct md_enqueue_info *minfo, int rc)
717 {
718         struct lookup_intent *it = &minfo->mi_it;
719         struct inode *dir = minfo->mi_dir;
720         struct ll_inode_info *lli = ll_i2info(dir);
721         struct ll_statahead_info *sai;
722         struct ll_sa_entry *entry;
723         int wakeup;
724         ENTRY;
725
726         if (it_disposition(it, DISP_LOOKUP_NEG))
727                 rc = -ENOENT;
728
729         sai = ll_sai_get(dir);
730         LASSERT(sai != NULL);
731         LASSERT(!thread_is_stopped(&sai->sai_thread));
732
733         spin_lock(&lli->lli_sa_lock);
734         entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
735         LASSERT(entry != NULL);
736         if (rc != 0) {
737                 __sa_entry_post_stat(sai, entry, SA_ENTRY_INVA);
738                 wakeup = (entry->se_index == sai->sai_index_wait);
739         } else {
740                 entry->se_minfo = minfo;
741                 entry->se_req = ptlrpc_request_addref(req);
742                 /* Release the async ibits lock ASAP to avoid deadlock
743                  * when statahead thread tries to enqueue lock on parent
744                  * for readpage and another thread tries to enqueue a lock
745                  * on the child with the parent's lock held, e.g. unlink. */
746                 entry->se_handle = it->d.lustre.it_lock_handle;
747                 ll_intent_drop_lock(it);
748                 wakeup = sa_received_empty(sai);
749                 list_add_tail(&entry->se_list, &sai->sai_entries_received);
750         }
751         sai->sai_replied++;
752         spin_unlock(&lli->lli_sa_lock);
753
754         ll_sa_entry_put(sai, entry);
755         if (wakeup)
756                 wake_up(&sai->sai_thread.t_ctl_waitq);
757
758         if (rc != 0) {
759                 ll_intent_release(it);
760                 iput(dir);
761                 OBD_FREE_PTR(minfo);
762         }
763         ll_sai_put(sai);
764         RETURN(rc);
765 }
766
767 static void sa_args_fini(struct md_enqueue_info *minfo,
768                          struct ldlm_enqueue_info *einfo)
769 {
770         LASSERT(minfo && einfo);
771         iput(minfo->mi_dir);
772         capa_put(minfo->mi_data.op_capa1);
773         capa_put(minfo->mi_data.op_capa2);
774         OBD_FREE_PTR(minfo);
775         OBD_FREE_PTR(einfo);
776 }
777
778 /**
779  * There is a race condition between "capa_put" and "ll_statahead_interpret"
780  * when accessing "op_data.op_capa[1,2]":
781  * "capa_put" releases the "op_data.op_capa[1,2]" reference counts after
782  * "md_intent_getattr_async" is called, but "ll_statahead_interpret" may run
783  * first and fill "op_data.op_capa[1,2]" with POISON, causing "capa_put" to
784  * access an invalid "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa"
785  * here before calling "md_intent_getattr_async".
786  */
787 static int sa_args_init(struct inode *dir, struct inode *child,
788                         struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
789                         struct ldlm_enqueue_info **pei,
790                         struct obd_capa **pcapa)
791 {
792         struct qstr              *qstr = &entry->se_qstr;
793         struct md_enqueue_info   *minfo;
794         struct ldlm_enqueue_info *einfo;
795         struct md_op_data        *op_data;
796
797         OBD_ALLOC_PTR(einfo);
798         if (einfo == NULL)
799                 return -ENOMEM;
800
801         OBD_ALLOC_PTR(minfo);
802         if (minfo == NULL) {
803                 OBD_FREE_PTR(einfo);
804                 return -ENOMEM;
805         }
806
807         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
808                                      qstr->len, 0, LUSTRE_OPC_ANY, NULL);
809         if (IS_ERR(op_data)) {
810                 OBD_FREE_PTR(einfo);
811                 OBD_FREE_PTR(minfo);
812                 return PTR_ERR(op_data);
813         }
814
815         minfo->mi_it.it_op = IT_GETATTR;
816         minfo->mi_dir = igrab(dir);
817         minfo->mi_cb = ll_statahead_interpret;
818         minfo->mi_cbdata = entry->se_index;
819
820         einfo->ei_type   = LDLM_IBITS;
821         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
822         einfo->ei_cb_bl  = ll_md_blocking_ast;
823         einfo->ei_cb_cp  = ldlm_completion_ast;
824         einfo->ei_cb_gl  = NULL;
825         einfo->ei_cbdata = NULL;
826
827         *pmi = minfo;
828         *pei = einfo;
829         pcapa[0] = op_data->op_capa1;
830         pcapa[1] = op_data->op_capa2;
831
832         return 0;
833 }
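
/*
 * Illustrative ordering note (not part of the original source): the capa
 * pointers reserved in pcapa[] above must only be put after
 * md_intent_getattr_async() returns, which is exactly the pattern used by
 * do_sa_lookup() and do_sa_revalidate() below:
 *
 *        rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
 *        if (rc == 0) {
 *                rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
 *                if (rc == 0) {
 *                        capa_put(capas[0]);
 *                        capa_put(capas[1]);
 *                } else {
 *                        sa_args_fini(minfo, einfo);
 *                }
 *        }
 */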
834
835 static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
836 {
837         struct md_enqueue_info   *minfo;
838         struct ldlm_enqueue_info *einfo;
839         struct obd_capa          *capas[2];
840         int                       rc;
841         ENTRY;
842
843         rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
844         if (rc)
845                 RETURN(rc);
846
847         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
848         if (!rc) {
849                 capa_put(capas[0]);
850                 capa_put(capas[1]);
851         } else {
852                 sa_args_fini(minfo, einfo);
853         }
854
855         RETURN(rc);
856 }
857
858 /**
859  * similar to ll_revalidate_it().
860  * \retval      1 -- dentry valid
861  * \retval      0 -- will send stat-ahead request
862  * \retval others -- prepare stat-ahead request failed
863  */
864 static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
865                             struct dentry *dentry)
866 {
867         struct inode             *inode = dentry->d_inode;
868         struct lookup_intent      it = { .it_op = IT_GETATTR,
869                                          .d.lustre.it_lock_handle = 0 };
870         struct md_enqueue_info   *minfo;
871         struct ldlm_enqueue_info *einfo;
872         struct obd_capa          *capas[2];
873         int rc;
874         ENTRY;
875
876         if (unlikely(inode == NULL))
877                 RETURN(1);
878
879         if (d_mountpoint(dentry))
880                 RETURN(1);
881
882         entry->se_inode = igrab(inode);
883         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL);
884         if (rc == 1) {
885                 entry->se_handle = it.d.lustre.it_lock_handle;
886                 ll_intent_release(&it);
887                 RETURN(1);
888         }
889
890         rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
891         if (rc) {
892                 entry->se_inode = NULL;
893                 iput(inode);
894                 RETURN(rc);
895         }
896
897         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
898         if (!rc) {
899                 capa_put(capas[0]);
900                 capa_put(capas[1]);
901         } else {
902                 entry->se_inode = NULL;
903                 iput(inode);
904                 sa_args_fini(minfo, einfo);
905         }
906
907         RETURN(rc);
908 }
909
910 static void ll_statahead_one(struct dentry *parent, const char *name,
911                              const int namelen)
912 {
913         struct inode             *dir    = parent->d_inode;
914         struct ll_inode_info     *lli    = ll_i2info(dir);
915         struct ll_statahead_info *sai    = lli->lli_sai;
916         struct dentry            *dentry = NULL;
917         struct ll_sa_entry       *entry;
918         int                       rc;
919         ENTRY;
920
921         entry = ll_sa_entry_alloc(sai, sai->sai_index, name, namelen);
922         if (IS_ERR(entry))
923                 RETURN_EXIT;
924
925         dentry = d_lookup(parent, &entry->se_qstr);
926         if (!dentry) {
927                 rc = do_sa_lookup(dir, entry);
928         } else {
929                 rc = do_sa_revalidate(dir, entry, dentry);
930                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
931                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
932         }
933
934         if (dentry != NULL)
935                 dput(dentry);
936
937         if (rc) {
938                 sa_entry_post_stat(sai, entry,
939                                    rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
940                 if (entry->se_index == sai->sai_index_wait)
941                         wake_up(&sai->sai_waitq);
942         } else {
943                 sai->sai_sent++;
944         }
945
946         sai->sai_index++;
947         /* drop the entry reference taken in ll_sa_entry_alloc() */
948         ll_sa_entry_put(sai, entry);
949
950         EXIT;
951 }
952
953 static int ll_agl_thread(void *arg)
954 {
955         struct dentry *parent = (struct dentry *)arg;
956         struct inode *dir = parent->d_inode;
957         struct ll_inode_info *plli = ll_i2info(dir);
958         struct ll_inode_info *clli;
959         struct ll_sb_info *sbi = ll_i2sbi(dir);
960         struct ll_statahead_info *sai;
961         struct ptlrpc_thread *thread;
962         struct l_wait_info lwi = { 0 };
963         ENTRY;
964
965
966         sai = ll_sai_get(dir);
967         thread = &sai->sai_agl_thread;
968         thread->t_pid = current_pid();
969         CDEBUG(D_READA, "agl thread started: sai %p, parent %.*s\n",
970                sai, parent->d_name.len, parent->d_name.name);
971
972         atomic_inc(&sbi->ll_agl_total);
973         spin_lock(&plli->lli_agl_lock);
974         sai->sai_agl_valid = 1;
975         if (thread_is_init(thread))
976                 /* If someone else has changed the thread state
977                  * (e.g. already changed to SVC_STOPPING), we can't just
978                  * blindly overwrite that setting. */
979                 thread_set_flags(thread, SVC_RUNNING);
980         spin_unlock(&plli->lli_agl_lock);
981         wake_up(&thread->t_ctl_waitq);
982
983         while (1) {
984                 l_wait_event(thread->t_ctl_waitq,
985                              !agl_list_empty(sai) ||
986                              !thread_is_running(thread),
987                              &lwi);
988
989                 if (!thread_is_running(thread))
990                         break;
991
992                 spin_lock(&plli->lli_agl_lock);
993                 /* The statahead thread may help process AGL entries,
994                  * so check whether the list is empty again. */
995                 if (!agl_list_empty(sai)) {
996                         clli = agl_first_entry(sai);
997                         list_del_init(&clli->lli_agl_list);
998                         spin_unlock(&plli->lli_agl_lock);
999                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
1000                 } else {
1001                         spin_unlock(&plli->lli_agl_lock);
1002                 }
1003         }
1004
1005         spin_lock(&plli->lli_agl_lock);
1006         sai->sai_agl_valid = 0;
1007         while (!agl_list_empty(sai)) {
1008                 clli = agl_first_entry(sai);
1009                 list_del_init(&clli->lli_agl_list);
1010                 spin_unlock(&plli->lli_agl_lock);
1011                 clli->lli_agl_index = 0;
1012                 iput(&clli->lli_vfs_inode);
1013                 spin_lock(&plli->lli_agl_lock);
1014         }
1015         thread_set_flags(thread, SVC_STOPPED);
1016         spin_unlock(&plli->lli_agl_lock);
1017         wake_up(&thread->t_ctl_waitq);
1018         ll_sai_put(sai);
1019         CDEBUG(D_READA, "agl thread stopped: sai %p, parent %.*s\n",
1020                sai, parent->d_name.len, parent->d_name.name);
1021         RETURN(0);
1022 }
1023
1024 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
1025 {
1026         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
1027         struct l_wait_info    lwi    = { 0 };
1028         struct ll_inode_info  *plli;
1029         struct task_struct            *task;
1030         ENTRY;
1031
1032         CDEBUG(D_READA, "start agl thread: sai %p, parent %.*s\n",
1033                sai, parent->d_name.len, parent->d_name.name);
1034
1035         plli = ll_i2info(parent->d_inode);
1036         task = kthread_run(ll_agl_thread, parent,
1037                                "ll_agl_%u", plli->lli_opendir_pid);
1038         if (IS_ERR(task)) {
1039                 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
1040                 thread_set_flags(thread, SVC_STOPPED);
1041                 RETURN_EXIT;
1042         }
1043
1044         l_wait_event(thread->t_ctl_waitq,
1045                      thread_is_running(thread) || thread_is_stopped(thread),
1046                      &lwi);
1047         EXIT;
1048 }
1049
1050 static int ll_statahead_thread(void *arg)
1051 {
1052         struct dentry *parent = (struct dentry *)arg;
1053         struct inode *dir = parent->d_inode;
1054         struct ll_inode_info *lli = ll_i2info(dir);
1055         struct ll_sb_info *sbi = ll_i2sbi(dir);
1056         struct ll_statahead_info *sai;
1057         struct ptlrpc_thread *thread;
1058         struct ptlrpc_thread *agl_thread;
1059         int first = 0;
1060         struct md_op_data *op_data;
1061         struct ll_dir_chain chain;
1062         struct l_wait_info lwi = { 0 };
1063         struct page *page = NULL;
1064         __u64 pos = 0;
1065         int rc = 0;
1066         ENTRY;
1067
1068         sai = ll_sai_get(dir);
1069         thread = &sai->sai_thread;
1070         agl_thread = &sai->sai_agl_thread;
1071         thread->t_pid = current_pid();
1072         CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n",
1073                sai, parent->d_name.len, parent->d_name.name);
1074
1075         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1076                                      LUSTRE_OPC_ANY, dir);
1077         if (IS_ERR(op_data))
1078                 GOTO(out, rc = PTR_ERR(op_data));
1079
1080         op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1081
1082         if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1083                 ll_start_agl(parent, sai);
1084
1085         atomic_inc(&sbi->ll_sa_total);
1086         spin_lock(&lli->lli_sa_lock);
1087         if (thread_is_init(thread))
1088                 /* If someone else has changed the thread state
1089                  * (e.g. already changed to SVC_STOPPING), we can't just
1090                  * blindly overwrite that setting. */
1091                 thread_set_flags(thread, SVC_RUNNING);
1092         spin_unlock(&lli->lli_sa_lock);
1093         wake_up(&thread->t_ctl_waitq);
1094
1095         ll_dir_chain_init(&chain);
1096         while (pos != MDS_DIR_END_OFF && thread_is_running(thread)) {
1097                 struct lu_dirpage *dp;
1098                 struct lu_dirent  *ent;
1099
1100                 sai->sai_in_readpage = 1;
1101                 page = ll_get_dir_page(dir, op_data, pos, &chain);
1102                 sai->sai_in_readpage = 0;
1103                 if (IS_ERR(page)) {
1104                         rc = PTR_ERR(page);
1105                         CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
1106                                "/"LPU64" opendir_pid = %u: rc = %d\n",
1107                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1108                                lli->lli_opendir_pid, rc);
1109                         break;
1110                 }
1111
1112                 dp = page_address(page);
1113                 for (ent = lu_dirent_start(dp);
1114                      ent != NULL && thread_is_running(thread) &&
1115                      !sa_low_hit(sai);
1116                      ent = lu_dirent_next(ent)) {
1117                         __u64 hash;
1118                         int namelen;
1119                         char *name;
1120
1121                         hash = le64_to_cpu(ent->lde_hash);
1122                         if (unlikely(hash < pos))
1123                                 /*
1124                                  * Skip until we find target hash value.
1125                                  */
1126                                 continue;
1127
1128                         namelen = le16_to_cpu(ent->lde_namelen);
1129                         if (unlikely(namelen == 0))
1130                                 /*
1131                                  * Skip dummy record.
1132                                  */
1133                                 continue;
1134
1135                         name = ent->lde_name;
1136                         if (name[0] == '.') {
1137                                 if (namelen == 1) {
1138                                         /*
1139                                          * skip "."
1140                                          */
1141                                         continue;
1142                                 } else if (name[1] == '.' && namelen == 2) {
1143                                         /*
1144                                          * skip ".."
1145                                          */
1146                                         continue;
1147                                 } else if (!sai->sai_ls_all) {
1148                                         /*
1149                                          * skip hidden files.
1150                                          */
1151                                         sai->sai_skip_hidden++;
1152                                         continue;
1153                                 }
1154                         }
1155
1156                         /*
1157                          * don't stat-ahead first entry.
1158                          */
1159                         if (unlikely(++first == 1))
1160                                 continue;
1161
1162                         /* wait for spare statahead window */
1163                         do {
1164                                 l_wait_event(thread->t_ctl_waitq,
1165                                              !sa_sent_full(sai) ||
1166                                              !sa_received_empty(sai) ||
1167                                              !agl_list_empty(sai) ||
1168                                              !thread_is_running(thread),
1169                                              &lwi);
1170
1171                                 ll_post_statahead(sai);
1172                         } while (sa_sent_full(sai) &&
1173                                  thread_is_running(thread));
1174
1175                         ll_statahead_one(parent, name, namelen);
1176                 }
1177
1178                 pos = le64_to_cpu(dp->ldp_hash_end);
1179                 ll_release_page(dir, page,
1180                                 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1181
1182                 if (sa_low_hit(sai)) {
1183                         rc = -EFAULT;
1184                         atomic_inc(&sbi->ll_sa_wrong);
1185                         CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1186                                "ratio too low: hit/miss "LPU64"/"LPU64
1187                                ", sent/replied "LPU64"/"LPU64", stopping "
1188                                "statahead thread: pid %d\n",
1189                                PFID(&lli->lli_fid), sai->sai_hit,
1190                                sai->sai_miss, sai->sai_sent,
1191                                sai->sai_replied, current_pid());
1192                         break;
1193                 }
1194         }
1195         ll_dir_chain_fini(&chain);
1196         ll_finish_md_op_data(op_data);
1197
1198         if (rc < 0) {
1199                 spin_lock(&lli->lli_sa_lock);
1200                 thread_set_flags(thread, SVC_STOPPING);
1201                 lli->lli_sa_enabled = 0;
1202                 spin_unlock(&lli->lli_sa_lock);
1203         }
1204
1205         /* statahead is finished, but statahead entries still need to be cached;
1206          * wait for file release to stop this thread. */
1207         while (thread_is_running(thread)) {
1208                 l_wait_event(thread->t_ctl_waitq,
1209                              !sa_received_empty(sai) ||
1210                              !agl_list_empty(sai) ||
1211                              !thread_is_running(thread),
1212                              &lwi);
1213
1214                 ll_post_statahead(sai);
1215         }
1216
1217         EXIT;
1218 out:
1219         if (sai->sai_agl_valid) {
1220                 spin_lock(&lli->lli_agl_lock);
1221                 thread_set_flags(agl_thread, SVC_STOPPING);
1222                 spin_unlock(&lli->lli_agl_lock);
1223                 wake_up(&agl_thread->t_ctl_waitq);
1224
1225                 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1226                        sai, (unsigned int)agl_thread->t_pid);
1227                 l_wait_event(agl_thread->t_ctl_waitq,
1228                              thread_is_stopped(agl_thread),
1229                              &lwi);
1230         } else {
1231                 /* Set agl_thread flags anyway. */
1232                 thread_set_flags(agl_thread, SVC_STOPPED);
1233         }
1234
1235         /* wait for inflight statahead RPCs to finish, and then we can free sai
1236          * safely because statahead RPC will access sai data */
1237         while (sai->sai_sent != sai->sai_replied) {
1238                 /* in case we're not woken up, timeout wait */
1239                 lwi = LWI_TIMEOUT(HZ >> 3, NULL, NULL);
1240                 l_wait_event(thread->t_ctl_waitq,
1241                         sai->sai_sent == sai->sai_replied, &lwi);
1242         }
1243
1244         /* release resources held by received entries. */
1245         ll_post_statahead(sai);
1246
1247         spin_lock(&lli->lli_sa_lock);
1248         thread_set_flags(thread, SVC_STOPPED);
1249         spin_unlock(&lli->lli_sa_lock);
1250
1251         wake_up(&sai->sai_waitq);
1252         wake_up(&thread->t_ctl_waitq);
1253         ll_sai_put(sai);
1254         CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n",
1255                sai, parent->d_name.len, parent->d_name.name);
1256         dput(parent);
1257         return rc;
1258 }
1259
1260 /* authorize opened dir handle @key to statahead later */
1261 void ll_authorize_statahead(struct inode *dir, void *key)
1262 {
1263         struct ll_inode_info *lli = ll_i2info(dir);
1264
1265         spin_lock(&lli->lli_sa_lock);
1266         if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL) {
1267                 /*
1268                  * if lli_sai is not NULL, the previous statahead has not
1269                  * finished yet; better not to start a new one for now.
1270                  */
1271                 LASSERT(lli->lli_opendir_pid == 0);
1272                 lli->lli_opendir_key = key;
1273                 lli->lli_opendir_pid = current_pid();
1274                 lli->lli_sa_enabled = 1;
1275         }
1276         spin_unlock(&lli->lli_sa_lock);
1277 }
1278
1279 /* deauthorize opened dir handle @key to statahead; the statahead thread
1280  * may still be running, so notify it to quit. */
1281 void ll_deauthorize_statahead(struct inode *dir, void *key)
1282 {
1283         struct ll_inode_info *lli = ll_i2info(dir);
1284         struct ll_statahead_info *sai;
1285
1286         LASSERT(lli->lli_opendir_key == key);
1287         LASSERT(lli->lli_opendir_pid != 0);
1288
1289         CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1290                 PFID(&lli->lli_fid));
1291
1292         spin_lock(&lli->lli_sa_lock);
1293         lli->lli_opendir_key = NULL;
1294         lli->lli_opendir_pid = 0;
1295         lli->lli_sa_enabled = 0;
1296         sai = lli->lli_sai;
1297         if (sai != NULL && thread_is_running(&sai->sai_thread)) {
1298                 /*
1299                  * statahead thread may not quit yet because it needs to cache
1300                  * stated entries, now it's time to tell it to quit.
1301                  */
1302                 thread_set_flags(&sai->sai_thread, SVC_STOPPING);
1303                 wake_up(&sai->sai_thread.t_ctl_waitq);
1304         }
1305         spin_unlock(&lli->lli_sa_lock);
1306 }
1307
1308 enum {
1309         /**
1310          * not first dirent, or is "."
1311          */
1312         LS_NONE_FIRST_DE = 0,
1313         /**
1314          * the first non-hidden dirent
1315          */
1316         LS_FIRST_DE,
1317         /**
1318          * the first hidden dirent, that is "."
1319          */
1320         LS_FIRST_DOT_DE
1321 };
1322
1323 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1324 {
1325         struct ll_dir_chain   chain;
1326         struct qstr          *target = &dentry->d_name;
1327         struct md_op_data    *op_data;
1328         int                   dot_de;
1329         struct page          *page = NULL;
1330         int                   rc     = LS_NONE_FIRST_DE;
1331         __u64                 pos = 0;
1332         ENTRY;
1333
1334         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1335                                      LUSTRE_OPC_ANY, dir);
1336         if (IS_ERR(op_data))
1337                 RETURN(PTR_ERR(op_data));
1338         /**
1339          * FIXME: choose the start offset of the readdir
1340          */
1341         op_data->op_stripe_offset = 0;
1342         op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1343
1344         ll_dir_chain_init(&chain);
1345         page = ll_get_dir_page(dir, op_data, 0, &chain);
1346
1347         while (1) {
1348                 struct lu_dirpage *dp;
1349                 struct lu_dirent  *ent;
1350
1351                 if (IS_ERR(page)) {
1352                         struct ll_inode_info *lli = ll_i2info(dir);
1353
1354                         rc = PTR_ERR(page);
1355                         CERROR("%s: reading dir "DFID" at "LPU64
1356                                "opendir_pid = %u : rc = %d\n",
1357                                ll_get_fsname(dir->i_sb, NULL, 0),
1358                                PFID(ll_inode2fid(dir)), pos,
1359                                lli->lli_opendir_pid, rc);
1360                         break;
1361                 }
1362
1363                 dp = page_address(page);
1364                 for (ent = lu_dirent_start(dp); ent != NULL;
1365                      ent = lu_dirent_next(ent)) {
1366                         __u64 hash;
1367                         int namelen;
1368                         char *name;
1369
1370                         hash = le64_to_cpu(ent->lde_hash);
1371                         /* The ll_get_dir_page() can return any page containing
1372                          * the given hash, which may not be the start hash. */
1373                         if (unlikely(hash < pos))
1374                                 continue;
1375
1376                         namelen = le16_to_cpu(ent->lde_namelen);
1377                         if (unlikely(namelen == 0))
1378                                 /*
1379                                  * skip dummy record.
1380                                  */
1381                                 continue;
1382
1383                         name = ent->lde_name;
1384                         if (name[0] == '.') {
1385                                 if (namelen == 1)
1386                                         /*
1387                                          * skip "."
1388                                          */
1389                                         continue;
1390                                 else if (name[1] == '.' && namelen == 2)
1391                                         /*
1392                                          * skip ".."
1393                                          */
1394                                         continue;
1395                                 else
1396                                         dot_de = 1;
1397                         } else {
1398                                 dot_de = 0;
1399                         }
1400
1401                         if (dot_de && target->name[0] != '.') {
1402                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1403                                        target->len, target->name,
1404                                        namelen, name);
1405                                 continue;
1406                         }
1407
1408                         if (target->len != namelen ||
1409                             memcmp(target->name, name, namelen) != 0)
1410                                 rc = LS_NONE_FIRST_DE;
1411                         else if (!dot_de)
1412                                 rc = LS_FIRST_DE;
1413                         else
1414                                 rc = LS_FIRST_DOT_DE;
1415
1416                         ll_release_page(dir, page, false);
1417                         GOTO(out, rc);
1418                 }
1419                 pos = le64_to_cpu(dp->ldp_hash_end);
1420                 if (pos == MDS_DIR_END_OFF) {
1421                         /*
1422                          * End of directory reached.
1423                          */
1424                         ll_release_page(dir, page, false);
1425                         GOTO(out, rc);
1426                 } else {
1427                         /*
1428                          * chain is exhausted
1429                          * Normal case: continue to the next page.
1430                          */
1431                         ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1432                                               LDF_COLLIDE);
1433                         page = ll_get_dir_page(dir, op_data, pos, &chain);
1434                 }
1435         }
1436         EXIT;
1437 out:
1438         ll_dir_chain_fini(&chain);
1439         ll_finish_md_op_data(op_data);
1440         return rc;
1441 }
1442
1443 static void
1444 ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
1445 {
1446         if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC) {
1447                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
1448
1449                 sai->sai_hit++;
1450                 sai->sai_consecutive_miss = 0;
1451                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
1452         } else {
1453                 sai->sai_miss++;
1454                 sai->sai_consecutive_miss++;
1455         }
1456         ll_sa_entry_fini(sai, entry);
1457         wake_up(&sai->sai_thread.t_ctl_waitq);
1458 }
1459
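     /*
      * Check whether the dentry being looked up has already been prefetched
      * by the statahead thread.  If the entry exists but has not been stated
      * yet, wait (interruptibly, up to 30 seconds) for the statahead thread
      * to finish it.  On success the prefetched inode is spliced into the
      * dentry and the cached ldlm lock is revalidated, so the caller can
      * avoid a synchronous getattr RPC to the MDS.
      */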
1460 static int revalidate_statahead_dentry(struct inode *dir,
1461                                         struct ll_statahead_info *sai,
1462                                         struct dentry **dentryp,
1463                                         int only_unplug)
1464 {
1465         struct ll_sa_entry *entry = NULL;
1466         struct l_wait_info lwi = { 0 };
1467         int rc = 0;
1468         ENTRY;
1469
1470         if ((*dentryp)->d_name.name[0] == '.') {
1471                 if (sai->sai_ls_all ||
1472                     sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1473                         /*
1474                          * The hidden dentry is the first one, or the
1475                          * statahead thread has not skipped too many hidden
1476                          * dentries before "sai_ls_all" was enabled below.
1477                          */
1478                 } else {
1479                         if (!sai->sai_ls_all)
1480                                 /*
1481                                  * It may be that the hidden dentry is not
1482                                  * the first one and "sai_ls_all" was not
1483                                  * set, so "ls -al" missed it. Enable
1484                                  * "sai_ls_all" for such a case.
1485                                  */
1486                                 sai->sai_ls_all = 1;
1487
1488                         /*
1489                          * Such a "getattr" has been skipped before
1490                          * "sai_ls_all" was enabled as above.
1491                          */
1492                         sai->sai_miss_hidden++;
1493                         RETURN(-EAGAIN);
1494                 }
1495         }
1496
1497         entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
1498         if (entry == NULL || only_unplug) {
1499                 ll_sai_unplug(sai, entry);
1500                 RETURN(entry ? 1 : -EAGAIN);
1501         }
1502
1503         /* if the statahead thread is busy in readdir, help it do the post-work */
1504         if (!ll_sa_entry_stated(entry) && sai->sai_in_readpage)
1505                 ll_post_statahead(sai);
1506
1507         if (!ll_sa_entry_stated(entry)) {
1508                 sai->sai_index_wait = entry->se_index;
1509                 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
1510                                         LWI_ON_SIGNAL_NOOP, NULL);
1511                 rc = l_wait_event(sai->sai_waitq,
1512                                 ll_sa_entry_stated(entry) ||
1513                                 thread_is_stopped(&sai->sai_thread),
1514                                 &lwi);
1515                 if (rc < 0) {
1516                         ll_sai_unplug(sai, entry);
1517                         RETURN(-EAGAIN);
1518                 }
1519         }
1520
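             /*
              * "se_handle" holds the handle of the ldlm lock obtained by the
              * statahead thread for this entry.  Try to re-match that lock;
              * if it is still held, the dentry and inode can be revalidated
              * locally without another RPC to the MDS.
              */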
1521         if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode != NULL) {
1522                 struct inode *inode = entry->se_inode;
1523                 struct lookup_intent it = { .it_op = IT_GETATTR,
1524                                             .d.lustre.it_lock_handle =
1525                                                 entry->se_handle };
1526                 __u64 bits;
1527
1528                 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1529                                         ll_inode2fid(inode), &bits);
1530                 if (rc == 1) {
1531                         if ((*dentryp)->d_inode == NULL) {
1532                                 struct dentry *alias;
1533
1534                                 alias = ll_splice_alias(inode, *dentryp);
1535                                 if (IS_ERR(alias)) {
1536                                         ll_sai_unplug(sai, entry);
1537                                         RETURN(PTR_ERR(alias));
1538                                 }
1539                                 *dentryp = alias;
1540                         } else if ((*dentryp)->d_inode != inode) {
1541                                 /* revalidate succeeded, but the inode was recreated */
1542                                 CDEBUG(D_READA,
1543                                         "%s: stale dentry %.*s inode "
1544                                         DFID", statahead inode "DFID
1545                                         "\n",
1546                                         ll_get_fsname((*dentryp)->d_inode->i_sb,
1547                                                       NULL, 0),
1548                                         (*dentryp)->d_name.len,
1549                                         (*dentryp)->d_name.name,
1550                                         PFID(ll_inode2fid((*dentryp)->d_inode)),
1551                                         PFID(ll_inode2fid(inode)));
1552                                 ll_sai_unplug(sai, entry);
1553                                 RETURN(-ESTALE);
1554                         } else {
1555                                 iput(inode);
1556                         }
1557                         entry->se_inode = NULL;
1558
1559                         if ((bits & MDS_INODELOCK_LOOKUP) &&
1560                             d_lustre_invalid(*dentryp))
1561                                 d_lustre_revalidate(*dentryp);
1562                         ll_intent_release(&it);
1563                 }
1564         }
1565
1566         ll_sai_unplug(sai, entry);
1567         RETURN(rc);
1568 }
1569
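     /*
      * Allocate a statahead info structure for "dir" and start the "ll_sa_%u"
      * kernel thread for it.  Statahead is only started when "dentry" is the
      * first real entry listed in the directory (see is_first_dirent()).
      * Note that -EAGAIN is returned even on success, because the current
      * dentry is already being looked up by the caller and gains nothing
      * from statahead.
      */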
1570 static int start_statahead_thread(struct inode *dir, struct dentry *dentry)
1571 {
1572         struct ll_inode_info *lli = ll_i2info(dir);
1573         struct ll_statahead_info *sai = NULL;
1574         struct dentry *parent;
1575         struct ptlrpc_thread *thread;
1576         struct l_wait_info lwi = { 0 };
1577         struct task_struct *task;
1578         int rc;
1579         ENTRY;
1580
1581         /* I am the "lli_opendir_pid" owner, only I can set "lli_sai". */
1582         rc = is_first_dirent(dir, dentry);
1583         if (rc == LS_NONE_FIRST_DE)
1584                 /* It is not an "ls -{a}l" operation, no need to statahead for it. */
1585                 GOTO(out, rc = -EAGAIN);
1586
1587         sai = ll_sai_alloc();
1588         if (sai == NULL)
1589                 GOTO(out, rc = -ENOMEM);
1590
1591         sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
1592         sai->sai_inode = igrab(dir);
1593         if (unlikely(sai->sai_inode == NULL)) {
1594                 CWARN("Do not start stat ahead on dying inode "DFID"\n",
1595                         PFID(&lli->lli_fid));
1596                 GOTO(out, rc = -ESTALE);
1597         }
1598
1599         /* take a reference on the parent dentry here; it is put in ll_statahead_thread() */
1600         parent = dget(dentry->d_parent);
1601         if (unlikely(sai->sai_inode != parent->d_inode)) {
1602                 struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
1603
1604                 CWARN("Race condition, someone changed %.*s just now: "
1605                         "old parent "DFID", new parent "DFID"\n",
1606                         dentry->d_name.len, dentry->d_name.name,
1607                         PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
1608                 dput(parent);
1609                 iput(sai->sai_inode);
1610                 GOTO(out, rc = -EAGAIN);
1611         }
1612
1613         CDEBUG(D_READA, "start statahead thread: sai %p, parent %.*s\n",
1614                sai, parent->d_name.len, parent->d_name.name);
1615
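             /*
              * Publish the new sai through "lli_sai" before starting the
              * thread; ll_statahead_thread() (defined earlier in this file)
              * is expected to find the sai through the directory inode.
              */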
1616         lli->lli_sai = sai;
1617
1618         task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
1619                            lli->lli_opendir_pid);
1620         thread = &sai->sai_thread;
1621         if (IS_ERR(task)) {
1622                 rc = PTR_ERR(task);
1623                 CERROR("cannot start ll_sa thread: rc = %d\n", rc);
1624                 dput(parent);
1625                 lli->lli_opendir_key = NULL;
1626                 thread_set_flags(thread, SVC_STOPPED);
1627                 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
1628                 ll_sai_put(sai);
1629                 LASSERT(lli->lli_sai == NULL);
1630                 RETURN(-EAGAIN);
1631         }
1632
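             /*
              * Wait until the new thread reports that it is running (or has
              * already stopped on an early failure) before accounting it in
              * "ll_sa_running" and dropping this function's reference on the
              * sai.
              */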
1633         l_wait_event(thread->t_ctl_waitq,
1634                      thread_is_running(thread) || thread_is_stopped(thread),
1635                      &lwi);
1636         atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running);
1637         ll_sai_put(sai);
1638
1639         /*
1640          * We don't stat-ahead for the first dirent since we are already in
1641          * lookup.
1642          */
1643         RETURN(-EAGAIN);
1644
1645 out:
1646         if (sai != NULL)
1647                 OBD_FREE_PTR(sai);
1648         spin_lock(&lli->lli_sa_lock);
1649         lli->lli_opendir_key = NULL;
1650         lli->lli_opendir_pid = 0;
1651         lli->lli_sa_enabled = 0;
1652         spin_unlock(&lli->lli_sa_lock);
1653
1654         RETURN(rc);
1655 }
1656
1657 /**
1658  * Start the statahead thread if this is the first dir entry.
1659  * Otherwise, if a thread has already been started, wait until it is ahead of me.
1660  * \retval 1       -- found the entry with its lock in cache, the caller
1661  *                    needs to do nothing.
1662  * \retval 0       -- found the entry in cache, but without a lock, the
1663  *                    caller needs to refresh it from the MDS.
1664  * \retval others  -- the caller needs to process it as non-statahead.
1665  */
1666 int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
1667                        int only_unplug)
1668 {
1669         struct ll_statahead_info *sai;
1670
1671         sai = ll_sai_get(dir);
1672         if (sai != NULL) {
1673                 int rc;
1674
1675                 rc = revalidate_statahead_dentry(dir, sai, dentryp,
1676                                                  only_unplug);
1677                 CDEBUG(D_READA, "revalidate statahead %.*s: %d.\n",
1678                         (*dentryp)->d_name.len, (*dentryp)->d_name.name, rc);
1679                 ll_sai_put(sai);
1680                 return rc;
1681         }
1682
1683         return start_statahead_thread(dir, *dentryp);
1684 }
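     /*
      * Illustrative sketch only (not part of the original source): a caller
      * on the lookup/revalidate path is expected to consume the return
      * values of do_statahead_enter() roughly as follows; "dir", "dentry"
      * and the helpers named below are placeholders for the caller's code:
      *
      *      rc = do_statahead_enter(dir, &dentry, 0);
      *      if (rc == 1)
      *              return 1;                        (lock cached, done)
      *      else if (rc == 0)
      *              return refresh_from_mds(dentry); (no lock, refresh it)
      *      else
      *              return normal_lookup(dentry);    (e.g. -EAGAIN, fall back)
      */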