6adb7e8d6531b092d95e112dedb051df348baeed
[fs/lustre-release.git] / lustre / llite / statahead.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #include <linux/fs.h>
38 #include <linux/sched.h>
39 #include <linux/kthread.h>
40 #include <linux/mm.h>
41 #include <linux/highmem.h>
42 #include <linux/pagemap.h>
43
44 #define DEBUG_SUBSYSTEM S_LLITE
45
46 #include <obd_support.h>
47 #include <lustre_dlm.h>
48 #include "llite_internal.h"
49
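/* an entry whose index falls this far behind the statahead window is treated
 * as stale and may be dropped; see is_omitted_entry() and sa_put() below */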
50 #define SA_OMITTED_ENTRY_MAX 8ULL
51
52 typedef enum {
53         /** negative values are for error cases */
54         SA_ENTRY_INIT = 0,      /** init entry */
55         SA_ENTRY_SUCC = 1,      /** stat succeeded */
56         SA_ENTRY_INVA = 2,      /** invalid entry */
57 } se_state_t;
58
59 /* sa_entry is not refcounted: the statahead thread allocates it and starts the
60  * async stat; the async stat callback ll_statahead_interpret() adds it to
61  * sai_interim_entries; later the statahead thread calls sa_handle_callback() to
62  * instantiate the entry and move it to sai_entries, after which only the
63  * scanner process may access and free it. */
64 struct sa_entry {
65         /* link into sai_interim_entries or sai_entries */
66         struct list_head        se_list;
67         /* link into sai hash table locally */
68         struct list_head        se_hash;
69         /* entry index in the sai */
70         __u64                   se_index;
71         /* low layer ldlm lock handle */
72         __u64                   se_handle;
73         /* entry status */
74         se_state_t              se_state;
75         /* entry size, contains name */
76         int                     se_size;
77         /* pointer to async getattr enqueue info */
78         struct md_enqueue_info *se_minfo;
79         /* pointer to the async getattr request */
80         struct ptlrpc_request  *se_req;
81         /* pointer to the target inode */
82         struct inode           *se_inode;
83         /* entry name */
84         struct qstr             se_qstr;
85 };
86
87 static unsigned int sai_generation = 0;
88 static DEFINE_SPINLOCK(sai_generation_lock);
89
90 static inline int sa_unhashed(struct sa_entry *entry)
91 {
92         return list_empty(&entry->se_hash);
93 }
94
95 /* sa_entry is ready to use */
96 static inline int sa_ready(struct sa_entry *entry)
97 {
98         smp_rmb();
99         return (entry->se_state != SA_ENTRY_INIT);
100 }
101
102 /* hash value to put in sai_cache */
103 static inline int sa_hash(int val)
104 {
105         return val & LL_SA_CACHE_MASK;
106 }
107
108 /* hash entry into sai_cache */
109 static inline void
110 sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
111 {
112         int i = sa_hash(entry->se_qstr.hash);
113
114         spin_lock(&sai->sai_cache_lock[i]);
115         list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
116         spin_unlock(&sai->sai_cache_lock[i]);
117 }
118
119 /* unhash entry from sai_cache */
120 static inline void
121 sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
122 {
123         int i = sa_hash(entry->se_qstr.hash);
124
125         spin_lock(&sai->sai_cache_lock[i]);
126         list_del_init(&entry->se_hash);
127         spin_unlock(&sai->sai_cache_lock[i]);
128 }
129
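/* async glimpse (AGL) is only triggered for regular files, and only while
 * sai_agl_valid is set for this sai */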
130 static inline int agl_should_run(struct ll_statahead_info *sai,
131                                  struct inode *inode)
132 {
133         return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
134 }
135
136 static inline struct ll_inode_info *
137 agl_first_entry(struct ll_statahead_info *sai)
138 {
139         return list_entry(sai->sai_agls.next, struct ll_inode_info,
140                           lli_agl_list);
141 }
142
143 /* statahead window is full */
144 static inline int sa_sent_full(struct ll_statahead_info *sai)
145 {
146         return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
147 }
148
149 /* got async stat replies */
150 static inline int sa_has_callback(struct ll_statahead_info *sai)
151 {
152         return !list_empty(&sai->sai_interim_entries);
153 }
154
155 static inline int agl_list_empty(struct ll_statahead_info *sai)
156 {
157         return list_empty(&sai->sai_agls);
158 }
159
160 /**
161  * The hit rate is considered low if either
162  * (1) the hit ratio is less than 80%,
163  * or
164  * (2) there are more than 8 consecutive misses.
165  */
166 static inline int sa_low_hit(struct ll_statahead_info *sai)
167 {
168         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
169                 (sai->sai_consecutive_miss > 8));
170 }
171
172 /*
173  * if the given index falls behind the statahead window by more than
174  * SA_OMITTED_ENTRY_MAX, it is considered stale.
175  */
176 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
177 {
178         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
179                  sai->sai_index);
180 }
181
182 /* allocate sa_entry and hash it to allow scanner process to find it */
183 static struct sa_entry *
184 sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len)
185 {
186         struct ll_inode_info *lli;
187         struct sa_entry *entry;
188         int entry_size;
189         char *dname;
190         ENTRY;
191
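        /* the entry name is stored inline right after the structure;
         * (len & ~3) + 4 rounds len + 1 (name plus trailing NUL) up to a
         * multiple of 4 bytes */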
192         entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
193         OBD_ALLOC(entry, entry_size);
194         if (unlikely(entry == NULL))
195                 RETURN(ERR_PTR(-ENOMEM));
196
197         CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n",
198                len, name, entry, index);
199
200         entry->se_index = index;
201
202         entry->se_state = SA_ENTRY_INIT;
203         entry->se_size = entry_size;
204         dname = (char *)entry + sizeof(struct sa_entry);
205         memcpy(dname, name, len);
206         dname[len] = 0;
207         entry->se_qstr.hash = full_name_hash(name, len);
208         entry->se_qstr.len = len;
209         entry->se_qstr.name = dname;
210
211         lli = ll_i2info(sai->sai_dentry->d_inode);
212
213         spin_lock(&lli->lli_sa_lock);
214         INIT_LIST_HEAD(&entry->se_list);
215         sa_rehash(sai, entry);
216         spin_unlock(&lli->lli_sa_lock);
217
218         atomic_inc(&sai->sai_cache_count);
219
220         RETURN(entry);
221 }
222
223 /* free sa_entry, which should have been unhashed and not in any list */
224 static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
225 {
226         CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n",
227                entry->se_qstr.len, entry->se_qstr.name, entry,
228                entry->se_index);
229
230         LASSERT(list_empty(&entry->se_list));
231         LASSERT(sa_unhashed(entry));
232
233         OBD_FREE(entry, entry->se_size);
234         atomic_dec(&sai->sai_cache_count);
235 }
236
237 /*
238  * find sa_entry by name; used by the directory scanner. No lock is needed
239  * because only the scanner can remove the entry from the cache.
240  */
241 static struct sa_entry *
242 sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
243 {
244         struct sa_entry *entry;
245         int i = sa_hash(qstr->hash);
246
247         list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
248                 if (entry->se_qstr.hash == qstr->hash &&
249                     entry->se_qstr.len == qstr->len &&
250                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
251                         return entry;
252         }
253         return NULL;
254 }
255
256 /* unhash and unlink sa_entry, and then free it */
257 static inline void
258 sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
259 {
260         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
261
262         LASSERT(!sa_unhashed(entry));
263         LASSERT(!list_empty(&entry->se_list));
264         LASSERT(sa_ready(entry));
265
266         sa_unhash(sai, entry);
267
268         spin_lock(&lli->lli_sa_lock);
269         list_del_init(&entry->se_list);
270         spin_unlock(&lli->lli_sa_lock);
271
272         if (entry->se_inode != NULL)
273                 iput(entry->se_inode);
274
275         sa_free(sai, entry);
276 }
277
278 /* called by scanner after use, sa_entry will be killed */
279 static void
280 sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
281 {
282         struct sa_entry *tmp, *next;
283
284         if (entry != NULL && entry->se_state == SA_ENTRY_SUCC) {
285                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
286
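                /* on a hit, double the statahead window, capped at the
                 * per-filesystem maximum ll_sa_max */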
287                 sai->sai_hit++;
288                 sai->sai_consecutive_miss = 0;
289                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
290         } else {
291                 sai->sai_miss++;
292                 sai->sai_consecutive_miss++;
293         }
294
295         if (entry != NULL)
296                 sa_kill(sai, entry);
297
298         /* kill old completed entries; only the scanner process does this,
299          * so no lock is needed */
300         list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
301                 if (!is_omitted_entry(sai, tmp->se_index))
302                         break;
303                 sa_kill(sai, tmp);
304         }
305
306         wake_up(&sai->sai_thread.t_ctl_waitq);
307 }
308
309 /* update entry state and insert it into sai_entries sorted by index; return
310  * true if the scanner is waiting on this entry. */
311 static bool
312 __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
313 {
314         struct sa_entry *se;
315         struct list_head *pos = &sai->sai_entries;
316         __u64 index = entry->se_index;
317
318         LASSERT(!sa_ready(entry));
319         LASSERT(list_empty(&entry->se_list));
320
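        /* walk sai_entries backwards to find the last entry with a smaller
         * index, and insert the new entry right after it so the list stays
         * sorted by se_index */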
321         list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
322                 if (se->se_index < entry->se_index) {
323                         pos = &se->se_list;
324                         break;
325                 }
326         }
327         list_add(&entry->se_list, pos);
328         entry->se_state = ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC;
329
330         return (index == sai->sai_index_wait);
331 }
332
333 /*
334  * release resources used in the async stat RPC, update the entry state, and
335  * wake up the scanner process if it is waiting on this entry.
336  */
337 static void
338 sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
339 {
340         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
341         struct md_enqueue_info *minfo = entry->se_minfo;
342         struct ptlrpc_request *req = entry->se_req;
343         bool wakeup;
344
345         /* release resources used in RPC */
346         if (minfo) {
347                 entry->se_minfo = NULL;
348                 ll_intent_release(&minfo->mi_it);
349                 iput(minfo->mi_dir);
350                 OBD_FREE_PTR(minfo);
351         }
352
353         if (req) {
354                 entry->se_req = NULL;
355                 ptlrpc_req_finished(req);
356         }
357
358         spin_lock(&lli->lli_sa_lock);
359         wakeup = __sa_make_ready(sai, entry, ret);
360         spin_unlock(&lli->lli_sa_lock);
361
362         if (wakeup)
363                 wake_up(&sai->sai_waitq);
364 }
365
366 /* insert inode into the list of sai_agls */
367 static void ll_agl_add(struct ll_statahead_info *sai,
368                        struct inode *inode, int index)
369 {
370         struct ll_inode_info *child  = ll_i2info(inode);
371         struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);
372         int                   added  = 0;
373
374         spin_lock(&child->lli_agl_lock);
375         if (child->lli_agl_index == 0) {
376                 child->lli_agl_index = index;
377                 spin_unlock(&child->lli_agl_lock);
378
379                 LASSERT(list_empty(&child->lli_agl_list));
380
381                 igrab(inode);
382                 spin_lock(&parent->lli_agl_lock);
383                 if (agl_list_empty(sai))
384                         added = 1;
385                 list_add_tail(&child->lli_agl_list, &sai->sai_agls);
386                 spin_unlock(&parent->lli_agl_lock);
387         } else {
388                 spin_unlock(&child->lli_agl_lock);
389         }
390
391         if (added > 0)
392                 wake_up(&sai->sai_agl_thread.t_ctl_waitq);
393 }
394
395 /* allocate sai */
396 static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
397 {
398         struct ll_statahead_info *sai;
399         struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
400         int i;
401         ENTRY;
402
403         OBD_ALLOC_PTR(sai);
404         if (!sai)
405                 RETURN(NULL);
406
407         sai->sai_dentry = dget(dentry);
408         atomic_set(&sai->sai_refcount, 1);
409         sai->sai_max = LL_SA_RPC_MIN;
410         sai->sai_index = 1;
411         init_waitqueue_head(&sai->sai_waitq);
412         init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
413         init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
414
415         INIT_LIST_HEAD(&sai->sai_interim_entries);
416         INIT_LIST_HEAD(&sai->sai_entries);
417         INIT_LIST_HEAD(&sai->sai_agls);
418
419         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
420                 INIT_LIST_HEAD(&sai->sai_cache[i]);
421                 spin_lock_init(&sai->sai_cache_lock[i]);
422         }
423         atomic_set(&sai->sai_cache_count, 0);
424
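        /* bump the per-directory statahead generation; generation 0 appears to
         * be reserved as "unset", so it is skipped on wraparound */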
425         spin_lock(&sai_generation_lock);
426         lli->lli_sa_generation = ++sai_generation;
427         if (unlikely(sai_generation == 0))
428                 lli->lli_sa_generation = ++sai_generation;
429         spin_unlock(&sai_generation_lock);
430
431         RETURN(sai);
432 }
433
434 /* free sai */
435 static inline void ll_sai_free(struct ll_statahead_info *sai)
436 {
437         LASSERT(sai->sai_dentry != NULL);
438         dput(sai->sai_dentry);
439         OBD_FREE_PTR(sai);
440 }
441
442 /*
443  * take refcount of sai if sai for @dir exists, which means statahead is on for
444  * this directory.
445  */
446 static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
447 {
448         struct ll_inode_info *lli = ll_i2info(dir);
449         struct ll_statahead_info *sai = NULL;
450
451         spin_lock(&lli->lli_sa_lock);
452         sai = lli->lli_sai;
453         if (sai != NULL)
454                 atomic_inc(&sai->sai_refcount);
455         spin_unlock(&lli->lli_sa_lock);
456
457         return sai;
458 }
459
460 /*
461  * put sai refcount after use, if refcount reaches zero, free sai and sa_entries
462  * attached to it.
463  */
464 static void ll_sai_put(struct ll_statahead_info *sai)
465 {
466         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
467
468         if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
469                 struct sa_entry *entry, *next;
470                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
471
472                 lli->lli_sai = NULL;
473                 spin_unlock(&lli->lli_sa_lock);
474
475                 LASSERT(thread_is_stopped(&sai->sai_thread));
476                 LASSERT(thread_is_stopped(&sai->sai_agl_thread));
477                 LASSERT(sai->sai_sent == sai->sai_replied);
478                 LASSERT(!sa_has_callback(sai));
479
480                 list_for_each_entry_safe(entry, next, &sai->sai_entries,
481                                          se_list)
482                         sa_kill(sai, entry);
483
484                 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
485                 LASSERT(agl_list_empty(sai));
486
487                 ll_sai_free(sai);
488                 atomic_dec(&sbi->ll_sa_running);
489         }
490 }
491
492 /* Do NOT forget to drop the inode refcount taken when it was added to sai_agls. */
493 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
494 {
495         struct ll_inode_info *lli = ll_i2info(inode);
496         __u64 index = lli->lli_agl_index;
497         int rc;
498         ENTRY;
499
500         LASSERT(list_empty(&lli->lli_agl_list));
501
502         /* AGL may fall behind statahead by one entry */
503         if (is_omitted_entry(sai, index + 1)) {
504                 lli->lli_agl_index = 0;
505                 iput(inode);
506                 RETURN_EXIT;
507         }
508
509         /* Someone is in glimpse (sync or async), do nothing. */
510         rc = down_write_trylock(&lli->lli_glimpse_sem);
511         if (rc == 0) {
512                 lli->lli_agl_index = 0;
513                 iput(inode);
514                 RETURN_EXIT;
515         }
516
517         /*
518          * Someone triggered a glimpse within the last second.
519          * 1) The former glimpse succeeded with a glimpse lock granted by the
520          *    OST; if the lock is still cached on the client, AGL needs to do
521          *    nothing. If it was cancelled by another client, AGL may not be
522          *    able to obtain a new lock, since it triggers no glimpse
523          *    callback of its own.
524          * 2) The former glimpse succeeded, but the OST did not grant a
525          *    glimpse lock; it will quite likely refuse one for AGL as well.
526          * 3) The former glimpse failed. This is relatively rare compared
527          *    with the other two cases; AGL can ignore it without noticeably
528          *    affecting performance.
529          */
530         if (lli->lli_glimpse_time != 0 &&
531             cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
532                 up_write(&lli->lli_glimpse_sem);
533                 lli->lli_agl_index = 0;
534                 iput(inode);
535                 RETURN_EXIT;
536         }
537
538         CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
539                DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
540
541         cl_agl(inode);
542         lli->lli_agl_index = 0;
543         lli->lli_glimpse_time = cfs_time_current();
544         up_write(&lli->lli_glimpse_sem);
545
546         CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
547                DFID", idx = "LPU64", rc = %d\n",
548                PFID(&lli->lli_fid), index, rc);
549
550         iput(inode);
551
552         EXIT;
553 }
554
555 /*
556  * prepare the inode for the sa entry and add it to the agl list; after this
557  * the sa_entry is ready to be used by the scanner process.
558  */
559 static void sa_instantiate(struct ll_statahead_info *sai,
560                                  struct sa_entry *entry)
561 {
562         struct inode *dir = sai->sai_dentry->d_inode;
563         struct inode *child;
564         struct md_enqueue_info *minfo;
565         struct lookup_intent *it;
566         struct ptlrpc_request *req;
567         struct mdt_body *body;
568         int rc = 0;
569         ENTRY;
570
571         LASSERT(entry->se_handle != 0);
572
573         minfo = entry->se_minfo;
574         it = &minfo->mi_it;
575         req = entry->se_req;
576         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
577         if (body == NULL)
578                 GOTO(out, rc = -EFAULT);
579
580         child = entry->se_inode;
581         if (child == NULL) {
582                 /*
583                  * lookup.
584                  */
585                 LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
586
587                 /* XXX: No fid in reply, this is probably a cross-ref case.
588                  * SA can't handle it yet. */
589                 if (body->mbo_valid & OBD_MD_MDS)
590                         GOTO(out, rc = -EAGAIN);
591         } else {
592                 /*
593                  * revalidate.
594                  */
595                 /* unlinked and re-created with the same name */
596                 if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2,
597                                         &body->mbo_fid1))) {
598                         entry->se_inode = NULL;
599                         iput(child);
600                         child = NULL;
601                 }
602         }
603
604         it->d.lustre.it_lock_handle = entry->se_handle;
605         rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
606         if (rc != 1)
607                 GOTO(out, rc = -EAGAIN);
608
609         rc = ll_prep_inode(&child, req, dir->i_sb, it);
610         if (rc)
611                 GOTO(out, rc);
612
613         CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
614                ll_get_fsname(child->i_sb, NULL, 0),
615                entry->se_qstr.len, entry->se_qstr.name,
616                PFID(ll_inode2fid(child)), child);
617         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
618
619         entry->se_inode = child;
620
621         if (agl_should_run(sai, child))
622                 ll_agl_add(sai, child, entry->se_index);
623
624         EXIT;
625
626 out:
627         /* sa_make_ready() will drop ldlm ibits lock refcount by calling
628          * ll_intent_drop_lock() in spite of failures. Do not worry about
629          * calling ll_intent_drop_lock() more than once. */
630         sa_make_ready(sai, entry, rc);
631 }
632
633 /* once there are async stat replies, instantiate sa_entry from replies */
634 static void sa_handle_callback(struct ll_statahead_info *sai)
635 {
636         struct ll_inode_info *lli;
637
638         lli = ll_i2info(sai->sai_dentry->d_inode);
639
640         while (sa_has_callback(sai)) {
641                 struct sa_entry *entry;
642
643                 spin_lock(&lli->lli_sa_lock);
644                 if (unlikely(!sa_has_callback(sai))) {
645                         spin_unlock(&lli->lli_sa_lock);
646                         break;
647                 }
648                 entry = list_entry(sai->sai_interim_entries.next,
649                                    struct sa_entry, se_list);
650                 list_del_init(&entry->se_list);
651                 spin_unlock(&lli->lli_sa_lock);
652
653                 sa_instantiate(sai, entry);
654         }
655 }
656
657 /*
658  * callback for the async stat RPC. Because this is called in ptlrpcd context,
659  * we only put the sa_entry on sai_interim_entries and wake up the statahead
660  * thread, which will prepare the inode and instantiate the sa_entry later.
661  */
662 static int ll_statahead_interpret(struct ptlrpc_request *req,
663                                   struct md_enqueue_info *minfo, int rc)
664 {
665         struct lookup_intent *it = &minfo->mi_it;
666         struct inode *dir = minfo->mi_dir;
667         struct ll_inode_info *lli = ll_i2info(dir);
668         struct ll_statahead_info *sai = lli->lli_sai;
669         struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
670         __u64 handle = 0;
671         bool wakeup;
672         ENTRY;
673
674         if (it_disposition(it, DISP_LOOKUP_NEG))
675                 rc = -ENOENT;
676
677         /* because the statahead thread waits for all in-flight RPCs to
678          * finish, sai is always valid here, no need to refcount */
679         LASSERT(sai != NULL);
680         LASSERT(!thread_is_stopped(&sai->sai_thread));
681         LASSERT(entry != NULL);
682
683         CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
684                entry->se_qstr.len, entry->se_qstr.name, rc);
685
686         if (rc != 0) {
687                 ll_intent_release(it);
688                 iput(dir);
689                 OBD_FREE_PTR(minfo);
690         } else {
691                 /* release ibits lock ASAP to avoid deadlock when statahead
692                  * thread enqueues lock on parent in readdir and another
693                  * process enqueues lock on child with parent lock held, eg.
694                  * unlink. */
695                 handle = it->d.lustre.it_lock_handle;
696                 ll_intent_drop_lock(it);
697         }
698
699         spin_lock(&lli->lli_sa_lock);
700         if (rc != 0) {
701                 wakeup = __sa_make_ready(sai, entry, rc);
702         } else {
703                 entry->se_minfo = minfo;
704                 entry->se_req = ptlrpc_request_addref(req);
705                 /* Release the async ibits lock ASAP to avoid deadlock
706                  * when the statahead thread tries to enqueue a lock on the
707                  * parent for readpage and another process tries to enqueue a
708                  * lock on a child with the parent's lock held, e.g. unlink. */
709                 entry->se_handle = handle;
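                /* only wake the statahead thread when the interim list goes
                 * from empty to non-empty; otherwise a wakeup is already
                 * pending and sa_handle_callback() will drain the whole list */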
710                 wakeup = !sa_has_callback(sai);
711                 list_add_tail(&entry->se_list, &sai->sai_interim_entries);
712         }
713         sai->sai_replied++;
714         if (wakeup)
715                 wake_up(&sai->sai_thread.t_ctl_waitq);
716         spin_unlock(&lli->lli_sa_lock);
717
718         RETURN(rc);
719 }
720
721 /* finish async stat RPC arguments */
722 static void sa_fini_data(struct md_enqueue_info *minfo,
723                          struct ldlm_enqueue_info *einfo)
724 {
725         LASSERT(minfo && einfo);
726         iput(minfo->mi_dir);
727         capa_put(minfo->mi_data.op_capa1);
728         capa_put(minfo->mi_data.op_capa2);
729         OBD_FREE_PTR(minfo);
730         OBD_FREE_PTR(einfo);
731 }
732
733 /*
734  * prepare arguments for async stat RPC.
735  *
736  * There is a race condition between "capa_put" and "ll_statahead_interpret"
737  * when accessing "op_data.op_capa[1,2]": "capa_put" drops the reference on
738  * "op_data.op_capa[1,2]" after calling "md_intent_getattr_async", but
739  * "ll_statahead_interpret" may run first and fill "op_data.op_capa[1,2]"
740  * with POISON, causing "capa_put" to access an invalid "ocapa". So reserve
741  * "op_data.op_capa[1,2]" in "pcapa" here before calling
742  * "md_intent_getattr_async".
743  */
744 static int sa_prep_data(struct inode *dir, struct inode *child,
745                         struct sa_entry *entry, struct md_enqueue_info **pmi,
746                         struct ldlm_enqueue_info **pei,
747                         struct obd_capa **pcapa)
748 {
749         struct qstr              *qstr = &entry->se_qstr;
750         struct md_enqueue_info   *minfo;
751         struct ldlm_enqueue_info *einfo;
752         struct md_op_data        *op_data;
753
754         OBD_ALLOC_PTR(einfo);
755         if (einfo == NULL)
756                 return -ENOMEM;
757
758         OBD_ALLOC_PTR(minfo);
759         if (minfo == NULL) {
760                 OBD_FREE_PTR(einfo);
761                 return -ENOMEM;
762         }
763
764         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
765                                      qstr->len, 0, LUSTRE_OPC_ANY, NULL);
766         if (IS_ERR(op_data)) {
767                 OBD_FREE_PTR(einfo);
768                 OBD_FREE_PTR(minfo);
769                 return PTR_ERR(op_data);
770         }
771
772         minfo->mi_it.it_op = IT_GETATTR;
773         minfo->mi_dir = igrab(dir);
774         minfo->mi_cb = ll_statahead_interpret;
775         minfo->mi_cbdata = entry;
776
777         einfo->ei_type   = LDLM_IBITS;
778         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
779         einfo->ei_cb_bl  = ll_md_blocking_ast;
780         einfo->ei_cb_cp  = ldlm_completion_ast;
781         einfo->ei_cb_gl  = NULL;
782         einfo->ei_cbdata = NULL;
783
784         *pmi = minfo;
785         *pei = einfo;
786         pcapa[0] = op_data->op_capa1;
787         pcapa[1] = op_data->op_capa2;
788
789         return 0;
790 }
791
792 /* async stat for file not found in dcache */
793 static int sa_lookup(struct inode *dir, struct sa_entry *entry)
794 {
795         struct md_enqueue_info   *minfo;
796         struct ldlm_enqueue_info *einfo;
797         struct obd_capa          *capas[2];
798         int                       rc;
799         ENTRY;
800
801         rc = sa_prep_data(dir, NULL, entry, &minfo, &einfo, capas);
802         if (rc)
803                 RETURN(rc);
804
805         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
806         if (!rc) {
807                 capa_put(capas[0]);
808                 capa_put(capas[1]);
809         } else {
810                 sa_fini_data(minfo, einfo);
811         }
812
813         RETURN(rc);
814 }
815
816 /**
817  * async stat for file found in dcache, similar to .revalidate
818  *
819  * \retval      1 dentry valid, no RPC sent
820  * \retval      0 dentry invalid, will send async stat RPC
821  * \retval      negative number upon error
822  */
823 static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
824                          struct dentry *dentry)
825 {
826         struct inode *inode = dentry->d_inode;
827         struct lookup_intent it = { .it_op = IT_GETATTR,
828                                     .d.lustre.it_lock_handle = 0 };
829         struct md_enqueue_info *minfo;
830         struct ldlm_enqueue_info *einfo;
831         struct obd_capa *capas[2];
832         int rc;
833         ENTRY;
834
835         if (unlikely(inode == NULL))
836                 RETURN(1);
837
838         if (d_mountpoint(dentry))
839                 RETURN(1);
840
841         entry->se_inode = igrab(inode);
842         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
843                                 NULL);
844         if (rc == 1) {
845                 entry->se_handle = it.d.lustre.it_lock_handle;
846                 ll_intent_release(&it);
847                 RETURN(1);
848         }
849
850         rc = sa_prep_data(dir, inode, entry, &minfo, &einfo, capas);
851         if (rc) {
852                 entry->se_inode = NULL;
853                 iput(inode);
854                 RETURN(rc);
855         }
856
857         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
858         if (!rc) {
859                 capa_put(capas[0]);
860                 capa_put(capas[1]);
861         } else {
862                 entry->se_inode = NULL;
863                 iput(inode);
864                 sa_fini_data(minfo, einfo);
865         }
866
867         RETURN(rc);
868 }
869
870 /* async stat for file with @name */
871 static void sa_statahead(struct dentry *parent, const char *name, int len)
872 {
873         struct inode *dir = parent->d_inode;
874         struct ll_inode_info *lli = ll_i2info(dir);
875         struct ll_statahead_info *sai = lli->lli_sai;
876         struct dentry *dentry = NULL;
877         struct sa_entry *entry;
878         int rc;
879         ENTRY;
880
881         entry = sa_alloc(sai, sai->sai_index, name, len);
882         if (IS_ERR(entry))
883                 RETURN_EXIT;
884
885         dentry = d_lookup(parent, &entry->se_qstr);
886         if (!dentry) {
887                 rc = sa_lookup(dir, entry);
888         } else {
889                 rc = sa_revalidate(dir, entry, dentry);
890                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
891                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
892         }
893
894         if (dentry != NULL)
895                 dput(dentry);
896
897         if (rc != 0)
898                 sa_make_ready(sai, entry, rc);
899         else
900                 sai->sai_sent++;
901
902         sai->sai_index++;
903
904         EXIT;
905 }
906
907 /* async glimpse (agl) thread main function */
908 static int ll_agl_thread(void *arg)
909 {
910         struct dentry *parent = (struct dentry *)arg;
911         struct inode *dir = parent->d_inode;
912         struct ll_inode_info *plli = ll_i2info(dir);
913         struct ll_inode_info *clli;
914         struct ll_sb_info *sbi = ll_i2sbi(dir);
915         struct ll_statahead_info *sai;
916         struct ptlrpc_thread *thread;
917         struct l_wait_info lwi = { 0 };
918         ENTRY;
919
920
921         sai = ll_sai_get(dir);
922         thread = &sai->sai_agl_thread;
923         thread->t_pid = current_pid();
924         CDEBUG(D_READA, "agl thread started: sai %p, parent %.*s\n",
925                sai, parent->d_name.len, parent->d_name.name);
926
927         atomic_inc(&sbi->ll_agl_total);
928         spin_lock(&plli->lli_agl_lock);
929         sai->sai_agl_valid = 1;
930         if (thread_is_init(thread))
931                 /* If someone else has changed the thread state
932                  * (e.g. already changed to SVC_STOPPING), we can't just
933                  * blindly overwrite that setting. */
934                 thread_set_flags(thread, SVC_RUNNING);
935         spin_unlock(&plli->lli_agl_lock);
936         wake_up(&thread->t_ctl_waitq);
937
938         while (1) {
939                 l_wait_event(thread->t_ctl_waitq,
940                              !agl_list_empty(sai) ||
941                              !thread_is_running(thread),
942                              &lwi);
943
944                 if (!thread_is_running(thread))
945                         break;
946
947                 spin_lock(&plli->lli_agl_lock);
948                 /* The statahead thread may have helped to process AGL
949                  * entries, so check again whether the list is empty. */
950                 if (!agl_list_empty(sai)) {
951                         clli = agl_first_entry(sai);
952                         list_del_init(&clli->lli_agl_list);
953                         spin_unlock(&plli->lli_agl_lock);
954                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
955                 } else {
956                         spin_unlock(&plli->lli_agl_lock);
957                 }
958         }
959
960         spin_lock(&plli->lli_agl_lock);
961         sai->sai_agl_valid = 0;
962         while (!agl_list_empty(sai)) {
963                 clli = agl_first_entry(sai);
964                 list_del_init(&clli->lli_agl_list);
965                 spin_unlock(&plli->lli_agl_lock);
966                 clli->lli_agl_index = 0;
967                 iput(&clli->lli_vfs_inode);
968                 spin_lock(&plli->lli_agl_lock);
969         }
970         thread_set_flags(thread, SVC_STOPPED);
971         spin_unlock(&plli->lli_agl_lock);
972         wake_up(&thread->t_ctl_waitq);
973         ll_sai_put(sai);
974         CDEBUG(D_READA, "agl thread stopped: sai %p, parent %.*s\n",
975                sai, parent->d_name.len, parent->d_name.name);
976         RETURN(0);
977 }
978
979 /* start agl thread */
980 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
981 {
982         struct ptlrpc_thread *thread = &sai->sai_agl_thread;
983         struct l_wait_info    lwi    = { 0 };
984         struct ll_inode_info  *plli;
985         struct task_struct            *task;
986         ENTRY;
987
988         CDEBUG(D_READA, "start agl thread: sai %p, parent %.*s\n",
989                sai, parent->d_name.len, parent->d_name.name);
990
991         plli = ll_i2info(parent->d_inode);
992         task = kthread_run(ll_agl_thread, parent,
993                                "ll_agl_%u", plli->lli_opendir_pid);
994         if (IS_ERR(task)) {
995                 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
996                 thread_set_flags(thread, SVC_STOPPED);
997                 RETURN_EXIT;
998         }
999
1000         l_wait_event(thread->t_ctl_waitq,
1001                      thread_is_running(thread) || thread_is_stopped(thread),
1002                      &lwi);
1003         EXIT;
1004 }
1005
1006 /* statahead thread main function */
1007 static int ll_statahead_thread(void *arg)
1008 {
1009         struct dentry *parent = (struct dentry *)arg;
1010         struct inode *dir = parent->d_inode;
1011         struct ll_inode_info *lli = ll_i2info(dir);
1012         struct ll_sb_info *sbi = ll_i2sbi(dir);
1013         struct ll_statahead_info *sai;
1014         struct ptlrpc_thread *sa_thread;
1015         struct ptlrpc_thread *agl_thread;
1016         int first = 0;
1017         struct md_op_data *op_data;
1018         struct ll_dir_chain chain;
1019         struct l_wait_info lwi = { 0 };
1020         struct page *page = NULL;
1021         __u64 pos = 0;
1022         int rc = 0;
1023         ENTRY;
1024
1025         sai = ll_sai_get(dir);
1026         sa_thread = &sai->sai_thread;
1027         agl_thread = &sai->sai_agl_thread;
1028         sa_thread->t_pid = current_pid();
1029         CDEBUG(D_READA, "statahead thread starting: sai %p, parent %.*s\n",
1030                sai, parent->d_name.len, parent->d_name.name);
1031
1032         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1033                                      LUSTRE_OPC_ANY, dir);
1034         if (IS_ERR(op_data))
1035                 GOTO(out, rc = PTR_ERR(op_data));
1036
1037         op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1038
1039         if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
1040                 ll_start_agl(parent, sai);
1041
1042         atomic_inc(&sbi->ll_sa_total);
1043         spin_lock(&lli->lli_sa_lock);
1044         if (thread_is_init(sa_thread))
1045                 /* If someone else has changed the thread state
1046                  * (e.g. already changed to SVC_STOPPING), we can't just
1047                  * blindly overwrite that setting. */
1048                 thread_set_flags(sa_thread, SVC_RUNNING);
1049         spin_unlock(&lli->lli_sa_lock);
1050         wake_up(&sa_thread->t_ctl_waitq);
1051
1052         ll_dir_chain_init(&chain);
1053         while (pos != MDS_DIR_END_OFF && thread_is_running(sa_thread)) {
1054                 struct lu_dirpage *dp;
1055                 struct lu_dirent  *ent;
1056
1057                 sai->sai_in_readpage = 1;
1058                 page = ll_get_dir_page(dir, op_data, pos, &chain);
1059                 sai->sai_in_readpage = 0;
1060                 if (IS_ERR(page)) {
1061                         rc = PTR_ERR(page);
1062                         CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
1063                                "/"LPU64" opendir_pid = %u: rc = %d\n",
1064                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1065                                lli->lli_opendir_pid, rc);
1066                         break;
1067                 }
1068
1069                 dp = page_address(page);
1070                 for (ent = lu_dirent_start(dp);
1071                      ent != NULL && thread_is_running(sa_thread) &&
1072                      !sa_low_hit(sai);
1073                      ent = lu_dirent_next(ent)) {
1074                         __u64 hash;
1075                         int namelen;
1076                         char *name;
1077
1078                         hash = le64_to_cpu(ent->lde_hash);
1079                         if (unlikely(hash < pos))
1080                                 /*
1081                                  * Skip until we find target hash value.
1082                                  */
1083                                 continue;
1084
1085                         namelen = le16_to_cpu(ent->lde_namelen);
1086                         if (unlikely(namelen == 0))
1087                                 /*
1088                                  * Skip dummy record.
1089                                  */
1090                                 continue;
1091
1092                         name = ent->lde_name;
1093                         if (name[0] == '.') {
1094                                 if (namelen == 1) {
1095                                         /*
1096                                          * skip "."
1097                                          */
1098                                         continue;
1099                                 } else if (name[1] == '.' && namelen == 2) {
1100                                         /*
1101                                          * skip ".."
1102                                          */
1103                                         continue;
1104                                 } else if (!sai->sai_ls_all) {
1105                                         /*
1106                                          * skip hidden files.
1107                                          */
1108                                         sai->sai_skip_hidden++;
1109                                         continue;
1110                                 }
1111                         }
1112
1113                         /*
1114                          * don't stat-ahead first entry.
1115                          */
1116                         if (unlikely(++first == 1))
1117                                 continue;
1118
1119                         /* wait for spare statahead window */
1120                         do {
1121                                 l_wait_event(sa_thread->t_ctl_waitq,
1122                                              !sa_sent_full(sai) ||
1123                                              sa_has_callback(sai) ||
1124                                              !agl_list_empty(sai) ||
1125                                              !thread_is_running(sa_thread),
1126                                              &lwi);
1127
1128                                 sa_handle_callback(sai);
1129
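                                /* while the statahead window is full, help
                                 * the AGL thread by triggering queued async
                                 * glimpses instead of sitting idle */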
1130                                 spin_lock(&lli->lli_agl_lock);
1131                                 while (sa_sent_full(sai) &&
1132                                        !agl_list_empty(sai)) {
1133                                         struct ll_inode_info *clli;
1134
1135                                         clli = agl_first_entry(sai);
1136                                         list_del_init(&clli->lli_agl_list);
1137                                         spin_unlock(&lli->lli_agl_lock);
1138
1139                                         ll_agl_trigger(&clli->lli_vfs_inode,
1140                                                         sai);
1141
1142                                         spin_lock(&lli->lli_agl_lock);
1143                                 }
1144                                 spin_unlock(&lli->lli_agl_lock);
1145                         } while (sa_sent_full(sai) &&
1146                                  thread_is_running(sa_thread));
1147
1148                         sa_statahead(parent, name, namelen);
1149                 }
1150
1151                 pos = le64_to_cpu(dp->ldp_hash_end);
1152                 ll_release_page(dir, page,
1153                                 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1154
1155                 if (sa_low_hit(sai)) {
1156                         rc = -EFAULT;
1157                         atomic_inc(&sbi->ll_sa_wrong);
1158                         CDEBUG(D_READA, "Statahead for dir "DFID" hit "
1159                                "ratio too low: hit/miss "LPU64"/"LPU64
1160                                ", sent/replied "LPU64"/"LPU64", stopping "
1161                                "statahead thread: pid %d\n",
1162                                PFID(&lli->lli_fid), sai->sai_hit,
1163                                sai->sai_miss, sai->sai_sent,
1164                                sai->sai_replied, current_pid());
1165                         break;
1166                 }
1167         }
1168         ll_dir_chain_fini(&chain);
1169         ll_finish_md_op_data(op_data);
1170
1171         if (rc < 0) {
1172                 spin_lock(&lli->lli_sa_lock);
1173                 thread_set_flags(sa_thread, SVC_STOPPING);
1174                 lli->lli_sa_enabled = 0;
1175                 spin_unlock(&lli->lli_sa_lock);
1176         }
1177
1178         /* statahead is finished, but statahead entries still need to be
1179          * cached; wait for file release to stop this thread. */
1180         while (thread_is_running(sa_thread)) {
1181                 l_wait_event(sa_thread->t_ctl_waitq,
1182                              sa_has_callback(sai) ||
1183                              !thread_is_running(sa_thread),
1184                              &lwi);
1185
1186                 sa_handle_callback(sai);
1187         }
1188
1189         EXIT;
1190 out:
1191         if (sai->sai_agl_valid) {
1192                 spin_lock(&lli->lli_agl_lock);
1193                 thread_set_flags(agl_thread, SVC_STOPPING);
1194                 spin_unlock(&lli->lli_agl_lock);
1195                 wake_up(&agl_thread->t_ctl_waitq);
1196
1197                 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
1198                        sai, (unsigned int)agl_thread->t_pid);
1199                 l_wait_event(agl_thread->t_ctl_waitq,
1200                              thread_is_stopped(agl_thread),
1201                              &lwi);
1202         } else {
1203                 /* Set agl_thread flags anyway. */
1204                 thread_set_flags(agl_thread, SVC_STOPPED);
1205         }
1206
1207         /* wait for in-flight statahead RPCs to finish before freeing sai,
1208          * because the RPC callbacks will access sai data */
1209         while (sai->sai_sent != sai->sai_replied) {
1210                 /* in case we're not woken up, timeout wait */
1211                 lwi = LWI_TIMEOUT(msecs_to_jiffies(MSEC_PER_SEC >> 3),
1212                                   NULL, NULL);
1213                 l_wait_event(sa_thread->t_ctl_waitq,
1214                         sai->sai_sent == sai->sai_replied, &lwi);
1215         }
1216
1217         /* release resources held by statahead RPCs */
1218         sa_handle_callback(sai);
1219
1220         spin_lock(&lli->lli_sa_lock);
1221         thread_set_flags(sa_thread, SVC_STOPPED);
1222         spin_unlock(&lli->lli_sa_lock);
1223
1224         CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %.*s\n",
1225                sai, parent->d_name.len, parent->d_name.name);
1226
1227         wake_up(&sai->sai_waitq);
1228         wake_up(&sa_thread->t_ctl_waitq);
1229         ll_sai_put(sai);
1230
1231         return rc;
1232 }
1233
1234 /* authorize the opened dir handle @key for statahead */
1235 void ll_authorize_statahead(struct inode *dir, void *key)
1236 {
1237         struct ll_inode_info *lli = ll_i2info(dir);
1238
1239         spin_lock(&lli->lli_sa_lock);
1240         if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL) {
1241                 /*
1242                  * if lli_sai is not NULL, it means previous statahead is not
1243                  * finished yet, we'd better not start a new statahead for now.
1244                  */
1245                 LASSERT(lli->lli_opendir_pid == 0);
1246                 lli->lli_opendir_key = key;
1247                 lli->lli_opendir_pid = current_pid();
1248                 lli->lli_sa_enabled = 1;
1249         }
1250         spin_unlock(&lli->lli_sa_lock);
1251 }
1252
1253 /*
1254  * deauthorize the opened dir handle @key for statahead, and notify the
1255  * statahead thread to quit if it is running.
1256  */
1257 void ll_deauthorize_statahead(struct inode *dir, void *key)
1258 {
1259         struct ll_inode_info *lli = ll_i2info(dir);
1260         struct ll_statahead_info *sai;
1261
1262         LASSERT(lli->lli_opendir_key == key);
1263         LASSERT(lli->lli_opendir_pid != 0);
1264
1265         CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1266                 PFID(&lli->lli_fid));
1267
1268         spin_lock(&lli->lli_sa_lock);
1269         lli->lli_opendir_key = NULL;
1270         lli->lli_opendir_pid = 0;
1271         lli->lli_sa_enabled = 0;
1272         sai = lli->lli_sai;
1273         if (sai != NULL && thread_is_running(&sai->sai_thread)) {
1274                 /*
1275                  * statahead thread may not quit yet because it needs to cache
1276                  * entries, now it's time to tell it to quit.
1277                  */
1278                 thread_set_flags(&sai->sai_thread, SVC_STOPPING);
1279                 wake_up(&sai->sai_thread.t_ctl_waitq);
1280         }
1281         spin_unlock(&lli->lli_sa_lock);
1282 }
1283
1284 enum {
1285         /**
1286          * not first dirent, or is "."
1287          */
1288         LS_NOT_FIRST_DE = 0,
1289         /**
1290          * the first non-hidden dirent
1291          */
1292         LS_FIRST_DE,
1293         /**
1294          * the first hidden dirent, that is "."
1295          */
1296         LS_FIRST_DOT_DE
1297 };
1298
1299 /* file is first dirent under @dir */
1300 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1301 {
1302         struct ll_dir_chain   chain;
1303         struct qstr          *target = &dentry->d_name;
1304         struct md_op_data    *op_data;
1305         int                   dot_de;
1306         struct page          *page = NULL;
1307         int                   rc = LS_NOT_FIRST_DE;
1308         __u64                 pos = 0;
1309         ENTRY;
1310
1311         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1312                                      LUSTRE_OPC_ANY, dir);
1313         if (IS_ERR(op_data))
1314                 RETURN(PTR_ERR(op_data));
1315         /**
1316          * FIXME: choose the start offset of the readdir
1317          */
1318         op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
1319
1320         ll_dir_chain_init(&chain);
1321         page = ll_get_dir_page(dir, op_data, 0, &chain);
1322
1323         while (1) {
1324                 struct lu_dirpage *dp;
1325                 struct lu_dirent  *ent;
1326
1327                 if (IS_ERR(page)) {
1328                         struct ll_inode_info *lli = ll_i2info(dir);
1329
1330                         rc = PTR_ERR(page);
1331                         CERROR("%s: reading dir "DFID" at "LPU64
1332                                "opendir_pid = %u : rc = %d\n",
1333                                ll_get_fsname(dir->i_sb, NULL, 0),
1334                                PFID(ll_inode2fid(dir)), pos,
1335                                lli->lli_opendir_pid, rc);
1336                         break;
1337                 }
1338
1339                 dp = page_address(page);
1340                 for (ent = lu_dirent_start(dp); ent != NULL;
1341                      ent = lu_dirent_next(ent)) {
1342                         __u64 hash;
1343                         int namelen;
1344                         char *name;
1345
1346                         hash = le64_to_cpu(ent->lde_hash);
1347                         /* ll_get_dir_page() can return any page containing
1348                          * the given hash, which may not be the start hash. */
1349                         if (unlikely(hash < pos))
1350                                 continue;
1351
1352                         namelen = le16_to_cpu(ent->lde_namelen);
1353                         if (unlikely(namelen == 0))
1354                                 /*
1355                                  * skip dummy record.
1356                                  */
1357                                 continue;
1358
1359                         name = ent->lde_name;
1360                         if (name[0] == '.') {
1361                                 if (namelen == 1)
1362                                         /*
1363                                          * skip "."
1364                                          */
1365                                         continue;
1366                                 else if (name[1] == '.' && namelen == 2)
1367                                         /*
1368                                          * skip ".."
1369                                          */
1370                                         continue;
1371                                 else
1372                                         dot_de = 1;
1373                         } else {
1374                                 dot_de = 0;
1375                         }
1376
1377                         if (dot_de && target->name[0] != '.') {
1378                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1379                                        target->len, target->name,
1380                                        namelen, name);
1381                                 continue;
1382                         }
1383
1384                         if (target->len != namelen ||
1385                             memcmp(target->name, name, namelen) != 0)
1386                                 rc = LS_NOT_FIRST_DE;
1387                         else if (!dot_de)
1388                                 rc = LS_FIRST_DE;
1389                         else
1390                                 rc = LS_FIRST_DOT_DE;
1391
1392                         ll_release_page(dir, page, false);
1393                         GOTO(out, rc);
1394                 }
1395                 pos = le64_to_cpu(dp->ldp_hash_end);
1396                 if (pos == MDS_DIR_END_OFF) {
1397                         /*
1398                          * End of directory reached.
1399                          */
1400                         ll_release_page(dir, page, false);
1401                         GOTO(out, rc);
1402                 } else {
1403                         /*
1404                          * chain is exhausted
1405                          * Normal case: continue to the next page.
1406                          */
1407                         ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1408                                               LDF_COLLIDE);
1409                         page = ll_get_dir_page(dir, op_data, pos, &chain);
1410                 }
1411         }
1412         EXIT;
1413 out:
1414         ll_dir_chain_fini(&chain);
1415         ll_finish_md_op_data(op_data);
1416         return rc;
1417 }
1418
1419 /**
1420  * revalidate @dentryp from statahead cache
1421  *
1422  * \param[in] dir       parent directory
1423  * \param[in] sai       sai structure
1424  * \param[out] dentryp  pointer to dentry which will be revalidated
1425  * \param[in] unplug    unplug statahead window only (normally for negative
1426  *                      dentry)
1427  * \retval              1 on success, dentry is saved in @dentryp
1428  * \retval              0 if revalidation failed (no proper lock on client)
1429  * \retval              negative number upon error
1430  */
1431 static int revalidate_statahead_dentry(struct inode *dir,
1432                                         struct ll_statahead_info *sai,
1433                                         struct dentry **dentryp,
1434                                         bool unplug)
1435 {
1436         struct sa_entry *entry = NULL;
1437         struct l_wait_info lwi = { 0 };
1438         struct ll_dentry_data *ldd;
1439         struct ll_inode_info *lli;
1440         int rc = 0;
1441         ENTRY;
1442
1443         if ((*dentryp)->d_name.name[0] == '.') {
1444                 if (sai->sai_ls_all ||
1445                     sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1446                         /*
1447                          * The hidden dentry is the first one, or the
1448                          * statahead thread has not skipped that many hidden
1449                          * dentries before "sai_ls_all" was enabled below.
1450                          */
1451                 } else {
1452                         if (!sai->sai_ls_all)
1453                                 /*
1454                                  * It may be because the hidden dentry is
1455                                  * not the first one and "sai_ls_all" was
1456                                  * not set, so "ls -al" missed it. Enable
1457                                  * "sai_ls_all" for such a case.
1458                                  */
1459                                 sai->sai_ls_all = 1;
1460
1461                         /*
1462                          * Such "getattr" has been skipped before
1463                          * "sai_ls_all" enabled as above.
1464                          */
1465                         sai->sai_miss_hidden++;
1466                         RETURN(-EAGAIN);
1467                 }
1468         }
1469
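        /*
         * Unplug the statahead window only (normally for a negative dentry):
         * report success without consuming a statahead entry.
         */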
        if (unplug)
                GOTO(out, rc = 1);

        entry = sa_get(sai, &(*dentryp)->d_name);
        if (entry == NULL)
                GOTO(out, rc = -EAGAIN);

        /* if statahead is busy in readdir, help it handle completed entries */
        if (!sa_ready(entry) && sai->sai_in_readpage)
                sa_handle_callback(sai);

        if (!sa_ready(entry)) {
                sai->sai_index_wait = entry->se_index;
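                /*
                 * The async stat for this entry has not completed yet; wait
                 * up to 30 seconds for it, allowing signals to interrupt.
                 */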
                lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
                                        LWI_ON_SIGNAL_NOOP, NULL);
                rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
                if (rc < 0) {
                        /*
                         * The entry is still not ready, so it may be in use
                         * by an inflight statahead RPC; don't free it here.
                         */
                        entry = NULL;
                        GOTO(out, rc = -EAGAIN);
                }
        }

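        /*
         * The async stat succeeded: the sa_entry carries the target inode and
         * the ldlm lock handle from the getattr reply. Try to revalidate that
         * lock and attach the inode to the dentry.
         */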
        if (entry->se_state == SA_ENTRY_SUCC && entry->se_inode != NULL) {
                struct inode *inode = entry->se_inode;
                struct lookup_intent it = { .it_op = IT_GETATTR,
                                            .d.lustre.it_lock_handle =
                                                entry->se_handle };
                __u64 bits;

                rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
                                        ll_inode2fid(inode), &bits);
                if (rc == 1) {
                        if ((*dentryp)->d_inode == NULL) {
                                struct dentry *alias;

                                alias = ll_splice_alias(inode, *dentryp);
                                if (IS_ERR(alias))
                                        GOTO(out, rc = PTR_ERR(alias));
                                *dentryp = alias;
                                /* statahead prepared this inode, transfer inode
                                 * refcount from sa_entry to dentry */
                                entry->se_inode = NULL;
                        } else if ((*dentryp)->d_inode != inode) {
                                /* revalidate case, but the inode was
                                 * recreated, so the dentry is stale */
                                CDEBUG(D_READA,
                                        "%s: stale dentry %.*s inode "
                                        DFID", statahead inode "DFID
                                        "\n",
                                        ll_get_fsname((*dentryp)->d_inode->i_sb,
                                                      NULL, 0),
                                        (*dentryp)->d_name.len,
                                        (*dentryp)->d_name.name,
                                        PFID(ll_inode2fid((*dentryp)->d_inode)),
                                        PFID(ll_inode2fid(inode)));
                                GOTO(out, rc = -ESTALE);
                        }

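                        /*
                         * Mark the dentry valid again only if the LOOKUP
                         * lock bit is still held by this client.
                         */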
                        if ((bits & MDS_INODELOCK_LOOKUP) &&
                            d_lustre_invalid(*dentryp))
                                d_lustre_revalidate(*dentryp);
                        ll_intent_release(&it);
                }
        }
out:
        /*
         * A statahead-cached sa_entry can be used only once and is killed
         * right after use. If lookup/revalidate accessed the statahead cache,
         * record the parent's lli_sa_generation in the dentry's
         * ldd_sa_generation, so that a later stat of this file knows statahead
         * has already been tried for it, see dentry_may_statahead().
         */
        ldd = ll_d2d(*dentryp);
        lli = ll_i2info(dir);
        /* ldd can be NULL if llite lookup failed. */
        if (ldd != NULL)
                ldd->lld_sa_generation = lli->lli_sa_generation;
        sa_put(sai, entry);

        RETURN(rc);
}

/**
 * start the statahead thread
 *
 * \param[in] dir       parent directory
 * \param[in] dentry    dentry that triggers statahead, normally the first
 *                      dirent under @dir
 * \retval              -EAGAIN on success, because by the time this function
 *                      is called we are already in the lookup path, so the
 *                      client should stat this entry itself instead of waiting
 *                      for the statahead thread to do it asynchronously.
 * \retval              any other negative number upon error
 */
static int start_statahead_thread(struct inode *dir, struct dentry *dentry)
{
        struct ll_inode_info *lli = ll_i2info(dir);
        struct ll_statahead_info *sai = NULL;
        struct dentry *parent = dentry->d_parent;
        struct ptlrpc_thread *thread;
        struct l_wait_info lwi = { 0 };
        struct task_struct *task;
        int rc;
        ENTRY;

        /* I am the "lli_opendir_pid" owner, so only I can set "lli_sai". */
        rc = is_first_dirent(dir, dentry);
        if (rc == LS_NOT_FIRST_DE)
                /* It is not an "ls -{a}l" operation, no need for statahead. */
                GOTO(out, rc = -EFAULT);

        sai = ll_sai_alloc(parent);
        if (sai == NULL)
                GOTO(out, rc = -ENOMEM);

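        /*
         * If the first dirent is a hidden one, the scan is likely "ls -a[l]",
         * so stat hidden entries as well.
         */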
        sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);

        /* If the current lli_opendir_key was deauthorized, or the dir was
         * re-opened by another process, don't start statahead, otherwise the
         * newly spawned statahead thread won't be notified to quit. */
        spin_lock(&lli->lli_sa_lock);
        if (unlikely(lli->lli_sai != NULL ||
                     lli->lli_opendir_key == NULL ||
                     lli->lli_opendir_pid != current->pid)) {
                spin_unlock(&lli->lli_sa_lock);
                GOTO(out, rc = -EPERM);
        }
        lli->lli_sai = sai;
        spin_unlock(&lli->lli_sa_lock);

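        /* account one more running statahead instance on this superblock */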
        atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_running);

        CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
               current_pid(), parent->d_name.len, parent->d_name.name);

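        /* spawn the statahead thread, named "ll_sa_<opendir pid>", with the
         * parent dentry as its argument */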
        task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
                           lli->lli_opendir_pid);
        thread = &sai->sai_thread;
        if (IS_ERR(task)) {
                rc = PTR_ERR(task);
                CERROR("can't start ll_sa thread, rc: %d\n", rc);
                GOTO(out, rc);
        }

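        /*
         * Wait until the new thread is either running or already stopped
         * before dropping our reference on the sai.
         */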
        l_wait_event(thread->t_ctl_waitq,
                     thread_is_running(thread) || thread_is_stopped(thread),
                     &lwi);
        ll_sai_put(sai);

        /*
         * We don't stat-ahead for the first dirent since we are already in
         * lookup.
         */
        RETURN(-EAGAIN);

out:
        /* Once starting the statahead thread has failed, disable statahead on
         * this directory so that subsequent stats won't waste time retrying. */
        spin_lock(&lli->lli_sa_lock);
        lli->lli_sa_enabled = 0;
        lli->lli_sai = NULL;
        spin_unlock(&lli->lli_sa_lock);

        if (sai != NULL)
                ll_sai_free(sai);

        RETURN(rc);
}

/**
 * statahead entry function: called when the client does getattr on a file, it
 * starts the statahead thread if this is the first dir entry, otherwise it
 * revalidates the dentry from the statahead cache.
 *
 * \param[in]  dir      parent directory
 * \param[out] dentryp  dentry to getattr
 * \param[in]  unplug   unplug statahead window only (normally for negative
 *                      dentry)
 * \retval              1 on success
 * \retval              0 revalidation from statahead cache failed, caller
 *                      needs to getattr from the server directly
 * \retval              negative number on error, the caller often ignores this
 *                      and then does getattr from the server
 */
int ll_statahead(struct inode *dir, struct dentry **dentryp, bool unplug)
{
        struct ll_statahead_info *sai;

        sai = ll_sai_get(dir);
        if (sai != NULL) {
                int rc;

                rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
                CDEBUG(D_READA, "revalidate statahead %.*s: %d.\n",
                        (*dentryp)->d_name.len, (*dentryp)->d_name.name, rc);
                ll_sai_put(sai);
                return rc;
        }
        return start_statahead_thread(dir, *dentryp);
}