lustre/llite/statahead.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  */
31
32 #include <linux/fs.h>
33 #include <linux/sched.h>
34 #include <linux/kthread.h>
35 #include <linux/mm.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/delay.h>
39
40 #define DEBUG_SUBSYSTEM S_LLITE
41
42 #include <obd_support.h>
43 #include <lustre_dlm.h>
44 #include "llite_internal.h"
45
46 #define SA_OMITTED_ENTRY_MAX 8ULL
47
48 typedef enum {
49         /** negative values are for error cases */
50         SA_ENTRY_INIT = 0,      /** init entry */
51         SA_ENTRY_SUCC = 1,      /** stat succeeded */
52         SA_ENTRY_INVA = 2,      /** invalid entry */
53 } se_state_t;
54
55 /*
56  * sa_entry is not refcounted: the statahead thread allocates it and does the
57  * async stat; the async stat callback ll_statahead_interpret() adds it to
58  * sai_interim_entries, and later the statahead thread calls sa_handle_callback()
59  * to instantiate the entry and move it to sai_entries, after which only the
60  * scanner process can access and free it.
61  */
62 struct sa_entry {
63         /* link into sai_interim_entries or sai_entries */
64         struct list_head        se_list;
65         /* link into sai hash table locally */
66         struct list_head        se_hash;
67         /* entry index in the sai */
68         __u64                   se_index;
69         /* low layer ldlm lock handle */
70         __u64                   se_handle;
71         /* entry status */
72         se_state_t              se_state;
73         /* entry size, contains name */
74         int                     se_size;
75         /* pointer to async getattr enqueue info */
76         struct md_enqueue_info *se_minfo;
77         /* pointer to the async getattr request */
78         struct ptlrpc_request  *se_req;
79         /* pointer to the target inode */
80         struct inode           *se_inode;
81         /* entry name */
82         struct qstr             se_qstr;
83         /* entry fid */
84         struct lu_fid           se_fid;
85 };
86
87 static unsigned int sai_generation;
88 static DEFINE_SPINLOCK(sai_generation_lock);
89
90 static inline int sa_unhashed(struct sa_entry *entry)
91 {
92         return list_empty(&entry->se_hash);
93 }
94
95 /* sa_entry is ready to use */
96 static inline int sa_ready(struct sa_entry *entry)
97 {
98         /* Make sure sa_entry is updated and ready to use */
99         smp_rmb();
100         return (entry->se_state != SA_ENTRY_INIT);
101 }
102
103 /* hash value to put in sai_cache */
104 static inline int sa_hash(int val)
105 {
106         return val & LL_SA_CACHE_MASK;
107 }
108
109 /* hash entry into sai_cache */
110 static inline void
111 sa_rehash(struct ll_statahead_info *sai, struct sa_entry *entry)
112 {
113         int i = sa_hash(entry->se_qstr.hash);
114
115         spin_lock(&sai->sai_cache_lock[i]);
116         list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
117         spin_unlock(&sai->sai_cache_lock[i]);
118 }
119
120 /* unhash entry from sai_cache */
121 static inline void
122 sa_unhash(struct ll_statahead_info *sai, struct sa_entry *entry)
123 {
124         int i = sa_hash(entry->se_qstr.hash);
125
126         spin_lock(&sai->sai_cache_lock[i]);
127         list_del_init(&entry->se_hash);
128         spin_unlock(&sai->sai_cache_lock[i]);
129 }
130
131 static inline int agl_should_run(struct ll_statahead_info *sai,
132                                  struct inode *inode)
133 {
134         return inode && S_ISREG(inode->i_mode) && sai->sai_agl_task;
135 }
136
137 static inline struct ll_inode_info *
138 agl_first_entry(struct ll_statahead_info *sai)
139 {
140         return list_entry(sai->sai_agls.next, struct ll_inode_info,
141                           lli_agl_list);
142 }
143
144 /* statahead window is full */
145 static inline int sa_sent_full(struct ll_statahead_info *sai)
146 {
147         return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
148 }
149
150 /* got async stat replies */
151 static inline int sa_has_callback(struct ll_statahead_info *sai)
152 {
153         return !list_empty(&sai->sai_interim_entries);
154 }
155
156 static inline int agl_list_empty(struct ll_statahead_info *sai)
157 {
158         return list_empty(&sai->sai_agls);
159 }
160
161 /**
162  * The hit rate is considered low when either:
163  * (1) the hit ratio is less than 80%,
164  * or
165  * (2) there are more than 8 consecutive misses.
166  */
167 static inline int sa_low_hit(struct ll_statahead_info *sai)
168 {
169         return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
170                 (sai->sai_consecutive_miss > 8));
171 }
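/*
 * Example of the heuristic above: with sai_hit = 100 and sai_miss = 30 the
 * hit ratio is 100/130 ~= 77% (below 80%) and 100 < 4 * 30, so sa_low_hit()
 * returns true; with sai_hit = 100 and sai_miss = 20 the ratio is ~83% and
 * 100 >= 4 * 20, so it returns false unless sai_consecutive_miss exceeds 8.
 */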
172
173 /*
174  * if the given index falls behind the statahead window by more than
175  * SA_OMITTED_ENTRY_MAX, then it is old.
176  */
177 static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
178 {
179         return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
180                 sai->sai_index);
181 }
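/*
 * Example: with sai_max = 32, SA_OMITTED_ENTRY_MAX = 8 and sai_index = 100,
 * the check reduces to index < 100 - 32 - 8 = 60, so any entry whose index
 * is below 60 is considered old and will be reaped.
 */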
182
183 /* allocate sa_entry and hash it so that the scanner process can find it */
184 static struct sa_entry *
185 sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index,
186          const char *name, int len, const struct lu_fid *fid)
187 {
188         struct ll_inode_info *lli;
189         struct sa_entry *entry;
190         int entry_size;
191         char *dname;
192
193         ENTRY;
194
195         entry_size = sizeof(struct sa_entry) + (len & ~3) + 4;
196         OBD_ALLOC(entry, entry_size);
197         if (unlikely(!entry))
198                 RETURN(ERR_PTR(-ENOMEM));
199
200         CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
201                len, name, entry, index);
202
203         entry->se_index = index;
204
205         entry->se_state = SA_ENTRY_INIT;
206         entry->se_size = entry_size;
207         dname = (char *)entry + sizeof(struct sa_entry);
208         memcpy(dname, name, len);
209         dname[len] = 0;
210         entry->se_qstr.hash = ll_full_name_hash(parent, name, len);
211         entry->se_qstr.len = len;
212         entry->se_qstr.name = dname;
213         entry->se_fid = *fid;
214
215         lli = ll_i2info(sai->sai_dentry->d_inode);
216
217         spin_lock(&lli->lli_sa_lock);
218         INIT_LIST_HEAD(&entry->se_list);
219         sa_rehash(sai, entry);
220         spin_unlock(&lli->lli_sa_lock);
221
222         atomic_inc(&sai->sai_cache_count);
223
224         RETURN(entry);
225 }
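/*
 * Note on entry_size above: (len & ~3) + 4 rounds the name buffer up to a
 * multiple of 4 that always leaves room for the trailing NUL, e.g.
 * len = 3 -> 4 bytes, len = 4 -> 8 bytes, len = 5 -> 8 bytes.
 */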
226
227 /* free sa_entry, which should have been unhashed and not in any list */
228 static void sa_free(struct ll_statahead_info *sai, struct sa_entry *entry)
229 {
230         CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
231                entry->se_qstr.len, entry->se_qstr.name, entry,
232                entry->se_index);
233
234         LASSERT(list_empty(&entry->se_list));
235         LASSERT(sa_unhashed(entry));
236
237         OBD_FREE(entry, entry->se_size);
238         atomic_dec(&sai->sai_cache_count);
239 }
240
241 /*
242  * find sa_entry by name; used by the directory scanner. No lock is needed
243  * because only the scanner can remove the entry from the cache.
244  */
245 static struct sa_entry *
246 sa_get(struct ll_statahead_info *sai, const struct qstr *qstr)
247 {
248         struct sa_entry *entry;
249         int i = sa_hash(qstr->hash);
250
251         list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
252                 if (entry->se_qstr.hash == qstr->hash &&
253                     entry->se_qstr.len == qstr->len &&
254                     memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
255                         return entry;
256         }
257         return NULL;
258 }
259
260 /* unhash and unlink sa_entry, and then free it */
261 static inline void
262 sa_kill(struct ll_statahead_info *sai, struct sa_entry *entry)
263 {
264         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
265
266         LASSERT(!sa_unhashed(entry));
267         LASSERT(!list_empty(&entry->se_list));
268         LASSERT(sa_ready(entry));
269
270         sa_unhash(sai, entry);
271
272         spin_lock(&lli->lli_sa_lock);
273         list_del_init(&entry->se_list);
274         spin_unlock(&lli->lli_sa_lock);
275
276         iput(entry->se_inode);
277
278         sa_free(sai, entry);
279 }
280
281 /* called by scanner after use, sa_entry will be killed */
282 static void
283 sa_put(struct ll_statahead_info *sai, struct sa_entry *entry)
284 {
285         struct sa_entry *tmp, *next;
286
287         if (entry && entry->se_state == SA_ENTRY_SUCC) {
288                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
289
290                 sai->sai_hit++;
291                 sai->sai_consecutive_miss = 0;
292                 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
293         } else {
294                 sai->sai_miss++;
295                 sai->sai_consecutive_miss++;
296         }
297
298         if (entry)
299                 sa_kill(sai, entry);
300
301         /*
302          * kill old completed entries; only the scanner process does this, so
303          * no lock is needed
304          */
305         list_for_each_entry_safe(tmp, next, &sai->sai_entries, se_list) {
306                 if (!is_omitted_entry(sai, tmp->se_index))
307                         break;
308                 sa_kill(sai, tmp);
309         }
310 }
311
312 /*
313  * update state and insert the entry into sai_entries sorted by index; return
314  * true if the scanner is waiting on this entry.
315  */
316 static bool
317 __sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
318 {
319         struct sa_entry *se;
320         struct list_head *pos = &sai->sai_entries;
321         __u64 index = entry->se_index;
322
323         LASSERT(!sa_ready(entry));
324         LASSERT(list_empty(&entry->se_list));
325
326         list_for_each_entry_reverse(se, &sai->sai_entries, se_list) {
327                 if (se->se_index < entry->se_index) {
328                         pos = &se->se_list;
329                         break;
330                 }
331         }
332         list_add(&entry->se_list, pos);
333         /*
334          * LU-9210: ll_statahead_interpret must be able to see this before
335          * we wake it up
336          */
337         smp_store_release(&entry->se_state,
338                           ret < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
339
340         return (index == sai->sai_index_wait);
341 }
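/*
 * Keeping sai_entries sorted by se_index lets sa_put() reap old entries by
 * walking from the list head and stopping at the first entry that is still
 * inside the statahead window. The smp_store_release() of se_state pairs
 * with the smp_load_acquire() in revalidate_statahead_dentry() and the
 * smp_rmb() in sa_ready().
 */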
342
343 /* finish async stat RPC arguments */
344 static void sa_fini_data(struct md_enqueue_info *minfo)
345 {
346         ll_unlock_md_op_lsm(&minfo->mi_data);
347         iput(minfo->mi_dir);
348         OBD_FREE_PTR(minfo);
349 }
350
351 static int ll_statahead_interpret(struct ptlrpc_request *req,
352                                   struct md_enqueue_info *minfo, int rc);
353
354 /*
355  * prepare arguments for async stat RPC.
356  */
357 static struct md_enqueue_info *
358 sa_prep_data(struct inode *dir, struct inode *child, struct sa_entry *entry)
359 {
360         struct md_enqueue_info   *minfo;
361         struct ldlm_enqueue_info *einfo;
362         struct md_op_data        *op_data;
363
364         OBD_ALLOC_PTR(minfo);
365         if (!minfo)
366                 return ERR_PTR(-ENOMEM);
367
368         op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child,
369                                      entry->se_qstr.name, entry->se_qstr.len, 0,
370                                      LUSTRE_OPC_ANY, NULL);
371         if (IS_ERR(op_data)) {
372                 OBD_FREE_PTR(minfo);
373                 return (struct md_enqueue_info *)op_data;
374         }
375
376         if (!child)
377                 op_data->op_fid2 = entry->se_fid;
378
379         minfo->mi_it.it_op = IT_GETATTR;
380         minfo->mi_dir = igrab(dir);
381         minfo->mi_cb = ll_statahead_interpret;
382         minfo->mi_cbdata = entry;
383
384         einfo = &minfo->mi_einfo;
385         einfo->ei_type   = LDLM_IBITS;
386         einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
387         einfo->ei_cb_bl  = ll_md_blocking_ast;
388         einfo->ei_cb_cp  = ldlm_completion_ast;
389         einfo->ei_cb_gl  = NULL;
390         einfo->ei_cbdata = NULL;
391
392         return minfo;
393 }
394
395 /*
396  * release resources used in the async stat RPC, update the entry state, and
397  * wake up the scanner process if it is waiting on this entry.
398  */
399 static void
400 sa_make_ready(struct ll_statahead_info *sai, struct sa_entry *entry, int ret)
401 {
402         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
403         struct md_enqueue_info *minfo = entry->se_minfo;
404         struct ptlrpc_request *req = entry->se_req;
405         bool wakeup;
406
407         /* release resources used in RPC */
408         if (minfo) {
409                 entry->se_minfo = NULL;
410                 ll_intent_release(&minfo->mi_it);
411                 sa_fini_data(minfo);
412         }
413
414         if (req) {
415                 entry->se_req = NULL;
416                 ptlrpc_req_finished(req);
417         }
418
419         spin_lock(&lli->lli_sa_lock);
420         wakeup = __sa_make_ready(sai, entry, ret);
421         spin_unlock(&lli->lli_sa_lock);
422
423         if (wakeup)
424                 wake_up(&sai->sai_waitq);
425 }
426
427 /* insert inode into the list of sai_agls */
428 static void ll_agl_add(struct ll_statahead_info *sai,
429                        struct inode *inode, int index)
430 {
431         struct ll_inode_info *child  = ll_i2info(inode);
432         struct ll_inode_info *parent = ll_i2info(sai->sai_dentry->d_inode);
433
434         spin_lock(&child->lli_agl_lock);
435         if (child->lli_agl_index == 0) {
436                 child->lli_agl_index = index;
437                 spin_unlock(&child->lli_agl_lock);
438
439                 LASSERT(list_empty(&child->lli_agl_list));
440
441                 spin_lock(&parent->lli_agl_lock);
442                 /* Re-check under the lock */
443                 if (agl_should_run(sai, inode)) {
444                         if (agl_list_empty(sai))
445                                 wake_up_process(sai->sai_agl_task);
446                         igrab(inode);
447                         list_add_tail(&child->lli_agl_list, &sai->sai_agls);
448                 } else
449                         child->lli_agl_index = 0;
450                 spin_unlock(&parent->lli_agl_lock);
451         } else {
452                 spin_unlock(&child->lli_agl_lock);
453         }
454 }
455
456 /* allocate sai */
457 static struct ll_statahead_info *ll_sai_alloc(struct dentry *dentry)
458 {
459         struct ll_statahead_info *sai;
460         struct ll_inode_info *lli = ll_i2info(dentry->d_inode);
461         int i;
462
463         ENTRY;
464
465         OBD_ALLOC_PTR(sai);
466         if (!sai)
467                 RETURN(NULL);
468
469         sai->sai_dentry = dget(dentry);
470         atomic_set(&sai->sai_refcount, 1);
471         sai->sai_max = LL_SA_RPC_MIN;
472         sai->sai_index = 1;
473         init_waitqueue_head(&sai->sai_waitq);
474
475         INIT_LIST_HEAD(&sai->sai_interim_entries);
476         INIT_LIST_HEAD(&sai->sai_entries);
477         INIT_LIST_HEAD(&sai->sai_agls);
478
479         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
480                 INIT_LIST_HEAD(&sai->sai_cache[i]);
481                 spin_lock_init(&sai->sai_cache_lock[i]);
482         }
483         atomic_set(&sai->sai_cache_count, 0);
484
485         spin_lock(&sai_generation_lock);
486         lli->lli_sa_generation = ++sai_generation;
487         if (unlikely(sai_generation == 0))
488                 lli->lli_sa_generation = ++sai_generation;
489         spin_unlock(&sai_generation_lock);
490
491         RETURN(sai);
492 }
493
494 /* free sai */
495 static inline void ll_sai_free(struct ll_statahead_info *sai)
496 {
497         LASSERT(sai->sai_dentry != NULL);
498         dput(sai->sai_dentry);
499         OBD_FREE_PTR(sai);
500 }
501
502 /*
503  * take refcount of sai if sai for @dir exists, which means statahead is on for
504  * this directory.
505  */
506 static inline struct ll_statahead_info *ll_sai_get(struct inode *dir)
507 {
508         struct ll_inode_info *lli = ll_i2info(dir);
509         struct ll_statahead_info *sai = NULL;
510
511         spin_lock(&lli->lli_sa_lock);
512         sai = lli->lli_sai;
513         if (sai)
514                 atomic_inc(&sai->sai_refcount);
515         spin_unlock(&lli->lli_sa_lock);
516
517         return sai;
518 }
519
520 /*
521  * put sai refcount after use, if refcount reaches zero, free sai and sa_entries
522  * attached to it.
523  */
524 static void ll_sai_put(struct ll_statahead_info *sai)
525 {
526         struct ll_inode_info *lli = ll_i2info(sai->sai_dentry->d_inode);
527
528         if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
529                 struct sa_entry *entry, *next;
530                 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_dentry->d_inode);
531
532                 lli->lli_sai = NULL;
533                 spin_unlock(&lli->lli_sa_lock);
534
535                 LASSERT(!sai->sai_task);
536                 LASSERT(!sai->sai_agl_task);
537                 LASSERT(sai->sai_sent == sai->sai_replied);
538                 LASSERT(!sa_has_callback(sai));
539
540                 list_for_each_entry_safe(entry, next, &sai->sai_entries,
541                                          se_list)
542                         sa_kill(sai, entry);
543
544                 LASSERT(atomic_read(&sai->sai_cache_count) == 0);
545                 LASSERT(agl_list_empty(sai));
546
547                 ll_sai_free(sai);
548                 atomic_dec(&sbi->ll_sa_running);
549         }
550 }
551
552 /* Do NOT forget to drop the inode refcount taken when it was added to sai_agls. */
553 static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
554 {
555         struct ll_inode_info *lli = ll_i2info(inode);
556         u64 index = lli->lli_agl_index;
557         ktime_t expire;
558         int rc;
559
560         ENTRY;
561
562         LASSERT(list_empty(&lli->lli_agl_list));
563
564         /* AGL may fall behind statahead by one entry */
565         if (is_omitted_entry(sai, index + 1)) {
566                 lli->lli_agl_index = 0;
567                 iput(inode);
568                 RETURN_EXIT;
569         }
570
571         /*
572          * In case of restore, the MDT has the right size and has already
573          * sent it back without granting the layout lock; the inode is
574          * up-to-date, so AGL (async glimpse lock) is useless.
575          * Also, a glimpse needs the layout: during a running restore the
576          * MDT holds the layout lock, so the glimpse would block until the
577          * end of the restore (statahead/agl will block).
578          */
579         if (test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
580                 lli->lli_agl_index = 0;
581                 iput(inode);
582                 RETURN_EXIT;
583         }
584
585         /* Someone is in glimpse (sync or async), do nothing. */
586         rc = down_write_trylock(&lli->lli_glimpse_sem);
587         if (rc == 0) {
588                 lli->lli_agl_index = 0;
589                 iput(inode);
590                 RETURN_EXIT;
591         }
592
593         /*
594          * Someone triggered a glimpse within the last second.
595          * 1) The former glimpse succeeded with a glimpse lock granted by the
596          *    OST; if the lock is still cached on the client, AGL needs to do
597          *    nothing. If it was cancelled by another client, AGL may not be
598          *    able to obtain a new lock, since AGL triggers no glimpse callback.
599          * 2) The former glimpse succeeded, but the OST did not grant a glimpse
600          *    lock. In that case it is quite possible that the OST will not
601          *    grant a glimpse lock for AGL either.
602          * 3) The former glimpse failed; compared with the other two cases this
603          *    is relatively rare. AGL can ignore it without much impact on
604          *    performance.
605          */
606         expire = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
607         if (ktime_to_ns(lli->lli_glimpse_time) &&
608             ktime_before(expire, lli->lli_glimpse_time)) {
609                 up_write(&lli->lli_glimpse_sem);
610                 lli->lli_agl_index = 0;
611                 iput(inode);
612                 RETURN_EXIT;
613         }
614
615         CDEBUG(D_READA,
616                "Handling (init) async glimpse: inode = " DFID", idx = %llu\n",
617                PFID(&lli->lli_fid), index);
618
619         cl_agl(inode);
620         lli->lli_agl_index = 0;
621         lli->lli_glimpse_time = ktime_get();
622         up_write(&lli->lli_glimpse_sem);
623
624         CDEBUG(D_READA,
625                "Handled (init) async glimpse: inode= " DFID", idx = %llu, rc = %d\n",
626                PFID(&lli->lli_fid), index, rc);
627
628         iput(inode);
629
630         EXIT;
631 }
632
633 /*
634  * prepare the inode for the sa_entry and add it to the AGL list; the sa_entry
635  * is then ready to be used by the scanner process.
636  */
637 static void sa_instantiate(struct ll_statahead_info *sai,
638                            struct sa_entry *entry)
639 {
640         struct inode *dir = sai->sai_dentry->d_inode;
641         struct inode *child;
642         struct md_enqueue_info *minfo;
643         struct lookup_intent *it;
644         struct ptlrpc_request *req;
645         struct mdt_body *body;
646         int rc = 0;
647
648         ENTRY;
649
650         LASSERT(entry->se_handle != 0);
651
652         minfo = entry->se_minfo;
653         it = &minfo->mi_it;
654         req = entry->se_req;
655         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
656         if (!body)
657                 GOTO(out, rc = -EFAULT);
658
659         child = entry->se_inode;
660         /* revalidate; unlinked and re-created with the same name */
661         if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->mbo_fid1))) {
662                 if (child) {
663                         entry->se_inode = NULL;
664                         iput(child);
665                 }
666                 /* The mdt_body is invalid. Skip this entry */
667                 GOTO(out, rc = -EAGAIN);
668         }
669
670         it->it_lock_handle = entry->se_handle;
671         rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
672         if (rc != 1)
673                 GOTO(out, rc = -EAGAIN);
674
675         rc = ll_prep_inode(&child, &req->rq_pill, dir->i_sb, it);
676         if (rc)
677                 GOTO(out, rc);
678
679         CDEBUG(D_READA, "%s: setting %.*s"DFID" l_data to inode %p\n",
680                ll_i2sbi(dir)->ll_fsname, entry->se_qstr.len,
681                entry->se_qstr.name, PFID(ll_inode2fid(child)), child);
682         ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
683
684         entry->se_inode = child;
685
686         if (agl_should_run(sai, child))
687                 ll_agl_add(sai, child, entry->se_index);
688
689         EXIT;
690
691 out:
692         /*
693          * sa_make_ready() will drop the ldlm ibits lock refcount by calling
694          * ll_intent_drop_lock() even on failure. Do not worry about
695          * calling ll_intent_drop_lock() more than once.
696          */
697         sa_make_ready(sai, entry, rc);
698 }
699
700 /* once there are async stat replies, instantiate sa_entry from replies */
701 static void sa_handle_callback(struct ll_statahead_info *sai)
702 {
703         struct ll_inode_info *lli;
704
705         lli = ll_i2info(sai->sai_dentry->d_inode);
706
707         spin_lock(&lli->lli_sa_lock);
708         while (sa_has_callback(sai)) {
709                 struct sa_entry *entry;
710
711                 entry = list_entry(sai->sai_interim_entries.next,
712                                    struct sa_entry, se_list);
713                 list_del_init(&entry->se_list);
714                 spin_unlock(&lli->lli_sa_lock);
715
716                 sa_instantiate(sai, entry);
717                 spin_lock(&lli->lli_sa_lock);
718         }
719         spin_unlock(&lli->lli_sa_lock);
720 }
721
722 /*
723  * callback for the async stat RPC. Because this is called in ptlrpcd context,
724  * we only put the sa_entry on sai_interim_entries and wake up the statahead
725  * thread, which prepares the inode and instantiates the sa_entry later.
726  */
727 static int ll_statahead_interpret(struct ptlrpc_request *req,
728                                   struct md_enqueue_info *minfo, int rc)
729 {
730         struct lookup_intent *it = &minfo->mi_it;
731         struct inode *dir = minfo->mi_dir;
732         struct ll_inode_info *lli = ll_i2info(dir);
733         struct ll_statahead_info *sai = lli->lli_sai;
734         struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
735         __u64 handle = 0;
736
737         ENTRY;
738
739         if (it_disposition(it, DISP_LOOKUP_NEG))
740                 rc = -ENOENT;
741
742         /*
743          * because the statahead thread waits for all inflight RPCs to finish,
744          * sai is always valid here, no need to take a refcount
745          */
746         LASSERT(sai != NULL);
747         LASSERT(entry != NULL);
748
749         CDEBUG(D_READA, "sa_entry %.*s rc %d\n",
750                entry->se_qstr.len, entry->se_qstr.name, rc);
751
752         if (rc != 0) {
753                 ll_intent_release(it);
754                 sa_fini_data(minfo);
755         } else {
756                 /*
757                  * release ibits lock ASAP to avoid deadlock when statahead
758                  * thread enqueues lock on parent in readdir and another
759                  * process enqueues lock on child with parent lock held, eg.
760                  * unlink.
761                  */
762                 handle = it->it_lock_handle;
763                 ll_intent_drop_lock(it);
764                 ll_unlock_md_op_lsm(&minfo->mi_data);
765         }
766
767         spin_lock(&lli->lli_sa_lock);
768         if (rc != 0) {
769                 if (__sa_make_ready(sai, entry, rc))
770                         wake_up(&sai->sai_waitq);
771         } else {
772                 int first = 0;
773
774                 entry->se_minfo = minfo;
775                 entry->se_req = ptlrpc_request_addref(req);
776                 /*
777                  * Release the async ibits lock ASAP to avoid deadlock when
778                  * the statahead thread tries to enqueue a lock on the parent
779                  * for readpage and another process tries to enqueue a lock
780                  * on the child with the parent's lock held, e.g. unlink.
781                  */
782                 entry->se_handle = handle;
783                 if (!sa_has_callback(sai))
784                         first = 1;
785
786                 list_add_tail(&entry->se_list, &sai->sai_interim_entries);
787                 if (first && sai->sai_task)
788                         wake_up_process(sai->sai_task);
789         }
790         sai->sai_replied++;
791
792         spin_unlock(&lli->lli_sa_lock);
793
794         RETURN(rc);
795 }
796
797 /* async stat for file not found in dcache */
798 static int sa_lookup(struct inode *dir, struct sa_entry *entry)
799 {
800         struct md_enqueue_info   *minfo;
801         int                       rc;
802
803         ENTRY;
804
805         minfo = sa_prep_data(dir, NULL, entry);
806         if (IS_ERR(minfo))
807                 RETURN(PTR_ERR(minfo));
808
809         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
810         if (rc < 0)
811                 sa_fini_data(minfo);
812
813         RETURN(rc);
814 }
815
816 /**
817  * async stat for file found in dcache, similar to .revalidate
818  *
819  * \retval      1 dentry valid, no RPC sent
820  * \retval      0 dentry invalid, will send async stat RPC
821  * \retval      negative number upon error
822  */
823 static int sa_revalidate(struct inode *dir, struct sa_entry *entry,
824                          struct dentry *dentry)
825 {
826         struct inode *inode = dentry->d_inode;
827         struct lookup_intent it = { .it_op = IT_GETATTR,
828                                     .it_lock_handle = 0 };
829         struct md_enqueue_info *minfo;
830         int rc;
831
832         ENTRY;
833
834         if (unlikely(!inode))
835                 RETURN(1);
836
837         if (d_mountpoint(dentry))
838                 RETURN(1);
839
840         minfo = sa_prep_data(dir, inode, entry);
841         if (IS_ERR(minfo))
842                 RETURN(PTR_ERR(minfo));
843
844         entry->se_inode = igrab(inode);
845         rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
846                                 NULL);
847         if (rc == 1) {
848                 entry->se_handle = it.it_lock_handle;
849                 ll_intent_release(&it);
850                 sa_fini_data(minfo);
851                 RETURN(1);
852         }
853
854         rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo);
855         if (rc < 0) {
856                 entry->se_inode = NULL;
857                 iput(inode);
858                 sa_fini_data(minfo);
859         }
860
861         RETURN(rc);
862 }
863
864 /* async stat for file with @name */
865 static void sa_statahead(struct dentry *parent, const char *name, int len,
866                          const struct lu_fid *fid)
867 {
868         struct inode *dir = parent->d_inode;
869         struct ll_inode_info *lli = ll_i2info(dir);
870         struct ll_statahead_info *sai = lli->lli_sai;
871         struct dentry *dentry = NULL;
872         struct sa_entry *entry;
873         int rc;
874
875         ENTRY;
876
877         entry = sa_alloc(parent, sai, sai->sai_index, name, len, fid);
878         if (IS_ERR(entry))
879                 RETURN_EXIT;
880
881         dentry = d_lookup(parent, &entry->se_qstr);
882         if (!dentry) {
883                 rc = sa_lookup(dir, entry);
884         } else {
885                 rc = sa_revalidate(dir, entry, dentry);
886                 if (rc == 1 && agl_should_run(sai, dentry->d_inode))
887                         ll_agl_add(sai, dentry->d_inode, entry->se_index);
888         }
889
890         if (dentry)
891                 dput(dentry);
892
893         if (rc != 0)
894                 sa_make_ready(sai, entry, rc);
895         else
896                 sai->sai_sent++;
897
898         sai->sai_index++;
899
900         EXIT;
901 }
902
903 /* async glimpse (agl) thread main function */
904 static int ll_agl_thread(void *arg)
905 {
906         struct dentry *parent = (struct dentry *)arg;
907         struct inode *dir = parent->d_inode;
908         struct ll_inode_info *plli = ll_i2info(dir);
909         struct ll_inode_info *clli;
910         /*
911          * We already own this reference, so it is safe to take it
912          * without a lock.
913          */
914         struct ll_statahead_info *sai = plli->lli_sai;
915
916         ENTRY;
917
918         CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
919                sai, parent);
920
921         while (({set_current_state(TASK_IDLE);
922                  !kthread_should_stop(); })) {
923                 spin_lock(&plli->lli_agl_lock);
924                 if (!agl_list_empty(sai)) {
925                         __set_current_state(TASK_RUNNING);
926                         clli = agl_first_entry(sai);
927                         list_del_init(&clli->lli_agl_list);
928                         spin_unlock(&plli->lli_agl_lock);
929                         ll_agl_trigger(&clli->lli_vfs_inode, sai);
930                         cond_resched();
931                 } else {
932                         spin_unlock(&plli->lli_agl_lock);
933                         schedule();
934                 }
935         }
936         __set_current_state(TASK_RUNNING);
937         RETURN(0);
938 }
939
940 static void ll_stop_agl(struct ll_statahead_info *sai)
941 {
942         struct dentry *parent = sai->sai_dentry;
943         struct ll_inode_info *plli = ll_i2info(parent->d_inode);
944         struct ll_inode_info *clli;
945         struct task_struct *agl_task;
946
947         spin_lock(&plli->lli_agl_lock);
948         agl_task = sai->sai_agl_task;
949         sai->sai_agl_task = NULL;
950         spin_unlock(&plli->lli_agl_lock);
951         if (!agl_task)
952                 return;
953
954         CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
955                sai, (unsigned int)agl_task->pid);
956         kthread_stop(agl_task);
957
958         spin_lock(&plli->lli_agl_lock);
959         while (!agl_list_empty(sai)) {
960                 clli = agl_first_entry(sai);
961                 list_del_init(&clli->lli_agl_list);
962                 spin_unlock(&plli->lli_agl_lock);
963                 clli->lli_agl_index = 0;
964                 iput(&clli->lli_vfs_inode);
965                 spin_lock(&plli->lli_agl_lock);
966         }
967         spin_unlock(&plli->lli_agl_lock);
968         CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
969                sai, parent);
970         ll_sai_put(sai);
971 }
972
973 /* start agl thread */
974 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
975 {
976         int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
977         struct ll_inode_info *plli;
978         struct task_struct *task;
979
980         ENTRY;
981
982         CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
983                sai, parent);
984
985         plli = ll_i2info(parent->d_inode);
986         task = kthread_create_on_node(ll_agl_thread, parent, node, "ll_agl_%d",
987                                       plli->lli_opendir_pid);
988         if (IS_ERR(task)) {
989                 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
990                 RETURN_EXIT;
991         }
992         sai->sai_agl_task = task;
993         atomic_inc(&ll_i2sbi(d_inode(parent))->ll_agl_total);
994         /* Get an extra reference that the thread holds */
995         ll_sai_get(d_inode(parent));
996
997         wake_up_process(task);
998
999         EXIT;
1000 }
1001
1002 /* statahead thread main function */
1003 static int ll_statahead_thread(void *arg)
1004 {
1005         struct dentry *parent = (struct dentry *)arg;
1006         struct inode *dir = parent->d_inode;
1007         struct ll_inode_info *lli = ll_i2info(dir);
1008         struct ll_sb_info *sbi = ll_i2sbi(dir);
1009         struct ll_statahead_info *sai = lli->lli_sai;
1010         int first = 0;
1011         struct md_op_data *op_data;
1012         struct page *page = NULL;
1013         __u64 pos = 0;
1014         int rc = 0;
1015
1016         ENTRY;
1017
1018         CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
1019                sai, parent);
1020
1021         OBD_ALLOC_PTR(op_data);
1022         if (!op_data)
1023                 GOTO(out, rc = -ENOMEM);
1024
1025         while (pos != MDS_DIR_END_OFF && sai->sai_task) {
1026                 struct lu_dirpage *dp;
1027                 struct lu_dirent  *ent;
1028
1029                 op_data = ll_prep_md_op_data(op_data, dir, dir, NULL, 0, 0,
1030                                              LUSTRE_OPC_ANY, dir);
1031                 if (IS_ERR(op_data)) {
1032                         rc = PTR_ERR(op_data);
1033                         break;
1034                 }
1035
1036                 sai->sai_in_readpage = 1;
1037                 page = ll_get_dir_page(dir, op_data, pos);
1038                 ll_unlock_md_op_lsm(op_data);
1039                 sai->sai_in_readpage = 0;
1040                 if (IS_ERR(page)) {
1041                         rc = PTR_ERR(page);
1042                         CDEBUG(D_READA,
1043                                "error reading dir "DFID" at %llu /%llu opendir_pid = %u: rc = %d\n",
1044                                PFID(ll_inode2fid(dir)), pos, sai->sai_index,
1045                                lli->lli_opendir_pid, rc);
1046                         break;
1047                 }
1048
1049                 dp = page_address(page);
1050                 for (ent = lu_dirent_start(dp);
1051                      ent != NULL && sai->sai_task &&
1052                      !sa_low_hit(sai);
1053                      ent = lu_dirent_next(ent)) {
1054                         __u64 hash;
1055                         int namelen;
1056                         char *name;
1057                         struct lu_fid fid;
1058
1059                         hash = le64_to_cpu(ent->lde_hash);
1060                         if (unlikely(hash < pos))
1061                                 /*
1062                                  * Skip until we find target hash value.
1063                                  */
1064                                 continue;
1065
1066                         namelen = le16_to_cpu(ent->lde_namelen);
1067                         if (unlikely(namelen == 0))
1068                                 /*
1069                                  * Skip dummy record.
1070                                  */
1071                                 continue;
1072
1073                         name = ent->lde_name;
1074                         if (name[0] == '.') {
1075                                 if (namelen == 1) {
1076                                         /*
1077                                          * skip "."
1078                                          */
1079                                         continue;
1080                                 } else if (name[1] == '.' && namelen == 2) {
1081                                         /*
1082                                          * skip ".."
1083                                          */
1084                                         continue;
1085                                 } else if (!sai->sai_ls_all) {
1086                                         /*
1087                                          * skip hidden files.
1088                                          */
1089                                         sai->sai_skip_hidden++;
1090                                         continue;
1091                                 }
1092                         }
1093
1094                         /*
1095                          * don't stat-ahead first entry.
1096                          */
1097                         if (unlikely(++first == 1))
1098                                 continue;
1099
1100                         fid_le_to_cpu(&fid, &ent->lde_fid);
1101
1102                         while (({set_current_state(TASK_IDLE);
1103                                  sai->sai_task; })) {
1104                                 if (sa_has_callback(sai)) {
1105                                         __set_current_state(TASK_RUNNING);
1106                                         sa_handle_callback(sai);
1107                                 }
1108
1109                                 spin_lock(&lli->lli_agl_lock);
1110                                 while (sa_sent_full(sai) &&
1111                                        !agl_list_empty(sai)) {
1112                                         struct ll_inode_info *clli;
1113
1114                                         __set_current_state(TASK_RUNNING);
1115                                         clli = agl_first_entry(sai);
1116                                         list_del_init(&clli->lli_agl_list);
1117                                         spin_unlock(&lli->lli_agl_lock);
1118
1119                                         ll_agl_trigger(&clli->lli_vfs_inode,
1120                                                        sai);
1121                                         cond_resched();
1122                                         spin_lock(&lli->lli_agl_lock);
1123                                 }
1124                                 spin_unlock(&lli->lli_agl_lock);
1125
1126                                 if (!sa_sent_full(sai))
1127                                         break;
1128                                 schedule();
1129                         }
1130                         __set_current_state(TASK_RUNNING);
1131
1132                         sa_statahead(parent, name, namelen, &fid);
1133                 }
1134
1135                 pos = le64_to_cpu(dp->ldp_hash_end);
1136                 ll_release_page(dir, page,
1137                                 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1138
1139                 if (sa_low_hit(sai)) {
1140                         rc = -EFAULT;
1141                         atomic_inc(&sbi->ll_sa_wrong);
1142                         CDEBUG(D_READA,
1143                                "Statahead for dir "DFID" hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n",
1144                                PFID(&lli->lli_fid), sai->sai_hit,
1145                                sai->sai_miss, sai->sai_sent,
1146                                sai->sai_replied, current->pid);
1147                         break;
1148                 }
1149         }
1150         ll_finish_md_op_data(op_data);
1151
1152         if (rc < 0) {
1153                 spin_lock(&lli->lli_sa_lock);
1154                 sai->sai_task = NULL;
1155                 lli->lli_sa_enabled = 0;
1156                 spin_unlock(&lli->lli_sa_lock);
1157         }
1158
1159         /*
1160          * statahead is finished, but statahead entries still need to be cached;
1161          * wait for file release to stop this thread.
1162          */
1163         while (({set_current_state(TASK_IDLE);
1164                  sai->sai_task; })) {
1165                 if (sa_has_callback(sai)) {
1166                         __set_current_state(TASK_RUNNING);
1167                         sa_handle_callback(sai);
1168                 } else {
1169                         schedule();
1170                 }
1171         }
1172         __set_current_state(TASK_RUNNING);
1173
1174         EXIT;
1175 out:
1176         ll_stop_agl(sai);
1177
1178         /*
1179          * wait for inflight statahead RPCs to finish; only then can sai be freed
1180          * safely, because the statahead RPC callbacks access sai data
1181          */
1182         while (sai->sai_sent != sai->sai_replied)
1183                 /* in case we're not woken up, timeout wait */
1184                 msleep(125);
1185
1186         /* release resources held by statahead RPCs */
1187         sa_handle_callback(sai);
1188
1189         CDEBUG(D_READA, "%s: statahead thread stopped: sai %p, parent %pd\n",
1190                sbi->ll_fsname, sai, parent);
1191
1192         spin_lock(&lli->lli_sa_lock);
1193         sai->sai_task = NULL;
1194         spin_unlock(&lli->lli_sa_lock);
1195         wake_up(&sai->sai_waitq);
1196
1197         ll_sai_put(sai);
1198
1199         return rc;
1200 }
1201
1202 /* authorize opened dir handle @key to statahead */
1203 void ll_authorize_statahead(struct inode *dir, void *key)
1204 {
1205         struct ll_inode_info *lli = ll_i2info(dir);
1206
1207         spin_lock(&lli->lli_sa_lock);
1208         if (!lli->lli_opendir_key && !lli->lli_sai) {
1209                 /*
1210                  * if lli_sai is not NULL, it means previous statahead is not
1211                  * finished yet, we'd better not start a new statahead for now.
1212                  */
1213                 LASSERT(lli->lli_opendir_pid == 0);
1214                 lli->lli_opendir_key = key;
1215                 lli->lli_opendir_pid = current->pid;
1216                 lli->lli_sa_enabled = 1;
1217         }
1218         spin_unlock(&lli->lli_sa_lock);
1219 }
1220
1221 /*
1222  * deauthorize opened dir handle @key to statahead, and notify statahead thread
1223  * to quit if it's running.
1224  */
1225 void ll_deauthorize_statahead(struct inode *dir, void *key)
1226 {
1227         struct ll_inode_info *lli = ll_i2info(dir);
1228         struct ll_statahead_info *sai;
1229
1230         LASSERT(lli->lli_opendir_key == key);
1231         LASSERT(lli->lli_opendir_pid != 0);
1232
1233         CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
1234                PFID(&lli->lli_fid));
1235
1236         spin_lock(&lli->lli_sa_lock);
1237         lli->lli_opendir_key = NULL;
1238         lli->lli_opendir_pid = 0;
1239         lli->lli_sa_enabled = 0;
1240         sai = lli->lli_sai;
1241         if (sai && sai->sai_task) {
1242                 /*
1243                  * statahead thread may not have quit yet because it needs to
1244                  * cache entries; now it is time to tell it to quit.
1245                  *
1246                  * wake_up_process() provides the necessary barriers
1247                  * to pair with set_current_state().
1248                  */
1249                 struct task_struct *task = sai->sai_task;
1250
1251                 sai->sai_task = NULL;
1252                 wake_up_process(task);
1253         }
1254         spin_unlock(&lli->lli_sa_lock);
1255 }
1256
1257 enum {
1258         /**
1259          * not first dirent, or is "."
1260          */
1261         LS_NOT_FIRST_DE = 0,
1262         /**
1263          * the first non-hidden dirent
1264          */
1265         LS_FIRST_DE,
1266         /**
1267          * the first hidden dirent, that is "."
1268          */
1269         LS_FIRST_DOT_DE
1270 };
1271
1272 /* file is first dirent under @dir */
1273 static int is_first_dirent(struct inode *dir, struct dentry *dentry)
1274 {
1275         struct qstr          *target = &dentry->d_name;
1276         struct md_op_data    *op_data;
1277         int                   dot_de;
1278         struct page          *page = NULL;
1279         int                   rc = LS_NOT_FIRST_DE;
1280         __u64                 pos = 0;
1281
1282         ENTRY;
1283
1284         op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
1285                                      LUSTRE_OPC_ANY, dir);
1286         if (IS_ERR(op_data))
1287                 RETURN(PTR_ERR(op_data));
1288         /*
1289          * FIXME: choose the start offset of the readdir
1290          */
1291
1292         page = ll_get_dir_page(dir, op_data, 0);
1293
1294         while (1) {
1295                 struct lu_dirpage *dp;
1296                 struct lu_dirent  *ent;
1297
1298                 if (IS_ERR(page)) {
1299                         struct ll_inode_info *lli = ll_i2info(dir);
1300
1301                         rc = PTR_ERR(page);
1302                         CERROR("%s: reading dir "DFID" at %llu opendir_pid = %u : rc = %d\n",
1303                                ll_i2sbi(dir)->ll_fsname,
1304                                PFID(ll_inode2fid(dir)), pos,
1305                                lli->lli_opendir_pid, rc);
1306                         break;
1307                 }
1308
1309                 dp = page_address(page);
1310                 for (ent = lu_dirent_start(dp); ent != NULL;
1311                      ent = lu_dirent_next(ent)) {
1312                         __u64 hash;
1313                         int namelen;
1314                         char *name;
1315
1316                         hash = le64_to_cpu(ent->lde_hash);
1317                         /*
1318                          * The ll_get_dir_page() can return any page containing
1319                          * the given hash which may be not the start hash.
1320                          */
1321                         if (unlikely(hash < pos))
1322                                 continue;
1323
1324                         namelen = le16_to_cpu(ent->lde_namelen);
1325                         if (unlikely(namelen == 0))
1326                                 /*
1327                                  * skip dummy record.
1328                                  */
1329                                 continue;
1330
1331                         name = ent->lde_name;
1332                         if (name[0] == '.') {
1333                                 if (namelen == 1)
1334                                         /*
1335                                          * skip "."
1336                                          */
1337                                         continue;
1338                                 else if (name[1] == '.' && namelen == 2)
1339                                         /*
1340                                          * skip ".."
1341                                          */
1342                                         continue;
1343                                 else
1344                                         dot_de = 1;
1345                         } else {
1346                                 dot_de = 0;
1347                         }
1348
1349                         if (dot_de && target->name[0] != '.') {
1350                                 CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
1351                                        target->len, target->name,
1352                                        namelen, name);
1353                                 continue;
1354                         }
1355
1356                         if (target->len != namelen ||
1357                             memcmp(target->name, name, namelen) != 0)
1358                                 rc = LS_NOT_FIRST_DE;
1359                         else if (!dot_de)
1360                                 rc = LS_FIRST_DE;
1361                         else
1362                                 rc = LS_FIRST_DOT_DE;
1363
1364                         ll_release_page(dir, page, false);
1365                         GOTO(out, rc);
1366                 }
1367                 pos = le64_to_cpu(dp->ldp_hash_end);
1368                 if (pos == MDS_DIR_END_OFF) {
1369                         /*
1370                          * End of directory reached.
1371                          */
1372                         ll_release_page(dir, page, false);
1373                         GOTO(out, rc);
1374                 } else {
1375                         /*
1376                          * chain is exhausted
1377                          * Normal case: continue to the next page.
1378                          */
1379                         ll_release_page(dir, page, le32_to_cpu(dp->ldp_flags) &
1380                                               LDF_COLLIDE);
1381                         page = ll_get_dir_page(dir, op_data, pos);
1382                 }
1383         }
1384         EXIT;
1385 out:
1386         ll_finish_md_op_data(op_data);
1387
1388         return rc;
1389 }
1390
1391 /**
1392  * revalidate @dentryp from statahead cache
1393  *
1394  * \param[in] dir       parent directory
1395  * \param[in] sai       sai structure
1396  * \param[out] dentryp  pointer to dentry which will be revalidated
1397  * \param[in] unplug    unplug statahead window only (normally for negative
1398  *                      dentry)
1399  * \retval              1 on success, dentry is saved in @dentryp
1400  * \retval              0 if revalidation failed (no proper lock on client)
1401  * \retval              negative number upon error
1402  */
1403 static int revalidate_statahead_dentry(struct inode *dir,
1404                                        struct ll_statahead_info *sai,
1405                                        struct dentry **dentryp,
1406                                        bool unplug)
1407 {
1408         struct sa_entry *entry = NULL;
1409         struct ll_dentry_data *ldd;
1410         struct ll_inode_info *lli = ll_i2info(dir);
1411         int rc = 0;
1412
1413         ENTRY;
1414
1415         if ((*dentryp)->d_name.name[0] == '.') {
1416                 if (sai->sai_ls_all ||
1417                     sai->sai_miss_hidden >= sai->sai_skip_hidden) {
1418                         /*
1419                          * Hidden dentry is the first one, or the statahead
1420                          * thread did not skip that many hidden dentries
1421                          * before "sai_ls_all" was enabled as below.
1422                          */
1423                 } else {
1424                         if (!sai->sai_ls_all)
1425                                 /*
1426                                  * Maybe the hidden dentry is not the
1427                                  * first one and "sai_ls_all" was not
1428                                  * set, so "ls -al" missed it. Enable
1429                                  * "sai_ls_all" for such a case.
1430                                  */
1431                                 sai->sai_ls_all = 1;
1432
1433                         /*
1434                          * Such a "getattr" was skipped before
1435                          * "sai_ls_all" was enabled as above.
1436                          */
1437                         sai->sai_miss_hidden++;
1438                         RETURN(-EAGAIN);
1439                 }
1440         }
1441
1442         if (unplug)
1443                 GOTO(out, rc = 1);
1444
1445         entry = sa_get(sai, &(*dentryp)->d_name);
1446         if (!entry)
1447                 GOTO(out, rc = -EAGAIN);
1448
1449         /* if statahead is busy in readdir, help it do post-work */
1450         if (!sa_ready(entry) && sai->sai_in_readpage)
1451                 sa_handle_callback(sai);
1452
1453         if (!sa_ready(entry)) {
1454                 spin_lock(&lli->lli_sa_lock);
1455                 sai->sai_index_wait = entry->se_index;
1456                 spin_unlock(&lli->lli_sa_lock);
1457                 rc = wait_event_idle_timeout(sai->sai_waitq, sa_ready(entry),
1458                                              cfs_time_seconds(30));
1459                 if (rc == 0) {
1460                         /*
1461                          * entry may not be ready, so it may be used by inflight
1462                          * statahead RPC, don't free it.
1463                          */
1464                         entry = NULL;
1465                         GOTO(out, rc = -EAGAIN);
1466                 }
1467         }
1468
1469         /*
1470          * We need to see the value that was set immediately before we
1471          * were woken up.
1472          */
1473         if (smp_load_acquire(&entry->se_state) == SA_ENTRY_SUCC &&
1474             entry->se_inode) {
1475                 struct inode *inode = entry->se_inode;
1476                 struct lookup_intent it = { .it_op = IT_GETATTR,
1477                                             .it_lock_handle =
1478                                                 entry->se_handle };
1479                 __u64 bits;
1480
1481                 rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
1482                                         ll_inode2fid(inode), &bits);
1483                 if (rc == 1) {
1484                         if (!(*dentryp)->d_inode) {
1485                                 struct dentry *alias;
1486
1487                                 alias = ll_splice_alias(inode, *dentryp);
1488                                 if (IS_ERR(alias)) {
1489                                         ll_intent_release(&it);
1490                                         GOTO(out, rc = PTR_ERR(alias));
1491                                 }
1492                                 *dentryp = alias;
1493                                 /*
1494                                  * statahead prepared this inode, transfer inode
1495                                  * refcount from sa_entry to dentry
1496                                  */
1497                                 entry->se_inode = NULL;
1498                         } else if ((*dentryp)->d_inode != inode) {
1499                                 /* revalidating, but the inode was recreated */
1500                                 CDEBUG(D_READA,
1501                                        "%s: stale dentry %pd inode " DFID", statahead inode "DFID "\n",
1502                                        ll_i2sbi(inode)->ll_fsname, *dentryp,
1503                                        PFID(ll_inode2fid((*dentryp)->d_inode)),
1504                                        PFID(ll_inode2fid(inode)));
1505                                 ll_intent_release(&it);
1506                                 GOTO(out, rc = -ESTALE);
1507                         }
1508
1509                         if ((bits & MDS_INODELOCK_LOOKUP) &&
1510                             d_lustre_invalid(*dentryp))
1511                                 d_lustre_revalidate(*dentryp);
1512                         ll_intent_release(&it);
1513                 }
1514         }
1515 out:
1516         /*
1517          * A statahead-cached sa_entry can be used only once and is killed
1518          * right after use, so if lookup/revalidate accessed the statahead
1519          * cache, set the dentry's ldd_sa_generation to the parent's
1520          * lli_sa_generation; if we stat this file again later, we know that
1521          * statahead has already been done for it, see dentry_may_statahead().
1522          */
1523         ldd = ll_d2d(*dentryp);
1524         /* ldd can be NULL if llite lookup failed. */
1525         if (ldd)
1526                 ldd->lld_sa_generation = lli->lli_sa_generation;
1527         sa_put(sai, entry);
1528         spin_lock(&lli->lli_sa_lock);
1529         if (sai->sai_task)
1530                 wake_up_process(sai->sai_task);
1531         spin_unlock(&lli->lli_sa_lock);
1532
1533         RETURN(rc);
1534 }
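/*
 * Illustrative sketch only (not compiled): the release/acquire pairing that
 * the smp_load_acquire(&entry->se_state) above relies on.  The helper names
 * sa_publish_example() and sa_observe_example() are hypothetical; the real
 * producer is the statahead callback path earlier in this file, which is
 * assumed to publish se_state with a release store.
 */
#if 0
static void sa_publish_example(struct sa_entry *entry, struct inode *inode)
{
	entry->se_inode = inode;		/* fill the payload first */
	/* then publish the state; pairs with smp_load_acquire() in readers */
	smp_store_release(&entry->se_state, SA_ENTRY_SUCC);
}

static bool sa_observe_example(struct sa_entry *entry)
{
	/* seeing SA_ENTRY_SUCC guarantees the se_inode store is visible too */
	return smp_load_acquire(&entry->se_state) == SA_ENTRY_SUCC &&
	       entry->se_inode != NULL;
}
#endif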
1535
1536 /**
1537  * start statahead thread
1538  *
1539  * \param[in] dir       parent directory
1540  * \param[in] dentry    dentry that triggers statahead, normally the first
1541  *                      dirent under @dir
1542  * \param[in] agl       indicates whether AGL is needed
1543  * \retval              -EAGAIN on success, because by the time this function
1544  *                      is called the caller is already in the lookup path, so
1545  *                      it should do this stat itself instead of waiting for
1546  *                      the statahead thread to do it asynchronously.
1547  * \retval              any other negative number upon error
1548  */
1549 static int start_statahead_thread(struct inode *dir, struct dentry *dentry,
1550                                   bool agl)
1551 {
1552         int node = cfs_cpt_spread_node(cfs_cpt_tab, CFS_CPT_ANY);
1553         struct ll_inode_info *lli = ll_i2info(dir);
1554         struct ll_statahead_info *sai = NULL;
1555         struct dentry *parent = dentry->d_parent;
1556         struct task_struct *task;
1557         struct ll_sb_info *sbi = ll_i2sbi(parent->d_inode);
1558         int first = LS_FIRST_DE;
1559         int rc = 0;
1560
1561         ENTRY;
1562
1563         /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
1564         first = is_first_dirent(dir, dentry);
1565         if (first == LS_NOT_FIRST_DE)
1566                 /* This is not an "ls -{a}l" operation, statahead is not needed. */
1567                 GOTO(out, rc = -EFAULT);
1568
1569         if (unlikely(atomic_inc_return(&sbi->ll_sa_running) >
1570                                        sbi->ll_sa_running_max)) {
1571                 CDEBUG(D_READA,
1572                        "Too many concurrent statahead instances, avoid new statahead instance temporarily.\n");
1573                 GOTO(out, rc = -EMFILE);
1574         }
1575
1576         sai = ll_sai_alloc(parent);
1577         if (!sai)
1578                 GOTO(out, rc = -ENOMEM);
1579
1580         sai->sai_ls_all = (first == LS_FIRST_DOT_DE);
1581
1582         /*
1583          * if the current lli_opendir_key was deauthorized, or the dir was
1584          * re-opened by another process, don't start statahead; otherwise the
1585          * newly spawned statahead thread won't be notified to quit.
1586          */
1587         spin_lock(&lli->lli_sa_lock);
1588         if (unlikely(lli->lli_sai || !lli->lli_opendir_key ||
1589                      lli->lli_opendir_pid != current->pid)) {
1590                 spin_unlock(&lli->lli_sa_lock);
1591                 GOTO(out, rc = -EPERM);
1592         }
1593         lli->lli_sai = sai;
1594         spin_unlock(&lli->lli_sa_lock);
1595
1596         CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %pd]\n",
1597                current->pid, parent);
1598
1599         task = kthread_create_on_node(ll_statahead_thread, parent, node,
1600                                       "ll_sa_%u", lli->lli_opendir_pid);
1601         if (IS_ERR(task)) {
1602                 spin_lock(&lli->lli_sa_lock);
1603                 lli->lli_sai = NULL;
1604                 spin_unlock(&lli->lli_sa_lock);
1605                 rc = PTR_ERR(task);
1606                 CERROR("can't start ll_sa thread, rc: %d\n", rc);
1607                 GOTO(out, rc);
1608         }
1609
1610         if (ll_i2sbi(parent->d_inode)->ll_flags & LL_SBI_AGL_ENABLED && agl)
1611                 ll_start_agl(parent, sai);
1612
1613         atomic_inc(&ll_i2sbi(parent->d_inode)->ll_sa_total);
1614         sai->sai_task = task;
1615
1616         wake_up_process(task);
1617         /*
1618          * We don't stat-ahead for the first dirent since we are already in
1619          * lookup.
1620          */
1621         RETURN(-EAGAIN);
1622
1623 out:
1624         /*
1625          * once starting the statahead thread has failed, disable statahead so
1626          * that subsequent stats won't waste time trying it again.
1627          */
1628         spin_lock(&lli->lli_sa_lock);
1629         if (lli->lli_opendir_pid == current->pid)
1630                 lli->lli_sa_enabled = 0;
1631         spin_unlock(&lli->lli_sa_lock);
1632
1633         if (sai)
1634                 ll_sai_free(sai);
1635         if (first != LS_NOT_FIRST_DE)
1636                 atomic_dec(&sbi->ll_sa_running);
1637
1638         RETURN(rc);
1639 }
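/*
 * Illustrative sketch only (not compiled): the bounded-concurrency check used
 * above with ll_sa_running/ll_sa_running_max.  "example_running" and
 * "example_running_max" are hypothetical stand-ins; note that the function
 * above releases the slot on its "out:" error path instead.
 */
#if 0
static atomic_t example_running = ATOMIC_INIT(0);
static int example_running_max = 16;

static int example_try_start(void)
{
	/* optimistically take a slot ... */
	if (atomic_inc_return(&example_running) > example_running_max) {
		/* ... and give it back if the limit was exceeded */
		atomic_dec(&example_running);
		return -EMFILE;
	}
	return 0;	/* caller must atomic_dec() when the instance stops */
}
#endif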
1640
1641 /*
1642  * Check whether statahead for @dir was started.
1643  */
1644 static inline bool ll_statahead_started(struct inode *dir, bool agl)
1645 {
1646         struct ll_inode_info *lli = ll_i2info(dir);
1647         struct ll_statahead_info *sai;
1648
1649         spin_lock(&lli->lli_sa_lock);
1650         sai = lli->lli_sai;
1651         if (sai && (sai->sai_agl_task != NULL) != agl)
1652                 CDEBUG(D_READA,
1653                        "%s: Statahead AGL hint changed from %d to %d\n",
1654                        ll_i2sbi(dir)->ll_fsname,
1655                        sai->sai_agl_task != NULL, agl);
1656         spin_unlock(&lli->lli_sa_lock);
1657
1658         return !!sai;
1659 }
1660
1661 /**
1662  * statahead entry function, this is called when a client does getattr on a
1663  * file; it will start a statahead thread for the parent directory if one has
1664  * not been started yet.
1665  *
1666  * \param[in]  dir      parent directory
1667  * \param[in]  dentry   dentry to getattr
1668  * \param[in]  agl      whether to start the AGL thread
1669  *
1670  * \retval              0 statahead thread already started, nothing to do
1671  * \retval              -EAGAIN a new statahead thread was started, so the
1672  *                      caller should do this getattr itself
1673  * \retval              other negative number on error, the caller often ignores
1674  *                      this and then does getattr from the server
1675  */
1676 int ll_start_statahead(struct inode *dir, struct dentry *dentry, bool agl)
1677 {
1678         if (!ll_statahead_started(dir, agl))
1679                 return start_statahead_thread(dir, dentry, agl);
1680         return 0;
1681 }
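/*
 * Illustrative sketch only (not compiled): how a lookup path is expected to
 * consume ll_start_statahead() per the return values documented above.
 * example_lookup_first_entry() and example_do_lookup_rpc() are hypothetical
 * helpers, not part of llite.
 */
#if 0
static int example_lookup_first_entry(struct inode *dir, struct dentry *dentry)
{
	int rc = ll_start_statahead(dir, dentry, true);

	/* 0: thread already running; -EAGAIN: thread just started; either
	 * way this first entry is looked up synchronously by the caller */
	if (rc != 0 && rc != -EAGAIN)
		CDEBUG(D_READA, "statahead not started: rc = %d\n", rc);

	return example_do_lookup_rpc(dir, dentry);
}
#endif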
1682
1683 /**
1684  * revalidate dentry from statahead cache.
1685  *
1686  * \param[in]  dir      parent directory
1687  * \param[out] dentryp  dentry to getattr
1688  * \param[in]  unplug   unplug statahead window only (normally for negative
1689  *                      dentry)
1690  * \retval              1 on success
1691  * \retval              0 revalidation from the statahead cache failed, the
1692  *                      caller needs to getattr from the server directly
1693  * \retval              negative number on error, the caller often ignores this
1694  *                      and then does getattr from the server
1695  */
1696 int ll_revalidate_statahead(struct inode *dir, struct dentry **dentryp,
1697                             bool unplug)
1698 {
1699         struct ll_statahead_info *sai;
1700         int rc = 0;
1701
1702         sai = ll_sai_get(dir);
1703         if (sai) {
1704                 rc = revalidate_statahead_dentry(dir, sai, dentryp, unplug);
1705                 CDEBUG(D_READA, "revalidate statahead %pd: rc = %d.\n",
1706                        *dentryp, rc);
1707                 ll_sai_put(sai);
1708         }
1709         return rc;
1710 }
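/*
 * Illustrative sketch only (not compiled): how a revalidate path is expected
 * to consume ll_revalidate_statahead() per its return-value documentation
 * above.  example_revalidate() and example_getattr_from_server() are
 * hypothetical helpers, not part of llite.
 */
#if 0
static int example_revalidate(struct inode *dir, struct dentry **dentryp)
{
	int rc = ll_revalidate_statahead(dir, dentryp, false);

	if (rc == 1)
		return 1;	/* revalidated from the statahead cache */

	/* rc == 0 or negative: fall back to a normal getattr from the server */
	return example_getattr_from_server(dir, *dentryp);
}
#endif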