Whamcloud - gitweb
LU-3336 lfsck: use rbtree to record OST-object accessing
[fs/lustre-release.git] / lustre / lfsck / lfsck_layout.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License version 2 for more details.  A copy is
14  * included in the COPYING file that accompanied this code.
15
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2013, Intel Corporation.
24  */
25 /*
26  * lustre/lfsck/lfsck_layout.c
27  *
28  * Author: Fan, Yong <fan.yong@intel.com>
29  */
30
31 #ifndef EXPORT_SYMTAB
32 # define EXPORT_SYMTAB
33 #endif
34 #define DEBUG_SUBSYSTEM S_LFSCK
35
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
38
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_linkea.h>
43 #include <lustre_fid.h>
44 #include <lustre_lib.h>
45 #include <lustre_net.h>
46 #include <lustre/lustre_user.h>
47 #include <md_object.h>
48 #include <obd_class.h>
49
50 #include "lfsck_internal.h"
51
52 #define LFSCK_LAYOUT_MAGIC              0xB173AE14
53
54 static const char lfsck_layout_name[] = "lfsck_layout";
55
/* Per-FID-sequence state on the layout LFSCK slave (OST) side; linked
 * into lfsck_layout_slave_data::llsd_seq_list, kept sorted by lls_seq
 * (see lfsck_layout_seq_insert()). */
struct lfsck_layout_seq {
	struct list_head	 lls_list;
	__u64			 lls_seq;
	/* NOTE(review): presumably the last object ID recorded in the
	 * LAST_ID file for this sequence — confirm against users. */
	__u64			 lls_lastid;
	/* NOTE(review): presumably the largest object ID actually seen
	 * under this sequence — confirm against users. */
	__u64			 lls_lastid_known;
	/* The LAST_ID object for this sequence (see
	 * lfsck_layout_lastid_create()). */
	struct dt_object	*lls_lastid_obj;
	/* Set when the in-RAM LAST_ID value needs to be written back. */
	unsigned int		 lls_dirty:1;
};
64
/* One master (MDT) target tracked by the slave during layout LFSCK.
 * Reference-counted via llst_ref (see lfsck_layout_llst_put()). */
struct lfsck_layout_slave_target {
	/* link into lfsck_layout_slave_data::llsd_master_list. */
	struct list_head	llst_list;
	__u64			llst_gen;
	atomic_t		llst_ref;
	/* Target index; unique within llsd_master_list (duplicates are
	 * rejected with -EALREADY by lfsck_layout_llst_add()). */
	__u32			llst_index;
};
72
/* Slave-side (OST) private data for the layout LFSCK component. */
struct lfsck_layout_slave_data {
	/* list for lfsck_layout_seq, sorted by sequence number. */
	struct list_head	 llsd_seq_list;

	/* list for the masters involved in layout verification. */
	struct list_head	 llsd_master_list;
	/* Protects llsd_master_list. */
	spinlock_t		 llsd_lock;
	__u64			 llsd_touch_gen;
	/* In-RAM object standing for the layout rbtree; iteration over
	 * this object scans the rbtree (see lfsck_rbtree_setup()). */
	struct dt_object	*llsd_rb_obj;
	/* rbtree of lfsck_rbtree_node recording known/accessed
	 * OST-objects, keyed by (sequence, first OID). */
	struct rb_root		 llsd_rb_root;
	/* Protects llsd_rb_root and llsd_rbtree_valid. */
	rwlock_t		 llsd_rb_lock;
	/* Cleared under write lock when the rbtree is torn down; must be
	 * re-checked after any lock drop (see
	 * lfsck_rbtree_update_bitmap()). */
	unsigned int		 llsd_rbtree_valid:1;
};
86
/* Reference-counted wrapper around a pre-fetched parent object whose
 * layout is being verified; shared by the requests that reference it. */
struct lfsck_layout_object {
	struct dt_object	*llo_obj;
	/* Attributes fetched once at init (see lfsck_layout_object_init()). */
	struct lu_attr		 llo_attr;
	atomic_t		 llo_ref;
	/* Generation used to detect layout changes made between LFSCK
	 * pre-fetching and the real verification. */
	__u16			 llo_gen;
};
93
/* A single OST-object verification request, queued on
 * lfsck_layout_master_data::llmd_req_list. Holds one reference on the
 * parent wrapper and one on the child object (dropped in
 * lfsck_layout_req_fini()). */
struct lfsck_layout_req {
	struct list_head		 llr_list;
	struct lfsck_layout_object	*llr_parent;
	struct dt_object		*llr_child;
	__u32				 llr_ost_idx;
	__u32				 llr_lov_idx; /* offset in LOV EA */
};
101
/* Master-side (MDT) private data for the layout LFSCK component. */
struct lfsck_layout_master_data {
	/* Protects llmd_req_list. */
	spinlock_t		llmd_lock;
	struct list_head	llmd_req_list;

	/* list for the ost targets involved in layout verification. */
	struct list_head	llmd_ost_list;

	/* list for the ost targets in phase1 scanning. */
	struct list_head	llmd_ost_phase1_list;

	/* list for the ost targets in phase2 scanning. */
	struct list_head	llmd_ost_phase2_list;

	/* list for the mdt targets involved in layout verification. */
	struct list_head	llmd_mdt_list;

	/* list for the mdt targets in phase1 scanning. */
	struct list_head	llmd_mdt_phase1_list;

	/* list for the mdt targets in phase2 scanning. */
	struct list_head	llmd_mdt_phase2_list;

	/* Assistant thread (status kept in llmd_assistant_status). */
	struct ptlrpc_thread	llmd_thread;
	__u32			llmd_touch_gen;
	int			llmd_prefetched;
	int			llmd_assistant_status;
	int			llmd_post_result;
	unsigned int		llmd_to_post:1,
				llmd_to_double_scan:1,
				llmd_in_double_scan:1,
				llmd_exit:1;
};
134
/* Context carried through slave-side asynchronous requests: the export,
 * the owning component and the master target the request concerns.
 * NOTE(review): the callback users are not visible in this chunk. */
struct lfsck_layout_slave_async_args {
	struct obd_export		 *llsaa_exp;
	struct lfsck_component		 *llsaa_com;
	struct lfsck_layout_slave_target *llsaa_llst;
};
140
141 static struct lfsck_layout_object *
142 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
143                          __u16 gen)
144 {
145         struct lfsck_layout_object *llo;
146         int                         rc;
147
148         OBD_ALLOC_PTR(llo);
149         if (llo == NULL)
150                 return ERR_PTR(-ENOMEM);
151
152         rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
153         if (rc != 0) {
154                 OBD_FREE_PTR(llo);
155
156                 return ERR_PTR(rc);
157         }
158
159         lu_object_get(&obj->do_lu);
160         llo->llo_obj = obj;
161         /* The gen can be used to check whether some others have changed the
162          * file layout after LFSCK pre-fetching but before real verification. */
163         llo->llo_gen = gen;
164         atomic_set(&llo->llo_ref, 1);
165
166         return llo;
167 }
168
169 static inline void
170 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
171 {
172         if (atomic_dec_and_test(&llst->llst_ref)) {
173                 LASSERT(list_empty(&llst->llst_list));
174
175                 OBD_FREE_PTR(llst);
176         }
177 }
178
179 static inline int
180 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
181 {
182         struct lfsck_layout_slave_target *llst;
183         struct lfsck_layout_slave_target *tmp;
184         int                               rc   = 0;
185
186         OBD_ALLOC_PTR(llst);
187         if (llst == NULL)
188                 return -ENOMEM;
189
190         INIT_LIST_HEAD(&llst->llst_list);
191         llst->llst_gen = 0;
192         llst->llst_index = index;
193         atomic_set(&llst->llst_ref, 1);
194
195         spin_lock(&llsd->llsd_lock);
196         list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
197                 if (tmp->llst_index == index) {
198                         rc = -EALREADY;
199                         break;
200                 }
201         }
202         if (rc == 0)
203                 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
204         spin_unlock(&llsd->llsd_lock);
205
206         if (rc != 0)
207                 OBD_FREE_PTR(llst);
208
209         return rc;
210 }
211
212 static inline void
213 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
214                       struct lfsck_layout_slave_target *llst)
215 {
216         bool del = false;
217
218         spin_lock(&llsd->llsd_lock);
219         if (!list_empty(&llst->llst_list)) {
220                 list_del_init(&llst->llst_list);
221                 del = true;
222         }
223         spin_unlock(&llsd->llsd_lock);
224
225         if (del)
226                 lfsck_layout_llst_put(llst);
227 }
228
229 static inline struct lfsck_layout_slave_target *
230 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
231                                __u32 index)
232 {
233         struct lfsck_layout_slave_target *llst;
234
235         spin_lock(&llsd->llsd_lock);
236         list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
237                 if (llst->llst_index == index) {
238                         list_del_init(&llst->llst_list);
239                         spin_unlock(&llsd->llsd_lock);
240
241                         return llst;
242                 }
243         }
244         spin_unlock(&llsd->llsd_lock);
245
246         return NULL;
247 }
248
/* Drop one reference on @llo; the final put releases the wrapped object
 * reference taken in lfsck_layout_object_init() and frees the wrapper. */
static inline void lfsck_layout_object_put(const struct lu_env *env,
					   struct lfsck_layout_object *llo)
{
	if (atomic_dec_and_test(&llo->llo_ref)) {
		lfsck_object_put(env, llo->llo_obj);
		OBD_FREE_PTR(llo);
	}
}
257
258 static struct lfsck_layout_req *
259 lfsck_layout_req_init(struct lfsck_layout_object *parent,
260                       struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
261 {
262         struct lfsck_layout_req *llr;
263
264         OBD_ALLOC_PTR(llr);
265         if (llr == NULL)
266                 return ERR_PTR(-ENOMEM);
267
268         INIT_LIST_HEAD(&llr->llr_list);
269         atomic_inc(&parent->llo_ref);
270         llr->llr_parent = parent;
271         llr->llr_child = child;
272         llr->llr_ost_idx = ost_idx;
273         llr->llr_lov_idx = lov_idx;
274
275         return llr;
276 }
277
/* Tear down a verification request: drop the child object reference,
 * put the parent wrapper, then free the request itself. */
static inline void lfsck_layout_req_fini(const struct lu_env *env,
					 struct lfsck_layout_req *llr)
{
	lu_object_put(env, &llr->llr_child->do_lu);
	lfsck_layout_object_put(env, llr->llr_parent);
	OBD_FREE_PTR(llr);
}
285
286 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
287 {
288         bool empty = false;
289
290         spin_lock(&llmd->llmd_lock);
291         if (list_empty(&llmd->llmd_req_list))
292                 empty = true;
293         spin_unlock(&llmd->llmd_lock);
294
295         return empty;
296 }
297
/* Read the LOV EA (layout) of @obj into @buf, growing or allocating the
 * buffer as needed; @buflen (if non-NULL) is kept in sync with the
 * buffer capacity after any (re)allocation.
 *
 * \retval +ve: number of layout bytes read.
 * \retval 0: the object has no layout (-ENODATA is mapped to 0).
 * \retval -ve: error, including -ENOMEM if (re)allocation fails.
 */
static int lfsck_layout_get_lovea(const struct lu_env *env,
				  struct dt_object *obj,
				  struct lu_buf *buf, ssize_t *buflen)
{
	int rc;

again:
	rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
	if (rc == -ERANGE) {
		/* Buffer too small: query the required size with a NULL
		 * buffer, grow, and retry. */
		rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
				  BYPASS_CAPA);
		if (rc <= 0)
			return rc;

		lu_buf_realloc(buf, rc);
		if (buflen != NULL)
			*buflen = buf->lb_len;

		if (buf->lb_buf == NULL)
			return -ENOMEM;

		goto again;
	}

	if (rc == -ENODATA)
		rc = 0;

	if (rc <= 0)
		return rc;

	if (unlikely(buf->lb_buf == NULL)) {
		/* Caller passed a sizing-only buffer: the first call
		 * returned the EA size; allocate for real and retry. */
		lu_buf_alloc(buf, rc);
		if (buflen != NULL)
			*buflen = buf->lb_len;

		if (buf->lb_buf == NULL)
			return -ENOMEM;

		goto again;
	}

	return rc;
}
341
342 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
343 {
344         __u32 magic;
345         __u32 patten;
346
347         magic = le32_to_cpu(lmm->lmm_magic);
348         /* If magic crashed, keep it there. Sometime later, during OST-object
349          * orphan handling, if some OST-object(s) back-point to it, it can be
350          * verified and repaired. */
351         if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
352                 return -EINVAL;
353
354         patten = le32_to_cpu(lmm->lmm_pattern);
355         /* XXX: currently, we only support LOV_PATTERN_RAID0. */
356         if (patten != LOV_PATTERN_RAID0)
357                 return -EOPNOTSUPP;
358
359         return 0;
360 }
361
362 #define LFSCK_RBTREE_BITMAP_SIZE        PAGE_CACHE_SIZE
363 #define LFSCK_RBTREE_BITMAP_WIDTH       (LFSCK_RBTREE_BITMAP_SIZE << 3)
364 #define LFSCK_RBTREE_BITMAP_MASK        (LFSCK_RBTREE_BITMAP_SIZE - 1)
365
/* One rbtree node covering LFSCK_RBTREE_BITMAP_WIDTH consecutive object
 * IDs within a single FID sequence; each OID maps to one bit in each
 * bitmap. */
struct lfsck_rbtree_node {
	struct rb_node	 lrn_node;
	__u64		 lrn_seq;
	/* First OID covered; aligned down via ~LFSCK_RBTREE_BITMAP_MASK
	 * (see lfsck_rbtree_new()). */
	__u32		 lrn_first_oid;
	/* Number of bits set in lrn_known_bitmap. */
	atomic_t	 lrn_known_count;
	/* Number of bits set in lrn_accessed_bitmap. */
	atomic_t	 lrn_accessed_count;
	void		*lrn_known_bitmap;
	void		*lrn_accessed_bitmap;
};
375
376 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
377                                    __u64 seq, __u32 oid)
378 {
379         if (seq < lrn->lrn_seq)
380                 return -1;
381
382         if (seq > lrn->lrn_seq)
383                 return 1;
384
385         if (oid < lrn->lrn_first_oid)
386                 return -1;
387
388         if (oid >= lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH)
389                 return 1;
390
391         return 0;
392 }
393
394 /* The caller should hold lock. */
395 static struct lfsck_rbtree_node *
396 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
397                     const struct lu_fid *fid)
398 {
399         struct rb_node           *node = llsd->llsd_rb_root.rb_node;
400         struct lfsck_rbtree_node *lrn;
401         int                       rc;
402
403         while (node != NULL) {
404                 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
405                 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
406                 if (rc < 0)
407                         node = node->rb_left;
408                 else if (rc > 0)
409                         node = node->rb_right;
410                 else
411                         return lrn;
412         }
413
414         return NULL;
415 }
416
417 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
418                                                   const struct lu_fid *fid)
419 {
420         struct lfsck_rbtree_node *lrn;
421
422         OBD_ALLOC_PTR(lrn);
423         if (lrn == NULL)
424                 return ERR_PTR(-ENOMEM);
425
426         OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
427         if (lrn->lrn_known_bitmap == NULL) {
428                 OBD_FREE_PTR(lrn);
429
430                 return ERR_PTR(-ENOMEM);
431         }
432
433         OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
434         if (lrn->lrn_accessed_bitmap == NULL) {
435                 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
436                 OBD_FREE_PTR(lrn);
437
438                 return ERR_PTR(-ENOMEM);
439         }
440
441         rb_init_node(&lrn->lrn_node);
442         lrn->lrn_seq = fid_seq(fid);
443         lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
444         atomic_set(&lrn->lrn_known_count, 0);
445         atomic_set(&lrn->lrn_accessed_count, 0);
446
447         return lrn;
448 }
449
/* Free one rbtree node and both of its bitmaps. The node must already be
 * detached from the tree. */
static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
{
	OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
	OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
	OBD_FREE_PTR(lrn);
}
456
457 /* The caller should hold lock. */
458 static struct lfsck_rbtree_node *
459 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
460                     struct lfsck_rbtree_node *lrn)
461 {
462         struct rb_node           **pos    = &(llsd->llsd_rb_root.rb_node);
463         struct rb_node            *parent = NULL;
464         struct lfsck_rbtree_node  *tmp;
465         int                        rc;
466
467         while (*pos) {
468                 parent = *pos;
469                 tmp = rb_entry(*pos, struct lfsck_rbtree_node, lrn_node);
470                 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
471                 if (rc < 0)
472                         pos = &((*pos)->rb_left);
473                 else if (rc > 0)
474                         pos = &((*pos)->rb_right);
475                 else
476                         return tmp;
477         }
478
479         rb_link_node(&lrn->lrn_node, parent, pos);
480         rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
481
482         return lrn;
483 }
484
/* Prepare the slave-side layout rbtree: locate a stand-in object for it,
 * mark the tree valid, and ask the bottom device to record FID accesses.
 *
 * \retval 0 on success, negative errno if the stand-in object cannot be
 *	   located.
 */
static int lfsck_rbtree_setup(const struct lu_env *env,
			      struct lfsck_component *com)
{
	struct lu_fid			*fid	= &lfsck_env_info(env)->lti_fid;
	struct lfsck_instance		*lfsck	= com->lc_lfsck;
	struct dt_device		*dev	= lfsck->li_bottom;
	struct lfsck_layout_slave_data	*llsd	= com->lc_data;
	struct dt_object		*obj;

	/* Reserved sequence plus this device's index forms the FID of the
	 * stand-in object. */
	fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
	fid->f_oid = lfsck_dev_idx(dev);
	fid->f_ver = 0;
	obj = dt_locate(env, dev, fid);
	if (IS_ERR(obj))
		RETURN(PTR_ERR(obj));

	/* XXX: Generate an in-RAM object to stand for the layout rbtree.
	 *      Scanning the layout rbtree will be via the iteration over
	 *      the object. In the future, the rbtree may be written onto
	 *      disk with the object.
	 *
	 *      Mark the object to be as exist. */
	obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
	llsd->llsd_rb_obj = obj;
	llsd->llsd_rbtree_valid = 1;
	dev->dd_record_fid_accessed = 1;

	return 0;
}
514
/* Tear down the layout rbtree: invalidate it under the write lock so no
 * new user can attach, then free every node and drop the stand-in
 * object. */
static void lfsck_rbtree_cleanup(const struct lu_env *env,
				 struct lfsck_component *com)
{
	struct lfsck_instance		*lfsck = com->lc_lfsck;
	struct lfsck_layout_slave_data	*llsd  = com->lc_data;
	struct rb_node			*node  = rb_first(&llsd->llsd_rb_root);
	struct rb_node			*next;
	struct lfsck_rbtree_node	*lrn;

	/* Stop the bottom device from recording further FID accesses. */
	lfsck->li_bottom->dd_record_fid_accessed = 0;
	/* Invalid the rbtree, then no others will use it. */
	write_lock(&llsd->llsd_rb_lock);
	llsd->llsd_rbtree_valid = 0;
	write_unlock(&llsd->llsd_rb_lock);

	/* After invalidation no other user touches the tree, so it can be
	 * emptied without holding the lock. */
	while (node != NULL) {
		next = rb_next(node);
		lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
		rb_erase(node, &llsd->llsd_rb_root);
		lfsck_rbtree_free(lrn);
		node = next;
	}

	if (llsd->llsd_rb_obj != NULL) {
		lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
		llsd->llsd_rb_obj = NULL;
	}
}
543
/* Record @fid in the layout rbtree: set its "known" bit, and its
 * "accessed" bit too when @accessed is true.
 *
 * Normally runs under the read lock. If the covering node does not exist
 * yet, the read lock is dropped, a node is allocated unlocked, and the
 * write lock is taken for the insert; the "insert" flag remembers which
 * lock mode must be released at the end.
 *
 * If the accessed bitmap cannot be updated, orphan handling for this
 * device would be unreliable, so it is disabled: LF_INCOMPLETE is set
 * and the rbtree is torn down.
 */
static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
				       struct lfsck_component *com,
				       const struct lu_fid *fid,
				       bool accessed)
{
	struct lfsck_layout_slave_data	*llsd	= com->lc_data;
	struct lfsck_rbtree_node	*lrn;
	bool				 insert = false;
	int				 idx;
	int				 rc	= 0;
	ENTRY;

	CDEBUG(D_LFSCK, "%s: update bitmap for "DFID"\n",
	       lfsck_lfsck2name(com->lc_lfsck), PFID(fid));

	/* Only sane, non-LAST_ID FIDs in IDIF or normal sequences are
	 * tracked. */
	if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
		RETURN_EXIT;

	if (!fid_is_idif(fid) && !fid_is_norm(fid))
		RETURN_EXIT;

	read_lock(&llsd->llsd_rb_lock);
	if (!llsd->llsd_rbtree_valid)
		GOTO(unlock, rc = 0);

	lrn = lfsck_rbtree_search(llsd, fid);
	if (lrn == NULL) {
		struct lfsck_rbtree_node *tmp;

		LASSERT(!insert);

		/* Allocate outside any lock, then re-take it for write. */
		read_unlock(&llsd->llsd_rb_lock);
		tmp = lfsck_rbtree_new(env, fid);
		if (IS_ERR(tmp))
			GOTO(out, rc = PTR_ERR(tmp));

		insert = true;
		write_lock(&llsd->llsd_rb_lock);
		/* Re-check validity: cleanup may have run while the lock
		 * was dropped. */
		if (!llsd->llsd_rbtree_valid) {
			lfsck_rbtree_free(tmp);
			GOTO(unlock, rc = 0);
		}

		/* The insert may race with another thread creating the
		 * same window; keep whichever node won. */
		lrn = lfsck_rbtree_insert(llsd, tmp);
		if (lrn != tmp)
			lfsck_rbtree_free(tmp);
	}

	idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
	/* Any accessed object must be a known object. */
	if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
		atomic_inc(&lrn->lrn_known_count);
	if (accessed) {
		if (!test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
			atomic_inc(&lrn->lrn_accessed_count);
	}

	GOTO(unlock, rc = 0);

unlock:
	if (insert)
		write_unlock(&llsd->llsd_rb_lock);
	else
		read_unlock(&llsd->llsd_rb_lock);
out:
	if (rc != 0 && accessed) {
		struct lfsck_layout *lo = com->lc_file_ram;

		CERROR("%s: Fail to update object accessed bitmap, will cause "
		       "incorrect LFSCK OST-object handling, so disable it to "
		       "cancel orphan handling for related device. rc = %d.\n",
		       lfsck_lfsck2name(com->lc_lfsck), rc);
		lo->ll_flags |= LF_INCOMPLETE;
		lfsck_rbtree_cleanup(env, com);
	}
}
620
/* Convert an on-disk (little-endian) lfsck_layout image @src into CPU
 * byte order in @des, field by field. */
static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
				   const struct lfsck_layout *src)
{
	int i;

	des->ll_magic = le32_to_cpu(src->ll_magic);
	des->ll_status = le32_to_cpu(src->ll_status);
	des->ll_flags = le32_to_cpu(src->ll_flags);
	des->ll_success_count = le32_to_cpu(src->ll_success_count);
	des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
	des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
	des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
	des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
	des->ll_time_last_checkpoint =
				le64_to_cpu(src->ll_time_last_checkpoint);
	des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
	des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
	des->ll_pos_first_inconsistent =
			le64_to_cpu(src->ll_pos_first_inconsistent);
	des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
	des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
	des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
	des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
	for (i = 0; i < LLIT_MAX; i++)
		des->ll_objs_repaired[i] =
				le64_to_cpu(src->ll_objs_repaired[i]);
	des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
}
649
/* Convert an in-RAM lfsck_layout @src into the on-disk (little-endian)
 * image @des, field by field — the inverse of lfsck_layout_le_to_cpu(). */
static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
				   const struct lfsck_layout *src)
{
	int i;

	des->ll_magic = cpu_to_le32(src->ll_magic);
	des->ll_status = cpu_to_le32(src->ll_status);
	des->ll_flags = cpu_to_le32(src->ll_flags);
	des->ll_success_count = cpu_to_le32(src->ll_success_count);
	des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
	des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
	des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
	des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
	des->ll_time_last_checkpoint =
				cpu_to_le64(src->ll_time_last_checkpoint);
	des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
	des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
	des->ll_pos_first_inconsistent =
			cpu_to_le64(src->ll_pos_first_inconsistent);
	des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
	des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
	des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
	des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
	for (i = 0; i < LLIT_MAX; i++)
		des->ll_objs_repaired[i] =
				cpu_to_le64(src->ll_objs_repaired[i]);
	des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
}
678
/**
 * Load the layout LFSCK tracing file from disk into com->lc_file_ram,
 * converting from little-endian and validating size and magic.
 *
 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
 * \retval 0: succeed.
 * \retval -ve: failed cases (-ENOENT if nothing is stored yet).
 */
static int lfsck_layout_load(const struct lu_env *env,
			     struct lfsck_component *com)
{
	struct lfsck_layout		*lo	= com->lc_file_ram;
	const struct dt_body_operations *dbo	= com->lc_obj->do_body_ops;
	ssize_t				 size	= com->lc_file_size;
	loff_t				 pos	= 0;
	int				 rc;

	rc = dbo->dbo_read(env, com->lc_obj,
			   lfsck_buf_get(env, com->lc_file_disk, size), &pos,
			   BYPASS_CAPA);
	if (rc == 0) {
		/* Zero bytes read: the tracing file was never written. */
		return -ENOENT;
	} else if (rc < 0) {
		CWARN("%s: failed to load lfsck_layout: rc = %d\n",
		      lfsck_lfsck2name(com->lc_lfsck), rc);
		return rc;
	} else if (rc != size) {
		/* Short read: the tracing file is corrupted. */
		CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
		      lfsck_lfsck2name(com->lc_lfsck), rc);
		return 1;
	}

	lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
	if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
		CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
		      "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
		      lo->ll_magic, LFSCK_LAYOUT_MAGIC);
		return 1;
	}

	return 0;
}
718
/* Write the in-RAM lfsck_layout state to the on-disk tracing file:
 * convert to little-endian into com->lc_file_disk, then declare, start
 * and run a single local transaction on the bottom device.
 *
 * \retval 0 on success, negative errno on failure.
 */
static int lfsck_layout_store(const struct lu_env *env,
			      struct lfsck_component *com)
{
	struct dt_object	 *obj		= com->lc_obj;
	struct lfsck_instance	 *lfsck		= com->lc_lfsck;
	struct lfsck_layout	 *lo		= com->lc_file_disk;
	struct thandle		 *handle;
	ssize_t			  size		= com->lc_file_size;
	loff_t			  pos		= 0;
	int			  rc;
	ENTRY;

	lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
	handle = dt_trans_create(env, lfsck->li_bottom);
	if (IS_ERR(handle)) {
		rc = PTR_ERR(handle);
		CERROR("%s: fail to create trans for storing lfsck_layout: "
		       "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
		RETURN(rc);
	}

	rc = dt_declare_record_write(env, obj, size, pos, handle);
	if (rc != 0) {
		CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
		       "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
		GOTO(out, rc);
	}

	rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
	if (rc != 0) {
		CERROR("%s: fail to start trans for storing lfsck_layout: "
		       "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
		GOTO(out, rc);
	}

	rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
			     handle);
	if (rc != 0)
		CERROR("%s: fail to store lfsck_layout(1): size = %d, "
		       "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);

	GOTO(out, rc);

out:
	/* Always stop the transaction, on both success and error paths. */
	dt_trans_stop(env, lfsck->li_bottom, handle);

	return rc;
}
767
768 static int lfsck_layout_init(const struct lu_env *env,
769                              struct lfsck_component *com)
770 {
771         struct lfsck_layout *lo = com->lc_file_ram;
772         int rc;
773
774         memset(lo, 0, com->lc_file_size);
775         lo->ll_magic = LFSCK_LAYOUT_MAGIC;
776         lo->ll_status = LS_INIT;
777         down_write(&com->lc_sem);
778         rc = lfsck_layout_store(env, com);
779         up_write(&com->lc_sem);
780
781         return rc;
782 }
783
/* Decide whether @fid names an OST-object.
 *
 * Tries, in order: an FLD lookup of the FID's sequence; the LMA xattr's
 * LMAC_FID_ON_OST compat flag; and finally the presence of the
 * XATTR_NAME_FID xattr (NOTE(review): presumably carried only by
 * OST-objects — confirm against the OST object format).
 *
 * \retval 1 if @fid is for an OST-object, 0 otherwise.
 */
static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
			     struct dt_object *obj, const struct lu_fid *fid)
{
	struct seq_server_site	*ss	= lu_site2seq(dt->dd_lu_dev.ld_site);
	struct lu_seq_range	 range	= { 0 };
	struct lustre_mdt_attrs *lma;
	int			 rc;

	fld_range_set_any(&range);
	rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
	if (rc == 0) {
		if (fld_range_is_ost(&range))
			return 1;

		return 0;
	}

	/* FLD lookup failed: fall back to the object's LMA xattr. */
	lma = &lfsck_env_info(env)->lti_lma;
	rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
			  XATTR_NAME_LMA, BYPASS_CAPA);
	if (rc == sizeof(*lma)) {
		lustre_lma_swab(lma);

		return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
	}

	/* Last resort: probe for the XATTR_NAME_FID xattr. */
	rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);

	return rc > 0;
}
814
815 static struct lfsck_layout_seq *
816 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
817 {
818         struct lfsck_layout_seq *lls;
819
820         list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
821                 if (lls->lls_seq == seq)
822                         return lls;
823
824                 if (lls->lls_seq > seq)
825                         return NULL;
826         }
827
828         return NULL;
829 }
830
831 static void
832 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
833                         struct lfsck_layout_seq *lls)
834 {
835         struct lfsck_layout_seq *tmp;
836         struct list_head        *pos = &llsd->llsd_seq_list;
837
838         list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
839                 if (lls->lls_seq < tmp->lls_seq) {
840                         pos = &tmp->lls_list;
841                         break;
842                 }
843         }
844         list_add_tail(&lls->lls_list, pos);
845 }
846
847 static int
848 lfsck_layout_lastid_create(const struct lu_env *env,
849                            struct lfsck_instance *lfsck,
850                            struct dt_object *obj)
851 {
852         struct lfsck_thread_info *info   = lfsck_env_info(env);
853         struct lu_attr           *la     = &info->lti_la;
854         struct dt_object_format  *dof    = &info->lti_dof;
855         struct lfsck_bookmark    *bk     = &lfsck->li_bookmark_ram;
856         struct dt_device         *dt     = lfsck->li_bottom;
857         struct thandle           *th;
858         __u64                     lastid = 0;
859         loff_t                    pos    = 0;
860         int                       rc;
861         ENTRY;
862
863         CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
864                fid_seq(lfsck_dto2fid(obj)));
865
866         if (bk->lb_param & LPF_DRYRUN)
867                 return 0;
868
869         memset(la, 0, sizeof(*la));
870         la->la_mode = S_IFREG |  S_IRUGO | S_IWUSR;
871         la->la_valid = LA_MODE | LA_UID | LA_GID;
872         dof->dof_type = dt_mode_to_dft(S_IFREG);
873
874         th = dt_trans_create(env, dt);
875         if (IS_ERR(th))
876                 RETURN(rc = PTR_ERR(th));
877
878         rc = dt_declare_create(env, obj, la, NULL, dof, th);
879         if (rc != 0)
880                 GOTO(stop, rc);
881
882         rc = dt_declare_record_write(env, obj, sizeof(lastid), pos, th);
883         if (rc != 0)
884                 GOTO(stop, rc);
885
886         rc = dt_trans_start_local(env, dt, th);
887         if (rc != 0)
888                 GOTO(stop, rc);
889
890         dt_write_lock(env, obj, 0);
891         if (likely(!dt_object_exists(obj))) {
892                 rc = dt_create(env, obj, la, NULL, dof, th);
893                 if (rc == 0)
894                         rc = dt_record_write(env, obj,
895                                 lfsck_buf_get(env, &lastid, sizeof(lastid)),
896                                 &pos, th);
897         }
898         dt_write_unlock(env, obj);
899
900         GOTO(stop, rc);
901
902 stop:
903         dt_trans_stop(env, dt, th);
904
905         return rc;
906 }
907
/**
 * Re-read the on-disk LAST_ID value for one sequence and reconcile it
 * with what the LFSCK has already observed.
 *
 * If the stored value is smaller than the largest object ID the scan has
 * seen (lls_lastid_known), the LAST_ID file must have crashed: adopt the
 * known value, mark the entry dirty so it gets rebuilt, and notify the
 * upper layer once (LF_CRASHED_LASTID guards against repeats). If the
 * stored value is at least as new as the cached one, trust the disk.
 *
 * \retval 0 on success, negative errno if the record cannot be read.
 */
static int
lfsck_layout_lastid_reload(const struct lu_env *env,
                           struct lfsck_component *com,
                           struct lfsck_layout_seq *lls)
{
        __u64   lastid;
        loff_t  pos     = 0;
        int     rc;

        dt_read_lock(env, lls->lls_lastid_obj, 0);
        rc = dt_record_read(env, lls->lls_lastid_obj,
                            lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
        dt_read_unlock(env, lls->lls_lastid_obj);
        if (unlikely(rc != 0))
                return rc;

        /* LAST_ID is stored little-endian on disk. */
        lastid = le64_to_cpu(lastid);
        if (lastid < lls->lls_lastid_known) {
                struct lfsck_instance   *lfsck  = com->lc_lfsck;
                struct lfsck_layout     *lo     = com->lc_file_ram;

                lls->lls_lastid = lls->lls_lastid_known;
                lls->lls_dirty = 1;
                if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
                        LASSERT(lfsck->li_out_notify != NULL);

                        lfsck->li_out_notify(env, lfsck->li_out_notify_data,
                                             LE_LASTID_REBUILDING);
                        lo->ll_flags |= LF_CRASHED_LASTID;
                }
        } else if (lastid >= lls->lls_lastid) {
                /* The on-disk value is current (or newer); no rebuild
                 * needed for this sequence. */
                lls->lls_lastid = lastid;
                lls->lls_dirty = 0;
        }

        return 0;
}
945
/**
 * Flush every tracked LAST_ID value back to its on-disk LAST_ID file.
 *
 * Walks llsd_seq_list and, for each sequence, writes lls_lastid (in
 * little-endian) at offset 0 of the corresponding LAST_ID object inside
 * its own local transaction. A failure for one sequence is recorded in
 * rc1 but does not stop processing of the remaining sequences. Under
 * LPF_DRYRUN the dirty flags are cleared without writing.
 *
 * \retval 0 if every dirty LAST_ID was stored, otherwise the last
 *	   error encountered.
 */
static int
lfsck_layout_lastid_store(const struct lu_env *env,
                          struct lfsck_component *com)
{
        struct lfsck_instance           *lfsck  = com->lc_lfsck;
        struct lfsck_bookmark           *bk     = &lfsck->li_bookmark_ram;
        struct dt_device                *dt     = lfsck->li_bottom;
        struct lfsck_layout_slave_data  *llsd   = com->lc_data;
        struct lfsck_layout_seq         *lls;
        struct thandle                  *th;
        __u64                            lastid;
        int                              rc     = 0;
        int                              rc1    = 0;

        list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
                loff_t pos = 0;

                /* XXX: Add the code back if we really found related
                 *      inconsistent cases in the future. */
#if 0
                if (!lls->lls_dirty) {
                        /* In OFD, before the pre-creation, the LAST_ID
                         * file will be updated firstly, which may hide
                         * some potential crashed cases. For example:
                         *
                         * The old obj1's ID is higher than old LAST_ID
                         * but lower than the new LAST_ID, but the LFSCK
                         * have not touch the obj1 until the OFD updated
                         * the LAST_ID. So the LFSCK does not regard it
                         * as crashed case. But when OFD does not create
                         * successfully, it will set the LAST_ID as the
                         * real created objects' ID, then LFSCK needs to
                         * found related inconsistency. */
                        rc = lfsck_layout_lastid_reload(env, com, lls);
                        if (likely(!lls->lls_dirty))
                                continue;
                }
#endif

                CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
                       " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);

                /* Dry-run: pretend the sync happened. */
                if (bk->lb_param & LPF_DRYRUN) {
                        lls->lls_dirty = 0;
                        continue;
                }

                th = dt_trans_create(env, dt);
                if (IS_ERR(th)) {
                        rc1 = PTR_ERR(th);
                        CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
                               lfsck_lfsck2name(com->lc_lfsck),
                               lls->lls_seq, rc1);
                        /* Keep going: other sequences may still store. */
                        continue;
                }

                rc = dt_declare_record_write(env, lls->lls_lastid_obj,
                                             sizeof(lastid), pos, th);
                if (rc != 0)
                        goto stop;

                rc = dt_trans_start_local(env, dt, th);
                if (rc != 0)
                        goto stop;

                /* On-disk format is little-endian. */
                lastid = cpu_to_le64(lls->lls_lastid);
                dt_write_lock(env, lls->lls_lastid_obj, 0);
                rc = dt_record_write(env, lls->lls_lastid_obj,
                                     lfsck_buf_get(env, &lastid,
                                     sizeof(lastid)), &pos, th);
                dt_write_unlock(env, lls->lls_lastid_obj);
                if (rc == 0)
                        lls->lls_dirty = 0;

stop:
                dt_trans_stop(env, dt, th);
                if (rc != 0) {
                        rc1 = rc;
                        CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
                               lfsck_lfsck2name(com->lc_lfsck),
                               lls->lls_seq, rc1);
                }
        }

        return rc1;
}
1032
/**
 * Load (or create) the LAST_ID file for one sequence and cache its value.
 *
 * If the LAST_ID object does not exist it is regarded as crashed: the
 * upper layer is notified (once, guarded by LF_CRASHED_LASTID) and a
 * fresh zero-initialized LAST_ID file is created. Otherwise the stored
 * value is read into lls->lls_lastid; an empty file (read returns 0)
 * also triggers the crashed-LAST_ID notification.
 *
 * On success the object reference is kept in lls->lls_lastid_obj; on
 * failure it is dropped here.
 *
 * \retval 0 on success, negative errno on failure.
 */
static int
lfsck_layout_lastid_load(const struct lu_env *env,
                         struct lfsck_component *com,
                         struct lfsck_layout_seq *lls)
{
        struct lfsck_instance   *lfsck  = com->lc_lfsck;
        struct lfsck_layout     *lo     = com->lc_file_ram;
        struct lu_fid           *fid    = &lfsck_env_info(env)->lti_fid;
        struct dt_object        *obj;
        loff_t                   pos    = 0;
        int                      rc;
        ENTRY;

        lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
        obj = dt_locate(env, lfsck->li_bottom, fid);
        if (IS_ERR(obj))
                RETURN(PTR_ERR(obj));

        /* LAST_ID crashed, to be rebuilt */
        if (!dt_object_exists(obj)) {
                if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
                        LASSERT(lfsck->li_out_notify != NULL);

                        lfsck->li_out_notify(env, lfsck->li_out_notify_data,
                                             LE_LASTID_REBUILDING);
                        lo->ll_flags |= LF_CRASHED_LASTID;

                        /* Fault-injection hook: optionally pause here for
                         * up to cfs_fail_val seconds (or until the LFSCK
                         * thread stops) to widen the race window in tests.
                         * lc_sem is dropped while waiting. */
                        if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
                            cfs_fail_val > 0) {
                                struct l_wait_info lwi = LWI_TIMEOUT(
                                                cfs_time_seconds(cfs_fail_val),
                                                NULL, NULL);

                                up_write(&com->lc_sem);
                                l_wait_event(lfsck->li_thread.t_ctl_waitq,
                                             !thread_is_running(&lfsck->li_thread),
                                             &lwi);
                                down_write(&com->lc_sem);
                        }
                }

                rc = lfsck_layout_lastid_create(env, lfsck, obj);
        } else {
                dt_read_lock(env, obj, 0);
                rc = dt_read(env, obj,
                        lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
                        &pos);
                dt_read_unlock(env, obj);
                /* Anything other than a full 8-byte record (or an empty
                 * file, handled below) is a fatal read error. */
                if (rc != 0 && rc != sizeof(__u64))
                        GOTO(out, rc = (rc > 0 ? -EFAULT : rc));

                /* Empty LAST_ID file: treat as crashed and notify once. */
                if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
                        LASSERT(lfsck->li_out_notify != NULL);

                        lfsck->li_out_notify(env, lfsck->li_out_notify_data,
                                             LE_LASTID_REBUILDING);
                        lo->ll_flags |= LF_CRASHED_LASTID;
                }

                lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
                rc = 0;
        }

        GOTO(out, rc);

out:
        if (rc != 0)
                lfsck_object_put(env, obj);
        else
                /* Keep the reference; released when the seq entry dies. */
                lls->lls_lastid_obj = obj;

        return rc;
}
1106
/**
 * Interpret callback for async LFSCK layout RPCs sent by the master.
 *
 * Runs when a LE_START/LE_STOP/LE_PHASE*_DONE/LE_QUERY request to an
 * OST or MDT target completes. Updates the master's per-target phase
 * lists under ltd_lock according to the event and the reply, and drops
 * the target reference taken when the request was issued (LE_STOP and
 * LE_PHASE*_DONE take no extra reference, so none is dropped there).
 * The component reference taken at send time is always released.
 *
 * \retval 0 always (errors are logged, not propagated).
 */
static int lfsck_layout_master_async_interpret(const struct lu_env *env,
                                               struct ptlrpc_request *req,
                                               void *args, int rc)
{
        struct lfsck_async_interpret_args *laia = args;
        struct lfsck_component            *com  = laia->laia_com;
        struct lfsck_layout_master_data   *llmd = com->lc_data;
        struct lfsck_tgt_descs            *ltds = laia->laia_ltds;
        struct lfsck_tgt_desc             *ltd  = laia->laia_ltd;
        struct lfsck_request              *lr   = laia->laia_lr;

        switch (lr->lr_event) {
        case LE_START:
                if (rc != 0) {
                        struct lfsck_layout *lo = com->lc_file_ram;

                        /* This target could not start: the scan result
                         * will necessarily be partial. */
                        lo->ll_flags |= LF_INCOMPLETE;
                        lfsck_tgt_put(ltd);
                        break;
                }

                spin_lock(&ltds->ltd_lock);
                if (ltd->ltd_dead || ltd->ltd_layout_done) {
                        spin_unlock(&ltds->ltd_lock);
                        lfsck_tgt_put(ltd);
                        break;
                }

                /* Link the target into the phase-1 bookkeeping lists,
                 * OST or MDT side depending on the request flags. */
                if (lr->lr_flags & LEF_TO_OST) {
                        if (list_empty(&ltd->ltd_layout_list))
                                list_add_tail(&ltd->ltd_layout_list,
                                              &llmd->llmd_ost_list);
                        if (list_empty(&ltd->ltd_layout_phase_list))
                                list_add_tail(&ltd->ltd_layout_phase_list,
                                              &llmd->llmd_ost_phase1_list);
                } else {
                        if (list_empty(&ltd->ltd_layout_list))
                                list_add_tail(&ltd->ltd_layout_list,
                                              &llmd->llmd_mdt_list);
                        if (list_empty(&ltd->ltd_layout_phase_list))
                                list_add_tail(&ltd->ltd_layout_phase_list,
                                              &llmd->llmd_mdt_phase1_list);
                }
                spin_unlock(&ltds->ltd_lock);
                lfsck_tgt_put(ltd);
                break;
        case LE_STOP:
        case LE_PHASE1_DONE:
        case LE_PHASE2_DONE:
                /* Pure notifications: nothing to update locally, just
                 * log a delivery failure. */
                if (rc != 0)
                        CERROR("%s: fail to notify %s %x for layout: "
                               "event = %d, rc = %d\n",
                               lfsck_lfsck2name(com->lc_lfsck),
                               (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
                               ltd->ltd_index, lr->lr_event, rc);
                break;
        case LE_QUERY: {
                struct lfsck_reply *reply;

                if (rc != 0) {
                        /* Query failed: stop tracking this target. */
                        spin_lock(&ltds->ltd_lock);
                        list_del_init(&ltd->ltd_layout_phase_list);
                        list_del_init(&ltd->ltd_layout_list);
                        spin_unlock(&ltds->ltd_lock);
                        lfsck_tgt_put(ltd);
                        break;
                }

                reply = req_capsule_server_get(&req->rq_pill,
                                               &RMF_LFSCK_REPLY);
                if (reply == NULL) {
                        rc = -EPROTO;
                        CERROR("%s: invalid return value: rc = %d\n",
                               lfsck_lfsck2name(com->lc_lfsck), rc);
                        spin_lock(&ltds->ltd_lock);
                        list_del_init(&ltd->ltd_layout_phase_list);
                        list_del_init(&ltd->ltd_layout_list);
                        spin_unlock(&ltds->ltd_lock);
                        lfsck_tgt_put(ltd);
                        break;
                }

                switch (reply->lr_status) {
                case LS_SCANNING_PHASE1:
                        /* Still in phase 1: leave it where it is. */
                        break;
                case LS_SCANNING_PHASE2:
                        /* Move the target from the phase-1 to the
                         * phase-2 list, unless it went away meanwhile. */
                        spin_lock(&ltds->ltd_lock);
                        list_del_init(&ltd->ltd_layout_phase_list);
                        if (ltd->ltd_dead || ltd->ltd_layout_done) {
                                spin_unlock(&ltds->ltd_lock);
                                break;
                        }

                        if (lr->lr_flags & LEF_TO_OST)
                                list_add_tail(&ltd->ltd_layout_phase_list,
                                              &llmd->llmd_ost_phase2_list);
                        else
                                list_add_tail(&ltd->ltd_layout_phase_list,
                                              &llmd->llmd_mdt_phase2_list);
                        spin_unlock(&ltds->ltd_lock);
                        break;
                default:
                        /* Completed, stopped, failed, ...: the target is
                         * no longer participating, drop it from both
                         * lists. */
                        spin_lock(&ltds->ltd_lock);
                        list_del_init(&ltd->ltd_layout_phase_list);
                        list_del_init(&ltd->ltd_layout_list);
                        spin_unlock(&ltds->ltd_lock);
                        break;
                }
                lfsck_tgt_put(ltd);
                break;
        }
        default:
                CERROR("%s: unexpected event: rc = %d\n",
                       lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
                break;
        }

        /* Balance the component reference taken when the RPC was sent. */
        lfsck_component_put(env, com);

        return 0;
}
1228
/**
 * Query the layout LFSCK status of the other targets.
 *
 * Sends async LFSCK_QUERY requests first to the MDTs still in phase 1
 * (if any), waits for the replies, then repeats the same for the OSTs
 * still in phase 1. The per-target generation counter (ltd_layout_gen
 * vs llmd_touch_gen) marks targets already visited in this pass, since
 * each target is rotated to the tail of its list while ltd_lock is
 * dropped around the RPC send.
 *
 * \retval 0 on success, or the first/last send or wait error.
 */
static int lfsck_layout_master_query_others(const struct lu_env *env,
                                            struct lfsck_component *com)
{
        struct lfsck_thread_info          *info  = lfsck_env_info(env);
        struct lfsck_request              *lr    = &info->lti_lr;
        struct lfsck_async_interpret_args *laia  = &info->lti_laia;
        struct lfsck_instance             *lfsck = com->lc_lfsck;
        struct lfsck_layout_master_data   *llmd  = com->lc_data;
        struct ptlrpc_request_set         *set;
        struct lfsck_tgt_descs            *ltds;
        struct lfsck_tgt_desc             *ltd;
        struct list_head                  *head;
        __u32                              cnt   = 0;
        int                                rc    = 0;
        int                                rc1   = 0;
        ENTRY;

        set = ptlrpc_prep_set();
        if (set == NULL)
                RETURN(-ENOMEM);

        /* New pass: bump the generation so already-queried targets can
         * be recognized when they are rotated to the list tail. */
        llmd->llmd_touch_gen++;
        memset(lr, 0, sizeof(*lr));
        lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
        lr->lr_event = LE_QUERY;
        lr->lr_active = LT_LAYOUT;
        laia->laia_com = com;
        laia->laia_lr = lr;

        /* Query remaining phase-1 MDTs first; fall through to the OSTs
         * (via the "again" label) once the MDT list is drained. */
        if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
                ltds = &lfsck->li_mdt_descs;
                lr->lr_flags = 0;
                head = &llmd->llmd_mdt_phase1_list;
        } else {

again:
                ltds = &lfsck->li_ost_descs;
                lr->lr_flags = LEF_TO_OST;
                head = &llmd->llmd_ost_phase1_list;
        }

        laia->laia_ltds = ltds;
        spin_lock(&ltds->ltd_lock);
        while (!list_empty(head)) {
                ltd = list_entry(head->next,
                                 struct lfsck_tgt_desc,
                                 ltd_layout_phase_list);
                /* Already touched in this pass: the whole list has been
                 * visited. */
                if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
                        break;

                ltd->ltd_layout_gen = llmd->llmd_touch_gen;
                /* Rotate to the tail so the loop makes progress while
                 * the lock is dropped for the RPC send. */
                list_del(&ltd->ltd_layout_phase_list);
                list_add_tail(&ltd->ltd_layout_phase_list, head);
                /* Reference dropped by the interpret callback. */
                atomic_inc(&ltd->ltd_ref);
                laia->laia_ltd = ltd;
                spin_unlock(&ltds->ltd_lock);
                rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
                                         lfsck_layout_master_async_interpret,
                                         laia, LFSCK_QUERY);
                if (rc != 0) {
                        CERROR("%s: fail to query %s %x for layout: rc = %d\n",
                               lfsck_lfsck2name(lfsck),
                               (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
                               ltd->ltd_index, rc);
                        lfsck_tgt_put(ltd);
                        rc1 = rc;
                } else {
                        cnt++;
                }
                spin_lock(&ltds->ltd_lock);
        }
        spin_unlock(&ltds->ltd_lock);

        /* Wait for all replies of this batch before switching lists. */
        if (cnt > 0) {
                rc = ptlrpc_set_wait(set);
                if (rc < 0) {
                        ptlrpc_set_destroy(set);
                        RETURN(rc);
                }
                cnt = 0;
        }

        /* If we just finished the MDT pass and it drained the phase-1
         * MDT list, go round once more for the OSTs. */
        if (!(lr->lr_flags & LEF_TO_OST) &&
            list_empty(&llmd->llmd_mdt_phase1_list))
                goto again;

        ptlrpc_set_destroy(set);

        RETURN(rc1 != 0 ? rc1 : rc);
}
1319
1320 static inline bool
1321 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1322 {
1323         return list_empty(&llmd->llmd_mdt_phase1_list) &&
1324                (!list_empty(&llmd->llmd_ost_phase2_list) ||
1325                 list_empty(&llmd->llmd_ost_phase1_list));
1326 }
1327
/**
 * Broadcast an LFSCK layout event from the master to the other targets.
 *
 * Depending on lr->lr_event:
 * - LE_START: notify all OSTs (and, with LPF_ALL_MDT, the other MDTs;
 *   with LPF_BROADCAST the sponsor sends the RPCs itself, otherwise the
 *   MDTs are only linked into the local bookkeeping lists).
 * - LE_STOP / LE_PHASE2_DONE: notify the tracked MDTs (broadcast case)
 *   and then the tracked OSTs, unlinking each target as it is notified.
 * - LE_PHASE1_DONE: notify the MDTs still in phase 1, using the
 *   generation counter to terminate the rotate-to-tail iteration.
 *
 * NOTE: the "lable1/2/3" label names are historical typos kept as-is.
 *
 * \retval 0 on success, -ENODEV if LE_START reached no OST at all,
 *	   or a send/wait error.
 */
static int lfsck_layout_master_notify_others(const struct lu_env *env,
                                             struct lfsck_component *com,
                                             struct lfsck_request *lr,
                                             __u32 flags)
{
        struct lfsck_thread_info          *info  = lfsck_env_info(env);
        struct lfsck_async_interpret_args *laia  = &info->lti_laia;
        struct lfsck_instance             *lfsck = com->lc_lfsck;
        struct lfsck_layout_master_data   *llmd  = com->lc_data;
        struct lfsck_layout               *lo    = com->lc_file_ram;
        struct ptlrpc_request_set         *set;
        struct lfsck_tgt_descs            *ltds;
        struct lfsck_tgt_desc             *ltd;
        struct lfsck_tgt_desc             *next;
        struct list_head                  *head;
        __u32                              idx;
        __u32                              cnt   = 0;
        int                                rc    = 0;
        ENTRY;

        set = ptlrpc_prep_set();
        if (set == NULL)
                RETURN(-ENOMEM);

        lr->lr_active = LT_LAYOUT;
        laia->laia_com = com;
        laia->laia_lr = lr;
        lr->lr_flags = 0;
        switch (lr->lr_event) {
        case LE_START:
                /* Notify OSTs firstly, then other MDTs if needed. */
                lr->lr_flags |= LEF_TO_OST;
                ltds = &lfsck->li_ost_descs;

lable1:
                laia->laia_ltds = ltds;
                down_read(&ltds->ltd_rw_sem);
                cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
                        ltd = lfsck_tgt_get(ltds, idx);
                        LASSERT(ltd != NULL);

                        /* Reference dropped by the interpret callback. */
                        laia->laia_ltd = ltd;
                        ltd->ltd_layout_done = 0;
                        rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
                                        lfsck_layout_master_async_interpret,
                                        laia, LFSCK_NOTIFY);
                        if (rc != 0) {
                                CERROR("%s: fail to notify %s %x for layout "
                                       "start: rc = %d\n",
                                       lfsck_lfsck2name(lfsck),
                                       (lr->lr_flags & LEF_TO_OST) ? "OST" :
                                       "MDT", idx, rc);
                                lfsck_tgt_put(ltd);
                                lo->ll_flags |= LF_INCOMPLETE;
                        } else {
                                cnt++;
                        }
                }
                up_read(&ltds->ltd_rw_sem);

                /* Sync up */
                if (cnt > 0) {
                        rc = ptlrpc_set_wait(set);
                        if (rc < 0) {
                                ptlrpc_set_destroy(set);
                                RETURN(rc);
                        }
                        cnt = 0;
                }

                if (!(flags & LPF_ALL_MDT))
                        break;

                ltds = &lfsck->li_mdt_descs;
                /* The sponsor broadcasts the request to other MDTs. */
                if (flags & LPF_BROADCAST) {
                        flags &= ~LPF_ALL_MDT;
                        lr->lr_flags &= ~LEF_TO_OST;
                        goto lable1;
                }

                /* non-sponsors link other MDT targets locallly. */
                spin_lock(&ltds->ltd_lock);
                cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
                        ltd = LTD_TGT(ltds, idx);
                        LASSERT(ltd != NULL);

                        if (!list_empty(&ltd->ltd_layout_list))
                                continue;

                        list_add_tail(&ltd->ltd_layout_list,
                                      &llmd->llmd_mdt_list);
                        list_add_tail(&ltd->ltd_layout_phase_list,
                                      &llmd->llmd_mdt_phase1_list);
                }
                spin_unlock(&ltds->ltd_lock);

                break;
        case LE_STOP:
                if (flags & LPF_BROADCAST)
                        lr->lr_flags |= LEF_FORCE_STOP;
                /* fallthrough: STOP and PHASE2_DONE share the same
                 * notification path below. */
        case LE_PHASE2_DONE:
                /* Notify other MDTs if needed, then the OSTs. */
                if (flags & LPF_ALL_MDT) {
                        /* The sponsor broadcasts the request to other MDTs. */
                        if (flags & LPF_BROADCAST) {
                                lr->lr_flags &= ~LEF_TO_OST;
                                head = &llmd->llmd_mdt_list;
                                ltds = &lfsck->li_mdt_descs;
                                goto lable3;
                        }

                        /* non-sponsors unlink other MDT targets locallly. */
                        ltds = &lfsck->li_mdt_descs;
                        spin_lock(&ltds->ltd_lock);
                        list_for_each_entry_safe(ltd, next,
                                                 &llmd->llmd_mdt_list,
                                                 ltd_layout_list) {
                                list_del_init(&ltd->ltd_layout_phase_list);
                                list_del_init(&ltd->ltd_layout_list);
                        }
                        spin_unlock(&ltds->ltd_lock);
                }

lable2:
                lr->lr_flags |= LEF_TO_OST;
                head = &llmd->llmd_ost_list;
                ltds = &lfsck->li_ost_descs;

lable3:
                laia->laia_ltds = ltds;
                spin_lock(&ltds->ltd_lock);
                /* Unlink and notify targets one by one; ltd_lock is
                 * dropped around each RPC send. */
                while (!list_empty(head)) {
                        ltd = list_entry(head->next, struct lfsck_tgt_desc,
                                         ltd_layout_list);
                        if (!list_empty(&ltd->ltd_layout_phase_list))
                                list_del_init(&ltd->ltd_layout_phase_list);
                        list_del_init(&ltd->ltd_layout_list);
                        laia->laia_ltd = ltd;
                        spin_unlock(&ltds->ltd_lock);
                        rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
                                        lfsck_layout_master_async_interpret,
                                        laia, LFSCK_NOTIFY);
                        if (rc != 0)
                                CERROR("%s: fail to notify %s %x for layout "
                                       "stop/phase2: rc = %d\n",
                                       lfsck_lfsck2name(lfsck),
                                       (lr->lr_flags & LEF_TO_OST) ? "OST" :
                                       "MDT", ltd->ltd_index, rc);
                        else
                                cnt++;
                        spin_lock(&ltds->ltd_lock);
                }
                spin_unlock(&ltds->ltd_lock);

                if (!(flags & LPF_BROADCAST))
                        break;

                /* Sync up */
                if (cnt > 0) {
                        rc = ptlrpc_set_wait(set);
                        if (rc < 0) {
                                ptlrpc_set_destroy(set);
                                RETURN(rc);
                        }
                        cnt = 0;
                }

                /* MDT broadcast done; repeat once for the OSTs. */
                flags &= ~LPF_BROADCAST;
                goto lable2;
        case LE_PHASE1_DONE:
                /* Rotate-to-tail iteration over the phase-1 MDT list,
                 * bounded by the generation counter. */
                llmd->llmd_touch_gen++;
                lr->lr_flags &= ~LEF_TO_OST;
                ltds = &lfsck->li_mdt_descs;
                laia->laia_ltds = ltds;
                spin_lock(&ltds->ltd_lock);
                while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
                        ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
                                         struct lfsck_tgt_desc,
                                         ltd_layout_phase_list);
                        if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
                                break;

                        ltd->ltd_layout_gen = llmd->llmd_touch_gen;
                        list_del_init(&ltd->ltd_layout_phase_list);
                        list_add_tail(&ltd->ltd_layout_phase_list,
                                      &llmd->llmd_mdt_phase1_list);
                        laia->laia_ltd = ltd;
                        spin_unlock(&ltds->ltd_lock);
                        rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
                                        lfsck_layout_master_async_interpret,
                                        laia, LFSCK_NOTIFY);
                        if (rc != 0)
                                CERROR("%s: fail to notify MDT %x for layout "
                                       "phase1 done: rc = %d\n",
                                       lfsck_lfsck2name(lfsck),
                                       ltd->ltd_index, rc);
                        else
                                cnt++;
                        spin_lock(&ltds->ltd_lock);
                }
                spin_unlock(&ltds->ltd_lock);
                break;
        default:
                CERROR("%s: unexpected LFSCK event: rc = %d\n",
                       lfsck_lfsck2name(lfsck), lr->lr_event);
                rc = -EINVAL;
                break;
        }

        if (cnt > 0)
                rc = ptlrpc_set_wait(set);
        ptlrpc_set_destroy(set);

        /* A layout scan with no OST participating cannot do anything. */
        if (rc == 0 && lr->lr_event == LE_START &&
            list_empty(&llmd->llmd_ost_list))
                rc = -ENODEV;

        RETURN(rc);
}
1548
/**
 * Record the result of the layout LFSCK phase-2 (double) scan.
 *
 * Under com->lc_sem: accumulate the phase-2 run time and object count,
 * translate the scan result into the trace-file status (rc > 0 means
 * completed — possibly LS_PARTIAL if some target was LF_INCOMPLETE;
 * rc == 0 means stopped/paused per lfsck->li_status; rc < 0 means
 * failed), move the component to the idle list unless paused, and
 * persist the updated trace file.
 *
 * \retval result of lfsck_layout_store() (0 on success).
 */
static int lfsck_layout_double_scan_result(const struct lu_env *env,
                                           struct lfsck_component *com,
                                           int rc)
{
        struct lfsck_instance   *lfsck = com->lc_lfsck;
        struct lfsck_layout     *lo    = com->lc_file_ram;
        struct lfsck_bookmark   *bk    = &lfsck->li_bookmark_ram;

        down_write(&com->lc_sem);

        /* HALF_SEC rounds the duration to the nearest second. */
        lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
                                HALF_SEC - lfsck->li_time_last_checkpoint);
        lo->ll_time_last_checkpoint = cfs_time_current_sec();
        lo->ll_objs_checked_phase2 += com->lc_new_checked;

        if (rc > 0) {
                com->lc_journal = 0;
                if (lo->ll_flags & LF_INCOMPLETE)
                        lo->ll_status = LS_PARTIAL;
                else
                        lo->ll_status = LS_COMPLETED;
                /* A real (non-dry-run) completed scan clears the
                 * inconsistency markers. */
                if (!(bk->lb_param & LPF_DRYRUN))
                        lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
                lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
                lo->ll_success_count++;
        } else if (rc == 0) {
                lo->ll_status = lfsck->li_status;
                if (lo->ll_status == 0)
                        lo->ll_status = LS_STOPPED;
        } else {
                lo->ll_status = LS_FAILED;
        }

        /* Paused components stay linked so they can resume; everything
         * else goes back to the idle list. */
        if (lo->ll_status != LS_PAUSED) {
                spin_lock(&lfsck->li_lock);
                list_del_init(&com->lc_link);
                list_add_tail(&com->lc_link, &lfsck->li_list_idle);
                spin_unlock(&lfsck->li_lock);
        }

        rc = lfsck_layout_store(env, com);

        up_write(&com->lc_sem);

        return rc;
}
1595
1596 static int lfsck_layout_lock(const struct lu_env *env,
1597                              struct lfsck_component *com,
1598                              struct dt_object *obj,
1599                              struct lustre_handle *lh, __u64 bits)
1600 {
1601         struct lfsck_thread_info        *info   = lfsck_env_info(env);
1602         ldlm_policy_data_t              *policy = &info->lti_policy;
1603         struct ldlm_res_id              *resid  = &info->lti_resid;
1604         struct lfsck_instance           *lfsck  = com->lc_lfsck;
1605         __u64                            flags  = LDLM_FL_ATOMIC_CB;
1606         int                              rc;
1607
1608         LASSERT(lfsck->li_namespace != NULL);
1609
1610         memset(policy, 0, sizeof(*policy));
1611         policy->l_inodebits.bits = bits;
1612         fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1613         rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1614                                     policy, LCK_EX, &flags, ldlm_blocking_ast,
1615                                     ldlm_completion_ast, NULL, NULL, 0,
1616                                     LVB_T_NONE, NULL, lh);
1617         if (rc == ELDLM_OK) {
1618                 rc = 0;
1619         } else {
1620                 memset(lh, 0, sizeof(*lh));
1621                 rc = -EIO;
1622         }
1623
1624         return rc;
1625 }
1626
1627 static void lfsck_layout_unlock(struct lustre_handle *lh)
1628 {
1629         if (lustre_handle_is_used(lh)) {
1630                 ldlm_lock_decref(lh, LCK_EX);
1631                 memset(lh, 0, sizeof(*lh));
1632         }
1633 }
1634
1635 static int lfsck_layout_trans_stop(const struct lu_env *env,
1636                                    struct dt_device *dev,
1637                                    struct thandle *handle, int result)
1638 {
1639         int rc;
1640
1641         handle->th_result = result;
1642         rc = dt_trans_stop(env, dev, handle);
1643         if (rc > 0)
1644                 rc = 0;
1645         else if (rc == 0)
1646                 rc = 1;
1647
1648         return rc;
1649 }
1650
/* Scan the given OST target for orphan OST-objects (objects without any
 * parent MDT-object referencing them).
 *
 * Placeholder: the real implementation lands in a follow-up patch; for
 * now report success without doing anything. */
static int lfsck_layout_scan_orphan(const struct lu_env *env,
				    struct lfsck_component *com,
				    struct lfsck_tgt_desc *ltd)
{
	/* XXX: To be extended in other patch. */

	return 0;
}
1659
/* For the MDT-object with dangling reference, we need to re-create
 * the missed OST-object with the known FID/owner information.
 *
 * The parent MDT-object is locked (layout + xattr ibits) so its layout
 * cannot change while the OST-object is created, then a single
 * transaction creates the child and stores its filter_fid back-pointer.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] llr	the repair request: parent MDT-object, missed
 *			child OST-object, OST index and stripe index
 * \param[in] la	attributes (owner/mode/times) for the new child
 *
 * \retval result of lfsck_layout_trans_stop(): 1 on success, 0 or
 *	   negative otherwise
 */
static int lfsck_layout_recreate_ostobj(const struct lu_env *env,
					struct lfsck_component *com,
					struct lfsck_layout_req *llr,
					struct lu_attr *la)
{
	struct lfsck_thread_info	*info	= lfsck_env_info(env);
	struct filter_fid		*pfid	= &info->lti_new_pfid;
	struct dt_allocation_hint	*hint	= &info->lti_hint;
	struct dt_object		*parent = llr->llr_parent->llo_obj;
	struct dt_object		*child	= llr->llr_child;
	struct dt_device		*dev	= lfsck_obj2dt_dev(child);
	const struct lu_fid		*tfid	= lu_object_fid(&parent->do_lu);
	struct thandle			*handle;
	struct lu_buf			*buf;
	struct lustre_handle		 lh	= { 0 };
	int				 rc;
	ENTRY;

	CDEBUG(D_LFSCK, "Repair dangling reference for: parent "DFID
	       ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
	       PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
	       llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid);

	rc = lfsck_layout_lock(env, com, parent, &lh,
			       MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
	if (rc != 0)
		RETURN(rc);

	handle = dt_trans_create(env, dev);
	if (IS_ERR(handle))
		GOTO(unlock1, rc = PTR_ERR(handle));

	hint->dah_parent = NULL;
	hint->dah_mode = 0;
	pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
	pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
	/* ff_parent.f_ver carries the stripe index within the parent's
	 * layout, not the parent FID's real f_ver. */
	pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
	buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));

	rc = dt_declare_create(env, child, la, hint, NULL, handle);
	if (rc != 0)
		GOTO(stop, rc);

	rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
				  LU_XATTR_CREATE, handle);
	if (rc != 0)
		GOTO(stop, rc);

	rc = dt_trans_start(env, dev, handle);
	if (rc != 0)
		GOTO(stop, rc);

	/* If the parent is being destroyed concurrently, nothing to
	 * repair: return 1 so the caller counts it as handled. */
	dt_read_lock(env, parent, 0);
	if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
		GOTO(unlock2, rc = 1);

	rc = dt_create(env, child, la, hint, NULL, handle);
	if (rc != 0)
		GOTO(unlock2, rc);

	rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
			  handle, BYPASS_CAPA);

	GOTO(unlock2, rc);

unlock2:
	dt_read_unlock(env, parent);

stop:
	rc = lfsck_layout_trans_stop(env, dev, handle, rc);

unlock1:
	lfsck_layout_unlock(&lh);

	return rc;
}
1738
/* If the OST-object does not recognize the MDT-object as its parent, and
 * there is no other MDT-object claims as its parent, then just trust the
 * given MDT-object as its parent. So update the OST-object filter_fid.
 *
 * Within one transaction the child's XATTR_NAME_FID back-pointer is
 * rewritten to reference @parent, and the child's owner is synchronized
 * with the parent's current owner.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] llr	the repair request: parent MDT-object, child
 *			OST-object, OST index and stripe index
 * \param[in] pla	the parent's attributes at scan time (for logging
 *			and the declared owner change)
 *
 * \retval result of lfsck_layout_trans_stop(): 1 on success, 0 or
 *	   negative otherwise
 */
static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
					      struct lfsck_component *com,
					      struct lfsck_layout_req *llr,
					      const struct lu_attr *pla)
{
	struct lfsck_thread_info	*info	= lfsck_env_info(env);
	struct filter_fid		*pfid	= &info->lti_new_pfid;
	struct lu_attr			*tla	= &info->lti_la3;
	struct dt_object		*parent = llr->llr_parent->llo_obj;
	struct dt_object		*child	= llr->llr_child;
	struct dt_device		*dev	= lfsck_obj2dt_dev(child);
	const struct lu_fid		*tfid	= lu_object_fid(&parent->do_lu);
	struct thandle			*handle;
	struct lu_buf			*buf;
	struct lustre_handle		 lh	= { 0 };
	int				 rc;
	ENTRY;

	CDEBUG(D_LFSCK, "Repair unmatched MDT-OST pair for: parent "DFID
	       ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
	       PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
	       llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);

	rc = lfsck_layout_lock(env, com, parent, &lh,
			       MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
	if (rc != 0)
		RETURN(rc);

	handle = dt_trans_create(env, dev);
	if (IS_ERR(handle))
		GOTO(unlock1, rc = PTR_ERR(handle));

	pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
	pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
	/* The ff_parent->f_ver is not the real parent fid->f_ver. Instead,
	 * it is the OST-object index in the parent MDT-object layout. */
	pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
	buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));

	rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
	if (rc != 0)
		GOTO(stop, rc);

	tla->la_valid = LA_UID | LA_GID;
	tla->la_uid = pla->la_uid;
	tla->la_gid = pla->la_gid;
	rc = dt_declare_attr_set(env, child, tla, handle);
	if (rc != 0)
		GOTO(stop, rc);

	rc = dt_trans_start(env, dev, handle);
	if (rc != 0)
		GOTO(stop, rc);

	/* If the parent is being destroyed concurrently, nothing to
	 * repair: return 1 so the caller counts it as handled. */
	dt_write_lock(env, parent, 0);
	if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
		GOTO(unlock2, rc = 1);

	rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
			  BYPASS_CAPA);
	if (rc != 0)
		GOTO(unlock2, rc);

	/* Get the latest parent's owner. */
	rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
	if (rc != 0)
		GOTO(unlock2, rc);

	/* dt_attr_get() filled tla->la_valid; restrict the set to owner. */
	tla->la_valid = LA_UID | LA_GID;
	rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);

	GOTO(unlock2, rc);

unlock2:
	dt_write_unlock(env, parent);

stop:
	rc = lfsck_layout_trans_stop(env, dev, handle, rc);

unlock1:
	lfsck_layout_unlock(&lh);

	return rc;
}
1826
1827 /* If there are more than one MDT-objects claim as the OST-object's parent,
1828  * and the OST-object only recognizes one of them, then we need to generate
1829  * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
1830 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
1831                                                    struct lfsck_component *com,
1832                                                    struct lfsck_layout_req *llr,
1833                                                    struct lu_attr *la,
1834                                                    struct lu_buf *buf)
1835 {
1836         struct lfsck_thread_info        *info   = lfsck_env_info(env);
1837         struct dt_allocation_hint       *hint   = &info->lti_hint;
1838         struct dt_object_format         *dof    = &info->lti_dof;
1839         struct dt_device                *pdev   = com->lc_lfsck->li_next;
1840         struct ost_id                   *oi     = &info->lti_oi;
1841         struct dt_object                *parent = llr->llr_parent->llo_obj;
1842         struct dt_device                *cdev   = lfsck_obj2dt_dev(llr->llr_child);
1843         struct dt_object                *child  = NULL;
1844         struct lu_device                *d      = &cdev->dd_lu_dev;
1845         struct lu_object                *o      = NULL;
1846         struct thandle                  *handle;
1847         struct lov_mds_md_v1            *lmm;
1848         struct lov_ost_data_v1          *objs;
1849         struct lustre_handle             lh     = { 0 };
1850         __u32                            magic;
1851         int                              rc;
1852         ENTRY;
1853
1854         CDEBUG(D_LFSCK, "Repair multiple references for: parent "DFID
1855                ", OST-index %u, stripe-index %u, owner %u:%u\n",
1856                PFID(lfsck_dto2fid(parent)), llr->llr_ost_idx,
1857                llr->llr_lov_idx, la->la_uid, la->la_gid);
1858
1859         rc = lfsck_layout_lock(env, com, parent, &lh,
1860                                MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
1861         if (rc != 0)
1862                 RETURN(rc);
1863
1864         handle = dt_trans_create(env, pdev);
1865         if (IS_ERR(handle))
1866                 GOTO(unlock1, rc = PTR_ERR(handle));
1867
1868         o = lu_object_anon(env, d, NULL);
1869         if (IS_ERR(o))
1870                 GOTO(stop, rc = PTR_ERR(o));
1871
1872         child = container_of(o, struct dt_object, do_lu);
1873         o = lu_object_locate(o->lo_header, d->ld_type);
1874         if (unlikely(o == NULL))
1875                 GOTO(stop, rc = -EINVAL);
1876
1877         child = container_of(o, struct dt_object, do_lu);
1878         la->la_valid = LA_UID | LA_GID;
1879         hint->dah_parent = NULL;
1880         hint->dah_mode = 0;
1881         dof->dof_type = DFT_REGULAR;
1882         rc = dt_declare_create(env, child, la, NULL, NULL, handle);
1883         if (rc != 0)
1884                 GOTO(stop, rc);
1885
1886         rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
1887                                   LU_XATTR_REPLACE, handle);
1888         if (rc != 0)
1889                 GOTO(stop, rc);
1890
1891         rc = dt_trans_start(env, pdev, handle);
1892         if (rc != 0)
1893                 GOTO(stop, rc);
1894
1895         dt_write_lock(env, parent, 0);
1896         if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
1897                 GOTO(unlock2, rc = 0);
1898
1899         rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
1900         if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
1901                 GOTO(unlock2, rc = 0);
1902
1903         lmm = buf->lb_buf;
1904         rc = lfsck_layout_verify_header(lmm);
1905         if (rc != 0)
1906                 GOTO(unlock2, rc);
1907
1908         /* Someone change layout during the LFSCK, no need to repair then. */
1909         if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
1910                 GOTO(unlock2, rc = 0);
1911
1912         rc = dt_create(env, child, la, hint, dof, handle);
1913         if (rc != 0)
1914                 GOTO(unlock2, rc);
1915
1916         /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
1917          * been verified in lfsck_layout_verify_header() already. If some
1918          * new magic introduced in the future, then layout LFSCK needs to
1919          * be updated also. */
1920         magic = le32_to_cpu(lmm->lmm_magic);
1921         if (magic == LOV_MAGIC_V1) {
1922                 objs = &(lmm->lmm_objects[0]);
1923         } else {
1924                 LASSERT(magic == LOV_MAGIC_V3);
1925                 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
1926         }
1927
1928         lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
1929         fid_to_ostid(lu_object_fid(&child->do_lu), oi);
1930         ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
1931         objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
1932         objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
1933         rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
1934                           LU_XATTR_REPLACE, handle, BYPASS_CAPA);
1935
1936         GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
1937
1938 unlock2:
1939         dt_write_unlock(env, parent);
1940
1941 stop:
1942         if (child != NULL)
1943                 lu_object_put(env, &child->do_lu);
1944
1945         dt_trans_stop(env, pdev, handle);
1946
1947 unlock1:
1948         lfsck_layout_unlock(&lh);
1949
1950         return rc;
1951 }
1952
/* If the MDT-object and the OST-object have different owner information,
 * then trust the MDT-object, because the normal chown/chgrp handle order
 * is from MDT to OST, and it is possible that some chown/chgrp operation
 * is partly done.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] llr	the repair request: parent MDT-object and child
 *			OST-object
 * \param[in] pla	the parent's attributes observed at scan time
 *
 * \retval result of lfsck_layout_trans_stop(): 1 on success (or when
 *	   the repair became unnecessary), 0 or negative otherwise
 */
static int lfsck_layout_repair_owner(const struct lu_env *env,
				     struct lfsck_component *com,
				     struct lfsck_layout_req *llr,
				     struct lu_attr *pla)
{
	struct lfsck_thread_info	*info	= lfsck_env_info(env);
	struct lu_attr			*tla	= &info->lti_la3;
	struct dt_object		*parent = llr->llr_parent->llo_obj;
	struct dt_object		*child	= llr->llr_child;
	struct dt_device		*dev	= lfsck_obj2dt_dev(child);
	struct thandle			*handle;
	int				 rc;
	ENTRY;

	CDEBUG(D_LFSCK, "Repair inconsistent file owner for: parent "DFID
	       ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
	       PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
	       llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);

	handle = dt_trans_create(env, dev);
	if (IS_ERR(handle))
		RETURN(PTR_ERR(handle));

	tla->la_uid = pla->la_uid;
	tla->la_gid = pla->la_gid;
	tla->la_valid = LA_UID | LA_GID;
	rc = dt_declare_attr_set(env, child, tla, handle);
	if (rc != 0)
		GOTO(stop, rc);

	rc = dt_trans_start(env, dev, handle);
	if (rc != 0)
		GOTO(stop, rc);

	/* Use the dt_object lock to serialize with destroy and attr_set. */
	dt_read_lock(env, parent, 0);
	if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
		GOTO(unlock, rc = 1);

	/* Get the latest parent's owner. */
	rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
	if (rc != 0) {
		CWARN("%s: fail to get the latest parent's ("DFID") owner, "
		      "not sure whether some others chown/chgrp during the "
		      "LFSCK: rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
		      PFID(lfsck_dto2fid(parent)), rc);

		GOTO(unlock, rc);
	}

	/* Some others chown/chgrp during the LFSCK, needs to do nothing. */
	if (unlikely(tla->la_uid != pla->la_uid ||
		     tla->la_gid != pla->la_gid))
		GOTO(unlock, rc = 1);

	/* dt_attr_get() filled tla->la_valid; restrict the set to owner. */
	tla->la_valid = LA_UID | LA_GID;
	rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);

	GOTO(unlock, rc);

unlock:
	dt_read_unlock(env, parent);

stop:
	rc = lfsck_layout_trans_stop(env, dev, handle, rc);

	return rc;
}
2025
/* Check whether the OST-object correctly back points to the
 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid).
 *
 * \param[in]  env	thread execution environment
 * \param[in]  com	the layout LFSCK component
 * \param[in]  parent	the MDT-object claiming the OST-object
 * \param[in]  pfid	parent FID stored in the OST-object's filter_fid
 *			(zero FID if the OST-object has none)
 * \param[in]  cfid	the OST-object's own FID
 * \param[in]  pla	parent attributes
 * \param[in]  cla	child (OST-object) attributes
 * \param[in]  llr	the layout request being verified
 * \param[out] lov_ea	on LLIT_MULTIPLE_REFERENCED, set to the layout EA
 *			of the OTHER MDT-object that references the child
 * \param[in]  idx	stripe index recorded in the child's filter_fid
 *
 * \retval 0				the pair is consistent
 * \retval LLIT_* (positive)		the detected inconsistency type
 * \retval negative			on failure
 */
static int lfsck_layout_check_parent(const struct lu_env *env,
				     struct lfsck_component *com,
				     struct dt_object *parent,
				     const struct lu_fid *pfid,
				     const struct lu_fid *cfid,
				     const struct lu_attr *pla,
				     const struct lu_attr *cla,
				     struct lfsck_layout_req *llr,
				     struct lu_buf *lov_ea, __u32 idx)
{
	struct lfsck_thread_info	*info	= lfsck_env_info(env);
	struct lu_buf			*buf	= &info->lti_big_buf;
	struct dt_object		*tobj;
	struct lov_mds_md_v1		*lmm;
	struct lov_ost_data_v1		*objs;
	int				 rc;
	int				 i;
	__u32				 magic;
	__u16				 count;
	ENTRY;

	if (fid_is_zero(pfid)) {
		/* client never wrote. */
		if (cla->la_size == 0 && cla->la_blocks == 0) {
			/* An untouched object with no back-pointer is fine
			 * as long as the ownership matches. */
			if (unlikely(cla->la_uid != pla->la_uid ||
				     cla->la_gid != pla->la_gid))
				RETURN (LLIT_INCONSISTENT_OWNER);

			RETURN(0);
		}

		RETURN(LLIT_UNMATCHED_PAIR);
	}

	if (unlikely(!fid_is_sane(pfid)))
		RETURN(LLIT_UNMATCHED_PAIR);

	if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
		/* Back-pointer matches; the stripe index must match too. */
		if (llr->llr_lov_idx == idx)
			RETURN(0);

		RETURN(LLIT_UNMATCHED_PAIR);
	}

	/* The back-pointer names some other MDT-object; look it up. */
	tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
	if (tobj == NULL)
		RETURN(LLIT_UNMATCHED_PAIR);

	if (IS_ERR(tobj))
		RETURN(PTR_ERR(tobj));

	if (!dt_object_exists(tobj))
		GOTO(out, rc = LLIT_UNMATCHED_PAIR);

	/* Load the tobj's layout EA, in spite of it is a local MDT-object or
	 * remote one on another MDT. Then check whether the given OST-object
	 * is in such layout. If yes, it is multiple referenced, otherwise it
	 * is unmatched referenced case. */
	rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
	if (rc == 0)
		GOTO(out, rc = LLIT_UNMATCHED_PAIR);

	if (rc < 0)
		GOTO(out, rc);

	lmm = buf->lb_buf;
	rc = lfsck_layout_verify_header(lmm);
	if (rc != 0)
		GOTO(out, rc);

	/* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
	 * been verified in lfsck_layout_verify_header() already. If some
	 * new magic introduced in the future, then layout LFSCK needs to
	 * be updated also. */
	magic = le32_to_cpu(lmm->lmm_magic);
	if (magic == LOV_MAGIC_V1) {
		objs = &(lmm->lmm_objects[0]);
	} else {
		LASSERT(magic == LOV_MAGIC_V3);
		objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
	}

	/* Scan the other MDT-object's stripes for the child's FID. */
	count = le16_to_cpu(lmm->lmm_stripe_count);
	for (i = 0; i < count; i++, objs++) {
		struct lu_fid		*tfid	= &info->lti_fid2;
		struct ost_id		*oi	= &info->lti_oi;

		ostid_le_to_cpu(&objs->l_ost_oi, oi);
		ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
		if (lu_fid_eq(cfid, tfid)) {
			/* Hand the other parent's layout EA back to the
			 * caller for the multiple-reference repair. */
			*lov_ea = *buf;

			GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
		}
	}

	GOTO(out, rc = LLIT_UNMATCHED_PAIR);

out:
	lfsck_object_put(env, tobj);

	return rc;
}
2131
/* Verify one MDT-object/OST-object pair, classify any inconsistency
 * found (dangling reference, unmatched pair, multiple references,
 * inconsistent owner), and dispatch the matching repair unless running
 * in dryrun mode. Statistics in the trace file are updated under
 * com->lc_sem.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] llr	the request describing the pair to verify
 *
 * \retval positive	an inconsistency was found (and repaired unless
 *			dryrun)
 * \retval 0		the pair is consistent, or the error was absorbed
 *			(target unreachable counts as skipped)
 * \retval negative	on failure
 */
static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
					     struct lfsck_component *com,
					     struct lfsck_layout_req *llr)
{
	struct lfsck_layout		     *lo     = com->lc_file_ram;
	struct lfsck_thread_info	     *info   = lfsck_env_info(env);
	struct filter_fid_old		     *pea    = &info->lti_old_pfid;
	struct lu_fid			     *pfid   = &info->lti_fid;
	struct lu_buf			     *buf    = NULL;
	struct dt_object		     *parent = llr->llr_parent->llo_obj;
	struct dt_object		     *child  = llr->llr_child;
	struct lu_attr			     *pla    = &info->lti_la;
	struct lu_attr			     *cla    = &info->lti_la2;
	struct lfsck_instance		     *lfsck  = com->lc_lfsck;
	struct lfsck_bookmark		     *bk     = &lfsck->li_bookmark_ram;
	enum lfsck_layout_inconsistency_type  type   = LLIT_NONE;
	__u32				      idx    = 0;
	int				      rc;
	ENTRY;

	rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
	if (rc != 0) {
		/* Parent is being destroyed: nothing to check. */
		if (lu_object_is_dying(parent->do_lu.lo_header))
			RETURN(0);

		GOTO(out, rc);
	}

	rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
	if (rc == -ENOENT) {
		if (lu_object_is_dying(parent->do_lu.lo_header))
			RETURN(0);

		/* Parent exists but the referenced OST-object does not:
		 * dangling reference. */
		type = LLIT_DANGLING;
		goto repair;
	}

	if (rc != 0)
		GOTO(out, rc);

	/* Read the child's filter_fid back-pointer; both the old (larger)
	 * and the current filter_fid sizes are acceptable. */
	buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
	rc = dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
	if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
		     rc != sizeof(struct filter_fid))) {
		type = LLIT_UNMATCHED_PAIR;
		goto repair;
	}

	if (rc < 0 && rc != -ENODATA)
		GOTO(out, rc);

	if (rc == -ENODATA) {
		fid_zero(pfid);
	} else {
		fid_le_to_cpu(pfid, &pea->ff_parent);
		/* OST-object does not save parent FID::f_ver, instead,
		 * the OST-object index in the parent MDT-object layout
		 * EA reuses the pfid->f_ver. */
		idx = pfid->f_ver;
		pfid->f_ver = 0;
	}

	rc = lfsck_layout_check_parent(env, com, parent, pfid,
				       lu_object_fid(&child->do_lu),
				       pla, cla, llr, buf, idx);
	if (rc > 0) {
		/* Positive return is an LLIT_* inconsistency type. */
		type = rc;
		goto repair;
	}

	if (rc < 0)
		GOTO(out, rc);

	if (unlikely(cla->la_uid != pla->la_uid ||
		     cla->la_gid != pla->la_gid)) {
		type = LLIT_INCONSISTENT_OWNER;
		goto repair;
	}

	/* Falls through with type == LLIT_NONE: nothing to repair. */
repair:
	if (bk->lb_param & LPF_DRYRUN) {
		/* Dryrun: only report whether an inconsistency was found. */
		if (type != LLIT_NONE)
			GOTO(out, rc = 1);
		else
			GOTO(out, rc = 0);
	}

	switch (type) {
	case LLIT_DANGLING:
		/* Re-create the missing OST-object with the parent's
		 * ownership and default regular-file mode. */
		memset(cla, 0, sizeof(*cla));
		cla->la_uid = pla->la_uid;
		cla->la_gid = pla->la_gid;
		cla->la_mode = S_IFREG | 0666;
		cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
				LA_ATIME | LA_MTIME | LA_CTIME;
		rc = lfsck_layout_recreate_ostobj(env, com, llr, cla);
		break;
	case LLIT_UNMATCHED_PAIR:
		rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
		break;
	case LLIT_MULTIPLE_REFERENCED:
		rc = lfsck_layout_repair_multiple_references(env, com, llr,
							     pla, buf);
		break;
	case LLIT_INCONSISTENT_OWNER:
		rc = lfsck_layout_repair_owner(env, com, llr, pla);
		break;
	default:
		rc = 0;
		break;
	}

	GOTO(out, rc);

out:
	down_write(&com->lc_sem);
	if (rc < 0) {
		/* If cannot touch the target server,
		 * mark the LFSCK as INCOMPLETE. */
		if (rc == -ENOTCONN || rc == -ESHUTDOWN || rc == -ETIMEDOUT ||
		    rc == -EHOSTDOWN || rc == -EHOSTUNREACH) {
			lo->ll_flags |= LF_INCOMPLETE;
			lo->ll_objs_skipped++;
			rc = 0;
		} else {
			lo->ll_objs_failed_phase1++;
		}
	} else if (rc > 0) {
		LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
			 "unknown type = %d\n", type);

		lo->ll_objs_repaired[type - 1]++;
	}
	up_write(&com->lc_sem);

	return rc;
}
2269
/*
 * Assistant thread for the layout LFSCK master engine.
 *
 * Started from lfsck_layout_master_prep().  The thread:
 *   1. broadcasts an LE_START event to the other LFSCK targets;
 *   2. drains the request list (llmd->llmd_req_list) that the main engine
 *      thread fills, handling one request at a time via
 *      lfsck_layout_assistant_handle_one();
 *   3. when asked to post (llmd_to_post), broadcasts LE_PHASE1_DONE;
 *   4. when asked to double scan (llmd_to_double_scan), drives the orphan
 *      scanning against each OST on llmd_ost_phase2_list until all OSTs
 *      have left phase1;
 *   5. on any exit path, broadcasts a final event (LE_PHASE2_DONE or
 *      LE_STOP) and publishes its status via llmd_assistant_status.
 *
 * \param[in] args	struct lfsck_thread_args; owned by this thread and
 *			released via lfsck_thread_args_fini() before return.
 *
 * \retval		the assistant's final status (also stored in
 *			llmd->llmd_assistant_status): positive for scan
 *			done, 0 for stop/pause, negative on error
 */
static int lfsck_layout_assistant(void *args)
{
        struct lfsck_thread_args        *lta     = args;
        struct lu_env                   *env     = &lta->lta_env;
        struct lfsck_component          *com     = lta->lta_com;
        struct lfsck_instance           *lfsck   = lta->lta_lfsck;
        struct lfsck_bookmark           *bk      = &lfsck->li_bookmark_ram;
        struct lfsck_position           *pos     = &com->lc_pos_start;
        struct lfsck_thread_info        *info    = lfsck_env_info(env);
        struct lfsck_request            *lr      = &info->lti_lr;
        struct lfsck_layout_master_data *llmd    = com->lc_data;
        struct ptlrpc_thread            *mthread = &lfsck->li_thread;
        struct ptlrpc_thread            *athread = &llmd->llmd_thread;
        struct lfsck_layout_req         *llr;
        struct l_wait_info               lwi     = { 0 };
        int                              rc      = 0;
        int                              rc1     = 0;
        __u32                            flags;
        ENTRY;

        if (lta->lta_lsp->lsp_start != NULL)
                flags  = lta->lta_lsp->lsp_start->ls_flags;
        else
                flags = bk->lb_param;
        memset(lr, 0, sizeof(*lr));
        lr->lr_event = LE_START;
        lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
        lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
                       LSV_ASYNC_WINDOWS;
        lr->lr_speed = bk->lb_speed_limit;
        lr->lr_version = bk->lb_version;
        lr->lr_param = bk->lb_param;
        lr->lr_async_windows = bk->lb_async_windows;
        /* Starting at (or before) the first OIT slot means a scan from
         * scratch, so ask the other targets to reset as well. */
        if (pos->lp_oit_cookie <= 1)
                lr->lr_param |= LPF_RESET;

        /* Broadcast LE_START; if that fails, bail out before publishing
         * the RUNNING state so the master sees the failure in prep. */
        rc = lfsck_layout_master_notify_others(env, com, lr, flags);
        if (rc != 0) {
                CERROR("%s: fail to notify others for layout start: rc = %d\n",
                       lfsck_lfsck2name(lfsck), rc);
                GOTO(fini, rc);
        }

        /* Publish RUNNING and wake the master engine waiting in
         * lfsck_layout_master_prep(). */
        spin_lock(&llmd->llmd_lock);
        thread_set_flags(athread, SVC_RUNNING);
        spin_unlock(&llmd->llmd_lock);
        wake_up_all(&mthread->t_ctl_waitq);

        while (1) {
                while (!list_empty(&llmd->llmd_req_list)) {
                        bool wakeup = false;

                        if (unlikely(llmd->llmd_exit))
                                GOTO(cleanup1, rc = llmd->llmd_post_result);

                        llr = list_entry(llmd->llmd_req_list.next,
                                         struct lfsck_layout_req,
                                         llr_list);
                        /* Only the lfsck_layout_assistant thread itself can
                         * remove the "llr" from the head of the list, LFSCK
                         * engine thread only inserts other new "lld" at the
                         * end of the list. So it is safe to handle current
                         * "llr" without the spin_lock. */
                        rc = lfsck_layout_assistant_handle_one(env, com, llr);
                        spin_lock(&llmd->llmd_lock);
                        list_del_init(&llr->llr_list);
                        /* The engine throttles itself when the prefetch
                         * window is full; wake it once we free a slot. */
                        if (bk->lb_async_windows != 0 &&
                            llmd->llmd_prefetched >= bk->lb_async_windows)
                                wakeup = true;

                        llmd->llmd_prefetched--;
                        spin_unlock(&llmd->llmd_lock);
                        if (wakeup)
                                wake_up_all(&mthread->t_ctl_waitq);

                        lfsck_layout_req_fini(env, llr);
                        if (rc < 0 && bk->lb_param & LPF_FAILOUT)
                                GOTO(cleanup1, rc);
                }

                /* Wakeup the master engine if it is waiting in checkpoint. */
                wake_up_all(&mthread->t_ctl_waitq);

                l_wait_event(athread->t_ctl_waitq,
                             !lfsck_layout_req_empty(llmd) ||
                             llmd->llmd_exit ||
                             llmd->llmd_to_post ||
                             llmd->llmd_to_double_scan,
                             &lwi);

                if (unlikely(llmd->llmd_exit))
                        GOTO(cleanup1, rc = llmd->llmd_post_result);

                /* New requests arrived while we slept: service them before
                 * considering post/double-scan transitions. */
                if (!list_empty(&llmd->llmd_req_list))
                        continue;

                if (llmd->llmd_to_post) {
                        llmd->llmd_to_post = 0;
                        LASSERT(llmd->llmd_post_result > 0);

                        /* Phase1 finished locally: tell the other targets. */
                        memset(lr, 0, sizeof(*lr));
                        lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
                        lr->lr_event = LE_PHASE1_DONE;
                        lr->lr_status = llmd->llmd_post_result;
                        rc = lfsck_layout_master_notify_others(env, com, lr, 0);
                        if (rc != 0)
                                CERROR("%s: failed to notify others "
                                       "for layout post: rc = %d\n",
                                       lfsck_lfsck2name(lfsck), rc);

                        /* Wakeup the master engine to go ahead. */
                        wake_up_all(&mthread->t_ctl_waitq);
                }

                if (llmd->llmd_to_double_scan) {
                        llmd->llmd_to_double_scan = 0;
                        atomic_inc(&lfsck->li_double_scan_count);
                        llmd->llmd_in_double_scan = 1;
                        wake_up_all(&mthread->t_ctl_waitq);

                        while (llmd->llmd_in_double_scan) {
                                struct lfsck_tgt_descs  *ltds =
                                                        &lfsck->li_ost_descs;
                                struct lfsck_tgt_desc   *ltd;

                                rc = lfsck_layout_master_query_others(env, com);
                                if (lfsck_layout_master_to_orphan(llmd))
                                        goto orphan;

                                if (rc < 0)
                                        GOTO(cleanup2, rc);

                                /* Pull LFSCK status on related targets once
                                 * per 30 seconds if we are not notified. */
                                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
                                                           cfs_time_seconds(1),
                                                           NULL, NULL);
                                rc = l_wait_event(athread->t_ctl_waitq,
                                        lfsck_layout_master_to_orphan(llmd) ||
                                        llmd->llmd_exit ||
                                        !thread_is_running(mthread),
                                        &lwi);

                                if (unlikely(llmd->llmd_exit ||
                                             !thread_is_running(mthread)))
                                        GOTO(cleanup2, rc = 0);

                                if (rc == -ETIMEDOUT)
                                        continue;

                                if (rc < 0)
                                        GOTO(cleanup2, rc);

orphan:
                                /* Scan orphans on each OST that has entered
                                 * the second phase; the lock is dropped for
                                 * the (slow) per-OST scan and re-taken for
                                 * each list operation. */
                                spin_lock(&ltds->ltd_lock);
                                while (!list_empty(
                                                &llmd->llmd_ost_phase2_list)) {
                                        ltd = list_entry(
                                              llmd->llmd_ost_phase2_list.next,
                                              struct lfsck_tgt_desc,
                                              ltd_layout_phase_list);
                                        list_del_init(
                                                &ltd->ltd_layout_phase_list);
                                        spin_unlock(&ltds->ltd_lock);

                                        rc = lfsck_layout_scan_orphan(env, com,
                                                                      ltd);
                                        if (rc != 0 &&
                                            bk->lb_param & LPF_FAILOUT)
                                                GOTO(cleanup2, rc);

                                        if (unlikely(llmd->llmd_exit ||
                                                !thread_is_running(mthread)))
                                                GOTO(cleanup2, rc = 0);

                                        spin_lock(&ltds->ltd_lock);
                                }

                                /* No target is still in phase1: the double
                                 * scan is complete (positive rc). */
                                if (list_empty(&llmd->llmd_ost_phase1_list)) {
                                        spin_unlock(&ltds->ltd_lock);
                                        GOTO(cleanup2, rc = 1);
                                }
                                spin_unlock(&ltds->ltd_lock);
                        }
                }
        }

cleanup1:
        /* Cleanup the unfinished requests. */
        spin_lock(&llmd->llmd_lock);
        if (rc < 0)
                llmd->llmd_assistant_status = rc;

        while (!list_empty(&llmd->llmd_req_list)) {
                llr = list_entry(llmd->llmd_req_list.next,
                                 struct lfsck_layout_req,
                                 llr_list);
                list_del_init(&llr->llr_list);
                llmd->llmd_prefetched--;
                spin_unlock(&llmd->llmd_lock);
                lfsck_layout_req_fini(env, llr);
                spin_lock(&llmd->llmd_lock);
        }
        spin_unlock(&llmd->llmd_lock);

        LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
                 llmd->llmd_prefetched);

cleanup2:
        /* Tell the other targets why the assistant is leaving:
         * rc > 0  -> phase2 completed (LE_PHASE2_DONE);
         * rc == 0 -> stopped or paused (LE_STOP, status derived from
         *            the instance status);
         * rc < 0  -> failure (LE_STOP with LS_CO_FAILED). */
        memset(lr, 0, sizeof(*lr));
        lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
        if (rc > 0) {
                lr->lr_event = LE_PHASE2_DONE;
                flags = 0;
                lr->lr_status = rc;
        } else if (rc == 0) {
                lr->lr_event = LE_STOP;
                if (lfsck->li_status == LS_PAUSED ||
                    lfsck->li_status == LS_CO_PAUSED) {
                        flags = 0;
                        lr->lr_status = LS_CO_PAUSED;
                } else if (lfsck->li_status == LS_STOPPED ||
                         lfsck->li_status == LS_CO_STOPPED) {
                        flags = lfsck->li_flags;
                        if (flags & LPF_BROADCAST)
                                lr->lr_status = LS_STOPPED;
                        else
                                lr->lr_status = LS_CO_STOPPED;
                } else {
                        LBUG();
                }
        } else {
                lr->lr_event = LE_STOP;
                flags = 0;
                lr->lr_status = LS_CO_FAILED;
        }

        rc1 = lfsck_layout_master_notify_others(env, com, lr, flags);
        if (rc1 != 0) {
                CERROR("%s: failed to notify others for layout quit: rc = %d\n",
                       lfsck_lfsck2name(lfsck), rc1);
                rc = rc1;
        }

        /* Under force exit case, some requests may be just freed without
         * verification, those objects should be re-handled when next run.
         * So not update the on-disk tracing file under such case. */
        if (!llmd->llmd_exit)
                rc1 = lfsck_layout_double_scan_result(env, com, rc);

fini:
        if (llmd->llmd_in_double_scan)
                atomic_dec(&lfsck->li_double_scan_count);

        /* Publish the final status and STOPPED state, then release the
         * thread arguments (including our env). */
        spin_lock(&llmd->llmd_lock);
        llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
        thread_set_flags(athread, SVC_STOPPED);
        wake_up_all(&mthread->t_ctl_waitq);
        spin_unlock(&llmd->llmd_lock);
        lfsck_thread_args_fini(lta);

        return rc;
}
2533
2534 static int
2535 lfsck_layout_slave_async_interpret(const struct lu_env *env,
2536                                    struct ptlrpc_request *req,
2537                                    void *args, int rc)
2538 {
2539         struct lfsck_layout_slave_async_args *llsaa = args;
2540         struct obd_export                    *exp   = llsaa->llsaa_exp;
2541         struct lfsck_component               *com   = llsaa->llsaa_com;
2542         struct lfsck_layout_slave_target     *llst  = llsaa->llsaa_llst;
2543         struct lfsck_layout_slave_data       *llsd  = com->lc_data;
2544         bool                                  done  = false;
2545
2546         if (rc != 0) {
2547                 /* It is quite probably caused by target crash,
2548                  * to make the LFSCK can go ahead, assume that
2549                  * the target finished the LFSCK prcoessing. */
2550                 done = true;
2551         } else {
2552                 struct lfsck_reply *lr;
2553
2554                 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
2555                 if (lr->lr_status != LS_SCANNING_PHASE1 &&
2556                     lr->lr_status != LS_SCANNING_PHASE2)
2557                         done = true;
2558         }
2559         if (done)
2560                 lfsck_layout_llst_del(llsd, llst);
2561         lfsck_layout_llst_put(llst);
2562         lfsck_component_put(env, com);
2563         class_export_put(exp);
2564
2565         return 0;
2566 }
2567
2568 static int lfsck_layout_async_query(const struct lu_env *env,
2569                                     struct lfsck_component *com,
2570                                     struct obd_export *exp,
2571                                     struct lfsck_layout_slave_target *llst,
2572                                     struct lfsck_request *lr,
2573                                     struct ptlrpc_request_set *set)
2574 {
2575         struct lfsck_layout_slave_async_args *llsaa;
2576         struct ptlrpc_request                *req;
2577         struct lfsck_request                 *tmp;
2578         int                                   rc;
2579         ENTRY;
2580
2581         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
2582         if (req == NULL)
2583                 RETURN(-ENOMEM);
2584
2585         rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
2586         if (rc != 0) {
2587                 ptlrpc_request_free(req);
2588                 RETURN(rc);
2589         }
2590
2591         tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2592         *tmp = *lr;
2593         ptlrpc_request_set_replen(req);
2594
2595         llsaa = ptlrpc_req_async_args(req);
2596         llsaa->llsaa_exp = exp;
2597         llsaa->llsaa_com = lfsck_component_get(com);
2598         llsaa->llsaa_llst = llst;
2599         req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
2600         ptlrpc_set_add_req(set, req);
2601
2602         RETURN(0);
2603 }
2604
2605 static int lfsck_layout_async_notify(const struct lu_env *env,
2606                                      struct obd_export *exp,
2607                                      struct lfsck_request *lr,
2608                                      struct ptlrpc_request_set *set)
2609 {
2610         struct ptlrpc_request   *req;
2611         struct lfsck_request    *tmp;
2612         int                      rc;
2613         ENTRY;
2614
2615         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2616         if (req == NULL)
2617                 RETURN(-ENOMEM);
2618
2619         rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2620         if (rc != 0) {
2621                 ptlrpc_request_free(req);
2622                 RETURN(rc);
2623         }
2624
2625         tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2626         *tmp = *lr;
2627         ptlrpc_request_set_replen(req);
2628         ptlrpc_set_add_req(set, req);
2629
2630         RETURN(0);
2631 }
2632
/*
 * Query all masters on the slave's master list for their layout LFSCK
 * status, using one async RPC per master gathered in a single set.
 *
 * List-walk protocol: llsd_touch_gen is bumped once per invocation and
 * each visited entry is stamped with the new generation and moved to
 * the list tail; meeting an entry already carrying the current
 * generation means the whole list has been walked.  llsd_lock is
 * dropped around the (blocking) export lookup and RPC preparation, and
 * re-taken for every list operation.
 *
 * Masters whose LWP export can no longer be found are removed from the
 * list; masters that replied "not scanning" are removed by the RPC
 * interpret callback.
 *
 * \retval the last per-target query error if any occurred, otherwise
 *	   the result of waiting for the RPC set (0 if nothing was sent)
 */
static int
lfsck_layout_slave_query_master(const struct lu_env *env,
                                struct lfsck_component *com)
{
        struct lfsck_request             *lr    = &lfsck_env_info(env)->lti_lr;
        struct lfsck_instance            *lfsck = com->lc_lfsck;
        struct lfsck_layout_slave_data   *llsd  = com->lc_data;
        struct lfsck_layout_slave_target *llst;
        struct obd_export                *exp;
        struct ptlrpc_request_set        *set;
        int                               cnt   = 0;
        int                               rc    = 0;
        int                               rc1   = 0;
        ENTRY;

        set = ptlrpc_prep_set();
        if (set == NULL)
                RETURN(-ENOMEM);

        memset(lr, 0, sizeof(*lr));
        lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
        lr->lr_event = LE_QUERY;
        lr->lr_active = LT_LAYOUT;

        llsd->llsd_touch_gen++;
        spin_lock(&llsd->llsd_lock);
        while (!list_empty(&llsd->llsd_master_list)) {
                llst = list_entry(llsd->llsd_master_list.next,
                                  struct lfsck_layout_slave_target,
                                  llst_list);
                /* Already touched in this pass: the walk is complete. */
                if (llst->llst_gen == llsd->llsd_touch_gen)
                        break;

                llst->llst_gen = llsd->llsd_touch_gen;
                list_del(&llst->llst_list);
                list_add_tail(&llst->llst_list,
                              &llsd->llsd_master_list);
                /* Hold a reference across the unlocked RPC preparation;
                 * released here on failure or by the interpret callback. */
                atomic_inc(&llst->llst_ref);
                spin_unlock(&llsd->llsd_lock);

                exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
                                               llst->llst_index);
                if (exp == NULL) {
                        /* No LWP export for this master any more: drop it. */
                        lfsck_layout_llst_del(llsd, llst);
                        lfsck_layout_llst_put(llst);
                        spin_lock(&llsd->llsd_lock);
                        continue;
                }

                rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
                if (rc != 0) {
                        CERROR("%s: slave fail to query %s for layout: "
                               "rc = %d\n", lfsck_lfsck2name(lfsck),
                               exp->exp_obd->obd_name, rc);
                        rc1 = rc;
                        lfsck_layout_llst_put(llst);
                        class_export_put(exp);
                } else {
                        cnt++;
                }
                spin_lock(&llsd->llsd_lock);
        }
        spin_unlock(&llsd->llsd_lock);

        if (cnt > 0)
                rc = ptlrpc_set_wait(set);
        ptlrpc_set_destroy(set);

        RETURN(rc1 != 0 ? rc1 : rc);
}
2703
/*
 * Notify all masters on the slave's master list about the given event
 * (with \a result as the status), using one async RPC per master
 * gathered in a single set.
 *
 * Uses the same llsd_touch_gen list-rotation protocol as
 * lfsck_layout_slave_query_master(): each entry is stamped with the new
 * generation and moved to the tail, and the walk stops at the first
 * entry already stamped.  llsd_lock is dropped around export lookup and
 * RPC preparation.
 *
 * Masters with no LWP export are dropped from the list.  Notification
 * failures are only logged; the function returns no status.
 */
static void
lfsck_layout_slave_notify_master(const struct lu_env *env,
                                 struct lfsck_component *com,
                                 enum lfsck_events event, int result)
{
        struct lfsck_instance            *lfsck = com->lc_lfsck;
        struct lfsck_layout_slave_data   *llsd  = com->lc_data;
        struct lfsck_request             *lr    = &lfsck_env_info(env)->lti_lr;
        struct lfsck_layout_slave_target *llst;
        struct obd_export                *exp;
        struct ptlrpc_request_set        *set;
        int                               cnt   = 0;
        int                               rc;
        ENTRY;

        set = ptlrpc_prep_set();
        if (set == NULL)
                RETURN_EXIT;

        memset(lr, 0, sizeof(*lr));
        lr->lr_event = event;
        lr->lr_flags = LEF_FROM_OST;
        lr->lr_status = result;
        lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
        lr->lr_active = LT_LAYOUT;
        llsd->llsd_touch_gen++;
        spin_lock(&llsd->llsd_lock);
        while (!list_empty(&llsd->llsd_master_list)) {
                llst = list_entry(llsd->llsd_master_list.next,
                                  struct lfsck_layout_slave_target,
                                  llst_list);
                /* Already touched in this pass: the walk is complete. */
                if (llst->llst_gen == llsd->llsd_touch_gen)
                        break;

                llst->llst_gen = llsd->llsd_touch_gen;
                list_del(&llst->llst_list);
                list_add_tail(&llst->llst_list,
                              &llsd->llsd_master_list);
                /* Hold a reference across the unlocked RPC preparation. */
                atomic_inc(&llst->llst_ref);
                spin_unlock(&llsd->llsd_lock);

                exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
                                               llst->llst_index);
                if (exp == NULL) {
                        /* No LWP export for this master any more: drop it. */
                        lfsck_layout_llst_del(llsd, llst);
                        lfsck_layout_llst_put(llst);
                        spin_lock(&llsd->llsd_lock);
                        continue;
                }

                rc = lfsck_layout_async_notify(env, exp, lr, set);
                if (rc != 0)
                        CERROR("%s: slave fail to notify %s for layout: "
                               "rc = %d\n", lfsck_lfsck2name(lfsck),
                               exp->exp_obd->obd_name, rc);
                else
                        cnt++;
                lfsck_layout_llst_put(llst);
                class_export_put(exp);
                spin_lock(&llsd->llsd_lock);
        }
        spin_unlock(&llsd->llsd_lock);

        if (cnt > 0)
                rc = ptlrpc_set_wait(set);

        ptlrpc_set_destroy(set);

        RETURN_EXIT;
}
2774
2775 /* layout APIs */
2776
2777 static int lfsck_layout_reset(const struct lu_env *env,
2778                               struct lfsck_component *com, bool init)
2779 {
2780         struct lfsck_layout     *lo    = com->lc_file_ram;
2781         int                      rc;
2782
2783         down_write(&com->lc_sem);
2784         if (init) {
2785                 memset(lo, 0, com->lc_file_size);
2786         } else {
2787                 __u32 count = lo->ll_success_count;
2788                 __u64 last_time = lo->ll_time_last_complete;
2789
2790                 memset(lo, 0, com->lc_file_size);
2791                 lo->ll_success_count = count;
2792                 lo->ll_time_last_complete = last_time;
2793         }
2794
2795         lo->ll_magic = LFSCK_LAYOUT_MAGIC;
2796         lo->ll_status = LS_INIT;
2797
2798         rc = lfsck_layout_store(env, com);
2799         up_write(&com->lc_sem);
2800
2801         return rc;
2802 }
2803
2804 static void lfsck_layout_fail(const struct lu_env *env,
2805                               struct lfsck_component *com, bool new_checked)
2806 {
2807         struct lfsck_layout *lo = com->lc_file_ram;
2808
2809         down_write(&com->lc_sem);
2810         if (new_checked)
2811                 com->lc_new_checked++;
2812         lo->ll_objs_failed_phase1++;
2813         if (lo->ll_pos_first_inconsistent == 0) {
2814                 struct lfsck_instance *lfsck = com->lc_lfsck;
2815
2816                 lo->ll_pos_first_inconsistent =
2817                         lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
2818                                                         lfsck->li_di_oit);
2819         }
2820         up_write(&com->lc_sem);
2821 }
2822
2823 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
2824                                           struct lfsck_component *com, bool init)
2825 {
2826         struct lfsck_instance           *lfsck   = com->lc_lfsck;
2827         struct lfsck_layout             *lo      = com->lc_file_ram;
2828         struct lfsck_layout_master_data *llmd    = com->lc_data;
2829         struct ptlrpc_thread            *mthread = &lfsck->li_thread;
2830         struct ptlrpc_thread            *athread = &llmd->llmd_thread;
2831         struct l_wait_info               lwi     = { 0 };
2832         int                              rc;
2833
2834         if (com->lc_new_checked == 0 && !init)
2835                 return 0;
2836
2837         l_wait_event(mthread->t_ctl_waitq,
2838                      list_empty(&llmd->llmd_req_list) ||
2839                      !thread_is_running(mthread) ||
2840                      thread_is_stopped(athread),
2841                      &lwi);
2842
2843         if (!thread_is_running(mthread) || thread_is_stopped(athread))
2844                 return 0;
2845
2846         down_write(&com->lc_sem);
2847         if (init) {
2848                 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
2849         } else {
2850                 lo->ll_pos_last_checkpoint =
2851                                         lfsck->li_pos_current.lp_oit_cookie;
2852                 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2853                                 HALF_SEC - lfsck->li_time_last_checkpoint);
2854                 lo->ll_time_last_checkpoint = cfs_time_current_sec();
2855                 lo->ll_objs_checked_phase1 += com->lc_new_checked;
2856                 com->lc_new_checked = 0;
2857         }
2858
2859         rc = lfsck_layout_store(env, com);
2860         up_write(&com->lc_sem);
2861
2862         return rc;
2863 }
2864
2865 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
2866                                          struct lfsck_component *com, bool init)
2867 {
2868         struct lfsck_instance   *lfsck = com->lc_lfsck;
2869         struct lfsck_layout     *lo    = com->lc_file_ram;
2870         int                      rc;
2871
2872         if (com->lc_new_checked == 0 && !init)
2873                 return 0;
2874
2875         down_write(&com->lc_sem);
2876
2877         if (init) {
2878                 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
2879         } else {
2880                 lo->ll_pos_last_checkpoint =
2881                                         lfsck->li_pos_current.lp_oit_cookie;
2882                 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2883                                 HALF_SEC - lfsck->li_time_last_checkpoint);
2884                 lo->ll_time_last_checkpoint = cfs_time_current_sec();
2885                 lo->ll_objs_checked_phase1 += com->lc_new_checked;
2886                 com->lc_new_checked = 0;
2887         }
2888
2889         rc = lfsck_layout_store(env, com);
2890
2891         up_write(&com->lc_sem);
2892
2893         return rc;
2894 }
2895
/*
 * Prepare the layout component for a new LFSCK run: choose the status
 * (phase1 vs phase2) and the OIT start cookie.
 *
 * If the previous run completed (or completed partially), the trace
 * file is first reset (keeping the cumulative success statistics).
 *
 * Start-position policy (under li_lock):
 *  - already scanned once (LF_SCANNED_ONCE):
 *      - dryrun results kept or no inconsistency recorded -> go straight
 *        to phase2 (double scan list), cookie 0;
 *      - dryrun results dropped and an inconsistency position is known
 *        -> redo phase1 from that position with statistics cleared;
 *  - never fully scanned: resume phase1 from the last checkpoint + 1,
 *    or from the first inconsistency if dryrun results were dropped.
 *
 * \retval 0 on success, or the error from resetting the trace file
 */
static int lfsck_layout_prep(const struct lu_env *env,
                             struct lfsck_component *com)
{
        struct lfsck_instance   *lfsck  = com->lc_lfsck;
        struct lfsck_layout     *lo     = com->lc_file_ram;
        struct lfsck_position   *pos    = &com->lc_pos_start;

        fid_zero(&pos->lp_dir_parent);
        pos->lp_dir_cookie = 0;
        if (lo->ll_status == LS_COMPLETED ||
            lo->ll_status == LS_PARTIAL) {
                int rc;

                /* Previous run finished: restart from a (non-init) reset
                 * that preserves the cumulative success statistics. */
                rc = lfsck_layout_reset(env, com, false);
                if (rc != 0)
                        return rc;
        }

        down_write(&com->lc_sem);

        lo->ll_time_latest_start = cfs_time_current_sec();

        spin_lock(&lfsck->li_lock);
        if (lo->ll_flags & LF_SCANNED_ONCE) {
                if (!lfsck->li_drop_dryrun ||
                    lo->ll_pos_first_inconsistent == 0) {
                        /* First-phase scan already done: move straight to
                         * the second phase (double scan). */
                        lo->ll_status = LS_SCANNING_PHASE2;
                        list_del_init(&com->lc_link);
                        list_add_tail(&com->lc_link,
                                      &lfsck->li_list_double_scan);
                        pos->lp_oit_cookie = 0;
                } else {
                        int i;

                        /* Dryrun results dropped: redo phase1 from the
                         * first recorded inconsistency, with all the
                         * per-run statistics cleared. */
                        lo->ll_status = LS_SCANNING_PHASE1;
                        lo->ll_run_time_phase1 = 0;
                        lo->ll_run_time_phase2 = 0;
                        lo->ll_objs_checked_phase1 = 0;
                        lo->ll_objs_checked_phase2 = 0;
                        lo->ll_objs_failed_phase1 = 0;
                        lo->ll_objs_failed_phase2 = 0;
                        for (i = 0; i < LLIT_MAX; i++)
                                lo->ll_objs_repaired[i] = 0;

                        pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
                }
        } else {
                lo->ll_status = LS_SCANNING_PHASE1;
                if (!lfsck->li_drop_dryrun ||
                    lo->ll_pos_first_inconsistent == 0)
                        pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
                else
                        pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
        }
        spin_unlock(&lfsck->li_lock);

        up_write(&com->lc_sem);

        return 0;
}
2956
2957 static int lfsck_layout_slave_prep(const struct lu_env *env,
2958                                    struct lfsck_component *com,
2959                                    struct lfsck_start_param *lsp)
2960 {
2961         struct lfsck_layout             *lo     = com->lc_file_ram;
2962         struct lfsck_layout_slave_data  *llsd   = com->lc_data;
2963         int                              rc;
2964
2965         rc = lfsck_layout_prep(env, com);
2966         if (rc != 0 || lo->ll_status != LS_SCANNING_PHASE1 ||
2967             !lsp->lsp_index_valid)
2968                 return rc;
2969
2970         rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
2971         if (rc == 0 && !(lo->ll_flags & LF_INCOMPLETE)) {
2972                 LASSERT(!llsd->llsd_rbtree_valid);
2973
2974                 write_lock(&llsd->llsd_rb_lock);
2975                 rc = lfsck_rbtree_setup(env, com);
2976                 write_unlock(&llsd->llsd_rb_lock);
2977         }
2978
2979         return rc;
2980 }
2981
/*
 * Master-side preparation for a layout LFSCK run.
 *
 * On top of the common preparation, resets the assistant control state
 * and starts the assistant thread (lfsck_layout_assistant), then waits
 * for it to reach RUNNING (or STOPPED, meaning it failed early).
 *
 * \retval 0 on success; the assistant status or a negative error number
 *	   on failure
 */
static int lfsck_layout_master_prep(const struct lu_env *env,
                                    struct lfsck_component *com,
                                    struct lfsck_start_param *lsp)
{
        struct lfsck_instance           *lfsck   = com->lc_lfsck;
        struct lfsck_layout_master_data *llmd    = com->lc_data;
        struct ptlrpc_thread            *mthread = &lfsck->li_thread;
        struct ptlrpc_thread            *athread = &llmd->llmd_thread;
        struct lfsck_thread_args        *lta;
        long                             rc;
        ENTRY;

        rc = lfsck_layout_prep(env, com);
        if (rc != 0)
                RETURN(rc);

        /* Reset the assistant control state before starting the thread. */
        llmd->llmd_assistant_status = 0;
        llmd->llmd_post_result = 0;
        llmd->llmd_to_post = 0;
        llmd->llmd_to_double_scan = 0;
        llmd->llmd_in_double_scan = 0;
        llmd->llmd_exit = 0;
        thread_set_flags(athread, 0);

        lta = lfsck_thread_args_init(lfsck, com, lsp);
        if (IS_ERR(lta))
                RETURN(PTR_ERR(lta));

        /* PTR_ERR() on a valid task_struct pointer yields a value that
         * IS_ERR_VALUE() rejects, so a successful kthread_run() falls
         * through to the else branch; on failure lta must be freed here
         * since the thread never ran to free it. */
        rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
        if (IS_ERR_VALUE(rc)) {
                CERROR("%s: Cannot start LFSCK layout assistant thread: "
                       "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
                lfsck_thread_args_fini(lta);
        } else {
                struct l_wait_info lwi = { 0 };

                /* Wait for the assistant's start handshake; if it is not
                 * RUNNING it must have stopped, so report its status. */
                l_wait_event(mthread->t_ctl_waitq,
                             thread_is_running(athread) ||
                             thread_is_stopped(athread),
                             &lwi);
                if (unlikely(!thread_is_running(athread)))
                        rc = llmd->llmd_assistant_status;
                else
                        rc = 0;
        }

        RETURN(rc);
}
3030
3031 /* Pre-fetch the attribute for each stripe in the given layout EA. */
3032 static int lfsck_layout_scan_stripes(const struct lu_env *env,
3033                                      struct lfsck_component *com,
3034                                      struct dt_object *parent,
3035                                      struct lov_mds_md_v1 *lmm)
3036 {
3037         struct lfsck_thread_info        *info    = lfsck_env_info(env);
3038         struct lfsck_instance           *lfsck   = com->lc_lfsck;
3039         struct lfsck_bookmark           *bk      = &lfsck->li_bookmark_ram;
3040         struct lfsck_layout             *lo      = com->lc_file_ram;
3041         struct lfsck_layout_master_data *llmd    = com->lc_data;
3042         struct lfsck_layout_object      *llo     = NULL;
3043         struct lov_ost_data_v1          *objs;
3044         struct lfsck_tgt_descs          *ltds    = &lfsck->li_ost_descs;
3045         struct ptlrpc_thread            *mthread = &lfsck->li_thread;
3046         struct ptlrpc_thread            *athread = &llmd->llmd_thread;
3047                 struct l_wait_info       lwi     = { 0 };
3048         struct lu_buf                   *buf;
3049         int                              rc      = 0;
3050         int                              i;
3051         __u32                            magic;
3052         __u16                            count;
3053         __u16                            gen;
3054         ENTRY;
3055
3056         buf = lfsck_buf_get(env, &info->lti_old_pfid,
3057                             sizeof(struct filter_fid_old));
3058         count = le16_to_cpu(lmm->lmm_stripe_count);
3059         gen = le16_to_cpu(lmm->lmm_layout_gen);
3060         /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3061          * been verified in lfsck_layout_verify_header() already. If some
3062          * new magic introduced in the future, then layout LFSCK needs to
3063          * be updated also. */
3064         magic = le32_to_cpu(lmm->lmm_magic);
3065         if (magic == LOV_MAGIC_V1) {
3066                 objs = &(lmm->lmm_objects[0]);
3067         } else {
3068                 LASSERT(magic == LOV_MAGIC_V3);
3069                 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
3070         }
3071
3072         for (i = 0; i < count; i++, objs++) {
3073                 struct lu_fid           *fid    = &info->lti_fid;
3074                 struct ost_id           *oi     = &info->lti_oi;