Whamcloud - gitweb
5fc99148f1b87c3a1bb46baf5a1260f0e540c705
[fs/lustre-release.git] / lustre / mdd / mdd_lfsck.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License version 2 for more details.  A copy is
14  * included in the COPYING file that accompanied this code.
15
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2012, Intel Corporation.
24  */
25 /*
26  * lustre/mdd/mdd_lfsck.c
27  *
28  * Top-level entry points into mdd module
29  *
30  * LFSCK controller, which scans the whole device through low layer
31  * iteration APIs, drives all lfsck components, controls the speed.
32  *
33  * Author: Fan Yong <yong.fan@whamcloud.com>
34  */
35
36 #ifndef EXPORT_SYMTAB
37 # define EXPORT_SYMTAB
38 #endif
39 #define DEBUG_SUBSYSTEM S_MDS
40
41 #include <lustre/lustre_idl.h>
42 #include <lustre_fid.h>
43 #include <obd_support.h>
44
45 #include "mdd_internal.h"
46 #include "mdd_lfsck.h"
47
48 #define HALF_SEC                        (CFS_HZ >> 1)
49 #define LFSCK_CHECKPOINT_INTERVAL       60
50 #define MDS_DIR_DUMMY_START             0xffffffffffffffffULL
51
52 const char lfsck_bookmark_name[] = "lfsck_bookmark";
53
54 /* misc functions */
55
/* Map an embedded md_lfsck back to the mdd_device that contains it
 * (md_lfsck is a member field of struct mdd_device). */
static inline struct mdd_device *mdd_lfsck2mdd(struct md_lfsck *lfsck)
{
        return container_of0(lfsck, struct mdd_device, mdd_lfsck);
}
60
61 static inline char *mdd_lfsck2name(struct md_lfsck *lfsck)
62 {
63         struct mdd_device *mdd = mdd_lfsck2mdd(lfsck);
64
65         return mdd2obd_dev(mdd)->obd_name;
66 }
67
68 static inline void mdd_lfsck_component_put(const struct lu_env *env,
69                                            struct lfsck_component *com)
70 {
71         if (atomic_dec_and_test(&com->lc_ref)) {
72                 if (com->lc_obj != NULL)
73                         lu_object_put(env, &com->lc_obj->do_lu);
74                 if (com->lc_file_ram != NULL)
75                         OBD_FREE(com->lc_file_ram, com->lc_file_size);
76                 if (com->lc_file_disk != NULL)
77                         OBD_FREE(com->lc_file_disk, com->lc_file_size);
78                 OBD_FREE_PTR(com);
79         }
80 }
81
82 static inline struct lfsck_component *
83 __mdd_lfsck_component_find(struct md_lfsck *lfsck, __u16 type, cfs_list_t *list)
84 {
85         struct lfsck_component *com;
86
87         cfs_list_for_each_entry(com, list, lc_link) {
88                 if (com->lc_type == type)
89                         return com;
90         }
91         return NULL;
92 }
93
94 static void mdd_lfsck_component_cleanup(const struct lu_env *env,
95                                         struct lfsck_component *com)
96 {
97         if (!cfs_list_empty(&com->lc_link))
98                 cfs_list_del_init(&com->lc_link);
99         if (!cfs_list_empty(&com->lc_link_dir))
100                 cfs_list_del_init(&com->lc_link_dir);
101
102         mdd_lfsck_component_put(env, com);
103 }
104
/* Snapshot the current scanning position into *pos, under ml_lock so the
 * engine thread cannot tear the iterators down midway.
 *
 * \param oit_processed  true if the current otable entry has been fully
 *                       handled; otherwise the stored cookie is stepped back
 *                       by one so that entry is revisited on resume.
 * \param dir_processed  same, for the current directory entry.
 */
static void mdd_lfsck_pos_fill(const struct lu_env *env, struct md_lfsck *lfsck,
                               struct lfsck_position *pos, bool oit_processed,
                               bool dir_processed)
{
        const struct dt_it_ops *iops = &lfsck->ml_obj_oit->do_index_ops->dio_it;

        spin_lock(&lfsck->ml_lock);
        if (unlikely(lfsck->ml_di_oit == NULL)) {
                /* Otable iteration not active: report a zero position. */
                spin_unlock(&lfsck->ml_lock);
                memset(pos, 0, sizeof(*pos));
                return;
        }

        pos->lp_oit_cookie = iops->store(env, lfsck->ml_di_oit);

        LASSERT(pos->lp_oit_cookie > 0);

        if (!oit_processed)
                pos->lp_oit_cookie--;

        if (lfsck->ml_di_dir != NULL) {
                /* A namespace-based directory traverse is also in progress:
                 * record the directory's FID and in-directory cookie too. */
                struct dt_object *dto = lfsck->ml_obj_dir;

                pos->lp_dir_parent = *lu_object_fid(&dto->do_lu);
                pos->lp_dir_cookie = dto->do_index_ops->dio_it.store(env,
                                                        lfsck->ml_di_dir);

                LASSERT(pos->lp_dir_cookie != MDS_DIR_DUMMY_START);

                if (pos->lp_dir_cookie == MDS_DIR_END_OFF)
                        LASSERT(dir_processed);

                /* For the dir which just to be processed,
                 * lp_dir_cookie will become MDS_DIR_DUMMY_START,
                 * which can be correctly handled by mdd_lfsck_prep. */
                if (!dir_processed)
                        pos->lp_dir_cookie--;
        } else {
                fid_zero(&pos->lp_dir_parent);
                pos->lp_dir_cookie = 0;
        }
        spin_unlock(&lfsck->ml_lock);
}
148
149 static inline int mdd_lfsck_pos_is_zero(const struct lfsck_position *pos)
150 {
151         return pos->lp_oit_cookie == 0 && fid_is_zero(&pos->lp_dir_parent);
152 }
153
/* Three-way comparison of two LFSCK positions.
 *
 * NOTE: despite the "_is_eq" name this is a compare function:
 * \retval -1  pos1 sorts before pos2
 * \retval  0  the positions are equal
 * \retval  1  pos1 sorts after pos2
 *
 * At equal otable cookies, a pure otable position (zero lp_dir_parent)
 * sorts before a position inside a directory. */
static inline int mdd_lfsck_pos_is_eq(const struct lfsck_position *pos1,
                                      const struct lfsck_position *pos2)
{
        if (pos1->lp_oit_cookie < pos2->lp_oit_cookie)
                return -1;

        if (pos1->lp_oit_cookie > pos2->lp_oit_cookie)
                return 1;

        if (fid_is_zero(&pos1->lp_dir_parent) &&
            !fid_is_zero(&pos2->lp_dir_parent))
                return -1;

        if (!fid_is_zero(&pos1->lp_dir_parent) &&
            fid_is_zero(&pos2->lp_dir_parent))
                return 1;

        if (fid_is_zero(&pos1->lp_dir_parent) &&
            fid_is_zero(&pos2->lp_dir_parent))
                return 0;

        /* Both inside a directory at the same otable cookie: they must be
         * positions within the same directory. */
        LASSERT(lu_fid_eq(&pos1->lp_dir_parent, &pos2->lp_dir_parent));

        if (pos1->lp_dir_cookie < pos2->lp_dir_cookie)
                return -1;

        if (pos1->lp_dir_cookie > pos2->lp_dir_cookie)
                return 1;

        return 0;
}
185
186 static void mdd_lfsck_close_dir(const struct lu_env *env,
187                                 struct md_lfsck *lfsck)
188 {
189         struct dt_object        *dir_obj  = lfsck->ml_obj_dir;
190         const struct dt_it_ops  *dir_iops = &dir_obj->do_index_ops->dio_it;
191         struct dt_it            *dir_di   = lfsck->ml_di_dir;
192
193         spin_lock(&lfsck->ml_lock);
194         lfsck->ml_di_dir = NULL;
195         spin_unlock(&lfsck->ml_lock);
196
197         dir_iops->put(env, dir_di);
198         dir_iops->fini(env, dir_di);
199         lfsck->ml_obj_dir = NULL;
200         lu_object_put(env, &dir_obj->do_lu);
201 }
202
203 static void __mdd_lfsck_set_speed(struct md_lfsck *lfsck, __u32 limit)
204 {
205         lfsck->ml_bookmark_ram.lb_speed_limit = limit;
206         if (limit != LFSCK_SPEED_NO_LIMIT) {
207                 if (limit > CFS_HZ) {
208                         lfsck->ml_sleep_rate = limit / CFS_HZ;
209                         lfsck->ml_sleep_jif = 1;
210                 } else {
211                         lfsck->ml_sleep_rate = 1;
212                         lfsck->ml_sleep_jif = CFS_HZ / limit;
213                 }
214         } else {
215                 lfsck->ml_sleep_jif = 0;
216                 lfsck->ml_sleep_rate = 0;
217         }
218 }
219
/* Rate control: once ml_new_scanned reaches ml_sleep_rate, sleep for
 * ml_sleep_jif jiffies before continuing.  The condition is re-checked
 * under ml_lock because the limit can be changed concurrently via
 * __mdd_lfsck_set_speed(). */
static void mdd_lfsck_control_speed(struct md_lfsck *lfsck)
{
        struct ptlrpc_thread *thread = &lfsck->ml_thread;
        struct l_wait_info    lwi;

        if (lfsck->ml_sleep_jif > 0 &&
            lfsck->ml_new_scanned >= lfsck->ml_sleep_rate) {
                spin_lock(&lfsck->ml_lock);
                if (likely(lfsck->ml_sleep_jif > 0 &&
                           lfsck->ml_new_scanned >= lfsck->ml_sleep_rate)) {
                        lwi = LWI_TIMEOUT_INTR(lfsck->ml_sleep_jif, NULL,
                                               LWI_ON_SIGNAL_NOOP, NULL);
                        spin_unlock(&lfsck->ml_lock);

                        /* Wake up early if the LFSCK thread is stopping. */
                        l_wait_event(thread->t_ctl_waitq,
                                     !thread_is_running(thread),
                                     &lwi);
                        lfsck->ml_new_scanned = 0;
                } else {
                        spin_unlock(&lfsck->ml_lock);
                }
        }
}
243
244 /* lfsck_bookmark file ops */
245
246 static void inline mdd_lfsck_bookmark_to_cpu(struct lfsck_bookmark *des,
247                                              struct lfsck_bookmark *src)
248 {
249         des->lb_magic = le32_to_cpu(src->lb_magic);
250         des->lb_version = le16_to_cpu(src->lb_version);
251         des->lb_param = le16_to_cpu(src->lb_param);
252         des->lb_speed_limit = le32_to_cpu(src->lb_speed_limit);
253 }
254
255 static void inline mdd_lfsck_bookmark_to_le(struct lfsck_bookmark *des,
256                                             struct lfsck_bookmark *src)
257 {
258         des->lb_magic = cpu_to_le32(src->lb_magic);
259         des->lb_version = cpu_to_le16(src->lb_version);
260         des->lb_param = cpu_to_le16(src->lb_param);
261         des->lb_speed_limit = cpu_to_le32(src->lb_speed_limit);
262 }
263
264 static int mdd_lfsck_bookmark_load(const struct lu_env *env,
265                                    struct md_lfsck *lfsck)
266 {
267         loff_t pos = 0;
268         int    len = sizeof(struct lfsck_bookmark);
269         int    rc;
270
271         rc = dt_record_read(env, lfsck->ml_bookmark_obj,
272                             mdd_buf_get(env, &lfsck->ml_bookmark_disk, len),
273                             &pos);
274         if (rc == 0) {
275                 struct lfsck_bookmark *bm = &lfsck->ml_bookmark_ram;
276
277                 mdd_lfsck_bookmark_to_cpu(bm, &lfsck->ml_bookmark_disk);
278                 if (bm->lb_magic != LFSCK_BOOKMARK_MAGIC) {
279                         CWARN("%.16s: invalid lfsck_bookmark magic "
280                               "0x%x != 0x%x\n", mdd_lfsck2name(lfsck),
281                               bm->lb_magic, LFSCK_BOOKMARK_MAGIC);
282                         /* Process it as new lfsck_bookmark. */
283                         rc = -ENODATA;
284                 }
285         } else {
286                 if (rc == -EFAULT && pos == 0)
287                         /* return -ENODATA for empty lfsck_bookmark. */
288                         rc = -ENODATA;
289                 else
290                         CERROR("%.16s: fail to load lfsck_bookmark, "
291                                "expected = %d, rc = %d\n",
292                                mdd_lfsck2name(lfsck), len, rc);
293         }
294         return rc;
295 }
296
297 static int mdd_lfsck_bookmark_store(const struct lu_env *env,
298                                     struct md_lfsck *lfsck)
299 {
300         struct mdd_device *mdd    = mdd_lfsck2mdd(lfsck);
301         struct thandle    *handle;
302         struct dt_object  *obj    = lfsck->ml_bookmark_obj;
303         loff_t             pos    = 0;
304         int                len    = sizeof(struct lfsck_bookmark);
305         int                rc;
306         ENTRY;
307
308         mdd_lfsck_bookmark_to_le(&lfsck->ml_bookmark_disk,
309                                  &lfsck->ml_bookmark_ram);
310         handle = dt_trans_create(env, mdd->mdd_bottom);
311         if (IS_ERR(handle)) {
312                 rc = PTR_ERR(handle);
313                 CERROR("%.16s: fail to create trans for storing "
314                        "lfsck_bookmark: %d\n,", mdd_lfsck2name(lfsck), rc);
315                 RETURN(rc);
316         }
317
318         rc = dt_declare_record_write(env, obj, len, 0, handle);
319         if (rc != 0) {
320                 CERROR("%.16s: fail to declare trans for storing "
321                        "lfsck_bookmark: %d\n,", mdd_lfsck2name(lfsck), rc);
322                 GOTO(out, rc);
323         }
324
325         rc = dt_trans_start_local(env, mdd->mdd_bottom, handle);
326         if (rc != 0) {
327                 CERROR("%.16s: fail to start trans for storing "
328                        "lfsck_bookmark: %d\n,", mdd_lfsck2name(lfsck), rc);
329                 GOTO(out, rc);
330         }
331
332         rc = dt_record_write(env, obj,
333                              mdd_buf_get(env, &lfsck->ml_bookmark_disk, len),
334                              &pos, handle);
335         if (rc != 0)
336                 CERROR("%.16s: fail to store lfsck_bookmark, expected = %d, "
337                        "rc = %d\n", mdd_lfsck2name(lfsck), len, rc);
338
339         GOTO(out, rc);
340
341 out:
342         dt_trans_stop(env, mdd->mdd_bottom, handle);
343         return rc;
344 }
345
346 static int mdd_lfsck_bookmark_init(const struct lu_env *env,
347                                    struct md_lfsck *lfsck)
348 {
349         struct lfsck_bookmark *mb = &lfsck->ml_bookmark_ram;
350         int rc;
351
352         memset(mb, 0, sizeof(mb));
353         mb->lb_magic = LFSCK_BOOKMARK_MAGIC;
354         mb->lb_version = LFSCK_VERSION_V1;
355         mutex_lock(&lfsck->ml_mutex);
356         rc = mdd_lfsck_bookmark_store(env, lfsck);
357         mutex_unlock(&lfsck->ml_mutex);
358         return rc;
359 }
360
361 /* helper functions for framework */
362
/* Decide whether a directory is reachable from the filesystem root (and so
 * visible to clients) by walking up the ".." chain.
 *
 * Reference handling: the initial obj belongs to the caller (depth == 0 is
 * never put here); every ancestor looked up inside the loop (depth > 0) is
 * put before each return.
 *
 * \retval 1   visible (root reached, or a linkEA is present — presumably
 *             meaning the path is intact; TODO confirm the linkEA shortcut)
 * \retval 0   not visible (dead dir, orphan, remote, or local-root ancestor)
 * \retval <0  error
 */
static int object_is_client_visible(const struct lu_env *env,
                                    struct mdd_device *mdd,
                                    struct mdd_object *obj)
{
        struct lu_fid *fid   = &mdd_env_info(env)->mti_fid;
        int            depth = 0;
        int            rc;

        LASSERT(S_ISDIR(mdd_object_type(obj)));

        while (1) {
                /* Reached the filesystem root: visible. */
                if (mdd_is_root(mdd, mdo2fid(obj))) {
                        if (depth > 0)
                                mdd_object_put(env, obj);
                        return 1;
                }

                mdd_read_lock(env, obj, MOR_TGT_CHILD);
                if (unlikely(mdd_is_dead_obj(obj))) {
                        mdd_read_unlock(env, obj);
                        if (depth > 0)
                                mdd_object_put(env, obj);
                        return 0;
                }

                /* Probe for the link xattr (size-0 buffer: existence only). */
                rc = dt_xattr_get(env, mdd_object_child(obj),
                                  mdd_buf_get(env, NULL, 0), XATTR_NAME_LINK,
                                  BYPASS_CAPA);
                mdd_read_unlock(env, obj);
                if (rc >= 0) {
                        if (depth > 0)
                                mdd_object_put(env, obj);
                        return 1;
                }

                if (rc < 0 && rc != -ENODATA) {
                        if (depth > 0)
                                mdd_object_put(env, obj);
                        return rc;
                }

                /* rc == -ENODATA: no linkEA; climb to the parent via "..". */
                rc = mdd_parent_fid(env, obj, fid);
                if (depth > 0)
                        mdd_object_put(env, obj);
                if (rc != 0)
                        return rc;

                if (unlikely(lu_fid_eq(fid, &mdd->mdd_local_root_fid)))
                        return 0;

                obj = mdd_object_find(env, mdd, fid);
                if (obj == NULL)
                        return 0;
                else if (IS_ERR(obj))
                        return PTR_ERR(obj);

                /* XXX: need more processing for remote object in the future. */
                if (!mdd_object_exists(obj) || mdd_object_remote(obj)) {
                        mdd_object_put(env, obj);
                        return 0;
                }

                depth++;
        }
        /* Not reachable: all exits happen inside the loop. */
        return 0;
}
429
430 static void mdd_lfsck_unpack_ent(struct lu_dirent *ent)
431 {
432         fid_le_to_cpu(&ent->lde_fid, &ent->lde_fid);
433         ent->lde_hash = le64_to_cpu(ent->lde_hash);
434         ent->lde_reclen = le16_to_cpu(ent->lde_reclen);
435         ent->lde_namelen = le16_to_cpu(ent->lde_namelen);
436         ent->lde_attrs = le32_to_cpu(ent->lde_attrs);
437
438         /* Make sure the name is terminated with '0'.
439          * The data (type) after ent::lde_name maybe
440          * broken, but we do not care. */
441         ent->lde_name[ent->lde_namelen] = 0;
442 }
443
444 /* LFSCK wrap functions */
445
446 static void mdd_lfsck_fail(const struct lu_env *env, struct md_lfsck *lfsck,
447                            bool oit, bool new_checked)
448 {
449         struct lfsck_component *com;
450
451         cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
452                 com->lc_ops->lfsck_fail(env, com, oit, new_checked);
453         }
454 }
455
456 static int mdd_lfsck_checkpoint(const struct lu_env *env,
457                                 struct md_lfsck *lfsck, bool oit)
458 {
459         struct lfsck_component *com;
460         int                     rc;
461
462         if (likely(cfs_time_beforeq(cfs_time_current(),
463                                     lfsck->ml_time_next_checkpoint)))
464                 return 0;
465
466         mdd_lfsck_pos_fill(env, lfsck, &lfsck->ml_pos_current, oit, !oit);
467         cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
468                 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
469                 if (rc != 0)
470                         return rc;;
471         }
472
473         lfsck->ml_time_last_checkpoint = cfs_time_current();
474         lfsck->ml_time_next_checkpoint = lfsck->ml_time_last_checkpoint +
475                                 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
476         return 0;
477 }
478
/* Prepare a (re)started LFSCK run: let every component compute its desired
 * start position, pick the smallest of those, position the otable iterator
 * there, and — if the saved position was inside a directory — re-open that
 * directory traverse as well.  Finishes with an initial checkpoint.
 *
 * \retval 0 on success, negative errno on failure.
 */
static int mdd_lfsck_prep(struct lu_env *env, struct md_lfsck *lfsck)
{
        struct mdd_device      *mdd     = mdd_lfsck2mdd(lfsck);
        struct mdd_object      *obj     = NULL;
        struct dt_object       *dt_obj;
        struct lfsck_component *com;
        struct lfsck_component *next;
        struct lfsck_position  *pos     = NULL;
        const struct dt_it_ops *iops    =
                                &lfsck->ml_obj_oit->do_index_ops->dio_it;
        struct dt_it           *di;
        int                     rc;
        ENTRY;

        LASSERT(lfsck->ml_obj_dir == NULL);
        LASSERT(lfsck->ml_di_dir == NULL);

        cfs_list_for_each_entry_safe(com, next, &lfsck->ml_list_scan, lc_link) {
                com->lc_new_checked = 0;
                if (lfsck->ml_bookmark_ram.lb_param & LPF_DRYRUN)
                        com->lc_journal = 0;

                rc = com->lc_ops->lfsck_prep(env, com);
                if (rc != 0)
                        RETURN(rc);

                /* Track the minimum non-zero start position over all
                 * components (mdd_lfsck_pos_is_eq is a 3-way compare). */
                if ((pos == NULL) ||
                    (!mdd_lfsck_pos_is_zero(&com->lc_pos_start) &&
                     mdd_lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
                        pos = &com->lc_pos_start;
        }

        /* Init otable-based iterator. */
        if (pos == NULL) {
                /* No saved position: scan from the beginning. */
                rc = iops->load(env, lfsck->ml_di_oit, 0);
                GOTO(out, rc = (rc >= 0 ? 0 : rc));
        }

        rc = iops->load(env, lfsck->ml_di_oit, pos->lp_oit_cookie);
        if (rc < 0)
                GOTO(out, rc);

        if (fid_is_zero(&pos->lp_dir_parent))
                GOTO(out, rc = 0);

        /* Find the directory for namespace-based traverse. */
        obj = mdd_object_find(env, mdd, &pos->lp_dir_parent);
        if (obj == NULL)
                GOTO(out, rc = 0);
        else if (IS_ERR(obj))
                RETURN(PTR_ERR(obj));

        /* XXX: need more processing for remote object in the future. */
        if (!mdd_object_exists(obj) || mdd_object_remote(obj) ||
            unlikely(!S_ISDIR(mdd_object_type(obj))))
                GOTO(out, rc = 0);

        if (unlikely(mdd_is_dead_obj(obj)))
                GOTO(out, rc = 0);

        dt_obj = mdd_object_child(obj);
        if (unlikely(!dt_try_as_dir(env, dt_obj)))
                GOTO(out, rc = -ENOTDIR);

        /* Init the namespace-based directory traverse. */
        iops = &dt_obj->do_index_ops->dio_it;
        di = iops->init(env, dt_obj, lfsck->ml_args_dir, BYPASS_CAPA);
        if (IS_ERR(di))
                GOTO(out, rc = PTR_ERR(di));

        /* load() > 0 means positioned exactly on an entry; load() == 0
         * means "not found", so advance to the next entry. */
        rc = iops->load(env, di, pos->lp_dir_cookie);
        if (rc == 0)
                rc = iops->next(env, di);
        else if (rc > 0)
                rc = 0;

        if (rc != 0) {
                iops->put(env, di);
                iops->fini(env, di);
                GOTO(out, rc);
        }

        /* Publish the directory iterator; ownership of dt_obj's reference
         * moves to lfsck (obj = NULL prevents the put below). */
        lfsck->ml_obj_dir = dt_obj;
        spin_lock(&lfsck->ml_lock);
        lfsck->ml_di_dir = di;
        spin_unlock(&lfsck->ml_lock);
        obj = NULL;

        GOTO(out, rc = 0);

out:
        if (obj != NULL)
                mdd_object_put(env, obj);

        if (rc != 0)
                return (rc > 0 ? 0 : rc);

        /* Record the effective start position and take the first
         * checkpoint before the engines begin. */
        mdd_lfsck_pos_fill(env, lfsck, &lfsck->ml_pos_current, false, false);
        cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
                rc = com->lc_ops->lfsck_checkpoint(env, com, true);
                if (rc != 0)
                        break;
        }

        lfsck->ml_time_last_checkpoint = cfs_time_current();
        lfsck->ml_time_next_checkpoint = lfsck->ml_time_last_checkpoint +
                                cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
        return rc;
}
588
/* Process one object delivered by the otable-based iteration: hand it to
 * every component, and if it is a directory that some component wants
 * traversed by namespace, open a directory iterator on it.
 *
 * \retval 0 on success (including "skipped"), negative errno on failure.
 */
static int mdd_lfsck_exec_oit(const struct lu_env *env, struct md_lfsck *lfsck,
                              struct mdd_object *obj)
{
        struct lfsck_component *com;
        struct dt_object       *dt_obj;
        const struct dt_it_ops *iops;
        struct dt_it           *di;
        int                     rc;
        ENTRY;

        LASSERT(lfsck->ml_obj_dir == NULL);

        cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
                rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
                if (rc != 0)
                        RETURN(rc);
        }

        /* Directory traverse is only needed when some component registered
         * on the dir list and the object is a directory. */
        if (!S_ISDIR(mdd_object_type(obj)) ||
            cfs_list_empty(&lfsck->ml_list_dir))
               RETURN(0);

        rc = object_is_client_visible(env, mdd_lfsck2mdd(lfsck), obj);
        if (rc <= 0)
                GOTO(out, rc);

        if (unlikely(mdd_is_dead_obj(obj)))
                GOTO(out, rc = 0);

        dt_obj = mdd_object_child(obj);
        if (unlikely(!dt_try_as_dir(env, dt_obj)))
                GOTO(out, rc = -ENOTDIR);

        /* Open a directory iterator and position it on the first entry. */
        iops = &dt_obj->do_index_ops->dio_it;
        di = iops->init(env, dt_obj, lfsck->ml_args_dir, BYPASS_CAPA);
        if (IS_ERR(di))
                GOTO(out, rc = PTR_ERR(di));

        rc = iops->load(env, di, 0);
        if (rc == 0)
                rc = iops->next(env, di);
        else if (rc > 0)
                rc = 0;

        if (rc != 0) {
                iops->put(env, di);
                iops->fini(env, di);
                GOTO(out, rc);
        }

        /* Extra reference for lfsck's use of the directory; released by
         * mdd_lfsck_close_dir(). */
        mdd_object_get(obj);
        lfsck->ml_obj_dir = dt_obj;
        spin_lock(&lfsck->ml_lock);
        lfsck->ml_di_dir = di;
        spin_unlock(&lfsck->ml_lock);

        GOTO(out, rc = 0);

out:
        if (rc < 0)
                mdd_lfsck_fail(env, lfsck, false, false);
        /* rc > 0 (e.g. "end of iteration" from next) is not an error. */
        return (rc > 0 ? 0 : rc);
}
652
653 static int mdd_lfsck_exec_dir(const struct lu_env *env, struct md_lfsck *lfsck,
654                               struct mdd_object *obj, struct lu_dirent *ent)
655 {
656         struct lfsck_component *com;
657         int                     rc;
658
659         cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
660                 rc = com->lc_ops->lfsck_exec_dir(env, com, obj, ent);
661                 if (rc != 0)
662                         return rc;
663         }
664         return 0;
665 }
666
/* Finish first-phase scanning: record the final position, run each
 * component's post hook (the _safe walker is needed because the hook may
 * move the component off the scan list), and refresh the checkpoint
 * timestamps.
 *
 * \param result  the engine's exit code; passed to the hooks and returned
 *                unchanged when every hook succeeds.
 * \retval the first failing hook's errno, otherwise \a result.
 */
static int mdd_lfsck_post(const struct lu_env *env, struct md_lfsck *lfsck,
                          int result)
{
        struct lfsck_component *com;
        struct lfsck_component *next;
        int                     rc;

        mdd_lfsck_pos_fill(env, lfsck, &lfsck->ml_pos_current, true, true);
        cfs_list_for_each_entry_safe(com, next, &lfsck->ml_list_scan, lc_link) {
                rc = com->lc_ops->lfsck_post(env, com, result);
                if (rc != 0)
                        return rc;
        }

        lfsck->ml_time_last_checkpoint = cfs_time_current();
        lfsck->ml_time_next_checkpoint = lfsck->ml_time_last_checkpoint +
                                cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
        return result;
}
686
687 static int mdd_lfsck_double_scan(const struct lu_env *env,
688                                  struct md_lfsck *lfsck)
689 {
690         struct lfsck_component *com;
691         struct lfsck_component *next;
692         int                     rc;
693
694         cfs_list_for_each_entry_safe(com, next, &lfsck->ml_list_double_scan,
695                                      lc_link) {
696                 if (lfsck->ml_bookmark_ram.lb_param & LPF_DRYRUN)
697                         com->lc_journal = 0;
698
699                 rc = com->lc_ops->lfsck_double_scan(env, com);
700                 if (rc != 0)
701                         return rc;
702         }
703         return 0;
704 }
705
706 /* LFSCK engines */
707
/* Namespace-based engine: iterate the entries of the currently-open
 * directory (lfsck->ml_di_dir), feeding each to the components.
 *
 * \retval 0   the thread was asked to stop
 * \retval >0  directory exhausted (iterator returned "end")
 * \retval <0  fatal error with LPF_FAILOUT set
 */
static int mdd_lfsck_dir_engine(const struct lu_env *env,
                                struct md_lfsck *lfsck)
{
        struct mdd_thread_info  *info   = mdd_env_info(env);
        struct mdd_device       *mdd    = mdd_lfsck2mdd(lfsck);
        const struct dt_it_ops  *iops   =
                        &lfsck->ml_obj_dir->do_index_ops->dio_it;
        struct dt_it            *di     = lfsck->ml_di_dir;
        struct lu_dirent        *ent    = &info->mti_ent;
        struct lu_fid           *fid    = &info->mti_fid;
        struct lfsck_bookmark   *bk     = &lfsck->ml_bookmark_ram;
        struct ptlrpc_thread    *thread = &lfsck->ml_thread;
        int                      rc;
        ENTRY;

        do {
                struct mdd_object *child;

                lfsck->ml_new_scanned++;
                rc = iops->rec(env, di, (struct dt_rec *)ent,
                               lfsck->ml_args_dir);
                if (rc != 0) {
                        /* Record the failure; continue unless FAILOUT. */
                        mdd_lfsck_fail(env, lfsck, false, true);
                        if (bk->lb_param & LPF_FAILOUT)
                                RETURN(rc);
                        else
                                goto checkpoint;
                }

                mdd_lfsck_unpack_ent(ent);
                if (ent->lde_attrs & LUDA_IGNORE)
                        goto checkpoint;

                *fid = ent->lde_fid;
                child = mdd_object_find(env, mdd, fid);
                if (child == NULL) {
                        goto checkpoint;
                } else if (IS_ERR(child)) {
                        mdd_lfsck_fail(env, lfsck, false, true);
                        if (bk->lb_param & LPF_FAILOUT)
                                RETURN(PTR_ERR(child));
                        else
                                goto checkpoint;
                }

                /* XXX: need more processing for remote object in the future. */
                if (mdd_object_exists(child) && !mdd_object_remote(child))
                        rc = mdd_lfsck_exec_dir(env, lfsck, child, ent);
                mdd_object_put(env, child);
                if (rc != 0 && bk->lb_param & LPF_FAILOUT)
                        RETURN(rc);

checkpoint:
                rc = mdd_lfsck_checkpoint(env, lfsck, false);
                if (rc != 0 && bk->lb_param & LPF_FAILOUT)
                        RETURN(rc);

                /* Rate control. */
                mdd_lfsck_control_speed(lfsck);
                if (unlikely(!thread_is_running(thread)))
                        RETURN(0);

                rc = iops->next(env, di);
        } while (rc == 0);

        /* Directory exhausted; close it unless the otable scan already
         * finished (the caller then closes it itself). */
        if (rc > 0 && !lfsck->ml_oit_over)
                mdd_lfsck_close_dir(env, lfsck);

        RETURN(rc);
}
778
/* Otable-based engine: iterate every object on the device.  Whenever a
 * directory traverse gets opened (by mdd_lfsck_exec_oit), the dir engine
 * is run to completion before the otable iteration resumes.
 *
 * \retval 0   the thread was asked to stop
 * \retval >0  whole-device iteration finished
 * \retval <0  fatal error with LPF_FAILOUT set
 */
static int mdd_lfsck_oit_engine(const struct lu_env *env,
                                struct md_lfsck *lfsck)
{
        struct mdd_thread_info  *info   = mdd_env_info(env);
        struct mdd_device       *mdd    = mdd_lfsck2mdd(lfsck);
        const struct dt_it_ops  *iops   =
                                &lfsck->ml_obj_oit->do_index_ops->dio_it;
        struct dt_it            *di     = lfsck->ml_di_oit;
        struct lu_fid           *fid    = &info->mti_fid;
        struct lfsck_bookmark   *bk     = &lfsck->ml_bookmark_ram;
        struct ptlrpc_thread    *thread = &lfsck->ml_thread;
        int                      rc;
        ENTRY;

        do {
                struct mdd_object *target;

                /* Finish any pending directory traverse first. */
                if (lfsck->ml_di_dir != NULL) {
                        rc = mdd_lfsck_dir_engine(env, lfsck);
                        if (rc <= 0)
                                RETURN(rc);
                }

                if (unlikely(lfsck->ml_oit_over))
                        RETURN(1);

                lfsck->ml_new_scanned++;
                rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
                if (rc != 0) {
                        mdd_lfsck_fail(env, lfsck, true, true);
                        if (bk->lb_param & LPF_FAILOUT)
                                RETURN(rc);
                        else
                                goto checkpoint;
                }

                target = mdd_object_find(env, mdd, fid);
                if (target == NULL) {
                        goto checkpoint;
                } else if (IS_ERR(target)) {
                        mdd_lfsck_fail(env, lfsck, true, true);
                        if (bk->lb_param & LPF_FAILOUT)
                                RETURN(PTR_ERR(target));
                        else
                                goto checkpoint;
                }

                /* XXX: In fact, low layer otable-based iteration should not
                 *      return agent object. But before LU-2646 resolved, we
                 *      need more processing for agent object. */
                if (mdd_object_exists(target) && !mdd_object_remote(target))
                        rc = mdd_lfsck_exec_oit(env, lfsck, target);
                mdd_object_put(env, target);
                if (rc != 0 && bk->lb_param & LPF_FAILOUT)
                        RETURN(rc);

checkpoint:
                rc = mdd_lfsck_checkpoint(env, lfsck, true);
                if (rc != 0 && bk->lb_param & LPF_FAILOUT)
                        RETURN(rc);

                /* Rate control. */
                mdd_lfsck_control_speed(lfsck);

                rc = iops->next(env, di);
                if (rc > 0)
                        lfsck->ml_oit_over = 1;

                if (unlikely(!thread_is_running(thread)))
                        RETURN(0);
        } while (rc == 0 || lfsck->ml_di_dir != NULL);

        RETURN(rc);
}
853
/* LFSCK master engine thread body.
 *
 * Lifecycle: daemonize, init a private lu_env, init the otable-based
 * iterator on the bottom device, prep the registered components, then
 * drive the object-table scan via mdd_lfsck_oit_engine().  On exit it
 * posts the components, optionally runs the double-scan phase, and
 * signals SVC_STOPPED so mdd_lfsck_stop()/start() waiters wake up.
 *
 * Returns 0 or a negative errno; 1 from the engine means the whole
 * object table was scanned (converted to 0 after double scan). */
static int mdd_lfsck_main(void *args)
{
        struct lu_env            env;
        struct md_lfsck         *lfsck    = (struct md_lfsck *)args;
        struct ptlrpc_thread    *thread   = &lfsck->ml_thread;
        struct dt_object        *oit_obj  = lfsck->ml_obj_oit;
        const struct dt_it_ops  *oit_iops = &oit_obj->do_index_ops->dio_it;
        struct dt_it            *oit_di;
        int                      rc;
        ENTRY;

        cfs_daemonize("lfsck");
        rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
        if (rc != 0) {
                CERROR("%s: LFSCK, fail to init env, rc = %d\n",
                       mdd_lfsck2name(lfsck), rc);
                GOTO(noenv, rc);
        }

        oit_di = oit_iops->init(&env, oit_obj, lfsck->ml_args_oit, BYPASS_CAPA);
        if (IS_ERR(oit_di)) {
                rc = PTR_ERR(oit_di);
                CERROR("%s: LFSCK, fail to init iteration, rc = %d\n",
                       mdd_lfsck2name(lfsck), rc);
                GOTO(fini_env, rc);
        }

        /* Publish the iterator under ml_lock so mdd_lfsck_stop() can see
         * it and wake us from a low-layer sleep via dio_it.put(). */
        spin_lock(&lfsck->ml_lock);
        lfsck->ml_di_oit = oit_di;
        spin_unlock(&lfsck->ml_lock);
        rc = mdd_lfsck_prep(&env, lfsck);
        if (rc != 0)
                GOTO(fini_oit, rc);

        CDEBUG(D_LFSCK, "LFSCK entry: oit_flags = 0x%x, dir_flags = 0x%x, "
               "oit_cookie = "LPU64", dir_cookie = "LPU64", parent = "DFID
               ", pid = %d\n", lfsck->ml_args_oit, lfsck->ml_args_dir,
               lfsck->ml_pos_current.lp_oit_cookie,
               lfsck->ml_pos_current.lp_dir_cookie,
               PFID(&lfsck->ml_pos_current.lp_dir_parent),
               cfs_curproc_pid());

        /* Tell the starter (blocked in mdd_lfsck_start()) we are up. */
        spin_lock(&lfsck->ml_lock);
        thread_set_flags(thread, SVC_RUNNING);
        spin_unlock(&lfsck->ml_lock);
        cfs_waitq_broadcast(&thread->t_ctl_waitq);

        /* Only scan if some component is active (or none needs the
         * double-scan phase only); otherwise report "completed". */
        if (!cfs_list_empty(&lfsck->ml_list_scan) ||
            cfs_list_empty(&lfsck->ml_list_double_scan))
                rc = mdd_lfsck_oit_engine(&env, lfsck);
        else
                rc = 1;

        CDEBUG(D_LFSCK, "LFSCK exit: oit_flags = 0x%x, dir_flags = 0x%x, "
               "oit_cookie = "LPU64", dir_cookie = "LPU64", parent = "DFID
               ", pid = %d, rc = %d\n", lfsck->ml_args_oit, lfsck->ml_args_dir,
               lfsck->ml_pos_current.lp_oit_cookie,
               lfsck->ml_pos_current.lp_dir_cookie,
               PFID(&lfsck->ml_pos_current.lp_dir_parent),
               cfs_curproc_pid(), rc);

        /* NOTE(review): on pause, mdd_lfsck_stop() also calls dio_it.put()
         * on ml_di_oit under ml_lock; presumably this extra put() drops
         * the cursor reference held by the engine — confirm the otable
         * iterator's get/put reference rules before touching this. */
        if (lfsck->ml_paused && cfs_list_empty(&lfsck->ml_list_scan))
                oit_iops->put(&env, oit_di);

        rc = mdd_lfsck_post(&env, lfsck, rc);
        if (lfsck->ml_di_dir != NULL)
                mdd_lfsck_close_dir(&env, lfsck);

fini_oit:
        /* Clear ml_di_oit first so mdd_lfsck_stop() cannot touch a
         * finalized iterator. */
        spin_lock(&lfsck->ml_lock);
        lfsck->ml_di_oit = NULL;
        spin_unlock(&lfsck->ml_lock);

        oit_iops->fini(&env, oit_di);
        if (rc == 1) {
                /* Whole table scanned: run second pass if any component
                 * queued itself for double scan. */
                if (!cfs_list_empty(&lfsck->ml_list_double_scan))
                        rc = mdd_lfsck_double_scan(&env, lfsck);
                else
                        rc = 0;
        }

        /* XXX: Purge the pinned objects in the future. */

fini_env:
        lu_env_fini(&env);

noenv:
        /* Mark stopped and wake anyone waiting in start/stop. */
        spin_lock(&lfsck->ml_lock);
        thread_set_flags(thread, SVC_STOPPED);
        cfs_waitq_broadcast(&thread->t_ctl_waitq);
        spin_unlock(&lfsck->ml_lock);
        return rc;
}
947
948 /* external interfaces */
949
950 int mdd_lfsck_set_speed(const struct lu_env *env, struct md_lfsck *lfsck,
951                         __u32 limit)
952 {
953         int rc;
954
955         mutex_lock(&lfsck->ml_mutex);
956         __mdd_lfsck_set_speed(lfsck, limit);
957         rc = mdd_lfsck_bookmark_store(env, lfsck);
958         mutex_unlock(&lfsck->ml_mutex);
959         return rc;
960 }
961
962 int mdd_lfsck_start(const struct lu_env *env, struct md_lfsck *lfsck,
963                     struct lfsck_start *start)
964 {
965         struct lfsck_bookmark  *bk     = &lfsck->ml_bookmark_ram;
966         struct ptlrpc_thread   *thread = &lfsck->ml_thread;
967         struct lfsck_component *com;
968         struct l_wait_info      lwi    = { 0 };
969         bool                    dirty  = false;
970         int                     rc     = 0;
971         __u16                   valid  = 0;
972         __u16                   flags  = 0;
973         ENTRY;
974
975         if (lfsck->ml_obj_oit == NULL)
976                 RETURN(-ENOTSUPP);
977
978         /* start == NULL means auto trigger paused LFSCK. */
979         if (start == NULL && cfs_list_empty(&lfsck->ml_list_scan))
980                 RETURN(0);
981
982         mutex_lock(&lfsck->ml_mutex);
983         spin_lock(&lfsck->ml_lock);
984         if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
985                 spin_unlock(&lfsck->ml_lock);
986                 mutex_unlock(&lfsck->ml_mutex);
987                 RETURN(-EALREADY);
988         }
989
990         spin_unlock(&lfsck->ml_lock);
991
992         lfsck->ml_paused = 0;
993         lfsck->ml_oit_over = 0;
994         lfsck->ml_drop_dryrun = 0;
995         lfsck->ml_new_scanned = 0;
996
997         /* For auto trigger. */
998         if (start == NULL)
999                 goto trigger;
1000
1001         start->ls_version = bk->lb_version;
1002         if (start->ls_valid & LSV_SPEED_LIMIT) {
1003                 __mdd_lfsck_set_speed(lfsck, start->ls_speed_limit);
1004                 dirty = true;
1005         }
1006
1007         if (start->ls_valid & LSV_ERROR_HANDLE) {
1008                 valid |= DOIV_ERROR_HANDLE;
1009                 if (start->ls_flags & LPF_FAILOUT)
1010                         flags |= DOIF_FAILOUT;
1011
1012                 if ((start->ls_flags & LPF_FAILOUT) &&
1013                     !(bk->lb_param & LPF_FAILOUT)) {
1014                         bk->lb_param |= LPF_FAILOUT;
1015                         dirty = true;
1016                 } else if (!(start->ls_flags & LPF_FAILOUT) &&
1017                            (bk->lb_param & LPF_FAILOUT)) {
1018                         bk->lb_param &= ~LPF_FAILOUT;
1019                         dirty = true;
1020                 }
1021         }
1022
1023         if (start->ls_valid & LSV_DRYRUN) {
1024                 if ((start->ls_flags & LPF_DRYRUN) &&
1025                     !(bk->lb_param & LPF_DRYRUN)) {
1026                         bk->lb_param |= LPF_DRYRUN;
1027                         dirty = true;
1028                 } else if (!(start->ls_flags & LPF_DRYRUN) &&
1029                            (bk->lb_param & LPF_DRYRUN)) {
1030                         bk->lb_param &= ~LPF_DRYRUN;
1031                         lfsck->ml_drop_dryrun = 1;
1032                         dirty = true;
1033                 }
1034         }
1035
1036         if (dirty) {
1037                 rc = mdd_lfsck_bookmark_store(env, lfsck);
1038                 if (rc != 0)
1039                         GOTO(out, rc);
1040         }
1041
1042         if (start->ls_flags & LPF_RESET)
1043                 flags |= DOIF_RESET;
1044
1045         if (start->ls_active != 0) {
1046                 struct lfsck_component *next;
1047                 __u16 type = 1;
1048
1049                 if (start->ls_active == LFSCK_TYPES_ALL)
1050                         start->ls_active = LFSCK_TYPES_SUPPORTED;
1051
1052                 if (start->ls_active & ~LFSCK_TYPES_SUPPORTED) {
1053                         start->ls_active &= ~LFSCK_TYPES_SUPPORTED;
1054                         GOTO(out, rc = -ENOTSUPP);
1055                 }
1056
1057                 cfs_list_for_each_entry_safe(com, next,
1058                                              &lfsck->ml_list_scan, lc_link) {
1059                         if (!(com->lc_type & start->ls_active)) {
1060                                 rc = com->lc_ops->lfsck_post(env, com, 0);
1061                                 if (rc != 0)
1062                                         GOTO(out, rc);
1063                         }
1064                 }
1065
1066                 while (start->ls_active != 0) {
1067                         if (type & start->ls_active) {
1068                                 com = __mdd_lfsck_component_find(lfsck, type,
1069                                                         &lfsck->ml_list_idle);
1070                                 if (com != NULL) {
1071                                         /* The component status will be updated
1072                                          * when its prep() is called later by
1073                                          * the LFSCK main engine. */
1074                                         cfs_list_del_init(&com->lc_link);
1075                                         cfs_list_add_tail(&com->lc_link,
1076                                                           &lfsck->ml_list_scan);
1077                                 }
1078                                 start->ls_active &= ~type;
1079                         }
1080                         type <<= 1;
1081                 }
1082         }
1083
1084         cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
1085                 start->ls_active |= com->lc_type;
1086                 if (flags & DOIF_RESET) {
1087                         rc = com->lc_ops->lfsck_reset(env, com, false);
1088                         if (rc != 0)
1089                                 GOTO(out, rc);
1090                 }
1091         }
1092
1093 trigger:
1094         lfsck->ml_args_dir = LUDA_64BITHASH | LUDA_VERIFY;
1095         if (bk->lb_param & LPF_DRYRUN)
1096                 lfsck->ml_args_dir |= LUDA_VERIFY_DRYRUN;
1097
1098         if (bk->lb_param & LPF_FAILOUT) {
1099                 valid |= DOIV_ERROR_HANDLE;
1100                 flags |= DOIF_FAILOUT;
1101         }
1102
1103         if (!cfs_list_empty(&lfsck->ml_list_scan))
1104                 flags |= DOIF_OUTUSED;
1105
1106         lfsck->ml_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
1107         thread_set_flags(thread, 0);
1108         rc = cfs_create_thread(mdd_lfsck_main, lfsck, 0);
1109         if (rc < 0)
1110                 CERROR("%s: cannot start LFSCK thread, rc = %d\n",
1111                        mdd_lfsck2name(lfsck), rc);
1112         else
1113                 l_wait_event(thread->t_ctl_waitq,
1114                              thread_is_running(thread) ||
1115                              thread_is_stopped(thread),
1116                              &lwi);
1117
1118         GOTO(out, rc = 0);
1119
1120 out:
1121         mutex_unlock(&lfsck->ml_mutex);
1122         return (rc < 0 ? rc : 0);
1123 }
1124
/**
 * Stop the running LFSCK engine thread and wait until it has exited.
 *
 * \param[in] env    execution environment
 * \param[in] lfsck  LFSCK instance embedded in the mdd_device
 * \param[in] pause  true to mark the scan as paused (resumable later by
 *                   an auto trigger), false for a plain stop
 *
 * \retval 0          the thread has stopped
 * \retval -EALREADY  no LFSCK thread is running
 */
int mdd_lfsck_stop(const struct lu_env *env, struct md_lfsck *lfsck,
                   bool pause)
{
        struct ptlrpc_thread *thread = &lfsck->ml_thread;
        struct l_wait_info    lwi    = { 0 };
        ENTRY;

        mutex_lock(&lfsck->ml_mutex);
        spin_lock(&lfsck->ml_lock);
        if (thread_is_init(thread) || thread_is_stopped(thread)) {
                spin_unlock(&lfsck->ml_lock);
                mutex_unlock(&lfsck->ml_mutex);
                RETURN(-EALREADY);
        }

        if (pause)
                lfsck->ml_paused = 1;
        thread_set_flags(thread, SVC_STOPPING);
        /* The LFSCK thread may be sleeping on low layer wait queue,
         * wake it up. */
        /* ml_di_oit is published/cleared by the engine under ml_lock, so
         * holding the lock here keeps the iterator valid for put(). */
        if (likely(lfsck->ml_di_oit != NULL))
                lfsck->ml_obj_oit->do_index_ops->dio_it.put(env,
                                                            lfsck->ml_di_oit);
        spin_unlock(&lfsck->ml_lock);

        /* Wake the engine from its control waitq and wait for it to
         * reach SVC_STOPPED. */
        cfs_waitq_broadcast(&thread->t_ctl_waitq);
        l_wait_event(thread->t_ctl_waitq,
                     thread_is_stopped(thread),
                     &lwi);
        mutex_unlock(&lfsck->ml_mutex);

        RETURN(0);
}
1158
/* Well-known local FID of the otable-based iteration object on the
 * bottom (OSD) device; located in mdd_lfsck_setup(). */
static const struct lu_fid lfsck_it_fid = { .f_seq = FID_SEQ_LOCAL_FILE,
                                            .f_oid = OTABLE_IT_OID,
                                            .f_ver = 0 };
1162
/**
 * One-time LFSCK initialization for an mdd_device.
 *
 * Initializes locks, component lists and the control waitq, locates the
 * otable-based iteration object and the on-disk bookmark file, and loads
 * (or creates) the bookmark.  Objects pinned here are released by
 * mdd_lfsck_cleanup().
 *
 * \retval 0         success (including the no-otable-support case)
 * \retval negative  errno on failure
 */
int mdd_lfsck_setup(const struct lu_env *env, struct mdd_device *mdd)
{
        struct md_lfsck  *lfsck = &mdd->mdd_lfsck;
        struct dt_object *obj;
        int               rc;
        ENTRY;

        LASSERT(!lfsck->ml_initialized);

        /* Mark initialized first so cleanup() will release whatever this
         * function managed to pin, even on a partial-failure return. */
        lfsck->ml_initialized = 1;
        mutex_init(&lfsck->ml_mutex);
        spin_lock_init(&lfsck->ml_lock);
        CFS_INIT_LIST_HEAD(&lfsck->ml_list_scan);
        CFS_INIT_LIST_HEAD(&lfsck->ml_list_dir);
        CFS_INIT_LIST_HEAD(&lfsck->ml_list_double_scan);
        CFS_INIT_LIST_HEAD(&lfsck->ml_list_idle);
        cfs_waitq_init(&lfsck->ml_thread.t_ctl_waitq);

        obj = dt_locate(env, mdd->mdd_bottom, &lfsck_it_fid);
        if (IS_ERR(obj))
                RETURN(PTR_ERR(obj));

        lfsck->ml_obj_oit = obj;
        rc = obj->do_ops->do_index_try(env, obj, &dt_otable_features);
        if (rc != 0) {
                /* NOTE(review): on -ENOTSUPP this returns 0 but leaves
                 * ml_obj_oit set even though the object has no otable
                 * index ops, while mdd_lfsck_start() only checks
                 * ml_obj_oit == NULL to reject unsupported devices —
                 * confirm the OSD guarantees index_try success here or
                 * that start() cannot be reached in that case. */
                if (rc == -ENOTSUPP)
                        rc = 0;

                RETURN(rc);
        }

        obj = dt_store_open(env, mdd->mdd_bottom, "", lfsck_bookmark_name,
                            &mdd_env_info(env)->mti_fid);
        if (IS_ERR(obj))
                RETURN(PTR_ERR(obj));

        lfsck->ml_bookmark_obj = obj;
        /* Load the persistent bookmark; create a fresh one on first use. */
        rc = mdd_lfsck_bookmark_load(env, lfsck);
        if (rc == -ENODATA)
                rc = mdd_lfsck_bookmark_init(env, lfsck);

        /* XXX: LFSCK components initialization to be added here. */

        RETURN(rc);
}
1208
1209 void mdd_lfsck_cleanup(const struct lu_env *env, struct mdd_device *mdd)
1210 {
1211         struct md_lfsck         *lfsck  = &mdd->mdd_lfsck;
1212         struct ptlrpc_thread    *thread = &lfsck->ml_thread;
1213         struct lfsck_component  *com;
1214
1215         if (!lfsck->ml_initialized)
1216                 return;
1217
1218         LASSERT(thread_is_init(thread) || thread_is_stopped(thread));
1219
1220         if (lfsck->ml_obj_oit != NULL) {
1221                 lu_object_put(env, &lfsck->ml_obj_oit->do_lu);
1222                 lfsck->ml_obj_oit = NULL;
1223         }
1224
1225         LASSERT(lfsck->ml_obj_dir == NULL);
1226
1227         if (lfsck->ml_bookmark_obj != NULL) {
1228                 lu_object_put(env, &lfsck->ml_bookmark_obj->do_lu);
1229                 lfsck->ml_bookmark_obj = NULL;
1230         }
1231
1232         while (!cfs_list_empty(&lfsck->ml_list_scan)) {
1233                 com = cfs_list_entry(lfsck->ml_list_scan.next,
1234                                      struct lfsck_component,
1235                                      lc_link);
1236                 mdd_lfsck_component_cleanup(env, com);
1237         }
1238
1239         LASSERT(cfs_list_empty(&lfsck->ml_list_dir));
1240
1241         while (!cfs_list_empty(&lfsck->ml_list_double_scan)) {
1242                 com = cfs_list_entry(lfsck->ml_list_double_scan.next,
1243                                      struct lfsck_component,
1244                                      lc_link);
1245                 mdd_lfsck_component_cleanup(env, com);
1246         }
1247
1248         while (!cfs_list_empty(&lfsck->ml_list_idle)) {
1249                 com = cfs_list_entry(lfsck->ml_list_idle.next,
1250                                      struct lfsck_component,
1251                                      lc_link);
1252                 mdd_lfsck_component_cleanup(env, com);
1253         }
1254 }