4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, 2016, Intel Corporation.
26 * lustre/lfsck/lfsck_engine.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <lu_object.h>
34 #include <dt_object.h>
35 #include <lustre_net.h>
36 #include <lustre_fid.h>
37 #include <obd_support.h>
38 #include <lustre_lib.h>
40 #include "lfsck_internal.h"
/* Unpack an on-disk (little-endian) directory entry in place: byte-swap the
 * FID, hash, reclen, namelen and attrs to CPU order, return the directory
 * hash via @cookie and the object type (packed after the name) via @type,
 * and NUL-terminate the entry name. */
42 int lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie, __u16 *type)
45 int align = sizeof(*lt) - 1;
48 fid_le_to_cpu(&ent->lde_fid, &ent->lde_fid);
49 *cookie = le64_to_cpu(ent->lde_hash);
50 ent->lde_reclen = le16_to_cpu(ent->lde_reclen);
51 ent->lde_namelen = le16_to_cpu(ent->lde_namelen);
52 ent->lde_attrs = le32_to_cpu(ent->lde_attrs);
/* Without LUDA_TYPE there is no luda_type record after the name. */
54 if (unlikely(!(ent->lde_attrs & LUDA_TYPE)))
/* The type record sits after the name, rounded up to luda_type alignment. */
57 len = (ent->lde_namelen + align) & ~align;
58 lt = (struct luda_type *)(ent->lde_name + len);
59 *type = le16_to_cpu(lt->lt_type);
61 /* Make sure the name is terminated with '\0'. The data (object type)
62 * after ent::lde_name maybe broken, but we have stored such data in
63 * the output parameter @type as above. */
64 ent->lde_name[ent->lde_namelen] = '\0';
/* Detach the OIT (object table) iterator from the lfsck instance under
 * li_lock; the iterator ops are fetched from li_obj_oit for the caller's
 * subsequent release of the iterator. */
69 static void lfsck_di_oit_put(const struct lu_env *env, struct lfsck_instance *lfsck)
71 const struct dt_it_ops *iops;
74 spin_lock(&lfsck->li_lock);
75 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
76 di = lfsck->li_di_oit;
/* Clear under the lock so concurrent readers never see a stale iterator. */
77 lfsck->li_di_oit = NULL;
78 spin_unlock(&lfsck->li_lock);
/* Detach the directory iterator from the lfsck instance under li_lock and
 * reset the saved directory cookie; mirrors lfsck_di_oit_put() for the
 * namespace (directory) iteration. */
82 static void lfsck_di_dir_put(const struct lu_env *env, struct lfsck_instance *lfsck)
84 const struct dt_it_ops *iops;
87 spin_lock(&lfsck->li_lock);
88 iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
89 di = lfsck->li_di_dir;
90 lfsck->li_di_dir = NULL;
/* Forget the resume position along with the iterator. */
91 lfsck->li_cookie_dir = 0;
92 spin_unlock(&lfsck->li_lock);
/* Fetch the FID of @obj's parent directory by looking up its ".." entry.
 * Fails unless @obj is a directory supporting index operations. */
96 static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
99 if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
100 !dt_try_as_dir(env, obj)))
103 return dt_lookup(env, obj, (struct dt_rec *)fid,
104 (const struct dt_key *)"..");
108 * Check whether needs to scan the directory or not.
110 * 1) If we are not doing namespace LFSCK, or the given @obj is not directory,
111 * then needs not to scan the @obj. Otherwise,
112 * 2) Global /ROOT needs to be scanned, backend root needs not to be scanned.
113 * 3) If the @obj is neither IGIF nor normal FID (including .lustre and its
114 * sub-directories that have been scanned when the LFSCK engine start),
115 * then needs not to be scanned.
116 * 4) If it is a remote object, then scanning the object will be done on the
117 * MDT on which the object really resides.
118 * 5) If the local object has normal FID, then needs to be scanned. Otherwise,
119 * 6) If the object has linkEA, then needs to be scanned. Otherwise,
120 * 7) If none of the previous conditions are true, we need to check the parent
121 * directories whether this subdirectory is in a tree that should be scanned.
122 * Set the parent as current @obj, repeat 2)-7).
124 * \param[in] env pointer to the thread context
125 * \param[in] lfsck pointer to the lfsck instance
126 * \param[in] obj pointer to the object to be checked
128 * \retval positive number if the directory needs to be scanned
129 * \retval 0 if the directory needs NOT to be scanned
130 * \retval negative error number on failure
132 static int lfsck_needs_scan_dir(const struct lu_env *env,
133 struct lfsck_instance *lfsck,
134 struct dt_object *obj)
136 struct lfsck_thread_info *info = lfsck_env_info(env);
137 struct lu_fid *fid = &info->lti_fid;
138 struct lu_seq_range *range = &info->lti_range;
139 struct seq_server_site *ss = lfsck_dev_site(lfsck);
140 __u32 idx = lfsck_dev_idx(lfsck);
/* No namespace component registered, or @obj is not a directory: skip. */
144 if (list_empty(&lfsck->li_list_dir) || !S_ISDIR(lfsck_object_type(obj)))
149 *fid = *lfsck_dto2fid(obj);
151 /* Global /ROOT is visible. */
152 if (unlikely(lu_fid_eq(fid, &lfsck->li_global_root_fid)))
155 /* Backend root is invisible. */
156 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
159 if (!fid_is_norm(fid) && !fid_is_igif(fid))
/* Ask the local FLD whether this FID's sequence belongs to this target. */
162 fld_range_set_mdt(range);
163 rc = fld_local_lookup(env, ss->ss_server_fld,
164 fid_seq(fid), range);
165 if (rc != 0 || range->lsr_index != idx)
166 /* Current FID should NOT be for the input parameter
167 * @obj, because the lfsck_master_oit_engine() has
168 * filtered out agent object. So current FID is for
169 * the ancestor of the original input parameter @obj.
170 * So the ancestor is a remote directory. The input
171 * parameter @obj is local directory, and should be
172 * scanned under such case. */
175 /* normal FID on this target (locally) must be for the
176 * client-side visible object. */
177 if (fid_is_norm(fid))
/* Walk up: load the ancestor object and repeat the checks on it. */
181 obj = lfsck_object_find_bottom(env, lfsck, fid)
186 if (!dt_object_exists(obj))
190 dt_read_lock(env, obj, MOR_TGT_CHILD);
191 if (unlikely(lfsck_is_dead_obj(obj))) {
192 dt_read_unlock(env, obj);
/* Probe for linkEA; a zero-sized buffer only queries its existence. */
197 rc = dt_xattr_get(env, obj,
198 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK);
199 dt_read_unlock(env, obj);
203 if (rc < 0 && rc != -ENODATA)
206 rc = lfsck_parent_fid(env, obj, fid);
208 lfsck_object_put(env, obj);
214 if (!fid_is_sane(fid))
/* Release any ancestor object pinned while walking up the tree. */
219 if (depth > 0 && obj != NULL)
220 lfsck_object_put(env, obj);
/* Read the LMV EA of @obj and attach the directory to the lfsck instance:
 * a plain directory (-ENODATA) is simply pinned as li_obj_dir; an LMV
 * master gets a per-stripe record array; otherwise the object is treated
 * as a slave stripe. */
225 static int lfsck_load_stripe_lmv(const struct lu_env *env,
226 struct lfsck_instance *lfsck,
227 struct dt_object *obj)
229 struct lmv_mds_md_v1 *lmv = &lfsck_env_info(env)->lti_lmv;
230 struct lfsck_lmv *llmv;
234 LASSERT(lfsck->li_obj_dir == NULL);
235 LASSERT(lfsck->li_lmv == NULL);
237 rc = lfsck_read_stripe_lmv(env, obj, lmv);
/* No LMV EA: ordinary directory, just take a reference on it. */
238 if (rc == -ENODATA) {
239 lfsck->li_obj_dir = lfsck_object_get(obj);
/* LMV_MAGIC marks the master object of a striped directory: allocate the
 * slave-record array, clamping the stripe count to a sane range. */
251 if (lmv->lmv_magic == LMV_MAGIC) {
252 struct lfsck_slave_lmv_rec *lslr;
255 llmv->ll_lmv_master = 1;
256 if (lmv->lmv_stripe_count < 1)
257 stripes = LFSCK_LMV_DEF_STRIPES;
258 else if (lmv->lmv_stripe_count > LFSCK_LMV_MAX_STRIPES)
259 stripes = LFSCK_LMV_MAX_STRIPES;
261 stripes = lmv->lmv_stripe_count;
263 OBD_ALLOC_LARGE(lslr, sizeof(*lslr) * stripes);
270 llmv->ll_stripes_allocated = stripes;
/* Hash type is learned later while scanning the stripes. */
271 llmv->ll_hash_type = LMV_HASH_TYPE_UNKNOWN;
272 llmv->ll_lslr = lslr;
/* Otherwise this object is a slave stripe of a striped directory. */
274 llmv->ll_lmv_slave = 1;
277 lfsck->li_obj_dir = lfsck_object_get(obj);
279 atomic_set(&llmv->ll_ref, 1);
280 lfsck->li_lmv = llmv;
285 /* LFSCK wrap functions */
/* Record a scan failure by notifying every component on the active scan
 * list, passing @new_checked through to each component's handler. */
287 static void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
290 struct lfsck_component *com;
292 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
293 com->lc_ops->lfsck_fail(env, com, new_checked);
/* Close the current directory scan: record @result in the LMV state and
 * release it, tear down the directory iterator, and drop the reference on
 * the directory object. Safe to call when no directory is open. */
297 void lfsck_close_dir(const struct lu_env *env,
298 struct lfsck_instance *lfsck, int result)
300 struct lfsck_component *com;
303 if (lfsck->li_lmv != NULL) {
304 lfsck->li_lmv->ll_exit_value = result;
/* Let each namespace component finish its per-directory state first. */
305 if (lfsck->li_obj_dir != NULL) {
306 list_for_each_entry(com, &lfsck->li_list_dir,
308 com->lc_ops->lfsck_close_dir(env, com);
312 lfsck_lmv_put(env, lfsck->li_lmv);
313 lfsck->li_lmv = NULL;
/* Detach the iterator under li_lock, then finalize it outside the lock. */
316 if (lfsck->li_di_dir != NULL) {
317 const struct dt_it_ops *dir_iops;
318 struct dt_it *dir_di = lfsck->li_di_dir;
320 LASSERT(lfsck->li_obj_dir != NULL);
322 dir_iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
323 lfsck_di_dir_put(env, lfsck);
324 dir_iops->fini(env, dir_di);
327 if (lfsck->li_obj_dir != NULL) {
328 struct dt_object *dir_obj = lfsck->li_obj_dir;
330 lfsck->li_obj_dir = NULL;
331 lfsck_object_put(env, dir_obj);
/* Start iteration over the current directory (li_obj_dir): notify the
 * namespace components, initialize the directory iterator, and position it
 * at @cookie (resuming past the already-processed entry when resuming).
 * On failure the partially-opened directory is closed again. */
337 int lfsck_open_dir(const struct lu_env *env,
338 struct lfsck_instance *lfsck, __u64 cookie)
340 struct dt_object *obj = lfsck->li_obj_dir;
341 struct dt_it *di = lfsck->li_di_dir;
342 struct lfsck_component *com;
343 const struct dt_it_ops *iops;
347 LASSERT(obj != NULL);
350 if (unlikely(!dt_try_as_dir(env, obj)))
351 GOTO(out, rc = -ENOTDIR);
353 list_for_each_entry(com, &lfsck->li_list_dir, lc_link_dir) {
354 rc = com->lc_ops->lfsck_open_dir(env, com);
359 iops = &obj->do_index_ops->dio_it;
360 di = iops->init(env, obj, lfsck->li_args_dir);
362 GOTO(out, rc = PTR_ERR(di));
364 rc = iops->load(env, di, cookie);
/* When resuming at a non-zero cookie, the entry at @cookie was already
 * handled, so advance to the next one. */
367 else if (rc == 0 || (rc > 0 && cookie > 0))
368 rc = iops->next(env, di);
/* Publish the iterator and its position under li_lock. */
376 lfsck->li_cookie_dir = iops->store(env, di);
377 spin_lock(&lfsck->li_lock);
378 lfsck->li_di_dir = di;
379 spin_unlock(&lfsck->li_lock);
386 lfsck_close_dir(env, lfsck, rc);
/* Write a periodic checkpoint: no-op until the next checkpoint deadline,
 * then record the current position, let every scan component checkpoint
 * itself, and schedule the next checkpoint LFSCK_CHECKPOINT_INTERVAL
 * seconds from now. Returns the first component failure, if any. */
391 static int lfsck_checkpoint(const struct lu_env *env,
392 struct lfsck_instance *lfsck)
394 struct lfsck_component *com;
398 if (likely(ktime_get_seconds() <= lfsck->li_time_next_checkpoint))
401 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
402 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
403 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
408 lfsck->li_time_last_checkpoint = ktime_get_seconds();
409 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
410 LFSCK_CHECKPOINT_INTERVAL;
411 return rc1 != 0 ? rc1 : rc;
/* Prepare a new scan pass: call each component's lfsck_prep() to determine
 * the smallest resume position, load the otable-based (OIT) iterator at
 * that position, and — on a master (MDT) resuming inside a directory —
 * reopen that directory for namespace-based traversal. Finishes with an
 * initial checkpoint of all components. */
414 static int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
415 struct lfsck_start_param *lsp)
417 struct dt_object *obj = NULL;
418 struct lfsck_component *com;
419 struct lfsck_component *next;
420 struct lfsck_position *pos = NULL;
421 const struct dt_it_ops *iops =
422 &lfsck->li_obj_oit->do_index_ops->dio_it;
426 LASSERT(lfsck->li_obj_dir == NULL);
427 LASSERT(lfsck->li_di_dir == NULL);
429 lfsck->li_current_oit_processed = 0;
/* Pick the earliest non-zero start position among the components. */
430 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
431 com->lc_new_checked = 0;
432 rc = com->lc_ops->lfsck_prep(env, com, lsp);
437 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
438 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
439 pos = &com->lc_pos_start;
442 /* Init otable-based iterator. */
444 rc = iops->load(env, lfsck->li_di_oit, 0);
/* rc > 0 / -ENODATA from load(): nothing (left) to iterate. */
445 if (rc > 0 || unlikely(rc == -ENODATA)) {
446 lfsck->li_oit_over = 1;
453 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
454 if (rc > 0 || unlikely(rc == -ENODATA))
455 lfsck->li_oit_over = 1;
459 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
462 /* Find the directory for namespace-based traverse. */
463 obj = lfsck_object_find_bottom(env, lfsck, &pos->lp_dir_parent);
465 RETURN(PTR_ERR(obj));
467 /* Remote directory will be scanned by the LFSCK instance
468 * on the MDT where the remote object really resides on. */
469 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
470 unlikely(!S_ISDIR(lfsck_object_type(obj))))
473 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
475 /* For the master MDT-object of a striped directory,
476 * reset the iteration from the directory beginning. */
477 if (lfsck->li_lmv != NULL && lfsck->li_lmv->ll_lmv_master)
478 pos->lp_dir_cookie = 0;
480 rc = lfsck_open_dir(env, lfsck, pos->lp_dir_cookie);
482 /* The end of the directory. */
490 lfsck_object_put(env, obj);
/* Error path: close the directory and post all components before exit. */
493 lfsck_close_dir(env, lfsck, rc);
494 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
496 com->lc_ops->lfsck_post(env, com, rc, true);
/* Initial checkpoint to persist the starting position. */
503 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, true);
504 lfsck->li_pos_current = lfsck->li_pos_checkpoint;
505 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
506 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
511 lfsck->li_time_last_checkpoint = ktime_get_seconds();
512 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
513 LFSCK_CHECKPOINT_INTERVAL;
/* Process one object found by the OIT iteration: run every scan
 * component's lfsck_exec_oit() on it, and if the object is a directory
 * that needs namespace scanning, load its LMV state and open it for the
 * directory engine. Component failures are recorded via lfsck_fail(). */
517 static int lfsck_exec_oit(const struct lu_env *env,
518 struct lfsck_instance *lfsck, struct dt_object *obj)
520 struct lfsck_component *com;
524 LASSERT(lfsck->li_obj_dir == NULL);
526 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
527 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
532 rc = lfsck_needs_scan_dir(env, lfsck, obj);
536 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
538 rc = lfsck_open_dir(env, lfsck, 0);
544 lfsck_fail(env, lfsck, false);
547 lfsck_close_dir(env, lfsck, rc);
/* Positive rc here is not an error for the caller. */
549 return rc > 0 ? 0 : rc;
/* Process one directory entry: hand @lso/@ent/@type to every scan
 * component's lfsck_exec_dir() in turn. */
552 static int lfsck_exec_dir(const struct lu_env *env,
553 struct lfsck_instance *lfsck,
554 struct lfsck_assistant_object *lso,
555 struct lu_dirent *ent, __u16 type)
557 struct lfsck_component *com;
560 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
561 rc = com->lc_ops->lfsck_exec_dir(env, com, lso, ent, type);
568 static int lfsck_master_dir_engine(const struct lu_env *env,
569 struct lfsck_instance *lfsck);
/* Finish the first-phase scan: close the current directory, drain any
 * queued striped-directory (LMV) units while the thread still runs, call
 * each scan component's lfsck_post() with @result, and reset the
 * checkpoint deadline. */
571 static int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
574 struct lfsck_component *com;
575 struct lfsck_component *next;
578 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
579 lfsck_close_dir(env, lfsck, result);
581 while (thread_is_running(&lfsck->li_thread) && rc > 0 &&
582 !list_empty(&lfsck->li_list_lmv)) {
583 struct lfsck_lmv_unit *llu;
/* Dequeue the next striped-directory unit under li_lock. */
585 spin_lock(&lfsck->li_lock);
586 llu = list_entry(lfsck->li_list_lmv.next,
587 struct lfsck_lmv_unit, llu_link);
588 list_del_init(&llu->llu_link);
589 spin_unlock(&lfsck->li_lock);
591 lfsck->li_lmv = &llu->llu_lmv;
592 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
593 rc = lfsck_open_dir(env, lfsck, 0);
595 rc = lfsck_master_dir_engine(env, lfsck);
596 lfsck_close_dir(env, lfsck, result);
602 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
603 rc = com->lc_ops->lfsck_post(env, com, result, false);
605 CDEBUG(D_LFSCK, "%s: lfsck_post at the component %u: "
606 "rc = %d\n", lfsck_lfsck2name(lfsck),
607 (__u32)com->lc_type, rc);
610 lfsck->li_time_last_checkpoint = ktime_get_seconds();
611 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
612 LFSCK_CHECKPOINT_INTERVAL;
614 /* Ignore some components' post failures so the others can go ahead. */
/* Run the second-phase (double) scan of every component on the
 * double-scan list, wait for all in-flight double scans to drain, then —
 * unless the LFSCK was paused — move the components back to the idle
 * list. Returns the first failure, if any. */
618 static int lfsck_double_scan(const struct lu_env *env,
619 struct lfsck_instance *lfsck)
621 struct lfsck_component *com;
622 struct lfsck_component *next;
623 struct l_wait_info lwi = { 0 };
627 list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
628 rc = com->lc_ops->lfsck_double_scan(env, com);
/* Wait until every asynchronous double scan has completed. */
633 l_wait_event(lfsck->li_thread.t_ctl_waitq,
634 atomic_read(&lfsck->li_double_scan_count) == 0,
/* Paused instances keep their components for a later resume. */
637 if (lfsck->li_status != LS_PAUSED &&
638 lfsck->li_status != LS_CO_PAUSED) {
639 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
641 spin_lock(&lfsck->li_lock);
642 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
643 spin_unlock(&lfsck->li_lock);
647 return rc1 != 0 ? rc1 : rc;
/* Abort the LFSCK: give every component on the scan and double-scan lists
 * a chance to quit via its lfsck_quit() hook, then move all components to
 * the idle list under li_lock. */
650 static void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
652 struct lfsck_component *com;
653 struct lfsck_component *next;
655 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
/* lfsck_quit is optional per component. */
657 if (com->lc_ops->lfsck_quit != NULL)
658 com->lc_ops->lfsck_quit(env, com);
660 spin_lock(&lfsck->li_lock);
661 list_del_init(&com->lc_link_dir);
662 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
663 spin_unlock(&lfsck->li_lock);
666 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
668 if (com->lc_ops->lfsck_quit != NULL)
669 com->lc_ops->lfsck_quit(env, com);
671 spin_lock(&lfsck->li_lock);
672 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
673 spin_unlock(&lfsck->li_lock);
/* Namespace-based directory scanning engine: walk the entries of the
 * currently open directory (li_obj_dir / li_di_dir), unpack each entry,
 * skip ignorable and dot entries, and run lfsck_exec_dir() on the rest,
 * checkpointing and throttling along the way. Honors thread stop requests
 * and the LPF_FAILOUT policy. */
679 static int lfsck_master_dir_engine(const struct lu_env *env,
680 struct lfsck_instance *lfsck)
682 struct lfsck_thread_info *info = lfsck_env_info(env);
683 struct dt_object *dir = lfsck->li_obj_dir;
684 const struct dt_it_ops *iops = &dir->do_index_ops->dio_it;
685 struct dt_it *di = lfsck->li_di_dir;
686 struct lu_dirent *ent =
687 (struct lu_dirent *)info->lti_key;
688 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
689 struct ptlrpc_thread *thread = &lfsck->li_thread;
690 struct lfsck_assistant_object *lso = NULL;
/* Fault-injection delay; bail out if the engine was stopped meanwhile. */
696 if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY2, cfs_fail_val) &&
697 unlikely(!thread_is_running(thread))) {
698 CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
699 "parent "DFID", cookie %#llx\n",
700 lfsck_lfsck2name(lfsck),
701 PFID(lfsck_dto2fid(dir)), lfsck->li_cookie_dir);
706 lfsck->li_new_scanned++;
707 rc = iops->rec(env, di, (struct dt_rec *)ent,
710 rc = lfsck_unpack_ent(ent, &lfsck->li_cookie_dir,
714 CDEBUG(D_LFSCK, "%s: scan dir failed at rec(), "
715 "parent "DFID", cookie %#llx: rc = %d\n",
716 lfsck_lfsck2name(lfsck),
717 PFID(lfsck_dto2fid(dir)),
718 lfsck->li_cookie_dir, rc);
719 lfsck_fail(env, lfsck, true);
720 if (bk->lb_param & LPF_FAILOUT)
726 if (ent->lde_attrs & LUDA_IGNORE)
729 /* skip dot entry. */
730 if (ent->lde_namelen == 1 && ent->lde_name[0] == '.')
734 lso = lfsck_assistant_object_init(env,
735 lfsck_dto2fid(dir), NULL,
736 lfsck->li_pos_current.lp_oit_cookie, true);
738 if (bk->lb_param & LPF_FAILOUT)
739 RETURN(PTR_ERR(lso));
746 /* The type in the @ent structure may have been overwritten,
747 * so we need to pass the @type parameter independently. */
748 rc = lfsck_exec_dir(env, lfsck, lso, ent, type);
749 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
753 rc = lfsck_checkpoint(env, lfsck);
754 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Rate-limit the scan according to the configured speed limit. */
758 lfsck_control_speed(lfsck);
759 if (unlikely(!thread_is_running(thread))) {
760 CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
761 "parent "DFID", cookie %#llx\n",
762 lfsck_lfsck2name(lfsck),
763 PFID(lfsck_dto2fid(dir)),
764 lfsck->li_cookie_dir);
/* Fault injection: simulate a fatal engine failure. */
768 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL2)) {
769 spin_lock(&lfsck->li_lock);
770 thread_set_flags(thread, SVC_STOPPING);
771 spin_unlock(&lfsck->li_lock);
772 GOTO(out, rc = -EINVAL);
775 rc = iops->next(env, di);
777 CDEBUG(D_LFSCK, "%s dir engine fail to locate next "
778 "for the directory "DFID": rc = %d\n",
779 lfsck_lfsck2name(lfsck),
780 PFID(&lfsck->li_pos_current.lp_dir_parent), rc);
/* rc > 0 from next(): directory exhausted; close it unless OIT is done. */
783 if (rc > 0 && !lfsck->li_oit_over)
784 lfsck_close_dir(env, lfsck, rc);
790 lfsck_assistant_object_put(env, lso);
796 * Object-table based iteration engine.
798 * Object-table based iteration is the basic linear engine to scan all the
799 * objects on current device in turn. For each object, it calls all the
800 * registered LFSCK component(s)' API to perform related consistency
803 * It flushes related LFSCK trace files to disk via making checkpoint
804 * periodically. Then if the server crashed or the LFSCK is paused, the
805 * LFSCK can resume from the latest checkpoint.
807 * It also controls the whole LFSCK speed via lfsck_control_speed() to
808 * avoid the server to become overload.
810 * \param[in] env pointer to the thread context
811 * \param[in] lfsck pointer to the lfsck instance
813 * \retval positive number if all objects have been scanned
814 * \retval 0 if the iteration is stopped or paused
815 * \retval negative error number on failure
817 static int lfsck_master_oit_engine(const struct lu_env *env,
818 struct lfsck_instance *lfsck)
820 struct lfsck_thread_info *info = lfsck_env_info(env);
821 const struct dt_it_ops *iops =
822 &lfsck->li_obj_oit->do_index_ops->dio_it;
823 struct dt_it *di = lfsck->li_di_oit;
824 struct lu_fid *fid = &info->lti_fid;
825 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
826 struct ptlrpc_thread *thread = &lfsck->li_thread;
827 struct seq_server_site *ss = lfsck_dev_site(lfsck);
828 __u32 idx = lfsck_dev_idx(lfsck);
832 if (unlikely(ss == NULL))
836 struct dt_object *target;
/* Resume a pending directory scan before advancing the OIT. */
838 if (lfsck->li_di_dir != NULL) {
839 rc = lfsck_master_dir_engine(env, lfsck);
844 if (unlikely(lfsck->li_oit_over))
847 if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY1, cfs_fail_val) &&
848 unlikely(!thread_is_running(thread))) {
849 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
851 lfsck_lfsck2name(lfsck), iops->store(env, di));
856 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
859 lfsck->li_current_oit_processed = 1;
/* Drain queued striped-directory (LMV) units first. */
861 if (!list_empty(&lfsck->li_list_lmv)) {
862 struct lfsck_lmv_unit *llu;
864 spin_lock(&lfsck->li_lock);
865 llu = list_entry(lfsck->li_list_lmv.next,
866 struct lfsck_lmv_unit, llu_link);
867 list_del_init(&llu->llu_link);
868 spin_unlock(&lfsck->li_lock);
870 lfsck->li_lmv = &llu->llu_lmv;
871 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
872 rc = lfsck_open_dir(env, lfsck, 0);
874 rc = lfsck_master_dir_engine(env, lfsck);
880 lfsck->li_new_scanned++;
881 lfsck->li_pos_current.lp_oit_cookie = iops->store(env, di);
882 rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
884 CDEBUG(D_LFSCK, "%s: OIT scan failed at rec(): "
885 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
886 lfsck_fail(env, lfsck, true);
887 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
893 if (unlikely(!fid_is_sane(fid))) {
894 CDEBUG(D_LFSCK, "%s: OIT scan find invalid FID "DFID
896 lfsck_lfsck2name(lfsck), PFID(fid));
/* IDIF FIDs only occur on OSTs (slave side); verify the embedded
 * OST index matches this device. */
900 if (fid_is_idif(fid)) {
901 __u32 idx1 = fid_idif_ost_idx(fid);
903 LASSERT(!lfsck->li_master);
906 struct ost_id *oi = &info->lti_oi;
908 if (unlikely(idx1 != 0)) {
909 CDEBUG(D_LFSCK, "%s: invalid IDIF "DFID
910 ", not match device index %u\n",
911 lfsck_lfsck2name(lfsck),
917 /* rebuild the IDIF with index to
918 * avoid double instances for the
920 fid_to_ostid(fid, oi);
921 ostid_to_fid(fid, oi, idx);
923 } else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
924 !fid_is_last_id(fid) &&
925 !lu_fid_eq(fid, &lfsck->li_global_root_fid)) {
927 /* If the FID/object is only used locally and invisible
928 * to external nodes, then LFSCK will not handle it.
930 * dot_lustre sequence has been handled specially. */
933 struct lu_seq_range *range = &info->lti_range;
935 if (lfsck->li_master)
936 fld_range_set_mdt(range);
938 fld_range_set_ost(range);
939 rc = fld_local_lookup(env, ss->ss_server_fld,
940 fid_seq(fid), range);
941 if (rc != 0 || range->lsr_index != idx) {
942 /* Remote object will be handled by the LFSCK
943 * instance on the MDT where the remote object
944 * really resides on. */
950 target = lfsck_object_find_bottom(env, lfsck, fid);
951 if (IS_ERR(target)) {
952 CDEBUG(D_LFSCK, "%s: OIT scan failed at find target "
953 DFID", cookie %llu: rc = %d\n",
954 lfsck_lfsck2name(lfsck), PFID(fid),
955 iops->store(env, di), rc);
956 lfsck_fail(env, lfsck, true);
957 if (bk->lb_param & LPF_FAILOUT)
958 RETURN(PTR_ERR(target));
963 if (dt_object_exists(target))
964 rc = lfsck_exec_oit(env, lfsck, target);
966 lfsck_object_put(env, target);
967 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
971 rc = lfsck_checkpoint(env, lfsck);
972 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Rate-limit the scan according to the configured speed limit. */
976 lfsck_control_speed(lfsck);
978 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL1)) {
979 spin_lock(&lfsck->li_lock);
980 thread_set_flags(thread, SVC_STOPPING);
981 spin_unlock(&lfsck->li_lock);
/* Advance the OIT: rc > 0 means the whole device has been scanned. */
985 rc = iops->next(env, di);
986 if (unlikely(rc > 0))
987 lfsck->li_oit_over = 1;
988 else if (likely(rc == 0))
989 lfsck->li_current_oit_processed = 0;
991 CDEBUG(D_LFSCK, "%s oit engine fail to locate next at "
992 "%llu: rc = %d\n", lfsck_lfsck2name(lfsck),
993 iops->store(env, di), rc);
995 if (unlikely(!thread_is_running(thread))) {
996 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
997 "cookie %llu\n", lfsck_lfsck2name(lfsck),
998 iops->store(env, di));
1001 } while (rc == 0 || lfsck->li_di_dir != NULL);
/* Main LFSCK master thread body: set up the OIT iterator, verify
 * .lustre/lost+found/ when needed, run lfsck_prep(), wait for the start
 * unplug, drive lfsck_master_oit_engine(), then post, double-scan and
 * quit/cleanup before signaling the thread as stopped. */
1006 int lfsck_master_engine(void *args)
1008 struct lfsck_thread_args *lta = args;
/* NOTE(review): "<a->lta_env" appears to be "&lta->lta_env" mangled by
 * HTML-entity decoding ("&lt" -> "<"); verify against upstream source. */
1009 struct lu_env *env = <a->lta_env;
1010 struct lfsck_instance *lfsck = lta->lta_lfsck;
1011 struct ptlrpc_thread *thread = &lfsck->li_thread;
1012 struct dt_object *oit_obj = lfsck->li_obj_oit;
1013 const struct dt_it_ops *oit_iops = &oit_obj->do_index_ops->dio_it;
1014 struct dt_it *oit_di;
1015 struct l_wait_info lwi = { 0 };
1019 spin_lock(&lfsck->li_lock);
1020 lfsck->li_task = current;
1021 spin_unlock(&lfsck->li_lock);
1023 /* There will be some objects verification during the LFSCK start,
1024 * such as the subsequent lfsck_verify_lpf(). Trigger low layer OI
1025 * scrub before that to handle the potential inconsistency. */
1026 oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit);
1027 if (IS_ERR(oit_di)) {
1028 rc = PTR_ERR(oit_di);
1029 CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
1030 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
1032 GOTO(fini_args, rc);
1035 if (lfsck->li_master &&
1036 (!list_empty(&lfsck->li_list_scan) ||
1037 !list_empty(&lfsck->li_list_double_scan))) {
1038 rc = lfsck_verify_lpf(env, lfsck);
1039 /* Fail to verify the .lustre/lost+found/MDTxxxx/ may be not
1040 * fatal, because the .lustre/lost+found/ maybe not accessed
1041 * by the LFSCK if it does not add orphans or others to such
1042 * directory. So go ahead until hit failure when really uses
1045 CDEBUG(D_LFSCK, "%s: master engine fail to verify the "
1046 ".lustre/lost+found/, go ahead: rc = %d\n",
1047 lfsck_lfsck2name(lfsck), rc);
1050 spin_lock(&lfsck->li_lock);
1051 lfsck->li_di_oit = oit_di;
1052 spin_unlock(&lfsck->li_lock);
1053 rc = lfsck_prep(env, lfsck, lta->lta_lsp);
1057 CDEBUG(D_LFSCK, "LFSCK entry: oit_flags = %#x, dir_flags = %#x, "
1058 "oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID
1059 ", pid = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
1060 lfsck->li_pos_checkpoint.lp_oit_cookie,
1061 lfsck->li_pos_checkpoint.lp_dir_cookie,
1062 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
/* Transition starting -> running, then tell the starter we are up. */
1065 spin_lock(&lfsck->li_lock);
1066 if (unlikely(!thread_is_starting(thread))) {
1067 spin_unlock(&lfsck->li_lock);
1068 GOTO(fini_oit, rc = 0);
1071 thread_set_flags(thread, SVC_RUNNING);
1072 spin_unlock(&lfsck->li_lock);
1073 wake_up_all(&thread->t_ctl_waitq);
/* Wait until the caller unplugs the start (or we are told to stop). */
1075 l_wait_event(thread->t_ctl_waitq,
1076 lfsck->li_start_unplug ||
1077 !thread_is_running(thread),
1079 if (!thread_is_running(thread))
1080 GOTO(fini_oit, rc = 0);
1082 if (!list_empty(&lfsck->li_list_scan) ||
1083 list_empty(&lfsck->li_list_double_scan))
1084 rc = lfsck_master_oit_engine(env, lfsck);
1088 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
1089 CDEBUG(D_LFSCK, "LFSCK exit: oit_flags = %#x, dir_flags = %#x, "
1090 "oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID
1091 ", pid = %d, rc = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
1092 lfsck->li_pos_checkpoint.lp_oit_cookie,
1093 lfsck->li_pos_checkpoint.lp_dir_cookie,
1094 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
1097 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
1098 rc = lfsck_post(env, lfsck, rc);
1100 lfsck_close_dir(env, lfsck, rc);
1103 lfsck_di_oit_put(env, lfsck);
1104 oit_iops->fini(env, oit_di);
1106 if (!list_empty(&lfsck->li_list_double_scan))
1107 rc = lfsck_double_scan(env, lfsck);
1111 lfsck_quit(env, lfsck);
1114 /* XXX: Purge the pinned objects in the future. */
/* Mark the thread stopped and wake any waiters before freeing args. */
1117 spin_lock(&lfsck->li_lock);
1118 thread_set_flags(thread, SVC_STOPPED);
1119 lfsck->li_task = NULL;
1120 spin_unlock(&lfsck->li_lock);
1121 wake_up_all(&thread->t_ctl_waitq);
1122 lfsck_thread_args_fini(lta);
/* Check, under lad_lock, whether the assistant request queue is empty. */
1126 static inline bool lfsck_assistant_req_empty(struct lfsck_assistant_data *lad)
1130 spin_lock(&lad->lad_lock);
1131 if (list_empty(&lad->lad_req_list))
1133 spin_unlock(&lad->lad_lock);
1139 * Query the LFSCK status from the instances on remote servers.
1141 * The LFSCK assistant thread queries the LFSCK instances on other
1142 * servers (MDT/OST) about their status, such as whether they have
1143 * finished the phase1/phase2 scanning or not, and so on.
1145 * \param[in] env pointer to the thread context
1146 * \param[in] com pointer to the lfsck component
1148 * \retval 0 for success
1149 * \retval negative error number on failure
/* NOTE(review): throughout this function "<ds->" and "<d->" appear to be
 * "&ltds->" and "&ltd->" mangled by HTML-entity decoding ("&lt" -> "<");
 * verify against upstream source before building. */
1151 static int lfsck_assistant_query_others(const struct lu_env *env,
1152 struct lfsck_component *com)
1154 struct lfsck_thread_info *info = lfsck_env_info(env);
1155 struct lfsck_request *lr = &info->lti_lr;
1156 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1157 struct lfsck_instance *lfsck = com->lc_lfsck;
1158 struct lfsck_assistant_data *lad = com->lc_data;
1159 struct ptlrpc_request_set *set;
1160 struct lfsck_tgt_descs *ltds;
1161 struct lfsck_tgt_desc *ltd;
1162 struct list_head *phase_head;
1167 set = ptlrpc_prep_set();
/* Bump the touch generation so each target is queried at most once. */
1171 lad->lad_touch_gen++;
1172 memset(lr, 0, sizeof(*lr));
1173 lr->lr_event = LE_QUERY;
1174 lr->lr_active = com->lc_type;
1176 memset(laia, 0, sizeof(*laia));
1177 laia->laia_com = com;
/* Query MDTs still in phase 1 first; otherwise fall back to the OSTs
 * (layout LFSCK only). */
1180 if (!list_empty(&lad->lad_mdt_phase1_list)) {
1181 ltds = &lfsck->li_mdt_descs;
1183 phase_head = &lad->lad_mdt_phase1_list;
1184 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1189 ltds = &lfsck->li_ost_descs;
1190 lr->lr_flags = LEF_TO_OST;
1191 phase_head = &lad->lad_ost_phase1_list;
1194 laia->laia_ltds = ltds;
1195 spin_lock(<ds->ltd_lock);
1196 while (!list_empty(phase_head)) {
1197 struct list_head *phase_list;
1200 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1201 ltd = list_entry(phase_head->next,
1202 struct lfsck_tgt_desc,
1203 ltd_layout_phase_list);
1204 phase_list = <d->ltd_layout_phase_list;
1205 gen = <d->ltd_layout_gen;
1207 ltd = list_entry(phase_head->next,
1208 struct lfsck_tgt_desc,
1209 ltd_namespace_phase_list);
1210 phase_list = <d->ltd_namespace_phase_list;
1211 gen = <d->ltd_namespace_gen;
/* Already queried in this generation: the list has wrapped around. */
1214 if (*gen == lad->lad_touch_gen)
1217 *gen = lad->lad_touch_gen;
1218 list_move_tail(phase_list, phase_head);
1219 atomic_inc(<d->ltd_ref);
1220 laia->laia_ltd = ltd;
/* Drop the lock while issuing the async RPC to this target. */
1221 spin_unlock(<ds->ltd_lock);
1222 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1223 lfsck_async_interpret_common,
1226 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to query "
1227 "%s %x for %s: rc = %d\n",
1228 lfsck_lfsck2name(lfsck),
1229 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1230 ltd->ltd_index, lad->lad_name, rc);
1234 spin_lock(<ds->ltd_lock);
1236 spin_unlock(<ds->ltd_lock);
/* Wait for all queued query RPCs to complete. */
1238 rc = ptlrpc_set_wait(env, set);
1240 ptlrpc_set_destroy(set);
/* Layout LFSCK: after the MDT list drains, go back and query the OSTs. */
1244 if (com->lc_type == LFSCK_TYPE_LAYOUT && !(lr->lr_flags & LEF_TO_OST) &&
1245 list_empty(&lad->lad_mdt_phase1_list))
1249 ptlrpc_set_destroy(set);
1251 RETURN(rc1 != 0 ? rc1 : rc);
1255 * Notify the LFSCK event to the instances on remote servers.
1257 * The LFSCK assistant thread notifies the LFSCK instances on other
1258 * servers (MDT/OST) about some events, such as start new scanning,
1259 * stop the scanning, this LFSCK instance will exit, and so on.
1261 * \param[in] env pointer to the thread context
1262 * \param[in] com pointer to the lfsck component
1263 * \param[in] lr pointer to the LFSCK event request
1265 * \retval 0 for success
1266 * \retval negative error number on failure
1268 static int lfsck_assistant_notify_others(const struct lu_env *env,
1269 struct lfsck_component *com,
1270 struct lfsck_request *lr)
/* NOTE(review): this extract has interior lines elided (braces, GOTOs,
 * some case labels) and HTML-mangled identifiers — sequences such as
 * "&ltds->" render here as "<ds->" because a literal "&lt" was consumed
 * by entity decoding.  Verify every "<d"/"<ds" token against the
 * canonical lfsck_engine.c before relying on exact syntax. */
1272 struct lfsck_thread_info *info = lfsck_env_info(env);
1273 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1274 struct lfsck_instance *lfsck = com->lc_lfsck;
1275 struct lfsck_assistant_data *lad = com->lc_data;
1276 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1277 struct ptlrpc_request_set *set;
1278 struct lfsck_tgt_descs *ltds;
1279 struct lfsck_tgt_desc *ltd;
1280 struct lfsck_tgt_desc *next;
/* RPC set used to issue all notifications asynchronously; it is waited
 * on and destroyed before this function returns. */
1286 set = ptlrpc_prep_set();
/* Stamp the request with the sender's device index and the component
 * type (layout/namespace) being notified about. */
1290 lr->lr_index = lfsck_dev_idx(lfsck);
1291 lr->lr_active = com->lc_type;
/* laia is the shared argument block passed to every async request's
 * lfsck_async_interpret_common() completion callback. */
1293 memset(laia, 0, sizeof(*laia));
1294 laia->laia_com = com;
/* Per-event dispatch; some case labels (e.g. the LE_START label that
 * presumably precedes the block below) are elided in this extract. */
1297 switch (lr->lr_event) {
1299 if (com->lc_type != LFSCK_TYPE_LAYOUT)
/* Start event: seed the request with the bookmarked tunables so remote
 * instances run with the same speed/param/window settings (grounded by
 * the "for %s start" message below). */
1302 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN;
1303 lr->lr_speed = bk->lb_speed_limit;
1304 lr->lr_version = bk->lb_version;
1305 lr->lr_param |= bk->lb_param;
1306 lr->lr_async_windows = bk->lb_async_windows;
1307 lr->lr_flags = LEF_TO_OST;
1309 /* Notify OSTs firstly, then handle other MDTs if needed. */
1310 ltds = &lfsck->li_ost_descs;
1311 laia->laia_ltds = ltds;
1312 down_read(<ds->ltd_rw_sem);
1313 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
/* presumably lfsck_tgt_get() takes a reference on the target —
 * TODO confirm where it is released (likely in the interpret callback
 * or an elided failure path). */
1314 ltd = lfsck_tgt_get(ltds, idx);
1315 LASSERT(ltd != NULL);
1317 laia->laia_ltd = ltd;
1318 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1319 lfsck_async_interpret_common,
1320 laia, LFSCK_NOTIFY);
/* On submission failure, remember this OST in the component's
 * bitmap and log the event. */
1322 lfsck_lad_set_bitmap(env, com, idx);
1323 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1324 "notify OST %x for %s start: rc = %d\n",
1325 lfsck_lfsck2name(lfsck), idx,
1330 up_read(<ds->ltd_rw_sem);
/* Flush the start notifications to the OSTs before touching MDTs. */
1333 rc = ptlrpc_set_wait(env, set);
1335 ptlrpc_set_destroy(set);
/* Only an "all targets" LFSCK involves the other MDTs. */
1340 if (!(bk->lb_param & LPF_ALL_TGT))
1343 /* link other MDT targets locally. */
1344 ltds = &lfsck->li_mdt_descs;
1345 spin_lock(<ds->ltd_lock);
1346 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1347 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1348 ltd = lfsck_ltd2tgt(ltds, idx);
1349 LASSERT(ltd != NULL);
/* Skip targets already linked on the layout list. */
1351 if (!list_empty(<d->ltd_layout_list))
1354 list_add_tail(<d->ltd_layout_list,
1355 &lad->lad_mdt_list);
1356 list_add_tail(<d->ltd_layout_phase_list,
1357 &lad->lad_mdt_phase1_list);
/* Namespace LFSCK: same linking via the namespace list heads. */
1360 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1361 ltd = lfsck_ltd2tgt(ltds, idx);
1362 LASSERT(ltd != NULL);
1364 if (!list_empty(<d->ltd_namespace_list))
1367 list_add_tail(<d->ltd_namespace_list,
1368 &lad->lad_mdt_list);
1369 list_add_tail(<d->ltd_namespace_phase_list,
1370 &lad->lad_mdt_phase1_list);
1373 spin_unlock(<ds->ltd_lock);
/* NOTE(review): an LE_STOP case label appears to be elided just above
 * these labels — the lr_event == LE_STOP test below implies this case
 * group also handles stop. */
1376 case LE_PHASE2_DONE:
1377 case LE_PEER_EXIT: {
1378 struct list_head *phase_head;
1380 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1381 if (bk->lb_param & LPF_ALL_TGT) {
1382 phase_head = &lad->lad_mdt_list;
1383 ltds = &lfsck->li_mdt_descs;
1384 if (lr->lr_event == LE_STOP) {
1385 /* unlink other MDT targets locally. */
1386 spin_lock(<ds->ltd_lock);
1387 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1388 list_for_each_entry_safe(ltd, next,
1389 phase_head, ltd_layout_list) {
1391 <d->ltd_layout_phase_list);
1393 <d->ltd_layout_list);
1396 list_for_each_entry_safe(ltd, next,
1398 ltd_namespace_list) {
1400 <d->ltd_namespace_phase_list);
1402 <d->ltd_namespace_list);
1405 spin_unlock(<ds->ltd_lock);
/* Non-layout components notify only MDTs for stop. */
1407 if (com->lc_type != LFSCK_TYPE_LAYOUT)
1410 lr->lr_flags |= LEF_TO_OST;
1411 phase_head = &lad->lad_ost_list;
1412 ltds = &lfsck->li_ost_descs;
1414 lr->lr_flags &= ~LEF_TO_OST;
1416 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1419 lr->lr_flags |= LEF_TO_OST;
1420 phase_head = &lad->lad_ost_list;
1421 ltds = &lfsck->li_ost_descs;
/* Drain phase_head one target at a time: unlink under ltd_lock, take
 * a reference, then drop the lock around the request submission. */
1425 laia->laia_ltds = ltds;
1426 spin_lock(<ds->ltd_lock);
1427 while (!list_empty(phase_head)) {
1428 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1429 ltd = list_entry(phase_head->next,
1430 struct lfsck_tgt_desc,
1432 if (!list_empty(<d->ltd_layout_phase_list))
1434 <d->ltd_layout_phase_list);
1435 list_del_init(<d->ltd_layout_list);
1437 ltd = list_entry(phase_head->next,
1438 struct lfsck_tgt_desc,
1439 ltd_namespace_list);
1440 if (!list_empty(<d->ltd_namespace_phase_list))
1442 <d->ltd_namespace_phase_list);
1443 list_del_init(<d->ltd_namespace_list);
1445 atomic_inc(<d->ltd_ref);
1446 laia->laia_ltd = ltd;
1447 spin_unlock(<ds->ltd_lock);
1448 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1449 lfsck_async_interpret_common,
1450 laia, LFSCK_NOTIFY);
1452 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1453 "notify %s %x for %s stop/phase2_done/"
1454 "peer_exit: rc = %d\n",
1455 lfsck_lfsck2name(lfsck),
1456 (lr->lr_flags & LEF_TO_OST) ?
1457 "OST" : "MDT", ltd->ltd_index,
1461 spin_lock(<ds->ltd_lock);
1463 spin_unlock(<ds->ltd_lock);
/* Wait for this batch before possibly switching lists. */
1465 rc = ptlrpc_set_wait(env, set);
1467 ptlrpc_set_destroy(set);
/* Layout LFSCK notified the MDTs first; now repeat for the OSTs. */
1471 if (com->lc_type == LFSCK_TYPE_LAYOUT &&
1472 !(lr->lr_flags & LEF_TO_OST)) {
1473 lr->lr_flags |= LEF_TO_OST;
1474 phase_head = &lad->lad_ost_list;
1475 ltds = &lfsck->li_ost_descs;
1480 case LE_PHASE1_DONE:
/* Phase-1 completion: push accumulated failure state to peers, then
 * walk lad_mdt_list exactly once, with lad_touch_gen acting as the
 * "already visited in this pass" generation marker. */
1481 lad->lad_ops->la_sync_failures(env, com, lr);
1482 lad->lad_touch_gen++;
1483 ltds = &lfsck->li_mdt_descs;
1484 laia->laia_ltds = ltds;
1485 spin_lock(<ds->ltd_lock);
1486 while (!list_empty(&lad->lad_mdt_list)) {
1487 struct list_head *list;
1490 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1491 ltd = list_entry(lad->lad_mdt_list.next,
1492 struct lfsck_tgt_desc,
1494 list = <d->ltd_layout_list;
1495 gen = <d->ltd_layout_gen;
1497 struct lfsck_namespace *ns = com->lc_file_ram;
1499 ltd = list_entry(lad->lad_mdt_list.next,
1500 struct lfsck_tgt_desc,
1501 ltd_namespace_list);
1502 list = <d->ltd_namespace_list;
1503 gen = <d->ltd_namespace_gen;
/* LF_INCOMPLETE is local state; do not propagate it. */
1504 lr->lr_flags2 = ns->ln_flags & ~LF_INCOMPLETE;
/* Generation already current => every target was visited. */
1507 if (*gen == lad->lad_touch_gen)
1510 *gen = lad->lad_touch_gen;
/* Rotate the entry to the tail so the loop terminates after one
 * full pass over the list. */
1511 list_move_tail(list, &lad->lad_mdt_list);
1512 if (ltd->ltd_synced_failures)
1515 atomic_inc(<d->ltd_ref);
1516 laia->laia_ltd = ltd;
1517 spin_unlock(<ds->ltd_lock);
1518 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1519 lfsck_async_interpret_common,
1520 laia, LFSCK_NOTIFY);
1522 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1523 "notify MDT %x for %s phase1 done: "
1524 "rc = %d\n", lfsck_lfsck2name(lfsck),
1525 ltd->ltd_index, lad->lad_name, rc);
1528 spin_lock(<ds->ltd_lock);
1530 spin_unlock(<ds->ltd_lock);
/* presumably the default: branch — logs the unexpected event value
 * (note lr_event is printed via the "rc = %d" conversion). */
1533 CDEBUG(D_LFSCK, "%s: LFSCK assistant unexpected LFSCK event: "
1534 "rc = %d\n", lfsck_lfsck2name(lfsck), lr->lr_event);
/* Common tail: wait for any remaining notification RPCs and release
 * the set.  An earlier error (rc) takes precedence over the wait
 * result (rc1). */
1539 rc1 = ptlrpc_set_wait(env, set);
1540 ptlrpc_set_destroy(set);
1542 RETURN(rc != 0 ? rc : rc1);
1546 * The LFSCK assistant thread is triggered by the LFSCK main engine.
1547 * They co-work together as an asynchronous pipeline: the LFSCK main
1548 * engine scans the system and pre-fetches the objects, attributes,
1549 * or name entries, etc, and pushes them into the pipeline as input
1550 * requests for the LFSCK assistant thread; on the other end of the
1551 * pipeline, the LFSCK assistant thread performs the real check and
1552 * repair for every request from the main engine.
1554 * Generally, the assistant engine may be blocked when checking/repairing
1555 * something, so the LFSCK main engine will run somewhat faster. On the
1556 * other hand, the LFSCK main engine will drive multiple assistant
1557 * threads in parallel, meaning for each LFSCK component on the master
1558 * (such as layout LFSCK, namespace LFSCK), there is an independent
1559 * LFSCK assistant thread. So under such 1:N multiple asynchronous
1560 * pipelines mode, the whole LFSCK performance will be much better
1561 * than check/repair everything by the LFSCK main engine itself.
1563 int lfsck_assistant_engine(void *args)
1565 struct lfsck_thread_args *lta = args;
1566 struct lu_env *env = <a->lta_env;
1567 struct lfsck_component *com = lta->lta_com;
1568 struct lfsck_instance *lfsck = lta->lta_lfsck;
1569 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1570 struct lfsck_position *pos = &com->lc_pos_start;
1571 struct lfsck_thread_info *info = lfsck_env_info(env);
1572 struct lfsck_request *lr = &info->lti_lr;
1573 struct lfsck_assistant_data *lad = com->lc_data;
1574 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1575 struct ptlrpc_thread *athread = &lad->lad_thread;
1576 struct lfsck_assistant_operations *lao = lad->lad_ops;
1577 struct lfsck_assistant_req *lar;
1578 struct l_wait_info lwi = { 0 };
1584 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread start\n",
1585 lfsck_lfsck2name(lfsck), lad->lad_name);
1587 memset(lr, 0, sizeof(*lr));
1588 lr->lr_event = LE_START;
1589 if (pos->lp_oit_cookie <= 1)
1590 lr->lr_param = LPF_RESET;
1591 rc = lfsck_assistant_notify_others(env, com, lr);
1593 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to notify others "
1594 "to start %s: rc = %d\n",
1595 lfsck_lfsck2name(lfsck), lad->lad_name, rc);
1599 spin_lock(&lad->lad_lock);
1600 lad->lad_task = current;
1601 thread_set_flags(athread, SVC_RUNNING);
1602 spin_unlock(&lad->lad_lock);
1603 wake_up_all(&mthread->t_ctl_waitq);
1606 while (!list_empty(&lad->lad_req_list)) {
1607 bool wakeup = false;
1609 if (unlikely(lad->lad_exit ||
1610 !thread_is_running(mthread)))
1611 GOTO(cleanup, rc = lad->lad_post_result);
1613 lar = list_entry(lad->lad_req_list.next,
1614 struct lfsck_assistant_req,
1616 /* Only the lfsck_assistant_engine thread itself can
1617 * remove the "lar" from the head of the list, LFSCK
1618 * engine thread only inserts other new "lar" at the
1619 * end of the list. So it is safe to handle current
1620 * "lar" without the spin_lock. */
1621 rc = lao->la_handler_p1(env, com, lar);
1622 spin_lock(&lad->lad_lock);
1623 list_del_init(&lar->lar_list);
1624 lad->lad_prefetched--;
1625 /* Wake up the main engine thread only when the list
1626 * is empty or half of the prefetched items have been
1627 * handled to avoid too frequent thread schedule. */
1628 if (lad->lad_prefetched <= (bk->lb_async_windows / 2))
1630 spin_unlock(&lad->lad_lock);
1632 wake_up_all(&mthread->t_ctl_waitq);
1634 lao->la_req_fini(env, lar);
1635 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
1639 l_wait_event(athread->t_ctl_waitq,
1640 !lfsck_assistant_req_empty(lad) ||
1643 lad->lad_to_double_scan,
1646 if (unlikely(lad->lad_exit))
1647 GOTO(cleanup, rc = lad->lad_post_result);
1649 if (!list_empty(&lad->lad_req_list))
1652 if (lad->lad_to_post) {
1653 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread post\n",
1654 lfsck_lfsck2name(lfsck), lad->lad_name);
1656 if (unlikely(lad->lad_exit))
1657 GOTO(cleanup, rc = lad->lad_post_result);
1659 lad->lad_to_post = 0;
1660 LASSERT(lad->lad_post_result > 0);
1662 /* Wakeup the master engine to go ahead. */
1663 wake_up_all(&mthread->t_ctl_waitq);
1665 memset(lr, 0, sizeof(*lr));
1666 lr->lr_event = LE_PHASE1_DONE;
1667 lr->lr_status = lad->lad_post_result;
1668 rc = lfsck_assistant_notify_others(env, com, lr);
1670 CDEBUG(D_LFSCK, "%s: LFSCK assistant notified "
1671 "others for %s post: rc = %d\n",
1672 lfsck_lfsck2name(lfsck),
1676 if (lad->lad_to_double_scan) {
1677 lad->lad_to_double_scan = 0;
1678 atomic_inc(&lfsck->li_double_scan_count);
1679 lad->lad_in_double_scan = 1;
1680 wake_up_all(&mthread->t_ctl_waitq);
1682 com->lc_new_checked = 0;
1683 com->lc_new_scanned = 0;
1684 com->lc_time_last_checkpoint = ktime_get_seconds();
1685 com->lc_time_next_checkpoint =
1686 com->lc_time_last_checkpoint +
1687 LFSCK_CHECKPOINT_INTERVAL;
1689 CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before "
1690 "the second-stage scaning\n",
1691 lfsck_lfsck2name(lfsck));
1693 /* Flush async updates before handling orphan. */
1694 rc2 = dt_sync(env, lfsck->li_next);
1696 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 "
1697 "scan start, synced: rc = %d\n",
1698 lfsck_lfsck2name(lfsck), rc2);
1700 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_DOUBLESCAN))
1701 GOTO(cleanup, rc = 0);
1703 while (lad->lad_in_double_scan) {
1704 rc = lfsck_assistant_query_others(env, com);
1705 if (lfsck_phase2_next_ready(lad))
1711 /* Pull LFSCK status on related targets once
1712 * per 30 seconds if we are not notified. */
1713 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
1714 cfs_time_seconds(1),
1716 rc = l_wait_event(athread->t_ctl_waitq,
1717 lfsck_phase2_next_ready(lad) ||
1719 !thread_is_running(mthread),
1722 if (unlikely(lad->lad_exit ||
1723 !thread_is_running(mthread)))
1724 GOTO(cleanup, rc = 0);
1726 if (rc == -ETIMEDOUT)
1733 rc = lao->la_handler_p2(env, com);
1737 if (unlikely(lad->lad_exit ||
1738 !thread_is_running(mthread)))
1739 GOTO(cleanup, rc = 0);
1745 /* Cleanup the unfinished requests. */
1746 spin_lock(&lad->lad_lock);
1748 lad->lad_assistant_status = rc;
1750 if (lad->lad_exit && lad->lad_post_result <= 0)
1751 lao->la_fill_pos(env, com, &lfsck->li_pos_checkpoint);
1753 thread_set_flags(athread, SVC_STOPPING);
1754 while (!list_empty(&lad->lad_req_list)) {
1755 lar = list_entry(lad->lad_req_list.next,
1756 struct lfsck_assistant_req,
1758 list_del_init(&lar->lar_list);
1759 lad->lad_prefetched--;
1760 spin_unlock(&lad->lad_lock);
1761 lao->la_req_fini(env, lar);
1762 spin_lock(&lad->lad_lock);
1764 spin_unlock(&lad->lad_lock);
1766 memset(lr, 0, sizeof(*lr));
1768 lr->lr_event = LE_PHASE2_DONE;
1770 } else if (rc == 0) {
1771 if (lfsck->li_flags & LPF_ALL_TGT) {
1772 lr->lr_event = LE_STOP;
1773 lr->lr_status = LS_STOPPED;
1775 lr->lr_event = LE_PEER_EXIT;
1776 switch (lfsck->li_status) {
1779 lr->lr_status = LS_CO_PAUSED;
1783 lr->lr_status = LS_CO_STOPPED;
1786 CDEBUG(D_LFSCK, "%s: LFSCK assistant unknown "
1787 "status: rc = %d\n",
1788 lfsck_lfsck2name(lfsck),
1790 lr->lr_status = LS_CO_FAILED;
1795 if (lfsck->li_flags & LPF_ALL_TGT) {
1796 lr->lr_event = LE_STOP;
1797 lr->lr_status = LS_FAILED;
1799 lr->lr_event = LE_PEER_EXIT;
1800 lr->lr_status = LS_CO_FAILED;
1804 rc1 = lfsck_assistant_notify_others(env, com, lr);
1806 CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to notify "
1807 "others for %s quit: rc = %d\n",
1808 lfsck_lfsck2name(lfsck), lad->lad_name, rc1);
1812 CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before exit\n",
1813 lfsck_lfsck2name(lfsck));
1815 /* Flush async updates before exit. */
1816 rc2 = dt_sync(env, lfsck->li_next);
1818 CDEBUG(D_LFSCK, "%s: LFSCK assistant synced before exit: rc = %d\n",
1819 lfsck_lfsck2name(lfsck), rc2);
1821 /* Under the force-exit case, some requests may be freed without
1822 * verification; those objects should be re-handled on the next run,
1823 * so do not update the on-disk trace file in that case. */
1824 if (lad->lad_in_double_scan) {
1826 rc1 = lao->la_double_scan_result(env, com, rc);
1828 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 scan "
1829 "finished: rc = %d\n",
1830 lfsck_lfsck2name(lfsck), rc1 != 0 ? rc1 : rc);
1834 if (lad->lad_in_double_scan)
1835 atomic_dec(&lfsck->li_double_scan_count);
1837 spin_lock(&lad->lad_lock);
1838 lad->lad_assistant_status = (rc1 != 0 ? rc1 : rc);
1839 thread_set_flags(athread, SVC_STOPPED);
1840 wake_up_all(&mthread->t_ctl_waitq);
1841 lad->lad_task = NULL;
1842 spin_unlock(&lad->lad_lock);
1844 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread exit: rc = %d\n",
1845 lfsck_lfsck2name(lfsck), lad->lad_name,
1846 lad->lad_assistant_status);
1848 lfsck_thread_args_fini(lta);