4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, 2016, Intel Corporation.
26 * lustre/lfsck/lfsck_engine.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <lu_object.h>
34 #include <dt_object.h>
35 #include <lustre_net.h>
36 #include <lustre_fid.h>
37 #include <obd_support.h>
38 #include <lustre_lib.h>
40 #include "lfsck_internal.h"
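/*
 * Unpack an on-disk directory entry for the LFSCK engine.
 *
 * Convert the little-endian fields of @ent into CPU byte order, return the
 * directory hash in @cookie and the entry type (taken from the struct
 * luda_type record stored after the name) in @type, and terminate the entry
 * name with '\0'.
 */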
42 int lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie, __u16 *type)
45 int align = sizeof(*lt) - 1;
48 fid_le_to_cpu(&ent->lde_fid, &ent->lde_fid);
49 *cookie = le64_to_cpu(ent->lde_hash);
50 ent->lde_reclen = le16_to_cpu(ent->lde_reclen);
51 ent->lde_namelen = le16_to_cpu(ent->lde_namelen);
52 ent->lde_attrs = le32_to_cpu(ent->lde_attrs);
54 if (unlikely(!(ent->lde_attrs & LUDA_TYPE)))
57 len = (ent->lde_namelen + align) & ~align;
58 lt = (struct luda_type *)(ent->lde_name + len);
59 *type = le16_to_cpu(lt->lt_type);
61	/* Make sure the name is terminated with '\0'. The data (object type)
62	 * after ent::lde_name may be broken, but we have already stored it in
63	 * the output parameter @type above. */
64 ent->lde_name[ent->lde_namelen] = '\0';
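/*
 * Detach the otable-based iterator from the LFSCK instance.
 *
 * Clear lfsck::li_di_oit under li_lock so that other threads no longer see
 * the iterator; the iterator reference is then dropped through the index
 * dt_it_ops (presumably via iops->put()).
 */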
69 static void lfsck_di_oit_put(const struct lu_env *env, struct lfsck_instance *lfsck)
71 const struct dt_it_ops *iops;
74 spin_lock(&lfsck->li_lock);
75 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
76 di = lfsck->li_di_oit;
77 lfsck->li_di_oit = NULL;
78 spin_unlock(&lfsck->li_lock);
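/*
 * Detach the directory iterator from the LFSCK instance.
 *
 * Clear lfsck::li_di_dir and reset li_cookie_dir under li_lock; the iterator
 * reference is then dropped through the directory dt_it_ops (presumably via
 * iops->put()).
 */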
82 static void lfsck_di_dir_put(const struct lu_env *env, struct lfsck_instance *lfsck)
84 const struct dt_it_ops *iops;
87 spin_lock(&lfsck->li_lock);
88 iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
89 di = lfsck->li_di_dir;
90 lfsck->li_di_dir = NULL;
91 lfsck->li_cookie_dir = 0;
92 spin_unlock(&lfsck->li_lock);
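/*
 * Get the FID of the parent directory.
 *
 * Look up the ".." name entry in @obj and store the result in @fid. This
 * fails if @obj is not a directory or cannot be accessed as an index.
 */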
96 static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
99 if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
100 !dt_try_as_dir(env, obj)))
103 return dt_lookup(env, obj, (struct dt_rec *)fid,
104 (const struct dt_key *)"..");
108  * Check whether the directory needs to be scanned or not.
110  * 1) If we are not doing namespace LFSCK, or the given @obj is not a
111  *    directory, then the @obj does not need to be scanned. Otherwise,
112  * 2) The global /ROOT needs to be scanned; the backend root does not.
113  * 3) If the @obj has neither an IGIF nor a normal FID (this covers .lustre
114  *    and its sub-directories, which are scanned when the LFSCK engine
115  *    starts), then it does not need to be scanned.
116  * 4) If it is a remote object, then scanning the object will be done on the
117  *    MDT on which the object really resides.
118  * 5) If the local object has a normal FID, then it needs to be scanned.
119  * 6) Otherwise, if the object has a linkEA, then it needs to be scanned.
120  * 7) Otherwise, check the parent directories to see whether this
121  *    subdirectory is in a tree that should be scanned. Set the parent as
122  *    the current @obj and repeat 2)-7).
124 * \param[in] env pointer to the thread context
125 * \param[in] lfsck pointer to the lfsck instance
126 * \param[in] obj pointer to the object to be checked
128 * \retval positive number if the directory needs to be scanned
129 * \retval 0 if the directory needs NOT to be scanned
130 * \retval negative error number on failure
132 static int lfsck_needs_scan_dir(const struct lu_env *env,
133 struct lfsck_instance *lfsck,
134 struct dt_object *obj)
136 struct lfsck_thread_info *info = lfsck_env_info(env);
137 struct lu_fid *fid = &info->lti_fid;
138 struct lu_seq_range *range = &info->lti_range;
139 struct seq_server_site *ss = lfsck_dev_site(lfsck);
140 __u32 idx = lfsck_dev_idx(lfsck);
144 if (list_empty(&lfsck->li_list_dir) || !S_ISDIR(lfsck_object_type(obj)))
149 *fid = *lfsck_dto2fid(obj);
151 /* Global /ROOT is visible. */
152 if (unlikely(lu_fid_eq(fid, &lfsck->li_global_root_fid)))
155 /* Backend root is invisible. */
156 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
159 if (!fid_is_norm(fid) && !fid_is_igif(fid))
162 fld_range_set_mdt(range);
163 rc = fld_local_lookup(env, ss->ss_server_fld,
164 fid_seq(fid), range);
165 if (rc != 0 || range->lsr_index != idx)
166		/* The current FID should NOT be the one of the input
167		 * parameter @obj, because lfsck_master_oit_engine() has
168		 * already filtered out agent objects. So the current FID
169		 * belongs to an ancestor of the original input parameter
170		 * @obj, and that ancestor is a remote directory. The input
171		 * parameter @obj itself is a local directory and should be
172		 * scanned in such a case. */
175	/* A normal FID on this target (locally) must belong to a
176	 * client-side visible object. */
177 if (fid_is_norm(fid))
181 obj = lfsck_object_find_bottom(env, lfsck, fid);
186 if (!dt_object_exists(obj))
190 dt_read_lock(env, obj, MOR_TGT_CHILD);
191 if (unlikely(lfsck_is_dead_obj(obj))) {
192 dt_read_unlock(env, obj);
197 rc = dt_xattr_get(env, obj,
198 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK);
199 dt_read_unlock(env, obj);
203 if (rc < 0 && rc != -ENODATA)
206 rc = lfsck_parent_fid(env, obj, fid);
208 lfsck_object_put(env, obj);
214 if (!fid_is_sane(fid))
219 if (depth > 0 && obj != NULL)
220 lfsck_object_put(env, obj);
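/*
 * Load the LMV EA of the directory and set up the striped-dir state.
 *
 * If @obj has no LMV EA (-ENODATA), simply take a reference on it as the
 * current directory to be scanned. For a master LMV (LMV_MAGIC), allocate
 * a struct lfsck_lmv with one lfsck_slave_lmv_rec slot per stripe, using
 * LFSCK_LMV_DEF_STRIPES when the stripe count is not set and capping it at
 * LFSCK_LMV_MAX_STRIPES; otherwise mark the new lfsck_lmv as a slave. The
 * result is published through lfsck::li_lmv and lfsck::li_obj_dir.
 */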
225 static int lfsck_load_stripe_lmv(const struct lu_env *env,
226 struct lfsck_instance *lfsck,
227 struct dt_object *obj)
229 struct lmv_mds_md_v1 *lmv = &lfsck_env_info(env)->lti_lmv;
230 struct lfsck_lmv *llmv;
234 LASSERT(lfsck->li_obj_dir == NULL);
235 LASSERT(lfsck->li_lmv == NULL);
237 rc = lfsck_read_stripe_lmv(env, obj, lmv);
238 if (rc == -ENODATA) {
239 lfsck->li_obj_dir = lfsck_object_get(obj);
251 if (lmv->lmv_magic == LMV_MAGIC) {
252 struct lfsck_slave_lmv_rec *lslr;
255 llmv->ll_lmv_master = 1;
256 if (lmv->lmv_stripe_count < 1)
257 stripes = LFSCK_LMV_DEF_STRIPES;
258 else if (lmv->lmv_stripe_count > LFSCK_LMV_MAX_STRIPES)
259 stripes = LFSCK_LMV_MAX_STRIPES;
261 stripes = lmv->lmv_stripe_count;
263 OBD_ALLOC_LARGE(lslr, sizeof(*lslr) * stripes);
270 llmv->ll_stripes_allocated = stripes;
271 llmv->ll_hash_type = LMV_HASH_TYPE_UNKNOWN;
272 llmv->ll_lslr = lslr;
274 llmv->ll_lmv_slave = 1;
277 lfsck->li_obj_dir = lfsck_object_get(obj);
279 atomic_set(&llmv->ll_ref, 1);
280 lfsck->li_lmv = llmv;
285 /* LFSCK wrap functions */
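/* Propagate a processing failure at the current position to every component
 * on the scan list. */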
287 static void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
290 struct lfsck_component *com;
292 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
293 com->lc_ops->lfsck_fail(env, com, new_checked);
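/*
 * Finish the traversal of the current directory.
 *
 * Record @result as the exit value of the striped-dir state (if any), call
 * the lfsck_close_dir method of every component on the directory list,
 * release the lfsck_lmv, finalize the directory iterator, and drop the
 * reference on lfsck::li_obj_dir.
 */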
297 void lfsck_close_dir(const struct lu_env *env,
298 struct lfsck_instance *lfsck, int result)
300 struct lfsck_component *com;
303 if (lfsck->li_lmv != NULL) {
304 lfsck->li_lmv->ll_exit_value = result;
305 if (lfsck->li_obj_dir != NULL) {
306 list_for_each_entry(com, &lfsck->li_list_dir,
308 com->lc_ops->lfsck_close_dir(env, com);
312 lfsck_lmv_put(env, lfsck->li_lmv);
313 lfsck->li_lmv = NULL;
316 if (lfsck->li_di_dir != NULL) {
317 const struct dt_it_ops *dir_iops;
318 struct dt_it *dir_di = lfsck->li_di_dir;
320 LASSERT(lfsck->li_obj_dir != NULL);
322 dir_iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
323 lfsck_di_dir_put(env, lfsck);
324 dir_iops->fini(env, dir_di);
327 if (lfsck->li_obj_dir != NULL) {
328 struct dt_object *dir_obj = lfsck->li_obj_dir;
330 lfsck->li_obj_dir = NULL;
331 lfsck_object_put(env, dir_obj);
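/*
 * Start the namespace traversal of lfsck::li_obj_dir.
 *
 * Call the lfsck_open_dir method of every component on the directory list,
 * initialize a directory iterator with li_args_dir, load it at @cookie
 * (stepping to the next entry when resuming from a stored cookie), and
 * publish it as lfsck::li_di_dir under li_lock. On failure the directory is
 * closed again via lfsck_close_dir().
 */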
337 int lfsck_open_dir(const struct lu_env *env,
338 struct lfsck_instance *lfsck, __u64 cookie)
340 struct dt_object *obj = lfsck->li_obj_dir;
341 struct dt_it *di = lfsck->li_di_dir;
342 struct lfsck_component *com;
343 const struct dt_it_ops *iops;
347 LASSERT(obj != NULL);
350 if (unlikely(!dt_try_as_dir(env, obj)))
351 GOTO(out, rc = -ENOTDIR);
353 list_for_each_entry(com, &lfsck->li_list_dir, lc_link_dir) {
354 rc = com->lc_ops->lfsck_open_dir(env, com);
359 iops = &obj->do_index_ops->dio_it;
360 di = iops->init(env, obj, lfsck->li_args_dir);
362 GOTO(out, rc = PTR_ERR(di));
364 rc = iops->load(env, di, cookie);
365 if (rc == 0 || (rc > 0 && cookie > 0))
366 rc = iops->next(env, di);
374 lfsck->li_cookie_dir = iops->store(env, di);
375 spin_lock(&lfsck->li_lock);
376 lfsck->li_di_dir = di;
377 spin_unlock(&lfsck->li_lock);
384 lfsck_close_dir(env, lfsck, rc);
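/*
 * Make a checkpoint if the checkpoint interval has expired.
 *
 * Refresh lfsck::li_pos_checkpoint, ask every component on the scan list to
 * write its checkpoint, and schedule the time of the next checkpoint.
 */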
389 static int lfsck_checkpoint(const struct lu_env *env,
390 struct lfsck_instance *lfsck)
392 struct lfsck_component *com;
396 if (likely(cfs_time_beforeq(cfs_time_current(),
397 lfsck->li_time_next_checkpoint)))
400 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
401 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
402 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
407 lfsck->li_time_last_checkpoint = cfs_time_current();
408 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
409 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
410 return rc1 != 0 ? rc1 : rc;
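/*
 * Prepare the registered components for a new scan.
 *
 * Call the lfsck_prep method of every component on the scan list and work
 * out the position from which the scan should resume, then load the
 * otable-based iterator at that position. On the master, if the position
 * records a directory being traversed, find that directory, reload its
 * striped-dir (LMV) state and reopen it so that the namespace traversal can
 * resume as well; finally take the initial checkpoint for the new scan.
 */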
413 static int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
414 struct lfsck_start_param *lsp)
416 struct dt_object *obj = NULL;
417 struct lfsck_component *com;
418 struct lfsck_component *next;
419 struct lfsck_position *pos = NULL;
420 const struct dt_it_ops *iops =
421 &lfsck->li_obj_oit->do_index_ops->dio_it;
425 LASSERT(lfsck->li_obj_dir == NULL);
426 LASSERT(lfsck->li_di_dir == NULL);
428 lfsck->li_current_oit_processed = 0;
429 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
430 com->lc_new_checked = 0;
431 rc = com->lc_ops->lfsck_prep(env, com, lsp);
436 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
437 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
438 pos = &com->lc_pos_start;
441 /* Init otable-based iterator. */
443 rc = iops->load(env, lfsck->li_di_oit, 0);
445 lfsck->li_oit_over = 1;
452 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
456 lfsck->li_oit_over = 1;
458 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
461	/* Find the directory for the namespace-based traversal. */
462 obj = lfsck_object_find_bottom(env, lfsck, &pos->lp_dir_parent);
464 RETURN(PTR_ERR(obj));
466	/* A remote directory will be scanned by the LFSCK instance
467	 * on the MDT where the remote object really resides. */
468 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
469 unlikely(!S_ISDIR(lfsck_object_type(obj))))
472 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
474 /* For the master MDT-object of a striped directory,
475 * reset the iteration from the directory beginning. */
476 if (lfsck->li_lmv != NULL && lfsck->li_lmv->ll_lmv_master)
477 pos->lp_dir_cookie = 0;
479 rc = lfsck_open_dir(env, lfsck, pos->lp_dir_cookie);
481 /* The end of the directory. */
489 lfsck_object_put(env, obj);
492 lfsck_close_dir(env, lfsck, rc);
493 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
495 com->lc_ops->lfsck_post(env, com, rc, true);
502 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, true);
503 lfsck->li_pos_current = lfsck->li_pos_checkpoint;
504 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
505 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
510 lfsck->li_time_last_checkpoint = cfs_time_current();
511 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
512 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
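/*
 * Feed one object from the otable-based iteration to the components.
 *
 * Call the lfsck_exec_oit method of every component on the scan list. If the
 * object is a directory that needs to be scanned (lfsck_needs_scan_dir()),
 * load its striped-dir state and open it for the namespace traversal.
 */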
516 static int lfsck_exec_oit(const struct lu_env *env,
517 struct lfsck_instance *lfsck, struct dt_object *obj)
519 struct lfsck_component *com;
523 LASSERT(lfsck->li_obj_dir == NULL);
525 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
526 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
531 rc = lfsck_needs_scan_dir(env, lfsck, obj);
535 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
537 rc = lfsck_open_dir(env, lfsck, 0);
543 lfsck_fail(env, lfsck, false);
546 lfsck_close_dir(env, lfsck, rc);
548 return rc > 0 ? 0 : rc;
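/* Feed one name entry of the current directory to the components by calling
 * the lfsck_exec_dir method of every component on the scan list. */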
551 static int lfsck_exec_dir(const struct lu_env *env,
552 struct lfsck_instance *lfsck,
553 struct lfsck_assistant_object *lso,
554 struct lu_dirent *ent, __u16 type)
556 struct lfsck_component *com;
559 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
560 rc = com->lc_ops->lfsck_exec_dir(env, com, lso, ent, type);
567 static int lfsck_master_dir_engine(const struct lu_env *env,
568 struct lfsck_instance *lfsck);
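/*
 * Post-process the first-stage scan.
 *
 * Close the current directory, drain the pending striped-directory units on
 * lfsck::li_list_lmv by opening and scanning each of them, call the
 * lfsck_post method of every component on the scan list, and schedule the
 * next checkpoint time.
 */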
570 static int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
573 struct lfsck_component *com;
574 struct lfsck_component *next;
577 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
578 lfsck_close_dir(env, lfsck, result);
580 while (thread_is_running(&lfsck->li_thread) && rc > 0 &&
581 !list_empty(&lfsck->li_list_lmv)) {
582 struct lfsck_lmv_unit *llu;
584 spin_lock(&lfsck->li_lock);
585 llu = list_entry(lfsck->li_list_lmv.next,
586 struct lfsck_lmv_unit, llu_link);
587 list_del_init(&llu->llu_link);
588 spin_unlock(&lfsck->li_lock);
590 lfsck->li_lmv = &llu->llu_lmv;
591 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
592 rc = lfsck_open_dir(env, lfsck, 0);
594 rc = lfsck_master_dir_engine(env, lfsck);
595 lfsck_close_dir(env, lfsck, result);
601 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
602 rc = com->lc_ops->lfsck_post(env, com, result, false);
604 CDEBUG(D_LFSCK, "%s: lfsck_post at the component %u: "
605 "rc = %d\n", lfsck_lfsck2name(lfsck),
606 (__u32)com->lc_type, rc);
609 lfsck->li_time_last_checkpoint = cfs_time_current();
610 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
611 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
613	/* Ignore failures from individual component post handlers so that the others can go ahead. */
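/*
 * Run the second-stage (double) scan.
 *
 * Call the lfsck_double_scan method of every component on the double-scan
 * list, wait until li_double_scan_count drops to zero, and move the
 * components back to the idle list unless the LFSCK was paused or co-paused.
 */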
617 static int lfsck_double_scan(const struct lu_env *env,
618 struct lfsck_instance *lfsck)
620 struct lfsck_component *com;
621 struct lfsck_component *next;
622 struct l_wait_info lwi = { 0 };
626 list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
627 rc = com->lc_ops->lfsck_double_scan(env, com);
632 l_wait_event(lfsck->li_thread.t_ctl_waitq,
633 atomic_read(&lfsck->li_double_scan_count) == 0,
636 if (lfsck->li_status != LS_PAUSED &&
637 lfsck->li_status != LS_CO_PAUSED) {
638 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
640 spin_lock(&lfsck->li_lock);
641 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
642 spin_unlock(&lfsck->li_lock);
646 return rc1 != 0 ? rc1 : rc;
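/*
 * Quit the LFSCK.
 *
 * Call the lfsck_quit method (if any) of every component on the scan list
 * and on the double-scan list, then move the components back to the idle
 * list.
 */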
649 static void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
651 struct lfsck_component *com;
652 struct lfsck_component *next;
654 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
656 if (com->lc_ops->lfsck_quit != NULL)
657 com->lc_ops->lfsck_quit(env, com);
659 spin_lock(&lfsck->li_lock);
660 list_del_init(&com->lc_link_dir);
661 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
662 spin_unlock(&lfsck->li_lock);
665 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
667 if (com->lc_ops->lfsck_quit != NULL)
668 com->lc_ops->lfsck_quit(env, com);
670 spin_lock(&lfsck->li_lock);
671 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
672 spin_unlock(&lfsck->li_lock);
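/*
 * Namespace-based directory traversal engine.
 *
 * Walk the name entries of lfsck::li_obj_dir with the directory iterator:
 * unpack each entry, build an lfsck_assistant_object for the parent and call
 * lfsck_exec_dir() on it, making checkpoints and applying speed control
 * periodically, until the directory is exhausted, a fatal error occurs, or
 * the engine is asked to stop.
 */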
678 static int lfsck_master_dir_engine(const struct lu_env *env,
679 struct lfsck_instance *lfsck)
681 struct lfsck_thread_info *info = lfsck_env_info(env);
682 struct dt_object *dir = lfsck->li_obj_dir;
683 const struct dt_it_ops *iops = &dir->do_index_ops->dio_it;
684 struct dt_it *di = lfsck->li_di_dir;
685 struct lu_dirent *ent =
686 (struct lu_dirent *)info->lti_key;
687 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
688 struct ptlrpc_thread *thread = &lfsck->li_thread;
689 struct lfsck_assistant_object *lso = NULL;
695 if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY2, cfs_fail_val) &&
696 unlikely(!thread_is_running(thread))) {
697 CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
698 "parent "DFID", cookie %#llx\n",
699 lfsck_lfsck2name(lfsck),
700 PFID(lfsck_dto2fid(dir)), lfsck->li_cookie_dir);
705 lfsck->li_new_scanned++;
706 rc = iops->rec(env, di, (struct dt_rec *)ent,
709 rc = lfsck_unpack_ent(ent, &lfsck->li_cookie_dir,
713 CDEBUG(D_LFSCK, "%s: scan dir failed at rec(), "
714 "parent "DFID", cookie %#llx: rc = %d\n",
715 lfsck_lfsck2name(lfsck),
716 PFID(lfsck_dto2fid(dir)),
717 lfsck->li_cookie_dir, rc);
718 lfsck_fail(env, lfsck, true);
719 if (bk->lb_param & LPF_FAILOUT)
725 if (ent->lde_attrs & LUDA_IGNORE)
728 /* skip dot entry. */
729 if (ent->lde_namelen == 1 && ent->lde_name[0] == '.')
733 lso = lfsck_assistant_object_init(env,
734 lfsck_dto2fid(dir), NULL,
735 lfsck->li_pos_current.lp_oit_cookie, true);
737 if (bk->lb_param & LPF_FAILOUT)
738 RETURN(PTR_ERR(lso));
745		/* The type in the @ent structure may have been overwritten,
746		 * so we need to pass the @type parameter independently. */
747 rc = lfsck_exec_dir(env, lfsck, lso, ent, type);
748 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
752 rc = lfsck_checkpoint(env, lfsck);
753 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
757 lfsck_control_speed(lfsck);
758 if (unlikely(!thread_is_running(thread))) {
759 CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
760 "parent "DFID", cookie %#llx\n",
761 lfsck_lfsck2name(lfsck),
762 PFID(lfsck_dto2fid(dir)),
763 lfsck->li_cookie_dir);
767 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL2)) {
768 spin_lock(&lfsck->li_lock);
769 thread_set_flags(thread, SVC_STOPPING);
770 spin_unlock(&lfsck->li_lock);
771 GOTO(out, rc = -EINVAL);
774 rc = iops->next(env, di);
776 CDEBUG(D_LFSCK, "%s dir engine fail to locate next "
777 "for the directory "DFID": rc = %d\n",
778 lfsck_lfsck2name(lfsck),
779 PFID(&lfsck->li_pos_current.lp_dir_parent), rc);
782 if (rc > 0 && !lfsck->li_oit_over)
783 lfsck_close_dir(env, lfsck, rc);
789 lfsck_assistant_object_put(env, lso);
795 * Object-table based iteration engine.
797  * Object-table based iteration is the basic linear engine that scans all
798  * the objects on the current device in turn. For each object, it calls the
799  * registered LFSCK component(s)' API to perform the related consistency
802  * It flushes the related LFSCK trace files to disk by making checkpoints
803  * periodically, so that if the server crashes or the LFSCK is paused, the
804  * LFSCK can resume from the latest checkpoint.
806  * It also controls the overall LFSCK speed via lfsck_control_speed() to
807  * avoid overloading the server.
809 * \param[in] env pointer to the thread context
810 * \param[in] lfsck pointer to the lfsck instance
812 * \retval positive number if all objects have been scanned
813 * \retval 0 if the iteration is stopped or paused
814 * \retval negative error number on failure
816 static int lfsck_master_oit_engine(const struct lu_env *env,
817 struct lfsck_instance *lfsck)
819 struct lfsck_thread_info *info = lfsck_env_info(env);
820 const struct dt_it_ops *iops =
821 &lfsck->li_obj_oit->do_index_ops->dio_it;
822 struct dt_it *di = lfsck->li_di_oit;
823 struct lu_fid *fid = &info->lti_fid;
824 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
825 struct ptlrpc_thread *thread = &lfsck->li_thread;
826 struct seq_server_site *ss = lfsck_dev_site(lfsck);
827 __u32 idx = lfsck_dev_idx(lfsck);
831 if (unlikely(ss == NULL))
835 struct dt_object *target;
837 if (lfsck->li_di_dir != NULL) {
838 rc = lfsck_master_dir_engine(env, lfsck);
843 if (unlikely(lfsck->li_oit_over))
846 if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY1, cfs_fail_val) &&
847 unlikely(!thread_is_running(thread))) {
848 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
850 lfsck_lfsck2name(lfsck), iops->store(env, di));
855 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
858 lfsck->li_current_oit_processed = 1;
860 if (!list_empty(&lfsck->li_list_lmv)) {
861 struct lfsck_lmv_unit *llu;
863 spin_lock(&lfsck->li_lock);
864 llu = list_entry(lfsck->li_list_lmv.next,
865 struct lfsck_lmv_unit, llu_link);
866 list_del_init(&llu->llu_link);
867 spin_unlock(&lfsck->li_lock);
869 lfsck->li_lmv = &llu->llu_lmv;
870 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
871 rc = lfsck_open_dir(env, lfsck, 0);
873 rc = lfsck_master_dir_engine(env, lfsck);
879 lfsck->li_new_scanned++;
880 lfsck->li_pos_current.lp_oit_cookie = iops->store(env, di);
881 rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
883 CDEBUG(D_LFSCK, "%s: OIT scan failed at rec(): "
884 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
885 lfsck_fail(env, lfsck, true);
886 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
892 if (unlikely(!fid_is_sane(fid))) {
893 CDEBUG(D_LFSCK, "%s: OIT scan find invalid FID "DFID
895 lfsck_lfsck2name(lfsck), PFID(fid));
899 if (fid_is_idif(fid)) {
900 __u32 idx1 = fid_idif_ost_idx(fid);
902 LASSERT(!lfsck->li_master);
905 struct ost_id *oi = &info->lti_oi;
907 if (unlikely(idx1 != 0)) {
908 CDEBUG(D_LFSCK, "%s: invalid IDIF "DFID
909 ", not match device index %u\n",
910 lfsck_lfsck2name(lfsck),
916 /* rebuild the IDIF with index to
917 * avoid double instances for the
919 fid_to_ostid(fid, oi);
920 ostid_to_fid(fid, oi, idx);
922 } else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
923 !fid_is_last_id(fid) &&
924 !lu_fid_eq(fid, &lfsck->li_global_root_fid)) {
926 /* If the FID/object is only used locally and invisible
927 * to external nodes, then LFSCK will not handle it.
929			 * The dot_lustre sequence has been handled specially. */
932 struct lu_seq_range *range = &info->lti_range;
934 if (lfsck->li_master)
935 fld_range_set_mdt(range);
937 fld_range_set_ost(range);
938 rc = fld_local_lookup(env, ss->ss_server_fld,
939 fid_seq(fid), range);
940 if (rc != 0 || range->lsr_index != idx) {
941				/* A remote object will be handled by the LFSCK
942				 * instance on the MDT where the remote object
943				 * really resides. */
949 target = lfsck_object_find_bottom(env, lfsck, fid);
950 if (IS_ERR(target)) {
951 CDEBUG(D_LFSCK, "%s: OIT scan failed at find target "
952 DFID", cookie %llu: rc = %d\n",
953 lfsck_lfsck2name(lfsck), PFID(fid),
954 iops->store(env, di), rc);
955 lfsck_fail(env, lfsck, true);
956 if (bk->lb_param & LPF_FAILOUT)
957 RETURN(PTR_ERR(target));
962 if (dt_object_exists(target))
963 rc = lfsck_exec_oit(env, lfsck, target);
965 lfsck_object_put(env, target);
966 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
970 rc = lfsck_checkpoint(env, lfsck);
971 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
975 lfsck_control_speed(lfsck);
977 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL1)) {
978 spin_lock(&lfsck->li_lock);
979 thread_set_flags(thread, SVC_STOPPING);
980 spin_unlock(&lfsck->li_lock);
984 rc = iops->next(env, di);
985 if (unlikely(rc > 0))
986 lfsck->li_oit_over = 1;
987 else if (likely(rc == 0))
988 lfsck->li_current_oit_processed = 0;
990 CDEBUG(D_LFSCK, "%s oit engine fail to locate next at "
991 "%llu: rc = %d\n", lfsck_lfsck2name(lfsck),
992 iops->store(env, di), rc);
994 if (unlikely(!thread_is_running(thread))) {
995 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
996 "cookie %llu\n", lfsck_lfsck2name(lfsck),
997 iops->store(env, di));
1000 } while (rc == 0 || lfsck->li_di_dir != NULL);
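/*
 * The LFSCK master engine thread.
 *
 * Initialize the otable-based iterator (which also triggers the low layer OI
 * scrub), verify .lustre/lost+found/ on the master, prepare the registered
 * components and wait for the start unplug, run the object-table based
 * iteration, then post-process the scan, run the second-stage scan if
 * needed, and finally mark the thread as stopped and wake up any waiters.
 */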
1005 int lfsck_master_engine(void *args)
1007 struct lfsck_thread_args *lta = args;
1008 struct lu_env *env = <a->lta_env;
1009 struct lfsck_instance *lfsck = lta->lta_lfsck;
1010 struct ptlrpc_thread *thread = &lfsck->li_thread;
1011 struct dt_object *oit_obj = lfsck->li_obj_oit;
1012 const struct dt_it_ops *oit_iops = &oit_obj->do_index_ops->dio_it;
1013 struct dt_it *oit_di;
1014 struct l_wait_info lwi = { 0 };
1018	/* Some objects will be verified during the LFSCK start, such as the
1019	 * subsequent lfsck_verify_lpf(). Trigger the low layer OI scrub before
1020	 * that to handle potential inconsistencies. */
1021 oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit);
1022 if (IS_ERR(oit_di)) {
1023 rc = PTR_ERR(oit_di);
1024 CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
1025 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
1027 GOTO(fini_args, rc);
1030 if (lfsck->li_master &&
1031 (!list_empty(&lfsck->li_list_scan) ||
1032 !list_empty(&lfsck->li_list_double_scan))) {
1033 rc = lfsck_verify_lpf(env, lfsck);
1034		/* A failure to verify the .lustre/lost+found/MDTxxxx/ may not be
1035		 * fatal, because the .lustre/lost+found/ may not be accessed
1036		 * by the LFSCK if it does not add orphans or others to such
1037		 * directory. So go ahead until hitting a failure when it really uses
1040 CDEBUG(D_LFSCK, "%s: master engine fail to verify the "
1041 ".lustre/lost+found/, go ahead: rc = %d\n",
1042 lfsck_lfsck2name(lfsck), rc);
1045 spin_lock(&lfsck->li_lock);
1046 lfsck->li_di_oit = oit_di;
1047 spin_unlock(&lfsck->li_lock);
1048 rc = lfsck_prep(env, lfsck, lta->lta_lsp);
1052 CDEBUG(D_LFSCK, "LFSCK entry: oit_flags = %#x, dir_flags = %#x, "
1053 "oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID
1054 ", pid = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
1055 lfsck->li_pos_checkpoint.lp_oit_cookie,
1056 lfsck->li_pos_checkpoint.lp_dir_cookie,
1057 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
1060 spin_lock(&lfsck->li_lock);
1061 thread_set_flags(thread, SVC_RUNNING);
1062 spin_unlock(&lfsck->li_lock);
1063 wake_up_all(&thread->t_ctl_waitq);
1065 l_wait_event(thread->t_ctl_waitq,
1066 lfsck->li_start_unplug ||
1067 !thread_is_running(thread),
1069 if (!thread_is_running(thread))
1070 GOTO(fini_oit, rc = 0);
1072 if (!list_empty(&lfsck->li_list_scan) ||
1073 list_empty(&lfsck->li_list_double_scan))
1074 rc = lfsck_master_oit_engine(env, lfsck);
1078 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
1079 CDEBUG(D_LFSCK, "LFSCK exit: oit_flags = %#x, dir_flags = %#x, "
1080 "oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID
1081 ", pid = %d, rc = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
1082 lfsck->li_pos_checkpoint.lp_oit_cookie,
1083 lfsck->li_pos_checkpoint.lp_dir_cookie,
1084 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
1087 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
1088 rc = lfsck_post(env, lfsck, rc);
1090 lfsck_close_dir(env, lfsck, rc);
1093 lfsck_di_oit_put(env, lfsck);
1094 oit_iops->fini(env, oit_di);
1096 if (!list_empty(&lfsck->li_list_double_scan))
1097 rc = lfsck_double_scan(env, lfsck);
1101 lfsck_quit(env, lfsck);
1104 /* XXX: Purge the pinned objects in the future. */
1107 spin_lock(&lfsck->li_lock);
1108 thread_set_flags(thread, SVC_STOPPED);
1109 spin_unlock(&lfsck->li_lock);
1110 wake_up_all(&thread->t_ctl_waitq);
1111 lfsck_thread_args_fini(lta);
1115 static inline bool lfsck_assistant_req_empty(struct lfsck_assistant_data *lad)
1119 spin_lock(&lad->lad_lock);
1120 if (list_empty(&lad->lad_req_list))
1122 spin_unlock(&lad->lad_lock);
1128  * Query the LFSCK status from the instances on remote servers.
1130 * The LFSCK assistant thread queries the LFSCK instances on other
1131 * servers (MDT/OST) about their status, such as whether they have
1132 * finished the phase1/phase2 scanning or not, and so on.
1134 * \param[in] env pointer to the thread context
1135 * \param[in] com pointer to the lfsck component
1137 * \retval 0 for success
1138 * \retval negative error number on failure
1140 static int lfsck_assistant_query_others(const struct lu_env *env,
1141 struct lfsck_component *com)
1143 struct lfsck_thread_info *info = lfsck_env_info(env);
1144 struct lfsck_request *lr = &info->lti_lr;
1145 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1146 struct lfsck_instance *lfsck = com->lc_lfsck;
1147 struct lfsck_assistant_data *lad = com->lc_data;
1148 struct ptlrpc_request_set *set;
1149 struct lfsck_tgt_descs *ltds;
1150 struct lfsck_tgt_desc *ltd;
1151 struct list_head *phase_head;
1156 set = ptlrpc_prep_set();
1160 lad->lad_touch_gen++;
1161 memset(lr, 0, sizeof(*lr));
1162 lr->lr_event = LE_QUERY;
1163 lr->lr_active = com->lc_type;
1165 memset(laia, 0, sizeof(*laia));
1166 laia->laia_com = com;
1169 if (!list_empty(&lad->lad_mdt_phase1_list)) {
1170 ltds = &lfsck->li_mdt_descs;
1172 phase_head = &lad->lad_mdt_phase1_list;
1173 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1178 ltds = &lfsck->li_ost_descs;
1179 lr->lr_flags = LEF_TO_OST;
1180 phase_head = &lad->lad_ost_phase1_list;
1183 laia->laia_ltds = ltds;
1184 spin_lock(<ds->ltd_lock);
1185 while (!list_empty(phase_head)) {
1186 struct list_head *phase_list;
1189 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1190 ltd = list_entry(phase_head->next,
1191 struct lfsck_tgt_desc,
1192 ltd_layout_phase_list);
1193 phase_list = <d->ltd_layout_phase_list;
1194 gen = <d->ltd_layout_gen;
1196 ltd = list_entry(phase_head->next,
1197 struct lfsck_tgt_desc,
1198 ltd_namespace_phase_list);
1199 phase_list = <d->ltd_namespace_phase_list;
1200 gen = <d->ltd_namespace_gen;
1203 if (*gen == lad->lad_touch_gen)
1206 *gen = lad->lad_touch_gen;
1207 list_move_tail(phase_list, phase_head);
1208 atomic_inc(<d->ltd_ref);
1209 laia->laia_ltd = ltd;
1210 spin_unlock(<ds->ltd_lock);
1211 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1212 lfsck_async_interpret_common,
1215 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to query "
1216 "%s %x for %s: rc = %d\n",
1217 lfsck_lfsck2name(lfsck),
1218 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1219 ltd->ltd_index, lad->lad_name, rc);
1223 spin_lock(<ds->ltd_lock);
1225 spin_unlock(<ds->ltd_lock);
1227 rc = ptlrpc_set_wait(set);
1229 ptlrpc_set_destroy(set);
1233 if (com->lc_type == LFSCK_TYPE_LAYOUT && !(lr->lr_flags & LEF_TO_OST) &&
1234 list_empty(&lad->lad_mdt_phase1_list))
1238 ptlrpc_set_destroy(set);
1240 RETURN(rc1 != 0 ? rc1 : rc);
1244  * Notify the LFSCK instances on remote servers about an LFSCK event.
1246  * The LFSCK assistant thread notifies the LFSCK instances on other
1247  * servers (MDT/OST) about certain events, such as starting a new scan,
1248  * stopping the scan, this LFSCK instance exiting, and so on.
1250 * \param[in] env pointer to the thread context
1251 * \param[in] com pointer to the lfsck component
1252 * \param[in] lr pointer to the LFSCK event request
1254 * \retval 0 for success
1255 * \retval negative error number on failure
1257 static int lfsck_assistant_notify_others(const struct lu_env *env,
1258 struct lfsck_component *com,
1259 struct lfsck_request *lr)
1261 struct lfsck_thread_info *info = lfsck_env_info(env);
1262 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1263 struct lfsck_instance *lfsck = com->lc_lfsck;
1264 struct lfsck_assistant_data *lad = com->lc_data;
1265 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1266 struct ptlrpc_request_set *set;
1267 struct lfsck_tgt_descs *ltds;
1268 struct lfsck_tgt_desc *ltd;
1269 struct lfsck_tgt_desc *next;
1275 set = ptlrpc_prep_set();
1279 lr->lr_index = lfsck_dev_idx(lfsck);
1280 lr->lr_active = com->lc_type;
1282 memset(laia, 0, sizeof(*laia));
1283 laia->laia_com = com;
1286 switch (lr->lr_event) {
1288 if (com->lc_type != LFSCK_TYPE_LAYOUT)
1291 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN;
1292 lr->lr_speed = bk->lb_speed_limit;
1293 lr->lr_version = bk->lb_version;
1294 lr->lr_param |= bk->lb_param;
1295 lr->lr_async_windows = bk->lb_async_windows;
1296 lr->lr_flags = LEF_TO_OST;
1298		/* Notify the OSTs first, then handle other MDTs if needed. */
1299 ltds = &lfsck->li_ost_descs;
1300 laia->laia_ltds = ltds;
1301 down_read(<ds->ltd_rw_sem);
1302 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1303 ltd = lfsck_tgt_get(ltds, idx);
1304 LASSERT(ltd != NULL);
1306 laia->laia_ltd = ltd;
1307 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1308 lfsck_async_interpret_common,
1309 laia, LFSCK_NOTIFY);
1311 lfsck_lad_set_bitmap(env, com, idx);
1312 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1313 "notify OST %x for %s start: rc = %d\n",
1314 lfsck_lfsck2name(lfsck), idx,
1319 up_read(<ds->ltd_rw_sem);
1322 rc = ptlrpc_set_wait(set);
1324 ptlrpc_set_destroy(set);
1329 if (!(bk->lb_param & LPF_ALL_TGT))
1332		/* Link other MDT targets locally. */
1333 ltds = &lfsck->li_mdt_descs;
1334 spin_lock(<ds->ltd_lock);
1335 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1336 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1337 ltd = lfsck_ltd2tgt(ltds, idx);
1338 LASSERT(ltd != NULL);
1340 if (!list_empty(<d->ltd_layout_list))
1343 list_add_tail(<d->ltd_layout_list,
1344 &lad->lad_mdt_list);
1345 list_add_tail(<d->ltd_layout_phase_list,
1346 &lad->lad_mdt_phase1_list);
1349 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1350 ltd = lfsck_ltd2tgt(ltds, idx);
1351 LASSERT(ltd != NULL);
1353 if (!list_empty(<d->ltd_namespace_list))
1356 list_add_tail(<d->ltd_namespace_list,
1357 &lad->lad_mdt_list);
1358 list_add_tail(<d->ltd_namespace_phase_list,
1359 &lad->lad_mdt_phase1_list);
1362 spin_unlock(<ds->ltd_lock);
1365 case LE_PHASE2_DONE:
1366 case LE_PEER_EXIT: {
1367 struct list_head *phase_head;
1369		/* Handle other MDTs first if needed, then notify the OSTs. */
1370 if (bk->lb_param & LPF_ALL_TGT) {
1371 phase_head = &lad->lad_mdt_list;
1372 ltds = &lfsck->li_mdt_descs;
1373 if (lr->lr_event == LE_STOP) {
1374				/* Unlink other MDT targets locally. */
1375 spin_lock(<ds->ltd_lock);
1376 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1377 list_for_each_entry_safe(ltd, next,
1378 phase_head, ltd_layout_list) {
1380 <d->ltd_layout_phase_list);
1382 <d->ltd_layout_list);
1385 list_for_each_entry_safe(ltd, next,
1387 ltd_namespace_list) {
1389 <d->ltd_namespace_phase_list);
1391 <d->ltd_namespace_list);
1394 spin_unlock(<ds->ltd_lock);
1396 if (com->lc_type != LFSCK_TYPE_LAYOUT)
1399 lr->lr_flags |= LEF_TO_OST;
1400 phase_head = &lad->lad_ost_list;
1401 ltds = &lfsck->li_ost_descs;
1403 lr->lr_flags &= ~LEF_TO_OST;
1405 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1408 lr->lr_flags |= LEF_TO_OST;
1409 phase_head = &lad->lad_ost_list;
1410 ltds = &lfsck->li_ost_descs;
1414 laia->laia_ltds = ltds;
1415 spin_lock(<ds->ltd_lock);
1416 while (!list_empty(phase_head)) {
1417 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1418 ltd = list_entry(phase_head->next,
1419 struct lfsck_tgt_desc,
1421 if (!list_empty(<d->ltd_layout_phase_list))
1423 <d->ltd_layout_phase_list);
1424 list_del_init(<d->ltd_layout_list);
1426 ltd = list_entry(phase_head->next,
1427 struct lfsck_tgt_desc,
1428 ltd_namespace_list);
1429 if (!list_empty(<d->ltd_namespace_phase_list))
1431 <d->ltd_namespace_phase_list);
1432 list_del_init(<d->ltd_namespace_list);
1434 atomic_inc(<d->ltd_ref);
1435 laia->laia_ltd = ltd;
1436 spin_unlock(<ds->ltd_lock);
1437 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1438 lfsck_async_interpret_common,
1439 laia, LFSCK_NOTIFY);
1441 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1442 "notify %s %x for %s stop/phase2_done/"
1443 "peer_exit: rc = %d\n",
1444 lfsck_lfsck2name(lfsck),
1445 (lr->lr_flags & LEF_TO_OST) ?
1446 "OST" : "MDT", ltd->ltd_index,
1450 spin_lock(<ds->ltd_lock);
1452 spin_unlock(<ds->ltd_lock);
1454 rc = ptlrpc_set_wait(set);
1456 ptlrpc_set_destroy(set);
1460 if (com->lc_type == LFSCK_TYPE_LAYOUT &&
1461 !(lr->lr_flags & LEF_TO_OST)) {
1462 lr->lr_flags |= LEF_TO_OST;
1463 phase_head = &lad->lad_ost_list;
1464 ltds = &lfsck->li_ost_descs;
1469 case LE_PHASE1_DONE:
1470 lad->lad_ops->la_sync_failures(env, com, lr);
1471 lad->lad_touch_gen++;
1472 ltds = &lfsck->li_mdt_descs;
1473 laia->laia_ltds = ltds;
1474 spin_lock(<ds->ltd_lock);
1475 while (!list_empty(&lad->lad_mdt_list)) {
1476 struct list_head *list;
1479 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1480 ltd = list_entry(lad->lad_mdt_list.next,
1481 struct lfsck_tgt_desc,
1483 list = <d->ltd_layout_list;
1484 gen = <d->ltd_layout_gen;
1486 struct lfsck_namespace *ns = com->lc_file_ram;
1488 ltd = list_entry(lad->lad_mdt_list.next,
1489 struct lfsck_tgt_desc,
1490 ltd_namespace_list);
1491 list = <d->ltd_namespace_list;
1492 gen = <d->ltd_namespace_gen;
1493 lr->lr_flags2 = ns->ln_flags & ~LF_INCOMPLETE;
1496 if (*gen == lad->lad_touch_gen)
1499 *gen = lad->lad_touch_gen;
1500 list_move_tail(list, &lad->lad_mdt_list);
1501 if (ltd->ltd_synced_failures)
1504 atomic_inc(<d->ltd_ref);
1505 laia->laia_ltd = ltd;
1506 spin_unlock(<ds->ltd_lock);
1507 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1508 lfsck_async_interpret_common,
1509 laia, LFSCK_NOTIFY);
1511 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1512 "notify MDT %x for %s phase1 done: "
1513 "rc = %d\n", lfsck_lfsck2name(lfsck),
1514 ltd->ltd_index, lad->lad_name, rc);
1517 spin_lock(<ds->ltd_lock);
1519 spin_unlock(<ds->ltd_lock);
1522 CDEBUG(D_LFSCK, "%s: LFSCK assistant unexpected LFSCK event: "
1523 "rc = %d\n", lfsck_lfsck2name(lfsck), lr->lr_event);
1528 rc1 = ptlrpc_set_wait(set);
1529 ptlrpc_set_destroy(set);
1531 RETURN(rc != 0 ? rc : rc1);
1535  * The LFSCK assistant thread is triggered by the LFSCK main engine.
1536  * They work together as an asynchronous pipeline: the LFSCK main
1537  * engine scans the system and pre-fetches the objects, attributes,
1538  * name entries, etc., and pushes them into the pipeline as input
1539  * requests for the LFSCK assistant thread; on the other end of the
1540  * pipeline, the LFSCK assistant thread performs the real check and
1541  * repair for every request from the main engine.
1543  * Generally, the assistant engine may be blocked while checking or
1544  * repairing something, so the LFSCK main engine usually runs faster.
1545  * On the other hand, the LFSCK main engine drives multiple assistant
1546  * threads in parallel, meaning that for each LFSCK component on the
1547  * master (such as layout LFSCK or namespace LFSCK), there is an
1548  * independent LFSCK assistant thread. With such a 1:N asynchronous
1549  * pipeline mode, the overall LFSCK performance is much better than
1550  * having the LFSCK main engine check/repair everything by itself.
1552 int lfsck_assistant_engine(void *args)
1554 struct lfsck_thread_args *lta = args;
1555 struct lu_env *env = <a->lta_env;
1556 struct lfsck_component *com = lta->lta_com;
1557 struct lfsck_instance *lfsck = lta->lta_lfsck;
1558 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1559 struct lfsck_position *pos = &com->lc_pos_start;
1560 struct lfsck_thread_info *info = lfsck_env_info(env);
1561 struct lfsck_request *lr = &info->lti_lr;
1562 struct lfsck_assistant_data *lad = com->lc_data;
1563 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1564 struct ptlrpc_thread *athread = &lad->lad_thread;
1565 struct lfsck_assistant_operations *lao = lad->lad_ops;
1566 struct lfsck_assistant_req *lar;
1567 struct l_wait_info lwi = { 0 };
1573 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread start\n",
1574 lfsck_lfsck2name(lfsck), lad->lad_name);
1576 memset(lr, 0, sizeof(*lr));
1577 lr->lr_event = LE_START;
1578 if (pos->lp_oit_cookie <= 1)
1579 lr->lr_param = LPF_RESET;
1580 rc = lfsck_assistant_notify_others(env, com, lr);
1582 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to notify others "
1583 "to start %s: rc = %d\n",
1584 lfsck_lfsck2name(lfsck), lad->lad_name, rc);
1588 spin_lock(&lad->lad_lock);
1589 lad->lad_task = current;
1590 thread_set_flags(athread, SVC_RUNNING);
1591 spin_unlock(&lad->lad_lock);
1592 wake_up_all(&mthread->t_ctl_waitq);
1595 while (!list_empty(&lad->lad_req_list)) {
1596 bool wakeup = false;
1598 if (unlikely(lad->lad_exit ||
1599 !thread_is_running(mthread)))
1600 GOTO(cleanup, rc = lad->lad_post_result);
1602 lar = list_entry(lad->lad_req_list.next,
1603 struct lfsck_assistant_req,
1605			/* Only the lfsck_assistant_engine thread itself can
1606			 * remove the "lar" from the head of the list; the LFSCK
1607			 * engine thread only inserts new "lar" items at the
1608			 * end of the list. So it is safe to handle the current
1609			 * "lar" without holding the spin_lock. */
1610 rc = lao->la_handler_p1(env, com, lar);
1611 spin_lock(&lad->lad_lock);
1612 list_del_init(&lar->lar_list);
1613 lad->lad_prefetched--;
1614			/* Wake up the main engine thread only when the list
1615			 * is empty or half of the prefetched items have been
1616			 * handled, to avoid too frequent thread scheduling. */
1617 if (lad->lad_prefetched <= (bk->lb_async_windows / 2))
1619 spin_unlock(&lad->lad_lock);
1621 wake_up_all(&mthread->t_ctl_waitq);
1623 lao->la_req_fini(env, lar);
1624 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
1628 l_wait_event(athread->t_ctl_waitq,
1629 !lfsck_assistant_req_empty(lad) ||
1632 lad->lad_to_double_scan,
1635 if (unlikely(lad->lad_exit))
1636 GOTO(cleanup, rc = lad->lad_post_result);
1638 if (!list_empty(&lad->lad_req_list))
1641 if (lad->lad_to_post) {
1642 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread post\n",
1643 lfsck_lfsck2name(lfsck), lad->lad_name);
1645 if (unlikely(lad->lad_exit))
1646 GOTO(cleanup, rc = lad->lad_post_result);
1648 lad->lad_to_post = 0;
1649 LASSERT(lad->lad_post_result > 0);
1651			/* Wake up the master engine to go ahead. */
1652 wake_up_all(&mthread->t_ctl_waitq);
1654 memset(lr, 0, sizeof(*lr));
1655 lr->lr_event = LE_PHASE1_DONE;
1656 lr->lr_status = lad->lad_post_result;
1657 rc = lfsck_assistant_notify_others(env, com, lr);
1659 CDEBUG(D_LFSCK, "%s: LFSCK assistant notified "
1660 "others for %s post: rc = %d\n",
1661 lfsck_lfsck2name(lfsck),
1665 if (lad->lad_to_double_scan) {
1666 lad->lad_to_double_scan = 0;
1667 atomic_inc(&lfsck->li_double_scan_count);
1668 lad->lad_in_double_scan = 1;
1669 wake_up_all(&mthread->t_ctl_waitq);
1671 com->lc_new_checked = 0;
1672 com->lc_new_scanned = 0;
1673 com->lc_time_last_checkpoint = cfs_time_current();
1674 com->lc_time_next_checkpoint =
1675 com->lc_time_last_checkpoint +
1676 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
1678 CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before "
1679			       "the second-stage scanning\n",
1680 lfsck_lfsck2name(lfsck));
1682			/* Flush async updates before handling orphans. */
1683 rc2 = dt_sync(env, lfsck->li_next);
1685 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 "
1686 "scan start, synced: rc = %d\n",
1687 lfsck_lfsck2name(lfsck), rc2);
1689 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_DOUBLESCAN))
1690 GOTO(cleanup, rc = 0);
1692 while (lad->lad_in_double_scan) {
1693 rc = lfsck_assistant_query_others(env, com);
1694 if (lfsck_phase2_next_ready(lad))
1700 /* Pull LFSCK status on related targets once
1701 * per 30 seconds if we are not notified. */
1702 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
1703 cfs_time_seconds(1),
1705 rc = l_wait_event(athread->t_ctl_waitq,
1706 lfsck_phase2_next_ready(lad) ||
1708 !thread_is_running(mthread),
1711 if (unlikely(lad->lad_exit ||
1712 !thread_is_running(mthread)))
1713 GOTO(cleanup, rc = 0);
1715 if (rc == -ETIMEDOUT)
1722 rc = lao->la_handler_p2(env, com);
1726 if (unlikely(lad->lad_exit ||
1727 !thread_is_running(mthread)))
1728 GOTO(cleanup, rc = 0);
1734	/* Clean up the unfinished requests. */
1735 spin_lock(&lad->lad_lock);
1737 lad->lad_assistant_status = rc;
1739 if (lad->lad_exit && lad->lad_post_result <= 0)
1740 lao->la_fill_pos(env, com, &lfsck->li_pos_checkpoint);
1742 thread_set_flags(athread, SVC_STOPPING);
1743 while (!list_empty(&lad->lad_req_list)) {
1744 lar = list_entry(lad->lad_req_list.next,
1745 struct lfsck_assistant_req,
1747 list_del_init(&lar->lar_list);
1748 lad->lad_prefetched--;
1749 spin_unlock(&lad->lad_lock);
1750 lao->la_req_fini(env, lar);
1751 spin_lock(&lad->lad_lock);
1753 spin_unlock(&lad->lad_lock);
1755	LASSERTF(lad->lad_prefetched == 0, "unmatched prefetched objs %d\n",
1756 lad->lad_prefetched);
1758 memset(lr, 0, sizeof(*lr));
1760 lr->lr_event = LE_PHASE2_DONE;
1762 } else if (rc == 0) {
1763 if (lfsck->li_flags & LPF_ALL_TGT) {
1764 lr->lr_event = LE_STOP;
1765 lr->lr_status = LS_STOPPED;
1767 lr->lr_event = LE_PEER_EXIT;
1768 switch (lfsck->li_status) {
1771 lr->lr_status = LS_CO_PAUSED;
1775 lr->lr_status = LS_CO_STOPPED;
1778 CDEBUG(D_LFSCK, "%s: LFSCK assistant unknown "
1779 "status: rc = %d\n",
1780 lfsck_lfsck2name(lfsck),
1782 lr->lr_status = LS_CO_FAILED;
1787 if (lfsck->li_flags & LPF_ALL_TGT) {
1788 lr->lr_event = LE_STOP;
1789 lr->lr_status = LS_FAILED;
1791 lr->lr_event = LE_PEER_EXIT;
1792 lr->lr_status = LS_CO_FAILED;
1796 rc1 = lfsck_assistant_notify_others(env, com, lr);
1798 CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to notify "
1799 "others for %s quit: rc = %d\n",
1800 lfsck_lfsck2name(lfsck), lad->lad_name, rc1);
1804 CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before exit\n",
1805 lfsck_lfsck2name(lfsck));
1807 /* Flush async updates before exit. */
1808 rc2 = dt_sync(env, lfsck->li_next);
1810 CDEBUG(D_LFSCK, "%s: LFSCK assistant synced before exit: rc = %d\n",
1811 lfsck_lfsck2name(lfsck), rc2);
1813	/* In the forced-exit case, some requests may just be freed without
1814	 * verification; those objects should be re-handled on the next run.
1815	 * So do not update the on-disk trace file in such a case. */
1816 if (lad->lad_in_double_scan) {
1818 rc1 = lao->la_double_scan_result(env, com, rc);
1820 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 scan "
1821 "finished: rc = %d\n",
1822 lfsck_lfsck2name(lfsck), rc1 != 0 ? rc1 : rc);
1826 if (lad->lad_in_double_scan)
1827 atomic_dec(&lfsck->li_double_scan_count);
1829 spin_lock(&lad->lad_lock);
1830 lad->lad_assistant_status = (rc1 != 0 ? rc1 : rc);
1831 thread_set_flags(athread, SVC_STOPPED);
1832 wake_up_all(&mthread->t_ctl_waitq);
1833 lad->lad_task = NULL;
1834 spin_unlock(&lad->lad_lock);
1836 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread exit: rc = %d\n",
1837 lfsck_lfsck2name(lfsck), lad->lad_name,
1838 lad->lad_assistant_status);
1840 lfsck_thread_args_fini(lta);