4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, 2016, Intel Corporation.
26 * lustre/lfsck/lfsck_engine.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <lu_object.h>
34 #include <dt_object.h>
35 #include <lustre_net.h>
36 #include <lustre_fid.h>
37 #include <obd_support.h>
38 #include <lustre_lib.h>
40 #include "lfsck_internal.h"
/*
 * Unpack an on-disk (little-endian) directory entry in place: convert the
 * FID and the entry fields to CPU byte order, return the directory hash
 * via @cookie, and return the object type via @type. The type lives in a
 * luda_type record stored (aligned) right after the entry name.
 *
 * NOTE(review): several lines are elided in this view (declarations of
 * lt/len, the !LUDA_TYPE early path, and the final return) — confirm
 * against the full source.
 */
42 int lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie, __u16 *type)
45 int align = sizeof(*lt) - 1;
48 fid_le_to_cpu(&ent->lde_fid, &ent->lde_fid);
49 *cookie = le64_to_cpu(ent->lde_hash);
50 ent->lde_reclen = le16_to_cpu(ent->lde_reclen);
51 ent->lde_namelen = le16_to_cpu(ent->lde_namelen);
52 ent->lde_attrs = le32_to_cpu(ent->lde_attrs);
/* Without LUDA_TYPE there is no trailing luda_type record to read. */
54 if (unlikely(!(ent->lde_attrs & LUDA_TYPE)))
/* luda_type is stored aligned to sizeof(*lt) after the name bytes. */
57 len = (ent->lde_namelen + align) & ~align;
58 lt = (struct luda_type *)(ent->lde_name + len);
59 *type = le16_to_cpu(lt->lt_type);
61 /* Make sure the name is terminated with '\0'. The data (object type)
62 * after ent::lde_name may be broken, but we have stored such data in
63 * the output parameter @type as above.
65 ent->lde_name[ent->lde_namelen] = '\0';
/*
 * Detach the otable-based (OIT) iterator from the lfsck instance.
 * Clearing li_di_oit under li_lock makes the iterator invisible to
 * concurrent readers; the saved handle is then released through the
 * iterator ops (the put call itself is elided in this view — TODO
 * confirm against the full source).
 */
70 static void lfsck_di_oit_put(const struct lu_env *env,
71 struct lfsck_instance *lfsck)
73 const struct dt_it_ops *iops;
76 spin_lock(&lfsck->li_lock);
77 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
78 di = lfsck->li_di_oit;
79 lfsck->li_di_oit = NULL;
80 spin_unlock(&lfsck->li_lock);
/*
 * Detach the namespace (directory) iterator from the lfsck instance.
 * Clearing li_di_dir and resetting li_cookie_dir under li_lock hides
 * the iterator from concurrent readers; the saved handle is released
 * through the iterator ops afterwards (put call elided in this view —
 * TODO confirm against the full source).
 */
84 static void lfsck_di_dir_put(const struct lu_env *env,
85 struct lfsck_instance *lfsck)
87 const struct dt_it_ops *iops;
90 spin_lock(&lfsck->li_lock);
91 iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
92 di = lfsck->li_di_dir;
93 lfsck->li_di_dir = NULL;
94 lfsck->li_cookie_dir = 0;
95 spin_unlock(&lfsck->li_lock);
100 * Check whether the directory needs to be scanned or not.
102 * 1) If we are not doing namespace LFSCK, or the given @obj is not directory,
103 * then needs not to scan the @obj. Otherwise,
104 * 2) Global /ROOT needs to be scanned, backend root needs not to be scanned.
105 * 3) If the @obj is neither IGIF nor normal FID (including .lustre and its
106 * sub-directories that have been scanned when the LFSCK engine start),
107 * then needs not to be scanned.
108 * 4) If it is a remote object, then scanning the object will be done on the
109 * MDT on which the object really resides.
110 * 5) If the local object has normal FID, then needs to be scanned. Otherwise,
111 * 6) If the object has linkEA, then needs to be scanned. Otherwise,
112 * 7) If none of the previous conditions are true, we need to check the parent
113 * directories whether this subdirectory is in a tree that should be scanned.
114 * Set the parent as current @obj, repeat 2)-7).
116 * \param[in] env pointer to the thread context
117 * \param[in] lfsck pointer to the lfsck instance
118 * \param[in] obj pointer to the object to be checked
120 * \retval positive number if the directory needs to be scanned
121 * \retval 0 if the directory needs NOT to be scanned
122 * \retval negative error number on failure
/*
 * Decide whether @obj (a directory) must be scanned by the namespace
 * LFSCK; the decision procedure is described in detail by the comment
 * block immediately above. Walks up via ".." when the local object has
 * neither a normal FID nor a linkEA.
 */
124 static int lfsck_needs_scan_dir(const struct lu_env *env,
125 struct lfsck_instance *lfsck,
126 struct dt_object *obj)
128 struct lfsck_thread_info *info = lfsck_env_info(env);
129 struct lu_fid *fid = &info->lti_fid;
130 struct lu_seq_range *range = &info->lti_range;
131 struct lu_attr *la = &info->lti_la;
132 struct seq_server_site *ss = lfsck_dev_site(lfsck);
133 __u32 idx = lfsck_dev_idx(lfsck);
/* No component registered for directory scanning, or not a directory. */
137 if (list_empty(&lfsck->li_list_dir) || !S_ISDIR(lfsck_object_type(obj)))
140 *fid = *lfsck_dto2fid(obj);
141 rc = dt_attr_get(env, obj, la);
142 if (unlikely(rc || (la->la_valid & LA_FLAGS &&
143 la->la_flags & LUSTRE_ORPHAN_FL))) {
144 /* Orphan directory is empty, does not need scan. */
146 "%s: skip orphan dir "DFID", %llx/%x: rc = %d\n",
147 lfsck_lfsck2name(lfsck), PFID(fid),
148 la->la_valid, la->la_flags, rc);
156 /* Global /ROOT is visible. */
157 if (unlikely(lu_fid_eq(fid, &lfsck->li_global_root_fid)))
160 /* Backend root is invisible. */
161 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
164 if (!fid_is_norm(fid) && !fid_is_igif(fid))
/* Resolve which MDT owns the current FID's sequence. */
167 fld_range_set_mdt(range);
168 rc = fld_local_lookup(env, ss->ss_server_fld,
169 fid_seq(fid), range);
170 if (rc != 0 || range->lsr_index != idx)
171 /* Current FID should NOT be for the input parameter
172 * @obj, because the lfsck_master_oit_engine() has
173 * filtered out agent object. So current FID is for
174 * the ancestor of the original input parameter @obj.
175 * So the ancestor is a remote directory. The input
176 * parameter @obj is local directory, and should be
177 * scanned under such case.
181 /* normal FID on this target (locally) must be for the
182 * client-side visible object.
184 if (fid_is_norm(fid))
187 /* Only true after "obj = NULL" set below */
189 obj = lfsck_object_find_bottom(env, lfsck, fid);
194 if (!dt_object_exists(obj))
198 dt_read_lock(env, obj, DT_TGT_CHILD);
199 if (unlikely(lfsck_is_dead_obj(obj))) {
200 dt_read_unlock(env, obj);
/* Probe for linkEA presence only; a zero-length buffer avoids a copy. */
205 rc = dt_xattr_get(env, obj,
206 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK);
207 dt_read_unlock(env, obj);
211 if (rc < 0 && rc != -ENODATA)
/* No linkEA: continue the walk upward through the ".." entry. */
214 rc = dt_lookup_dir(env, obj, dotdot, fid);
216 lfsck_object_put(env, obj);
222 if (!fid_is_sane(fid))
/* Only drop references taken inside this loop, never the caller's @obj. */
227 if (depth > 0 && obj != NULL)
228 lfsck_object_put(env, obj);
/*
 * Read the LMV EA of @obj and, for a striped directory, attach a new
 * lfsck_lmv state (master or slave) to the lfsck instance; in all
 * success paths li_obj_dir takes a reference on @obj. -ENODATA from
 * lfsck_read_stripe_lmv() means a plain (non-striped) directory.
 */
233 static int lfsck_load_stripe_lmv(const struct lu_env *env,
234 struct lfsck_instance *lfsck,
235 struct dt_object *obj)
237 struct lmv_mds_md_v1 *lmv = &lfsck_env_info(env)->lti_lmv;
238 struct lfsck_lmv *llmv;
/* Any previous directory scan state must have been torn down already. */
242 LASSERT(lfsck->li_obj_dir == NULL);
243 LASSERT(lfsck->li_lmv == NULL);
245 rc = lfsck_read_stripe_lmv(env, lfsck, obj, lmv);
246 if (rc == -ENODATA) {
247 lfsck->li_obj_dir = lfsck_object_get(obj);
259 if (lmv->lmv_magic == LMV_MAGIC) {
260 struct lfsck_slave_lmv_rec *lslr;
263 llmv->ll_lmv_master = 1;
/* Clamp the per-stripe record array to a sane allocation size. */
264 if (lmv->lmv_stripe_count < 1)
265 stripes = LFSCK_LMV_DEF_STRIPES;
266 else if (lmv->lmv_stripe_count > LFSCK_LMV_MAX_STRIPES)
267 stripes = LFSCK_LMV_MAX_STRIPES;
269 stripes = lmv->lmv_stripe_count;
271 OBD_ALLOC_PTR_ARRAY_LARGE(lslr, stripes);
278 llmv->ll_stripes_allocated = stripes;
279 llmv->ll_hash_type = LMV_HASH_TYPE_UNKNOWN;
280 llmv->ll_lslr = lslr;
282 llmv->ll_lmv_slave = 1;
285 lfsck->li_obj_dir = lfsck_object_get(obj);
287 atomic_set(&llmv->ll_ref, 1);
288 lfsck->li_lmv = llmv;
293 /* LFSCK wrap functions */
/*
 * Propagate a scan failure to every registered component so each can
 * account for it (second parameter of the signature is elided in this
 * view — presumably the "new_checked" flag passed through below).
 */
295 static void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
298 struct lfsck_component *com;
300 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
301 com->lc_ops->lfsck_fail(env, com, new_checked);
/*
 * Tear down the current directory scan: record @result in the LMV state,
 * notify components that registered for directory scanning, release the
 * lfsck_lmv reference, finalize the directory iterator, and drop the
 * li_obj_dir reference.
 */
305 void lfsck_close_dir(const struct lu_env *env,
306 struct lfsck_instance *lfsck, int result)
308 struct lfsck_component *com;
311 if (lfsck->li_lmv != NULL) {
312 lfsck->li_lmv->ll_exit_value = result;
313 if (lfsck->li_obj_dir != NULL) {
314 list_for_each_entry(com, &lfsck->li_list_dir,
316 com->lc_ops->lfsck_close_dir(env, com);
320 lfsck_lmv_put(env, lfsck->li_lmv);
321 lfsck->li_lmv = NULL;
324 if (lfsck->li_di_dir != NULL) {
325 const struct dt_it_ops *dir_iops;
326 struct dt_it *dir_di = lfsck->li_di_dir;
328 LASSERT(lfsck->li_obj_dir != NULL);
/* Capture the iterator ops before li_di_dir is cleared under li_lock,
 * then finalize the saved handle outside the lock.
 */
330 dir_iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
331 lfsck_di_dir_put(env, lfsck);
332 dir_iops->fini(env, dir_di);
335 if (lfsck->li_obj_dir != NULL) {
336 struct dt_object *dir_obj = lfsck->li_obj_dir;
338 lfsck->li_obj_dir = NULL;
339 lfsck_object_put(env, dir_obj);
/*
 * Start scanning the directory currently held in li_obj_dir: notify the
 * components registered for directory scanning, initialize the directory
 * iterator, and position it at @cookie (skipping the already-processed
 * entry when resuming from a non-zero cookie). On failure the partially
 * opened state is cleaned up via lfsck_close_dir().
 */
345 int lfsck_open_dir(const struct lu_env *env,
346 struct lfsck_instance *lfsck, __u64 cookie)
348 struct dt_object *obj = lfsck->li_obj_dir;
349 struct dt_it *di = lfsck->li_di_dir;
350 struct lfsck_component *com;
351 const struct dt_it_ops *iops;
355 LASSERT(obj != NULL);
358 if (unlikely(!dt_try_as_dir(env, obj, true)))
359 GOTO(out, rc = -ENOTDIR);
361 list_for_each_entry(com, &lfsck->li_list_dir, lc_link_dir) {
362 rc = com->lc_ops->lfsck_open_dir(env, com);
367 iops = &obj->do_index_ops->dio_it;
368 di = iops->init(env, obj, lfsck->li_args_dir);
370 GOTO(out, rc = PTR_ERR(di));
372 rc = iops->load(env, di, cookie);
/* When resuming (cookie > 0) the loaded entry was already handled;
 * advance to the next one.
 */
375 else if (rc == 0 || (rc > 0 && cookie > 0))
376 rc = iops->next(env, di);
/* Publish the new iterator under li_lock so others see it atomically. */
384 lfsck->li_cookie_dir = iops->store(env, di);
385 spin_lock(&lfsck->li_lock);
386 lfsck->li_di_dir = di;
387 spin_unlock(&lfsck->li_lock);
394 lfsck_close_dir(env, lfsck, rc);
/*
 * Write a periodic checkpoint: no-op until LFSCK_CHECKPOINT_INTERVAL
 * seconds have elapsed since the last one, then record the current
 * position and ask every scanning component to checkpoint. Returns the
 * first component failure (rc1) if any, otherwise the last rc.
 */
399 static int lfsck_checkpoint(const struct lu_env *env,
400 struct lfsck_instance *lfsck)
402 struct lfsck_component *com;
406 if (likely(ktime_get_seconds() <= lfsck->li_time_next_checkpoint))
409 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
410 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
411 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
416 lfsck->li_time_last_checkpoint = ktime_get_seconds();
417 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
418 LFSCK_CHECKPOINT_INTERVAL;
419 return rc1 != 0 ? rc1 : rc;
/*
 * Prepare the LFSCK run: let every component compute its start position,
 * pick the smallest one, load the otable-based iterator there, and — on
 * the master, when resuming inside a directory — reopen the saved parent
 * directory for namespace-based traversal. Finally take the initial
 * checkpoint and schedule the next one.
 */
422 static int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
423 struct lfsck_start_param *lsp)
425 struct dt_object *obj = NULL;
426 struct lfsck_component *com;
427 struct lfsck_component *next;
428 struct lfsck_position *pos = NULL;
429 const struct dt_it_ops *iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
433 LASSERT(lfsck->li_obj_dir == NULL);
434 LASSERT(lfsck->li_di_dir == NULL);
436 lfsck->li_current_oit_processed = 0;
437 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
438 com->lc_new_checked = 0;
439 rc = com->lc_ops->lfsck_prep(env, com, lsp);
/* Track the earliest non-zero start position among all components. */
444 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
445 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
446 pos = &com->lc_pos_start;
449 /* Init otable-based iterator. */
451 rc = iops->load(env, lfsck->li_di_oit, 0);
/* rc > 0 / -ENODATA from load: nothing (left) to iterate. */
452 if (rc > 0 || unlikely(rc == -ENODATA)) {
453 lfsck->li_oit_over = 1;
460 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
461 if (rc > 0 || unlikely(rc == -ENODATA))
462 lfsck->li_oit_over = 1;
466 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
469 /* Find the directory for namespace-based traverse. */
470 obj = lfsck_object_find_bottom(env, lfsck, &pos->lp_dir_parent);
472 RETURN(PTR_ERR(obj));
474 /* Remote directory will be scanned by the LFSCK instance
475 * on the MDT where the remote object really resides on.
477 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
478 unlikely(!S_ISDIR(lfsck_object_type(obj))))
481 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
483 /* For the master MDT-object of a striped directory,
484 * reset the iteration from the directory beginning.
486 if (lfsck->li_lmv != NULL && lfsck->li_lmv->ll_lmv_master)
487 pos->lp_dir_cookie = 0;
489 rc = lfsck_open_dir(env, lfsck, pos->lp_dir_cookie);
491 /* The end of the directory. */
499 lfsck_object_put(env, obj);
/* Error path: close any partially opened dir and post all components. */
502 lfsck_close_dir(env, lfsck, rc);
503 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
505 com->lc_ops->lfsck_post(env, com, rc, true);
/* Success path: take the initial checkpoint at the chosen position. */
512 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, true);
513 lfsck->li_pos_current = lfsck->li_pos_checkpoint;
514 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
515 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
520 lfsck->li_time_last_checkpoint = ktime_get_seconds();
521 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
522 LFSCK_CHECKPOINT_INTERVAL;
/*
 * Process one object delivered by the OIT scan: pass it to every
 * scanning component, then, if the object is a directory that needs
 * scanning, load its LMV state and open it for namespace traversal.
 * On error the per-instance failure counters are bumped and any
 * partially opened directory state is torn down.
 */
526 static int lfsck_exec_oit(const struct lu_env *env,
527 struct lfsck_instance *lfsck, struct dt_object *obj)
529 struct lfsck_component *com;
533 LASSERT(lfsck->li_obj_dir == NULL);
535 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
536 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
541 rc = lfsck_needs_scan_dir(env, lfsck, obj);
545 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
547 rc = lfsck_open_dir(env, lfsck, 0);
553 lfsck_fail(env, lfsck, false);
556 lfsck_close_dir(env, lfsck, rc);
/* Positive rc values are internal signals, not errors for the caller. */
558 return rc > 0 ? 0 : rc;
/*
 * Hand one directory entry (@ent, with its unpacked object @type) to
 * every scanning component's exec_dir callback.
 */
561 static int lfsck_exec_dir(const struct lu_env *env,
562 struct lfsck_instance *lfsck,
563 struct lfsck_assistant_object *lso,
564 struct lu_dirent *ent, __u16 type)
566 struct lfsck_component *com;
569 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
570 rc = com->lc_ops->lfsck_exec_dir(env, com, lso, ent, type);
577 static int lfsck_master_dir_engine(const struct lu_env *env,
578 struct lfsck_instance *lfsck);
/*
 * Finish the first-phase scan with status @result: close the current
 * directory, drain any queued striped-directory (LMV) units while the
 * thread is still running, then invoke every component's post callback
 * and reset the checkpoint timers.
 */
580 static int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
583 struct lfsck_component *com;
584 struct lfsck_component *next;
587 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
588 lfsck_close_dir(env, lfsck, result);
/* Drain queued LMV units; each is scanned as its own directory pass. */
590 while (thread_is_running(&lfsck->li_thread) && rc > 0 &&
591 !list_empty(&lfsck->li_list_lmv)) {
592 struct lfsck_lmv_unit *llu;
594 spin_lock(&lfsck->li_lock);
595 llu = list_first_entry(&lfsck->li_list_lmv,
596 struct lfsck_lmv_unit, llu_link);
597 list_del_init(&llu->llu_link);
598 spin_unlock(&lfsck->li_lock);
600 lfsck->li_lmv = &llu->llu_lmv;
601 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
602 rc = lfsck_open_dir(env, lfsck, 0);
604 rc = lfsck_master_dir_engine(env, lfsck);
605 lfsck_close_dir(env, lfsck, result);
611 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
612 rc = com->lc_ops->lfsck_post(env, com, result, false);
615 "%s at the component %u: rc = %d\n",
616 lfsck_lfsck2name(lfsck), (__u32)com->lc_type,
620 lfsck->li_time_last_checkpoint = ktime_get_seconds();
621 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
622 LFSCK_CHECKPOINT_INTERVAL;
624 /* Ignore failures of individual component post calls so the others can go ahead. */
/*
 * Run phase-2 (double scan) for every component on the double-scan list,
 * wait until all outstanding double scans have drained, and — unless the
 * run was paused — move the components back to the idle list. Returns
 * the first failure (rc1) if any, otherwise the last rc.
 */
628 static int lfsck_double_scan(const struct lu_env *env,
629 struct lfsck_instance *lfsck)
631 struct lfsck_component *com;
632 struct lfsck_component *next;
636 list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
637 rc = com->lc_ops->lfsck_double_scan(env, com);
642 wait_event_idle(lfsck->li_thread.t_ctl_waitq,
643 atomic_read(&lfsck->li_double_scan_count) == 0);
/* Paused runs keep components on the double-scan list for resume. */
645 if (lfsck->li_status != LS_PAUSED &&
646 lfsck->li_status != LS_CO_PAUSED) {
647 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
649 spin_lock(&lfsck->li_lock);
650 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
651 spin_unlock(&lfsck->li_lock);
655 return rc1 != 0 ? rc1 : rc;
/*
 * Abort the run: call each component's (optional) quit callback and move
 * every component from the scan and double-scan lists back to the idle
 * list under li_lock.
 */
658 static void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
660 struct lfsck_component *com;
661 struct lfsck_component *next;
663 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
665 if (com->lc_ops->lfsck_quit != NULL)
666 com->lc_ops->lfsck_quit(env, com);
668 spin_lock(&lfsck->li_lock);
/* Scan-list components may also sit on the dir list; unlink both. */
669 list_del_init(&com->lc_link_dir);
670 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
671 spin_unlock(&lfsck->li_lock);
674 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
676 if (com->lc_ops->lfsck_quit != NULL)
677 com->lc_ops->lfsck_quit(env, com);
679 spin_lock(&lfsck->li_lock);
680 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
681 spin_unlock(&lfsck->li_lock);
/*
 * Iterate over the entries of the currently open directory (li_di_dir):
 * for each entry, unpack it, skip ignorable and dot entries, build the
 * assistant object for the parent on first use, and dispatch to the
 * components via lfsck_exec_dir(). Handles periodic checkpointing,
 * speed control, fail-injection hooks, and engine-stop requests.
 */
687 static int lfsck_master_dir_engine(const struct lu_env *env,
688 struct lfsck_instance *lfsck)
690 struct lfsck_thread_info *info = lfsck_env_info(env);
691 struct dt_object *dir = lfsck->li_obj_dir;
692 const struct dt_it_ops *iops = &dir->do_index_ops->dio_it;
693 struct dt_it *di = lfsck->li_di_dir;
694 struct lu_dirent *ent = (struct lu_dirent *)info->lti_key;
695 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
696 struct ptlrpc_thread *thread = &lfsck->li_thread;
697 struct lfsck_assistant_object *lso = NULL;
/* Fault-injection delay; bail out if the engine was stopped meanwhile. */
703 if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY2, cfs_fail_val) &&
704 unlikely(!thread_is_running(thread))) {
706 "%s: scan dir exit for engine stop, parent "DFID", cookie %#llx\n",
707 lfsck_lfsck2name(lfsck),
708 PFID(lfsck_dto2fid(dir)), lfsck->li_cookie_dir);
713 lfsck->li_new_scanned++;
714 rc = iops->rec(env, di, (struct dt_rec *)ent,
717 rc = lfsck_unpack_ent(ent, &lfsck->li_cookie_dir,
722 "%s: scan dir failed at rec(), parent "DFID", cookie %#llx: rc = %d\n",
723 lfsck_lfsck2name(lfsck),
724 PFID(lfsck_dto2fid(dir)),
725 lfsck->li_cookie_dir, rc);
726 lfsck_fail(env, lfsck, true);
727 if (bk->lb_param & LPF_FAILOUT)
733 if (ent->lde_attrs & LUDA_IGNORE)
736 /* skip dot entry. */
737 if (ent->lde_namelen == 1 && ent->lde_name[0] == '.')
/* Lazily build the assistant object for the parent directory. */
741 lso = lfsck_assistant_object_init(env,
742 lfsck_dto2fid(dir), NULL,
743 lfsck->li_pos_current.lp_oit_cookie, true);
745 if (bk->lb_param & LPF_FAILOUT)
746 RETURN(PTR_ERR(lso));
753 /* The type in the @ent structure may have been overwritten,
754 * so we need to pass the @type parameter independently.
756 rc = lfsck_exec_dir(env, lfsck, lso, ent, type);
757 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
761 rc = lfsck_checkpoint(env, lfsck);
762 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Throttle scanning per the configured speed limit. */
766 lfsck_control_speed(lfsck);
767 if (unlikely(!thread_is_running(thread))) {
769 "%s: scan dir exit for engine stop, parent "DFID", cookie %#llx\n",
770 lfsck_lfsck2name(lfsck),
771 PFID(lfsck_dto2fid(dir)),
772 lfsck->li_cookie_dir);
776 if (CFS_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL2)) {
777 spin_lock(&lfsck->li_lock);
778 thread_set_flags(thread, SVC_STOPPING);
779 spin_unlock(&lfsck->li_lock);
780 GOTO(out, rc = -EINVAL);
783 rc = iops->next(env, di);
786 "%s dir engine fail to locate next for the directory "DFID": rc = %d\n",
787 lfsck_lfsck2name(lfsck),
788 PFID(&lfsck->li_pos_current.lp_dir_parent), rc);
/* rc > 0 from next(): directory exhausted; close it unless OIT done. */
791 if (rc > 0 && !lfsck->li_oit_over)
792 lfsck_close_dir(env, lfsck, rc);
798 kref_put(&lso->lso_ref, lfsck_assistant_object_put);
804 * Object-table based iteration engine.
806 * Object-table based iteration is the basic linear engine to scan all the
807 * objects on current device in turn. For each object, it calls all the
808 * registered LFSCK component(s)' API to perform related consistency
811 * It flushes related LFSCK trace files to disk via making checkpoint
812 * periodically. Then if the server crashed or the LFSCK is paused, the
813 * LFSCK can resume from the latest checkpoint.
815 * It also controls the whole LFSCK speed via lfsck_control_speed() to
816 * avoid the server to become overload.
818 * \param[in] env pointer to the thread context
819 * \param[in] lfsck pointer to the lfsck instance
821 * \retval positive number if all objects have been scanned
822 * \retval 0 if the iteration is stopped or paused
823 * \retval negative error number on failure
/*
 * See the comment block above for the full contract: linear object-table
 * scan that feeds every object to lfsck_exec_oit(), interleaving the
 * directory engine whenever a directory scan is open, with FID
 * normalization/filtering, checkpointing, and speed control.
 */
825 static int lfsck_master_oit_engine(const struct lu_env *env,
826 struct lfsck_instance *lfsck)
828 struct lfsck_thread_info *info = lfsck_env_info(env);
829 const struct dt_it_ops *iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
830 struct dt_it *di = lfsck->li_di_oit;
831 struct lu_fid *fid = &info->lti_fid;
832 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
833 struct ptlrpc_thread *thread = &lfsck->li_thread;
834 struct seq_server_site *ss = lfsck_dev_site(lfsck);
835 __u32 idx = lfsck_dev_idx(lfsck);
839 if (unlikely(ss == NULL))
843 struct dt_object *target;
/* A directory scan is in progress: drain it before advancing the OIT. */
845 if (lfsck->li_di_dir != NULL) {
846 rc = lfsck_master_dir_engine(env, lfsck);
851 if (unlikely(lfsck->li_oit_over))
854 if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY1, cfs_fail_val) &&
855 unlikely(!thread_is_running(thread))) {
857 "%s: OIT scan exit for engine stop, cookie %llu\n",
858 lfsck_lfsck2name(lfsck), iops->store(env, di));
863 if (CFS_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
866 lfsck->li_current_oit_processed = 1;
/* Pick up any queued striped-directory (LMV) unit first. */
868 if (!list_empty(&lfsck->li_list_lmv)) {
869 struct lfsck_lmv_unit *llu;
871 spin_lock(&lfsck->li_lock);
872 llu = list_first_entry(&lfsck->li_list_lmv,
873 struct lfsck_lmv_unit, llu_link);
874 list_del_init(&llu->llu_link);
875 spin_unlock(&lfsck->li_lock);
877 lfsck->li_lmv = &llu->llu_lmv;
878 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
879 rc = lfsck_open_dir(env, lfsck, 0);
881 rc = lfsck_master_dir_engine(env, lfsck);
887 lfsck->li_new_scanned++;
888 lfsck->li_pos_current.lp_oit_cookie = iops->store(env, di);
889 rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
892 "%s: OIT scan failed at rec(): rc = %d\n",
893 lfsck_lfsck2name(lfsck), rc);
894 lfsck_fail(env, lfsck, true);
895 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
901 if (unlikely(!fid_is_sane(fid))) {
902 CDEBUG(D_LFSCK, "%s: OIT scan find invalid FID "DFID
904 lfsck_lfsck2name(lfsck), PFID(fid));
908 if (fid_is_idif(fid)) {
909 __u32 idx1 = fid_idif_ost_idx(fid);
/* IDIF FIDs only occur on OSTs, never on the master MDT. */
911 LASSERT(!lfsck->li_master);
914 struct ost_id *oi = &info->lti_oi;
916 if (unlikely(idx1 != 0)) {
917 CDEBUG(D_LFSCK, "%s: invalid IDIF "DFID
918 ", not match device index %u\n",
919 lfsck_lfsck2name(lfsck),
925 /* rebuild the IDIF with index to avoid double
926 * instances for the same object.
928 fid_to_ostid(fid, oi);
929 ostid_to_fid(fid, oi, idx);
931 } else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
932 !fid_is_last_id(fid) &&
933 !lu_fid_eq(fid, &lfsck->li_global_root_fid)) {
935 /* If the FID/object is only used locally and invisible
936 * to external nodes, then LFSCK will not handle it.
938 * dot_lustre sequence has been handled specially.
942 struct lu_seq_range *range = &info->lti_range;
944 if (lfsck->li_master)
945 fld_range_set_mdt(range);
947 fld_range_set_ost(range);
948 rc = fld_local_lookup(env, ss->ss_server_fld,
949 fid_seq(fid), range);
950 if (rc != 0 || range->lsr_index != idx) {
951 /* Remote object will be handled by the LFSCK
952 * instance on the MDT where the remote object
960 target = lfsck_object_find_bottom(env, lfsck, fid);
961 if (IS_ERR(target)) {
962 CDEBUG(D_LFSCK, "%s: OIT scan failed at find target "
963 DFID", cookie %llu: rc = %d\n",
964 lfsck_lfsck2name(lfsck), PFID(fid),
965 iops->store(env, di), rc);
966 lfsck_fail(env, lfsck, true);
967 if (bk->lb_param & LPF_FAILOUT)
968 RETURN(PTR_ERR(target));
973 if (dt_object_exists(target))
974 rc = lfsck_exec_oit(env, lfsck, target);
976 lfsck_object_put(env, target);
977 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
981 rc = lfsck_checkpoint(env, lfsck);
982 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Throttle scanning per the configured speed limit. */
986 lfsck_control_speed(lfsck);
988 if (CFS_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL1)) {
989 spin_lock(&lfsck->li_lock);
990 thread_set_flags(thread, SVC_STOPPING);
991 spin_unlock(&lfsck->li_lock);
995 rc = iops->next(env, di);
/* next() > 0 means the object table is exhausted. */
996 if (unlikely(rc > 0))
997 lfsck->li_oit_over = 1;
998 else if (likely(rc == 0))
999 lfsck->li_current_oit_processed = 0;
1002 "%s oit engine fail to locate next at %llu: rc = %d\n",
1003 lfsck_lfsck2name(lfsck), iops->store(env, di),
1006 if (unlikely(!thread_is_running(thread))) {
1008 "%s: OIT scan exit for engine stop, cookie %llu\n",
1009 lfsck_lfsck2name(lfsck), iops->store(env, di));
1012 } while (rc == 0 || lfsck->li_di_dir != NULL);
/*
 * Main entry point of the LFSCK master thread: set up the OIT iterator,
 * optionally verify .lustre/lost+found, prepare all components, run the
 * OIT engine, then post-process, double-scan, and tear everything down
 * before waking anyone waiting on thread state.
 *
 * NOTE(review): "<a->" style tokens in the captured text were
 * HTML-mangled "&lta->" etc.; restored below — verify against the
 * original source bytes.
 */
1017 int lfsck_master_engine(void *args)
1019 struct lfsck_thread_args *lta = args;
1020 struct lu_env *env = &lta->lta_env;
1021 struct lfsck_instance *lfsck = lta->lta_lfsck;
1022 struct ptlrpc_thread *thread = &lfsck->li_thread;
1023 struct dt_object *oit_obj = lfsck->li_obj_oit;
1024 const struct dt_it_ops *oit_iops = &oit_obj->do_index_ops->dio_it;
1025 struct dt_it *oit_di;
1030 * thread is spawned with all signals set to SIG_IGN, re-enable
1031 * SIGINT for lfsck_stop() to awaken and stop the thread.
1033 allow_signal(SIGINT);
1034 spin_lock(&lfsck->li_lock);
1035 lfsck->li_task = current;
1036 spin_unlock(&lfsck->li_lock);
1038 /* There will be some objects verification during the LFSCK start,
1039 * such as the subsequent lfsck_verify_lpf(). Trigger low layer OI
1040 * scrub before that to handle the potential inconsistency.
1042 oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit);
1043 if (IS_ERR(oit_di)) {
1044 rc = PTR_ERR(oit_di);
1046 "%s: master engine fail to init iteration: rc = %d\n",
1047 lfsck_lfsck2name(lfsck), rc);
1049 GOTO(fini_args, rc);
1052 if (lfsck->li_master &&
1053 (!list_empty(&lfsck->li_list_scan) ||
1054 !list_empty(&lfsck->li_list_double_scan))) {
1055 rc = lfsck_verify_lpf(env, lfsck);
1056 /* FIXME: once OI files are missing, this will fail, it should
1057 * return error, but to satisfy sanity-lfsck test 4 & 5, leave
1058 * it uninitialized here, and any code dereference it need to
1062 CERROR("%s: master engine fail to verify the .lustre/lost+found/, go ahead: rc = %d\n",
1063 lfsck_lfsck2name(lfsck), rc);
/* Publish the OIT iterator so lfsck_stop() etc. can see it. */
1066 spin_lock(&lfsck->li_lock);
1067 lfsck->li_di_oit = oit_di;
1068 spin_unlock(&lfsck->li_lock);
1069 rc = lfsck_prep(env, lfsck, lta->lta_lsp);
1074 "LFSCK entry: oit_flags = %#x, dir_flags = %#x, oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID", pid = %d\n",
1075 lfsck->li_args_oit, lfsck->li_args_dir,
1076 lfsck->li_pos_checkpoint.lp_oit_cookie,
1077 lfsck->li_pos_checkpoint.lp_dir_cookie,
1078 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
/* A stop may have raced with startup; only then flip to RUNNING. */
1081 spin_lock(&lfsck->li_lock);
1082 if (unlikely(!thread_is_starting(thread))) {
1083 spin_unlock(&lfsck->li_lock);
1084 GOTO(fini_oit, rc = 0);
1087 thread_set_flags(thread, SVC_RUNNING);
1088 spin_unlock(&lfsck->li_lock);
1089 wake_up(&thread->t_ctl_waitq);
/* Wait until the starter unplugs us or asks the thread to stop. */
1091 wait_event_idle(thread->t_ctl_waitq,
1092 lfsck->li_start_unplug ||
1093 !thread_is_running(thread));
1094 if (!thread_is_running(thread))
1095 GOTO(fini_oit, rc = 0);
1097 if (!list_empty(&lfsck->li_list_scan) ||
1098 list_empty(&lfsck->li_list_double_scan))
1099 rc = lfsck_master_oit_engine(env, lfsck);
1103 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
1105 "LFSCK exit: oit_flags = %#x, dir_flags = %#x, oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID", pid = %d: rc = %d\n",
1106 lfsck->li_args_oit, lfsck->li_args_dir,
1107 lfsck->li_pos_checkpoint.lp_oit_cookie,
1108 lfsck->li_pos_checkpoint.lp_dir_cookie,
1109 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
1112 if (!CFS_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
1113 rc = lfsck_post(env, lfsck, rc);
1115 lfsck_close_dir(env, lfsck, rc);
1118 lfsck_di_oit_put(env, lfsck);
1119 oit_iops->fini(env, oit_di);
1121 if (!list_empty(&lfsck->li_list_double_scan))
1122 rc = lfsck_double_scan(env, lfsck);
1126 lfsck_quit(env, lfsck);
1129 /* XXX: Purge the pinned objects in the future. */
1132 spin_lock(&lfsck->li_lock);
1133 thread_set_flags(thread, SVC_STOPPED);
1134 lfsck->li_task = NULL;
1135 spin_unlock(&lfsck->li_lock);
1136 wake_up(&thread->t_ctl_waitq);
1137 lfsck_thread_args_fini(lta);
/*
 * Check whether the assistant's request list is empty, taking lad_lock
 * for a consistent answer (the return statements are elided in this
 * view — presumably true when the list is empty).
 */
1141 static inline bool lfsck_assistant_req_empty(struct lfsck_assistant_data *lad)
1145 spin_lock(&lad->lad_lock);
1146 if (list_empty(&lad->lad_req_list))
1148 spin_unlock(&lad->lad_lock);
1154 * Query the LFSCK status from the instances on remote servers.
1156 * The LFSCK assistant thread queries the LFSCK instances on other
1157 * servers (MDT/OST) about their status, such as whether they have
1158 * finished the phase1/phase2 scanning or not, and so on.
1160 * \param[in] env pointer to the thread context
1161 * \param[in] com pointer to the lfsck component
1163 * \retval 0 for success
1164 * \retval negative error number on failure
/*
 * See the comment block above for the contract: send async LE_QUERY
 * requests to every target still on the phase-1 list (MDTs first, then
 * OSTs for layout LFSCK), using a touch generation to visit each target
 * exactly once per sweep.
 *
 * NOTE(review): "<ds->"/"<d->" tokens in the captured text were
 * HTML-mangled "&ltds->"/"&ltd->"; restored below — verify against the
 * original source bytes.
 */
1166 static int lfsck_assistant_query_others(const struct lu_env *env,
1167 struct lfsck_component *com)
1169 struct lfsck_thread_info *info = lfsck_env_info(env);
1170 struct lfsck_request *lr = &info->lti_lr;
1171 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1172 struct lfsck_instance *lfsck = com->lc_lfsck;
1173 struct lfsck_assistant_data *lad = com->lc_data;
1174 struct ptlrpc_request_set *set;
1175 struct lfsck_tgt_descs *ltds;
1176 struct lfsck_tgt_desc *ltd;
1177 struct list_head *phase_head;
1182 set = ptlrpc_prep_set();
/* New generation marks targets already visited in this sweep. */
1186 lad->lad_touch_gen++;
1187 memset(lr, 0, sizeof(*lr));
1188 lr->lr_event = LE_QUERY;
1189 lr->lr_active = com->lc_type;
1191 memset(laia, 0, sizeof(*laia));
1192 laia->laia_com = com;
/* Query MDTs first; only layout LFSCK also queries the OSTs. */
1195 if (!list_empty(&lad->lad_mdt_phase1_list)) {
1196 ltds = &lfsck->li_mdt_descs;
1198 phase_head = &lad->lad_mdt_phase1_list;
1199 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1204 ltds = &lfsck->li_ost_descs;
1205 lr->lr_flags = LEF_TO_OST;
1206 phase_head = &lad->lad_ost_phase1_list;
1209 laia->laia_ltds = ltds;
1210 spin_lock(&ltds->ltd_lock);
1211 while (!list_empty(phase_head)) {
1212 struct list_head *phase_list;
/* Layout and namespace LFSCK track targets on different lists. */
1215 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1216 ltd = list_first_entry(phase_head,
1217 struct lfsck_tgt_desc,
1218 ltd_layout_phase_list);
1219 phase_list = &ltd->ltd_layout_phase_list;
1220 gen = &ltd->ltd_layout_gen;
1222 ltd = list_first_entry(phase_head,
1223 struct lfsck_tgt_desc,
1224 ltd_namespace_phase_list);
1225 phase_list = &ltd->ltd_namespace_phase_list;
1226 gen = &ltd->ltd_namespace_gen;
/* Already visited in this sweep: the list has wrapped around. */
1229 if (*gen == lad->lad_touch_gen)
1232 *gen = lad->lad_touch_gen;
1233 list_move_tail(phase_list, phase_head);
1234 atomic_inc(&ltd->ltd_ref);
1235 laia->laia_ltd = ltd;
/* Drop the lock across the (potentially blocking) async request. */
1236 spin_unlock(&ltds->ltd_lock);
1237 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1238 lfsck_async_interpret_common,
1242 "%s: LFSCK assistant fail to query %s %x for %s: rc = %d\n",
1243 lfsck_lfsck2name(lfsck),
1244 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1245 ltd->ltd_index, lad->lad_name, rc);
1249 spin_lock(&ltds->ltd_lock);
1251 spin_unlock(&ltds->ltd_lock);
1253 rc = ptlrpc_set_wait(env, set);
1255 ptlrpc_set_destroy(set);
/* Layout LFSCK: after the MDTs are drained, repeat for the OSTs. */
1259 if (com->lc_type == LFSCK_TYPE_LAYOUT && !(lr->lr_flags & LEF_TO_OST) &&
1260 list_empty(&lad->lad_mdt_phase1_list))
1264 ptlrpc_set_destroy(set);
1266 RETURN(rc1 != 0 ? rc1 : rc);
1270 * Notify the LFSCK event to the instances on remote servers.
1272 * The LFSCK assistant thread notifies the LFSCK instances on other
1273 * servers (MDT/OST) about some events, such as start new scanning,
1274 * stop the scanning, this LFSCK instance will exit, and so on.
1276 * \param[in] env pointer to the thread context
1277 * \param[in] com pointer to the lfsck component
1278 * \param[in] lr pointer to the LFSCK event request
1280 * \retval 0 for success
1281 * \retval negative error number on failure
1283 static int lfsck_assistant_notify_others(const struct lu_env *env,
1284 struct lfsck_component *com,
1285 struct lfsck_request *lr)
1287 struct lfsck_thread_info *info = lfsck_env_info(env);
1288 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1289 struct lfsck_instance *lfsck = com->lc_lfsck;
1290 struct lfsck_assistant_data *lad = com->lc_data;
1291 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1292 struct ptlrpc_request_set *set;
1293 struct lfsck_tgt_descs *ltds;
1294 struct lfsck_tgt_desc *ltd;
1295 struct lfsck_tgt_desc *next;
1301 set = ptlrpc_prep_set();
1305 lr->lr_index = lfsck_dev_idx(lfsck);
1306 lr->lr_active = com->lc_type;
1308 memset(laia, 0, sizeof(*laia));
1309 laia->laia_com = com;
1312 switch (lr->lr_event) {
1314 if (com->lc_type != LFSCK_TYPE_LAYOUT)
1317 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN;
1318 lr->lr_speed = bk->lb_speed_limit;
1319 lr->lr_version = bk->lb_version;
1320 lr->lr_param |= bk->lb_param;
1321 lr->lr_async_windows = bk->lb_async_windows;
1322 lr->lr_flags = LEF_TO_OST;
1324 /* Notify OSTs firstly, then handle other MDTs if needed. */
1325 ltds = &lfsck->li_ost_descs;
1326 laia->laia_ltds = ltds;
1327 down_read(<ds->ltd_rw_sem);
1328 for_each_set_bit(idx, ltds->ltd_tgts_bitmap,
1329 ltds->ltd_tgts_mask_len) {
1330 ltd = lfsck_tgt_get(ltds, idx);
1331 LASSERT(ltd != NULL);
1333 laia->laia_ltd = ltd;
1334 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1335 lfsck_async_interpret_common,
1336 laia, LFSCK_NOTIFY);
1338 lfsck_lad_set_bitmap(env, com, idx);
1340 "%s: LFSCK assistant fail to notify OST %x for %s start: rc = %d\n",
1341 lfsck_lfsck2name(lfsck), idx,
1346 up_read(<ds->ltd_rw_sem);
1349 rc = ptlrpc_set_wait(env, set);
1351 ptlrpc_set_destroy(set);
1356 if (!(bk->lb_param & LPF_ALL_TGT))
1359 /* link other MDT targets locallly. */
1360 ltds = &lfsck->li_mdt_descs;
1361 spin_lock(<ds->ltd_lock);
1362 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1363 for_each_set_bit(idx, ltds->ltd_tgts_bitmap,
1364 ltds->ltd_tgts_mask_len) {
1365 ltd = lfsck_ltd2tgt(ltds, idx);
1366 LASSERT(ltd != NULL);
1368 if (!list_empty(<d->ltd_layout_list))
1371 list_add_tail(<d->ltd_layout_list,
1372 &lad->lad_mdt_list);
1373 list_add_tail(<d->ltd_layout_phase_list,
1374 &lad->lad_mdt_phase1_list);
1377 for_each_set_bit(idx, ltds->ltd_tgts_bitmap,
1378 ltds->ltd_tgts_mask_len) {
1379 ltd = lfsck_ltd2tgt(ltds, idx);
1380 LASSERT(ltd != NULL);
1382 if (!list_empty(<d->ltd_namespace_list))
1385 list_add_tail(<d->ltd_namespace_list,
1386 &lad->lad_mdt_list);
1387 list_add_tail(<d->ltd_namespace_phase_list,
1388 &lad->lad_mdt_phase1_list);
1391 spin_unlock(<ds->ltd_lock);
1394 case LE_PHASE2_DONE:
1395 case LE_PEER_EXIT: {
1396 struct list_head *phase_head;
1398 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1399 if (bk->lb_param & LPF_ALL_TGT) {
1400 phase_head = &lad->lad_mdt_list;
1401 ltds = &lfsck->li_mdt_descs;
1402 if (lr->lr_event == LE_STOP) {
1403 /* unlink other MDT targets locallly. */
1404 spin_lock(<ds->ltd_lock);
1405 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1406 list_for_each_entry_safe(ltd, next,
1407 phase_head, ltd_layout_list) {
1409 <d->ltd_layout_phase_list);
1411 <d->ltd_layout_list);
1414 list_for_each_entry_safe(ltd, next,
1416 ltd_namespace_list) {
1418 <d->ltd_namespace_phase_list);
1420 <d->ltd_namespace_list);
1423 spin_unlock(<ds->ltd_lock);
1425 if (com->lc_type != LFSCK_TYPE_LAYOUT)
1428 lr->lr_flags |= LEF_TO_OST;
1429 phase_head = &lad->lad_ost_list;
1430 ltds = &lfsck->li_ost_descs;
1432 lr->lr_flags &= ~LEF_TO_OST;
1434 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1437 lr->lr_flags |= LEF_TO_OST;
1438 phase_head = &lad->lad_ost_list;
1439 ltds = &lfsck->li_ost_descs;
1443 laia->laia_ltds = ltds;
1444 spin_lock(<ds->ltd_lock);
1445 while (!list_empty(phase_head)) {
1446 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1447 ltd = list_first_entry(phase_head,
1448 struct lfsck_tgt_desc,
1450 if (!list_empty(<d->ltd_layout_phase_list))
1452 <d->ltd_layout_phase_list);
1453 list_del_init(<d->ltd_layout_list);
1455 ltd = list_first_entry(phase_head,
1456 struct lfsck_tgt_desc,
1457 ltd_namespace_list);
1458 if (!list_empty(<d->ltd_namespace_phase_list))
1460 <d->ltd_namespace_phase_list);
1461 list_del_init(<d->ltd_namespace_list);
1463 atomic_inc(<d->ltd_ref);
1464 laia->laia_ltd = ltd;
1465 spin_unlock(<ds->ltd_lock);
1466 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1467 lfsck_async_interpret_common,
1468 laia, LFSCK_NOTIFY);
1471 "%s: LFSCK assistant fail to notify %s %x for %s stop/phase2_done/ peer_exit: rc = %d\n",
1472 lfsck_lfsck2name(lfsck),
1473 (lr->lr_flags & LEF_TO_OST) ?
1474 "OST" : "MDT", ltd->ltd_index,
1478 spin_lock(<ds->ltd_lock);
1480 spin_unlock(<ds->ltd_lock);
1482 rc = ptlrpc_set_wait(env, set);
1484 ptlrpc_set_destroy(set);
1488 if (com->lc_type == LFSCK_TYPE_LAYOUT &&
1489 !(lr->lr_flags & LEF_TO_OST)) {
1490 lr->lr_flags |= LEF_TO_OST;
1491 phase_head = &lad->lad_ost_list;
1492 ltds = &lfsck->li_ost_descs;
1497 case LE_PHASE1_DONE:
1498 lad->lad_ops->la_sync_failures(env, com, lr);
1499 lad->lad_touch_gen++;
1500 ltds = &lfsck->li_mdt_descs;
1501 laia->laia_ltds = ltds;
1502 spin_lock(<ds->ltd_lock);
1503 while (!list_empty(&lad->lad_mdt_list)) {
1504 struct list_head *list;
1507 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1508 ltd = list_first_entry(&lad->lad_mdt_list,
1509 struct lfsck_tgt_desc,
1511 list = <d->ltd_layout_list;
1512 gen = <d->ltd_layout_gen;
1514 struct lfsck_namespace *ns = com->lc_file_ram;
1516 ltd = list_first_entry(&lad->lad_mdt_list,
1517 struct lfsck_tgt_desc,
1518 ltd_namespace_list);
1519 list = <d->ltd_namespace_list;
1520 gen = <d->ltd_namespace_gen;
1521 lr->lr_flags2 = ns->ln_flags & ~LF_INCOMPLETE;
1524 if (*gen == lad->lad_touch_gen)
1527 *gen = lad->lad_touch_gen;
1528 list_move_tail(list, &lad->lad_mdt_list);
1529 if (ltd->ltd_synced_failures)
1532 atomic_inc(<d->ltd_ref);
1533 laia->laia_ltd = ltd;
1534 spin_unlock(<ds->ltd_lock);
1535 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1536 lfsck_async_interpret_common,
1537 laia, LFSCK_NOTIFY);
1540 "%s: LFSCK assistant fail to notify MDT %x for %s phase1 done: rc = %d\n",
1541 lfsck_lfsck2name(lfsck), ltd->ltd_index,
1545 spin_lock(<ds->ltd_lock);
1547 spin_unlock(<ds->ltd_lock);
1551 "%s: LFSCK assistant unexpected LFSCK event: rc = %d\n",
1552 lfsck_lfsck2name(lfsck), lr->lr_event);
1557 rc1 = ptlrpc_set_wait(env, set);
1558 ptlrpc_set_destroy(set);
1560 RETURN(rc != 0 ? rc : rc1);
1564 * The LFSCK assistant thread is triggered by the LFSCK main engine.
1565 * They co-work together as an asynchronous pipeline: the LFSCK main
1566 * engine scans the system and pre-fetches the objects, attributes,
1567 * or name entries, etc, and pushes them into the pipeline as input
1568 * requests for the LFSCK assistant thread; on the other end of the
1569 * pipeline, the LFSCK assistant thread performs the real check and
1570 * repair for every request from the main engine.
1572 * Generally, the assistant engine may be blocked while checking or
1573 * repairing something, so the LFSCK main engine will run somewhat
1574 * faster. On the other hand, the LFSCK main engine will drive multiple
1575 * assistant threads in parallel, meaning that for each LFSCK component
1576 * on the master (such as layout LFSCK, namespace LFSCK), there is an
1577 * independent LFSCK assistant thread. So under such 1:N multiple
1578 * asynchronous pipelines mode, the whole LFSCK performance will be much
1579 * better than checking/repairing everything by the LFSCK main engine itself.
1581 int lfsck_assistant_engine(void *args)
1583 struct lfsck_thread_args *lta = args;
1584 struct lu_env *env = <a->lta_env;
1585 struct lfsck_component *com = lta->lta_com;
1586 struct lfsck_instance *lfsck = lta->lta_lfsck;
1587 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1588 struct lfsck_position *pos = &com->lc_pos_start;
1589 struct lfsck_thread_info *info = lfsck_env_info(env);
1590 struct lfsck_request *lr = &info->lti_lr;
1591 struct lfsck_assistant_data *lad = com->lc_data;
1592 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1593 struct ptlrpc_thread *athread = &lad->lad_thread;
1594 const struct lfsck_assistant_operations *lao = lad->lad_ops;
1595 struct lfsck_assistant_req *lar;
1601 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread start\n",
1602 lfsck_lfsck2name(lfsck), lad->lad_name);
1604 memset(lr, 0, sizeof(*lr));
1605 lr->lr_event = LE_START;
1606 if (pos->lp_oit_cookie <= 1)
1607 lr->lr_param = LPF_RESET;
1608 rc = lfsck_assistant_notify_others(env, com, lr);
1611 "%s: LFSCK assistant fail to notify others to start %s: rc = %d\n",
1612 lfsck_lfsck2name(lfsck), lad->lad_name, rc);
1617 * thread is spawned with all signals set to SIG_IGN, re-enable
1618 * SIGINT for lfsck_stop() to awaken and stop the thread.
1620 allow_signal(SIGINT);
1621 spin_lock(&lad->lad_lock);
1622 lad->lad_task = current;
1623 thread_set_flags(athread, SVC_RUNNING);
1624 spin_unlock(&lad->lad_lock);
1625 wake_up(&mthread->t_ctl_waitq);
1628 while (!list_empty(&lad->lad_req_list)) {
1629 bool wakeup = false;
1631 if (unlikely(test_bit(LAD_EXIT, &lad->lad_flags) ||
1632 !thread_is_running(mthread)))
1633 GOTO(cleanup, rc = lad->lad_post_result);
1635 lar = list_first_entry(&lad->lad_req_list,
1636 struct lfsck_assistant_req,
1638 /* Only the lfsck_assistant_engine thread itself can
1639 * remove the "lar" from the head of the list, LFSCK
1640 * engine thread only inserts other new "lar" at the
1641 * end of the list. So it is safe to handle current
1642 * "lar" without the spin_lock.
1644 rc = lao->la_handler_p1(env, com, lar);
1645 spin_lock(&lad->lad_lock);
1646 list_del_init(&lar->lar_list);
1647 lad->lad_prefetched--;
1648 /* Wake up the main engine thread only when the list
1649 * is empty or half of the prefetched items have been
1650 * handled to avoid too frequent thread schedule.
1652 if (lad->lad_prefetched <= (bk->lb_async_windows / 2))
1654 spin_unlock(&lad->lad_lock);
1656 wake_up(&mthread->t_ctl_waitq);
1658 lao->la_req_fini(env, lar);
1659 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
1663 wait_event_idle(athread->t_ctl_waitq,
1664 !lfsck_assistant_req_empty(lad) ||
1665 test_bit(LAD_EXIT, &lad->lad_flags) ||
1666 test_bit(LAD_TO_POST, &lad->lad_flags) ||
1667 test_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags));
1669 if (unlikely(test_bit(LAD_EXIT, &lad->lad_flags)))
1670 GOTO(cleanup, rc = lad->lad_post_result);
1672 if (!list_empty(&lad->lad_req_list))
1675 if (test_bit(LAD_TO_POST, &lad->lad_flags)) {
1676 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread post\n",
1677 lfsck_lfsck2name(lfsck), lad->lad_name);
1679 if (unlikely(test_bit(LAD_EXIT, &lad->lad_flags)))
1680 GOTO(cleanup, rc = lad->lad_post_result);
1682 clear_bit(LAD_TO_POST, &lad->lad_flags);
1683 LASSERT(lad->lad_post_result > 0);
1685 /* Wakeup the master engine to go ahead. */
1686 wake_up(&mthread->t_ctl_waitq);
1688 memset(lr, 0, sizeof(*lr));
1689 lr->lr_event = LE_PHASE1_DONE;
1690 lr->lr_status = lad->lad_post_result;
1691 rc = lfsck_assistant_notify_others(env, com, lr);
1694 "%s: LFSCK assistant notified others for %s post: rc = %d\n",
1695 lfsck_lfsck2name(lfsck),
1699 if (test_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags)) {
1700 clear_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags);
1701 atomic_inc(&lfsck->li_double_scan_count);
1702 set_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags);
1703 wake_up(&mthread->t_ctl_waitq);
1705 com->lc_new_checked = 0;
1706 com->lc_new_scanned = 0;
1707 com->lc_time_last_checkpoint = ktime_get_seconds();
1708 com->lc_time_next_checkpoint =
1709 com->lc_time_last_checkpoint +
1710 LFSCK_CHECKPOINT_INTERVAL;
1713 "%s: LFSCK assistant sync before the second-stage scaning\n",
1714 lfsck_lfsck2name(lfsck));
1716 /* Flush async updates before handling orphan. */
1717 rc2 = dt_sync(env, lfsck->li_next);
1720 "%s: LFSCK assistant phase2 scan start, synced: rc = %d\n",
1721 lfsck_lfsck2name(lfsck), rc2);
1723 if (CFS_FAIL_CHECK(OBD_FAIL_LFSCK_NO_DOUBLESCAN))
1724 GOTO(cleanup, rc = 0);
1726 while (test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags)) {
1729 rc = lfsck_assistant_query_others(env, com);
1730 if (lfsck_phase2_next_ready(lad))
1736 /* Pull LFSCK status on related targets once
1737 * per 30 seconds if we are not notified.
1739 while (seconds > 0 &&
1740 wait_event_idle_timeout(
1741 athread->t_ctl_waitq,
1742 lfsck_phase2_next_ready(lad) ||
1745 !thread_is_running(mthread),
1746 cfs_time_seconds(1)) == 0)
1750 test_bit(LAD_EXIT, &lad->lad_flags) ||
1751 !thread_is_running(mthread)))
1752 GOTO(cleanup, rc = 0);
1758 rc = lao->la_handler_p2(env, com);
1763 test_bit(LAD_EXIT, &lad->lad_flags) ||
1764 !thread_is_running(mthread)))
1765 GOTO(cleanup, rc = 0);
1771 /* Cleanup the unfinished requests. */
1772 spin_lock(&lad->lad_lock);
1774 lad->lad_assistant_status = rc;
1776 if (test_bit(LAD_EXIT, &lad->lad_flags) && lad->lad_post_result <= 0)
1777 lao->la_fill_pos(env, com, &lfsck->li_pos_checkpoint);
1779 thread_set_flags(athread, SVC_STOPPING);
1780 while (!list_empty(&lad->lad_req_list)) {
1781 lar = list_first_entry(&lad->lad_req_list,
1782 struct lfsck_assistant_req,
1784 list_del_init(&lar->lar_list);
1785 lad->lad_prefetched--;
1786 spin_unlock(&lad->lad_lock);
1787 lao->la_req_fini(env, lar);
1788 spin_lock(&lad->lad_lock);
1790 spin_unlock(&lad->lad_lock);
1792 memset(lr, 0, sizeof(*lr));
1794 lr->lr_event = LE_PHASE2_DONE;
1796 } else if (rc == 0) {
1797 if (lfsck->li_flags & LPF_ALL_TGT) {
1798 lr->lr_event = LE_STOP;
1799 lr->lr_status = LS_STOPPED;
1801 lr->lr_event = LE_PEER_EXIT;
1802 switch (lfsck->li_status) {
1805 lr->lr_status = LS_CO_PAUSED;
1809 lr->lr_status = LS_CO_STOPPED;
1813 "%s: LFSCK assistant unknown status: rc = %d\n",
1814 lfsck_lfsck2name(lfsck),
1816 lr->lr_status = LS_CO_FAILED;
1821 if (lfsck->li_flags & LPF_ALL_TGT) {
1822 lr->lr_event = LE_STOP;
1823 lr->lr_status = LS_FAILED;
1825 lr->lr_event = LE_PEER_EXIT;
1826 lr->lr_status = LS_CO_FAILED;
1830 rc1 = lfsck_assistant_notify_others(env, com, lr);
1833 "%s: LFSCK assistant failed to notify others for %s quit: rc = %d\n",
1834 lfsck_lfsck2name(lfsck), lad->lad_name, rc1);
1838 CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before exit\n",
1839 lfsck_lfsck2name(lfsck));
1841 /* Flush async updates before exit. */
1842 rc2 = dt_sync(env, lfsck->li_next);
1844 CDEBUG(D_LFSCK, "%s: LFSCK assistant synced before exit: rc = %d\n",
1845 lfsck_lfsck2name(lfsck), rc2);
1847 /* Under force exit case, some requests may be just freed without
1848 * verification, those objects should be re-handled when next run.
1849 * So not update the on-disk trace file under such case.
1851 if (test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags)) {
1852 if (!test_bit(LAD_EXIT, &lad->lad_flags))
1853 rc1 = lao->la_double_scan_result(env, com, rc);
1856 "%s: LFSCK assistant phase2 scan finished: rc = %d\n",
1857 lfsck_lfsck2name(lfsck), rc1 != 0 ? rc1 : rc);
1861 if (test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags))
1862 atomic_dec(&lfsck->li_double_scan_count);
1864 spin_lock(&lad->lad_lock);
1865 lad->lad_assistant_status = (rc1 != 0 ? rc1 : rc);
1866 thread_set_flags(athread, SVC_STOPPED);
1867 lad->lad_task = NULL;
1868 spin_unlock(&lad->lad_lock);
1870 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread exit: rc = %d\n",
1871 lfsck_lfsck2name(lfsck), lad->lad_name,
1872 lad->lad_assistant_status);
1874 lfsck_thread_args_fini(lta);
1875 wake_up(&mthread->t_ctl_waitq);