4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, 2016, Intel Corporation.
26 * lustre/lfsck/lfsck_engine.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <lu_object.h>
34 #include <dt_object.h>
35 #include <lustre_net.h>
36 #include <lustre_fid.h>
37 #include <obd_support.h>
38 #include <lustre_lib.h>
40 #include "lfsck_internal.h"
/* Unpack an on-disk (little-endian) directory entry in place: convert the
 * FID and entry fields to CPU byte order, return the directory hash cookie
 * via @cookie and the object type (stored in a struct luda_type after the
 * name when LUDA_TYPE is set) via @type, and NUL-terminate the name. */
42 int lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie, __u16 *type)
45 int align = sizeof(*lt) - 1;
48 fid_le_to_cpu(&ent->lde_fid, &ent->lde_fid);
49 *cookie = le64_to_cpu(ent->lde_hash);
50 ent->lde_reclen = le16_to_cpu(ent->lde_reclen);
51 ent->lde_namelen = le16_to_cpu(ent->lde_namelen);
52 ent->lde_attrs = le32_to_cpu(ent->lde_attrs);
/* Without LUDA_TYPE there is no type record stored after the name. */
54 if (unlikely(!(ent->lde_attrs & LUDA_TYPE)))
/* The luda_type record follows the name, rounded up to its alignment. */
57 len = (ent->lde_namelen + align) & ~align;
58 lt = (struct luda_type *)(ent->lde_name + len);
59 *type = le16_to_cpu(lt->lt_type);
61 /* Make sure the name is terminated with '\0'. The data (object type)
62 * after ent::lde_name maybe broken, but we have stored such data in
63 * the output parameter @type as above. */
64 ent->lde_name[ent->lde_namelen] = '\0';
/* Detach the OIT (object table) iterator from the lfsck instance under
 * li_lock so concurrent readers of li_di_oit stop seeing it.
 * NOTE(review): the iterator reference is presumably dropped via
 * iops->put() in lines not visible here -- confirm. */
69 static void lfsck_di_oit_put(const struct lu_env *env, struct lfsck_instance *lfsck)
71 const struct dt_it_ops *iops;
74 spin_lock(&lfsck->li_lock);
75 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
76 di = lfsck->li_di_oit;
/* Clear the published iterator pointer before unlocking. */
77 lfsck->li_di_oit = NULL;
78 spin_unlock(&lfsck->li_lock);
/* Detach the directory iterator from the lfsck instance under li_lock and
 * reset the saved directory cookie. Mirrors lfsck_di_oit_put() for the
 * namespace (directory) iteration.
 * NOTE(review): the iterator reference is presumably dropped via
 * iops->put() in lines not visible here -- confirm. */
82 static void lfsck_di_dir_put(const struct lu_env *env, struct lfsck_instance *lfsck)
84 const struct dt_it_ops *iops;
87 spin_lock(&lfsck->li_lock);
88 iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
89 di = lfsck->li_di_dir;
90 lfsck->li_di_dir = NULL;
/* Reset the cookie: there is no current position once detached. */
91 lfsck->li_cookie_dir = 0;
92 spin_unlock(&lfsck->li_lock);
/* Fetch the FID of @obj's parent by looking up its ".." entry.
 * Fails early when @obj is not a directory or cannot be used as an
 * index (dt_try_as_dir()). */
96 static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
99 if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
100 !dt_try_as_dir(env, obj)))
/* ".." lookup returns the parent FID into @fid. */
103 return dt_lookup(env, obj, (struct dt_rec *)fid,
104 (const struct dt_key *)"..");
108 * Check whether needs to scan the directory or not.
110 * 1) If we are not doing namespace LFSCK, or the given @obj is not directory,
111 * then needs not to scan the @obj. Otherwise,
112 * 2) Global /ROOT needs to be scanned, backend root needs not to be scanned.
113 * 3) If the @obj is neither IGIF nor normal FID (including .lustre and its
114 * sub-directories that have been scanned when the LFSCK engine start),
115 * then needs not to be scanned.
116 * 4) If it is a remote object, then scanning the object will be done on the
117 * MDT on which the object really resides.
118 * 5) If the local object has normal FID, then needs to be scanned. Otherwise,
119 * 6) If the object has linkEA, then needs to be scanned. Otherwise,
120 * 7) If none of the previous conditions are true, we need to check the parent
121 * directories whether this subdirectory is in a tree that should be scanned.
122 * Set the parent as current @obj, repeat 2)-7).
124 * \param[in] env pointer to the thread context
125 * \param[in] lfsck pointer to the lfsck instance
126 * \param[in] obj pointer to the object to be checked
128 * \retval positive number if the directory needs to be scanned
129 * \retval 0 if the directory needs NOT to be scanned
130 * \retval negative error number on failure
132 static int lfsck_needs_scan_dir(const struct lu_env *env,
133 struct lfsck_instance *lfsck,
134 struct dt_object *obj)
136 struct lfsck_thread_info *info = lfsck_env_info(env);
137 struct lu_fid *fid = &info->lti_fid;
138 struct lu_seq_range *range = &info->lti_range;
139 struct lu_attr *la = &info->lti_la;
140 struct seq_server_site *ss = lfsck_dev_site(lfsck);
141 __u32 idx = lfsck_dev_idx(lfsck);
/* No namespace components registered, or not a directory: no dir scan. */
145 if (list_empty(&lfsck->li_list_dir) || !S_ISDIR(lfsck_object_type(obj)))
148 *fid = *lfsck_dto2fid(obj);
149 rc = dt_attr_get(env, obj, la);
150 if (unlikely(rc || (la->la_valid & LA_FLAGS &&
151 la->la_flags & LUSTRE_ORPHAN_FL))) {
152 /* Orphan directory is empty, does not need scan. */
154 "%s: skip orphan dir "DFID", %llx/%x: rc = %d\n",
155 lfsck_lfsck2name(lfsck), PFID(fid),
156 la->la_valid, la->la_flags, rc);
164 /* Global /ROOT is visible. */
165 if (unlikely(lu_fid_eq(fid, &lfsck->li_global_root_fid)))
168 /* Backend root is invisible. */
169 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
/* Neither normal FID nor IGIF: internal object, not client-visible. */
172 if (!fid_is_norm(fid) && !fid_is_igif(fid))
/* Resolve which MDT owns the FID sequence of the current ancestor. */
175 fld_range_set_mdt(range);
176 rc = fld_local_lookup(env, ss->ss_server_fld,
177 fid_seq(fid), range);
178 if (rc != 0 || range->lsr_index != idx)
179 /* Current FID should NOT be for the input parameter
180 * @obj, because the lfsck_master_oit_engine() has
181 * filtered out agent object. So current FID is for
182 * the ancestor of the original input parameter @obj.
183 * So the ancestor is a remote directory. The input
184 * parameter @obj is local directory, and should be
185 * scanned under such case. */
188 /* normal FID on this target (locally) must be for the
189 * client-side visible object. */
190 if (fid_is_norm(fid))
193 /* Only true after "obj = NULL" set below */
195 obj = lfsck_object_find_bottom(env, lfsck, fid)
200 if (!dt_object_exists(obj))
/* Probe for linkEA under read lock; a dead object ends the walk. */
204 dt_read_lock(env, obj, DT_TGT_CHILD);
205 if (unlikely(lfsck_is_dead_obj(obj))) {
206 dt_read_unlock(env, obj);
/* Zero-length buffer: only checks linkEA existence, not its content. */
211 rc = dt_xattr_get(env, obj,
212 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK);
213 dt_read_unlock(env, obj);
217 if (rc < 0 && rc != -ENODATA)
/* No linkEA: climb to the parent and re-check the same conditions. */
220 rc = lfsck_parent_fid(env, obj, fid);
222 lfsck_object_put(env, obj);
228 if (!fid_is_sane(fid))
/* Release the last ancestor reference taken during the walk; depth 0
 * means @obj is still the caller's object and must not be put here. */
233 if (depth > 0 && obj != NULL)
234 lfsck_object_put(env, obj);
/* Read the LMV EA of @obj and set up lfsck::li_lmv / li_obj_dir for the
 * directory scan. -ENODATA means a plain (non-striped) directory; an
 * LMV_MAGIC master object gets a slave-record array allocated for
 * verifying its stripes; anything else is treated as a stripe (slave). */
239 static int lfsck_load_stripe_lmv(const struct lu_env *env,
240 struct lfsck_instance *lfsck,
241 struct dt_object *obj)
243 struct lmv_mds_md_v1 *lmv = &lfsck_env_info(env)->lti_lmv;
244 struct lfsck_lmv *llmv;
/* Must not already have a directory scan in progress. */
248 LASSERT(lfsck->li_obj_dir == NULL);
249 LASSERT(lfsck->li_lmv == NULL);
251 rc = lfsck_read_stripe_lmv(env, lfsck, obj, lmv);
252 if (rc == -ENODATA) {
/* Plain directory: no lfsck_lmv needed, just pin the object. */
253 lfsck->li_obj_dir = lfsck_object_get(obj);
265 if (lmv->lmv_magic == LMV_MAGIC) {
266 struct lfsck_slave_lmv_rec *lslr;
269 llmv->ll_lmv_master = 1;
/* Clamp the slave-record allocation to a sane stripe count. */
270 if (lmv->lmv_stripe_count < 1)
271 stripes = LFSCK_LMV_DEF_STRIPES;
272 else if (lmv->lmv_stripe_count > LFSCK_LMV_MAX_STRIPES)
273 stripes = LFSCK_LMV_MAX_STRIPES;
275 stripes = lmv->lmv_stripe_count;
277 OBD_ALLOC_LARGE(lslr, sizeof(*lslr) * stripes);
284 llmv->ll_stripes_allocated = stripes;
/* Hash type is verified/learned during the scan. */
285 llmv->ll_hash_type = LMV_HASH_TYPE_UNKNOWN;
286 llmv->ll_lslr = lslr;
288 llmv->ll_lmv_slave = 1;
291 lfsck->li_obj_dir = lfsck_object_get(obj);
293 atomic_set(&llmv->ll_ref, 1);
294 lfsck->li_lmv = llmv;
299 /* LFSCK wrap functions */
/* Notify every component on the active scan list that the current
 * object/entry failed, so each can account the failure (and the new
 * checked count when @new_checked is set). */
301 static void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
304 struct lfsck_component *com;
306 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
307 com->lc_ops->lfsck_fail(env, com, new_checked);
/* Finish the current directory scan: record @result in the lfsck_lmv (if
 * any) and drop it, let the dir-scanning components close the directory,
 * detach and finalize the directory iterator, and release the pinned
 * directory object. Safe to call when no directory scan is active. */
311 void lfsck_close_dir(const struct lu_env *env,
312 struct lfsck_instance *lfsck, int result)
314 struct lfsck_component *com;
317 if (lfsck->li_lmv != NULL) {
318 lfsck->li_lmv->ll_exit_value = result;
319 if (lfsck->li_obj_dir != NULL) {
320 list_for_each_entry(com, &lfsck->li_list_dir,
322 com->lc_ops->lfsck_close_dir(env, com);
326 lfsck_lmv_put(env, lfsck->li_lmv);
327 lfsck->li_lmv = NULL;
330 if (lfsck->li_di_dir != NULL) {
331 const struct dt_it_ops *dir_iops;
332 struct dt_it *dir_di = lfsck->li_di_dir;
334 LASSERT(lfsck->li_obj_dir != NULL);
/* Capture the iops before detaching; li_di_dir is cleared inside
 * lfsck_di_dir_put(), then the iterator is finalized. */
336 dir_iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
337 lfsck_di_dir_put(env, lfsck);
338 dir_iops->fini(env, dir_di);
341 if (lfsck->li_obj_dir != NULL) {
342 struct dt_object *dir_obj = lfsck->li_obj_dir;
/* Clear the pointer before dropping the reference. */
344 lfsck->li_obj_dir = NULL;
345 lfsck_object_put(env, dir_obj);
/* Start scanning the directory pinned in lfsck::li_obj_dir: notify the
 * dir-scanning components, initialize a directory iterator and position
 * it at @cookie, then publish the iterator in li_di_dir under li_lock.
 * On failure the directory scan state is torn down via lfsck_close_dir(). */
351 int lfsck_open_dir(const struct lu_env *env,
352 struct lfsck_instance *lfsck, __u64 cookie)
354 struct dt_object *obj = lfsck->li_obj_dir;
355 struct dt_it *di = lfsck->li_di_dir;
356 struct lfsck_component *com;
357 const struct dt_it_ops *iops;
361 LASSERT(obj != NULL);
364 if (unlikely(!dt_try_as_dir(env, obj)))
365 GOTO(out, rc = -ENOTDIR);
367 list_for_each_entry(com, &lfsck->li_list_dir, lc_link_dir) {
368 rc = com->lc_ops->lfsck_open_dir(env, com);
373 iops = &obj->do_index_ops->dio_it;
374 di = iops->init(env, obj, lfsck->li_args_dir);
376 GOTO(out, rc = PTR_ERR(di));
/* load() positions at @cookie; a hit (rc == 0) or resume from a
 * non-zero cookie needs next() to step to the entry to process. */
378 rc = iops->load(env, di, cookie);
381 else if (rc == 0 || (rc > 0 && cookie > 0))
382 rc = iops->next(env, di);
/* Remember the current position and publish the iterator. */
390 lfsck->li_cookie_dir = iops->store(env, di);
391 spin_lock(&lfsck->li_lock);
392 lfsck->li_di_dir = di;
393 spin_unlock(&lfsck->li_lock);
400 lfsck_close_dir(env, lfsck, rc);
/* Periodically persist the scan position: a no-op until the next
 * checkpoint deadline, then fills the checkpoint position and lets every
 * scanning component write its checkpoint. Returns the first component
 * failure (rc1) if any, otherwise the last rc. */
405 static int lfsck_checkpoint(const struct lu_env *env,
406 struct lfsck_instance *lfsck)
408 struct lfsck_component *com;
/* Rate limit: nothing to do before the next checkpoint time. */
412 if (likely(ktime_get_seconds() <= lfsck->li_time_next_checkpoint))
415 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
416 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
417 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
/* Schedule the next checkpoint. */
422 lfsck->li_time_last_checkpoint = ktime_get_seconds();
423 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
424 LFSCK_CHECKPOINT_INTERVAL;
425 return rc1 != 0 ? rc1 : rc;
/* Prepare the LFSCK run: let every registered component prepare and pick
 * the smallest non-zero start position, load the OIT iterator at that
 * position, and (on the master, when resuming inside a directory) re-open
 * the saved directory for namespace-based traversal. Finishes by filling
 * the initial checkpoint position and writing the first checkpoint. */
428 static int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
429 struct lfsck_start_param *lsp)
431 struct dt_object *obj = NULL;
432 struct lfsck_component *com;
433 struct lfsck_component *next;
434 struct lfsck_position *pos = NULL;
435 const struct dt_it_ops *iops =
436 &lfsck->li_obj_oit->do_index_ops->dio_it;
440 LASSERT(lfsck->li_obj_dir == NULL);
441 LASSERT(lfsck->li_di_dir == NULL);
443 lfsck->li_current_oit_processed = 0;
444 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
445 com->lc_new_checked = 0;
446 rc = com->lc_ops->lfsck_prep(env, com, lsp);
/* Track the earliest component start position so no component
 * misses objects it still needs to scan. */
451 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
452 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
453 pos = &com->lc_pos_start;
456 /* Init otable-based iterator. */
458 rc = iops->load(env, lfsck->li_di_oit, 0);
459 if (rc > 0 || unlikely(rc == -ENODATA)) {
/* rc > 0 / -ENODATA from load(): nothing (more) to iterate. */
460 lfsck->li_oit_over = 1;
467 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
468 if (rc > 0 || unlikely(rc == -ENODATA))
469 lfsck->li_oit_over = 1;
/* Only the master resumes a namespace (directory) traversal. */
473 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
476 /* Find the directory for namespace-based traverse. */
477 obj = lfsck_object_find_bottom(env, lfsck, &pos->lp_dir_parent);
479 RETURN(PTR_ERR(obj));
481 /* Remote directory will be scanned by the LFSCK instance
482 * on the MDT where the remote object really resides on. */
483 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
484 unlikely(!S_ISDIR(lfsck_object_type(obj))))
487 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
489 /* For the master MDT-object of a striped directory,
490 * reset the iteration from the directory beginning. */
491 if (lfsck->li_lmv != NULL && lfsck->li_lmv->ll_lmv_master)
492 pos->lp_dir_cookie = 0;
494 rc = lfsck_open_dir(env, lfsck, pos->lp_dir_cookie);
496 /* The end of the directory. */
504 lfsck_object_put(env, obj);
/* Error path: tear down any directory state and post all components. */
507 lfsck_close_dir(env, lfsck, rc);
508 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
510 com->lc_ops->lfsck_post(env, com, rc, true);
/* Success path: record the starting position as the first checkpoint. */
517 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, true);
518 lfsck->li_pos_current = lfsck->li_pos_checkpoint;
519 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
520 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
525 lfsck->li_time_last_checkpoint = ktime_get_seconds();
526 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
527 LFSCK_CHECKPOINT_INTERVAL;
/* Process one object found by the OIT scan: run every scanning
 * component's exec_oit hook on it, and if the object is a directory that
 * needs scanning, load its LMV state and open it for the directory
 * engine. Positive rc (does-not-need-scan) is mapped to 0. */
531 static int lfsck_exec_oit(const struct lu_env *env,
532 struct lfsck_instance *lfsck, struct dt_object *obj)
534 struct lfsck_component *com;
538 LASSERT(lfsck->li_obj_dir == NULL);
540 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
541 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
546 rc = lfsck_needs_scan_dir(env, lfsck, obj);
550 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
552 rc = lfsck_open_dir(env, lfsck, 0);
/* Failure: account it, then drop any half-opened dir state. */
558 lfsck_fail(env, lfsck, false);
561 lfsck_close_dir(env, lfsck, rc);
563 return rc > 0 ? 0 : rc;
/* Dispatch one directory entry @ent (with its detached object @type) of
 * the assistant object @lso to every scanning component's exec_dir hook. */
566 static int lfsck_exec_dir(const struct lu_env *env,
567 struct lfsck_instance *lfsck,
568 struct lfsck_assistant_object *lso,
569 struct lu_dirent *ent, __u16 type)
571 struct lfsck_component *com;
574 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
575 rc = com->lc_ops->lfsck_exec_dir(env, com, lso, ent, type);
582 static int lfsck_master_dir_engine(const struct lu_env *env,
583 struct lfsck_instance *lfsck);
/* Post-process after the main scan: record the final position, close the
 * current directory, drain any queued striped-directory (LMV) units by
 * scanning each with the directory engine, then run each component's
 * post hook with @result. Component post failures are logged but do not
 * stop the remaining components. */
585 static int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
588 struct lfsck_component *com;
589 struct lfsck_component *next;
592 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
593 lfsck_close_dir(env, lfsck, result);
/* Drain pending striped-directory units while the thread still runs. */
595 while (thread_is_running(&lfsck->li_thread) && rc > 0 &&
596 !list_empty(&lfsck->li_list_lmv)) {
597 struct lfsck_lmv_unit *llu;
599 spin_lock(&lfsck->li_lock);
600 llu = list_entry(lfsck->li_list_lmv.next,
601 struct lfsck_lmv_unit, llu_link);
602 list_del_init(&llu->llu_link);
603 spin_unlock(&lfsck->li_lock);
605 lfsck->li_lmv = &llu->llu_lmv;
606 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
607 rc = lfsck_open_dir(env, lfsck, 0);
609 rc = lfsck_master_dir_engine(env, lfsck);
610 lfsck_close_dir(env, lfsck, result);
616 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
617 rc = com->lc_ops->lfsck_post(env, com, result, false);
619 CDEBUG(D_LFSCK, "%s: lfsck_post at the component %u: "
620 "rc = %d\n", lfsck_lfsck2name(lfsck),
621 (__u32)com->lc_type, rc);
624 lfsck->li_time_last_checkpoint = ktime_get_seconds();
625 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
626 LFSCK_CHECKPOINT_INTERVAL;
628 /* Ignore some component post failure to make other can go ahead. */
/* Run the second-phase (double) scan of every component on the
 * double-scan list, wait for all asynchronous double-scan work to drain,
 * then (unless the LFSCK was paused) move the components back to the
 * idle list. Returns the first failure (rc1) if any, otherwise rc. */
632 static int lfsck_double_scan(const struct lu_env *env,
633 struct lfsck_instance *lfsck)
635 struct lfsck_component *com;
636 struct lfsck_component *next;
637 struct l_wait_info lwi = { 0 };
641 list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
642 rc = com->lc_ops->lfsck_double_scan(env, com);
/* Wait until all in-flight double-scan work has completed. */
647 l_wait_event(lfsck->li_thread.t_ctl_waitq,
648 atomic_read(&lfsck->li_double_scan_count) == 0,
/* Paused LFSCK keeps components on the double-scan list for resume. */
651 if (lfsck->li_status != LS_PAUSED &&
652 lfsck->li_status != LS_CO_PAUSED) {
653 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
655 spin_lock(&lfsck->li_lock);
656 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
657 spin_unlock(&lfsck->li_lock);
661 return rc1 != 0 ? rc1 : rc;
/* Abort the LFSCK: invoke each component's optional quit hook and move
 * all components from the scan and double-scan lists back to the idle
 * list under li_lock. */
664 static void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
666 struct lfsck_component *com;
667 struct lfsck_component *next;
669 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
671 if (com->lc_ops->lfsck_quit != NULL)
672 com->lc_ops->lfsck_quit(env, com);
/* Scan-list components are also unlinked from the dir list. */
674 spin_lock(&lfsck->li_lock);
675 list_del_init(&com->lc_link_dir);
676 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
677 spin_unlock(&lfsck->li_lock);
680 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
682 if (com->lc_ops->lfsck_quit != NULL)
683 com->lc_ops->lfsck_quit(env, com);
685 spin_lock(&lfsck->li_lock);
686 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
687 spin_unlock(&lfsck->li_lock);
/* Namespace-based engine: iterate the entries of the currently open
 * directory (lfsck::li_di_dir), unpack each entry and dispatch it to the
 * scanning components via lfsck_exec_dir(), checkpointing and speed
 * control along the way. Stops on engine stop, fatal failure injection,
 * or end of directory. */
693 static int lfsck_master_dir_engine(const struct lu_env *env,
694 struct lfsck_instance *lfsck)
696 struct lfsck_thread_info *info = lfsck_env_info(env);
697 struct dt_object *dir = lfsck->li_obj_dir;
698 const struct dt_it_ops *iops = &dir->do_index_ops->dio_it;
699 struct dt_it *di = lfsck->li_di_dir;
700 struct lu_dirent *ent =
701 (struct lu_dirent *)info->lti_key;
702 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
703 struct ptlrpc_thread *thread = &lfsck->li_thread;
704 struct lfsck_assistant_object *lso = NULL;
/* Fault-injection delay point; bail out if the engine was stopped. */
710 if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY2, cfs_fail_val) &&
711 unlikely(!thread_is_running(thread))) {
712 CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
713 "parent "DFID", cookie %#llx\n",
714 lfsck_lfsck2name(lfsck),
715 PFID(lfsck_dto2fid(dir)), lfsck->li_cookie_dir);
720 lfsck->li_new_scanned++;
721 rc = iops->rec(env, di, (struct dt_rec *)ent,
724 rc = lfsck_unpack_ent(ent, &lfsck->li_cookie_dir,
728 CDEBUG(D_LFSCK, "%s: scan dir failed at rec(), "
729 "parent "DFID", cookie %#llx: rc = %d\n",
730 lfsck_lfsck2name(lfsck),
731 PFID(lfsck_dto2fid(dir)),
732 lfsck->li_cookie_dir, rc);
733 lfsck_fail(env, lfsck, true);
/* LPF_FAILOUT: the first failure aborts the whole scan. */
734 if (bk->lb_param & LPF_FAILOUT)
740 if (ent->lde_attrs & LUDA_IGNORE)
743 /* skip dot entry. */
744 if (ent->lde_namelen == 1 && ent->lde_name[0] == '.')
/* Lazily build the assistant object for the parent directory. */
748 lso = lfsck_assistant_object_init(env,
749 lfsck_dto2fid(dir), NULL,
750 lfsck->li_pos_current.lp_oit_cookie, true);
752 if (bk->lb_param & LPF_FAILOUT)
753 RETURN(PTR_ERR(lso));
760 /* The type in the @ent structure may has been overwritten,
761 * so we need to pass the @type parameter independently. */
762 rc = lfsck_exec_dir(env, lfsck, lso, ent, type);
763 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
767 rc = lfsck_checkpoint(env, lfsck);
768 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Throttle per the configured speed limit, then re-check stop. */
772 lfsck_control_speed(lfsck);
773 if (unlikely(!thread_is_running(thread))) {
774 CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
775 "parent "DFID", cookie %#llx\n",
776 lfsck_lfsck2name(lfsck),
777 PFID(lfsck_dto2fid(dir)),
778 lfsck->li_cookie_dir);
/* Fault injection: simulate a fatal engine failure. */
782 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL2)) {
783 spin_lock(&lfsck->li_lock);
784 thread_set_flags(thread, SVC_STOPPING);
785 spin_unlock(&lfsck->li_lock);
786 GOTO(out, rc = -EINVAL);
789 rc = iops->next(env, di);
791 CDEBUG(D_LFSCK, "%s dir engine fail to locate next "
792 "for the directory "DFID": rc = %d\n",
793 lfsck_lfsck2name(lfsck),
794 PFID(&lfsck->li_pos_current.lp_dir_parent), rc);
/* rc > 0 from next(): the directory is fully scanned. */
797 if (rc > 0 && !lfsck->li_oit_over)
798 lfsck_close_dir(env, lfsck, rc);
804 lfsck_assistant_object_put(env, lso);
810 * Object-table based iteration engine.
812 * Object-table based iteration is the basic linear engine to scan all the
813 * objects on current device in turn. For each object, it calls all the
814 * registered LFSCK component(s)' API to perform related consistency
817 * It flushes related LFSCK trace files to disk via making checkpoint
818 * periodically. Then if the server crashed or the LFSCK is paused, the
819 * LFSCK can resume from the latest checkpoint.
821 * It also controls the whole LFSCK speed via lfsck_control_speed() to
822 * prevent the server from becoming overloaded.
824 * \param[in] env pointer to the thread context
825 * \param[in] lfsck pointer to the lfsck instance
827 * \retval positive number if all objects have been scanned
828 * \retval 0 if the iteration is stopped or paused
829 * \retval negative error number on failure
831 static int lfsck_master_oit_engine(const struct lu_env *env,
832 struct lfsck_instance *lfsck)
834 struct lfsck_thread_info *info = lfsck_env_info(env);
835 const struct dt_it_ops *iops =
836 &lfsck->li_obj_oit->do_index_ops->dio_it;
837 struct dt_it *di = lfsck->li_di_oit;
838 struct lu_fid *fid = &info->lti_fid;
839 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
840 struct ptlrpc_thread *thread = &lfsck->li_thread;
841 struct seq_server_site *ss = lfsck_dev_site(lfsck);
842 __u32 idx = lfsck_dev_idx(lfsck);
846 if (unlikely(ss == NULL))
850 struct dt_object *target;
/* An open directory scan takes priority over OIT progress. */
852 if (lfsck->li_di_dir != NULL) {
853 rc = lfsck_master_dir_engine(env, lfsck);
858 if (unlikely(lfsck->li_oit_over))
/* Fault-injection delay point; bail out if stopped meanwhile. */
861 if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY1, cfs_fail_val) &&
862 unlikely(!thread_is_running(thread))) {
863 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
865 lfsck_lfsck2name(lfsck), iops->store(env, di));
870 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
873 lfsck->li_current_oit_processed = 1;
/* Queued striped-directory units are scanned before new objects. */
875 if (!list_empty(&lfsck->li_list_lmv)) {
876 struct lfsck_lmv_unit *llu;
878 spin_lock(&lfsck->li_lock);
879 llu = list_entry(lfsck->li_list_lmv.next,
880 struct lfsck_lmv_unit, llu_link);
881 list_del_init(&llu->llu_link);
882 spin_unlock(&lfsck->li_lock);
884 lfsck->li_lmv = &llu->llu_lmv;
885 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
886 rc = lfsck_open_dir(env, lfsck, 0);
888 rc = lfsck_master_dir_engine(env, lfsck);
894 lfsck->li_new_scanned++;
895 lfsck->li_pos_current.lp_oit_cookie = iops->store(env, di);
896 rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
898 CDEBUG(D_LFSCK, "%s: OIT scan failed at rec(): "
899 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
900 lfsck_fail(env, lfsck, true);
901 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* Skip records whose FID fails sanity checking. */
907 if (unlikely(!fid_is_sane(fid))) {
908 CDEBUG(D_LFSCK, "%s: OIT scan find invalid FID "DFID
910 lfsck_lfsck2name(lfsck), PFID(fid));
/* IDIF FIDs only occur on OSTs (slave instances). */
914 if (fid_is_idif(fid)) {
915 __u32 idx1 = fid_idif_ost_idx(fid);
917 LASSERT(!lfsck->li_master);
920 struct ost_id *oi = &info->lti_oi;
922 if (unlikely(idx1 != 0)) {
923 CDEBUG(D_LFSCK, "%s: invalid IDIF "DFID
924 ", not match device index %u\n",
925 lfsck_lfsck2name(lfsck),
931 /* rebuild the IDIF with index to
932 * avoid double instances for the
934 fid_to_ostid(fid, oi);
935 ostid_to_fid(fid, oi, idx);
937 } else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
938 !fid_is_last_id(fid) &&
939 !lu_fid_eq(fid, &lfsck->li_global_root_fid)) {
941 /* If the FID/object is only used locally and invisible
942 * to external nodes, then LFSCK will not handle it.
944 * dot_lustre sequence has been handled specially. */
947 struct lu_seq_range *range = &info->lti_range;
/* Check FID sequence ownership in the right FLD namespace. */
949 if (lfsck->li_master)
950 fld_range_set_mdt(range);
952 fld_range_set_ost(range);
953 rc = fld_local_lookup(env, ss->ss_server_fld,
954 fid_seq(fid), range);
955 if (rc != 0 || range->lsr_index != idx) {
956 /* Remote object will be handled by the LFSCK
957 * instance on the MDT where the remote object
958 * really resides on. */
964 target = lfsck_object_find_bottom(env, lfsck, fid);
965 if (IS_ERR(target)) {
966 CDEBUG(D_LFSCK, "%s: OIT scan failed at find target "
967 DFID", cookie %llu: rc = %d\n",
968 lfsck_lfsck2name(lfsck), PFID(fid),
969 iops->store(env, di), rc);
970 lfsck_fail(env, lfsck, true);
971 if (bk->lb_param & LPF_FAILOUT)
972 RETURN(PTR_ERR(target));
977 if (dt_object_exists(target))
978 rc = lfsck_exec_oit(env, lfsck, target);
980 lfsck_object_put(env, target);
981 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
985 rc = lfsck_checkpoint(env, lfsck);
986 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Throttle per the configured speed limit. */
990 lfsck_control_speed(lfsck);
/* Fault injection: simulate a fatal engine failure. */
992 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL1)) {
993 spin_lock(&lfsck->li_lock);
994 thread_set_flags(thread, SVC_STOPPING);
995 spin_unlock(&lfsck->li_lock);
999 rc = iops->next(env, di);
/* rc > 0 from next(): the object table has been fully scanned. */
1000 if (unlikely(rc > 0))
1001 lfsck->li_oit_over = 1;
1002 else if (likely(rc == 0))
1003 lfsck->li_current_oit_processed = 0;
1005 CDEBUG(D_LFSCK, "%s oit engine fail to locate next at "
1006 "%llu: rc = %d\n", lfsck_lfsck2name(lfsck),
1007 iops->store(env, di), rc);
1009 if (unlikely(!thread_is_running(thread))) {
1010 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
1011 "cookie %llu\n", lfsck_lfsck2name(lfsck),
1012 iops->store(env, di));
/* Keep looping while OIT progresses or a directory scan remains. */
1015 } while (rc == 0 || lfsck->li_di_dir != NULL);
/* Main LFSCK master thread body: initialize the OIT iterator (which also
 * triggers low-layer OI scrub), verify .lustre/lost+found, prepare all
 * components, wait for the start-unplug signal, run the OIT engine, then
 * post-process, run the double scan, and tear everything down before
 * signalling the controller and freeing the thread args. */
1020 int lfsck_master_engine(void *args)
1022 struct lfsck_thread_args *lta = args;
1023 struct lu_env *env = &lta->lta_env;
1024 struct lfsck_instance *lfsck = lta->lta_lfsck;
1025 struct ptlrpc_thread *thread = &lfsck->li_thread;
1026 struct dt_object *oit_obj = lfsck->li_obj_oit;
1027 const struct dt_it_ops *oit_iops = &oit_obj->do_index_ops->dio_it;
1028 struct dt_it *oit_di;
1029 struct l_wait_info lwi = { 0 };
/* Publish the running task so the controller can signal it. */
1033 spin_lock(&lfsck->li_lock);
1034 lfsck->li_task = current;
1035 spin_unlock(&lfsck->li_lock);
1037 /* There will be some objects verification during the LFSCK start,
1038 * such as the subsequent lfsck_verify_lpf(). Trigger low layer OI
1039 * OI scrub before that to handle the potential inconsistence. */
1040 oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit);
1041 if (IS_ERR(oit_di)) {
1042 rc = PTR_ERR(oit_di);
1043 CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
1044 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
1046 GOTO(fini_args, rc);
1049 if (lfsck->li_master &&
1050 (!list_empty(&lfsck->li_list_scan) ||
1051 !list_empty(&lfsck->li_list_double_scan))) {
1052 rc = lfsck_verify_lpf(env, lfsck);
1053 /* Fail to verify the .lustre/lost+found/MDTxxxx/ may be not
1054 * fatal, because the .lustre/lost+found/ maybe not accessed
1055 * by the LFSCK if it does not add orphans or others to such
1056 * directory. So go ahead until hit failure when really uses
1059 CDEBUG(D_LFSCK, "%s: master engine fail to verify the "
1060 ".lustre/lost+found/, go ahead: rc = %d\n",
1061 lfsck_lfsck2name(lfsck), rc);
/* Publish the OIT iterator, then prepare components and positions. */
1064 spin_lock(&lfsck->li_lock);
1065 lfsck->li_di_oit = oit_di;
1066 spin_unlock(&lfsck->li_lock);
1067 rc = lfsck_prep(env, lfsck, lta->lta_lsp);
1071 CDEBUG(D_LFSCK, "LFSCK entry: oit_flags = %#x, dir_flags = %#x, "
1072 "oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID
1073 ", pid = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
1074 lfsck->li_pos_checkpoint.lp_oit_cookie,
1075 lfsck->li_pos_checkpoint.lp_dir_cookie,
1076 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
/* Transition starting -> running; abort if already asked to stop. */
1079 spin_lock(&lfsck->li_lock);
1080 if (unlikely(!thread_is_starting(thread))) {
1081 spin_unlock(&lfsck->li_lock);
1082 GOTO(fini_oit, rc = 0);
1085 thread_set_flags(thread, SVC_RUNNING);
1086 spin_unlock(&lfsck->li_lock);
1087 wake_up_all(&thread->t_ctl_waitq);
/* Wait for the controller to unplug the start (or stop us). */
1089 l_wait_event(thread->t_ctl_waitq,
1090 lfsck->li_start_unplug ||
1091 !thread_is_running(thread),
1093 if (!thread_is_running(thread))
1094 GOTO(fini_oit, rc = 0);
1096 if (!list_empty(&lfsck->li_list_scan) ||
1097 list_empty(&lfsck->li_list_double_scan))
1098 rc = lfsck_master_oit_engine(env, lfsck);
1102 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
1103 CDEBUG(D_LFSCK, "LFSCK exit: oit_flags = %#x, dir_flags = %#x, "
1104 "oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID
1105 ", pid = %d, rc = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
1106 lfsck->li_pos_checkpoint.lp_oit_cookie,
1107 lfsck->li_pos_checkpoint.lp_dir_cookie,
1108 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
/* Under crash injection, skip post-processing on purpose. */
1111 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
1112 rc = lfsck_post(env, lfsck, rc);
1114 lfsck_close_dir(env, lfsck, rc);
/* Detach and finalize the OIT iterator. */
1117 lfsck_di_oit_put(env, lfsck);
1118 oit_iops->fini(env, oit_di);
1120 if (!list_empty(&lfsck->li_list_double_scan))
1121 rc = lfsck_double_scan(env, lfsck);
1125 lfsck_quit(env, lfsck);
1128 /* XXX: Purge the pinned objects in the future. */
/* Mark stopped and wake whoever is waiting for thread exit. */
1131 spin_lock(&lfsck->li_lock);
1132 thread_set_flags(thread, SVC_STOPPED);
1133 lfsck->li_task = NULL;
1134 spin_unlock(&lfsck->li_lock);
1135 wake_up_all(&thread->t_ctl_waitq);
1136 lfsck_thread_args_fini(lta);
/* Test under lad_lock whether the assistant request queue is empty. */
1140 static inline bool lfsck_assistant_req_empty(struct lfsck_assistant_data *lad)
1144 spin_lock(&lad->lad_lock);
1145 if (list_empty(&lad->lad_req_list))
1147 spin_unlock(&lad->lad_lock);
1153 * Query the LFSCK status from the instances on remote servers.
1155 * The LFSCK assistant thread queries the LFSCK instances on other
1156 * servers (MDT/OST) about their status, such as whether they have
1157 * finished the phase1/phase2 scanning or not, and so on.
1159 * \param[in] env pointer to the thread context
1160 * \param[in] com pointer to the lfsck component
1162 * \retval 0 for success
1163 * \retval negative error number on failure
1165 static int lfsck_assistant_query_others(const struct lu_env *env,
1166 struct lfsck_component *com)
1168 struct lfsck_thread_info *info = lfsck_env_info(env);
1169 struct lfsck_request *lr = &info->lti_lr;
1170 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1171 struct lfsck_instance *lfsck = com->lc_lfsck;
1172 struct lfsck_assistant_data *lad = com->lc_data;
1173 struct ptlrpc_request_set *set;
1174 struct lfsck_tgt_descs *ltds;
1175 struct lfsck_tgt_desc *ltd;
1176 struct list_head *phase_head;
1181 set = ptlrpc_prep_set();
/* Bump the generation so each target is queried at most once per pass. */
1185 lad->lad_touch_gen++;
1186 memset(lr, 0, sizeof(*lr));
1187 lr->lr_event = LE_QUERY;
1188 lr->lr_active = com->lc_type;
1190 memset(laia, 0, sizeof(*laia));
1191 laia->laia_com = com;
/* Query MDTs in phase1 first; layout LFSCK also covers OSTs. */
1194 if (!list_empty(&lad->lad_mdt_phase1_list)) {
1195 ltds = &lfsck->li_mdt_descs;
1197 phase_head = &lad->lad_mdt_phase1_list;
1198 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1203 ltds = &lfsck->li_ost_descs;
1204 lr->lr_flags = LEF_TO_OST;
1205 phase_head = &lad->lad_ost_phase1_list;
1208 laia->laia_ltds = ltds;
1209 spin_lock(&ltds->ltd_lock);
1210 while (!list_empty(phase_head)) {
1211 struct list_head *phase_list;
/* Layout and namespace components track phase state in
 * different per-target list/generation fields. */
1214 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1215 ltd = list_entry(phase_head->next,
1216 struct lfsck_tgt_desc,
1217 ltd_layout_phase_list);
1218 phase_list = &ltd->ltd_layout_phase_list;
1219 gen = &ltd->ltd_layout_gen;
1221 ltd = list_entry(phase_head->next,
1222 struct lfsck_tgt_desc,
1223 ltd_namespace_phase_list);
1224 phase_list = &ltd->ltd_namespace_phase_list;
1225 gen = &ltd->ltd_namespace_gen;
/* Seen this target already in the current pass: done. */
1228 if (*gen == lad->lad_touch_gen)
1231 *gen = lad->lad_touch_gen;
1232 list_move_tail(phase_list, phase_head);
/* Hold a target reference across the dropped lock for the RPC. */
1233 atomic_inc(&ltd->ltd_ref);
1234 laia->laia_ltd = ltd;
1235 spin_unlock(&ltds->ltd_lock);
1236 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1237 lfsck_async_interpret_common,
1240 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to query "
1241 "%s %x for %s: rc = %d\n",
1242 lfsck_lfsck2name(lfsck),
1243 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1244 ltd->ltd_index, lad->lad_name, rc);
1248 spin_lock(&ltds->ltd_lock);
1250 spin_unlock(&ltds->ltd_lock);
/* Wait for all queued query RPCs to complete. */
1252 rc = ptlrpc_set_wait(env, set);
1254 ptlrpc_set_destroy(set);
/* Layout LFSCK continues with the OST phase once MDTs are drained. */
1258 if (com->lc_type == LFSCK_TYPE_LAYOUT && !(lr->lr_flags & LEF_TO_OST) &&
1259 list_empty(&lad->lad_mdt_phase1_list))
1263 ptlrpc_set_destroy(set);
1265 RETURN(rc1 != 0 ? rc1 : rc);
1269 * Notify the LFSCK event to the instances on remote servers.
1271 * The LFSCK assistant thread notifies the LFSCK instances on other
1272 * servers (MDT/OST) about some events, such as start new scanning,
1273 * stop the scanning, this LFSCK instance will exit, and so on.
1275 * \param[in] env pointer to the thread context
1276 * \param[in] com pointer to the lfsck component
1277 * \param[in] lr pointer to the LFSCK event request
1279 * \retval 0 for success
1280 * \retval negative error number on failure
/* NOTE(review): this listing is corrupted and must be restored from pristine
 * source before compiling: (1) each line is prefixed with its original file
 * line number; (2) the numbering gaps show that many original lines (opening/
 * closing braces, `case` labels, GOTO statements, error checks) were dropped;
 * (3) an HTML-entity mangling pass turned `&ltds->`, `&ltd->` etc. into
 * `<ds->`, `<d->` — every `<xyz->` below is presumably `&l...->`; confirm
 * against upstream. Only comments were added/edited here; all code tokens are
 * left byte-identical to the (damaged) input. */
1282 static int lfsck_assistant_notify_others(const struct lu_env *env,
1283 struct lfsck_component *com,
1284 struct lfsck_request *lr)
1286 struct lfsck_thread_info *info = lfsck_env_info(env);
1287 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1288 struct lfsck_instance *lfsck = com->lc_lfsck;
1289 struct lfsck_assistant_data *lad = com->lc_data;
1290 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1291 struct ptlrpc_request_set *set;
1292 struct lfsck_tgt_descs *ltds;
1293 struct lfsck_tgt_desc *ltd;
1294 struct lfsck_tgt_desc *next;
/* NOTE(review): declarations for idx/rc/rc1 and the ENTRY/allocation checks
 * (orig. lines ~1295-1303) are missing from this listing. */
1300 set = ptlrpc_prep_set();
1304 lr->lr_index = lfsck_dev_idx(lfsck);
1305 lr->lr_active = com->lc_type;
1307 memset(laia, 0, sizeof(*laia));
1308 laia->laia_com = com;
/* Dispatch on the event being broadcast to the remote LFSCK instances. */
1311 switch (lr->lr_event) {
/* NOTE(review): the `case LE_START:` label (orig. line ~1312) is absent
 * from this listing; the block below is the start-notification path. */
1313 if (com->lc_type != LFSCK_TYPE_LAYOUT)
1316 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN;
1317 lr->lr_speed = bk->lb_speed_limit;
1318 lr->lr_version = bk->lb_version;
1319 lr->lr_param |= bk->lb_param;
1320 lr->lr_async_windows = bk->lb_async_windows;
1321 lr->lr_flags = LEF_TO_OST;
1323 /* Notify OSTs first, then handle other MDTs if needed. */
1324 ltds = &lfsck->li_ost_descs;
1325 laia->laia_ltds = ltds;
1326 down_read(<ds->ltd_rw_sem)
1327 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1328 ltd = lfsck_tgt_get(ltds, idx);
1329 LASSERT(ltd != NULL);
1331 laia->laia_ltd = ltd;
/* Queue an async LFSCK_NOTIFY RPC to this OST; replies are handled by
 * lfsck_async_interpret_common(). */
1332 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1333 lfsck_async_interpret_common,
1334 laia, LFSCK_NOTIFY);
1336 lfsck_lad_set_bitmap(env, com, idx);
1337 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1338 "notify OST %x for %s start: rc = %d\n",
1339 lfsck_lfsck2name(lfsck), idx,
1344 up_read(<ds->ltd_rw_sem);
1347 rc = ptlrpc_set_wait(env, set);
1349 ptlrpc_set_destroy(set);
/* Stop here unless all targets (other MDTs too) must participate. */
1354 if (!(bk->lb_param & LPF_ALL_TGT))
1357 /* link other MDT targets locally. */
1358 ltds = &lfsck->li_mdt_descs;
1359 spin_lock(<ds->ltd_lock);
1360 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1361 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1362 ltd = lfsck_ltd2tgt(ltds, idx);
1363 LASSERT(ltd != NULL);
/* Skip targets already linked on a layout list. */
1365 if (!list_empty(<d->ltd_layout_list))
1368 list_add_tail(<d->ltd_layout_list,
1369 &lad->lad_mdt_list);
1370 list_add_tail(<d->ltd_layout_phase_list,
1371 &lad->lad_mdt_phase1_list);
/* NOTE(review): the `} else {` branching to the namespace-LFSCK variant
 * (orig. lines ~1372-1373) is missing from this listing. */
1374 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1375 ltd = lfsck_ltd2tgt(ltds, idx);
1376 LASSERT(ltd != NULL);
1378 if (!list_empty(<d->ltd_namespace_list))
1381 list_add_tail(<d->ltd_namespace_list,
1382 &lad->lad_mdt_list);
1383 list_add_tail(<d->ltd_namespace_phase_list,
1384 &lad->lad_mdt_phase1_list);
1387 spin_unlock(<ds->ltd_lock);
/* NOTE(review): `case LE_STOP:` (orig. line ~1389) appears to be missing
 * here; the CDEBUG text below names stop/phase2_done/peer_exit together. */
1390 case LE_PHASE2_DONE:
1391 case LE_PEER_EXIT: {
1392 struct list_head *phase_head;
1394 /* Handle other MDTs first if needed, then notify the OSTs. */
1395 if (bk->lb_param & LPF_ALL_TGT) {
1396 phase_head = &lad->lad_mdt_list;
1397 ltds = &lfsck->li_mdt_descs;
1398 if (lr->lr_event == LE_STOP) {
1399 /* unlink other MDT targets locally. */
1400 spin_lock(<ds->ltd_lock);
1401 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1402 list_for_each_entry_safe(ltd, next,
1403 phase_head, ltd_layout_list) {
/* NOTE(review): the list_del_init() call lines pairing with the two
 * arguments below (orig. lines ~1404/1406) are missing. */
1405 <d->ltd_layout_phase_list);
1407 <d->ltd_layout_list);
1410 list_for_each_entry_safe(ltd, next,
1412 ltd_namespace_list) {
1414 <d->ltd_namespace_phase_list);
1416 <d->ltd_namespace_list);
1419 spin_unlock(<ds->ltd_lock);
1421 if (com->lc_type != LFSCK_TYPE_LAYOUT)
/* Layout LFSCK must also tell the OSTs to stop. */
1424 lr->lr_flags |= LEF_TO_OST;
1425 phase_head = &lad->lad_ost_list;
1426 ltds = &lfsck->li_ost_descs;
1428 lr->lr_flags &= ~LEF_TO_OST;
1430 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1433 lr->lr_flags |= LEF_TO_OST;
1434 phase_head = &lad->lad_ost_list;
1435 ltds = &lfsck->li_ost_descs;
/* Walk the chosen list, popping one target at a time; the lock is
 * dropped around the RPC submission and re-taken afterwards. */
1439 laia->laia_ltds = ltds;
1440 spin_lock(<ds->ltd_lock);
1441 while (!list_empty(phase_head)) {
1442 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1443 ltd = list_entry(phase_head->next,
1444 struct lfsck_tgt_desc,
1446 if (!list_empty(<d->ltd_layout_phase_list))
1448 <d->ltd_layout_phase_list);
1449 list_del_init(<d->ltd_layout_list);
1451 ltd = list_entry(phase_head->next,
1452 struct lfsck_tgt_desc,
1453 ltd_namespace_list);
1454 if (!list_empty(<d->ltd_namespace_phase_list))
1456 <d->ltd_namespace_phase_list);
1457 list_del_init(<d->ltd_namespace_list);
/* Hold a reference across the async RPC; presumably dropped by the
 * interpret callback or on failure — confirm against upstream. */
1459 atomic_inc(<d->ltd_ref);
1460 laia->laia_ltd = ltd;
1461 spin_unlock(<ds->ltd_lock);
1462 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1463 lfsck_async_interpret_common,
1464 laia, LFSCK_NOTIFY);
1466 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1467 "notify %s %x for %s stop/phase2_done/"
1468 "peer_exit: rc = %d\n",
1469 lfsck_lfsck2name(lfsck),
1470 (lr->lr_flags & LEF_TO_OST) ?
1471 "OST" : "MDT", ltd->ltd_index,
1475 spin_lock(<ds->ltd_lock);
1477 spin_unlock(<ds->ltd_lock);
1479 rc = ptlrpc_set_wait(env, set);
1481 ptlrpc_set_destroy(set);
/* For layout LFSCK: after the MDT pass, repeat the loop for the OSTs. */
1485 if (com->lc_type == LFSCK_TYPE_LAYOUT &&
1486 !(lr->lr_flags & LEF_TO_OST)) {
1487 lr->lr_flags |= LEF_TO_OST;
1488 phase_head = &lad->lad_ost_list;
1489 ltds = &lfsck->li_ost_descs;
1494 case LE_PHASE1_DONE:
/* Sync accumulated failure state, then bump the touch generation so
 * each MDT on lad_mdt_list is visited exactly once below. */
1495 lad->lad_ops->la_sync_failures(env, com, lr);
1496 lad->lad_touch_gen++;
1497 ltds = &lfsck->li_mdt_descs;
1498 laia->laia_ltds = ltds;
1499 spin_lock(<ds->ltd_lock);
1500 while (!list_empty(&lad->lad_mdt_list)) {
1501 struct list_head *list;
1504 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1505 ltd = list_entry(lad->lad_mdt_list.next,
1506 struct lfsck_tgt_desc,
1508 list = <d->ltd_layout_list;
1509 gen = <d->ltd_layout_gen;
1511 struct lfsck_namespace *ns = com->lc_file_ram;
1513 ltd = list_entry(lad->lad_mdt_list.next,
1514 struct lfsck_tgt_desc,
1515 ltd_namespace_list);
1516 list = <d->ltd_namespace_list;
1517 gen = <d->ltd_namespace_gen;
1518 lr->lr_flags2 = ns->ln_flags & ~LF_INCOMPLETE;
/* Generation already current: the whole list has been visited. */
1521 if (*gen == lad->lad_touch_gen)
1524 *gen = lad->lad_touch_gen;
1525 list_move_tail(list, &lad->lad_mdt_list);
1526 if (ltd->ltd_synced_failures)
1529 atomic_inc(<d->ltd_ref);
1530 laia->laia_ltd = ltd;
1531 spin_unlock(<ds->ltd_lock);
1532 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1533 lfsck_async_interpret_common,
1534 laia, LFSCK_NOTIFY);
1536 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1537 "notify MDT %x for %s phase1 done: "
1538 "rc = %d\n", lfsck_lfsck2name(lfsck),
1539 ltd->ltd_index, lad->lad_name, rc);
1542 spin_lock(<ds->ltd_lock);
1544 spin_unlock(<ds->ltd_lock);
/* NOTE(review): the `default:` label (orig. line ~1546) is absent from
 * this listing; the CDEBUG below is the unexpected-event diagnostic. */
1547 CDEBUG(D_LFSCK, "%s: LFSCK assistant unexpected LFSCK event: "
1548 "rc = %d\n", lfsck_lfsck2name(lfsck), lr->lr_event);
/* Wait for all queued notify RPCs, then release the request set. */
1553 rc1 = ptlrpc_set_wait(env, set);
1554 ptlrpc_set_destroy(set);
1556 RETURN(rc != 0 ? rc : rc1);
1560 * The LFSCK assistant thread is triggered by the LFSCK main engine.
1561 * They co-work together as an asynchronous pipeline: the LFSCK main
1562 * engine scans the system and pre-fetches the objects, attributes,
1563 * or name entries, etc, and pushes them into the pipeline as input
1564 * requests for the LFSCK assistant thread; on the other end of the
1565 * pipeline, the LFSCK assistant thread performs the real check and
1566 * repair for every request from the main engine.
1568 * Generally, the assistant engine may be blocked when check/repair
1569 * something, so the LFSCK main engine will run some faster. On the
1570 * other hand, the LFSCK main engine will drive multiple assistant
1571 * threads in parallel, means for each LFSCK component on the master
1572 * (such as layout LFSCK, namespace LFSCK), there is an independent
1573 * LFSCK assistant thread. So under such 1:N multiple asynchronous
1574 * pipelines mode, the whole LFSCK performance will be much better
1575 * than check/repair everything by the LFSCK main engine itself.
1577 int lfsck_assistant_engine(void *args)
1579 struct lfsck_thread_args *lta = args;
1580 struct lu_env *env = <a->lta_env;
1581 struct lfsck_component *com = lta->lta_com;
1582 struct lfsck_instance *lfsck = lta->lta_lfsck;
1583 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1584 struct lfsck_position *pos = &com->lc_pos_start;
1585 struct lfsck_thread_info *info = lfsck_env_info(env);
1586 struct lfsck_request *lr = &info->lti_lr;
1587 struct lfsck_assistant_data *lad = com->lc_data;
1588 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1589 struct ptlrpc_thread *athread = &lad->lad_thread;
1590 struct lfsck_assistant_operations *lao = lad->lad_ops;
1591 struct lfsck_assistant_req *lar;
1592 struct l_wait_info lwi = { 0 };
1598 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread start\n",
1599 lfsck_lfsck2name(lfsck), lad->lad_name);
1601 memset(lr, 0, sizeof(*lr));
1602 lr->lr_event = LE_START;
1603 if (pos->lp_oit_cookie <= 1)
1604 lr->lr_param = LPF_RESET;
1605 rc = lfsck_assistant_notify_others(env, com, lr);
1607 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to notify others "
1608 "to start %s: rc = %d\n",
1609 lfsck_lfsck2name(lfsck), lad->lad_name, rc);
1613 spin_lock(&lad->lad_lock);
1614 lad->lad_task = current;
1615 thread_set_flags(athread, SVC_RUNNING);
1616 spin_unlock(&lad->lad_lock);
1617 wake_up_all(&mthread->t_ctl_waitq);
1620 while (!list_empty(&lad->lad_req_list)) {
1621 bool wakeup = false;
1623 if (unlikely(test_bit(LAD_EXIT, &lad->lad_flags) ||
1624 !thread_is_running(mthread)))
1625 GOTO(cleanup, rc = lad->lad_post_result);
1627 lar = list_entry(lad->lad_req_list.next,
1628 struct lfsck_assistant_req,
1630 /* Only the lfsck_assistant_engine thread itself can
1631 * remove the "lar" from the head of the list, LFSCK
1632 * engine thread only inserts other new "lar" at the
1633 * end of the list. So it is safe to handle current
1634 * "lar" without the spin_lock. */
1635 rc = lao->la_handler_p1(env, com, lar);
1636 spin_lock(&lad->lad_lock);
1637 list_del_init(&lar->lar_list);
1638 lad->lad_prefetched--;
1639 /* Wake up the main engine thread only when the list
1640 * is empty or half of the prefetched items have been
1641 * handled to avoid too frequent thread schedule. */
1642 if (lad->lad_prefetched <= (bk->lb_async_windows / 2))
1644 spin_unlock(&lad->lad_lock);
1646 wake_up_all(&mthread->t_ctl_waitq);
1648 lao->la_req_fini(env, lar);
1649 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
1653 l_wait_event(athread->t_ctl_waitq,
1654 !lfsck_assistant_req_empty(lad) ||
1655 test_bit(LAD_EXIT, &lad->lad_flags) ||
1656 test_bit(LAD_TO_POST, &lad->lad_flags) ||
1657 test_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags),
1660 if (unlikely(test_bit(LAD_EXIT, &lad->lad_flags)))
1661 GOTO(cleanup, rc = lad->lad_post_result);
1663 if (!list_empty(&lad->lad_req_list))
1666 if (test_bit(LAD_TO_POST, &lad->lad_flags)) {
1667 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread post\n",
1668 lfsck_lfsck2name(lfsck), lad->lad_name);
1670 if (unlikely(test_bit(LAD_EXIT, &lad->lad_flags)))
1671 GOTO(cleanup, rc = lad->lad_post_result);
1673 clear_bit(LAD_TO_POST, &lad->lad_flags);
1674 LASSERT(lad->lad_post_result > 0);
1676 /* Wakeup the master engine to go ahead. */
1677 wake_up_all(&mthread->t_ctl_waitq);
1679 memset(lr, 0, sizeof(*lr));
1680 lr->lr_event = LE_PHASE1_DONE;
1681 lr->lr_status = lad->lad_post_result;
1682 rc = lfsck_assistant_notify_others(env, com, lr);
1684 CDEBUG(D_LFSCK, "%s: LFSCK assistant notified "
1685 "others for %s post: rc = %d\n",
1686 lfsck_lfsck2name(lfsck),
1690 if (test_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags)) {
1691 clear_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags);
1692 atomic_inc(&lfsck->li_double_scan_count);
1693 set_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags);
1694 wake_up_all(&mthread->t_ctl_waitq);
1696 com->lc_new_checked = 0;
1697 com->lc_new_scanned = 0;
1698 com->lc_time_last_checkpoint = ktime_get_seconds();
1699 com->lc_time_next_checkpoint =
1700 com->lc_time_last_checkpoint +
1701 LFSCK_CHECKPOINT_INTERVAL;
1703 CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before "
1704 "the second-stage scaning\n",
1705 lfsck_lfsck2name(lfsck));
1707 /* Flush async updates before handling orphan. */
1708 rc2 = dt_sync(env, lfsck->li_next);
1710 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 "
1711 "scan start, synced: rc = %d\n",
1712 lfsck_lfsck2name(lfsck), rc2);
1714 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_DOUBLESCAN))
1715 GOTO(cleanup, rc = 0);
1717 while (test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags)) {
1718 rc = lfsck_assistant_query_others(env, com);
1719 if (lfsck_phase2_next_ready(lad))
1725 /* Pull LFSCK status on related targets once
1726 * per 30 seconds if we are not notified. */
1727 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
1728 cfs_time_seconds(1),
1730 rc = l_wait_event(athread->t_ctl_waitq,
1731 lfsck_phase2_next_ready(lad) ||
1732 test_bit(LAD_EXIT, &lad->lad_flags) ||
1733 !thread_is_running(mthread),
1737 test_bit(LAD_EXIT, &lad->lad_flags) ||
1738 !thread_is_running(mthread)))
1739 GOTO(cleanup, rc = 0);
1741 if (rc == -ETIMEDOUT)
1748 rc = lao->la_handler_p2(env, com);
1753 test_bit(LAD_EXIT, &lad->lad_flags) ||
1754 !thread_is_running(mthread)))
1755 GOTO(cleanup, rc = 0);
1761 /* Cleanup the unfinished requests. */
1762 spin_lock(&lad->lad_lock);
1764 lad->lad_assistant_status = rc;
1766 if (test_bit(LAD_EXIT, &lad->lad_flags) && lad->lad_post_result <= 0)
1767 lao->la_fill_pos(env, com, &lfsck->li_pos_checkpoint);
1769 thread_set_flags(athread, SVC_STOPPING);
1770 while (!list_empty(&lad->lad_req_list)) {
1771 lar = list_entry(lad->lad_req_list.next,
1772 struct lfsck_assistant_req,
1774 list_del_init(&lar->lar_list);
1775 lad->lad_prefetched--;
1776 spin_unlock(&lad->lad_lock);
1777 lao->la_req_fini(env, lar);
1778 spin_lock(&lad->lad_lock);
1780 spin_unlock(&lad->lad_lock);
1782 memset(lr, 0, sizeof(*lr));
1784 lr->lr_event = LE_PHASE2_DONE;
1786 } else if (rc == 0) {
1787 if (lfsck->li_flags & LPF_ALL_TGT) {
1788 lr->lr_event = LE_STOP;
1789 lr->lr_status = LS_STOPPED;
1791 lr->lr_event = LE_PEER_EXIT;
1792 switch (lfsck->li_status) {
1795 lr->lr_status = LS_CO_PAUSED;
1799 lr->lr_status = LS_CO_STOPPED;
1802 CDEBUG(D_LFSCK, "%s: LFSCK assistant unknown "
1803 "status: rc = %d\n",
1804 lfsck_lfsck2name(lfsck),
1806 lr->lr_status = LS_CO_FAILED;
1811 if (lfsck->li_flags & LPF_ALL_TGT) {
1812 lr->lr_event = LE_STOP;
1813 lr->lr_status = LS_FAILED;
1815 lr->lr_event = LE_PEER_EXIT;
1816 lr->lr_status = LS_CO_FAILED;
1820 rc1 = lfsck_assistant_notify_others(env, com, lr);
1822 CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to notify "
1823 "others for %s quit: rc = %d\n",
1824 lfsck_lfsck2name(lfsck), lad->lad_name, rc1);
1828 CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before exit\n",
1829 lfsck_lfsck2name(lfsck));
1831 /* Flush async updates before exit. */
1832 rc2 = dt_sync(env, lfsck->li_next);
1834 CDEBUG(D_LFSCK, "%s: LFSCK assistant synced before exit: rc = %d\n",
1835 lfsck_lfsck2name(lfsck), rc2);
1837 /* Under force exit case, some requests may be just freed without
1838 * verification, those objects should be re-handled when next run.
1839 * So not update the on-disk trace file under such case. */
1840 if (test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags)) {
1841 if (!test_bit(LAD_EXIT, &lad->lad_flags))
1842 rc1 = lao->la_double_scan_result(env, com, rc);
1844 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 scan "
1845 "finished: rc = %d\n",
1846 lfsck_lfsck2name(lfsck), rc1 != 0 ? rc1 : rc);
1850 if (test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags))
1851 atomic_dec(&lfsck->li_double_scan_count);
1853 spin_lock(&lad->lad_lock);
1854 lad->lad_assistant_status = (rc1 != 0 ? rc1 : rc);
1855 thread_set_flags(athread, SVC_STOPPED);
1856 lad->lad_task = NULL;
1857 spin_unlock(&lad->lad_lock);
1859 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread exit: rc = %d\n",
1860 lfsck_lfsck2name(lfsck), lad->lad_name,
1861 lad->lad_assistant_status);
1863 lfsck_thread_args_fini(lta);
1864 wake_up_all(&mthread->t_ctl_waitq);