4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2012, 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_engine.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <lu_object.h>
34 #include <dt_object.h>
35 #include <lustre_net.h>
36 #include <lustre_fid.h>
37 #include <obd_support.h>
38 #include <lustre_lib.h>
40 #include "lfsck_internal.h"
/*
 * Unpack an on-disk (little-endian) directory entry into CPU byte order.
 *
 * Swabs the FID, hash cookie, record length, name length and attribute
 * flags of @ent in place.  The hash cookie is returned via @cookie and
 * the entry type (stored in a trailing struct luda_type when the
 * LUDA_TYPE attribute is present) via @type.  Finally the entry name is
 * NUL-terminated in place (this may overwrite the now-consumed type data
 * that follows the name).
 *
 * NOTE(review): some interior lines (locals @lt/@len, error return when
 * LUDA_TYPE is absent) are not visible in this view -- confirm against
 * the full source.
 */
42 int lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie, __u16 *type)
45 int align = sizeof(*lt) - 1;
48 fid_le_to_cpu(&ent->lde_fid, &ent->lde_fid);
49 *cookie = le64_to_cpu(ent->lde_hash);
50 ent->lde_reclen = le16_to_cpu(ent->lde_reclen);
51 ent->lde_namelen = le16_to_cpu(ent->lde_namelen);
52 ent->lde_attrs = le32_to_cpu(ent->lde_attrs);
54 if (unlikely(!(ent->lde_attrs & LUDA_TYPE)))
/* The luda_type record is stored after the name, rounded up to the
 * alignment of struct luda_type. */
57 len = (ent->lde_namelen + align) & ~align;
58 lt = (struct luda_type *)(ent->lde_name + len);
59 *type = le16_to_cpu(lt->lt_type);
61 /* Make sure the name is terminated with '\0'. The data (object type)
62 * after ent::lde_name maybe broken, but we have stored such data in
63 * the output parameter @type as above. */
64 ent->lde_name[ent->lde_namelen] = '\0';
/*
 * Detach and release the OTable-based (OIT) iterator of the instance.
 *
 * Clears lfsck::li_di_oit under li_lock so that concurrent readers of
 * the field see a consistent state.  NOTE(review): the iops->put() call
 * on the detached iterator presumably follows but is not visible in this
 * view -- confirm against the full source.
 */
69 static void lfsck_di_oit_put(const struct lu_env *env, struct lfsck_instance *lfsck)
71 const struct dt_it_ops *iops;
74 spin_lock(&lfsck->li_lock);
75 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
76 di = lfsck->li_di_oit;
77 lfsck->li_di_oit = NULL;
78 spin_unlock(&lfsck->li_lock);
/*
 * Detach and release the directory iterator of the instance.
 *
 * Clears lfsck::li_di_dir and resets the saved directory cookie under
 * li_lock.  NOTE(review): the iops->put() call on the detached iterator
 * presumably follows but is not visible in this view -- confirm against
 * the full source.
 */
82 static void lfsck_di_dir_put(const struct lu_env *env, struct lfsck_instance *lfsck)
84 const struct dt_it_ops *iops;
87 spin_lock(&lfsck->li_lock);
88 iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
89 di = lfsck->li_di_dir;
90 lfsck->li_di_dir = NULL;
91 lfsck->li_cookie_dir = 0;
92 spin_unlock(&lfsck->li_lock);
/*
 * Create or refresh the LMA (lustre_mdt_attrs) xattr on @obj.
 *
 * If the object has no LMA xattr (or an old-sized one), a new LMA is
 * initialized with the LMAC_FID_ON_OST compat flag and written via a
 * single transaction on the bottom device.  Skipped entirely in dry-run
 * mode.  Returns 0 on success or a negative errno.
 */
96 static int lfsck_update_lma(const struct lu_env *env,
97 struct lfsck_instance *lfsck, struct dt_object *obj)
99 struct lfsck_thread_info *info = lfsck_env_info(env);
100 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
101 struct dt_device *dt = lfsck->li_bottom;
102 struct lustre_mdt_attrs *lma = &info->lti_lma;
/* Never modify on-disk state when the user asked for a dry run. */
109 if (bk->lb_param & LPF_DRYRUN)
112 buf = lfsck_buf_get(env, info->lti_lma_old, LMA_OLD_SIZE);
113 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LMA, BYPASS_CAPA);
/* No LMA yet: create one from scratch. */
118 fl = LU_XATTR_CREATE;
119 lustre_lma_init(lma, lfsck_dto2fid(obj), LMAC_FID_ON_OST, 0);
/* An LMA exists but has an unexpected size: leave it alone. */
121 if (rc != LMA_OLD_SIZE && rc != sizeof(struct lustre_mdt_attrs))
/* LMA exists with a known size: swab it, OR in LMAC_FID_ON_OST,
 * and replace the xattr. */
124 fl = LU_XATTR_REPLACE;
125 lustre_lma_swab(lma);
126 lustre_lma_init(lma, lfsck_dto2fid(obj),
127 lma->lma_compat | LMAC_FID_ON_OST,
/* Swab back to disk byte order before writing. */
130 lustre_lma_swab(lma);
132 th = dt_trans_create(env, dt);
136 buf = lfsck_buf_get(env, lma, sizeof(*lma));
137 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th);
141 rc = dt_trans_start(env, dt, th);
145 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th, BYPASS_CAPA);
150 dt_trans_stop(env, dt, th);
/*
 * Look up the FID of @obj's parent directory via its ".." entry.
 *
 * Fails (returns non-zero; exact errno not visible in this view) when
 * @obj is not a directory or cannot be used as an index.
 */
154 static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
157 if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
158 !dt_try_as_dir(env, obj)))
161 return dt_lookup(env, obj, (struct dt_rec *)fid,
162 (const struct dt_key *)"..", BYPASS_CAPA);
166 * Check whether needs to scan the directory or not.
168 * 1) If we are not doing namespace LFSCK, or the given @obj is not directory,
169 * then needs not to scan the @obj. Otherwise,
170 * 2) Global /ROOT needs to be scanned, backend root needs not to be scanned.
171 * 3) If the @obj is neither IGIF nor normal FID (including .lustre and its
172 * sub-directories that have been scanned when the LFSCK engine start),
173 * then needs not to be scanned.
174 * 4) If it is a remote object, then scanning the object will be done on the
175 * MDT on which the object really resides.
176 * 5) If the local object has normal FID, then needs to be scanned. Otherwise,
177 * 6) If the object has linkEA, then needs to be scanned. Otherwise,
178 * 7) If none of the previous conditions are true, we need to check the parent
179 * directories whether this subdirectory is in a tree that should be scanned.
180 * Set the parent as current @obj, repeat 2)-7).
182 * \param[in] env pointer to the thread context
183 * \param[in] lfsck pointer to the lfsck instance
184 * \param[in] obj pointer to the object to be checked
186 * \retval positive number if the directory needs to be scanned
187 * \retval 0 if the directory needs NOT to be scanned
188 * \retval negative error number on failure
190 static int lfsck_needs_scan_dir(const struct lu_env *env,
191 struct lfsck_instance *lfsck,
192 struct dt_object *obj)
194 struct lfsck_thread_info *info = lfsck_env_info(env);
195 struct lu_fid *fid = &info->lti_fid;
196 struct lu_seq_range *range = &info->lti_range;
197 struct dt_device *dev = lfsck->li_bottom;
198 struct seq_server_site *ss = lu_site2seq(dev->dd_lu_dev.ld_site);
199 __u32 idx = lfsck_dev_idx(dev);
/* Rule 1): no namespace LFSCK registered, or not a directory. */
203 if (list_empty(&lfsck->li_list_dir) || !S_ISDIR(lfsck_object_type(obj)))
208 *fid = *lfsck_dto2fid(obj);
/* NOTE(review): the loop construct that walks ancestors (rules 2)-7))
 * is only partially visible here -- the body below is its interior. */
210 /* Global /ROOT is visible. */
211 if (unlikely(lu_fid_eq(fid, &lfsck->li_global_root_fid)))
214 /* Backend root is invisible. */
215 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
218 if (!fid_is_norm(fid) && !fid_is_igif(fid))
221 fld_range_set_mdt(range);
222 rc = fld_local_lookup(env, ss->ss_server_fld,
223 fid_seq(fid), range);
224 if (rc != 0 || range->lsr_index != idx) {
225 /* Current FID should NOT be for the input parameter
226 * @obj, because the lfsck_master_oit_engine() has
227 * filtered out agent object. So current FID is for
228 * the ancestor of the original input parameter @obj.
229 * So the ancestor is a remote directory. The input
230 * parameter @obj is local directory, and should be
231 * scanned under such case. */
237 /* normal FID on this target (locally) must be for the
238 * client-side visiable object. */
239 if (fid_is_norm(fid))
/* Walk up: load the current ancestor object by FID. */
243 obj = lfsck_object_find(env, lfsck, fid);
248 if (!dt_object_exists(obj))
252 dt_read_lock(env, obj, MOR_TGT_CHILD);
253 if (unlikely(lfsck_is_dead_obj(obj))) {
254 dt_read_unlock(env, obj);
/* Rule 6): probe for linkEA presence with a zero-length buffer. */
259 rc = dt_xattr_get(env, obj,
260 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK,
262 dt_read_unlock(env, obj);
266 if (rc < 0 && rc != -ENODATA)
/* Rule 7): continue the walk with the parent's FID. */
269 rc = lfsck_parent_fid(env, obj, fid);
271 lfsck_object_put(env, obj);
277 if (!fid_is_sane(fid))
/* Release the last ancestor reference taken inside the loop. */
282 if (depth > 0 && obj != NULL)
283 lfsck_object_put(env, obj);
/*
 * Load the stripe LMV EA of @obj and set up lfsck::li_lmv / li_obj_dir.
 *
 * - No LMV EA (-ENODATA): plain directory, just pin it as li_obj_dir.
 * - LMV_MAGIC (master object of a striped directory): allocate a slave
 *   record array sized by the stripe count (clamped to
 *   [LFSCK_LMV_DEF_STRIPES, LFSCK_LMV_MAX_STRIPES]) and re-find the
 *   object against the bottom device.
 * - Otherwise: treat as a slave stripe object.
 * In the LMV cases an lfsck_lmv is installed as lfsck::li_lmv with an
 * initial reference.
 */
288 static int lfsck_load_stripe_lmv(const struct lu_env *env,
289 struct lfsck_instance *lfsck,
290 struct dt_object *obj)
292 struct lmv_mds_md_v1 *lmv = &lfsck_env_info(env)->lti_lmv;
293 struct lfsck_lmv *llmv;
/* Must not already be inside a directory scan. */
297 LASSERT(lfsck->li_obj_dir == NULL);
298 LASSERT(lfsck->li_lmv == NULL);
300 rc = lfsck_read_stripe_lmv(env, obj, lmv);
301 if (rc == -ENODATA) {
302 lfsck->li_obj_dir = lfsck_object_get(obj);
/* NOTE(review): the llmv allocation between here and the magic check
 * is not visible in this view. */
314 if (lmv->lmv_magic == LMV_MAGIC) {
315 struct lfsck_slave_lmv_rec *lslr;
318 llmv->ll_lmv_master = 1;
/* Clamp the per-stripe record array size to sane bounds. */
319 if (lmv->lmv_stripe_count < 1)
320 stripes = LFSCK_LMV_DEF_STRIPES;
321 else if (lmv->lmv_stripe_count > LFSCK_LMV_MAX_STRIPES)
322 stripes = LFSCK_LMV_MAX_STRIPES;
324 stripes = lmv->lmv_stripe_count;
326 OBD_ALLOC_LARGE(lslr, sizeof(*lslr) * stripes);
333 /* Find the object against the bottom device. */
334 obj = lfsck_object_find_by_dev(env, lfsck->li_bottom,
/* On failure free the record array before bailing out. */
337 OBD_FREE_LARGE(lslr, sizeof(*lslr) * stripes);
340 RETURN(PTR_ERR(obj));
343 llmv->ll_stripes_allocated = stripes;
344 llmv->ll_hash_type = LMV_HASH_TYPE_UNKNOWN;
345 llmv->ll_lslr = lslr;
346 lfsck->li_obj_dir = obj;
/* Not a master LMV: this is a slave stripe object. */
348 llmv->ll_lmv_slave = 1;
349 lfsck->li_obj_dir = lfsck_object_get(obj);
353 atomic_set(&llmv->ll_ref, 1);
354 lfsck->li_lmv = llmv;
359 /* LFSCK wrap functions */
/*
 * Propagate a scan failure to every registered component.
 *
 * Calls each component's lfsck_fail() callback; @new_checked tells the
 * component whether the failed object should still be counted as checked.
 */
361 static void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
364 struct lfsck_component *com;
366 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
367 com->lc_ops->lfsck_fail(env, com, new_checked);
/*
 * Finish the current directory scan and release its resources.
 *
 * Records @result in the active lfsck_lmv (if any) and notifies the
 * directory-based components via their lfsck_close_dir() callbacks,
 * then drops the lfsck_lmv reference, detaches/finalizes the directory
 * iterator, and releases the pinned directory object.
 */
371 void lfsck_close_dir(const struct lu_env *env,
372 struct lfsck_instance *lfsck, int result)
374 struct lfsck_component *com;
377 if (lfsck->li_lmv != NULL) {
378 lfsck->li_lmv->ll_exit_value = result;
379 if (lfsck->li_obj_dir != NULL) {
380 list_for_each_entry(com, &lfsck->li_list_dir,
382 com->lc_ops->lfsck_close_dir(env, com);
386 lfsck_lmv_put(env, lfsck->li_lmv);
387 lfsck->li_lmv = NULL;
390 if (lfsck->li_di_dir != NULL) {
391 const struct dt_it_ops *dir_iops;
392 struct dt_it *dir_di = lfsck->li_di_dir;
394 LASSERT(lfsck->li_obj_dir != NULL);
/* Save the iterator ops before the put clears li_di_dir, then
 * finalize the detached iterator. */
396 dir_iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
397 lfsck_di_dir_put(env, lfsck);
398 dir_iops->fini(env, dir_di);
401 if (lfsck->li_obj_dir != NULL) {
402 struct dt_object *dir_obj = lfsck->li_obj_dir;
404 lfsck->li_obj_dir = NULL;
405 lfsck_object_put(env, dir_obj);
/*
 * Start scanning the directory pinned as lfsck::li_obj_dir.
 *
 * Notifies the directory-based components (lfsck_open_dir callbacks),
 * initializes a directory iterator and positions it at @cookie (stepping
 * past the already-processed entry when resuming, i.e. cookie > 0), then
 * publishes the iterator as li_di_dir under li_lock.  On failure the
 * directory state is torn down via lfsck_close_dir().
 */
411 int lfsck_open_dir(const struct lu_env *env,
412 struct lfsck_instance *lfsck, __u64 cookie)
414 struct dt_object *obj = lfsck->li_obj_dir;
415 struct dt_it *di = lfsck->li_di_dir;
416 struct lfsck_component *com;
417 const struct dt_it_ops *iops;
421 LASSERT(obj != NULL);
424 if (unlikely(!dt_try_as_dir(env, obj)))
425 GOTO(out, rc = -ENOTDIR);
427 list_for_each_entry(com, &lfsck->li_list_dir, lc_link_dir) {
428 rc = com->lc_ops->lfsck_open_dir(env, com);
433 iops = &obj->do_index_ops->dio_it;
434 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
436 GOTO(out, rc = PTR_ERR(di));
438 rc = iops->load(env, di, cookie);
/* When resuming (cookie > 0) the loaded entry was already handled;
 * advance to the next one. */
439 if (rc == 0 || (rc > 0 && cookie > 0))
440 rc = iops->next(env, di);
448 lfsck->li_cookie_dir = iops->store(env, di);
449 spin_lock(&lfsck->li_lock);
450 lfsck->li_di_dir = di;
451 spin_unlock(&lfsck->li_lock);
/* Error path: undo component notifications and directory pin. */
458 lfsck_close_dir(env, lfsck, rc);
/*
 * Periodically flush checkpoint state for all scanning components.
 *
 * Does nothing until li_time_next_checkpoint has passed.  Then records
 * the current position and invokes each component's lfsck_checkpoint()
 * callback, and schedules the next checkpoint LFSCK_CHECKPOINT_INTERVAL
 * seconds ahead.  Returns the first component failure (rc1) if any,
 * otherwise the last rc.
 */
463 static int lfsck_checkpoint(const struct lu_env *env,
464 struct lfsck_instance *lfsck)
466 struct lfsck_component *com;
470 if (likely(cfs_time_beforeq(cfs_time_current(),
471 lfsck->li_time_next_checkpoint)))
474 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
475 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
476 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
481 lfsck->li_time_last_checkpoint = cfs_time_current();
482 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
483 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
484 return rc1 != 0 ? rc1 : rc;
/*
 * Prepare the LFSCK instance for a new scan.
 *
 * Asks every scanning component to prepare (lfsck_prep callback) and
 * selects the smallest non-zero start position among them, loads the
 * otable-based iterator at that position, and -- on the master with a
 * saved directory position -- re-opens the directory that was being
 * traversed so the namespace scan can resume.  Finishes by filling the
 * initial checkpoint position and check-pointing all components.
 * On component failure the prepared state is unwound (lfsck_post on the
 * remaining components).
 */
487 static int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
488 struct lfsck_start_param *lsp)
490 struct dt_object *obj = NULL;
491 struct lfsck_component *com;
492 struct lfsck_component *next;
493 struct lfsck_position *pos = NULL;
494 const struct dt_it_ops *iops =
495 &lfsck->li_obj_oit->do_index_ops->dio_it;
499 LASSERT(lfsck->li_obj_dir == NULL);
500 LASSERT(lfsck->li_di_dir == NULL);
502 lfsck->li_current_oit_processed = 0;
503 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
504 com->lc_new_checked = 0;
505 rc = com->lc_ops->lfsck_prep(env, com, lsp);
/* Track the minimal non-zero start position over all components
 * so no component misses objects. */
510 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
511 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
512 pos = &com->lc_pos_start;
515 /* Init otable-based iterator. */
/* No saved position: start from the beginning. */
517 rc = iops->load(env, lfsck->li_di_oit, 0);
519 lfsck->li_oit_over = 1;
/* Resume from the saved OIT cookie. */
526 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
530 lfsck->li_oit_over = 1;
/* Directory resume only applies on the master with a valid parent. */
532 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
535 /* Find the directory for namespace-based traverse. */
536 obj = lfsck_object_find(env, lfsck, &pos->lp_dir_parent);
538 RETURN(PTR_ERR(obj));
540 /* Remote directory will be scanned by the LFSCK instance
541 * on the MDT where the remote object really resides on. */
542 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
543 unlikely(!S_ISDIR(lfsck_object_type(obj))))
546 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
548 /* For the master MDT-object of a striped directory,
549 * reset the iteration from the directory beginning. */
550 if (lfsck->li_lmv != NULL && lfsck->li_lmv->ll_lmv_master)
551 pos->lp_dir_cookie = 0;
553 rc = lfsck_open_dir(env, lfsck, pos->lp_dir_cookie);
555 /* The end of the directory. */
563 lfsck_object_put(env, obj);
/* Error unwind: close any opened directory and post the components. */
566 lfsck_close_dir(env, lfsck, rc);
567 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
569 com->lc_ops->lfsck_post(env, com, rc, true);
576 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, true);
577 lfsck->li_pos_current = lfsck->li_pos_checkpoint;
578 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
579 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
584 lfsck->li_time_last_checkpoint = cfs_time_current();
585 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
586 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/*
 * Execute one OIT-scanned object against all scanning components.
 *
 * Calls each component's lfsck_exec_oit() callback for @obj; if the
 * object is a directory that needs namespace scanning
 * (lfsck_needs_scan_dir), loads its stripe LMV and opens it for the
 * directory engine.  Failures are reported via lfsck_fail() and the
 * partially-opened directory state is closed.  A positive rc (skip)
 * is normalized to 0 for the caller.
 */
590 static int lfsck_exec_oit(const struct lu_env *env,
591 struct lfsck_instance *lfsck, struct dt_object *obj)
593 struct lfsck_component *com;
597 LASSERT(lfsck->li_obj_dir == NULL);
599 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
600 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
605 rc = lfsck_needs_scan_dir(env, lfsck, obj);
609 rc = lfsck_load_stripe_lmv(env, lfsck, obj);
611 rc = lfsck_open_dir(env, lfsck, 0);
617 lfsck_fail(env, lfsck, false);
620 lfsck_close_dir(env, lfsck, rc);
622 return rc > 0 ? 0 : rc;
/*
 * Execute one directory entry against all scanning components.
 *
 * @type is passed separately because the type stored inside @ent may
 * have been overwritten during unpacking.
 */
625 static int lfsck_exec_dir(const struct lu_env *env,
626 struct lfsck_instance *lfsck,
627 struct lu_dirent *ent, __u16 type)
629 struct lfsck_component *com;
632 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
633 rc = com->lc_ops->lfsck_exec_dir(env, com, ent, type);
640 static int lfsck_master_dir_engine(const struct lu_env *env,
641 struct lfsck_instance *lfsck);
/*
 * Post-process the first-phase scan with @result.
 *
 * Fills the final checkpoint position and closes the current directory,
 * then -- while the thread is still running -- drains any queued striped
 * directories (li_list_lmv), scanning each with the directory engine.
 * Finally invokes every component's lfsck_post() callback; individual
 * component failures are logged but do not stop the remaining
 * components.
 */
643 static int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
646 struct lfsck_component *com;
647 struct lfsck_component *next;
650 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
651 lfsck_close_dir(env, lfsck, result);
/* Drain pending striped-directory units queued during the scan. */
653 while (thread_is_running(&lfsck->li_thread) && rc > 0 &&
654 !list_empty(&lfsck->li_list_lmv)) {
655 struct lfsck_lmv_unit *llu;
657 spin_lock(&lfsck->li_lock);
658 llu = list_entry(lfsck->li_list_lmv.next,
659 struct lfsck_lmv_unit, llu_link);
660 list_del_init(&llu->llu_link);
661 spin_unlock(&lfsck->li_lock);
663 lfsck->li_lmv = &llu->llu_lmv;
664 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
665 rc = lfsck_open_dir(env, lfsck, 0);
667 rc = lfsck_master_dir_engine(env, lfsck);
668 lfsck_close_dir(env, lfsck, result);
674 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
675 rc = com->lc_ops->lfsck_post(env, com, result, false);
677 CDEBUG(D_LFSCK, "%s: lfsck_post at the component %u: "
678 "rc = %d\n", lfsck_lfsck2name(lfsck),
679 (__u32)com->lc_type, rc);
682 lfsck->li_time_last_checkpoint = cfs_time_current();
683 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
684 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
686 /* Ignore some component post failure to make other can go ahead. */
/*
 * Run the second-phase (double) scan for every registered component.
 *
 * Invokes each component's lfsck_double_scan() callback, then waits for
 * all asynchronous double-scan work to finish (li_double_scan_count
 * reaching zero).  Unless the LFSCK was (co-)paused, the components are
 * moved back onto the idle list.  Returns the first failure (rc1) if
 * any, otherwise the last rc.
 */
690 static int lfsck_double_scan(const struct lu_env *env,
691 struct lfsck_instance *lfsck)
693 struct lfsck_component *com;
694 struct lfsck_component *next;
695 struct l_wait_info lwi = { 0 };
699 list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
700 rc = com->lc_ops->lfsck_double_scan(env, com);
705 l_wait_event(lfsck->li_thread.t_ctl_waitq,
706 atomic_read(&lfsck->li_double_scan_count) == 0,
/* Paused instances keep their components queued so a resume can
 * continue the double scan. */
709 if (lfsck->li_status != LS_PAUSED &&
710 lfsck->li_status != LS_CO_PAUSED) {
711 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
713 spin_lock(&lfsck->li_lock);
714 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
715 spin_unlock(&lfsck->li_lock);
719 return rc1 != 0 ? rc1 : rc;
/*
 * Abort the LFSCK: notify and park every component.
 *
 * For each component on the scan and double-scan lists, calls its
 * optional lfsck_quit() callback and moves it to the idle list under
 * li_lock (scan-list components are also removed from the per-directory
 * list).
 */
722 static void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
724 struct lfsck_component *com;
725 struct lfsck_component *next;
727 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
729 if (com->lc_ops->lfsck_quit != NULL)
730 com->lc_ops->lfsck_quit(env, com);
732 spin_lock(&lfsck->li_lock);
733 list_del_init(&com->lc_link_dir);
734 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
735 spin_unlock(&lfsck->li_lock);
738 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
740 if (com->lc_ops->lfsck_quit != NULL)
741 com->lc_ops->lfsck_quit(env, com);
743 spin_lock(&lfsck->li_lock);
744 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
745 spin_unlock(&lfsck->li_lock);
/*
 * Namespace (directory-entry) iteration engine on the master.
 *
 * Walks the currently-open directory (li_di_dir) entry by entry: each
 * record is fetched, unpacked (lfsck_unpack_ent) and dispatched to the
 * components via lfsck_exec_dir().  Entries flagged LUDA_IGNORE (except
 * "..") are skipped.  Periodic checkpoints and speed control are applied
 * between entries, and the loop honors thread stop requests and the
 * OBD_FAIL_LFSCK_DELAY2/FATAL2 fault-injection points.  On reaching the
 * directory end the directory is closed unless the OIT scan is already
 * over.
 */
751 static int lfsck_master_dir_engine(const struct lu_env *env,
752 struct lfsck_instance *lfsck)
754 struct lfsck_thread_info *info = lfsck_env_info(env);
755 struct dt_object *dir = lfsck->li_obj_dir;
756 const struct dt_it_ops *iops = &dir->do_index_ops->dio_it;
757 struct dt_it *di = lfsck->li_di_dir;
758 struct lu_dirent *ent =
759 (struct lu_dirent *)info->lti_key;
760 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
761 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Fault injection: optionally stall the scan for cfs_fail_val secs. */
767 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY2) &&
769 struct l_wait_info lwi;
771 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
773 l_wait_event(thread->t_ctl_waitq,
774 !thread_is_running(thread),
777 if (unlikely(!thread_is_running(thread))) {
778 CDEBUG(D_LFSCK, "%s: scan dir exit for engine "
779 "stop, parent "DFID", cookie "LPX64"\n",
780 lfsck_lfsck2name(lfsck),
781 PFID(lfsck_dto2fid(dir)),
782 lfsck->li_cookie_dir);
787 lfsck->li_new_scanned++;
788 rc = iops->rec(env, di, (struct dt_rec *)ent,
791 rc = lfsck_unpack_ent(ent, &lfsck->li_cookie_dir,
795 CDEBUG(D_LFSCK, "%s: scan dir failed at rec(), "
796 "parent "DFID", cookie "LPX64": rc = %d\n",
797 lfsck_lfsck2name(lfsck),
798 PFID(lfsck_dto2fid(dir)),
799 lfsck->li_cookie_dir, rc);
800 lfsck_fail(env, lfsck, true);
/* Failout mode: abort on the first broken record. */
801 if (bk->lb_param & LPF_FAILOUT)
/* Skip ignorable entries, but never skip ".." itself. */
807 if (ent->lde_attrs & LUDA_IGNORE &&
808 strcmp(ent->lde_name, dotdot) != 0)
811 /* The type in the @ent structure may has been overwritten,
812 * so we need to pass the @type parameter independently. */
813 rc = lfsck_exec_dir(env, lfsck, ent, type);
814 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
818 rc = lfsck_checkpoint(env, lfsck);
819 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Throttle to the configured speed limit between entries. */
823 lfsck_control_speed(lfsck);
824 if (unlikely(!thread_is_running(thread))) {
825 CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
826 "parent "DFID", cookie "LPX64"\n",
827 lfsck_lfsck2name(lfsck),
828 PFID(lfsck_dto2fid(dir)),
829 lfsck->li_cookie_dir);
/* Fault injection: simulate a fatal failure by stopping the svc. */
833 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL2)) {
834 spin_lock(&lfsck->li_lock);
835 thread_set_flags(thread, SVC_STOPPING);
836 spin_unlock(&lfsck->li_lock);
840 rc = iops->next(env, di);
/* Directory exhausted (rc > 0): close it unless the whole OIT scan
 * has already completed. */
843 if (rc > 0 && !lfsck->li_oit_over)
844 lfsck_close_dir(env, lfsck, rc);
850 * Object-table based iteration engine.
852 * Object-table based iteration is the basic linear engine to scan all the
853 * objects on current device in turn. For each object, it calls all the
854 * registered LFSCK component(s)' API to perform related consistency
857 * It flushes related LFSCK trace files to disk via making checkpoint
858 * periodically. Then if the server crashed or the LFSCK is paused, the
859 * LFSCK can resume from the latest checkpoint.
861 * It also controls the whole LFSCK speed via lfsck_control_speed() to
862 * avoid the server to become overload.
864 * \param[in] env pointer to the thread context
865 * \param[in] lfsck pointer to the lfsck instance
867 * \retval positive number if all objects have been scanned
868 * \retval 0 if the iteration is stopped or paused
869 * \retval negative error number on failure
871 static int lfsck_master_oit_engine(const struct lu_env *env,
872 struct lfsck_instance *lfsck)
874 struct lfsck_thread_info *info = lfsck_env_info(env);
875 const struct dt_it_ops *iops =
876 &lfsck->li_obj_oit->do_index_ops->dio_it;
877 struct dt_it *di = lfsck->li_di_oit;
878 struct lu_fid *fid = &info->lti_fid;
879 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
880 struct ptlrpc_thread *thread = &lfsck->li_thread;
881 struct dt_device *dev = lfsck->li_bottom;
882 struct seq_server_site *ss = lu_site2seq(dev->dd_lu_dev.ld_site);
883 __u32 idx = lfsck_dev_idx(dev);
887 if (unlikely(ss == NULL))
891 struct dt_object *target;
892 bool update_lma = false;
/* An in-progress directory scan takes priority over the OIT scan. */
894 if (lfsck->li_di_dir != NULL) {
895 rc = lfsck_master_dir_engine(env, lfsck);
900 if (unlikely(lfsck->li_oit_over))
/* Fault injection: optionally stall the scan for cfs_fail_val secs. */
903 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY1) &&
905 struct l_wait_info lwi;
907 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
909 l_wait_event(thread->t_ctl_waitq,
910 !thread_is_running(thread),
913 if (unlikely(!thread_is_running(thread))) {
914 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine "
915 "stop, cookie "LPU64"\n",
916 lfsck_lfsck2name(lfsck),
917 iops->store(env, di));
922 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
925 lfsck->li_current_oit_processed = 1;
/* Process any queued striped-directory units before the next OIT
 * record. */
927 if (!list_empty(&lfsck->li_list_lmv)) {
928 struct lfsck_lmv_unit *llu;
930 spin_lock(&lfsck->li_lock);
931 llu = list_entry(lfsck->li_list_lmv.next,
932 struct lfsck_lmv_unit, llu_link);
933 list_del_init(&llu->llu_link);
934 spin_unlock(&lfsck->li_lock);
936 lfsck->li_lmv = &llu->llu_lmv;
937 lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
938 rc = lfsck_open_dir(env, lfsck, 0);
940 rc = lfsck_master_dir_engine(env, lfsck);
946 lfsck->li_new_scanned++;
947 lfsck->li_pos_current.lp_oit_cookie = iops->store(env, di);
948 rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
950 CDEBUG(D_LFSCK, "%s: OIT scan failed at rec(): "
951 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
952 lfsck_fail(env, lfsck, true);
953 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* IDIF FIDs only appear on OSTs (slave side). */
959 if (fid_is_idif(fid)) {
960 __u32 idx1 = fid_idif_ost_idx(fid);
962 LASSERT(!lfsck->li_master);
964 /* It is an old format device, update the LMA. */
966 struct ost_id *oi = &info->lti_oi;
968 fid_to_ostid(fid, oi);
969 ostid_to_fid(fid, oi, idx);
972 } else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
973 !fid_is_last_id(fid) &&
974 !lu_fid_eq(fid, &lfsck->li_global_root_fid)) {
976 /* If the FID/object is only used locally and invisible
977 * to external nodes, then LFSCK will not handle it.
979 * dot_lustre sequence has been handled specially. */
982 struct lu_seq_range *range = &info->lti_range;
984 if (lfsck->li_master)
985 fld_range_set_mdt(range);
987 fld_range_set_ost(range);
988 rc = fld_local_lookup(env, ss->ss_server_fld,
989 fid_seq(fid), range);
990 if (rc != 0 || range->lsr_index != idx) {
991 /* Remote object will be handled by the LFSCK
992 * instance on the MDT where the remote object
993 * really resides on. */
999 target = lfsck_object_find(env, lfsck, fid);
1000 if (IS_ERR(target)) {
1001 CDEBUG(D_LFSCK, "%s: OIT scan failed at find target "
1002 DFID", cookie "LPU64": rc = %d\n",
1003 lfsck_lfsck2name(lfsck), PFID(fid),
1004 iops->store(env, di), rc);
1005 lfsck_fail(env, lfsck, true);
1006 if (bk->lb_param & LPF_FAILOUT)
1007 RETURN(PTR_ERR(target));
1012 if (dt_object_exists(target)) {
/* Refresh the LMA first when the IDIF path flagged it. */
1014 rc = lfsck_update_lma(env, lfsck, target);
1016 CDEBUG(D_LFSCK, "%s: fail to update "
1017 "LMA for "DFID": rc = %d\n",
1018 lfsck_lfsck2name(lfsck),
1019 PFID(lfsck_dto2fid(target)), rc);
1022 rc = lfsck_exec_oit(env, lfsck, target);
1024 lfsck_object_put(env, target);
1025 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
1029 rc = lfsck_checkpoint(env, lfsck);
1030 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Throttle to the configured speed limit between objects. */
1034 lfsck_control_speed(lfsck);
/* Fault injection: simulate a fatal failure by stopping the svc. */
1036 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL1)) {
1037 spin_lock(&lfsck->li_lock);
1038 thread_set_flags(thread, SVC_STOPPING);
1039 spin_unlock(&lfsck->li_lock);
1043 rc = iops->next(env, di);
1044 if (unlikely(rc > 0))
1045 lfsck->li_oit_over = 1;
1046 else if (likely(rc == 0))
1047 lfsck->li_current_oit_processed = 0;
1049 if (unlikely(!thread_is_running(thread))) {
1050 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
1051 "cookie "LPU64"\n", lfsck_lfsck2name(lfsck),
1052 iops->store(env, di));
/* Keep looping while there are OIT records or an open directory. */
1055 } while (rc == 0 || lfsck->li_di_dir != NULL);
/*
 * Main LFSCK kernel-thread entry point on the master.
 *
 * Verifies .lustre/lost+found/ (non-fatal on failure), initializes the
 * OIT iterator and publishes it, prepares all components (lfsck_prep),
 * signals SVC_RUNNING and waits for the start unplug, then drives the
 * OIT engine.  On the way out it posts the components (unless the CRASH
 * fault point is set), runs the double scan if requested, quits the
 * components, and signals SVC_STOPPED before freeing the thread args.
 * @args is a struct lfsck_thread_args ownership of which is taken here
 * (freed via lfsck_thread_args_fini).
 */
1060 int lfsck_master_engine(void *args)
1062 struct lfsck_thread_args *lta = args;
1063 struct lu_env *env = &lta->lta_env;
1064 struct lfsck_instance *lfsck = lta->lta_lfsck;
1065 struct ptlrpc_thread *thread = &lfsck->li_thread;
1066 struct dt_object *oit_obj = lfsck->li_obj_oit;
1067 const struct dt_it_ops *oit_iops = &oit_obj->do_index_ops->dio_it;
1068 struct dt_it *oit_di;
1069 struct l_wait_info lwi = { 0 };
1073 if (lfsck->li_master &&
1074 (!list_empty(&lfsck->li_list_scan) ||
1075 !list_empty(&lfsck->li_list_double_scan))) {
1076 rc = lfsck_verify_lpf(env, lfsck);
1077 /* Fail to verify the .lustre/lost+found/MDTxxxx/ may be not
1078 * fatal, because the .lustre/lost+found/ maybe not accessed
1079 * by the LFSCK if it does not add orphans or others to such
1080 * directory. So go ahead until hit failure when really uses
1083 CDEBUG(D_LFSCK, "%s: master engine fail to verify the "
1084 ".lustre/lost+found/, go ahead: rc = %d\n",
1085 lfsck_lfsck2name(lfsck), rc);
1088 oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit, BYPASS_CAPA);
1089 if (IS_ERR(oit_di)) {
1090 rc = PTR_ERR(oit_di);
1091 CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
1092 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
1094 GOTO(fini_args, rc);
/* Publish the OIT iterator so other threads can see it. */
1097 spin_lock(&lfsck->li_lock);
1098 lfsck->li_di_oit = oit_di;
1099 spin_unlock(&lfsck->li_lock);
1100 rc = lfsck_prep(env, lfsck, lta->lta_lsp);
1104 CDEBUG(D_LFSCK, "LFSCK entry: oit_flags = %#x, dir_flags = %#x, "
1105 "oit_cookie = "LPU64", dir_cookie = "LPX64", parent = "DFID
1106 ", pid = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
1107 lfsck->li_pos_checkpoint.lp_oit_cookie,
1108 lfsck->li_pos_checkpoint.lp_dir_cookie,
1109 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
/* Announce that the engine is up and wait for the start unplug. */
1112 spin_lock(&lfsck->li_lock);
1113 thread_set_flags(thread, SVC_RUNNING);
1114 spin_unlock(&lfsck->li_lock);
1115 wake_up_all(&thread->t_ctl_waitq);
1117 l_wait_event(thread->t_ctl_waitq,
1118 lfsck->li_start_unplug ||
1119 !thread_is_running(thread),
1121 if (!thread_is_running(thread))
1122 GOTO(fini_oit, rc = 0);
1124 if (!list_empty(&lfsck->li_list_scan) ||
1125 list_empty(&lfsck->li_list_double_scan))
1126 rc = lfsck_master_oit_engine(env, lfsck);
1130 CDEBUG(D_LFSCK, "LFSCK exit: oit_flags = %#x, dir_flags = %#x, "
1131 "oit_cookie = "LPU64", dir_cookie = "LPX64", parent = "DFID
1132 ", pid = %d, rc = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
1133 lfsck->li_pos_checkpoint.lp_oit_cookie,
1134 lfsck->li_pos_checkpoint.lp_dir_cookie,
1135 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
/* Skip the post step when simulating a crash via fault injection. */
1138 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
1139 rc = lfsck_post(env, lfsck, rc);
1141 lfsck_close_dir(env, lfsck, rc);
1144 lfsck_di_oit_put(env, lfsck);
1145 oit_iops->fini(env, oit_di);
1147 if (!list_empty(&lfsck->li_list_double_scan))
1148 rc = lfsck_double_scan(env, lfsck);
1152 lfsck_quit(env, lfsck);
1155 /* XXX: Purge the pinned objects in the future. */
1158 spin_lock(&lfsck->li_lock);
1159 thread_set_flags(thread, SVC_STOPPED);
1160 spin_unlock(&lfsck->li_lock);
1161 wake_up_all(&thread->t_ctl_waitq);
1162 lfsck_thread_args_fini(lta);
/*
 * Check under lad_lock whether the assistant request queue is empty.
 *
 * NOTE(review): the boolean result variable set between the list check
 * and the unlock is not visible in this view.
 */
1166 static inline bool lfsck_assistant_req_empty(struct lfsck_assistant_data *lad)
1170 spin_lock(&lad->lad_lock);
1171 if (list_empty(&lad->lad_req_list))
1173 spin_unlock(&lad->lad_lock);
1179 * Query the LFSCK status from the instatnces on remote servers.
1181 * The LFSCK assistant thread queries the LFSCK instances on other
1182 * servers (MDT/OST) about their status, such as whether they have
1183 * finished the phase1/phase2 scanning or not, and so on.
1185 * \param[in] env pointer to the thread context
1186 * \param[in] com pointer to the lfsck component
1188 * \retval 0 for success
1189 * \retval negative error number on failure
1191 static int lfsck_assistant_query_others(const struct lu_env *env,
1192 struct lfsck_component *com)
1194 struct lfsck_thread_info *info = lfsck_env_info(env);
1195 struct lfsck_request *lr = &info->lti_lr;
1196 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1197 struct lfsck_instance *lfsck = com->lc_lfsck;
1198 struct lfsck_assistant_data *lad = com->lc_data;
1199 struct ptlrpc_request_set *set;
1200 struct lfsck_tgt_descs *ltds;
1201 struct lfsck_tgt_desc *ltd;
1202 struct list_head *phase_head;
1207 set = ptlrpc_prep_set();
/* Bump the touch generation so each target is queried at most once
 * per pass. */
1211 lad->lad_touch_gen++;
1212 memset(lr, 0, sizeof(*lr));
1213 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1214 lr->lr_event = LE_QUERY;
1215 lr->lr_active = com->lc_type;
1216 laia->laia_com = com;
1218 laia->laia_shared = 0;
/* Query MDTs first (phase1), then OSTs for layout LFSCK. */
1220 if (!list_empty(&lad->lad_mdt_phase1_list)) {
1221 ltds = &lfsck->li_mdt_descs;
1223 phase_head = &lad->lad_mdt_phase1_list;
1224 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1229 ltds = &lfsck->li_ost_descs;
1230 lr->lr_flags = LEF_TO_OST;
1231 phase_head = &lad->lad_ost_phase1_list;
1234 laia->laia_ltds = ltds;
1235 spin_lock(&ltds->ltd_lock);
1236 while (!list_empty(phase_head)) {
1237 struct list_head *phase_list;
/* The phase list / generation fields differ between layout and
 * namespace LFSCK targets. */
1240 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1241 ltd = list_entry(phase_head->next,
1242 struct lfsck_tgt_desc,
1243 ltd_layout_phase_list);
1244 phase_list = &ltd->ltd_layout_phase_list;
1245 gen = &ltd->ltd_layout_gen;
1247 ltd = list_entry(phase_head->next,
1248 struct lfsck_tgt_desc,
1249 ltd_namespace_phase_list);
1250 phase_list = &ltd->ltd_namespace_phase_list;
1251 gen = &ltd->ltd_namespace_gen;
/* Already touched in this pass: the list walk is complete. */
1254 if (*gen == lad->lad_touch_gen)
1257 *gen = lad->lad_touch_gen;
1258 list_move_tail(phase_list, phase_head);
1259 atomic_inc(&ltd->ltd_ref);
1260 laia->laia_ltd = ltd;
/* Drop the lock across the async RPC submission. */
1261 spin_unlock(&ltds->ltd_lock);
1262 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1263 lfsck_async_interpret_common,
1266 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to query "
1267 "%s %x for %s: rc = %d\n",
1268 lfsck_lfsck2name(lfsck),
1269 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1270 ltd->ltd_index, lad->lad_name, rc);
1274 spin_lock(&ltds->ltd_lock);
1276 spin_unlock(&ltds->ltd_lock);
1278 rc = ptlrpc_set_wait(set);
1280 ptlrpc_set_destroy(set);
/* For layout LFSCK, after the MDT pass fall through to the OSTs. */
1284 if (com->lc_type == LFSCK_TYPE_LAYOUT && !(lr->lr_flags & LEF_TO_OST) &&
1285 list_empty(&lad->lad_mdt_phase1_list))
1289 ptlrpc_set_destroy(set);
1291 RETURN(rc1 != 0 ? rc1 : rc);
1295 * Notify the LFSCK event to the instatnces on remote servers.
1297 * The LFSCK assistant thread notifies the LFSCK instances on other
1298 * servers (MDT/OST) about some events, such as start new scanning,
1299 * stop the scanning, this LFSCK instance will exit, and so on.
1301 * \param[in] env pointer to the thread context
1302 * \param[in] com pointer to the lfsck component
1303 * \param[in] lr pointer to the LFSCK event request
1305 * \retval 0 for success
1306 * \retval negative error number on failure
1308 static int lfsck_assistant_notify_others(const struct lu_env *env,
1309 struct lfsck_component *com,
1310 struct lfsck_request *lr)
/* NOTE(review): this excerpt is an extraction with interior lines missing
 * (the embedded original line numbers jump, e.g. 1310 -> 1312, 1320 -> 1326)
 * and "&ltd"/"&ltds" have been mangled into "<d"/"<ds" by an HTML-entity
 * pass. The comments below state only what the visible lines establish;
 * confirm every detail against the complete lfsck_engine.c. */
1312 struct lfsck_thread_info *info = lfsck_env_info(env);
1313 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1314 struct lfsck_instance *lfsck = com->lc_lfsck;
1315 struct lfsck_assistant_data *lad = com->lc_data;
1316 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1317 struct ptlrpc_request_set *set;
1318 struct lfsck_tgt_descs *ltds;
1319 struct lfsck_tgt_desc *ltd;
1320 struct lfsck_tgt_desc *next;
/* Allocate the RPC set used to batch the async notifications.
 * NOTE(review): the failure check for ptlrpc_prep_set() is on one of the
 * lines missing from this excerpt. */
1326 set = ptlrpc_prep_set();
/* Identify the sending device and the active LFSCK component. */
1330 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1331 lr->lr_active = com->lc_type;
1332 laia->laia_com = com;
1334 laia->laia_shared = 0;
/* Dispatch on the event being broadcast. NOTE(review): the case label
 * for the first branch (presumably LE_START) is among the missing
 * lines. */
1336 switch (lr->lr_event) {
1338 if (com->lc_type != LFSCK_TYPE_LAYOUT)
/* Start event: copy the tunables from the in-RAM bookmark so the
 * remote instances run with the same parameters as the master. */
1341 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
1342 LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
1343 lr->lr_speed = bk->lb_speed_limit;
1344 lr->lr_version = bk->lb_version;
1345 lr->lr_param |= bk->lb_param;
1346 lr->lr_async_windows = bk->lb_async_windows;
1347 lr->lr_flags = LEF_TO_OST;
1349 /* Notify OSTs firstly, then handle other MDTs if needed. */
1350 ltds = &lfsck->li_ost_descs;
1351 laia->laia_ltds = ltds;
1352 down_read(<ds->ltd_rw_sem);
1353 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1354 ltd = lfsck_tgt_get(ltds, idx);
1355 LASSERT(ltd != NULL);
1357 laia->laia_ltd = ltd;
1358 ltd->ltd_layout_done = 0;
1359 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1360 lfsck_async_interpret_common,
1361 laia, LFSCK_NOTIFY);
/* On a failed request, record the OST index in the component's
 * bitmap before logging (the enclosing error check is on a missing
 * line). */
1363 lfsck_lad_set_bitmap(env, com, idx);
1364 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1365 "notify OST %x for %s start: rc = %d\n",
1366 lfsck_lfsck2name(lfsck), idx,
1371 up_read(<ds->ltd_rw_sem);
/* Flush the queued start RPCs to all OSTs before touching MDTs. */
1374 rc = ptlrpc_set_wait(set);
1376 ptlrpc_set_destroy(set);
/* Only broadcast to the other MDTs when all targets take part. */
1381 if (!(bk->lb_param & LPF_ALL_TGT))
1384 /* link other MDT targets locally. */
1385 ltds = &lfsck->li_mdt_descs;
1386 spin_lock(<ds->ltd_lock);
1387 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1388 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1389 ltd = LTD_TGT(ltds, idx);
1390 LASSERT(ltd != NULL);
/* Skip targets that are already linked on a layout list. */
1392 if (!list_empty(<d->ltd_layout_list))
1395 list_add_tail(<d->ltd_layout_list,
1396 &lad->lad_mdt_list);
1397 list_add_tail(<d->ltd_layout_phase_list,
1398 &lad->lad_mdt_phase1_list);
/* Namespace LFSCK: the same linkage via the namespace lists. */
1401 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1402 ltd = LTD_TGT(ltds, idx);
1403 LASSERT(ltd != NULL);
1405 if (!list_empty(<d->ltd_namespace_list))
1408 list_add_tail(<d->ltd_namespace_list,
1409 &lad->lad_mdt_list);
1410 list_add_tail(<d->ltd_namespace_phase_list,
1411 &lad->lad_mdt_phase1_list);
1414 spin_unlock(<ds->ltd_lock);
/* NOTE(review): an LE_STOP case label presumably sits on the missing
 * line before these two labels -- the code below tests
 * lr->lr_event == LE_STOP explicitly. */
1417 case LE_PHASE2_DONE:
1418 case LE_PEER_EXIT: {
1419 struct list_head *phase_head;
1421 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1422 if (bk->lb_param & LPF_ALL_TGT) {
1423 phase_head = &lad->lad_mdt_list;
1424 ltds = &lfsck->li_mdt_descs;
1425 if (lr->lr_event == LE_STOP) {
1426 /* unlink other MDT targets locally. */
1427 spin_lock(<ds->ltd_lock);
1428 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1429 list_for_each_entry_safe(ltd, next,
1430 phase_head, ltd_layout_list) {
1432 <d->ltd_layout_phase_list);
1434 <d->ltd_layout_list);
1437 list_for_each_entry_safe(ltd, next,
1439 ltd_namespace_list) {
1441 <d->ltd_namespace_phase_list);
1443 <d->ltd_namespace_list);
1446 spin_unlock(<ds->ltd_lock);
1448 if (com->lc_type != LFSCK_TYPE_LAYOUT)
/* Layout LFSCK continues with the OST list; the flag selects
 * which server type the request is addressed to. */
1451 lr->lr_flags |= LEF_TO_OST;
1452 phase_head = &lad->lad_ost_list;
1453 ltds = &lfsck->li_ost_descs;
1455 lr->lr_flags &= ~LEF_TO_OST;
1457 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1460 lr->lr_flags |= LEF_TO_OST;
1461 phase_head = &lad->lad_ost_list;
1462 ltds = &lfsck->li_ost_descs;
/* Pop targets off the selected phase list one at a time; the spin
 * lock is dropped around the async RPC and retaken afterwards. */
1466 laia->laia_ltds = ltds;
1467 spin_lock(<ds->ltd_lock);
1468 while (!list_empty(phase_head)) {
1469 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1470 ltd = list_entry(phase_head->next,
1471 struct lfsck_tgt_desc,
1473 if (!list_empty(<d->ltd_layout_phase_list))
1475 <d->ltd_layout_phase_list);
1476 list_del_init(<d->ltd_layout_list);
1478 ltd = list_entry(phase_head->next,
1479 struct lfsck_tgt_desc,
1480 ltd_namespace_list);
1481 if (!list_empty(<d->ltd_namespace_phase_list))
1483 <d->ltd_namespace_phase_list);
1484 list_del_init(<d->ltd_namespace_list);
/* Hold a reference on the target across the unlocked RPC. */
1486 atomic_inc(<d->ltd_ref);
1487 laia->laia_ltd = ltd;
1488 spin_unlock(<ds->ltd_lock);
1489 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1490 lfsck_async_interpret_common,
1491 laia, LFSCK_NOTIFY);
1493 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1494 "notify %s %x for %s stop/phase2_done/"
1495 "peer_exit: rc = %d\n",
1496 lfsck_lfsck2name(lfsck),
1497 (lr->lr_flags & LEF_TO_OST) ?
1498 "OST" : "MDT", ltd->ltd_index,
1502 spin_lock(<ds->ltd_lock);
1504 spin_unlock(<ds->ltd_lock);
1506 rc = ptlrpc_set_wait(set);
1508 ptlrpc_set_destroy(set);
/* For layout LFSCK, once the MDT pass is done repeat the same loop
 * for the OST list. */
1512 if (com->lc_type == LFSCK_TYPE_LAYOUT &&
1513 !(lr->lr_flags & LEF_TO_OST)) {
1514 lr->lr_flags |= LEF_TO_OST;
1515 phase_head = &lad->lad_ost_list;
1516 ltds = &lfsck->li_ost_descs;
/* Local phase 1 finished: sync failure state to the request, bump the
 * touch generation and notify each linked MDT exactly once. */
1521 case LE_PHASE1_DONE:
1522 lad->lad_ops->la_sync_failures(env, com, lr);
1523 lad->lad_touch_gen++;
1524 ltds = &lfsck->li_mdt_descs;
1525 laia->laia_ltds = ltds;
1526 spin_lock(<ds->ltd_lock);
1527 while (!list_empty(&lad->lad_mdt_list)) {
1528 struct list_head *list;
1531 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1532 ltd = list_entry(lad->lad_mdt_list.next,
1533 struct lfsck_tgt_desc,
1535 list = <d->ltd_layout_list;
1536 gen = <d->ltd_layout_gen;
1538 struct lfsck_namespace *ns = com->lc_file_ram;
1540 ltd = list_entry(lad->lad_mdt_list.next,
1541 struct lfsck_tgt_desc,
1542 ltd_namespace_list);
1543 list = <d->ltd_namespace_list;
1544 gen = <d->ltd_namespace_gen;
1545 lr->lr_flags2 = ns->ln_flags & ~LF_INCOMPLETE;
/* A target whose generation already matches was handled in this
 * round, so the whole list has been touched. */
1548 if (*gen == lad->lad_touch_gen)
1551 *gen = lad->lad_touch_gen;
1552 list_move_tail(list, &lad->lad_mdt_list);
1553 if (ltd->ltd_namespace_failed)
1556 atomic_inc(<d->ltd_ref);
1557 laia->laia_ltd = ltd;
1558 spin_unlock(<ds->ltd_lock);
1559 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1560 lfsck_async_interpret_common,
1561 laia, LFSCK_NOTIFY);
1563 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1564 "notify MDT %x for %s phase1 done: "
1565 "rc = %d\n", lfsck_lfsck2name(lfsck),
1566 ltd->ltd_index, lad->lad_name, rc);
1569 spin_lock(<ds->ltd_lock);
1571 spin_unlock(<ds->ltd_lock);
/* default: an event this assistant does not understand -- log it. */
1574 CDEBUG(D_LFSCK, "%s: LFSCK assistant unexpected LFSCK event: "
1575 "rc = %d\n", lfsck_lfsck2name(lfsck), lr->lr_event);
/* Drain every notification RPC still queued before returning; the
 * first recorded failure (rc) takes precedence over the wait result. */
1580 rc1 = ptlrpc_set_wait(set);
1581 ptlrpc_set_destroy(set);
1583 RETURN(rc != 0 ? rc : rc1);
1587 * The LFSCK assistant thread is triggered by the LFSCK main engine.
1588 * They co-work together as an asynchronous pipeline: the LFSCK main
1589 * engine scans the system and pre-fetches the objects, attributes,
1590 * or name entries, etc, and pushes them into the pipeline as input
1591 * requests for the LFSCK assistant thread; on the other end of the
1592 * pipeline, the LFSCK assistant thread performs the real check and
1593 * repair for every request from the main engine.
1595 * Generally, the assistant engine may be blocked when check/repair
1596 * something, so the LFSCK main engine will run somewhat faster. On the
1597 * other hand, the LFSCK main engine will drive multiple assistant
1598 * threads in parallel, meaning that for each LFSCK component on the master
1599 * (such as layout LFSCK, namespace LFSCK), there is an independent
1600 * LFSCK assistant thread. So under such 1:N multiple asynchronous
1601 * pipelines mode, the whole LFSCK performance will be much better
1602 * than check/repair everything by the LFSCK main engine itself.
1604 int lfsck_assistant_engine(void *args)
1606 struct lfsck_thread_args *lta = args;
1607 struct lu_env *env = <a->lta_env;
1608 struct lfsck_component *com = lta->lta_com;
1609 struct lfsck_instance *lfsck = lta->lta_lfsck;
1610 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1611 struct lfsck_position *pos = &com->lc_pos_start;
1612 struct lfsck_thread_info *info = lfsck_env_info(env);
1613 struct lfsck_request *lr = &info->lti_lr;
1614 struct lfsck_assistant_data *lad = com->lc_data;
1615 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1616 struct ptlrpc_thread *athread = &lad->lad_thread;
1617 struct lfsck_assistant_operations *lao = lad->lad_ops;
1618 struct lfsck_assistant_req *lar;
1619 struct l_wait_info lwi = { 0 };
1624 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread start\n",
1625 lfsck_lfsck2name(lfsck), lad->lad_name);
1627 memset(lr, 0, sizeof(*lr));
1628 lr->lr_event = LE_START;
1629 if (pos->lp_oit_cookie <= 1)
1630 lr->lr_param = LPF_RESET;
1631 rc = lfsck_assistant_notify_others(env, com, lr);
1633 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to notify others "
1634 "to start %s: rc = %d\n",
1635 lfsck_lfsck2name(lfsck), lad->lad_name, rc);
1639 spin_lock(&lad->lad_lock);
1640 thread_set_flags(athread, SVC_RUNNING);
1641 spin_unlock(&lad->lad_lock);
1642 wake_up_all(&mthread->t_ctl_waitq);
1645 while (!list_empty(&lad->lad_req_list)) {
1646 bool wakeup = false;
1648 if (unlikely(lad->lad_exit ||
1649 !thread_is_running(mthread)))
1650 GOTO(cleanup1, rc = lad->lad_post_result);
1652 lar = list_entry(lad->lad_req_list.next,
1653 struct lfsck_assistant_req,
1655 /* Only the lfsck_assistant_engine thread itself can
1656 * remove the "lar" from the head of the list, LFSCK
1657 * engine thread only inserts other new "lar" at the
1658 * end of the list. So it is safe to handle current
1659 * "lar" without the spin_lock. */
1660 rc = lao->la_handler_p1(env, com, lar);
1661 spin_lock(&lad->lad_lock);
1662 list_del_init(&lar->lar_list);
1663 lad->lad_prefetched--;
1664 /* Wake up the main engine thread only when the list
1665 * is empty or half of the prefetched items have been
1666 * handled to avoid too frequent thread schedule. */
1667 if (lad->lad_prefetched == 0 ||
1668 (bk->lb_async_windows != 0 &&
1669 bk->lb_async_windows / 2 ==
1670 lad->lad_prefetched))
1672 spin_unlock(&lad->lad_lock);
1674 wake_up_all(&mthread->t_ctl_waitq);
1676 lao->la_req_fini(env, lar);
1677 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
1681 l_wait_event(athread->t_ctl_waitq,
1682 !lfsck_assistant_req_empty(lad) ||
1685 lad->lad_to_double_scan,
1688 if (unlikely(lad->lad_exit))
1689 GOTO(cleanup1, rc = lad->lad_post_result);
1691 if (!list_empty(&lad->lad_req_list))
1694 if (lad->lad_to_post) {
1695 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread post\n",
1696 lfsck_lfsck2name(lfsck), lad->lad_name);
1698 if (unlikely(lad->lad_exit))
1699 GOTO(cleanup1, rc = lad->lad_post_result);
1701 lad->lad_to_post = 0;
1702 LASSERT(lad->lad_post_result > 0);
1704 memset(lr, 0, sizeof(*lr));
1705 lr->lr_event = LE_PHASE1_DONE;
1706 lr->lr_status = lad->lad_post_result;
1707 rc = lfsck_assistant_notify_others(env, com, lr);
1709 CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to "
1710 "notify others for %s post: rc = %d\n",
1711 lfsck_lfsck2name(lfsck),
1714 /* Wakeup the master engine to go ahead. */
1715 wake_up_all(&mthread->t_ctl_waitq);
1718 if (lad->lad_to_double_scan) {
1719 lad->lad_to_double_scan = 0;
1720 atomic_inc(&lfsck->li_double_scan_count);
1721 lad->lad_in_double_scan = 1;
1722 wake_up_all(&mthread->t_ctl_waitq);
1724 com->lc_new_checked = 0;
1725 com->lc_new_scanned = 0;
1726 com->lc_time_last_checkpoint = cfs_time_current();
1727 com->lc_time_next_checkpoint =
1728 com->lc_time_last_checkpoint +
1729 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
1731 /* Flush async updates before handling orphan. */
1732 dt_sync(env, lfsck->li_next);
1734 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 "
1735 "scan start\n", lfsck_lfsck2name(lfsck));
1737 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_DOUBLESCAN))
1738 GOTO(cleanup2, rc = 0);
1740 while (lad->lad_in_double_scan) {
1741 rc = lfsck_assistant_query_others(env, com);
1742 if (lfsck_phase2_next_ready(lad))
1748 /* Pull LFSCK status on related targets once
1749 * per 30 seconds if we are not notified. */
1750 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
1751 cfs_time_seconds(1),
1753 rc = l_wait_event(athread->t_ctl_waitq,
1754 lfsck_phase2_next_ready(lad) ||
1756 !thread_is_running(mthread),
1759 if (unlikely(lad->lad_exit ||
1760 !thread_is_running(mthread)))
1761 GOTO(cleanup2, rc = 0);
1763 if (rc == -ETIMEDOUT)
1770 rc = lao->la_handler_p2(env, com);
1774 if (unlikely(lad->lad_exit ||
1775 !thread_is_running(mthread)))
1776 GOTO(cleanup2, rc = 0);
1782 /* Cleanup the unfinished requests. */
1783 spin_lock(&lad->lad_lock);
1785 lad->lad_assistant_status = rc;
1787 if (lad->lad_exit && lad->lad_post_result <= 0)
1788 lao->la_fill_pos(env, com, &lfsck->li_pos_checkpoint);
1790 while (!list_empty(&lad->lad_req_list)) {
1791 lar = list_entry(lad->lad_req_list.next,
1792 struct lfsck_assistant_req,
1794 list_del_init(&lar->lar_list);
1795 lad->lad_prefetched--;
1796 spin_unlock(&lad->lad_lock);
1797 lao->la_req_fini(env, lar);
1798 spin_lock(&lad->lad_lock);
1800 spin_unlock(&lad->lad_lock);
1802 LASSERTF(lad->lad_prefetched == 0, "unmatched prefeteched objs %d\n",
1803 lad->lad_prefetched);
1806 memset(lr, 0, sizeof(*lr));
1808 lr->lr_event = LE_PHASE2_DONE;
1810 } else if (rc == 0) {
1811 if (lfsck->li_flags & LPF_ALL_TGT) {
1812 lr->lr_event = LE_STOP;
1813 lr->lr_status = LS_STOPPED;
1815 lr->lr_event = LE_PEER_EXIT;
1816 switch (lfsck->li_status) {
1819 lr->lr_status = LS_CO_PAUSED;
1823 lr->lr_status = LS_CO_STOPPED;
1826 CDEBUG(D_LFSCK, "%s: LFSCK assistant unknown "
1827 "status: rc = %d\n",
1828 lfsck_lfsck2name(lfsck),
1830 lr->lr_status = LS_CO_FAILED;
1835 if (lfsck->li_flags & LPF_ALL_TGT) {
1836 lr->lr_event = LE_STOP;
1837 lr->lr_status = LS_FAILED;
1839 lr->lr_event = LE_PEER_EXIT;
1840 lr->lr_status = LS_CO_FAILED;
1844 rc1 = lfsck_assistant_notify_others(env, com, lr);
1846 CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to notify "
1847 "others for %s quit: rc = %d\n",
1848 lfsck_lfsck2name(lfsck), lad->lad_name, rc1);
1852 /* Flush async updates before exit. */
1853 dt_sync(env, lfsck->li_next);
1855 /* Under force exit case, some requests may be just freed without
1856 * verification, those objects should be re-handled when next run.
1857 * So not update the on-disk trace file under such case. */
1858 if (lad->lad_in_double_scan) {
1860 rc1 = lao->la_double_scan_result(env, com, rc);
1862 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 scan "
1863 "finished: rc = %d\n",
1864 lfsck_lfsck2name(lfsck), rc1 != 0 ? rc1 : rc);
1868 if (lad->lad_in_double_scan)
1869 atomic_dec(&lfsck->li_double_scan_count);
1871 spin_lock(&lad->lad_lock);
1872 lad->lad_assistant_status = (rc1 != 0 ? rc1 : rc);
1873 thread_set_flags(athread, SVC_STOPPED);
1874 wake_up_all(&mthread->t_ctl_waitq);
1875 spin_unlock(&lad->lad_lock);
1877 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread exit: rc = %d\n",
1878 lfsck_lfsck2name(lfsck), lad->lad_name,
1879 lad->lad_assistant_status);
1881 lfsck_thread_args_fini(lta);