4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2012, 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_engine.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <lu_object.h>
34 #include <dt_object.h>
35 #include <lustre_net.h>
36 #include <lustre_fid.h>
37 #include <obd_support.h>
38 #include <lustre_lib.h>
40 #include "lfsck_internal.h"
42 static int lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie, __u16 *type)
45 int align = sizeof(*lt) - 1;
48 fid_le_to_cpu(&ent->lde_fid, &ent->lde_fid);
49 *cookie = le64_to_cpu(ent->lde_hash);
50 ent->lde_reclen = le16_to_cpu(ent->lde_reclen);
51 ent->lde_namelen = le16_to_cpu(ent->lde_namelen);
52 ent->lde_attrs = le32_to_cpu(ent->lde_attrs);
54 if (unlikely(!(ent->lde_attrs & LUDA_TYPE)))
57 len = (ent->lde_namelen + align) & ~align;
58 lt = (struct luda_type *)(ent->lde_name + len);
59 *type = le16_to_cpu(lt->lt_type);
61 /* Make sure the name is terminated with '\0'. The data (object type)
62 * after ent::lde_name maybe broken, but we have stored such data in
63 * the output parameter @type as above. */
64 ent->lde_name[ent->lde_namelen] = '\0';
69 static void lfsck_di_oit_put(const struct lu_env *env, struct lfsck_instance *lfsck)
71 const struct dt_it_ops *iops;
74 spin_lock(&lfsck->li_lock);
75 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
76 di = lfsck->li_di_oit;
77 lfsck->li_di_oit = NULL;
78 spin_unlock(&lfsck->li_lock);
82 static void lfsck_di_dir_put(const struct lu_env *env, struct lfsck_instance *lfsck)
84 const struct dt_it_ops *iops;
87 spin_lock(&lfsck->li_lock);
88 iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
89 di = lfsck->li_di_dir;
90 lfsck->li_di_dir = NULL;
91 lfsck->li_cookie_dir = 0;
92 spin_unlock(&lfsck->li_lock);
96 static void lfsck_close_dir(const struct lu_env *env,
97 struct lfsck_instance *lfsck)
99 struct dt_object *dir_obj = lfsck->li_obj_dir;
100 const struct dt_it_ops *dir_iops = &dir_obj->do_index_ops->dio_it;
101 struct dt_it *dir_di = lfsck->li_di_dir;
103 lfsck_di_dir_put(env, lfsck);
104 dir_iops->fini(env, dir_di);
105 lfsck->li_obj_dir = NULL;
106 lfsck_object_put(env, dir_obj);
109 static int lfsck_update_lma(const struct lu_env *env,
110 struct lfsck_instance *lfsck, struct dt_object *obj)
112 struct lfsck_thread_info *info = lfsck_env_info(env);
113 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
114 struct dt_device *dt = lfsck->li_bottom;
115 struct lustre_mdt_attrs *lma = &info->lti_lma;
122 if (bk->lb_param & LPF_DRYRUN)
125 buf = lfsck_buf_get(env, info->lti_lma_old, LMA_OLD_SIZE);
126 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LMA, BYPASS_CAPA);
131 fl = LU_XATTR_CREATE;
132 lustre_lma_init(lma, lfsck_dto2fid(obj), LMAC_FID_ON_OST, 0);
134 if (rc != LMA_OLD_SIZE && rc != sizeof(struct lustre_mdt_attrs))
137 fl = LU_XATTR_REPLACE;
138 lustre_lma_swab(lma);
139 lustre_lma_init(lma, lfsck_dto2fid(obj),
140 lma->lma_compat | LMAC_FID_ON_OST,
143 lustre_lma_swab(lma);
145 th = dt_trans_create(env, dt);
149 buf = lfsck_buf_get(env, lma, sizeof(*lma));
150 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th);
154 rc = dt_trans_start(env, dt, th);
158 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th, BYPASS_CAPA);
163 dt_trans_stop(env, dt, th);
167 static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
170 if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
171 !dt_try_as_dir(env, obj)))
174 return dt_lookup(env, obj, (struct dt_rec *)fid,
175 (const struct dt_key *)"..", BYPASS_CAPA);
179 * Check whether needs to scan the directory or not.
181 * 1) If we are not doing namespace LFSCK, or the given @obj is not directory,
182 * then needs not to scan the @obj. Otherwise,
183 * 2) Global /ROOT needs to be scanned, backend root needs not to be scanned.
184 * 3) If the @obj is neither IGIF nor normal FID (including .lustre and its
185 * sub-directories that have been scanned when the LFSCK engine start),
186 * then needs not to be scanned.
187 * 4) If it is a remote object, then scanning the object will be done on the
188 * MDT on which the object really resides.
189 * 5) If the local object has normal FID, then needs to be scanned. Otherwise,
190 * 6) If the object has linkEA, then needs to be scanned. Otherwise,
191 * 7) If none of the previous conditions are true, we need to check the parent
192 * directories whether this subdirectory is in a tree that should be scanned.
193 * Set the parent as current @obj, repeat 2)-7).
195 * \param[in] env pointer to the thread context
196 * \param[in] lfsck pointer to the lfsck instance
197 * \param[in] obj pointer to the object to be checked
199 * \retval positive number if the directory needs to be scanned
200 * \retval 0 if the directory needs NOT to be scanned
201 * \retval negative error number on failure
/* Walk up from @obj towards the root deciding whether the directory is in a
 * client-visible subtree that the namespace LFSCK must scan (see the rules
 * in the comment block above this function).
 *
 * NOTE(review): this copy of the file is missing lines — the opening brace,
 * the declarations of the locals used below (at least "rc" and "depth"),
 * the loop construct around the ancestor walk, and several return
 * statements are not visible. Restore them from the upstream
 * lustre/lfsck/lfsck_engine.c before building. */
203 static int lfsck_needs_scan_dir(const struct lu_env *env,
204 struct lfsck_instance *lfsck,
205 struct dt_object *obj)
207 struct lfsck_thread_info *info = lfsck_env_info(env);
208 struct lu_fid *fid = &info->lti_fid;
209 struct lu_seq_range *range = &info->lti_range;
210 struct dt_device *dev = lfsck->li_bottom;
211 struct seq_server_site *ss = lu_site2seq(dev->dd_lu_dev.ld_site);
212 __u32 idx = lfsck_dev_idx(dev);
/* Nothing to scan unless a namespace-style (directory) scan is active and
 * the object itself is a directory. */
216 if (list_empty(&lfsck->li_list_dir) || !S_ISDIR(lfsck_object_type(obj)))
221 *fid = *lfsck_dto2fid(obj);
223 /* Global /ROOT is visible. */
224 if (unlikely(lu_fid_eq(fid, &lfsck->li_global_root_fid)))
227 /* Backend root is invisible. */
228 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
/* Neither IGIF nor normal FID: a local-only object, not client-visible. */
231 if (!fid_is_norm(fid) && !fid_is_igif(fid))
/* Ask the FLD whether the current FID's sequence lives on this target. */
234 fld_range_set_mdt(range);
235 rc = fld_local_lookup(env, ss->ss_server_fld,
236 fid_seq(fid), range);
237 if (rc != 0 || range->lsr_index != idx) {
238 /* Current FID should NOT be for the input parameter
239 * @obj, because the lfsck_master_oit_engine() has
240 * filtered out agent object. So current FID is for
241 * the ancestor of the original input parameter @obj.
242 * So the ancestor is a remote directory. The input
243 * parameter @obj is local directory, and should be
244 * scanned under such case. */
250 /* normal FID on this target (locally) must be for the
251 * client-side visible object. */
252 if (fid_is_norm(fid))
/* Pin the ancestor so its attributes/xattrs can be inspected. */
256 obj = lfsck_object_find(env, lfsck, fid);
261 if (!dt_object_exists(obj))
265 dt_read_lock(env, obj, MOR_TGT_CHILD);
266 if (unlikely(lfsck_is_dead_obj(obj))) {
267 dt_read_unlock(env, obj);
/* Zero-length probe: only checks whether linkEA exists (rc >= 0). */
272 rc = dt_xattr_get(env, obj,
273 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK,
275 dt_read_unlock(env, obj);
279 if (rc < 0 && rc != -ENODATA)
/* No linkEA: continue the walk with this ancestor's parent. */
282 rc = lfsck_parent_fid(env, obj, fid);
284 lfsck_object_put(env, obj);
290 if (!fid_is_sane(fid))
/* Cleanup: drop the reference taken on an ancestor (depth > 0 means
 * @obj no longer points at the caller's original object). */
295 if (depth > 0 && obj != NULL)
296 lfsck_object_put(env, obj);
301 /* LFSCK wrap functions */
303 static void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
306 struct lfsck_component *com;
308 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
309 com->lc_ops->lfsck_fail(env, com, new_checked);
313 static int lfsck_checkpoint(const struct lu_env *env,
314 struct lfsck_instance *lfsck)
316 struct lfsck_component *com;
320 if (likely(cfs_time_beforeq(cfs_time_current(),
321 lfsck->li_time_next_checkpoint)))
324 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
325 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
326 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
331 lfsck->li_time_last_checkpoint = cfs_time_current();
332 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
333 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
334 return rc1 != 0 ? rc1 : rc;
/* Prepare all active components for a new scan and position the iterators:
 * load the otable-based iterator at the smallest recorded start position,
 * and (on a master with a saved directory position) re-open and reload the
 * namespace-based directory iterator so the scan resumes where it stopped.
 *
 * NOTE(review): this copy of the file is missing lines — the opening brace,
 * the declarations of locals "rc" and "di", several error checks and GOTO
 * targets (the "out" label and the trailing RETURN) are not visible.
 * Restore them from the upstream lustre/lfsck/lfsck_engine.c. */
337 static int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
338 struct lfsck_start_param *lsp)
340 struct dt_object *obj = NULL;
341 struct lfsck_component *com;
342 struct lfsck_component *next;
343 struct lfsck_position *pos = NULL;
344 const struct dt_it_ops *iops =
345 &lfsck->li_obj_oit->do_index_ops->dio_it;
350 LASSERT(lfsck->li_obj_dir == NULL);
351 LASSERT(lfsck->li_di_dir == NULL);
353 lfsck->li_current_oit_processed = 0;
/* Let every component prepare itself, and track the smallest non-zero
 * start position across the components in "pos". */
354 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
355 com->lc_new_checked = 0;
356 rc = com->lc_ops->lfsck_prep(env, com, lsp);
361 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
362 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
363 pos = &com->lc_pos_start;
366 /* Init otable-based iterator. */
368 rc = iops->load(env, lfsck->li_di_oit, 0);
370 lfsck->li_oit_over = 1;
/* Resume case: reload the OIT iterator at the saved cookie. */
377 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
381 lfsck->li_oit_over = 1;
/* Only the master with a saved directory parent resumes a dir scan. */
383 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
386 /* Find the directory for namespace-based traverse. */
387 obj = lfsck_object_find(env, lfsck, &pos->lp_dir_parent);
389 RETURN(PTR_ERR(obj));
391 /* Remote directory will be scanned by the LFSCK instance
392 * on the MDT where the remote object really resides on. */
393 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
394 unlikely(!S_ISDIR(lfsck_object_type(obj))))
397 if (unlikely(!dt_try_as_dir(env, obj)))
398 GOTO(out, rc = -ENOTDIR);
400 /* Init the namespace-based directory traverse. */
401 iops = &obj->do_index_ops->dio_it;
402 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
404 GOTO(out, rc = PTR_ERR(di));
406 LASSERT(pos->lp_dir_cookie < MDS_DIR_END_OFF);
408 rc = iops->load(env, di, pos->lp_dir_cookie);
/* load() landing exactly on the cookie (rc == 0), or past it while
 * resuming (rc > 0 with a non-zero cookie), must step to the next entry. */
409 if ((rc == 0) || (rc > 0 && pos->lp_dir_cookie > 0))
410 rc = iops->next(env, di);
/* Publish the open directory iterator; li_di_dir under li_lock. */
420 lfsck->li_obj_dir = lfsck_object_get(obj);
421 lfsck->li_cookie_dir = iops->store(env, di);
422 spin_lock(&lfsck->li_lock);
423 lfsck->li_di_dir = di;
424 spin_unlock(&lfsck->li_lock);
430 lfsck_object_put(env, obj);
/* Error path: tell the components the scan finished with "rc". */
433 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
435 com->lc_ops->lfsck_post(env, com, rc, true);
/* Success path: take the initial checkpoint and arm the next deadline. */
441 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, true);
442 lfsck->li_pos_current = lfsck->li_pos_checkpoint;
443 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
444 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
449 lfsck->li_time_last_checkpoint = cfs_time_current();
450 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
451 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Feed one object found by the otable-based iteration to every active
 * component, and — if lfsck_needs_scan_dir() says the object is a directory
 * to be scanned — open a namespace-based iterator on it and publish it as
 * the current directory (li_obj_dir / li_di_dir).
 *
 * NOTE(review): missing lines in this copy — the opening brace, the locals
 * "rc" and "di", several error checks, the "out" label and the surrounding
 * RETURN/GOTO statements. Restore from the upstream lfsck_engine.c. */
455 static int lfsck_exec_oit(const struct lu_env *env,
456 struct lfsck_instance *lfsck, struct dt_object *obj)
458 struct lfsck_component *com;
459 const struct dt_it_ops *iops;
464 LASSERT(lfsck->li_obj_dir == NULL);
466 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
467 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
/* Decide whether this directory belongs to a client-visible subtree. */
472 rc = lfsck_needs_scan_dir(env, lfsck, obj);
476 if (unlikely(!dt_try_as_dir(env, obj)))
477 GOTO(out, rc = -ENOTDIR);
479 iops = &obj->do_index_ops->dio_it;
480 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
482 GOTO(out, rc = PTR_ERR(di));
484 rc = iops->load(env, di, 0);
486 rc = iops->next(env, di);
/* Publish the directory iterator; li_di_dir set under li_lock. */
496 lfsck->li_obj_dir = lfsck_object_get(obj);
497 lfsck->li_cookie_dir = iops->store(env, di);
498 spin_lock(&lfsck->li_lock);
499 lfsck->li_di_dir = di;
500 spin_unlock(&lfsck->li_lock);
/* Failure path: record the failure (not counted as newly checked) and
 * map positive status to success for the caller. */
506 lfsck_fail(env, lfsck, false);
507 return (rc > 0 ? 0 : rc);
510 static int lfsck_exec_dir(const struct lu_env *env,
511 struct lfsck_instance *lfsck,
512 struct lu_dirent *ent, __u16 type)
514 struct lfsck_component *com;
517 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
518 rc = com->lc_ops->lfsck_exec_dir(env, com, ent, type);
525 static int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
528 struct lfsck_component *com;
529 struct lfsck_component *next;
533 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
534 list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
535 rc = com->lc_ops->lfsck_post(env, com, result, false);
540 lfsck->li_time_last_checkpoint = cfs_time_current();
541 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
542 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
544 /* Ignore some component post failure to make other can go ahead. */
548 static int lfsck_double_scan(const struct lu_env *env,
549 struct lfsck_instance *lfsck)
551 struct lfsck_component *com;
552 struct lfsck_component *next;
553 struct l_wait_info lwi = { 0 };
557 list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
558 rc = com->lc_ops->lfsck_double_scan(env, com);
563 l_wait_event(lfsck->li_thread.t_ctl_waitq,
564 atomic_read(&lfsck->li_double_scan_count) == 0,
567 if (lfsck->li_status != LS_PAUSED &&
568 lfsck->li_status != LS_CO_PAUSED) {
569 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
571 spin_lock(&lfsck->li_lock);
572 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
573 spin_unlock(&lfsck->li_lock);
577 return rc1 != 0 ? rc1 : rc;
580 static void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
582 struct lfsck_component *com;
583 struct lfsck_component *next;
585 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
587 if (com->lc_ops->lfsck_quit != NULL)
588 com->lc_ops->lfsck_quit(env, com);
590 spin_lock(&lfsck->li_lock);
591 list_del_init(&com->lc_link_dir);
592 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
593 spin_unlock(&lfsck->li_lock);
596 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
598 if (com->lc_ops->lfsck_quit != NULL)
599 com->lc_ops->lfsck_quit(env, com);
601 spin_lock(&lfsck->li_lock);
602 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
603 spin_unlock(&lfsck->li_lock);
/* Namespace-based directory traversal engine: iterate the entries of the
 * current directory (li_di_dir), unpack each one, and feed it to the active
 * components, with checkpointing, speed control and fail-out handling.
 *
 * NOTE(review): this copy of the file is missing lines — the opening brace,
 * the declarations of locals (at least "rc" and "type"), the main loop
 * construct, several condition/RETURN lines and truncated argument lists
 * (e.g. the rec()/unpack calls and LWI_TIMEOUT) are not visible. Restore
 * from the upstream lustre/lfsck/lfsck_engine.c. */
609 static int lfsck_master_dir_engine(const struct lu_env *env,
610 struct lfsck_instance *lfsck)
612 struct lfsck_thread_info *info = lfsck_env_info(env);
613 struct dt_object *dir = lfsck->li_obj_dir;
614 const struct dt_it_ops *iops = &dir->do_index_ops->dio_it;
615 struct dt_it *di = lfsck->li_di_dir;
616 struct lu_dirent *ent =
617 (struct lu_dirent *)info->lti_key;
618 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
619 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Fault-injection hook: optionally delay the scan for cfs_fail_val secs. */
625 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY2) &&
627 struct l_wait_info lwi;
629 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
631 l_wait_event(thread->t_ctl_waitq,
632 !thread_is_running(thread),
635 if (unlikely(!thread_is_running(thread))) {
636 CDEBUG(D_LFSCK, "%s: scan dir exit for engine "
637 "stop, parent "DFID", cookie "LPX64"\n",
638 lfsck_lfsck2name(lfsck),
639 PFID(lfsck_dto2fid(dir)),
640 lfsck->li_cookie_dir);
/* Fetch and unpack the current directory entry. */
645 lfsck->li_new_scanned++;
646 rc = iops->rec(env, di, (struct dt_rec *)ent,
649 rc = lfsck_unpack_ent(ent, &lfsck->li_cookie_dir,
653 CDEBUG(D_LFSCK, "%s: scan dir failed at rec(), "
654 "parent "DFID", cookie "LPX64": rc = %d\n",
655 lfsck_lfsck2name(lfsck),
656 PFID(lfsck_dto2fid(dir)),
657 lfsck->li_cookie_dir, rc);
658 lfsck_fail(env, lfsck, true);
659 if (bk->lb_param & LPF_FAILOUT)
/* Skip entries flagged to be ignored, except the ".." entry. */
665 if (ent->lde_attrs & LUDA_IGNORE &&
666 strcmp(ent->lde_name, dotdot) != 0)
669 /* The type in the @ent structure may has been overwritten,
670 * so we need to pass the @type parameter independently. */
671 rc = lfsck_exec_dir(env, lfsck, ent, type);
672 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
676 rc = lfsck_checkpoint(env, lfsck);
677 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Throttle to the configured speed limit and honor stop requests. */
681 lfsck_control_speed(lfsck);
682 if (unlikely(!thread_is_running(thread))) {
683 CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
684 "parent "DFID", cookie "LPX64"\n",
685 lfsck_lfsck2name(lfsck),
686 PFID(lfsck_dto2fid(dir)),
687 lfsck->li_cookie_dir);
/* Fault-injection hook: simulate a fatal engine stop. */
691 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL2)) {
692 spin_lock(&lfsck->li_lock);
693 thread_set_flags(thread, SVC_STOPPING);
694 spin_unlock(&lfsck->li_lock);
698 rc = iops->next(env, di);
/* Directory exhausted (rc > 0) while the OIT scan continues: close it. */
701 if (rc > 0 && !lfsck->li_oit_over)
702 lfsck_close_dir(env, lfsck);
708 * Object-table based iteration engine.
710 * Object-table based iteration is the basic linear engine to scan all the
711 * objects on current device in turn. For each object, it calls all the
712 * registered LFSCK component(s)' API to perform related consistency
715 * It flushes related LFSCK tracing files to disk via making checkpoint
716 * periodically. Then if the server crashed or the LFSCK is paused, the
717 * LFSCK can resume from the latest checkpoint.
719 * It also controls the whole LFSCK speed via lfsck_control_speed() to
720 * avoid the server to become overload.
722 * \param[in] env pointer to the thread context
723 * \param[in] lfsck pointer to the lfsck instance
725 * \retval positive number if all objects have been scanned
726 * \retval 0 if the iteration is stopped or paused
727 * \retval negative error number on failure
/* Otable-based iteration engine (see the comment block above): linear scan
 * of every object on this device, dispatching each one to the active
 * components and recursing into directories via lfsck_master_dir_engine().
 *
 * NOTE(review): this copy of the file is missing lines — the opening brace,
 * the "do {" of the main loop, the declarations of locals (at least "rc"),
 * several condition/RETURN lines, and some truncated argument lists are
 * not visible. Restore from the upstream lustre/lfsck/lfsck_engine.c. */
729 static int lfsck_master_oit_engine(const struct lu_env *env,
730 struct lfsck_instance *lfsck)
732 struct lfsck_thread_info *info = lfsck_env_info(env);
733 const struct dt_it_ops *iops =
734 &lfsck->li_obj_oit->do_index_ops->dio_it;
735 struct dt_it *di = lfsck->li_di_oit;
736 struct lu_fid *fid = &info->lti_fid;
737 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
738 struct ptlrpc_thread *thread = &lfsck->li_thread;
739 struct dt_device *dev = lfsck->li_bottom;
740 struct seq_server_site *ss = lu_site2seq(dev->dd_lu_dev.ld_site);
741 __u32 idx = lfsck_dev_idx(dev);
745 if (unlikely(ss == NULL))
749 struct dt_object *target;
750 bool update_lma = false;
/* If a directory scan is in flight, drain it before the next OIT step. */
752 if (lfsck->li_di_dir != NULL) {
753 rc = lfsck_master_dir_engine(env, lfsck);
758 if (unlikely(lfsck->li_oit_over))
/* Fault-injection hook: optionally delay the scan. */
761 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY1) &&
763 struct l_wait_info lwi;
765 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
767 l_wait_event(thread->t_ctl_waitq,
768 !thread_is_running(thread),
771 if (unlikely(!thread_is_running(thread))) {
772 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine "
773 "stop, cookie "LPU64"\n",
774 lfsck_lfsck2name(lfsck),
775 iops->store(env, di));
780 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
/* Record the current position and fetch the FID of this object. */
783 lfsck->li_current_oit_processed = 1;
784 lfsck->li_new_scanned++;
785 lfsck->li_pos_current.lp_oit_cookie = iops->store(env, di);
786 rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
788 CDEBUG(D_LFSCK, "%s: OIT scan failed at rec(): "
789 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
790 lfsck_fail(env, lfsck, true);
791 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* IDIF FIDs only appear on OSTs (slaves), never on the master. */
797 if (fid_is_idif(fid)) {
798 __u32 idx1 = fid_idif_ost_idx(fid);
800 LASSERT(!lfsck->li_master);
802 /* It is an old format device, update the LMA. */
804 struct ost_id *oi = &info->lti_oi;
806 fid_to_ostid(fid, oi);
807 ostid_to_fid(fid, oi, idx);
810 } else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
811 !fid_is_last_id(fid) &&
812 !lu_fid_eq(fid, &lfsck->li_global_root_fid)) {
814 /* If the FID/object is only used locally and invisible
815 * to external nodes, then LFSCK will not handle it.
817 * dot_lustre sequence has been handled specially. */
/* FLD lookup decides whether the sequence belongs to this target. */
820 struct lu_seq_range *range = &info->lti_range;
822 if (lfsck->li_master)
823 fld_range_set_mdt(range);
825 fld_range_set_ost(range);
826 rc = fld_local_lookup(env, ss->ss_server_fld,
827 fid_seq(fid), range);
828 if (rc != 0 || range->lsr_index != idx) {
829 /* Remote object will be handled by the LFSCK
830 * instance on the MDT where the remote object
831 * really resides on. */
837 target = lfsck_object_find(env, lfsck, fid);
838 if (IS_ERR(target)) {
839 CDEBUG(D_LFSCK, "%s: OIT scan failed at find target "
840 DFID", cookie "LPU64": rc = %d\n",
841 lfsck_lfsck2name(lfsck), PFID(fid),
842 iops->store(env, di), rc);
843 lfsck_fail(env, lfsck, true);
844 if (bk->lb_param & LPF_FAILOUT)
845 RETURN(PTR_ERR(target));
850 if (dt_object_exists(target)) {
/* Old-format object: refresh its LMA before executing components. */
852 rc = lfsck_update_lma(env, lfsck, target);
854 CDEBUG(D_LFSCK, "%s: fail to update "
855 "LMA for "DFID": rc = %d\n",
856 lfsck_lfsck2name(lfsck),
857 PFID(lfsck_dto2fid(target)), rc);
860 rc = lfsck_exec_oit(env, lfsck, target);
862 lfsck_object_put(env, target);
863 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
867 rc = lfsck_checkpoint(env, lfsck);
868 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Throttle to the configured speed limit. */
872 lfsck_control_speed(lfsck);
/* Fault-injection hook: simulate a fatal engine stop. */
874 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL1)) {
875 spin_lock(&lfsck->li_lock);
876 thread_set_flags(thread, SVC_STOPPING);
877 spin_unlock(&lfsck->li_lock);
/* Advance the OIT iterator: rc > 0 means the table is exhausted. */
881 rc = iops->next(env, di);
882 if (unlikely(rc > 0))
883 lfsck->li_oit_over = 1;
884 else if (likely(rc == 0))
885 lfsck->li_current_oit_processed = 0;
887 if (unlikely(!thread_is_running(thread))) {
888 CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
889 "cookie "LPU64"\n", lfsck_lfsck2name(lfsck),
890 iops->store(env, di));
893 } while (rc == 0 || lfsck->li_di_dir != NULL);
/* Main LFSCK kernel-thread body: verifies .lustre/lost+found/, sets up the
 * otable-based iterator, runs lfsck_prep()/the OIT engine, then the post
 * processing and the double scan, and finally marks the thread stopped.
 *
 * NOTE(review): this copy of the file is missing lines — the opening brace,
 * the declaration of "rc", several condition lines, GOTO labels
 * ("fini_oit" and the final return), and the trailing return statement.
 * Also note the "&lt" sequences were HTML-decoded into "<": e.g.
 * "<a->lta_env" below should read "&lta->lta_env". Restore from the
 * upstream lustre/lfsck/lfsck_engine.c. */
898 int lfsck_master_engine(void *args)
900 struct lfsck_thread_args *lta = args;
901 struct lu_env *env = <a->lta_env;
902 struct lfsck_instance *lfsck = lta->lta_lfsck;
903 struct ptlrpc_thread *thread = &lfsck->li_thread;
904 struct dt_object *oit_obj = lfsck->li_obj_oit;
905 const struct dt_it_ops *oit_iops = &oit_obj->do_index_ops->dio_it;
906 struct dt_it *oit_di;
907 struct l_wait_info lwi = { 0 };
/* On the master, make sure .lustre/lost+found/MDTxxxx exists first. */
911 if (lfsck->li_master &&
912 (!list_empty(&lfsck->li_list_scan) ||
913 !list_empty(&lfsck->li_list_double_scan))) {
914 rc = lfsck_verify_lpf(env, lfsck);
915 /* Fail to verify the .lustre/lost+found/MDTxxxx/ may be not
916 * fatal, because the .lustre/lost+found/ maybe not accessed
917 * by the LFSCK if it does not add orphans or others to such
918 * directory. So go ahead until hit failure when really uses
921 CDEBUG(D_LFSCK, "%s: master engine fail to verify the "
922 ".lustre/lost+found/, go ahead: rc = %d\n",
923 lfsck_lfsck2name(lfsck), rc);
926 oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit, BYPASS_CAPA);
927 if (IS_ERR(oit_di)) {
928 rc = PTR_ERR(oit_di);
929 CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
930 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
/* Publish the OIT iterator under li_lock, then prepare components. */
935 spin_lock(&lfsck->li_lock);
936 lfsck->li_di_oit = oit_di;
937 spin_unlock(&lfsck->li_lock);
938 rc = lfsck_prep(env, lfsck, lta->lta_lsp);
942 CDEBUG(D_LFSCK, "LFSCK entry: oit_flags = %#x, dir_flags = %#x, "
943 "oit_cookie = "LPU64", dir_cookie = "LPX64", parent = "DFID
944 ", pid = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
945 lfsck->li_pos_checkpoint.lp_oit_cookie,
946 lfsck->li_pos_checkpoint.lp_dir_cookie,
947 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
/* Announce that the engine thread is up and running. */
950 spin_lock(&lfsck->li_lock);
951 thread_set_flags(thread, SVC_RUNNING);
952 spin_unlock(&lfsck->li_lock);
953 wake_up_all(&thread->t_ctl_waitq);
/* Wait for the starter to unplug us (or for a stop request). */
955 l_wait_event(thread->t_ctl_waitq,
956 lfsck->li_start_unplug ||
957 !thread_is_running(thread),
959 if (!thread_is_running(thread))
960 GOTO(fini_oit, rc = 0);
962 if (!list_empty(&lfsck->li_list_scan) ||
963 list_empty(&lfsck->li_list_double_scan))
964 rc = lfsck_master_oit_engine(env, lfsck);
968 CDEBUG(D_LFSCK, "LFSCK exit: oit_flags = %#x, dir_flags = %#x, "
969 "oit_cookie = "LPU64", dir_cookie = "LPX64", parent = "DFID
970 ", pid = %d, rc = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
971 lfsck->li_pos_checkpoint.lp_oit_cookie,
972 lfsck->li_pos_checkpoint.lp_dir_cookie,
973 PFID(&lfsck->li_pos_checkpoint.lp_dir_parent),
976 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
977 rc = lfsck_post(env, lfsck, rc);
979 if (lfsck->li_di_dir != NULL)
980 lfsck_close_dir(env, lfsck);
/* Tear down the OIT iterator, then run the second-stage scan. */
983 lfsck_di_oit_put(env, lfsck);
984 oit_iops->fini(env, oit_di);
986 if (!list_empty(&lfsck->li_list_double_scan))
987 rc = lfsck_double_scan(env, lfsck);
991 lfsck_quit(env, lfsck);
994 /* XXX: Purge the pinned objects in the future. */
/* Mark the thread stopped and wake up anyone waiting on it. */
997 spin_lock(&lfsck->li_lock);
998 thread_set_flags(thread, SVC_STOPPED);
999 spin_unlock(&lfsck->li_lock);
1000 wake_up_all(&thread->t_ctl_waitq);
1001 lfsck_thread_args_fini(lta);
1005 static inline bool lfsck_assistant_req_empty(struct lfsck_assistant_data *lad)
1009 spin_lock(&lad->lad_lock);
1010 if (list_empty(&lad->lad_req_list))
1012 spin_unlock(&lad->lad_lock);
1018 * Query the LFSCK status from the instances on remote servers.
1020 * The LFSCK assistant thread queries the LFSCK instances on other
1021 * servers (MDT/OST) about their status, such as whether they have
1022 * finished the phase1/phase2 scanning or not, and so on.
1024 * \param[in] env pointer to the thread context
1025 * \param[in] com pointer to the lfsck component
1027 * \retval 0 for success
1028 * \retval negative error number on failure
/* Send an LE_QUERY request to every target still in phase-1 (first the
 * MDTs, then — for layout LFSCK — the OSTs), collecting the replies via
 * the shared async interpreter.
 *
 * NOTE(review): this copy of the file is missing lines — the opening brace,
 * the declarations of locals ("rc", "rc1", "gen"), several condition and
 * branch lines, and the "again:"/"out" labels are not visible. Also note
 * the "&lt" sequences were HTML-decoded into "<": e.g. "<ds->ltd_lock"
 * should read "&ltds->ltd_lock" and "<d->..." should read "&ltd->...".
 * Restore from the upstream lustre/lfsck/lfsck_engine.c. */
1030 static int lfsck_assistant_query_others(const struct lu_env *env,
1031 struct lfsck_component *com)
1033 struct lfsck_thread_info *info = lfsck_env_info(env);
1034 struct lfsck_request *lr = &info->lti_lr;
1035 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1036 struct lfsck_instance *lfsck = com->lc_lfsck;
1037 struct lfsck_assistant_data *lad = com->lc_data;
1038 struct ptlrpc_request_set *set;
1039 struct lfsck_tgt_descs *ltds;
1040 struct lfsck_tgt_desc *ltd;
1041 struct list_head *phase_head;
1046 set = ptlrpc_prep_set();
/* Build the LE_QUERY request shared by all targets in this round. */
1050 lad->lad_touch_gen++;
1051 memset(lr, 0, sizeof(*lr));
1052 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1053 lr->lr_event = LE_QUERY;
1054 lr->lr_active = com->lc_type;
1055 laia->laia_com = com;
1057 laia->laia_shared = 0;
/* Pick the target list: MDTs in phase 1 first, otherwise OSTs
 * (only layout LFSCK talks to OSTs). */
1059 if (!list_empty(&lad->lad_mdt_phase1_list)) {
1060 ltds = &lfsck->li_mdt_descs;
1062 phase_head = &lad->lad_mdt_phase1_list;
1063 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1068 ltds = &lfsck->li_ost_descs;
1069 lr->lr_flags = LEF_TO_OST;
1070 phase_head = &lad->lad_ost_phase1_list;
1073 laia->laia_ltds = ltds;
1074 spin_lock(<ds->ltd_lock);
1075 while (!list_empty(phase_head)) {
1076 struct list_head *phase_list;
1079 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1080 ltd = list_entry(phase_head->next,
1081 struct lfsck_tgt_desc,
1082 ltd_layout_phase_list);
1083 phase_list = <d->ltd_layout_phase_list;
1084 gen = <d->ltd_layout_gen;
1086 ltd = list_entry(phase_head->next,
1087 struct lfsck_tgt_desc,
1088 ltd_namespace_phase_list);
1089 phase_list = <d->ltd_namespace_phase_list;
1090 gen = <d->ltd_namespace_gen;
/* Generation matching lad_touch_gen means this target was already
 * visited in the current round — the list walk is complete. */
1093 if (*gen == lad->lad_touch_gen)
1096 *gen = lad->lad_touch_gen;
1097 list_move_tail(phase_list, phase_head);
/* Hold a target reference across the unlocked async send. */
1098 atomic_inc(<d->ltd_ref);
1099 laia->laia_ltd = ltd;
1100 spin_unlock(<ds->ltd_lock);
1101 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1102 lfsck_async_interpret_common,
1105 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to query "
1106 "%s %x for %s: rc = %d\n",
1107 lfsck_lfsck2name(lfsck),
1108 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1109 ltd->ltd_index, lad->lad_name, rc);
1113 spin_lock(<ds->ltd_lock);
1115 spin_unlock(<ds->ltd_lock);
/* Wait for all queued queries, then release the request set. */
1117 rc = ptlrpc_set_wait(set);
1119 ptlrpc_set_destroy(set);
/* Layout LFSCK: after the MDT round, repeat the loop for the OSTs. */
1123 if (com->lc_type == LFSCK_TYPE_LAYOUT && !(lr->lr_flags & LEF_TO_OST) &&
1124 list_empty(&lad->lad_mdt_phase1_list))
1128 ptlrpc_set_destroy(set);
1130 RETURN(rc1 != 0 ? rc1 : rc);
1134 * Notify the LFSCK event to the instances on remote servers.
1136 * The LFSCK assistant thread notifies the LFSCK instances on other
1137 * servers (MDT/OST) about some events, such as start new scanning,
1138 * stop the scanning, this LFSCK instance will exit, and so on.
1140 * \param[in] env pointer to the thread context
1141 * \param[in] com pointer to the lfsck component
1142 * \param[in] lr pointer to the LFSCK event request
1144 * \retval 0 for success
1145 * \retval negative error number on failure
1147 static int lfsck_assistant_notify_others(const struct lu_env *env,
1148 struct lfsck_component *com,
1149 struct lfsck_request *lr)
1151 struct lfsck_thread_info *info = lfsck_env_info(env);
1152 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1153 struct lfsck_instance *lfsck = com->lc_lfsck;
1154 struct lfsck_assistant_data *lad = com->lc_data;
1155 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1156 struct ptlrpc_request_set *set;
1157 struct lfsck_tgt_descs *ltds;
1158 struct lfsck_tgt_desc *ltd;
1159 struct lfsck_tgt_desc *next;
1165 set = ptlrpc_prep_set();
1169 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1170 lr->lr_active = com->lc_type;
1171 laia->laia_com = com;
1173 laia->laia_shared = 0;
1175 switch (lr->lr_event) {
1177 if (com->lc_type != LFSCK_TYPE_LAYOUT)
1180 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
1181 LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
1182 lr->lr_speed = bk->lb_speed_limit;
1183 lr->lr_version = bk->lb_version;
1184 lr->lr_param |= bk->lb_param;
1185 lr->lr_async_windows = bk->lb_async_windows;
1186 lr->lr_flags = LEF_TO_OST;
1188 /* Notify OSTs firstly, then handle other MDTs if needed. */
1189 ltds = &lfsck->li_ost_descs;
1190 laia->laia_ltds = ltds;
1191 down_read(<ds->ltd_rw_sem);
1192 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1193 ltd = lfsck_tgt_get(ltds, idx);
1194 LASSERT(ltd != NULL);
1196 laia->laia_ltd = ltd;
1197 ltd->ltd_layout_done = 0;
1198 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1199 lfsck_async_interpret_common,
1200 laia, LFSCK_NOTIFY);
1202 lfsck_lad_set_bitmap(env, com, idx);
1203 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1204 "notify OST %x for %s start: rc = %d\n",
1205 lfsck_lfsck2name(lfsck), idx,
1210 up_read(<ds->ltd_rw_sem);
1213 rc = ptlrpc_set_wait(set);
1215 ptlrpc_set_destroy(set);
1220 if (!(bk->lb_param & LPF_ALL_TGT))
1223 /* link other MDT targets locallly. */
1224 ltds = &lfsck->li_mdt_descs;
1225 spin_lock(<ds->ltd_lock);
1226 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1227 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1228 ltd = LTD_TGT(ltds, idx);
1229 LASSERT(ltd != NULL);
1231 if (!list_empty(<d->ltd_layout_list))
1234 list_add_tail(<d->ltd_layout_list,
1235 &lad->lad_mdt_list);
1236 list_add_tail(<d->ltd_layout_phase_list,
1237 &lad->lad_mdt_phase1_list);
1240 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1241 ltd = LTD_TGT(ltds, idx);
1242 LASSERT(ltd != NULL);
1244 if (!list_empty(<d->ltd_namespace_list))
1247 list_add_tail(<d->ltd_namespace_list,
1248 &lad->lad_mdt_list);
1249 list_add_tail(<d->ltd_namespace_phase_list,
1250 &lad->lad_mdt_phase1_list);
1253 spin_unlock(<ds->ltd_lock);
1256 case LE_PHASE2_DONE:
1257 case LE_PEER_EXIT: {
1258 struct list_head *phase_head;
1260 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1261 if (bk->lb_param & LPF_ALL_TGT) {
1262 phase_head = &lad->lad_mdt_list;
1263 ltds = &lfsck->li_mdt_descs;
1264 if (lr->lr_event == LE_STOP) {
1265 /* unlink other MDT targets locallly. */
1266 spin_lock(<ds->ltd_lock);
1267 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1268 list_for_each_entry_safe(ltd, next,
1269 phase_head, ltd_layout_list) {
1271 <d->ltd_layout_phase_list);
1273 <d->ltd_layout_list);
1276 list_for_each_entry_safe(ltd, next,
1278 ltd_namespace_list) {
1280 <d->ltd_namespace_phase_list);
1282 <d->ltd_namespace_list);
1285 spin_unlock(<ds->ltd_lock);
1287 if (com->lc_type != LFSCK_TYPE_LAYOUT)
1290 lr->lr_flags |= LEF_TO_OST;
1291 phase_head = &lad->lad_ost_list;
1292 ltds = &lfsck->li_ost_descs;
1294 lr->lr_flags &= ~LEF_TO_OST;
1296 } else if (com->lc_type != LFSCK_TYPE_LAYOUT) {
1299 lr->lr_flags |= LEF_TO_OST;
1300 phase_head = &lad->lad_ost_list;
1301 ltds = &lfsck->li_ost_descs;
1305 laia->laia_ltds = ltds;
1306 spin_lock(<ds->ltd_lock);
1307 while (!list_empty(phase_head)) {
1308 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1309 ltd = list_entry(phase_head->next,
1310 struct lfsck_tgt_desc,
1312 if (!list_empty(<d->ltd_layout_phase_list))
1314 <d->ltd_layout_phase_list);
1315 list_del_init(<d->ltd_layout_list);
1317 ltd = list_entry(phase_head->next,
1318 struct lfsck_tgt_desc,
1319 ltd_namespace_list);
1320 if (!list_empty(<d->ltd_namespace_phase_list))
1322 <d->ltd_namespace_phase_list);
1323 list_del_init(<d->ltd_namespace_list);
1325 atomic_inc(<d->ltd_ref);
1326 laia->laia_ltd = ltd;
1327 spin_unlock(<ds->ltd_lock);
1328 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1329 lfsck_async_interpret_common,
1330 laia, LFSCK_NOTIFY);
1332 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1333 "notify %s %x for %s stop/phase2_done/"
1334 "peer_exit: rc = %d\n",
1335 lfsck_lfsck2name(lfsck),
1336 (lr->lr_flags & LEF_TO_OST) ?
1337 "OST" : "MDT", ltd->ltd_index,
1341 spin_lock(<ds->ltd_lock);
1343 spin_unlock(<ds->ltd_lock);
1345 rc = ptlrpc_set_wait(set);
1347 ptlrpc_set_destroy(set);
1351 if (com->lc_type == LFSCK_TYPE_LAYOUT &&
1352 !(lr->lr_flags & LEF_TO_OST)) {
1353 lr->lr_flags |= LEF_TO_OST;
1354 phase_head = &lad->lad_ost_list;
1355 ltds = &lfsck->li_ost_descs;
1360 case LE_PHASE1_DONE:
1361 lad->lad_ops->la_sync_failures(env, com, lr);
1362 lad->lad_touch_gen++;
1363 ltds = &lfsck->li_mdt_descs;
1364 laia->laia_ltds = ltds;
1365 spin_lock(<ds->ltd_lock);
1366 while (!list_empty(&lad->lad_mdt_list)) {
1367 struct list_head *list;
1370 if (com->lc_type == LFSCK_TYPE_LAYOUT) {
1371 ltd = list_entry(lad->lad_mdt_list.next,
1372 struct lfsck_tgt_desc,
1374 list = <d->ltd_layout_list;
1375 gen = <d->ltd_layout_gen;
1377 ltd = list_entry(lad->lad_mdt_list.next,
1378 struct lfsck_tgt_desc,
1379 ltd_namespace_list);
1380 list = <d->ltd_namespace_list;
1381 gen = <d->ltd_namespace_gen;
1384 if (*gen == lad->lad_touch_gen)
1387 *gen = lad->lad_touch_gen;
1388 list_move_tail(list, &lad->lad_mdt_list);
1389 atomic_inc(<d->ltd_ref);
1390 laia->laia_ltd = ltd;
1391 spin_unlock(<ds->ltd_lock);
1392 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1393 lfsck_async_interpret_common,
1394 laia, LFSCK_NOTIFY);
1396 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to "
1397 "notify MDT %x for %s phase1 done: "
1398 "rc = %d\n", lfsck_lfsck2name(lfsck),
1399 ltd->ltd_index, lad->lad_name, rc);
1402 spin_lock(<ds->ltd_lock);
1404 spin_unlock(<ds->ltd_lock);
1407 CDEBUG(D_LFSCK, "%s: LFSCK assistant unexpected LFSCK event: "
1408 "rc = %d\n", lfsck_lfsck2name(lfsck), lr->lr_event);
1413 rc1 = ptlrpc_set_wait(set);
1414 ptlrpc_set_destroy(set);
1416 RETURN(rc != 0 ? rc : rc1);
1420 * The LFSCK assistant thread is triggered by the LFSCK main engine.
1421 * They co-work together as an asynchronous pipeline: the LFSCK main
1422 * engine scans the system and pre-fetches the objects, attributes,
1423 * or name entries, etc, and pushes them into the pipeline as input
1424 * requests for the LFSCK assistant thread; on the other end of the
1425 * pipeline, the LFSCK assistant thread performs the real check and
1426 * repair for every request from the main engine.
1428 * Generally, the assistant engine may be blocked while checking or
1429 * repairing something, so the LFSCK main engine usually runs faster. On the
1430 * other hand, the LFSCK main engine can drive multiple assistant
1431 * threads in parallel, meaning that for each LFSCK component on the
1432 * master (such as the layout LFSCK or the namespace LFSCK), there is an
1433 * independent LFSCK assistant thread. So under this 1:N asynchronous
1434 * pipeline mode, the overall LFSCK performance will be much better
1435 * than having the LFSCK main engine check/repair everything itself.
1437 int lfsck_assistant_engine(void *args)
1439 struct lfsck_thread_args *lta = args;
1440 struct lu_env *env = <a->lta_env;
1441 struct lfsck_component *com = lta->lta_com;
1442 struct lfsck_instance *lfsck = lta->lta_lfsck;
1443 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1444 struct lfsck_position *pos = &com->lc_pos_start;
1445 struct lfsck_thread_info *info = lfsck_env_info(env);
1446 struct lfsck_request *lr = &info->lti_lr;
1447 struct lfsck_assistant_data *lad = com->lc_data;
1448 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1449 struct ptlrpc_thread *athread = &lad->lad_thread;
1450 struct lfsck_assistant_operations *lao = lad->lad_ops;
1451 struct lfsck_assistant_req *lar;
1452 struct l_wait_info lwi = { 0 };
1457 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread start\n",
1458 lfsck_lfsck2name(lfsck), lad->lad_name);
1460 memset(lr, 0, sizeof(*lr));
1461 lr->lr_event = LE_START;
1462 if (pos->lp_oit_cookie <= 1)
1463 lr->lr_param = LPF_RESET;
1464 rc = lfsck_assistant_notify_others(env, com, lr);
1466 CDEBUG(D_LFSCK, "%s: LFSCK assistant fail to notify others "
1467 "to start %s: rc = %d\n",
1468 lfsck_lfsck2name(lfsck), lad->lad_name, rc);
1472 spin_lock(&lad->lad_lock);
1473 thread_set_flags(athread, SVC_RUNNING);
1474 spin_unlock(&lad->lad_lock);
1475 wake_up_all(&mthread->t_ctl_waitq);
1478 while (!list_empty(&lad->lad_req_list)) {
1479 bool wakeup = false;
1481 if (unlikely(lad->lad_exit ||
1482 !thread_is_running(mthread)))
1483 GOTO(cleanup1, rc = lad->lad_post_result);
1485 lar = list_entry(lad->lad_req_list.next,
1486 struct lfsck_assistant_req,
1488 /* Only the lfsck_assistant_engine thread itself can
1489 * remove the "lar" from the head of the list, LFSCK
1490 * engine thread only inserts other new "lar" at the
1491 * end of the list. So it is safe to handle current
1492 * "lar" without the spin_lock. */
1493 rc = lao->la_handler_p1(env, com, lar);
1494 spin_lock(&lad->lad_lock);
1495 list_del_init(&lar->lar_list);
1496 lad->lad_prefetched--;
1497 /* Wake up the main engine thread only when the list
1498 * is empty or half of the prefetched items have been
1499 * handled to avoid too frequent thread schedule. */
1500 if (lad->lad_prefetched == 0 ||
1501 (bk->lb_async_windows != 0 &&
1502 bk->lb_async_windows / 2 ==
1503 lad->lad_prefetched))
1505 spin_unlock(&lad->lad_lock);
1507 wake_up_all(&mthread->t_ctl_waitq);
1509 lao->la_req_fini(env, lar);
1510 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
1514 l_wait_event(athread->t_ctl_waitq,
1515 !lfsck_assistant_req_empty(lad) ||
1518 lad->lad_to_double_scan,
1521 if (unlikely(lad->lad_exit))
1522 GOTO(cleanup1, rc = lad->lad_post_result);
1524 if (!list_empty(&lad->lad_req_list))
1527 if (lad->lad_to_post) {
1528 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread post\n",
1529 lfsck_lfsck2name(lfsck), lad->lad_name);
1531 if (unlikely(lad->lad_exit))
1532 GOTO(cleanup1, rc = lad->lad_post_result);
1534 lad->lad_to_post = 0;
1535 LASSERT(lad->lad_post_result > 0);
1537 memset(lr, 0, sizeof(*lr));
1538 lr->lr_event = LE_PHASE1_DONE;
1539 lr->lr_status = lad->lad_post_result;
1540 rc = lfsck_assistant_notify_others(env, com, lr);
1542 CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to "
1543 "notify others for %s post: rc = %d\n",
1544 lfsck_lfsck2name(lfsck),
1547 /* Wakeup the master engine to go ahead. */
1548 wake_up_all(&mthread->t_ctl_waitq);
1551 if (lad->lad_to_double_scan) {
1552 lad->lad_to_double_scan = 0;
1553 atomic_inc(&lfsck->li_double_scan_count);
1554 lad->lad_in_double_scan = 1;
1555 wake_up_all(&mthread->t_ctl_waitq);
1557 com->lc_new_checked = 0;
1558 com->lc_new_scanned = 0;
1559 com->lc_time_last_checkpoint = cfs_time_current();
1560 com->lc_time_next_checkpoint =
1561 com->lc_time_last_checkpoint +
1562 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
1564 /* Flush async updates before handling orphan. */
1565 dt_sync(env, lfsck->li_next);
1567 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 "
1568 "scan start\n", lfsck_lfsck2name(lfsck));
1570 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_DOUBLESCAN))
1571 GOTO(cleanup2, rc = 0);
1573 while (lad->lad_in_double_scan) {
1574 rc = lfsck_assistant_query_others(env, com);
1575 if (lfsck_phase2_next_ready(lad))
1581 /* Pull LFSCK status on related targets once
1582 * per 30 seconds if we are not notified. */
1583 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
1584 cfs_time_seconds(1),
1586 rc = l_wait_event(athread->t_ctl_waitq,
1587 lfsck_phase2_next_ready(lad) ||
1589 !thread_is_running(mthread),
1592 if (unlikely(lad->lad_exit ||
1593 !thread_is_running(mthread)))
1594 GOTO(cleanup2, rc = 0);
1596 if (rc == -ETIMEDOUT)
1603 rc = lao->la_handler_p2(env, com);
1607 if (unlikely(lad->lad_exit ||
1608 !thread_is_running(mthread)))
1609 GOTO(cleanup2, rc = 0);
1615 /* Cleanup the unfinished requests. */
1616 spin_lock(&lad->lad_lock);
1618 lad->lad_assistant_status = rc;
1620 if (lad->lad_exit && lad->lad_post_result <= 0)
1621 lao->la_fill_pos(env, com, &lfsck->li_pos_checkpoint);
1623 while (!list_empty(&lad->lad_req_list)) {
1624 lar = list_entry(lad->lad_req_list.next,
1625 struct lfsck_assistant_req,
1627 list_del_init(&lar->lar_list);
1628 lad->lad_prefetched--;
1629 spin_unlock(&lad->lad_lock);
1630 lao->la_req_fini(env, lar);
1631 spin_lock(&lad->lad_lock);
1633 spin_unlock(&lad->lad_lock);
1635 LASSERTF(lad->lad_prefetched == 0, "unmatched prefeteched objs %d\n",
1636 lad->lad_prefetched);
1639 memset(lr, 0, sizeof(*lr));
1641 lr->lr_event = LE_PHASE2_DONE;
1643 } else if (rc == 0) {
1644 if (lfsck->li_flags & LPF_ALL_TGT) {
1645 lr->lr_event = LE_STOP;
1646 lr->lr_status = LS_STOPPED;
1648 lr->lr_event = LE_PEER_EXIT;
1649 switch (lfsck->li_status) {
1652 lr->lr_status = LS_CO_PAUSED;
1656 lr->lr_status = LS_CO_STOPPED;
1659 CDEBUG(D_LFSCK, "%s: LFSCK assistant unknown "
1660 "status: rc = %d\n",
1661 lfsck_lfsck2name(lfsck),
1663 lr->lr_status = LS_CO_FAILED;
1668 if (lfsck->li_flags & LPF_ALL_TGT) {
1669 lr->lr_event = LE_STOP;
1670 lr->lr_status = LS_FAILED;
1672 lr->lr_event = LE_PEER_EXIT;
1673 lr->lr_status = LS_CO_FAILED;
1677 rc1 = lfsck_assistant_notify_others(env, com, lr);
1679 CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to notify "
1680 "others for %s quit: rc = %d\n",
1681 lfsck_lfsck2name(lfsck), lad->lad_name, rc1);
1685 /* Flush async updates before exit. */
1686 dt_sync(env, lfsck->li_next);
1688 /* Under force exit case, some requests may be just freed without
1689 * verification, those objects should be re-handled when next run.
1690 * So not update the on-disk tracing file under such case. */
1691 if (lad->lad_in_double_scan) {
1693 rc1 = lao->la_double_scan_result(env, com, rc);
1695 CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 scan "
1696 "finished: rc = %d\n",
1697 lfsck_lfsck2name(lfsck), rc1 != 0 ? rc1 : rc);
1701 if (lad->lad_in_double_scan)
1702 atomic_dec(&lfsck->li_double_scan_count);
1704 spin_lock(&lad->lad_lock);
1705 lad->lad_assistant_status = (rc1 != 0 ? rc1 : rc);
1706 thread_set_flags(athread, SVC_STOPPED);
1707 wake_up_all(&mthread->t_ctl_waitq);
1708 spin_unlock(&lad->lad_lock);
1710 CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread exit: rc = %d\n",
1711 lfsck_lfsck2name(lfsck), lad->lad_name,
1712 lad->lad_assistant_status);
1714 lfsck_thread_args_fini(lta);