4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2012, 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_lib.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <libcfs/list.h>
34 #include <lu_object.h>
35 #include <dt_object.h>
36 #include <md_object.h>
37 #include <lustre_fld.h>
38 #include <lustre_lib.h>
39 #include <lustre_net.h>
40 #include <lustre_lfsck.h>
41 #include <lustre/lustre_lfsck_user.h>
43 #include "lfsck_internal.h"
45 /* define lfsck thread key */
/* Per-thread LFSCK working storage (struct lfsck_thread_info); the
 * allocator is generated by LU_KEY_INIT, the finalizer below frees the
 * reusable buffers cached in the info. */
46 LU_KEY_INIT(lfsck, struct lfsck_thread_info);
48 static void lfsck_key_fini(const struct lu_context *ctx,
49 struct lu_context_key *key, void *data)
51 struct lfsck_thread_info *info = data;
/* Release the cached link-EA buffer and the generic big buffer. */
53 lu_buf_free(&info->lti_linkea_buf);
54 lu_buf_free(&info->lti_big_buf);
/* Key is valid in both MD- and DT-thread contexts. */
58 LU_CONTEXT_KEY_DEFINE(lfsck, LCT_MD_THREAD | LCT_DT_THREAD);
59 LU_KEY_INIT_GENERIC(lfsck);
/* Global registry of LFSCK instances plus orphan lists for OST/MDT
 * target descriptors registered before their instance exists.  Both the
 * instance list and the orphan lists are walked under
 * lfsck_instance_lock (see lfsck_add_target_from_orphan()). */
61 static CFS_LIST_HEAD(lfsck_instance_list);
62 static struct list_head lfsck_ost_orphan_list;
63 static struct list_head lfsck_mdt_orphan_list;
64 static DEFINE_SPINLOCK(lfsck_instance_lock);
/* Human-readable names indexed by enum lfsck_status; consumed by
 * lfsck_status2names(). */
66 static const char *lfsck_status_names[] = {
68 [LS_SCANNING_PHASE1] = "scanning-phase1",
69 [LS_SCANNING_PHASE2] = "scanning-phase2",
70 [LS_COMPLETED] = "completed",
71 [LS_FAILED] = "failed",
72 [LS_STOPPED] = "stopped",
73 [LS_PAUSED] = "paused",
74 [LS_CRASHED] = "crashed",
75 [LS_PARTIAL] = "partial",
76 [LS_CO_FAILED] = "co-failed",
77 [LS_CO_STOPPED] = "co-stopped",
78 [LS_CO_PAUSED] = "co-paused"
/* NOTE(review): the initializers of the two tables below are not
 * visible in this extract (original lines missing). */
81 const char *lfsck_flags_names[] = {
90 const char *lfsck_param_names[] = {
/* Map an lfsck_status value to its printable name; values outside
 * [0, LS_MAX) are rejected (the fallback return on the rejected path is
 * not visible in this extract). */
98 const char *lfsck_status2names(enum lfsck_status status)
100 if (unlikely(status < 0 || status >= LS_MAX))
103 return lfsck_status_names[status];
/* Initialize a target-descriptor table: spinlock, rw-semaphore, orphan
 * list and an initial BITS_PER_LONG-wide target bitmap (grown on demand
 * by __lfsck_add_target()).  The -ENOMEM branch taken when the bitmap
 * allocation fails is not visible in this extract.
 * FIX: restored '&ltds->...' operands that had been mangled to
 * '<ds->...' by an HTML-entity ('&lt') extraction error. */
106 static int lfsck_tgt_descs_init(struct lfsck_tgt_descs *ltds)
108 spin_lock_init(&ltds->ltd_lock);
109 init_rwsem(&ltds->ltd_rw_sem);
110 INIT_LIST_HEAD(&ltds->ltd_orphan);
111 ltds->ltd_tgts_bitmap = CFS_ALLOCATE_BITMAP(BITS_PER_LONG);
112 if (ltds->ltd_tgts_bitmap == NULL)
/* Tear down a target-descriptor table: unhook any orphaned descriptors,
 * drop every registered target from the bitmap and pointer table, free
 * the per-index blocks and the bitmap.  NOTE(review): some loop bodies
 * and closing braces are missing from this extract.
 * FIX: restored '&ltds->...' / '&ltd->...' operands mangled to
 * '<ds->...' / '<d->...' by an HTML-entity extraction error. */
118 static void lfsck_tgt_descs_fini(struct lfsck_tgt_descs *ltds)
120 struct lfsck_tgt_desc *ltd;
121 struct lfsck_tgt_desc *next;
124 down_write(&ltds->ltd_rw_sem);
126 list_for_each_entry_safe(ltd, next, &ltds->ltd_orphan,
128 list_del_init(&ltd->ltd_orphan_list);
132 if (unlikely(ltds->ltd_tgts_bitmap == NULL)) {
133 up_write(&ltds->ltd_rw_sem);
/* Drop each registered target; it must no longer sit on any layout
 * scanning list at this point. */
138 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
139 ltd = LTD_TGT(ltds, idx);
140 if (likely(ltd != NULL)) {
141 LASSERT(list_empty(&ltd->ltd_layout_list));
142 LASSERT(list_empty(&ltd->ltd_layout_phase_list));
145 cfs_bitmap_clear(ltds->ltd_tgts_bitmap, idx);
146 LTD_TGT(ltds, idx) = NULL;
151 LASSERTF(ltds->ltd_tgtnr == 0, "tgt count unmatched: %d\n",
/* Free the second-level pointer blocks. */
154 for (idx = 0; idx < TGT_PTRS; idx++) {
155 if (ltds->ltd_tgts_idx[idx] != NULL) {
156 OBD_FREE_PTR(ltds->ltd_tgts_idx[idx]);
157 ltds->ltd_tgts_idx[idx] = NULL;
161 CFS_FREE_BITMAP(ltds->ltd_tgts_bitmap);
162 ltds->ltd_tgts_bitmap = NULL;
163 up_write(&ltds->ltd_rw_sem);
/* Register target descriptor 'ltd' with the OST or MDT table of
 * 'lfsck', growing the target bitmap as needed and allocating the
 * second-level pointer block on demand.  Returns 0, -ENOMEM on
 * allocation failure, or -EEXIST if the index is already registered.
 * NOTE(review): the 'locked' handling and some branches are on lines
 * missing from this extract.
 * FIX: restored '&ltds->ltd_rw_sem' operands mangled to
 * '<ds->ltd_rw_sem' by an HTML-entity extraction error. */
166 static int __lfsck_add_target(const struct lu_env *env,
167 struct lfsck_instance *lfsck,
168 struct lfsck_tgt_desc *ltd,
169 bool for_ost, bool locked)
171 struct lfsck_tgt_descs *ltds;
172 __u32 index = ltd->ltd_index;
177 ltds = &lfsck->li_ost_descs;
179 ltds = &lfsck->li_mdt_descs;
182 down_write(&ltds->ltd_rw_sem);
184 LASSERT(ltds->ltd_tgts_bitmap != NULL);
/* Grow the bitmap (doubling from at least BITS_PER_LONG) until it can
 * hold 'index'. */
186 if (index >= ltds->ltd_tgts_bitmap->size) {
187 __u32 newsize = max((__u32)ltds->ltd_tgts_bitmap->size,
188 (__u32)BITS_PER_LONG);
189 cfs_bitmap_t *old_bitmap = ltds->ltd_tgts_bitmap;
190 cfs_bitmap_t *new_bitmap;
192 while (newsize < index + 1)
195 new_bitmap = CFS_ALLOCATE_BITMAP(newsize);
196 if (new_bitmap == NULL)
197 GOTO(unlock, rc = -ENOMEM);
199 if (ltds->ltd_tgtnr > 0)
200 cfs_bitmap_copy(new_bitmap, old_bitmap);
201 ltds->ltd_tgts_bitmap = new_bitmap;
202 CFS_FREE_BITMAP(old_bitmap);
205 if (cfs_bitmap_check(ltds->ltd_tgts_bitmap, index)) {
206 CERROR("%s: the device %s (%u) is registered already\n",
207 lfsck_lfsck2name(lfsck),
208 ltd->ltd_tgt->dd_lu_dev.ld_obd->obd_name, index);
209 GOTO(unlock, rc = -EEXIST);
/* Allocate the pointer block covering this index on first use. */
212 if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL) {
213 OBD_ALLOC_PTR(ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK]);
214 if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL)
215 GOTO(unlock, rc = -ENOMEM);
218 LTD_TGT(ltds, index) = ltd;
219 cfs_bitmap_set(ltds->ltd_tgts_bitmap, index);
222 GOTO(unlock, rc = 0);
226 up_write(&ltds->ltd_rw_sem);
/* Move target descriptors parked on the global orphan lists (they
 * arrived before this instance existed) onto the instance's own orphan
 * list, then register each one via __lfsck_add_target().  Handles the
 * OST list first; the tail of the function (not fully visible here)
 * repeats for the MDT list.
 * FIX: restored '&ltds->...' / '&ltd->...' operands mangled to
 * '<ds->...' / '<d->...' by an HTML-entity extraction error. */
231 static int lfsck_add_target_from_orphan(const struct lu_env *env,
232 struct lfsck_instance *lfsck)
234 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
235 struct lfsck_tgt_desc *ltd;
236 struct lfsck_tgt_desc *next;
237 struct list_head *head = &lfsck_ost_orphan_list;
/* Claim, under the global lock, the orphans that belong to this
 * instance (matched by bottom device). */
242 spin_lock(&lfsck_instance_lock);
243 list_for_each_entry_safe(ltd, next, head, ltd_orphan_list) {
244 if (ltd->ltd_key == lfsck->li_bottom) {
245 list_del_init(&ltd->ltd_orphan_list);
246 list_add_tail(&ltd->ltd_orphan_list,
250 spin_unlock(&lfsck_instance_lock);
252 down_write(&ltds->ltd_rw_sem);
253 while (!list_empty(&ltds->ltd_orphan)) {
254 ltd = list_entry(ltds->ltd_orphan.next,
255 struct lfsck_tgt_desc,
257 list_del_init(&ltd->ltd_orphan_list);
258 rc = __lfsck_add_target(env, lfsck, ltd, for_ost, true);
259 /* Do not hold the semaphore for too long time. */
260 up_write(&ltds->ltd_rw_sem);
264 down_write(&ltds->ltd_rw_sem);
266 up_write(&ltds->ltd_rw_sem);
/* Repeat the same steps for MDT orphans. */
269 ltds = &lfsck->li_mdt_descs;
270 head = &lfsck_mdt_orphan_list;
/* Search one component list for the component of the given type.
 * Lockless helper: callers take lfsck->li_lock around it (see
 * lfsck_component_find()). */
278 static inline struct lfsck_component *
279 __lfsck_component_find(struct lfsck_instance *lfsck, __u16 type, cfs_list_t *list)
281 struct lfsck_component *com;
283 cfs_list_for_each_entry(com, list, lc_link) {
284 if (com->lc_type == type)
/* Find the component of 'type' on the scan, double-scan or idle list,
 * taking a reference under li_lock before returning it. */
290 static struct lfsck_component *
291 lfsck_component_find(struct lfsck_instance *lfsck, __u16 type)
293 struct lfsck_component *com;
295 spin_lock(&lfsck->li_lock);
296 com = __lfsck_component_find(lfsck, type, &lfsck->li_list_scan);
300 com = __lfsck_component_find(lfsck, type,
301 &lfsck->li_list_double_scan);
305 com = __lfsck_component_find(lfsck, type, &lfsck->li_list_idle);
/* Pin the component before dropping the lock so it cannot vanish. */
309 lfsck_component_get(com);
310 spin_unlock(&lfsck->li_lock);
/* Detach a component from its lists (if linked) and drop the list's
 * reference on it. */
314 void lfsck_component_cleanup(const struct lu_env *env,
315 struct lfsck_component *com)
317 if (!cfs_list_empty(&com->lc_link))
318 cfs_list_del_init(&com->lc_link);
319 if (!cfs_list_empty(&com->lc_link_dir))
320 cfs_list_del_init(&com->lc_link_dir);
322 lfsck_component_put(env, com);
/* Final teardown of an LFSCK instance: requires the instance already
 * unlinked and its thread stopped.  Releases the OI-scrub iterator
 * object, drains all component lists, finalizes both target tables,
 * and drops the bookmark object and local OID storage. */
325 void lfsck_instance_cleanup(const struct lu_env *env,
326 struct lfsck_instance *lfsck)
328 struct ptlrpc_thread *thread = &lfsck->li_thread;
329 struct lfsck_component *com;
332 LASSERT(list_empty(&lfsck->li_link));
333 LASSERT(thread_is_init(thread) || thread_is_stopped(thread));
335 if (lfsck->li_obj_oit != NULL) {
336 lu_object_put_nocache(env, &lfsck->li_obj_oit->do_lu);
337 lfsck->li_obj_oit = NULL;
340 LASSERT(lfsck->li_obj_dir == NULL);
/* Drain every component list; each cleanup drops the list's ref. */
342 while (!cfs_list_empty(&lfsck->li_list_scan)) {
343 com = cfs_list_entry(lfsck->li_list_scan.next,
344 struct lfsck_component,
346 lfsck_component_cleanup(env, com);
349 LASSERT(cfs_list_empty(&lfsck->li_list_dir));
351 while (!cfs_list_empty(&lfsck->li_list_double_scan)) {
352 com = cfs_list_entry(lfsck->li_list_double_scan.next,
353 struct lfsck_component,
355 lfsck_component_cleanup(env, com);
358 while (!cfs_list_empty(&lfsck->li_list_idle)) {
359 com = cfs_list_entry(lfsck->li_list_idle.next,
360 struct lfsck_component,
362 lfsck_component_cleanup(env, com);
365 lfsck_tgt_descs_fini(&lfsck->li_ost_descs);
366 lfsck_tgt_descs_fini(&lfsck->li_mdt_descs);
368 if (lfsck->li_bookmark_obj != NULL) {
369 lu_object_put_nocache(env, &lfsck->li_bookmark_obj->do_lu);
370 lfsck->li_bookmark_obj = NULL;
373 if (lfsck->li_los != NULL) {
374 local_oid_storage_fini(env, lfsck->li_los);
375 lfsck->li_los = NULL;
/* Look up the instance whose bottom device matches 'key'; optionally
 * take a reference ('ref') and/or remove it from the global list
 * ('unlink').  Caller holds lfsck_instance_lock. */
381 static inline struct lfsck_instance *
382 __lfsck_instance_find(struct dt_device *key, bool ref, bool unlink)
384 struct lfsck_instance *lfsck;
386 cfs_list_for_each_entry(lfsck, &lfsck_instance_list, li_link) {
387 if (lfsck->li_bottom == key) {
389 lfsck_instance_get(lfsck);
391 list_del_init(&lfsck->li_link);
/* Locked wrapper around __lfsck_instance_find(). */
400 static inline struct lfsck_instance *lfsck_instance_find(struct dt_device *key,
401 bool ref, bool unlink)
403 struct lfsck_instance *lfsck;
405 spin_lock(&lfsck_instance_lock);
406 lfsck = __lfsck_instance_find(key, ref, unlink);
407 spin_unlock(&lfsck_instance_lock);
412 static inline int lfsck_instance_add(struct lfsck_instance *lfsck)
414 struct lfsck_instance *tmp;
416 spin_lock(&lfsck_instance_lock);
417 cfs_list_for_each_entry(tmp, &lfsck_instance_list, li_link) {
418 if (lfsck->li_bottom == tmp->li_bottom) {
419 spin_unlock(&lfsck_instance_lock);
424 cfs_list_add_tail(&lfsck->li_link, &lfsck_instance_list);
425 spin_unlock(&lfsck_instance_lock);
/* Print 'prefix:' followed by the comma-separated names of the bits set
 * in 'bits' into *buf, advancing *buf/*len (advance logic on missing
 * lines).  Bits are consumed low-to-high via the 'flag' mask. */
429 int lfsck_bits_dump(char **buf, int *len, int bits, const char *names[],
437 rc = snprintf(*buf, *len, "%s:%c", prefix, bits != 0 ? ' ' : '\n');
443 for (i = 0, flag = 1; bits != 0; i++, flag = 1 << i) {
446 if (names[i] != NULL) {
447 rc = snprintf(*buf, *len, "%s%c", names[i],
448 bits != 0 ? ',' : '\n');
/* Print "prefix: <elapsed> seconds" (seconds since 'time') into *buf,
 * or "prefix: N/A" when the timestamp is unset (the guard selecting
 * between the two branches is on a missing line). */
460 int lfsck_time_dump(char **buf, int *len, __u64 time, const char *prefix)
465 rc = snprintf(*buf, *len, "%s: "LPU64" seconds\n", prefix,
466 cfs_time_current_sec() - time);
468 rc = snprintf(*buf, *len, "%s: N/A\n", prefix);
/* Print an LFSCK position as "prefix: <oit cookie>, <dir FID>, <dir
 * cookie>", substituting N/A for unset fields. */
477 int lfsck_pos_dump(char **buf, int *len, struct lfsck_position *pos,
482 if (fid_is_zero(&pos->lp_dir_parent)) {
483 if (pos->lp_oit_cookie == 0)
484 rc = snprintf(*buf, *len, "%s: N/A, N/A, N/A\n",
487 rc = snprintf(*buf, *len, "%s: "LPU64", N/A, N/A\n",
488 prefix, pos->lp_oit_cookie);
490 rc = snprintf(*buf, *len, "%s: "LPU64", "DFID", "LPU64"\n",
491 prefix, pos->lp_oit_cookie,
492 PFID(&pos->lp_dir_parent), pos->lp_dir_cookie);
/* Capture the current scan position: the OI-table iterator cookie
 * (rewound by one if the current object is not yet fully processed and
 * this is not the initial fill), plus the namespace-traverse parent FID
 * and directory cookie when a directory iteration is active. */
502 void lfsck_pos_fill(const struct lu_env *env, struct lfsck_instance *lfsck,
503 struct lfsck_position *pos, bool init)
505 const struct dt_it_ops *iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
507 if (unlikely(lfsck->li_di_oit == NULL)) {
508 memset(pos, 0, sizeof(*pos));
512 pos->lp_oit_cookie = iops->store(env, lfsck->li_di_oit);
513 if (!lfsck->li_current_oit_processed && !init)
514 pos->lp_oit_cookie--;
516 LASSERT(pos->lp_oit_cookie > 0);
518 if (lfsck->li_di_dir != NULL) {
519 struct dt_object *dto = lfsck->li_obj_dir;
521 pos->lp_dir_cookie = dto->do_index_ops->dio_it.store(env,
/* A cookie at/after MDS_DIR_END_OFF means the directory scan has
 * finished, so record no directory position. */
524 if (pos->lp_dir_cookie >= MDS_DIR_END_OFF) {
525 fid_zero(&pos->lp_dir_parent);
526 pos->lp_dir_cookie = 0;
528 pos->lp_dir_parent = *lfsck_dto2fid(dto);
531 fid_zero(&pos->lp_dir_parent);
532 pos->lp_dir_cookie = 0;
/* Record the speed limit (objects/sec) and derive the sleep schedule:
 * for rates above HZ sleep 1 jiffy every limit/HZ objects, otherwise
 * sleep HZ/limit jiffies per object; 0/LFSCK_SPEED_NO_LIMIT disables
 * throttling.  Caller serializes (see li_mutex users). */
536 static void __lfsck_set_speed(struct lfsck_instance *lfsck, __u32 limit)
538 lfsck->li_bookmark_ram.lb_speed_limit = limit;
539 if (limit != LFSCK_SPEED_NO_LIMIT) {
541 lfsck->li_sleep_rate = limit / HZ;
542 lfsck->li_sleep_jif = 1;
544 lfsck->li_sleep_rate = 1;
545 lfsck->li_sleep_jif = HZ / limit;
548 lfsck->li_sleep_jif = 0;
549 lfsck->li_sleep_rate = 0;
/* Throttle the main scan: once li_new_scanned reaches the per-interval
 * quota, sleep li_sleep_jif jiffies (interruptible when the thread
 * stops running) and reset the counter. */
553 void lfsck_control_speed(struct lfsck_instance *lfsck)
555 struct ptlrpc_thread *thread = &lfsck->li_thread;
556 struct l_wait_info lwi;
558 if (lfsck->li_sleep_jif > 0 &&
559 lfsck->li_new_scanned >= lfsck->li_sleep_rate) {
560 lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
561 LWI_ON_SIGNAL_NOOP, NULL);
563 l_wait_event(thread->t_ctl_waitq,
564 !thread_is_running(thread),
566 lfsck->li_new_scanned = 0;
/* Same throttling as lfsck_control_speed(), but driven by a single
 * component's own lc_new_scanned counter. */
570 void lfsck_control_speed_by_self(struct lfsck_component *com)
572 struct lfsck_instance *lfsck = com->lc_lfsck;
573 struct ptlrpc_thread *thread = &lfsck->li_thread;
574 struct l_wait_info lwi;
576 if (lfsck->li_sleep_jif > 0 &&
577 com->lc_new_scanned >= lfsck->li_sleep_rate) {
578 lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
579 LWI_ON_SIGNAL_NOOP, NULL);
581 l_wait_event(thread->t_ctl_waitq,
582 !thread_is_running(thread),
584 com->lc_new_scanned = 0;
/* Fetch the parent FID of directory 'obj' by looking up its ".." entry;
 * non-directories (or objects that cannot be used as an index) are
 * rejected (error return on a missing line). */
588 static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
591 if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
592 !dt_try_as_dir(env, obj)))
595 return dt_lookup(env, obj, (struct dt_rec *)fid,
596 (const struct dt_key *)"..", BYPASS_CAPA);
/* Decide whether 'obj' is a directory the namespace scan must descend
 * into.  Skips: non-master/non-dir cases, the global root, ".lustre",
 * dead objects, and (by walking ".." upward) anything not reachable
 * from the local root.  On the skip paths the object reference is
 * dropped here; exact return values sit on missing lines. */
599 static int lfsck_needs_scan_dir(const struct lu_env *env,
600 struct lfsck_instance *lfsck,
601 struct dt_object *obj)
603 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
607 if (!lfsck->li_master || !S_ISDIR(lfsck_object_type(obj)) ||
608 cfs_list_empty(&lfsck->li_list_dir))
612 /* XXX: Currently, we do not scan the "/REMOTE_PARENT_DIR",
613 * which is the agent directory to manage the objects
614 * which name entries reside on remote MDTs. Related
615 * consistency verification will be processed in LFSCK
617 if (lu_fid_eq(lfsck_dto2fid(obj), &lfsck->li_global_root_fid)) {
619 lfsck_object_put(env, obj);
623 /* .lustre doesn't contain "real" user objects, no need lfsck */
624 if (fid_is_dot_lustre(lfsck_dto2fid(obj))) {
626 lfsck_object_put(env, obj);
/* Probe the link EA under read lock just to classify the object;
 * a zero-length buffer is used, so only the rc matters. */
630 dt_read_lock(env, obj, MOR_TGT_CHILD);
631 if (unlikely(lfsck_is_dead_obj(obj))) {
632 dt_read_unlock(env, obj);
634 lfsck_object_put(env, obj);
638 rc = dt_xattr_get(env, obj,
639 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK,
641 dt_read_unlock(env, obj);
644 lfsck_object_put(env, obj);
648 if (rc < 0 && rc != -ENODATA) {
650 lfsck_object_put(env, obj);
/* Walk up via ".." until the local root (or an error) decides. */
654 rc = lfsck_parent_fid(env, obj, fid);
656 lfsck_object_put(env, obj);
660 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
663 obj = lfsck_object_find(env, lfsck, fid);
666 else if (IS_ERR(obj))
669 if (!dt_object_exists(obj)) {
670 lfsck_object_put(env, obj);
674 /* Currently, only client visible directory can be remote. */
675 if (dt_object_remote(obj)) {
676 lfsck_object_put(env, obj);
/* Allocate and initialize the argument bundle handed to a new LFSCK
 * thread: its own lu_env plus references on the instance and (optional)
 * component; paired with lfsck_thread_args_fini().
 * FIX: restored '&lta->lta_env' mangled to '<a->lta_env' by an
 * HTML-entity extraction error. */
685 struct lfsck_thread_args *lfsck_thread_args_init(struct lfsck_instance *lfsck,
686 struct lfsck_component *com,
687 struct lfsck_start_param *lsp)
689 struct lfsck_thread_args *lta;
694 return ERR_PTR(-ENOMEM);
696 rc = lu_env_init(&lta->lta_env, LCT_MD_THREAD | LCT_DT_THREAD);
702 lta->lta_lfsck = lfsck_instance_get(lfsck);
704 lta->lta_com = lfsck_component_get(com);
/* Release a thread-argument bundle: drop the component (if any) and
 * instance references, then finalize the embedded lu_env.
 * FIX: restored '&lta->lta_env' operands mangled to '<a->lta_env' by an
 * HTML-entity extraction error. */
711 void lfsck_thread_args_fini(struct lfsck_thread_args *lta)
713 if (lta->lta_com != NULL)
714 lfsck_component_put(&lta->lta_env, lta->lta_com);
715 lfsck_instance_put(&lta->lta_env, lta->lta_lfsck);
716 lu_env_fini(&lta->lta_env);
720 /* LFSCK wrap functions */
/* Propagate a scan failure to every active component. */
722 void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
725 struct lfsck_component *com;
727 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
728 com->lc_ops->lfsck_fail(env, com, new_checked);
/* Periodic checkpoint: no-op until the next scheduled time, then record
 * the current position and ask each scanning component to checkpoint;
 * finally reschedule.  Returns the first component failure if any
 * (rc1 handling is on missing lines). */
732 int lfsck_checkpoint(const struct lu_env *env, struct lfsck_instance *lfsck)
734 struct lfsck_component *com;
738 if (likely(cfs_time_beforeq(cfs_time_current(),
739 lfsck->li_time_next_checkpoint)))
742 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, false);
743 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
744 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
749 lfsck->li_time_last_checkpoint = cfs_time_current();
750 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
751 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
752 return rc1 != 0 ? rc1 : rc;
/* Prepare a scan: let each component prep (choosing the smallest
 * resume position among them), load the OI-table iterator at that
 * position, and when resuming inside a directory re-establish the
 * namespace-based directory iterator too.  Finishes by taking the
 * initial checkpoint.  NOTE(review): several branches and the RETURN
 * paths are on lines missing from this extract. */
755 int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
756 struct lfsck_start_param *lsp)
758 struct dt_object *obj = NULL;
759 struct lfsck_component *com;
760 struct lfsck_component *next;
761 struct lfsck_position *pos = NULL;
762 const struct dt_it_ops *iops =
763 &lfsck->li_obj_oit->do_index_ops->dio_it;
768 LASSERT(lfsck->li_obj_dir == NULL);
769 LASSERT(lfsck->li_di_dir == NULL);
771 lfsck->li_current_oit_processed = 0;
772 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
773 com->lc_new_checked = 0;
774 if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
777 rc = com->lc_ops->lfsck_prep(env, com, lsp);
/* Track the earliest non-zero component start position so every
 * component's range is covered. */
782 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
783 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
784 pos = &com->lc_pos_start;
787 /* Init otable-based iterator. */
789 rc = iops->load(env, lfsck->li_di_oit, 0);
791 lfsck->li_oit_over = 1;
798 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
802 lfsck->li_oit_over = 1;
804 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
807 /* Find the directory for namespace-based traverse. */
808 obj = lfsck_object_find(env, lfsck, &pos->lp_dir_parent);
811 else if (IS_ERR(obj))
812 RETURN(PTR_ERR(obj));
814 /* XXX: Currently, skip remote object, the consistency for
815 * remote object will be processed in LFSCK phase III. */
816 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
817 unlikely(!S_ISDIR(lfsck_object_type(obj))))
820 if (unlikely(!dt_try_as_dir(env, obj)))
821 GOTO(out, rc = -ENOTDIR);
823 /* Init the namespace-based directory traverse. */
824 iops = &obj->do_index_ops->dio_it;
825 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
827 GOTO(out, rc = PTR_ERR(di));
829 LASSERT(pos->lp_dir_cookie < MDS_DIR_END_OFF);
831 rc = iops->load(env, di, pos->lp_dir_cookie);
832 if ((rc == 0) || (rc > 0 && pos->lp_dir_cookie > 0))
833 rc = iops->next(env, di);
/* Publish the directory iterator under li_lock so position readers
 * see a consistent li_di_dir. */
843 lfsck->li_obj_dir = lfsck_object_get(obj);
844 lfsck->li_cookie_dir = iops->store(env, di);
845 spin_lock(&lfsck->li_lock);
846 lfsck->li_di_dir = di;
847 spin_unlock(&lfsck->li_lock);
853 lfsck_object_put(env, obj);
/* Error path: roll components back via their post handlers. */
856 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
858 com->lc_ops->lfsck_post(env, com, rc, true);
864 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, true);
865 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
866 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
871 lfsck->li_time_last_checkpoint = cfs_time_current();
872 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
873 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Handle one object from the OI-table iteration: pass it to each
 * scanning component, then, if it is a directory that needs a namespace
 * scan, set up and publish the directory iterator for it.  Errors are
 * reported through lfsck_fail(); positive rc is squashed to 0. */
877 int lfsck_exec_oit(const struct lu_env *env, struct lfsck_instance *lfsck,
878 struct dt_object *obj)
880 struct lfsck_component *com;
881 const struct dt_it_ops *iops;
886 LASSERT(lfsck->li_obj_dir == NULL);
888 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
889 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
894 rc = lfsck_needs_scan_dir(env, lfsck, obj);
898 if (unlikely(!dt_try_as_dir(env, obj)))
899 GOTO(out, rc = -ENOTDIR);
901 iops = &obj->do_index_ops->dio_it;
902 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
904 GOTO(out, rc = PTR_ERR(di));
906 rc = iops->load(env, di, 0);
908 rc = iops->next(env, di);
/* Publish the new directory iterator under li_lock. */
918 lfsck->li_obj_dir = lfsck_object_get(obj);
919 lfsck->li_cookie_dir = iops->store(env, di);
920 spin_lock(&lfsck->li_lock);
921 lfsck->li_di_dir = di;
922 spin_unlock(&lfsck->li_lock);
928 lfsck_fail(env, lfsck, false);
929 return (rc > 0 ? 0 : rc);
/* Feed one directory entry to each scanning component. */
932 int lfsck_exec_dir(const struct lu_env *env, struct lfsck_instance *lfsck,
933 struct dt_object *obj, struct lu_dirent *ent)
935 struct lfsck_component *com;
938 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
939 rc = com->lc_ops->lfsck_exec_dir(env, com, obj, ent);
/* Finish a scan pass: record the final position, run each scanning
 * component's post handler with 'result', and reset the checkpoint
 * schedule.  Component post failures are deliberately not fatal so the
 * others can proceed. */
946 int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
949 struct lfsck_component *com;
950 struct lfsck_component *next;
954 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, false);
955 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
956 rc = com->lc_ops->lfsck_post(env, com, result, false);
961 lfsck->li_time_last_checkpoint = cfs_time_current();
962 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
963 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
965 /* Ignore some component post failure to make other can go ahead. */
/* Shared RPC-reply interpreter: under li_lock, hand the reply to every
 * scanning and double-scanning component that implements
 * lfsck_interpret, pointing laia->laia_com at the current component.
 * Only valid for shared interpret args (asserted). */
969 static void lfsck_interpret(const struct lu_env *env,
970 struct lfsck_instance *lfsck,
971 struct ptlrpc_request *req, void *args, int result)
973 struct lfsck_async_interpret_args *laia = args;
974 struct lfsck_component *com;
976 LASSERT(laia->laia_shared);
978 spin_lock(&lfsck->li_lock);
979 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
980 if (com->lc_ops->lfsck_interpret != NULL) {
981 laia->laia_com = com;
982 com->lc_ops->lfsck_interpret(env, req, laia, result);
986 list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
987 if (com->lc_ops->lfsck_interpret != NULL) {
988 laia->laia_com = com;
989 com->lc_ops->lfsck_interpret(env, req, laia, result);
992 spin_unlock(&lfsck->li_lock);
/* Run phase-2 ("double scan") for every queued component (clearing the
 * journal flag in dryrun mode), then wait until all outstanding double
 * scans have drained.  Returns the first failure if any (rc1 handling
 * is on missing lines). */
995 int lfsck_double_scan(const struct lu_env *env, struct lfsck_instance *lfsck)
997 struct lfsck_component *com;
998 struct lfsck_component *next;
999 struct l_wait_info lwi = { 0 };
1003 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
1005 if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
1006 com->lc_journal = 0;
1008 rc = com->lc_ops->lfsck_double_scan(env, com);
1013 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1014 atomic_read(&lfsck->li_double_scan_count) == 0,
1017 return rc1 != 0 ? rc1 : rc;
/* Notify one target that the LFSCK component of 'type' is stopping:
 * look the component up (scan list first, then double-scan), pin it,
 * and invoke its lfsck_stop_notify hook through a private request set.
 * NOTE(review): some branches/braces are on lines missing from this
 * extract. */
1020 static int lfsck_stop_notify(const struct lu_env *env,
1021 struct lfsck_instance *lfsck,
1022 struct lfsck_tgt_descs *ltds,
1023 struct lfsck_tgt_desc *ltd, __u16 type)
1025 struct ptlrpc_request_set *set;
1026 struct lfsck_component *com;
1030 spin_lock(&lfsck->li_lock);
1031 com = __lfsck_component_find(lfsck, type, &lfsck->li_list_scan);
1033 com = __lfsck_component_find(lfsck, type,
1034 &lfsck->li_list_double_scan);
1036 lfsck_component_get(com);
/* FIX: was spin_lock() -- acquiring li_lock a second time while it is
 * already held (taken above) would self-deadlock.  Release it instead,
 * matching the lock/get/unlock pattern in lfsck_component_find(). */
1037 spin_unlock(&lfsck->li_lock);
1040 if (com->lc_ops->lfsck_stop_notify != NULL) {
1041 set = ptlrpc_prep_set();
1043 lfsck_component_put(env, com);
1048 rc = com->lc_ops->lfsck_stop_notify(env, com, ltds,
1051 rc = ptlrpc_set_wait(set);
1053 ptlrpc_set_destroy(set);
1056 lfsck_component_put(env, com);
/* Ask every scanning and double-scanning component that implements a
 * quit hook to abort promptly. */
1062 void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
1064 struct lfsck_component *com;
1065 struct lfsck_component *next;
1067 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
1069 if (com->lc_ops->lfsck_quit != NULL)
1070 com->lc_ops->lfsck_quit(env, com);
1073 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
1075 if (com->lc_ops->lfsck_quit != NULL)
1076 com->lc_ops->lfsck_quit(env, com);
/* ptlrpc reply callback for broadcast requests: recover the instance
 * from the embedded target-descs pointer, fan the reply out via
 * lfsck_interpret(), drop the per-request target ref, and keep the
 * first significant error (ignoring -EALREADY overwrite). */
1080 static int lfsck_async_interpret(const struct lu_env *env,
1081 struct ptlrpc_request *req,
1084 struct lfsck_async_interpret_args *laia = args;
1085 struct lfsck_instance *lfsck;
1087 lfsck = container_of0(laia->laia_ltds, struct lfsck_instance,
1089 lfsck_interpret(env, lfsck, req, laia, rc);
1090 lfsck_tgt_put(laia->laia_ltd);
1091 if (rc != 0 && laia->laia_result != -EALREADY)
1092 laia->laia_result = rc;
/* Build and queue one async LFSCK RPC (NOTIFY or QUERY) on 'set' for
 * export 'exp'.  Peers without OBD_CONNECT_LFSCK are skipped; unknown
 * opcodes are rejected.  The interpret args are copied into the request
 * and the referenced component, if any, gets an extra reference
 * released by the interpreter. */
1097 int lfsck_async_request(const struct lu_env *env, struct obd_export *exp,
1098 struct lfsck_request *lr,
1099 struct ptlrpc_request_set *set,
1100 ptlrpc_interpterer_t interpreter,
1101 void *args, int request)
1103 struct lfsck_async_interpret_args *laia;
1104 struct ptlrpc_request *req;
1105 struct lfsck_request *tmp;
1106 struct req_format *format;
1109 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
1114 format = &RQF_LFSCK_NOTIFY;
1117 format = &RQF_LFSCK_QUERY;
1120 CERROR("%s: unknown async request: opc = %d\n",
1121 exp->exp_obd->obd_name, request);
1125 req = ptlrpc_request_alloc(class_exp2cliimp(exp), format);
1129 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, request);
1131 ptlrpc_request_free(req);
1136 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
1138 ptlrpc_request_set_replen(req);
1140 laia = ptlrpc_req_async_args(req);
1141 *laia = *(struct lfsck_async_interpret_args *)args;
1142 if (laia->laia_com != NULL)
1143 lfsck_component_get(laia->laia_com);
1144 req->rq_interpret_reply = interpreter;
1145 ptlrpc_set_add_req(set, req);
1150 /* external interfaces */
/* Report the configured speed limit (objects/sec) for the instance
 * bound to 'key' into 'buf'; looks the instance up with a reference
 * that is dropped before returning. */
1152 int lfsck_get_speed(struct dt_device *key, void *buf, int len)
1155 struct lfsck_instance *lfsck;
1159 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1163 lfsck = lfsck_instance_find(key, true, false);
1164 if (likely(lfsck != NULL)) {
1165 rc = snprintf(buf, len, "%u\n",
1166 lfsck->li_bookmark_ram.lb_speed_limit);
1167 lfsck_instance_put(&env, lfsck);
1176 EXPORT_SYMBOL(lfsck_get_speed);
/* Set the speed limit for the instance bound to 'key': update the
 * in-RAM bookmark (and derived sleep schedule) under li_mutex and
 * persist it via lfsck_bookmark_store(). */
1178 int lfsck_set_speed(struct dt_device *key, int val)
1181 struct lfsck_instance *lfsck;
1185 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1189 lfsck = lfsck_instance_find(key, true, false);
1190 if (likely(lfsck != NULL)) {
1191 mutex_lock(&lfsck->li_mutex);
1192 __lfsck_set_speed(lfsck, val);
1193 rc = lfsck_bookmark_store(&env, lfsck);
1194 mutex_unlock(&lfsck->li_mutex);
1195 lfsck_instance_put(&env, lfsck);
1204 EXPORT_SYMBOL(lfsck_set_speed);
/* Report the configured async-window size for the instance bound to
 * 'key' into 'buf'. */
1206 int lfsck_get_windows(struct dt_device *key, void *buf, int len)
1209 struct lfsck_instance *lfsck;
1213 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1217 lfsck = lfsck_instance_find(key, true, false);
1218 if (likely(lfsck != NULL)) {
1219 rc = snprintf(buf, len, "%u\n",
1220 lfsck->li_bookmark_ram.lb_async_windows);
1221 lfsck_instance_put(&env, lfsck);
1230 EXPORT_SYMBOL(lfsck_get_windows);
/* Set the async-window size for the instance bound to 'key'.  Values
 * above LFSCK_ASYNC_WIN_MAX are rejected with an explanatory error;
 * unchanged values skip the store.  Updates are made under li_mutex and
 * persisted via lfsck_bookmark_store(). */
1232 int lfsck_set_windows(struct dt_device *key, int val)
1235 struct lfsck_instance *lfsck;
1239 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1243 lfsck = lfsck_instance_find(key, true, false);
1244 if (likely(lfsck != NULL)) {
1245 if (val > LFSCK_ASYNC_WIN_MAX) {
1246 CERROR("%s: Too large async windows size, which "
1247 "may cause memory issues. The valid range "
1248 "is [0 - %u]. If you do not want to restrict "
1249 "the windows size for async requests pipeline, "
1250 "just set it as 0.\n",
1251 lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
1253 } else if (lfsck->li_bookmark_ram.lb_async_windows != val) {
1254 mutex_lock(&lfsck->li_mutex);
1255 lfsck->li_bookmark_ram.lb_async_windows = val;
1256 rc = lfsck_bookmark_store(&env, lfsck);
1257 mutex_unlock(&lfsck->li_mutex);
1259 lfsck_instance_put(&env, lfsck);
1268 EXPORT_SYMBOL(lfsck_set_windows);
/* Dump the state of the component of 'type' for the instance bound to
 * 'key' into 'buf' via the component's own lfsck_dump operation. */
1270 int lfsck_dump(struct dt_device *key, void *buf, int len, enum lfsck_type type)
1273 struct lfsck_instance *lfsck;
1274 struct lfsck_component *com;
1278 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1282 lfsck = lfsck_instance_find(key, true, false);
1283 if (likely(lfsck != NULL)) {
1284 com = lfsck_component_find(lfsck, type);
1285 if (likely(com != NULL)) {
1286 rc = com->lc_ops->lfsck_dump(&env, com, buf, len);
1287 lfsck_component_put(&env, com);
1292 lfsck_instance_put(&env, lfsck);
1301 EXPORT_SYMBOL(lfsck_dump);
/* Broadcast an LE_STOP LFSCK request to every registered MDT and wait
 * for the replies.  Only called with LPF_BROADCAST set (asserted).
 * Collects the first reply error via the shared interpret args,
 * treating -EALREADY as success.
 * FIX: restored '&ltds->ltd_rw_sem' operands mangled to
 * '<ds->ltd_rw_sem' by an HTML-entity extraction error. */
1303 static int lfsck_stop_all(const struct lu_env *env,
1304 struct lfsck_instance *lfsck,
1305 struct lfsck_stop *stop)
1307 struct lfsck_thread_info *info = lfsck_env_info(env);
1308 struct lfsck_request *lr = &info->lti_lr;
1309 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1310 struct ptlrpc_request_set *set;
1311 struct lfsck_tgt_descs *ltds = &lfsck->li_mdt_descs;
1312 struct lfsck_tgt_desc *ltd;
1313 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1319 LASSERT(stop->ls_flags & LPF_BROADCAST);
1321 set = ptlrpc_prep_set();
1322 if (unlikely(set == NULL)) {
1323 CERROR("%s: cannot allocate memory for stop LFSCK on "
1324 "all targets\n", lfsck_lfsck2name(lfsck));
/* Build the stop request once; it is reused for every target. */
1329 memset(lr, 0, sizeof(*lr));
1330 lr->lr_event = LE_STOP;
1331 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1332 lr->lr_status = stop->ls_status;
1333 lr->lr_version = bk->lb_version;
1334 lr->lr_active = LFSCK_TYPES_ALL;
1335 lr->lr_param = stop->ls_flags;
1337 laia->laia_com = NULL;
1338 laia->laia_ltds = ltds;
1340 laia->laia_result = 0;
1341 laia->laia_shared = 1;
1343 down_read(&ltds->ltd_rw_sem);
1344 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1345 ltd = lfsck_tgt_get(ltds, idx);
1346 LASSERT(ltd != NULL);
1348 laia->laia_ltd = ltd;
1349 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1350 lfsck_async_interpret, laia,
/* On queueing failure, run the interpreter locally so components
 * still observe the failed target. */
1353 lfsck_interpret(env, lfsck, NULL, laia, rc);
1355 CWARN("%s: cannot notify MDT %x for LFSCK stop: "
1356 "rc = %d\n", lfsck_lfsck2name(lfsck), idx, rc);
1360 up_read(&ltds->ltd_rw_sem);
1362 rc = ptlrpc_set_wait(set);
1363 ptlrpc_set_destroy(set);
1366 rc = laia->laia_result;
1368 if (rc == -EALREADY)
1372 CWARN("%s: fail to stop LFSCK on some MDTs: rc = %d\n",
1373 lfsck_lfsck2name(lfsck), rc);
1375 RETURN(rc != 0 ? rc : rc1);
/* Broadcast an LE_START LFSCK request to every registered MDT.  Only
 * called with LPF_BROADCAST set (asserted).  With LPF_FAILOUT any
 * notification failure is fatal and triggers a broadcast stop;
 * otherwise failures just degrade to a partial scan.
 * FIX: restored '&ltds->ltd_rw_sem' operands mangled to
 * '<ds->ltd_rw_sem' by an HTML-entity extraction error. */
1378 static int lfsck_start_all(const struct lu_env *env,
1379 struct lfsck_instance *lfsck,
1380 struct lfsck_start *start)
1382 struct lfsck_thread_info *info = lfsck_env_info(env);
1383 struct lfsck_request *lr = &info->lti_lr;
1384 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1385 struct ptlrpc_request_set *set;
1386 struct lfsck_tgt_descs *ltds = &lfsck->li_mdt_descs;
1387 struct lfsck_tgt_desc *ltd;
1388 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1393 LASSERT(start->ls_flags & LPF_BROADCAST);
1395 set = ptlrpc_prep_set();
1396 if (unlikely(set == NULL)) {
1397 if (bk->lb_param & LPF_FAILOUT) {
1398 CERROR("%s: cannot allocate memory for start LFSCK on "
1399 "all targets, failout.\n",
1400 lfsck_lfsck2name(lfsck));
1404 CWARN("%s: cannot allocate memory for start LFSCK on "
1405 "all targets, partly scan.\n",
1406 lfsck_lfsck2name(lfsck));
/* Build the start request once; it is reused for every target. */
1412 memset(lr, 0, sizeof(*lr));
1413 lr->lr_event = LE_START;
1414 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1415 lr->lr_speed = bk->lb_speed_limit;
1416 lr->lr_version = bk->lb_version;
1417 lr->lr_active = start->ls_active;
1418 lr->lr_param = start->ls_flags;
1419 lr->lr_async_windows = bk->lb_async_windows;
1420 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
1423 laia->laia_com = NULL;
1424 laia->laia_ltds = ltds;
1426 laia->laia_result = 0;
1427 laia->laia_shared = 1;
1429 down_read(&ltds->ltd_rw_sem);
1430 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1431 ltd = lfsck_tgt_get(ltds, idx);
1432 LASSERT(ltd != NULL);
1434 laia->laia_ltd = ltd;
1435 ltd->ltd_layout_done = 0;
1436 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1437 lfsck_async_interpret, laia,
1440 lfsck_interpret(env, lfsck, NULL, laia, rc);
1442 if (bk->lb_param & LPF_FAILOUT) {
1443 CERROR("%s: cannot notify MDT %x for LFSCK "
1444 "start, failout: rc = %d\n",
1445 lfsck_lfsck2name(lfsck), idx, rc);
1448 CWARN("%s: cannot notify MDT %x for LFSCK "
1449 "start, partly scan: rc = %d\n",
1450 lfsck_lfsck2name(lfsck), idx, rc);
1455 up_read(&ltds->ltd_rw_sem);
1458 ptlrpc_set_destroy(set);
1463 rc = ptlrpc_set_wait(set);
1464 ptlrpc_set_destroy(set);
1467 rc = laia->laia_result;
1470 if (bk->lb_param & LPF_FAILOUT) {
1471 struct lfsck_stop *stop = &info->lti_stop;
1473 CERROR("%s: cannot start LFSCK on some MDTs, "
1474 "stop all: rc = %d\n",
1475 lfsck_lfsck2name(lfsck), rc);
1476 if (rc != -EALREADY) {
1477 stop->ls_status = LS_FAILED;
1478 stop->ls_flags = LPF_ALL_TGT | LPF_BROADCAST;
1479 lfsck_stop_all(env, lfsck, stop);
1482 CWARN("%s: cannot start LFSCK on some MDTs, "
1483 "partly scan: rc = %d\n",
1484 lfsck_lfsck2name(lfsck), rc);
/*
 * Start (or resume) LFSCK on the device referred to by \a key.
 *
 * If \a lsp->lsp_start is NULL this is an auto trigger that resumes a
 * previously paused scan; otherwise the user-supplied parameters are
 * merged into the in-memory bookmark, the requested component types are
 * moved from the idle list onto the scan list, and the LFSCK master
 * engine thread is spawned.
 *
 * NOTE(review): this extract is missing a number of original source
 * lines (declarations of rc/valid/flags/type, several braces and GOTO
 * labels), so the comments below only describe what the visible code
 * demonstrates.
 *
 * \param[in] env	thread execution environment
 * \param[in] key	bottom dt_device identifying the LFSCK instance
 * \param[in] lsp	start parameters; lsp->lsp_start may be NULL
 *			(auto trigger of a paused LFSCK)
 *
 * \retval 0 on success (positive internal codes are squashed to 0)
 * \retval negative errno on failure
 */
1492 int lfsck_start(const struct lu_env *env, struct dt_device *key,
1493 struct lfsck_start_param *lsp)
1495 struct lfsck_start *start = lsp->lsp_start;
1496 struct lfsck_instance *lfsck;
1497 struct lfsck_bookmark *bk;
1498 struct ptlrpc_thread *thread;
1499 struct lfsck_component *com;
1500 struct l_wait_info lwi = { 0 };
1501 struct lfsck_thread_args *lta;
/* Takes a reference on the instance; released via lfsck_instance_put()
 * before returning. */
1509 lfsck = lfsck_instance_find(key, true, false);
1510 if (unlikely(lfsck == NULL))
1513 /* System is not ready, try again later. */
1514 if (unlikely(lfsck->li_namespace == NULL))
1515 GOTO(put, rc = -EAGAIN);
1517 /* start == NULL means auto trigger paused LFSCK. */
1518 if ((start == NULL) &&
1519 (cfs_list_empty(&lfsck->li_list_scan) ||
1520 OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_AUTO)))
1523 bk = &lfsck->li_bookmark_ram;
1524 thread = &lfsck->li_thread;
1525 mutex_lock(&lfsck->li_mutex);
1526 spin_lock(&lfsck->li_lock);
/* Engine thread already running: join the in-flight scan for the
 * requested component types instead of starting a new one. */
1527 if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
1529 while (start->ls_active != 0) {
1530 if (!(type & start->ls_active)) {
1535 com = __lfsck_component_find(lfsck, type,
1536 &lfsck->li_list_scan);
1538 com = __lfsck_component_find(lfsck, type,
1539 &lfsck->li_list_double_scan);
1545 if (com->lc_ops->lfsck_join != NULL) {
1546 rc = com->lc_ops->lfsck_join( env, com, lsp);
1547 if (rc != 0 && rc != -EALREADY)
1550 start->ls_active &= ~type;
1553 spin_unlock(&lfsck->li_lock);
1556 spin_unlock(&lfsck->li_lock);
/* Reset per-run state under li_mutex before (re)starting. */
1558 lfsck->li_status = 0;
1559 lfsck->li_oit_over = 0;
1560 lfsck->li_start_unplug = 0;
1561 lfsck->li_drop_dryrun = 0;
1562 lfsck->li_new_scanned = 0;
1564 /* For auto trigger. */
/* Broadcast ('-A' / '-o') start is only allowed on the master (MDS). */
1568 if (start->ls_flags & LPF_BROADCAST && !lfsck->li_master) {
1569 CERROR("%s: only allow to specify '-A | -o' via MDS\n",
1570 lfsck_lfsck2name(lfsck));
1572 GOTO(out, rc = -EPERM);
/* Merge the user-visible start options into the bookmark; each
 * LPF_* flag toggle below marks the bookmark dirty for storing. */
1575 start->ls_version = bk->lb_version;
1576 if (start->ls_valid & LSV_SPEED_LIMIT) {
1577 __lfsck_set_speed(lfsck, start->ls_speed_limit);
1581 if (start->ls_valid & LSV_ASYNC_WINDOWS &&
1582 bk->lb_async_windows != start->ls_async_windows) {
1583 bk->lb_async_windows = start->ls_async_windows;
1587 if (start->ls_valid & LSV_ERROR_HANDLE) {
1588 valid |= DOIV_ERROR_HANDLE;
1589 if (start->ls_flags & LPF_FAILOUT)
1590 flags |= DOIF_FAILOUT;
1592 if ((start->ls_flags & LPF_FAILOUT) &&
1593 !(bk->lb_param & LPF_FAILOUT)) {
1594 bk->lb_param |= LPF_FAILOUT;
1596 } else if (!(start->ls_flags & LPF_FAILOUT) &&
1597 (bk->lb_param & LPF_FAILOUT)) {
1598 bk->lb_param &= ~LPF_FAILOUT;
1603 if (start->ls_valid & LSV_DRYRUN) {
1604 valid |= DOIV_DRYRUN;
1605 if (start->ls_flags & LPF_DRYRUN)
1606 flags |= DOIF_DRYRUN;
1608 if ((start->ls_flags & LPF_DRYRUN) &&
1609 !(bk->lb_param & LPF_DRYRUN)) {
1610 bk->lb_param |= LPF_DRYRUN;
1612 } else if (!(start->ls_flags & LPF_DRYRUN) &&
1613 (bk->lb_param & LPF_DRYRUN)) {
1614 bk->lb_param &= ~LPF_DRYRUN;
/* Leaving dryrun mode: remember to drop the dryrun-collected data. */
1615 lfsck->li_drop_dryrun = 1;
1620 if (bk->lb_param & LPF_ALL_TGT &&
1621 !(start->ls_flags & LPF_ALL_TGT)) {
1622 bk->lb_param &= ~LPF_ALL_TGT;
1624 } else if (!(bk->lb_param & LPF_ALL_TGT) &&
1625 start->ls_flags & LPF_ALL_TGT) {
1626 bk->lb_param |= LPF_ALL_TGT;
1630 if (bk->lb_param & LPF_ORPHAN &&
1631 !(start->ls_flags & LPF_ORPHAN)) {
1632 bk->lb_param &= ~LPF_ORPHAN;
1634 } else if (!(bk->lb_param & LPF_ORPHAN) &&
1635 start->ls_flags & LPF_ORPHAN) {
1636 bk->lb_param |= LPF_ORPHAN;
/* Persist the (possibly updated) bookmark. */
1641 rc = lfsck_bookmark_store(env, lfsck);
1646 if (start->ls_flags & LPF_RESET)
1647 flags |= DOIF_RESET;
/* Activate the requested component types: post() components that are
 * no longer wanted, and move newly requested ones from the idle list
 * to the scan list. */
1649 if (start->ls_active != 0) {
1650 struct lfsck_component *next;
1652 if (start->ls_active == LFSCK_TYPES_ALL)
1653 start->ls_active = LFSCK_TYPES_SUPPORTED;
1655 if (start->ls_active & ~LFSCK_TYPES_SUPPORTED) {
/* Report the unsupported bits back to the caller. */
1656 start->ls_active &= ~LFSCK_TYPES_SUPPORTED;
1657 GOTO(out, rc = -ENOTSUPP);
1660 cfs_list_for_each_entry_safe(com, next,
1661 &lfsck->li_list_scan, lc_link) {
1662 if (!(com->lc_type & start->ls_active)) {
1663 rc = com->lc_ops->lfsck_post(env, com, 0,
1670 while (start->ls_active != 0) {
1671 if (type & start->ls_active) {
1672 com = __lfsck_component_find(lfsck, type,
1673 &lfsck->li_list_idle);
1675 /* The component status will be updated
1676 * when its prep() is called later by
1677 * the LFSCK main engine. */
1678 cfs_list_del_init(&com->lc_link);
1679 cfs_list_add_tail(&com->lc_link,
1680 &lfsck->li_list_scan);
1682 start->ls_active &= ~type;
1688 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
1689 start->ls_active |= com->lc_type;
1690 if (flags & DOIF_RESET) {
1691 rc = com->lc_ops->lfsck_reset(env, com, false);
/* Compute the directory/OI-table iteration arguments from the
 * accumulated valid/flags bits. */
1698 lfsck->li_args_dir = LUDA_64BITHASH | LUDA_VERIFY;
1699 if (bk->lb_param & LPF_DRYRUN) {
1700 lfsck->li_args_dir |= LUDA_VERIFY_DRYRUN;
1701 valid |= DOIV_DRYRUN;
1702 flags |= DOIF_DRYRUN;
1705 if (bk->lb_param & LPF_FAILOUT) {
1706 valid |= DOIV_ERROR_HANDLE;
1707 flags |= DOIF_FAILOUT;
1710 if (!cfs_list_empty(&lfsck->li_list_scan))
1711 flags |= DOIF_OUTUSED;
1713 lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
/* Spawn the master engine thread, then wait until it reports either
 * running or stopped before continuing. */
1714 thread_set_flags(thread, 0);
1715 lta = lfsck_thread_args_init(lfsck, NULL, lsp);
1717 GOTO(out, rc = PTR_ERR(lta));
1719 rc = PTR_ERR(kthread_run(lfsck_master_engine, lta, "lfsck"));
1720 if (IS_ERR_VALUE(rc)) {
1721 CERROR("%s: cannot start LFSCK thread: rc = %ld\n",
1722 lfsck_lfsck2name(lfsck), rc);
/* On kthread_run() failure the args are not consumed; free them. */
1723 lfsck_thread_args_fini(lta);
1728 l_wait_event(thread->t_ctl_waitq,
1729 thread_is_running(thread) ||
1730 thread_is_stopped(thread),
/* Non-broadcast start: unplug the engine immediately. */
1732 if (start == NULL || !(start->ls_flags & LPF_BROADCAST)) {
1733 lfsck->li_start_unplug = 1;
1734 wake_up_all(&thread->t_ctl_waitq);
1739 /* release lfsck::li_mutex to avoid deadlock. */
1740 mutex_unlock(&lfsck->li_mutex);
/* Broadcast start: notify the other targets, then unplug (or, on
 * failure, mark the engine SVC_STOPPING and wait for it to exit). */
1741 rc = lfsck_start_all(env, lfsck, start);
1743 spin_lock(&lfsck->li_lock);
1744 if (thread_is_stopped(thread)) {
1745 spin_unlock(&lfsck->li_lock);
1747 lfsck->li_status = LS_FAILED;
1748 lfsck->li_flags = 0;
1749 thread_set_flags(thread, SVC_STOPPING);
1750 spin_unlock(&lfsck->li_lock);
1752 lfsck->li_start_unplug = 1;
1753 wake_up_all(&thread->t_ctl_waitq);
1754 l_wait_event(thread->t_ctl_waitq,
1755 thread_is_stopped(thread),
1759 lfsck->li_start_unplug = 1;
1760 wake_up_all(&thread->t_ctl_waitq);
1766 mutex_unlock(&lfsck->li_mutex);
1769 lfsck_instance_put(env, lfsck);
1771 return rc < 0 ? rc : 0;
1773 EXPORT_SYMBOL(lfsck_start);
/*
 * Stop LFSCK on the device referred to by \a key.
 *
 * When \a stop requests LPF_BROADCAST the stop is first propagated to
 * all targets via lfsck_stop_all() (master only), then the local master
 * engine thread is flagged SVC_STOPPING and we wait for it to exit.
 *
 * NOTE(review): several original lines (rc/rc1 declarations, some
 * braces and GOTO labels) are missing from this extract.
 *
 * \param[in] env	thread execution environment
 * \param[in] key	bottom dt_device identifying the LFSCK instance
 * \param[in] stop	stop parameters, or NULL for default LS_STOPPED
 *
 * \retval 0 on success, negative errno on failure (rc1 carries the
 *	   broadcast result when the local stop itself succeeded)
 */
1775 int lfsck_stop(const struct lu_env *env, struct dt_device *key,
1776 struct lfsck_stop *stop)
1778 struct lfsck_instance *lfsck;
1779 struct ptlrpc_thread *thread;
1780 struct l_wait_info lwi = { 0 };
1785 lfsck = lfsck_instance_find(key, true, false);
1786 if (unlikely(lfsck == NULL))
1789 thread = &lfsck->li_thread;
1790 /* release lfsck::li_mutex to avoid deadlock. */
1791 if (stop != NULL && stop->ls_flags & LPF_BROADCAST) {
/* '-A' (broadcast) stop is only allowed on the master (MDS). */
1792 if (!lfsck->li_master) {
1793 CERROR("%s: only allow to specify '-A' via MDS\n",
1794 lfsck_lfsck2name(lfsck));
1796 GOTO(out, rc = -EPERM);
1799 rc1 = lfsck_stop_all(env, lfsck, stop);
1802 mutex_lock(&lfsck->li_mutex);
1803 spin_lock(&lfsck->li_lock);
/* Nothing running: init or already stopped. */
1804 if (thread_is_init(thread) || thread_is_stopped(thread)) {
1805 spin_unlock(&lfsck->li_lock);
1806 GOTO(out, rc = -EALREADY);
/* Record the requested (or default) final status/flags, then ask the
 * engine thread to stop and wait until it does. */
1810 lfsck->li_status = stop->ls_status;
1811 lfsck->li_flags = stop->ls_flags;
1813 lfsck->li_status = LS_STOPPED;
1814 lfsck->li_flags = 0;
1817 thread_set_flags(thread, SVC_STOPPING);
1818 spin_unlock(&lfsck->li_lock);
1820 wake_up_all(&thread->t_ctl_waitq);
1821 l_wait_event(thread->t_ctl_waitq,
1822 thread_is_stopped(thread),
1828 mutex_unlock(&lfsck->li_mutex);
1829 lfsck_instance_put(env, lfsck);
1831 return rc != 0 ? rc : rc1;
1833 EXPORT_SYMBOL(lfsck_stop);
/*
 * Handle an incoming LFSCK event notification (RPC-driven).
 *
 * Dispatches on lr->lr_event: start/stop requests are translated into
 * local lfsck_start()/lfsck_stop() calls (with LPF_BROADCAST stripped
 * so they are not re-broadcast); phase-done / fid-accessed / peer-exit
 * events are forwarded to the matching component's lfsck_in_notify op.
 *
 * NOTE(review): the case labels for the start/stop events and the final
 * return are among the lines missing from this extract.
 *
 * \retval result of the dispatched handler, or -EOPNOTSUPP for an
 *	   unrecognized event
 */
1835 int lfsck_in_notify(const struct lu_env *env, struct dt_device *key,
1836 struct lfsck_request *lr)
1838 int rc = -EOPNOTSUPP;
1841 switch (lr->lr_event) {
/* Translate the wire request into local start parameters; the
 * lti_start scratch buffer comes from the per-thread env info. */
1843 struct lfsck_start *start = &lfsck_env_info(env)->lti_start;
1844 struct lfsck_start_param lsp;
1846 memset(start, 0, sizeof(*start));
1847 start->ls_valid = lr->lr_valid;
1848 start->ls_speed_limit = lr->lr_speed;
1849 start->ls_version = lr->lr_version;
1850 start->ls_active = lr->lr_active;
/* Strip LPF_BROADCAST: this node must not re-broadcast the start. */
1851 start->ls_flags = lr->lr_param & ~LPF_BROADCAST;
1852 start->ls_async_windows = lr->lr_async_windows;
1854 lsp.lsp_start = start;
1855 lsp.lsp_index = lr->lr_index;
1856 lsp.lsp_index_valid = 1;
1857 rc = lfsck_start(env, key, &lsp);
/* Translate the wire request into a local stop. */
1861 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
1863 memset(stop, 0, sizeof(*stop));
1864 stop->ls_status = lr->lr_status;
1865 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
1866 rc = lfsck_stop(env, key, stop);
1869 case LE_PHASE1_DONE:
1870 case LE_PHASE2_DONE:
1871 case LE_FID_ACCESSED:
1872 case LE_PEER_EXIT: {
/* Forward component-level events to the component named by
 * lr_active; both instance and component references are dropped. */
1873 struct lfsck_instance *lfsck;
1874 struct lfsck_component *com;
1876 lfsck = lfsck_instance_find(key, true, false);
1877 if (unlikely(lfsck == NULL))
1880 com = lfsck_component_find(lfsck, lr->lr_active);
1881 if (likely(com != NULL)) {
1882 rc = com->lc_ops->lfsck_in_notify(env, com, lr);
1883 lfsck_component_put(env, com);
1886 lfsck_instance_put(env, lfsck);
1895 EXPORT_SYMBOL(lfsck_in_notify);
/*
 * Query the status of the LFSCK component named by lr->lr_active.
 *
 * Looks up the instance for \a key and delegates to the component's
 * lfsck_query op; all references taken here are dropped before return.
 *
 * NOTE(review): the error-return lines for the "not found" paths are
 * missing from this extract.
 *
 * \retval component status from lc_ops->lfsck_query on success,
 *	   negative errno otherwise
 */
1897 int lfsck_query(const struct lu_env *env, struct dt_device *key,
1898 struct lfsck_request *lr)
1900 struct lfsck_instance *lfsck;
1901 struct lfsck_component *com;
1905 lfsck = lfsck_instance_find(key, true, false);
1906 if (unlikely(lfsck == NULL))
1909 com = lfsck_component_find(lfsck, lr->lr_active);
1910 if (likely(com != NULL)) {
1911 rc = com->lc_ops->lfsck_query(env, com);
1912 lfsck_component_put(env, com);
1917 lfsck_instance_put(env, lfsck);
1921 EXPORT_SYMBOL(lfsck_query);
/*
 * Attach the LDLM namespace \a ns to the LFSCK instance for \a key.
 *
 * lfsck_start() refuses to run until li_namespace is set (it returns
 * -EAGAIN), so this acts as the "system ready" signal.
 *
 * NOTE(review): the return statement is among the missing lines.
 */
1923 int lfsck_register_namespace(const struct lu_env *env, struct dt_device *key,
1924 struct ldlm_namespace *ns)
1926 struct lfsck_instance *lfsck;
1929 lfsck = lfsck_instance_find(key, true, false);
1930 if (likely(lfsck != NULL)) {
1931 lfsck->li_namespace = ns;
1932 lfsck_instance_put(env, lfsck);
1938 EXPORT_SYMBOL(lfsck_register_namespace);
/*
 * Create and register an LFSCK instance for the device \a key.
 *
 * Allocates the instance, initializes its locks/lists/refcounts, wires
 * up the local OID storage and the OI-table iterator object, sets up
 * the bookmark / namespace / layout components, and finally publishes
 * the instance and adopts any targets parked on the global orphan
 * lists. Cleanup on failure goes through lfsck_instance_cleanup().
 *
 * NOTE(review): many error-check lines (GOTO targets, IS_ERR tests,
 * the `if (master)` guard around the li_master block) are missing from
 * this extract; comments describe only the visible statements.
 *
 * \param[in] env		thread execution environment
 * \param[in] key		bottom dt_device the instance is bound to
 * \param[in] next		next device in the stack
 * \param[in] obd		associated OBD device
 * \param[in] notify		out-notify callback for LFSCK events
 * \param[in] notify_data	opaque cookie passed to \a notify
 * \param[in] master		whether this is the master (MDS) instance
 *
 * \retval 0 on success, negative errno on failure
 */
1940 int lfsck_register(const struct lu_env *env, struct dt_device *key,
1941 struct dt_device *next, struct obd_device *obd,
1942 lfsck_out_notify notify, void *notify_data, bool master)
1944 struct lfsck_instance *lfsck;
1945 struct dt_object *root = NULL;
1946 struct dt_object *obj;
1947 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
/* Refuse double registration for the same device. */
1951 lfsck = lfsck_instance_find(key, false, false);
1952 if (unlikely(lfsck != NULL))
1955 OBD_ALLOC_PTR(lfsck);
1959 mutex_init(&lfsck->li_mutex);
1960 spin_lock_init(&lfsck->li_lock);
1961 CFS_INIT_LIST_HEAD(&lfsck->li_link);
1962 CFS_INIT_LIST_HEAD(&lfsck->li_list_scan);
1963 CFS_INIT_LIST_HEAD(&lfsck->li_list_dir);
1964 CFS_INIT_LIST_HEAD(&lfsck->li_list_double_scan);
1965 CFS_INIT_LIST_HEAD(&lfsck->li_list_idle);
1966 atomic_set(&lfsck->li_ref, 1);
1967 atomic_set(&lfsck->li_double_scan_count, 0);
1968 init_waitqueue_head(&lfsck->li_thread.t_ctl_waitq);
1969 lfsck->li_out_notify = notify;
1970 lfsck->li_out_notify_data = notify_data;
1971 lfsck->li_next = next;
1972 lfsck->li_bottom = key;
1973 lfsck->li_obd = obd;
/* Per-instance OST and MDT target descriptor tables. */
1975 rc = lfsck_tgt_descs_init(&lfsck->li_ost_descs);
1979 rc = lfsck_tgt_descs_init(&lfsck->li_mdt_descs);
1983 fid->f_seq = FID_SEQ_LOCAL_NAME;
1986 rc = local_oid_storage_init(env, lfsck->li_bottom, fid, &lfsck->li_los);
/* Locate the backend root and remember its FID. */
1990 rc = dt_root_get(env, key, fid);
1994 root = dt_locate(env, lfsck->li_bottom, fid);
1996 GOTO(out, rc = PTR_ERR(root));
1998 if (unlikely(!dt_try_as_dir(env, root)))
1999 GOTO(out, rc = -ENOTDIR);
2001 lfsck->li_local_root_fid = *fid;
/* presumably guarded by `if (master)` on a missing line — TODO confirm */
2003 lfsck->li_master = 1;
2004 if (lfsck_dev_idx(lfsck->li_bottom) == 0) {
2005 rc = dt_lookup(env, root,
2006 (struct dt_rec *)(&lfsck->li_global_root_fid),
2007 (const struct dt_key *)"ROOT", BYPASS_CAPA);
/* Locate the OI-table iterator object used by the OIT-based scan. */
2013 fid->f_seq = FID_SEQ_LOCAL_FILE;
2014 fid->f_oid = OTABLE_IT_OID;
2016 obj = dt_locate(env, lfsck->li_bottom, fid);
2018 GOTO(out, rc = PTR_ERR(obj));
2020 lfsck->li_obj_oit = obj;
2021 rc = obj->do_ops->do_index_try(env, obj, &dt_otable_features);
2023 if (rc == -ENOTSUPP)
/* Component setup: bookmark first, then namespace and layout. */
2029 rc = lfsck_bookmark_setup(env, lfsck);
2034 rc = lfsck_namespace_setup(env, lfsck);
2039 rc = lfsck_layout_setup(env, lfsck);
2043 /* XXX: more LFSCK components initialization to be added here. */
2046 rc = lfsck_instance_add(lfsck);
/* Adopt targets that registered before this instance existed. */
2048 rc = lfsck_add_target_from_orphan(env, lfsck);
2050 if (root != NULL && !IS_ERR(root))
2051 lu_object_put(env, &root->do_lu);
2053 lfsck_instance_cleanup(env, lfsck);
2056 EXPORT_SYMBOL(lfsck_register);
/*
 * Deregister the LFSCK instance bound to \a key.
 *
 * The find with unlink=true removes the instance from the global list;
 * dropping the reference triggers the actual cleanup once the last
 * user is gone. (The NULL check after find is among the missing lines.)
 */
2058 void lfsck_degister(const struct lu_env *env, struct dt_device *key)
2060 struct lfsck_instance *lfsck;
2062 lfsck = lfsck_instance_find(key, false, true);
2064 lfsck_instance_put(env, lfsck);
2066 EXPORT_SYMBOL(lfsck_degister);
/*
 * Register a target (OST or MDT) with the LFSCK instance for \a key.
 *
 * A new lfsck_tgt_desc is initialized for the target; if the LFSCK
 * instance does not exist yet, the descriptor is parked on the global
 * OST/MDT orphan list (to be adopted later by lfsck_register() via
 * lfsck_add_target_from_orphan()); otherwise it is attached directly
 * through __lfsck_add_target().
 *
 * NOTE(review): the descriptor allocation and the for_ost/else branch
 * lines are missing from this extract. The `&ltd->...` expressions had
 * been corrupted to `<d->...` by `&lt;` entity mangling and are
 * restored here.
 *
 * \param[in] env	thread execution environment
 * \param[in] key	bottom dt_device identifying the LFSCK instance
 * \param[in] tgt	the target device being added
 * \param[in] exp	export used to communicate with the target
 * \param[in] index	target index
 * \param[in] for_ost	true for an OST target, false for an MDT target
 *
 * \retval 0 on success, negative errno on failure
 */
2068 int lfsck_add_target(const struct lu_env *env, struct dt_device *key,
2069 struct dt_device *tgt, struct obd_export *exp,
2070 __u32 index, bool for_ost)
2072 struct lfsck_instance *lfsck;
2073 struct lfsck_tgt_desc *ltd;
2084 INIT_LIST_HEAD(&ltd->ltd_orphan_list);
2085 INIT_LIST_HEAD(&ltd->ltd_layout_list);
2086 INIT_LIST_HEAD(&ltd->ltd_layout_phase_list);
2087 atomic_set(&ltd->ltd_ref, 1);
2088 ltd->ltd_index = index;
2090 spin_lock(&lfsck_instance_lock);
2091 lfsck = __lfsck_instance_find(key, true, false);
2092 if (lfsck == NULL) {
/* No instance yet: park the descriptor on the matching orphan list. */
2094 list_add_tail(&ltd->ltd_orphan_list,
2095 &lfsck_ost_orphan_list);
2097 list_add_tail(&ltd->ltd_orphan_list,
2098 &lfsck_mdt_orphan_list);
2099 spin_unlock(&lfsck_instance_lock);
2103 spin_unlock(&lfsck_instance_lock);
2105 rc = __lfsck_add_target(env, lfsck, ltd, for_ost, false);
2109 lfsck_instance_put(env, lfsck);
2113 EXPORT_SYMBOL(lfsck_add_target);
/*
 * Deregister a target (OST or MDT) from the LFSCK instance for \a key.
 *
 * First checks the global orphan lists (target registered before any
 * instance existed); otherwise clears the target's slot in the
 * instance's OST/MDT descriptor table under ltd_rw_sem, removes it
 * from the per-instance orphan list, and notifies the layout component
 * that the target is gone.
 *
 * NOTE(review): this extract is missing several lines (GOTO labels,
 * if/else keywords, reference drops). Two fixes applied here:
 *  - the `&ltd`/`&ltds` expressions had been corrupted to `<d`/`<ds`
 *    by `&lt;` entity mangling and are restored;
 *  - the second per-instance orphan-list head (line 2170, the non-OST
 *    branch) pointed at li_ost_descs like the first — a copy-paste
 *    bug; it now uses li_mdt_descs to match the ost/mdt pairing used
 *    everywhere else in this function (lines 2124/2126, 2145/2147).
 *
 * \param[in] env	thread execution environment
 * \param[in] key	bottom dt_device identifying the LFSCK instance
 * \param[in] tgt	the target device being removed
 * \param[in] index	target index
 * \param[in] for_ost	true for an OST target, false for an MDT target
 */
2115 void lfsck_del_target(const struct lu_env *env, struct dt_device *key,
2116 struct dt_device *tgt, __u32 index, bool for_ost)
2118 struct lfsck_instance *lfsck;
2119 struct lfsck_tgt_descs *ltds;
2120 struct lfsck_tgt_desc *ltd = NULL;
2121 struct list_head *head;
2124 head = &lfsck_ost_orphan_list;
2126 head = &lfsck_mdt_orphan_list;
/* Target may still sit on the global orphan list: just unlink it. */
2128 spin_lock(&lfsck_instance_lock);
2129 list_for_each_entry(ltd, head, ltd_orphan_list) {
2130 if (ltd->ltd_tgt == tgt) {
2131 list_del_init(&ltd->ltd_orphan_list);
2132 spin_unlock(&lfsck_instance_lock);
2139 lfsck = __lfsck_instance_find(key, true, false);
2140 spin_unlock(&lfsck_instance_lock);
2141 if (unlikely(lfsck == NULL))
2145 ltds = &lfsck->li_ost_descs;
2147 ltds = &lfsck->li_mdt_descs;
/* Clear the target's slot in the descriptor table. */
2149 down_write(&ltds->ltd_rw_sem);
2150 LASSERT(ltds->ltd_tgts_bitmap != NULL);
2152 if (unlikely(index >= ltds->ltd_tgts_bitmap->size))
2155 ltd = LTD_TGT(ltds, index);
2156 if (unlikely(ltd == NULL))
2159 LASSERT(ltds->ltd_tgtnr > 0);
2162 cfs_bitmap_clear(ltds->ltd_tgts_bitmap, index);
2163 LTD_TGT(ltds, index) = NULL;
/* Also drop it from the per-instance orphan list if present. */
2168 head = &lfsck->li_ost_descs.ltd_orphan;
2170 head = &lfsck->li_mdt_descs.ltd_orphan;
2172 list_for_each_entry(ltd, head, ltd_orphan_list) {
2173 if (ltd->ltd_tgt == tgt) {
2174 list_del_init(&ltd->ltd_orphan_list);
2180 up_write(&ltds->ltd_rw_sem);
2182 spin_lock(&ltds->ltd_lock);
2184 spin_unlock(&ltds->ltd_lock);
/* Tell the layout component the target has left. */
2185 lfsck_stop_notify(env, lfsck, ltds, ltd, LT_LAYOUT);
2189 lfsck_instance_put(env, lfsck);
2191 EXPORT_SYMBOL(lfsck_del_target);
/*
 * Module init: set up the global orphan lists, register the per-thread
 * context key, and hook the LFSCK notify/query callbacks into the
 * target layer. (The `int rc` declaration and the return are among the
 * missing lines; the callbacks appear to be registered unconditionally
 * after the key registration — TODO confirm against the full source.)
 */
2193 static int __init lfsck_init(void)
2197 INIT_LIST_HEAD(&lfsck_ost_orphan_list);
2198 INIT_LIST_HEAD(&lfsck_mdt_orphan_list);
2199 lfsck_key_init_generic(&lfsck_thread_key, NULL);
2200 rc = lu_context_key_register(&lfsck_thread_key);
2202 tgt_register_lfsck_in_notify(lfsck_in_notify);
2203 tgt_register_lfsck_query(lfsck_query);
/*
 * Module exit: all instances must already be gone; drain any target
 * descriptors still parked on the global OST/MDT orphan lists, then
 * deregister the per-thread context key.
 *
 * NOTE(review): the loop bodies' descriptor-free calls and closing
 * braces are among the missing lines. The `&ltd->ltd_orphan_list`
 * expressions had been corrupted to `<d->ltd_orphan_list` by `&lt;`
 * entity mangling and are restored here.
 */
2209 static void __exit lfsck_exit(void)
2211 struct lfsck_tgt_desc *ltd;
2212 struct lfsck_tgt_desc *next;
2214 LASSERT(cfs_list_empty(&lfsck_instance_list));
2216 list_for_each_entry_safe(ltd, next, &lfsck_ost_orphan_list,
2218 list_del_init(&ltd->ltd_orphan_list);
2222 list_for_each_entry_safe(ltd, next, &lfsck_mdt_orphan_list,
2224 list_del_init(&ltd->ltd_orphan_list);
2228 lu_context_key_degister(&lfsck_thread_key);
/* Module metadata; cfs_module() wires lfsck_init/lfsck_exit as the
 * module entry and exit hooks. */
2231 MODULE_AUTHOR("Intel Corporation <http://www.intel.com/>");
2232 MODULE_DESCRIPTION("LFSCK");
2233 MODULE_LICENSE("GPL");
2235 cfs_module(lfsck, LUSTRE_VERSION_STRING, lfsck_init, lfsck_exit);