4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2012, 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_lib.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <libcfs/list.h>
34 #include <lu_object.h>
35 #include <dt_object.h>
36 #include <md_object.h>
37 #include <lustre_fld.h>
38 #include <lustre_lib.h>
39 #include <lustre_net.h>
40 #include <lustre_lfsck.h>
41 #include <lustre/lustre_lfsck_user.h>
43 #include "lfsck_internal.h"
45 /* Define the per-thread LFSCK context key; LU_KEY_INIT generates the
 * allocator/initializer for struct lfsck_thread_info bound to the key. */
46 LU_KEY_INIT(lfsck, struct lfsck_thread_info);
/* Release the per-thread LFSCK context data when the context key is
 * finalized for a given lu_context. */
48 static void lfsck_key_fini(const struct lu_context *ctx,
49 struct lu_context_key *key, void *data)
51 struct lfsck_thread_info *info = data;
/* free the buffer used for link EA handling before freeing @info */
53 lu_buf_free(&info->lti_linkea_buf);
/* Register the LFSCK context key for both MD and DT thread contexts. */
57 LU_CONTEXT_KEY_DEFINE(lfsck, LCT_MD_THREAD | LCT_DT_THREAD);
58 LU_KEY_INIT_GENERIC(lfsck);
/* All lfsck_instance objects in the system, plus targets that showed up
 * before their LFSCK instance registered ("orphans"). All three lists
 * are protected by lfsck_instance_lock. */
60 static CFS_LIST_HEAD(lfsck_instance_list);
61 static struct list_head lfsck_ost_orphan_list;
62 static struct list_head lfsck_mdt_orphan_list;
63 static DEFINE_SPINLOCK(lfsck_instance_lock);
/* Printable names indexed by enum lfsck_status; used by
 * lfsck_status2names() below. */
65 static const char *lfsck_status_names[] = {
67 [LS_SCANNING_PHASE1] = "scanning-phase1",
68 [LS_SCANNING_PHASE2] = "scanning-phase2",
69 [LS_COMPLETED] = "completed",
70 [LS_FAILED] = "failed",
71 [LS_STOPPED] = "stopped",
72 [LS_PAUSED] = "paused",
73 [LS_CRASHED] = "crashed",
74 [LS_PARTIAL] = "partial",
75 [LS_CO_FAILED] = "co-failed",
76 [LS_CO_STOPPED] = "co-stopped",
77 [LS_CO_PAUSED] = "co-paused"
/* Printable names for the LFSCK flags bits. */
80 const char *lfsck_flags_names[] = {
/* Printable names for the LFSCK parameter bits. */
89 const char *lfsck_param_names[] = {
/* Map an lfsck_status value to its printable name; values outside
 * [0, LS_MAX) are rejected before indexing the name table. */
96 const char *lfsck_status2names(enum lfsck_status status)
98 if (unlikely(status < 0 || status >= LS_MAX))
101 return lfsck_status_names[status];
/* Initialize a target descriptor table: its locks, orphan list, and the
 * initial target bitmap (BITS_PER_LONG slots, grown on demand by
 * __lfsck_add_target()). Fix: restore '&' operators that were corrupted
 * by HTML-entity decoding ("&lt..." -> "<..."). */
104 static int lfsck_tgt_descs_init(struct lfsck_tgt_descs *ltds)
106 spin_lock_init(&ltds->ltd_lock);
107 init_rwsem(&ltds->ltd_rw_sem);
108 INIT_LIST_HEAD(&ltds->ltd_orphan);
109 ltds->ltd_tgts_bitmap = CFS_ALLOCATE_BITMAP(BITS_PER_LONG);
110 if (ltds->ltd_tgts_bitmap == NULL)
/* Tear down a target descriptor table: drop any orphan targets, clear
 * and release every registered target slot, then free the index blocks
 * and the bitmap. Fix: restore '&' operators corrupted by HTML-entity
 * decoding ("&lt..." -> "<..."). */
116 static void lfsck_tgt_descs_fini(struct lfsck_tgt_descs *ltds)
118 struct lfsck_tgt_desc *ltd;
119 struct lfsck_tgt_desc *next;
122 down_write(&ltds->ltd_rw_sem);
124 list_for_each_entry_safe(ltd, next, &ltds->ltd_orphan,
126 list_del_init(&ltd->ltd_orphan_list);
130 if (unlikely(ltds->ltd_tgts_bitmap == NULL)) {
131 up_write(&ltds->ltd_rw_sem);
136 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
137 ltd = LTD_TGT(ltds, idx);
138 if (likely(ltd != NULL)) {
/* a target must not be released while still on a layout-scan list */
139 LASSERT(list_empty(&ltd->ltd_layout_list));
142 cfs_bitmap_clear(ltds->ltd_tgts_bitmap, idx);
143 LTD_TGT(ltds, idx) = NULL;
148 LASSERTF(ltds->ltd_tgtnr == 0, "tgt count unmatched: %d\n",
/* free the second-level index blocks allocated by __lfsck_add_target() */
151 for (idx = 0; idx < TGT_PTRS; idx++) {
152 if (ltds->ltd_tgts_idx[idx] != NULL) {
153 OBD_FREE_PTR(ltds->ltd_tgts_idx[idx]);
154 ltds->ltd_tgts_idx[idx] = NULL;
158 CFS_FREE_BITMAP(ltds->ltd_tgts_bitmap);
159 ltds->ltd_tgts_bitmap = NULL;
160 up_write(&ltds->ltd_rw_sem);
/* Register target @ltd (index ltd->ltd_index) in the OST or MDT
 * descriptor table of @lfsck, growing the table bitmap as needed.
 * Returns -EEXIST if the index is already registered, -ENOMEM on
 * allocation failure. Fix: restore '&' operators corrupted by
 * HTML-entity decoding ("&lt..." -> "<..."). */
163 static int __lfsck_add_target(const struct lu_env *env,
164 struct lfsck_instance *lfsck,
165 struct lfsck_tgt_desc *ltd,
166 bool for_ost, bool locked)
168 struct lfsck_tgt_descs *ltds;
169 __u32 index = ltd->ltd_index;
174 ltds = &lfsck->li_ost_descs;
176 ltds = &lfsck->li_mdt_descs;
179 down_write(&ltds->ltd_rw_sem);
181 LASSERT(ltds->ltd_tgts_bitmap != NULL);
/* grow the bitmap (power-of-two style doubling) to cover @index */
183 if (index >= ltds->ltd_tgts_bitmap->size) {
184 __u32 newsize = max((__u32)ltds->ltd_tgts_bitmap->size,
185 (__u32)BITS_PER_LONG);
186 cfs_bitmap_t *old_bitmap = ltds->ltd_tgts_bitmap;
187 cfs_bitmap_t *new_bitmap;
189 while (newsize < index + 1)
192 new_bitmap = CFS_ALLOCATE_BITMAP(newsize);
193 if (new_bitmap == NULL)
194 GOTO(unlock, rc = -ENOMEM);
196 if (ltds->ltd_tgtnr > 0)
197 cfs_bitmap_copy(new_bitmap, old_bitmap);
198 ltds->ltd_tgts_bitmap = new_bitmap;
199 CFS_FREE_BITMAP(old_bitmap);
202 if (cfs_bitmap_check(ltds->ltd_tgts_bitmap, index)) {
203 CERROR("%s: the device %s (%u) is registered already\n",
204 lfsck_lfsck2name(lfsck),
205 ltd->ltd_tgt->dd_lu_dev.ld_obd->obd_name, index);
206 GOTO(unlock, rc = -EEXIST);
/* allocate the second-level pointer block on first use */
209 if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL) {
210 OBD_ALLOC_PTR(ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK]);
211 if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL)
212 GOTO(unlock, rc = -ENOMEM);
215 LTD_TGT(ltds, index) = ltd;
216 cfs_bitmap_set(ltds->ltd_tgts_bitmap, index);
219 GOTO(unlock, rc = 0);
223 up_write(&ltds->ltd_rw_sem);
/* Adopt targets that registered before this LFSCK instance existed:
 * move matching entries from the global orphan lists onto the
 * per-instance lists, then register each one via __lfsck_add_target().
 * OSTs are processed first, then MDTs. Fix: restore '&' operators
 * corrupted by HTML-entity decoding ("&lt..." -> "<..."). */
228 static int lfsck_add_target_from_orphan(const struct lu_env *env,
229 struct lfsck_instance *lfsck)
231 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
232 struct lfsck_tgt_desc *ltd;
233 struct lfsck_tgt_desc *next;
234 struct list_head *head = &lfsck_ost_orphan_list;
239 spin_lock(&lfsck_instance_lock);
240 list_for_each_entry_safe(ltd, next, head, ltd_orphan_list) {
241 if (ltd->ltd_key == lfsck->li_bottom) {
242 list_del_init(&ltd->ltd_orphan_list);
243 list_add_tail(&ltd->ltd_orphan_list,
247 spin_unlock(&lfsck_instance_lock);
249 down_write(&ltds->ltd_rw_sem);
250 while (!list_empty(&ltds->ltd_orphan)) {
251 ltd = list_entry(ltds->ltd_orphan.next,
252 struct lfsck_tgt_desc,
254 list_del_init(&ltd->ltd_orphan_list);
255 rc = __lfsck_add_target(env, lfsck, ltd, for_ost, true);
256 /* Do not hold the semaphore for too long time. */
257 up_write(&ltds->ltd_rw_sem);
261 down_write(&ltds->ltd_rw_sem);
263 up_write(&ltds->ltd_rw_sem);
/* second pass: repeat the same processing for MDT orphans */
266 ltds = &lfsck->li_mdt_descs;
267 head = &lfsck_mdt_orphan_list;
/* Scan @list for the component of the given @type; returns the match
 * without taking a reference. Caller must hold lfsck->li_lock. */
275 static inline struct lfsck_component *
276 __lfsck_component_find(struct lfsck_instance *lfsck, __u16 type, cfs_list_t *list)
278 struct lfsck_component *com;
280 cfs_list_for_each_entry(com, list, lc_link) {
281 if (com->lc_type == type)
/* Look up a component of @type under li_lock, checking the scan,
 * double-scan, and idle lists in order; takes a reference on the
 * component before returning it. */
287 static struct lfsck_component *
288 lfsck_component_find(struct lfsck_instance *lfsck, __u16 type)
290 struct lfsck_component *com;
292 spin_lock(&lfsck->li_lock);
293 com = __lfsck_component_find(lfsck, type, &lfsck->li_list_scan);
297 com = __lfsck_component_find(lfsck, type,
298 &lfsck->li_list_double_scan);
302 com = __lfsck_component_find(lfsck, type, &lfsck->li_list_idle);
306 lfsck_component_get(com);
307 spin_unlock(&lfsck->li_lock);
/* Unlink @com from any list it is on and drop a reference, which may
 * free the component. */
311 void lfsck_component_cleanup(const struct lu_env *env,
312 struct lfsck_component *com)
314 if (!cfs_list_empty(&com->lc_link))
315 cfs_list_del_init(&com->lc_link);
316 if (!cfs_list_empty(&com->lc_link_dir))
317 cfs_list_del_init(&com->lc_link_dir);
319 lfsck_component_put(env, com);
/* Final teardown of an LFSCK instance: release target tables, iterator
 * objects, all components on every list, the bookmark object and the
 * local OID storage. The engine thread must already be stopped. */
322 void lfsck_instance_cleanup(const struct lu_env *env,
323 struct lfsck_instance *lfsck)
325 struct ptlrpc_thread *thread = &lfsck->li_thread;
326 struct lfsck_component *com;
329 LASSERT(list_empty(&lfsck->li_link));
330 LASSERT(thread_is_init(thread) || thread_is_stopped(thread));
332 lfsck_tgt_descs_fini(&lfsck->li_ost_descs);
333 lfsck_tgt_descs_fini(&lfsck->li_mdt_descs);
335 if (lfsck->li_obj_oit != NULL) {
336 lu_object_put_nocache(env, &lfsck->li_obj_oit->do_lu);
337 lfsck->li_obj_oit = NULL;
340 LASSERT(lfsck->li_obj_dir == NULL);
/* drain every component list; cleanup drops the list linkage + ref */
342 while (!cfs_list_empty(&lfsck->li_list_scan)) {
343 com = cfs_list_entry(lfsck->li_list_scan.next,
344 struct lfsck_component,
346 lfsck_component_cleanup(env, com);
349 LASSERT(cfs_list_empty(&lfsck->li_list_dir));
351 while (!cfs_list_empty(&lfsck->li_list_double_scan)) {
352 com = cfs_list_entry(lfsck->li_list_double_scan.next,
353 struct lfsck_component,
355 lfsck_component_cleanup(env, com);
358 while (!cfs_list_empty(&lfsck->li_list_idle)) {
359 com = cfs_list_entry(lfsck->li_list_idle.next,
360 struct lfsck_component,
362 lfsck_component_cleanup(env, com);
365 if (lfsck->li_bookmark_obj != NULL) {
366 lu_object_put_nocache(env, &lfsck->li_bookmark_obj->do_lu);
367 lfsck->li_bookmark_obj = NULL;
370 if (lfsck->li_los != NULL) {
371 local_oid_storage_fini(env, lfsck->li_los);
372 lfsck->li_los = NULL;
/* Find the instance whose bottom device is @key. Optionally take a
 * reference (@ref) and/or unlink it from the global list (@unlink).
 * Caller must hold lfsck_instance_lock. */
378 static inline struct lfsck_instance *
379 __lfsck_instance_find(struct dt_device *key, bool ref, bool unlink)
381 struct lfsck_instance *lfsck;
383 cfs_list_for_each_entry(lfsck, &lfsck_instance_list, li_link) {
384 if (lfsck->li_bottom == key) {
386 lfsck_instance_get(lfsck);
388 list_del_init(&lfsck->li_link);
/* Locked wrapper around __lfsck_instance_find(). */
397 static inline struct lfsck_instance *lfsck_instance_find(struct dt_device *key,
398 bool ref, bool unlink)
400 struct lfsck_instance *lfsck;
402 spin_lock(&lfsck_instance_lock);
403 lfsck = __lfsck_instance_find(key, ref, unlink);
404 spin_unlock(&lfsck_instance_lock);
/* Add @lfsck to the global instance list, refusing a duplicate bottom
 * device (the early unlock path handles the duplicate case). */
409 static inline int lfsck_instance_add(struct lfsck_instance *lfsck)
411 struct lfsck_instance *tmp;
413 spin_lock(&lfsck_instance_lock);
414 cfs_list_for_each_entry(tmp, &lfsck_instance_list, li_link) {
415 if (lfsck->li_bottom == tmp->li_bottom) {
416 spin_unlock(&lfsck_instance_lock);
421 cfs_list_add_tail(&lfsck->li_link, &lfsck_instance_list);
422 spin_unlock(&lfsck_instance_lock);
/* Print @prefix followed by the names of the set bits in @bits into
 * *buf, advancing *buf and shrinking *len as it goes. */
426 int lfsck_bits_dump(char **buf, int *len, int bits, const char *names[],
434 rc = snprintf(*buf, *len, "%s:%c", prefix, bits != 0 ? ' ' : '\n');
/* walk bits from LSB up; each cleared bit narrows the remaining set */
440 for (i = 0, flag = 1; bits != 0; i++, flag = 1 << i) {
443 if (names[i] != NULL) {
444 rc = snprintf(*buf, *len, "%s%c", names[i],
445 bits != 0 ? ',' : '\n');
/* Print "prefix: <seconds since @time>" into *buf, or "N/A" when the
 * timestamp is unset. */
457 int lfsck_time_dump(char **buf, int *len, __u64 time, const char *prefix)
462 rc = snprintf(*buf, *len, "%s: "LPU64" seconds\n", prefix,
463 cfs_time_current_sec() - time);
465 rc = snprintf(*buf, *len, "%s: N/A\n", prefix);
/* Print an LFSCK position as "prefix: <oit cookie>, <dir FID>, <dir
 * cookie>", with "N/A" substituted for unset parts. */
474 int lfsck_pos_dump(char **buf, int *len, struct lfsck_position *pos,
479 if (fid_is_zero(&pos->lp_dir_parent)) {
480 if (pos->lp_oit_cookie == 0)
481 rc = snprintf(*buf, *len, "%s: N/A, N/A, N/A\n",
484 rc = snprintf(*buf, *len, "%s: "LPU64", N/A, N/A\n",
485 prefix, pos->lp_oit_cookie);
487 rc = snprintf(*buf, *len, "%s: "LPU64", "DFID", "LPU64"\n",
488 prefix, pos->lp_oit_cookie,
489 PFID(&pos->lp_dir_parent), pos->lp_dir_cookie);
/* Capture the current scan position (otable iterator cookie plus, when
 * a directory traverse is active, the directory FID and its cookie)
 * into @pos. @init indicates this is the initial checkpoint. */
499 void lfsck_pos_fill(const struct lu_env *env, struct lfsck_instance *lfsck,
500 struct lfsck_position *pos, bool init)
502 const struct dt_it_ops *iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
504 if (unlikely(lfsck->li_di_oit == NULL)) {
505 memset(pos, 0, sizeof(*pos));
509 pos->lp_oit_cookie = iops->store(env, lfsck->li_di_oit);
/* the current object has not been handled yet: back up one slot so it
 * is rescanned on resume */
510 if (!lfsck->li_current_oit_processed && !init)
511 pos->lp_oit_cookie--;
513 LASSERT(pos->lp_oit_cookie > 0);
515 if (lfsck->li_di_dir != NULL) {
516 struct dt_object *dto = lfsck->li_obj_dir;
518 pos->lp_dir_cookie = dto->do_index_ops->dio_it.store(env,
/* a cookie at/after end-of-dir means the traverse finished */
521 if (pos->lp_dir_cookie >= MDS_DIR_END_OFF) {
522 fid_zero(&pos->lp_dir_parent);
523 pos->lp_dir_cookie = 0;
525 pos->lp_dir_parent = *lfsck_dto2fid(dto);
528 fid_zero(&pos->lp_dir_parent);
529 pos->lp_dir_cookie = 0;
/* Translate a scan-rate limit (objects/sec) into the sleep parameters
 * used by lfsck_control_speed(): li_sleep_rate objects scanned per
 * li_sleep_jif jiffies of sleep; zero means unlimited. */
533 static void __lfsck_set_speed(struct lfsck_instance *lfsck, __u32 limit)
535 lfsck->li_bookmark_ram.lb_speed_limit = limit;
536 if (limit != LFSCK_SPEED_NO_LIMIT) {
538 lfsck->li_sleep_rate = limit / HZ;
539 lfsck->li_sleep_jif = 1;
541 lfsck->li_sleep_rate = 1;
542 lfsck->li_sleep_jif = HZ / limit;
545 lfsck->li_sleep_jif = 0;
546 lfsck->li_sleep_rate = 0;
/* Throttle the main engine: once li_new_scanned objects have been
 * processed, sleep li_sleep_jif jiffies (interruptible by a thread
 * state change) and reset the counter. */
550 void lfsck_control_speed(struct lfsck_instance *lfsck)
552 struct ptlrpc_thread *thread = &lfsck->li_thread;
553 struct l_wait_info lwi;
555 if (lfsck->li_sleep_jif > 0 &&
556 lfsck->li_new_scanned >= lfsck->li_sleep_rate) {
557 lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
558 LWI_ON_SIGNAL_NOOP, NULL);
560 l_wait_event(thread->t_ctl_waitq,
561 !thread_is_running(thread),
563 lfsck->li_new_scanned = 0;
/* Same throttling as lfsck_control_speed(), but driven by a single
 * component's own lc_new_scanned counter. */
567 void lfsck_control_speed_by_self(struct lfsck_component *com)
569 struct lfsck_instance *lfsck = com->lc_lfsck;
570 struct ptlrpc_thread *thread = &lfsck->li_thread;
571 struct l_wait_info lwi;
573 if (lfsck->li_sleep_jif > 0 &&
574 com->lc_new_scanned >= lfsck->li_sleep_rate) {
575 lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
576 LWI_ON_SIGNAL_NOOP, NULL);
578 l_wait_event(thread->t_ctl_waitq,
579 !thread_is_running(thread),
581 com->lc_new_scanned = 0;
/* Resolve the FID of @obj's parent via its ".." entry; @obj must be a
 * directory that supports index operations. */
585 static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
588 if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
589 !dt_try_as_dir(env, obj)))
592 return dt_lookup(env, obj, (struct dt_rec *)fid,
593 (const struct dt_key *)"..", BYPASS_CAPA);
/* Decide whether @obj (found by the otable iterator) needs a
 * namespace-based directory scan: skip non-directories, special
 * directories (global root, .lustre), dead objects, and objects whose
 * ancestry leads outside the scannable tree; on several early-exit
 * paths the object reference is dropped here. */
596 static int lfsck_needs_scan_dir(const struct lu_env *env,
597 struct lfsck_instance *lfsck,
598 struct dt_object *obj)
600 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
604 if (!lfsck->li_master || !S_ISDIR(lfsck_object_type(obj)) ||
605 cfs_list_empty(&lfsck->li_list_dir))
609 /* XXX: Currently, we do not scan the "/REMOTE_PARENT_DIR",
610 * which is the agent directory to manage the objects
611 * which name entries reside on remote MDTs. Related
612 * consistency verification will be processed in LFSCK
614 if (lu_fid_eq(lfsck_dto2fid(obj), &lfsck->li_global_root_fid)) {
616 lfsck_object_put(env, obj);
620 /* .lustre doesn't contain "real" user objects, no need lfsck */
621 if (fid_is_dot_lustre(lfsck_dto2fid(obj))) {
623 lfsck_object_put(env, obj);
627 dt_read_lock(env, obj, MOR_TGT_CHILD);
628 if (unlikely(lfsck_is_dead_obj(obj))) {
629 dt_read_unlock(env, obj);
631 lfsck_object_put(env, obj);
/* probe for a link EA (size-0 buffer: existence check only) */
635 rc = dt_xattr_get(env, obj,
636 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK,
638 dt_read_unlock(env, obj);
641 lfsck_object_put(env, obj);
645 if (rc < 0 && rc != -ENODATA) {
647 lfsck_object_put(env, obj);
/* no link EA: walk up via ".." until a known root is reached */
651 rc = lfsck_parent_fid(env, obj, fid);
653 lfsck_object_put(env, obj);
657 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
660 obj = lfsck_object_find(env, lfsck, fid);
663 else if (IS_ERR(obj))
666 if (!dt_object_exists(obj)) {
667 lfsck_object_put(env, obj);
671 /* Currently, only client visible directory can be remote. */
672 if (dt_object_remote(obj)) {
673 lfsck_object_put(env, obj);
/* Allocate and initialize the argument bundle for an LFSCK engine
 * thread; takes references on @lfsck and (when non-NULL) @com that are
 * released by lfsck_thread_args_fini(). Fix: restore '&' operator
 * corrupted by HTML-entity decoding ("&lta" -> "<a"). */
682 struct lfsck_thread_args *lfsck_thread_args_init(struct lfsck_instance *lfsck,
683 struct lfsck_component *com)
685 struct lfsck_thread_args *lta;
690 return ERR_PTR(-ENOMEM);
692 rc = lu_env_init(&lta->lta_env, LCT_MD_THREAD | LCT_DT_THREAD);
698 lta->lta_lfsck = lfsck_instance_get(lfsck);
700 lta->lta_com = lfsck_component_get(com);
/* Drop the references held by the thread-argument bundle and finalize
 * its private lu_env. Fix: restore '&' operators corrupted by
 * HTML-entity decoding ("&lta" -> "<a"). */
705 void lfsck_thread_args_fini(struct lfsck_thread_args *lta)
707 if (lta->lta_com != NULL)
708 lfsck_component_put(&lta->lta_env, lta->lta_com);
709 lfsck_instance_put(&lta->lta_env, lta->lta_lfsck);
710 lu_env_fini(&lta->lta_env);
714 /* LFSCK wrap functions */
/* Propagate a failure notification to every component on the active
 * scan list. */
716 void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
719 struct lfsck_component *com;
721 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
722 com->lc_ops->lfsck_fail(env, com, new_checked);
/* Periodic checkpoint: no-op until the next checkpoint time is due,
 * then record the current position and ask each scanning component to
 * checkpoint; the first component failure is preserved in rc1. */
726 int lfsck_checkpoint(const struct lu_env *env, struct lfsck_instance *lfsck)
728 struct lfsck_component *com;
732 if (likely(cfs_time_beforeq(cfs_time_current(),
733 lfsck->li_time_next_checkpoint)))
736 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, false);
737 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
738 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
743 lfsck->li_time_last_checkpoint = cfs_time_current();
744 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
745 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
746 return rc1 != 0 ? rc1 : rc;
/* Prepare a new LFSCK run: let each scanning component compute its
 * start position, position the otable-based iterator at the smallest
 * requested position, optionally (re)start the namespace-based
 * directory traverse, and take the initial checkpoint. */
749 int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck)
751 struct dt_object *obj = NULL;
752 struct lfsck_component *com;
753 struct lfsck_component *next;
754 struct lfsck_position *pos = NULL;
755 const struct dt_it_ops *iops =
756 &lfsck->li_obj_oit->do_index_ops->dio_it;
761 LASSERT(lfsck->li_obj_dir == NULL);
762 LASSERT(lfsck->li_di_dir == NULL);
764 lfsck->li_current_oit_processed = 0;
/* pick the earliest non-zero start position across components */
765 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
766 com->lc_new_checked = 0;
767 if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
770 rc = com->lc_ops->lfsck_prep(env, com);
775 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
776 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
777 pos = &com->lc_pos_start;
780 /* Init otable-based iterator. */
782 rc = iops->load(env, lfsck->li_di_oit, 0);
784 lfsck->li_oit_over = 1;
791 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
795 lfsck->li_oit_over = 1;
797 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
800 /* Find the directory for namespace-based traverse. */
801 obj = lfsck_object_find(env, lfsck, &pos->lp_dir_parent);
804 else if (IS_ERR(obj))
805 RETURN(PTR_ERR(obj));
807 /* XXX: Currently, skip remote object, the consistency for
808 * remote object will be processed in LFSCK phase III. */
809 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
810 unlikely(!S_ISDIR(lfsck_object_type(obj))))
813 if (unlikely(!dt_try_as_dir(env, obj)))
814 GOTO(out, rc = -ENOTDIR);
816 /* Init the namespace-based directory traverse. */
817 iops = &obj->do_index_ops->dio_it;
818 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
820 GOTO(out, rc = PTR_ERR(di));
822 LASSERT(pos->lp_dir_cookie < MDS_DIR_END_OFF);
824 rc = iops->load(env, di, pos->lp_dir_cookie);
/* skip the already-processed entry the cookie points at */
825 if ((rc == 0) || (rc > 0 && pos->lp_dir_cookie > 0))
826 rc = iops->next(env, di);
836 lfsck->li_obj_dir = lfsck_object_get(obj);
837 lfsck->li_cookie_dir = iops->store(env, di);
838 spin_lock(&lfsck->li_lock);
839 lfsck->li_di_dir = di;
840 spin_unlock(&lfsck->li_lock);
846 lfsck_object_put(env, obj);
849 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
851 com->lc_ops->lfsck_post(env, com, rc, true);
/* take the initial checkpoint before scanning begins */
857 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, true);
858 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
859 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
864 lfsck->li_time_last_checkpoint = cfs_time_current();
865 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
866 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Handle one object produced by the otable iterator: pass it to every
 * scanning component, and if it is a directory needing a namespace
 * scan, start the per-directory traverse on it. On failure the
 * instance-wide fail handler is invoked. */
870 int lfsck_exec_oit(const struct lu_env *env, struct lfsck_instance *lfsck,
871 struct dt_object *obj)
873 struct lfsck_component *com;
874 const struct dt_it_ops *iops;
879 LASSERT(lfsck->li_obj_dir == NULL);
881 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
882 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
887 rc = lfsck_needs_scan_dir(env, lfsck, obj);
891 if (unlikely(!dt_try_as_dir(env, obj)))
892 GOTO(out, rc = -ENOTDIR);
894 iops = &obj->do_index_ops->dio_it;
895 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
897 GOTO(out, rc = PTR_ERR(di));
899 rc = iops->load(env, di, 0);
901 rc = iops->next(env, di);
911 lfsck->li_obj_dir = lfsck_object_get(obj);
912 lfsck->li_cookie_dir = iops->store(env, di);
913 spin_lock(&lfsck->li_lock);
914 lfsck->li_di_dir = di;
915 spin_unlock(&lfsck->li_lock);
921 lfsck_fail(env, lfsck, false);
922 return (rc > 0 ? 0 : rc);
/* Dispatch one directory entry @ent under @obj to every scanning
 * component. */
925 int lfsck_exec_dir(const struct lu_env *env, struct lfsck_instance *lfsck,
926 struct dt_object *obj, struct lu_dirent *ent)
928 struct lfsck_component *com;
931 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
932 rc = com->lc_ops->lfsck_exec_dir(env, com, obj, ent);
/* End-of-scan processing: record the final position, run each scanning
 * component's post handler with @result, and reset the checkpoint
 * timers. */
939 int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
942 struct lfsck_component *com;
943 struct lfsck_component *next;
947 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, false);
948 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
949 rc = com->lc_ops->lfsck_post(env, com, result, false);
954 lfsck->li_time_last_checkpoint = cfs_time_current();
955 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
956 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
958 /* Ignore a component's post failure so the other components can
 * still proceed. */
/* Run phase-2 (double) scanning for every component on the double-scan
 * list (skipped entirely in dryrun mode), then wait for all async
 * double-scan work to drain; the first failure is kept in rc1. */
962 int lfsck_double_scan(const struct lu_env *env, struct lfsck_instance *lfsck)
964 struct lfsck_component *com;
965 struct lfsck_component *next;
966 struct l_wait_info lwi = { 0 };
970 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
972 if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
975 rc = com->lc_ops->lfsck_double_scan(env, com);
980 l_wait_event(lfsck->li_thread.t_ctl_waitq,
981 atomic_read(&lfsck->li_double_scan_count) == 0,
984 return (rc1 != 0 ? rc1 : rc);
/* Notify every component (scan and double-scan lists) that the LFSCK
 * run is quitting; components without a quit handler are skipped. */
987 void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
989 struct lfsck_component *com;
990 struct lfsck_component *next;
992 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
994 if (com->lc_ops->lfsck_quit != NULL)
995 com->lc_ops->lfsck_quit(env, com);
998 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
1000 if (com->lc_ops->lfsck_quit != NULL)
1001 com->lc_ops->lfsck_quit(env, com);
1005 /* external interfaces */
/* Report the configured speed limit of the LFSCK instance bound to
 * @key into @buf; needs a temporary lu_env for the instance lookup. */
1007 int lfsck_get_speed(struct dt_device *key, void *buf, int len)
1010 struct lfsck_instance *lfsck;
1014 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1018 lfsck = lfsck_instance_find(key, true, false);
1019 if (likely(lfsck != NULL)) {
1020 rc = snprintf(buf, len, "%u\n",
1021 lfsck->li_bookmark_ram.lb_speed_limit);
1022 lfsck_instance_put(&env, lfsck);
1031 EXPORT_SYMBOL(lfsck_get_speed);
/* Set the speed limit for the LFSCK instance bound to @key and persist
 * it to the on-disk bookmark under li_mutex. */
1033 int lfsck_set_speed(struct dt_device *key, int val)
1036 struct lfsck_instance *lfsck;
1040 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1044 lfsck = lfsck_instance_find(key, true, false);
1045 if (likely(lfsck != NULL)) {
1046 mutex_lock(&lfsck->li_mutex);
1047 __lfsck_set_speed(lfsck, val);
1048 rc = lfsck_bookmark_store(&env, lfsck);
1049 mutex_unlock(&lfsck->li_mutex);
1050 lfsck_instance_put(&env, lfsck);
1059 EXPORT_SYMBOL(lfsck_set_speed);
/* Report the configured async-request window size of the LFSCK
 * instance bound to @key into @buf. */
1061 int lfsck_get_windows(struct dt_device *key, void *buf, int len)
1064 struct lfsck_instance *lfsck;
1068 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1072 lfsck = lfsck_instance_find(key, true, false);
1073 if (likely(lfsck != NULL)) {
1074 rc = snprintf(buf, len, "%u\n",
1075 lfsck->li_bookmark_ram.lb_async_windows);
1076 lfsck_instance_put(&env, lfsck);
1085 EXPORT_SYMBOL(lfsck_get_windows);
/* Set the async-request window size for the LFSCK instance bound to
 * @key, rejecting values above LFSCK_ASYNC_WIN_MAX, and persist the
 * change to the bookmark when the value actually changes. */
1087 int lfsck_set_windows(struct dt_device *key, int val)
1090 struct lfsck_instance *lfsck;
1094 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1098 lfsck = lfsck_instance_find(key, true, false);
1099 if (likely(lfsck != NULL)) {
1100 if (val > LFSCK_ASYNC_WIN_MAX) {
1101 CERROR("%s: Too large async windows size, which "
1102 "may cause memory issues. The valid range "
1103 "is [0 - %u]. If you do not want to restrict "
1104 "the windows size for async requests pipeline, "
1105 "just set it as 0.\n",
1106 lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
1108 } else if (lfsck->li_bookmark_ram.lb_async_windows != val) {
1109 mutex_lock(&lfsck->li_mutex);
1110 lfsck->li_bookmark_ram.lb_async_windows = val;
1111 rc = lfsck_bookmark_store(&env, lfsck);
1112 mutex_unlock(&lfsck->li_mutex);
1114 lfsck_instance_put(&env, lfsck);
1123 EXPORT_SYMBOL(lfsck_set_windows);
/* Dump the state of the component of @type belonging to the LFSCK
 * instance bound to @key into @buf via the component's dump method. */
1125 int lfsck_dump(struct dt_device *key, void *buf, int len, enum lfsck_type type)
1128 struct lfsck_instance *lfsck;
1129 struct lfsck_component *com;
1133 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1137 lfsck = lfsck_instance_find(key, true, false);
1138 if (likely(lfsck != NULL)) {
1139 com = lfsck_component_find(lfsck, type);
1140 if (likely(com != NULL)) {
1141 rc = com->lc_ops->lfsck_dump(&env, com, buf, len);
1142 lfsck_component_put(&env, com);
1147 lfsck_instance_put(&env, lfsck);
1156 EXPORT_SYMBOL(lfsck_dump);
/* Start (or auto-resume when @lsp->lsp_start is NULL) an LFSCK run on
 * the device bound to @key: update the bookmark from the start
 * parameters, select/reset the requested components, compute the
 * iterator flags, and launch the master engine thread, waiting until
 * it is running (or has stopped). Serialized by li_mutex. */
1158 int lfsck_start(const struct lu_env *env, struct dt_device *key,
1159 struct lfsck_start_param *lsp)
1161 struct lfsck_start *start = lsp->lsp_start;
1162 struct lfsck_instance *lfsck;
1163 struct lfsck_bookmark *bk;
1164 struct ptlrpc_thread *thread;
1165 struct lfsck_component *com;
1166 struct l_wait_info lwi = { 0 };
1167 struct lfsck_thread_args *lta;
1175 lfsck = lfsck_instance_find(key, true, false);
1176 if (unlikely(lfsck == NULL))
1179 /* start == NULL means auto trigger paused LFSCK. */
1180 if ((start == NULL) &&
1181 (cfs_list_empty(&lfsck->li_list_scan) ||
1182 OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_AUTO)))
1185 bk = &lfsck->li_bookmark_ram;
1186 thread = &lfsck->li_thread;
1187 mutex_lock(&lfsck->li_mutex);
1188 spin_lock(&lfsck->li_lock);
/* engine already running: only validate the requested types */
1189 if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
1191 while (start->ls_active != 0) {
1192 if (type & start->ls_active) {
1193 com = __lfsck_component_find(lfsck, type,
1194 &lfsck->li_list_scan);
1196 com = __lfsck_component_find(lfsck,
1198 &lfsck->li_list_double_scan);
1203 start->ls_active &= ~type;
1208 spin_unlock(&lfsck->li_lock);
1211 spin_unlock(&lfsck->li_lock);
1213 lfsck->li_namespace = lsp->lsp_namespace;
1214 lfsck->li_paused = 0;
1215 lfsck->li_oit_over = 0;
1216 lfsck->li_drop_dryrun = 0;
1217 lfsck->li_new_scanned = 0;
1219 /* For auto trigger. */
/* fold the explicit start parameters into the in-memory bookmark */
1223 start->ls_version = bk->lb_version;
1224 if (start->ls_valid & LSV_SPEED_LIMIT) {
1225 __lfsck_set_speed(lfsck, start->ls_speed_limit);
1229 if (start->ls_valid & LSV_ASYNC_WINDOWS &&
1230 bk->lb_async_windows != start->ls_async_windows) {
1231 bk->lb_async_windows = start->ls_async_windows;
1235 if (start->ls_valid & LSV_ERROR_HANDLE) {
1236 valid |= DOIV_ERROR_HANDLE;
1237 if (start->ls_flags & LPF_FAILOUT)
1238 flags |= DOIF_FAILOUT;
1240 if ((start->ls_flags & LPF_FAILOUT) &&
1241 !(bk->lb_param & LPF_FAILOUT)) {
1242 bk->lb_param |= LPF_FAILOUT;
1244 } else if (!(start->ls_flags & LPF_FAILOUT) &&
1245 (bk->lb_param & LPF_FAILOUT)) {
1246 bk->lb_param &= ~LPF_FAILOUT;
1251 if (start->ls_valid & LSV_DRYRUN) {
1252 valid |= DOIV_DRYRUN;
1253 if (start->ls_flags & LPF_DRYRUN)
1254 flags |= DOIF_DRYRUN;
1256 if ((start->ls_flags & LPF_DRYRUN) &&
1257 !(bk->lb_param & LPF_DRYRUN)) {
1258 bk->lb_param |= LPF_DRYRUN;
1260 } else if (!(start->ls_flags & LPF_DRYRUN) &&
1261 (bk->lb_param & LPF_DRYRUN)) {
1262 bk->lb_param &= ~LPF_DRYRUN;
1263 lfsck->li_drop_dryrun = 1;
1269 rc = lfsck_bookmark_store(env, lfsck);
1274 if (start->ls_flags & LPF_RESET)
1275 flags |= DOIF_RESET;
/* select which components will take part in this run */
1277 if (start->ls_active != 0) {
1278 struct lfsck_component *next;
1280 if (start->ls_active == LFSCK_TYPES_ALL)
1281 start->ls_active = LFSCK_TYPES_SUPPORTED;
1283 if (start->ls_active & ~LFSCK_TYPES_SUPPORTED) {
1284 start->ls_active &= ~LFSCK_TYPES_SUPPORTED;
1285 GOTO(out, rc = -ENOTSUPP);
1288 cfs_list_for_each_entry_safe(com, next,
1289 &lfsck->li_list_scan, lc_link) {
1290 if (!(com->lc_type & start->ls_active)) {
1291 rc = com->lc_ops->lfsck_post(env, com, 0,
1298 while (start->ls_active != 0) {
1299 if (type & start->ls_active) {
1300 com = __lfsck_component_find(lfsck, type,
1301 &lfsck->li_list_idle);
1303 /* The component status will be updated
1304 * when its prep() is called later by
1305 * the LFSCK main engine. */
1306 cfs_list_del_init(&com->lc_link);
1307 cfs_list_add_tail(&com->lc_link,
1308 &lfsck->li_list_scan);
1310 start->ls_active &= ~type;
1316 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
1317 start->ls_active |= com->lc_type;
1318 if (flags & DOIF_RESET) {
1319 rc = com->lc_ops->lfsck_reset(env, com, false);
/* compute the iterator argument words from the effective parameters */
1326 lfsck->li_args_dir = LUDA_64BITHASH | LUDA_VERIFY;
1327 if (bk->lb_param & LPF_DRYRUN) {
1328 lfsck->li_args_dir |= LUDA_VERIFY_DRYRUN;
1329 valid |= DOIV_DRYRUN;
1330 flags |= DOIF_DRYRUN;
1333 if (bk->lb_param & LPF_FAILOUT) {
1334 valid |= DOIV_ERROR_HANDLE;
1335 flags |= DOIF_FAILOUT;
1338 if (!cfs_list_empty(&lfsck->li_list_scan))
1339 flags |= DOIF_OUTUSED;
1341 lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
1342 thread_set_flags(thread, 0);
1343 lta = lfsck_thread_args_init(lfsck, NULL);
1345 GOTO(out, rc = PTR_ERR(lta));
1347 rc = PTR_ERR(kthread_run(lfsck_master_engine, lta, "lfsck"));
1348 if (IS_ERR_VALUE(rc)) {
1349 CERROR("%s: cannot start LFSCK thread: rc = %ld\n",
1350 lfsck_lfsck2name(lfsck), rc);
1351 lfsck_thread_args_fini(lta);
1354 l_wait_event(thread->t_ctl_waitq,
1355 thread_is_running(thread) ||
1356 thread_is_stopped(thread),
1363 mutex_unlock(&lfsck->li_mutex);
1365 lfsck_instance_put(env, lfsck);
1366 return (rc < 0 ? rc : 0);
/* Stop (or pause, when @pause is true) the running LFSCK engine on the
 * device bound to @key: mark the thread SVC_STOPPING, wake it, and
 * wait until it reports stopped. A not-running engine is a no-op. */
1370 int lfsck_stop(const struct lu_env *env, struct dt_device *key, bool pause)
1372 struct lfsck_instance *lfsck;
1373 struct ptlrpc_thread *thread;
1374 struct l_wait_info lwi = { 0 };
1377 lfsck = lfsck_instance_find(key, true, false);
1378 if (unlikely(lfsck == NULL))
1381 thread = &lfsck->li_thread;
1382 mutex_lock(&lfsck->li_mutex);
1383 spin_lock(&lfsck->li_lock);
1384 if (thread_is_init(thread) || thread_is_stopped(thread)) {
1385 spin_unlock(&lfsck->li_lock);
1386 mutex_unlock(&lfsck->li_mutex);
1387 lfsck_instance_put(env, lfsck);
1392 lfsck->li_paused = 1;
1393 thread_set_flags(thread, SVC_STOPPING);
1394 spin_unlock(&lfsck->li_lock);
1396 wake_up_all(&thread->t_ctl_waitq);
1397 l_wait_event(thread->t_ctl_waitq,
1398 thread_is_stopped(thread),
1400 mutex_unlock(&lfsck->li_mutex);
1401 lfsck_instance_put(env, lfsck);
1405 EXPORT_SYMBOL(lfsck_stop);
/* Create and register an LFSCK instance for device @key: allocate and
 * initialize the instance, set up the target tables, local OID
 * storage, root/otable objects and each component, add it to the
 * global list, and adopt any orphan targets. On error the instance is
 * cleaned up. */
1407 int lfsck_register(const struct lu_env *env, struct dt_device *key,
1408 struct dt_device *next, lfsck_out_notify notify,
1409 void *notify_data, bool master)
1411 struct lfsck_instance *lfsck;
1412 struct dt_object *root = NULL;
1413 struct dt_object *obj;
1414 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
/* refuse double registration for the same bottom device */
1418 lfsck = lfsck_instance_find(key, false, false);
1419 if (unlikely(lfsck != NULL))
1422 OBD_ALLOC_PTR(lfsck);
1426 mutex_init(&lfsck->li_mutex);
1427 spin_lock_init(&lfsck->li_lock);
1428 CFS_INIT_LIST_HEAD(&lfsck->li_link);
1429 CFS_INIT_LIST_HEAD(&lfsck->li_list_scan);
1430 CFS_INIT_LIST_HEAD(&lfsck->li_list_dir);
1431 CFS_INIT_LIST_HEAD(&lfsck->li_list_double_scan);
1432 CFS_INIT_LIST_HEAD(&lfsck->li_list_idle);
1433 atomic_set(&lfsck->li_ref, 1);
1434 atomic_set(&lfsck->li_double_scan_count, 0);
1435 init_waitqueue_head(&lfsck->li_thread.t_ctl_waitq);
1436 lfsck->li_out_notify = notify;
1437 lfsck->li_out_notify_data = notify_data;
1438 lfsck->li_next = next;
1439 lfsck->li_bottom = key;
1441 rc = lfsck_tgt_descs_init(&lfsck->li_ost_descs);
1445 rc = lfsck_tgt_descs_init(&lfsck->li_mdt_descs);
1449 fid->f_seq = FID_SEQ_LOCAL_NAME;
1452 rc = local_oid_storage_init(env, lfsck->li_bottom, fid, &lfsck->li_los);
1456 rc = dt_root_get(env, key, fid);
1460 root = dt_locate(env, lfsck->li_bottom, fid);
1462 GOTO(out, rc = PTR_ERR(root));
1464 if (unlikely(!dt_try_as_dir(env, root)))
1465 GOTO(out, rc = -ENOTDIR);
1467 lfsck->li_local_root_fid = *fid;
1469 lfsck->li_master = 1;
/* only MDT0000 holds the global "ROOT" entry */
1470 if (lfsck_dev_idx(lfsck->li_bottom) == 0) {
1471 rc = dt_lookup(env, root,
1472 (struct dt_rec *)(&lfsck->li_global_root_fid),
1473 (const struct dt_key *)"ROOT", BYPASS_CAPA);
1479 fid->f_seq = FID_SEQ_LOCAL_FILE;
1480 fid->f_oid = OTABLE_IT_OID;
1482 obj = dt_locate(env, lfsck->li_bottom, fid);
1484 GOTO(out, rc = PTR_ERR(obj));
1486 lfsck->li_obj_oit = obj;
1487 rc = obj->do_ops->do_index_try(env, obj, &dt_otable_features);
1489 if (rc == -ENOTSUPP)
1495 rc = lfsck_bookmark_setup(env, lfsck);
1500 rc = lfsck_namespace_setup(env, lfsck);
1505 rc = lfsck_layout_setup(env, lfsck);
1509 /* XXX: more LFSCK components initialization to be added here. */
1512 rc = lfsck_instance_add(lfsck);
1514 rc = lfsck_add_target_from_orphan(env, lfsck);
1516 if (root != NULL && !IS_ERR(root))
1517 lu_object_put(env, &root->do_lu);
1519 lfsck_instance_cleanup(env, lfsck);
/* Deregister the LFSCK instance bound to @key: unlink it from the
 * global list and drop the reference, triggering cleanup when it is
 * the last one. */
1524 void lfsck_degister(const struct lu_env *env, struct dt_device *key)
1526 struct lfsck_instance *lfsck;
1528 lfsck = lfsck_instance_find(key, false, true);
1530 lfsck_instance_put(env, lfsck);
1532 EXPORT_SYMBOL(lfsck_degister);
/* Register target device @tgt (OST or MDT, per @for_ost) with the
 * LFSCK instance bound to @key. If the instance does not exist yet,
 * park the descriptor on the matching global orphan list so it can be
 * adopted later by lfsck_add_target_from_orphan(). Fix: restore '&'
 * operators corrupted by HTML-entity decoding ("&lt..." -> "<..."). */
1534 int lfsck_add_target(const struct lu_env *env, struct dt_device *key,
1535 struct dt_device *tgt, struct obd_export *exp,
1536 __u32 index, bool for_ost)
1538 struct lfsck_instance *lfsck;
1539 struct lfsck_tgt_desc *ltd;
1550 INIT_LIST_HEAD(&ltd->ltd_orphan_list);
1551 INIT_LIST_HEAD(&ltd->ltd_layout_list);
1552 atomic_set(&ltd->ltd_ref, 1);
1553 ltd->ltd_index = index;
1555 spin_lock(&lfsck_instance_lock);
1556 lfsck = __lfsck_instance_find(key, true, false);
1557 if (lfsck == NULL) {
/* no instance yet: keep the target on the global orphan list */
1559 list_add_tail(&ltd->ltd_orphan_list,
1560 &lfsck_ost_orphan_list);
1562 list_add_tail(&ltd->ltd_orphan_list,
1563 &lfsck_mdt_orphan_list);
1564 spin_unlock(&lfsck_instance_lock);
1568 spin_unlock(&lfsck_instance_lock);
1570 rc = __lfsck_add_target(env, lfsck, ltd, for_ost, false);
1574 lfsck_instance_put(env, lfsck);
/* Remove target device @tgt from the LFSCK instance bound to @key, or
 * from the matching global orphan list if it was never adopted.
 * Fixes: (1) restore '&' operators corrupted by HTML-entity decoding
 * ("&lt..." -> "<..."); (2) the second per-instance orphan-list
 * selection previously re-used li_ost_descs for both branches, so MDT
 * orphans were never found here — use li_mdt_descs for the MDT case. */
1580 void lfsck_del_target(const struct lu_env *env, struct dt_device *key,
1581 struct dt_device *tgt, __u32 index, bool for_ost)
1583 struct lfsck_instance *lfsck;
1584 struct lfsck_tgt_descs *ltds;
1585 struct lfsck_tgt_desc *ltd;
1586 struct list_head *head;
1590 head = &lfsck_ost_orphan_list;
1592 head = &lfsck_mdt_orphan_list;
/* still on the global orphan list: just unlink it */
1594 spin_lock(&lfsck_instance_lock);
1595 list_for_each_entry(ltd, head, ltd_orphan_list) {
1596 if (ltd->ltd_tgt == tgt) {
1597 list_del_init(&ltd->ltd_orphan_list);
1598 spin_unlock(&lfsck_instance_lock);
1605 lfsck = __lfsck_instance_find(key, true, false);
1606 spin_unlock(&lfsck_instance_lock);
1607 if (unlikely(lfsck == NULL))
1611 ltds = &lfsck->li_ost_descs;
1613 ltds = &lfsck->li_mdt_descs;
1615 down_write(&ltds->ltd_rw_sem);
1617 LASSERT(ltds->ltd_tgts_bitmap != NULL);
1619 if (unlikely(index >= ltds->ltd_tgts_bitmap->size))
1622 ltd = LTD_TGT(ltds, index);
1623 if (unlikely(ltd == NULL))
1627 if (!list_empty(&ltd->ltd_layout_list)) {
1628 spin_lock(&ltds->ltd_lock);
1629 list_del_init(&ltd->ltd_layout_list);
1630 spin_unlock(&ltds->ltd_lock);
1633 LASSERT(ltds->ltd_tgtnr > 0);
1636 cfs_bitmap_clear(ltds->ltd_tgts_bitmap, index);
1637 LTD_TGT(ltds, index) = NULL;
/* not in the table: check the per-instance orphan list instead */
1643 head = &lfsck->li_ost_descs.ltd_orphan;
1645 head = &lfsck->li_mdt_descs.ltd_orphan;
1647 list_for_each_entry(ltd, head, ltd_orphan_list) {
1648 if (ltd->ltd_tgt == tgt) {
1649 list_del_init(&ltd->ltd_orphan_list);
1656 up_write(&ltds->ltd_rw_sem);
1657 lfsck_instance_put(env, lfsck);
/* Module init: set up the global orphan lists and register the LFSCK
 * per-thread context key. */
1661 static int __init lfsck_init(void)
1665 INIT_LIST_HEAD(&lfsck_ost_orphan_list);
1666 INIT_LIST_HEAD(&lfsck_mdt_orphan_list);
1667 lfsck_key_init_generic(&lfsck_thread_key, NULL);
1668 rc = lu_context_key_register(&lfsck_thread_key);
/* Module exit: all instances must already be deregistered; drain any
 * remaining orphan target descriptors and deregister the context key.
 * Fix: restore '&' operators corrupted by HTML-entity decoding
 * ("&lt..." -> "<..."). */
1673 static void __exit lfsck_exit(void)
1675 struct lfsck_tgt_desc *ltd;
1676 struct lfsck_tgt_desc *next;
1678 LASSERT(cfs_list_empty(&lfsck_instance_list));
1680 list_for_each_entry_safe(ltd, next, &lfsck_ost_orphan_list,
1682 list_del_init(&ltd->ltd_orphan_list);
1686 list_for_each_entry_safe(ltd, next, &lfsck_mdt_orphan_list,
1688 list_del_init(&ltd->ltd_orphan_list);
1692 lu_context_key_degister(&lfsck_thread_key);
1695 MODULE_AUTHOR("Intel Corporation <http://www.intel.com/>");
1696 MODULE_DESCRIPTION("LFSCK");
1697 MODULE_LICENSE("GPL");
1699 cfs_module(lfsck, LUSTRE_VERSION_STRING, lfsck_init, lfsck_exit);