4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2012, 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_lib.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <libcfs/list.h>
34 #include <lu_object.h>
35 #include <dt_object.h>
36 #include <md_object.h>
37 #include <lustre_fld.h>
38 #include <lustre_lib.h>
39 #include <lustre_net.h>
40 #include <lustre_lfsck.h>
41 #include <lustre/lustre_lfsck_user.h>
43 #include "lfsck_internal.h"
/* Per-thread LFSCK context: LU_KEY_INIT generates the allocator for one
 * struct lfsck_thread_info per lu_context entering LFSCK code. */
/* define lfsck thread key */
LU_KEY_INIT(lfsck, struct lfsck_thread_info);
/* Context-key destructor: release buffers held by the per-thread info
 * before the key value is freed.  NOTE(review): further lti_* buffer
 * releases may exist in lines elided from this excerpt. */
static void lfsck_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
struct lfsck_thread_info *info = data;
lu_buf_free(&info->lti_linkea_buf);
/* Register the thread key for both MD and DT service threads. */
LU_CONTEXT_KEY_DEFINE(lfsck, LCT_MD_THREAD | LCT_DT_THREAD);
LU_KEY_INIT_GENERIC(lfsck);
/* All live LFSCK instances, plus targets that registered before their
 * instance existed ("orphans"); all protected by lfsck_instance_lock. */
static CFS_LIST_HEAD(lfsck_instance_list);
static struct list_head lfsck_ost_orphan_list;
static struct list_head lfsck_mdt_orphan_list;
static DEFINE_SPINLOCK(lfsck_instance_lock);
/* Human-readable names indexed by enum lfsck_status (see
 * lfsck_status2names() below for the bounds-checked accessor). */
static const char *lfsck_status_names[] = {
[LS_SCANNING_PHASE1] = "scanning-phase1",
[LS_SCANNING_PHASE2] = "scanning-phase2",
[LS_COMPLETED] = "completed",
[LS_FAILED] = "failed",
[LS_STOPPED] = "stopped",
[LS_PAUSED] = "paused",
[LS_CRASHED] = "crashed",
[LS_PARTIAL] = "partial",
[LS_CO_FAILED] = "co-failed",
[LS_CO_STOPPED] = "co-stopped",
[LS_CO_PAUSED] = "co-paused"
/* Names for lfsck flag bits / parameter bits, consumed by
 * lfsck_bits_dump().  NOTE(review): initializers elided in this view. */
const char *lfsck_flags_names[] = {
const char *lfsck_param_names[] = {
/* Map an lfsck_status to its printable name; out-of-range values are
 * rejected before indexing the table (fallback string elided here). */
const char *lfsck_status2names(enum lfsck_status status)
if (unlikely(status < 0 || status >= LS_MAX))
return lfsck_status_names[status];
105 static int lfsck_tgt_descs_init(struct lfsck_tgt_descs *ltds)
107 spin_lock_init(<ds->ltd_lock);
108 init_rwsem(<ds->ltd_rw_sem);
109 INIT_LIST_HEAD(<ds->ltd_orphan);
110 ltds->ltd_tgts_bitmap = CFS_ALLOCATE_BITMAP(BITS_PER_LONG);
111 if (ltds->ltd_tgts_bitmap == NULL)
117 static void lfsck_tgt_descs_fini(struct lfsck_tgt_descs *ltds)
119 struct lfsck_tgt_desc *ltd;
120 struct lfsck_tgt_desc *next;
123 down_write(<ds->ltd_rw_sem);
125 list_for_each_entry_safe(ltd, next, <ds->ltd_orphan,
127 list_del_init(<d->ltd_orphan_list);
131 if (unlikely(ltds->ltd_tgts_bitmap == NULL)) {
132 up_write(<ds->ltd_rw_sem);
137 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
138 ltd = LTD_TGT(ltds, idx);
139 if (likely(ltd != NULL)) {
140 LASSERT(list_empty(<d->ltd_layout_list));
141 LASSERT(list_empty(<d->ltd_layout_phase_list));
144 cfs_bitmap_clear(ltds->ltd_tgts_bitmap, idx);
145 LTD_TGT(ltds, idx) = NULL;
150 LASSERTF(ltds->ltd_tgtnr == 0, "tgt count unmatched: %d\n",
153 for (idx = 0; idx < TGT_PTRS; idx++) {
154 if (ltds->ltd_tgts_idx[idx] != NULL) {
155 OBD_FREE_PTR(ltds->ltd_tgts_idx[idx]);
156 ltds->ltd_tgts_idx[idx] = NULL;
160 CFS_FREE_BITMAP(ltds->ltd_tgts_bitmap);
161 ltds->ltd_tgts_bitmap = NULL;
162 up_write(<ds->ltd_rw_sem);
165 static int __lfsck_add_target(const struct lu_env *env,
166 struct lfsck_instance *lfsck,
167 struct lfsck_tgt_desc *ltd,
168 bool for_ost, bool locked)
170 struct lfsck_tgt_descs *ltds;
171 __u32 index = ltd->ltd_index;
176 ltds = &lfsck->li_ost_descs;
178 ltds = &lfsck->li_mdt_descs;
181 down_write(<ds->ltd_rw_sem);
183 LASSERT(ltds->ltd_tgts_bitmap != NULL);
185 if (index >= ltds->ltd_tgts_bitmap->size) {
186 __u32 newsize = max((__u32)ltds->ltd_tgts_bitmap->size,
187 (__u32)BITS_PER_LONG);
188 cfs_bitmap_t *old_bitmap = ltds->ltd_tgts_bitmap;
189 cfs_bitmap_t *new_bitmap;
191 while (newsize < index + 1)
194 new_bitmap = CFS_ALLOCATE_BITMAP(newsize);
195 if (new_bitmap == NULL)
196 GOTO(unlock, rc = -ENOMEM);
198 if (ltds->ltd_tgtnr > 0)
199 cfs_bitmap_copy(new_bitmap, old_bitmap);
200 ltds->ltd_tgts_bitmap = new_bitmap;
201 CFS_FREE_BITMAP(old_bitmap);
204 if (cfs_bitmap_check(ltds->ltd_tgts_bitmap, index)) {
205 CERROR("%s: the device %s (%u) is registered already\n",
206 lfsck_lfsck2name(lfsck),
207 ltd->ltd_tgt->dd_lu_dev.ld_obd->obd_name, index);
208 GOTO(unlock, rc = -EEXIST);
211 if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL) {
212 OBD_ALLOC_PTR(ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK]);
213 if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL)
214 GOTO(unlock, rc = -ENOMEM);
217 LTD_TGT(ltds, index) = ltd;
218 cfs_bitmap_set(ltds->ltd_tgts_bitmap, index);
221 GOTO(unlock, rc = 0);
225 up_write(<ds->ltd_rw_sem);
230 static int lfsck_add_target_from_orphan(const struct lu_env *env,
231 struct lfsck_instance *lfsck)
233 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
234 struct lfsck_tgt_desc *ltd;
235 struct lfsck_tgt_desc *next;
236 struct list_head *head = &lfsck_ost_orphan_list;
241 spin_lock(&lfsck_instance_lock);
242 list_for_each_entry_safe(ltd, next, head, ltd_orphan_list) {
243 if (ltd->ltd_key == lfsck->li_bottom) {
244 list_del_init(<d->ltd_orphan_list);
245 list_add_tail(<d->ltd_orphan_list,
249 spin_unlock(&lfsck_instance_lock);
251 down_write(<ds->ltd_rw_sem);
252 while (!list_empty(<ds->ltd_orphan)) {
253 ltd = list_entry(ltds->ltd_orphan.next,
254 struct lfsck_tgt_desc,
256 list_del_init(<d->ltd_orphan_list);
257 rc = __lfsck_add_target(env, lfsck, ltd, for_ost, true);
258 /* Do not hold the semaphore for too long time. */
259 up_write(<ds->ltd_rw_sem);
263 down_write(<ds->ltd_rw_sem);
265 up_write(<ds->ltd_rw_sem);
268 ltds = &lfsck->li_mdt_descs;
269 head = &lfsck_mdt_orphan_list;
/* Linear search of one component list for the given component type.
 * Caller must hold li_lock; returns the component or (elided) NULL. */
static inline struct lfsck_component *
__lfsck_component_find(struct lfsck_instance *lfsck, __u16 type, cfs_list_t *list)
struct lfsck_component *com;
cfs_list_for_each_entry(com, list, lc_link) {
if (com->lc_type == type)
/* Find a component of 'type' in any of the three state lists (scan,
 * double-scan, idle) and return it with a reference held. */
static struct lfsck_component *
lfsck_component_find(struct lfsck_instance *lfsck, __u16 type)
struct lfsck_component *com;
spin_lock(&lfsck->li_lock);
com = __lfsck_component_find(lfsck, type, &lfsck->li_list_scan);
com = __lfsck_component_find(lfsck, type,
&lfsck->li_list_double_scan);
com = __lfsck_component_find(lfsck, type, &lfsck->li_list_idle);
/* Take a reference before dropping the lock so the caller owns it. */
lfsck_component_get(com);
spin_unlock(&lfsck->li_lock);
/* Detach a component from its state lists and drop the list reference. */
void lfsck_component_cleanup(const struct lu_env *env,
struct lfsck_component *com)
if (!cfs_list_empty(&com->lc_link))
cfs_list_del_init(&com->lc_link);
if (!cfs_list_empty(&com->lc_link_dir))
cfs_list_del_init(&com->lc_link_dir);
lfsck_component_put(env, com);
/* Final teardown of an LFSCK instance: release iterator objects, drain
 * all component lists, free target tables, bookmark object and local
 * OID storage.  Instance must already be unlinked and its thread idle. */
void lfsck_instance_cleanup(const struct lu_env *env,
struct lfsck_instance *lfsck)
struct ptlrpc_thread *thread = &lfsck->li_thread;
struct lfsck_component *com;
LASSERT(list_empty(&lfsck->li_link));
LASSERT(thread_is_init(thread) || thread_is_stopped(thread));
if (lfsck->li_obj_oit != NULL) {
lu_object_put_nocache(env, &lfsck->li_obj_oit->do_lu);
lfsck->li_obj_oit = NULL;
LASSERT(lfsck->li_obj_dir == NULL);
/* Drain scan, double-scan and idle lists via component cleanup. */
while (!cfs_list_empty(&lfsck->li_list_scan)) {
com = cfs_list_entry(lfsck->li_list_scan.next,
struct lfsck_component,
lfsck_component_cleanup(env, com);
LASSERT(cfs_list_empty(&lfsck->li_list_dir));
while (!cfs_list_empty(&lfsck->li_list_double_scan)) {
com = cfs_list_entry(lfsck->li_list_double_scan.next,
struct lfsck_component,
lfsck_component_cleanup(env, com);
while (!cfs_list_empty(&lfsck->li_list_idle)) {
com = cfs_list_entry(lfsck->li_list_idle.next,
struct lfsck_component,
lfsck_component_cleanup(env, com);
lfsck_tgt_descs_fini(&lfsck->li_ost_descs);
lfsck_tgt_descs_fini(&lfsck->li_mdt_descs);
if (lfsck->li_bookmark_obj != NULL) {
lu_object_put_nocache(env, &lfsck->li_bookmark_obj->do_lu);
lfsck->li_bookmark_obj = NULL;
if (lfsck->li_los != NULL) {
local_oid_storage_fini(env, lfsck->li_los);
lfsck->li_los = NULL;
/* Look up the instance keyed by bottom dt_device; optionally take a
 * reference ('ref') and/or remove it from the global list ('unlink').
 * Caller must hold lfsck_instance_lock. */
static inline struct lfsck_instance *
__lfsck_instance_find(struct dt_device *key, bool ref, bool unlink)
struct lfsck_instance *lfsck;
cfs_list_for_each_entry(lfsck, &lfsck_instance_list, li_link) {
if (lfsck->li_bottom == key) {
lfsck_instance_get(lfsck);
list_del_init(&lfsck->li_link);
/* Locked wrapper around __lfsck_instance_find(). */
static inline struct lfsck_instance *lfsck_instance_find(struct dt_device *key,
bool ref, bool unlink)
struct lfsck_instance *lfsck;
spin_lock(&lfsck_instance_lock);
lfsck = __lfsck_instance_find(key, ref, unlink);
spin_unlock(&lfsck_instance_lock);
/* Add an instance to the global list, rejecting a duplicate bottom
 * device (error return elided in this view). */
static inline int lfsck_instance_add(struct lfsck_instance *lfsck)
struct lfsck_instance *tmp;
spin_lock(&lfsck_instance_lock);
cfs_list_for_each_entry(tmp, &lfsck_instance_list, li_link) {
if (lfsck->li_bottom == tmp->li_bottom) {
spin_unlock(&lfsck_instance_lock);
cfs_list_add_tail(&lfsck->li_link, &lfsck_instance_list);
spin_unlock(&lfsck_instance_lock);
/* Print a prefixed, comma-separated list of the set bits in 'bits' into
 * *buf, using names[] for each bit; advances *buf/*len as it writes. */
int lfsck_bits_dump(char **buf, int *len, int bits, const char *names[],
rc = snprintf(*buf, *len, "%s:%c", prefix, bits != 0 ? ' ' : '\n');
for (i = 0, flag = 1; bits != 0; i++, flag = 1 << i) {
if (names[i] != NULL) {
rc = snprintf(*buf, *len, "%s%c", names[i],
bits != 0 ? ',' : '\n');
/* Print "<prefix>: N seconds" (elapsed since 'time'), or "N/A" when the
 * timestamp is unset. */
int lfsck_time_dump(char **buf, int *len, __u64 time, const char *prefix)
rc = snprintf(*buf, *len, "%s: "LPU64" seconds\n", prefix,
cfs_time_current_sec() - time);
rc = snprintf(*buf, *len, "%s: N/A\n", prefix);
/* Print an LFSCK position as "<prefix>: oit-cookie, dir-parent-FID,
 * dir-cookie", with N/A for components that are not in use. */
int lfsck_pos_dump(char **buf, int *len, struct lfsck_position *pos,
if (fid_is_zero(&pos->lp_dir_parent)) {
if (pos->lp_oit_cookie == 0)
rc = snprintf(*buf, *len, "%s: N/A, N/A, N/A\n",
rc = snprintf(*buf, *len, "%s: "LPU64", N/A, N/A\n",
prefix, pos->lp_oit_cookie);
rc = snprintf(*buf, *len, "%s: "LPU64", "DFID", "LPU64"\n",
prefix, pos->lp_oit_cookie,
PFID(&pos->lp_dir_parent), pos->lp_dir_cookie);
/* Snapshot the current scan position from the otable iterator and, when
 * a directory traversal is active, from the directory iterator too. */
void lfsck_pos_fill(const struct lu_env *env, struct lfsck_instance *lfsck,
struct lfsck_position *pos, bool init)
const struct dt_it_ops *iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
if (unlikely(lfsck->li_di_oit == NULL)) {
memset(pos, 0, sizeof(*pos));
pos->lp_oit_cookie = iops->store(env, lfsck->li_di_oit);
/* Current object not fully processed: back up one so it is rescanned. */
if (!lfsck->li_current_oit_processed && !init)
pos->lp_oit_cookie--;
LASSERT(pos->lp_oit_cookie > 0);
if (lfsck->li_di_dir != NULL) {
struct dt_object *dto = lfsck->li_obj_dir;
pos->lp_dir_cookie = dto->do_index_ops->dio_it.store(env,
/* End-of-directory cookie means no in-dir position to record. */
if (pos->lp_dir_cookie >= MDS_DIR_END_OFF) {
fid_zero(&pos->lp_dir_parent);
pos->lp_dir_cookie = 0;
pos->lp_dir_parent = *lfsck_dto2fid(dto);
fid_zero(&pos->lp_dir_parent);
pos->lp_dir_cookie = 0;
/* Translate an objects-per-second limit into (scan-count, sleep-jiffies)
 * pacing parameters; 0/NO_LIMIT disables throttling. */
static void __lfsck_set_speed(struct lfsck_instance *lfsck, __u32 limit)
lfsck->li_bookmark_ram.lb_speed_limit = limit;
if (limit != LFSCK_SPEED_NO_LIMIT) {
/* Fast limit: scan limit/HZ objects then sleep one jiffy;
 * slow limit: scan one object then sleep HZ/limit jiffies. */
lfsck->li_sleep_rate = limit / HZ;
lfsck->li_sleep_jif = 1;
lfsck->li_sleep_rate = 1;
lfsck->li_sleep_jif = HZ / limit;
lfsck->li_sleep_jif = 0;
lfsck->li_sleep_rate = 0;
/* Throttle the main engine: once li_new_scanned reaches the configured
 * rate, sleep li_sleep_jif jiffies (interruptible by a stop request). */
void lfsck_control_speed(struct lfsck_instance *lfsck)
struct ptlrpc_thread *thread = &lfsck->li_thread;
struct l_wait_info lwi;
if (lfsck->li_sleep_jif > 0 &&
lfsck->li_new_scanned >= lfsck->li_sleep_rate) {
lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
LWI_ON_SIGNAL_NOOP, NULL);
/* Wakes early if the engine stops running. */
l_wait_event(thread->t_ctl_waitq,
!thread_is_running(thread),
lfsck->li_new_scanned = 0;
/* Same pacing as lfsck_control_speed(), but driven by one component's
 * own lc_new_scanned counter (used by component-private engines). */
void lfsck_control_speed_by_self(struct lfsck_component *com)
struct lfsck_instance *lfsck = com->lc_lfsck;
struct ptlrpc_thread *thread = &lfsck->li_thread;
struct l_wait_info lwi;
if (lfsck->li_sleep_jif > 0 &&
com->lc_new_scanned >= lfsck->li_sleep_rate) {
lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
LWI_ON_SIGNAL_NOOP, NULL);
l_wait_event(thread->t_ctl_waitq,
!thread_is_running(thread),
com->lc_new_scanned = 0;
/* Resolve a directory's parent FID via its ".." entry; fails (elided
 * return) when 'obj' is not a directory or has no index ops. */
static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
!dt_try_as_dir(env, obj)))
return dt_lookup(env, obj, (struct dt_rec *)fid,
(const struct dt_key *)"..", BYPASS_CAPA);
/* Decide whether a directory object must be scanned entry-by-entry by
 * namespace-based components.  Walks up the ".." chain (loop structure
 * partly elided) skipping agent/.lustre/dead/remote directories.
 * NOTE(review): each early-exit branch puts 'obj' before returning. */
static int lfsck_needs_scan_dir(const struct lu_env *env,
struct lfsck_instance *lfsck,
struct dt_object *obj)
struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
/* Only the master with active namespace components scans directories. */
if (!lfsck->li_master || !S_ISDIR(lfsck_object_type(obj)) ||
cfs_list_empty(&lfsck->li_list_dir))
/* XXX: Currently, we do not scan the "/REMOTE_PARENT_DIR",
* which is the agent directory to manage the objects
* which name entries reside on remote MDTs. Related
* consistency verification will be processed in LFSCK
if (lu_fid_eq(lfsck_dto2fid(obj), &lfsck->li_global_root_fid)) {
lfsck_object_put(env, obj);
/* .lustre doesn't contain "real" user objects, no need lfsck */
if (fid_is_dot_lustre(lfsck_dto2fid(obj))) {
lfsck_object_put(env, obj);
dt_read_lock(env, obj, MOR_TGT_CHILD);
if (unlikely(lfsck_is_dead_obj(obj))) {
dt_read_unlock(env, obj);
lfsck_object_put(env, obj);
/* Probe the linkEA size (zero-length buffer) under read lock. */
rc = dt_xattr_get(env, obj,
lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK,
dt_read_unlock(env, obj);
lfsck_object_put(env, obj);
if (rc < 0 && rc != -ENODATA) {
lfsck_object_put(env, obj);
/* No linkEA: climb to the parent via ".." and keep checking. */
rc = lfsck_parent_fid(env, obj, fid);
lfsck_object_put(env, obj);
if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
obj = lfsck_object_find(env, lfsck, fid);
else if (IS_ERR(obj))
if (!dt_object_exists(obj)) {
lfsck_object_put(env, obj);
/* Currently, only client visible directory can be remote. */
if (dt_object_remote(obj)) {
lfsck_object_put(env, obj);
684 struct lfsck_thread_args *lfsck_thread_args_init(struct lfsck_instance *lfsck,
685 struct lfsck_component *com,
686 struct lfsck_start_param *lsp)
688 struct lfsck_thread_args *lta;
693 return ERR_PTR(-ENOMEM);
695 rc = lu_env_init(<a->lta_env, LCT_MD_THREAD | LCT_DT_THREAD);
701 lta->lta_lfsck = lfsck_instance_get(lfsck);
703 lta->lta_com = lfsck_component_get(com);
710 void lfsck_thread_args_fini(struct lfsck_thread_args *lta)
712 if (lta->lta_com != NULL)
713 lfsck_component_put(<a->lta_env, lta->lta_com);
714 lfsck_instance_put(<a->lta_env, lta->lta_lfsck);
715 lu_env_fini(<a->lta_env);
/* LFSCK wrap functions */
/* Broadcast a scan failure to every active component. */
void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
struct lfsck_component *com;
cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
com->lc_ops->lfsck_fail(env, com, new_checked);
/* Periodic checkpoint: no-op until the next checkpoint deadline, then
 * record the current position and let each component checkpoint.  The
 * first component failure is preserved in rc1 while others continue. */
int lfsck_checkpoint(const struct lu_env *env, struct lfsck_instance *lfsck)
struct lfsck_component *com;
if (likely(cfs_time_beforeq(cfs_time_current(),
lfsck->li_time_next_checkpoint)))
lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, false);
cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
rc = com->lc_ops->lfsck_checkpoint(env, com, false);
lfsck->li_time_last_checkpoint = cfs_time_current();
lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
return rc1 != 0 ? rc1 : rc;
/* Prepare a scan run: let each component prep and pick the earliest
 * restart position, load the otable iterator there, and (master only)
 * re-open the directory iterator when resuming inside a directory.
 * On failure, components are notified via lfsck_post(); on success an
 * initial checkpoint is taken. */
int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
struct lfsck_start_param *lsp)
struct dt_object *obj = NULL;
struct lfsck_component *com;
struct lfsck_component *next;
struct lfsck_position *pos = NULL;
const struct dt_it_ops *iops =
&lfsck->li_obj_oit->do_index_ops->dio_it;
LASSERT(lfsck->li_obj_dir == NULL);
LASSERT(lfsck->li_di_dir == NULL);
lfsck->li_current_oit_processed = 0;
cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
com->lc_new_checked = 0;
if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
rc = com->lc_ops->lfsck_prep(env, com, lsp);
/* Keep the smallest non-zero component start position. */
(!lfsck_pos_is_zero(&com->lc_pos_start) &&
lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
pos = &com->lc_pos_start;
/* Init otable-based iterator. */
rc = iops->load(env, lfsck->li_di_oit, 0);
lfsck->li_oit_over = 1;
rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
lfsck->li_oit_over = 1;
if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
/* Find the directory for namespace-based traverse. */
obj = lfsck_object_find(env, lfsck, &pos->lp_dir_parent);
else if (IS_ERR(obj))
RETURN(PTR_ERR(obj));
/* XXX: Currently, skip remote object, the consistency for
* remote object will be processed in LFSCK phase III. */
if (!dt_object_exists(obj) || dt_object_remote(obj) ||
unlikely(!S_ISDIR(lfsck_object_type(obj))))
if (unlikely(!dt_try_as_dir(env, obj)))
GOTO(out, rc = -ENOTDIR);
/* Init the namespace-based directory traverse. */
iops = &obj->do_index_ops->dio_it;
di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
GOTO(out, rc = PTR_ERR(di));
LASSERT(pos->lp_dir_cookie < MDS_DIR_END_OFF);
rc = iops->load(env, di, pos->lp_dir_cookie);
if ((rc == 0) || (rc > 0 && pos->lp_dir_cookie > 0))
rc = iops->next(env, di);
/* Publish the directory iterator under li_lock. */
lfsck->li_obj_dir = lfsck_object_get(obj);
lfsck->li_cookie_dir = iops->store(env, di);
spin_lock(&lfsck->li_lock);
lfsck->li_di_dir = di;
spin_unlock(&lfsck->li_lock);
lfsck_object_put(env, obj);
cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
com->lc_ops->lfsck_post(env, com, rc, true);
lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, true);
cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
rc = com->lc_ops->lfsck_checkpoint(env, com, true);
lfsck->li_time_last_checkpoint = cfs_time_current();
lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Handle one object from the otable iteration: pass it to each scan
 * component, and if it is a directory that needs namespace scanning,
 * open a directory iterator on it and publish it under li_lock. */
int lfsck_exec_oit(const struct lu_env *env, struct lfsck_instance *lfsck,
struct dt_object *obj)
struct lfsck_component *com;
const struct dt_it_ops *iops;
LASSERT(lfsck->li_obj_dir == NULL);
cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
rc = lfsck_needs_scan_dir(env, lfsck, obj);
if (unlikely(!dt_try_as_dir(env, obj)))
GOTO(out, rc = -ENOTDIR);
iops = &obj->do_index_ops->dio_it;
di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
GOTO(out, rc = PTR_ERR(di));
rc = iops->load(env, di, 0);
rc = iops->next(env, di);
lfsck->li_obj_dir = lfsck_object_get(obj);
lfsck->li_cookie_dir = iops->store(env, di);
spin_lock(&lfsck->li_lock);
lfsck->li_di_dir = di;
spin_unlock(&lfsck->li_lock);
/* Failures are recorded via lfsck_fail(); positive rc means skip. */
lfsck_fail(env, lfsck, false);
return (rc > 0 ? 0 : rc);
/* Pass one directory entry to every active scan component. */
int lfsck_exec_dir(const struct lu_env *env, struct lfsck_instance *lfsck,
struct dt_object *obj, struct lu_dirent *ent)
struct lfsck_component *com;
cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
rc = com->lc_ops->lfsck_exec_dir(env, com, obj, ent);
/* Finish the first-phase scan: record the final position, run each
 * component's post handler with 'result', and reset checkpoint timers. */
int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
struct lfsck_component *com;
struct lfsck_component *next;
lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, false);
cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
rc = com->lc_ops->lfsck_post(env, com, result, false);
lfsck->li_time_last_checkpoint = cfs_time_current();
lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Ignore some component post failure to make other can go ahead. */
/* Run the second (double-scan) phase for every queued component (skipped
 * under dryrun), then wait until all async double scans have drained. */
int lfsck_double_scan(const struct lu_env *env, struct lfsck_instance *lfsck)
struct lfsck_component *com;
struct lfsck_component *next;
struct l_wait_info lwi = { 0 };
cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
rc = com->lc_ops->lfsck_double_scan(env, com);
l_wait_event(lfsck->li_thread.t_ctl_waitq,
atomic_read(&lfsck->li_double_scan_count) == 0,
return rc1 != 0 ? rc1 : rc;
/* Notify scan and double-scan components that a target is stopping,
 * batching the resulting RPCs in one request set and waiting for them.
 * First failure is kept in rc1; remaining components still notified. */
int lfsck_stop_notify(const struct lu_env *env, struct lfsck_instance *lfsck,
struct lfsck_tgt_descs *ltds, struct lfsck_tgt_desc *ltd)
struct ptlrpc_request_set *set;
struct lfsck_component *com;
set = ptlrpc_prep_set();
list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
if (com->lc_ops->lfsck_stop_notify != NULL) {
rc = com->lc_ops->lfsck_stop_notify(env, com, ltds,
list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
if (com->lc_ops->lfsck_stop_notify != NULL) {
rc = com->lc_ops->lfsck_stop_notify(env, com, ltds,
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
return rc1 != 0 ? rc1 : rc;
/* Tell every scan and double-scan component to abandon work immediately
 * (used when the engine is being shut down). */
void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
struct lfsck_component *com;
struct lfsck_component *next;
list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
if (com->lc_ops->lfsck_quit != NULL)
com->lc_ops->lfsck_quit(env, com);
list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
if (com->lc_ops->lfsck_quit != NULL)
com->lc_ops->lfsck_quit(env, com);
/* Build and queue an asynchronous LFSCK RPC (NOTIFY or QUERY) to 'exp',
 * attaching 'interpreter' and a copy of the interpret args; requires the
 * peer to advertise OBD_CONNECT_LFSCK. */
int lfsck_async_request(const struct lu_env *env, struct obd_export *exp,
struct lfsck_request *lr,
struct ptlrpc_request_set *set,
ptlrpc_interpterer_t interpreter,
void *args, int request)
struct lfsck_async_interpret_args *laia;
struct ptlrpc_request *req;
struct lfsck_request *tmp;
struct req_format *format;
if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
format = &RQF_LFSCK_NOTIFY;
format = &RQF_LFSCK_QUERY;
CERROR("%s: unknown async request: opc = %d\n",
exp->exp_obd->obd_name, request);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), format);
rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, request);
ptlrpc_request_free(req);
tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
ptlrpc_request_set_replen(req);
laia = ptlrpc_req_async_args(req);
*laia = *(struct lfsck_async_interpret_args *)args;
/* Extra reference pairs with the put in the interpret callback. */
lfsck_component_get(laia->laia_com);
req->rq_interpret_reply = interpreter;
ptlrpc_set_add_req(set, req);
/* external interfaces */
/* Report the configured speed limit (objects/sec) for 'key' into 'buf'. */
int lfsck_get_speed(struct dt_device *key, void *buf, int len)
struct lfsck_instance *lfsck;
rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
rc = snprintf(buf, len, "%u\n",
lfsck->li_bookmark_ram.lb_speed_limit);
lfsck_instance_put(&env, lfsck);
EXPORT_SYMBOL(lfsck_get_speed);
/* Set the speed limit for 'key' and persist it via the bookmark file;
 * serialized against other control operations by li_mutex. */
int lfsck_set_speed(struct dt_device *key, int val)
struct lfsck_instance *lfsck;
rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
mutex_lock(&lfsck->li_mutex);
__lfsck_set_speed(lfsck, val);
rc = lfsck_bookmark_store(&env, lfsck);
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(&env, lfsck);
EXPORT_SYMBOL(lfsck_set_speed);
/* Report the configured async-request window size for 'key' into 'buf'. */
int lfsck_get_windows(struct dt_device *key, void *buf, int len)
struct lfsck_instance *lfsck;
rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
rc = snprintf(buf, len, "%u\n",
lfsck->li_bookmark_ram.lb_async_windows);
lfsck_instance_put(&env, lfsck);
EXPORT_SYMBOL(lfsck_get_windows);
/* Set the async-request window size, rejecting values above
 * LFSCK_ASYNC_WIN_MAX, and persist the change in the bookmark. */
int lfsck_set_windows(struct dt_device *key, int val)
struct lfsck_instance *lfsck;
rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
if (val > LFSCK_ASYNC_WIN_MAX) {
CERROR("%s: Too large async windows size, which "
"may cause memory issues. The valid range "
"is [0 - %u]. If you do not want to restrict "
"the windows size for async requests pipeline, "
"just set it as 0.\n",
lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
} else if (lfsck->li_bookmark_ram.lb_async_windows != val) {
mutex_lock(&lfsck->li_mutex);
lfsck->li_bookmark_ram.lb_async_windows = val;
rc = lfsck_bookmark_store(&env, lfsck);
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(&env, lfsck);
EXPORT_SYMBOL(lfsck_set_windows);
/* Dump the state of one LFSCK component type for 'key' into 'buf' by
 * delegating to the component's lfsck_dump operation. */
int lfsck_dump(struct dt_device *key, void *buf, int len, enum lfsck_type type)
struct lfsck_instance *lfsck;
struct lfsck_component *com;
rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
com = lfsck_component_find(lfsck, type);
if (likely(com != NULL)) {
rc = com->lc_ops->lfsck_dump(&env, com, buf, len);
lfsck_component_put(&env, com);
lfsck_instance_put(&env, lfsck);
EXPORT_SYMBOL(lfsck_dump);
/* Start (or join) an LFSCK run on the device keyed by 'key'.
 * - If the engine thread is already running, requested component types
 *   join the running scan via their lfsck_join handlers.
 * - Otherwise: apply start parameters (speed, windows, failout, dryrun,
 *   all-mdt) to the in-RAM bookmark and persist it, move the requested
 *   idle components onto the scan list (resetting them for DOIF_RESET),
 *   compute iterator flags, and spawn the lfsck_master_engine thread.
 * Control paths are serialized by li_mutex; thread state by li_lock.
 * NOTE(review): several branch/return lines are elided in this excerpt. */
int lfsck_start(const struct lu_env *env, struct dt_device *key,
struct lfsck_start_param *lsp)
struct lfsck_start *start = lsp->lsp_start;
struct lfsck_instance *lfsck;
struct lfsck_bookmark *bk;
struct ptlrpc_thread *thread;
struct lfsck_component *com;
struct l_wait_info lwi = { 0 };
struct lfsck_thread_args *lta;
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
/* start == NULL means auto trigger paused LFSCK. */
if ((start == NULL) &&
(cfs_list_empty(&lfsck->li_list_scan) ||
OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_AUTO)))
bk = &lfsck->li_bookmark_ram;
thread = &lfsck->li_thread;
mutex_lock(&lfsck->li_mutex);
spin_lock(&lfsck->li_lock);
/* Engine already running: join each requested component type. */
if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
while (start->ls_active != 0) {
if (!(type & start->ls_active)) {
com = __lfsck_component_find(lfsck, type,
&lfsck->li_list_scan);
com = __lfsck_component_find(lfsck, type,
&lfsck->li_list_double_scan);
if (com->lc_ops->lfsck_join != NULL) {
rc = com->lc_ops->lfsck_join( env, com, lsp);
if (rc != 0 && rc != -EALREADY)
start->ls_active &= ~type;
spin_unlock(&lfsck->li_lock);
spin_unlock(&lfsck->li_lock);
lfsck->li_namespace = lsp->lsp_namespace;
lfsck->li_status = 0;
lfsck->li_oit_over = 0;
lfsck->li_drop_dryrun = 0;
lfsck->li_new_scanned = 0;
/* For auto trigger. */
start->ls_version = bk->lb_version;
if (start->ls_valid & LSV_SPEED_LIMIT) {
__lfsck_set_speed(lfsck, start->ls_speed_limit);
if (start->ls_valid & LSV_ASYNC_WINDOWS &&
bk->lb_async_windows != start->ls_async_windows) {
bk->lb_async_windows = start->ls_async_windows;
/* Sync requested failout/dryrun flags into the bookmark. */
if (start->ls_valid & LSV_ERROR_HANDLE) {
valid |= DOIV_ERROR_HANDLE;
if (start->ls_flags & LPF_FAILOUT)
flags |= DOIF_FAILOUT;
if ((start->ls_flags & LPF_FAILOUT) &&
!(bk->lb_param & LPF_FAILOUT)) {
bk->lb_param |= LPF_FAILOUT;
} else if (!(start->ls_flags & LPF_FAILOUT) &&
(bk->lb_param & LPF_FAILOUT)) {
bk->lb_param &= ~LPF_FAILOUT;
if (start->ls_valid & LSV_DRYRUN) {
valid |= DOIV_DRYRUN;
if (start->ls_flags & LPF_DRYRUN)
flags |= DOIF_DRYRUN;
if ((start->ls_flags & LPF_DRYRUN) &&
!(bk->lb_param & LPF_DRYRUN)) {
bk->lb_param |= LPF_DRYRUN;
} else if (!(start->ls_flags & LPF_DRYRUN) &&
(bk->lb_param & LPF_DRYRUN)) {
bk->lb_param &= ~LPF_DRYRUN;
lfsck->li_drop_dryrun = 1;
if (bk->lb_param & LPF_ALL_MDT &&
!(start->ls_flags & LPF_ALL_MDT)) {
bk->lb_param &= ~LPF_ALL_MDT;
} else if (!(bk->lb_param & LPF_ALL_MDT) &&
start->ls_flags & LPF_ALL_MDT) {
bk->lb_param |= LPF_ALL_MDT;
rc = lfsck_bookmark_store(env, lfsck);
if (start->ls_flags & LPF_RESET)
flags |= DOIF_RESET;
/* Activate the requested component set. */
if (start->ls_active != 0) {
struct lfsck_component *next;
if (start->ls_active == LFSCK_TYPES_ALL)
start->ls_active = LFSCK_TYPES_SUPPORTED;
if (start->ls_active & ~LFSCK_TYPES_SUPPORTED) {
start->ls_active &= ~LFSCK_TYPES_SUPPORTED;
GOTO(out, rc = -ENOTSUPP);
cfs_list_for_each_entry_safe(com, next,
&lfsck->li_list_scan, lc_link) {
if (!(com->lc_type & start->ls_active)) {
rc = com->lc_ops->lfsck_post(env, com, 0,
while (start->ls_active != 0) {
if (type & start->ls_active) {
com = __lfsck_component_find(lfsck, type,
&lfsck->li_list_idle);
/* The component status will be updated
* when its prep() is called later by
* the LFSCK main engine. */
cfs_list_del_init(&com->lc_link);
cfs_list_add_tail(&com->lc_link,
&lfsck->li_list_scan);
start->ls_active &= ~type;
cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
start->ls_active |= com->lc_type;
if (flags & DOIF_RESET) {
rc = com->lc_ops->lfsck_reset(env, com, false);
lfsck->li_args_dir = LUDA_64BITHASH | LUDA_VERIFY;
if (bk->lb_param & LPF_DRYRUN) {
lfsck->li_args_dir |= LUDA_VERIFY_DRYRUN;
valid |= DOIV_DRYRUN;
flags |= DOIF_DRYRUN;
if (bk->lb_param & LPF_FAILOUT) {
valid |= DOIV_ERROR_HANDLE;
flags |= DOIF_FAILOUT;
if (!cfs_list_empty(&lfsck->li_list_scan))
flags |= DOIF_OUTUSED;
lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
thread_set_flags(thread, 0);
lta = lfsck_thread_args_init(lfsck, NULL, lsp);
GOTO(out, rc = PTR_ERR(lta));
/* Spawn the engine; the thread owns and will free 'lta' on success. */
rc = PTR_ERR(kthread_run(lfsck_master_engine, lta, "lfsck"));
if (IS_ERR_VALUE(rc)) {
CERROR("%s: cannot start LFSCK thread: rc = %ld\n",
lfsck_lfsck2name(lfsck), rc);
lfsck_thread_args_fini(lta);
l_wait_event(thread->t_ctl_waitq,
thread_is_running(thread) ||
thread_is_stopped(thread),
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(env, lfsck);
return (rc < 0 ? rc : 0);
EXPORT_SYMBOL(lfsck_start);
/* Stop a running LFSCK: record the requested status/flags (or default
 * LS_STOPPED), mark the engine thread SVC_STOPPING, wake it and wait for
 * it to reach the stopped state.  -EALREADY if it was not running. */
int lfsck_stop(const struct lu_env *env, struct dt_device *key,
struct lfsck_stop *stop)
struct lfsck_instance *lfsck;
struct ptlrpc_thread *thread;
struct l_wait_info lwi = { 0 };
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
thread = &lfsck->li_thread;
mutex_lock(&lfsck->li_mutex);
spin_lock(&lfsck->li_lock);
if (thread_is_init(thread) || thread_is_stopped(thread)) {
spin_unlock(&lfsck->li_lock);
GOTO(out, rc = -EALREADY);
lfsck->li_status = stop->ls_status;
lfsck->li_flags = stop->ls_flags;
lfsck->li_status = LS_STOPPED;
lfsck->li_flags = 0;
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&lfsck->li_lock);
wake_up_all(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(env, lfsck);
EXPORT_SYMBOL(lfsck_stop);
/* Handle an incoming LFSCK event RPC: only phase-done events (and other
 * cases elided here) are accepted, then routed to the active component's
 * lfsck_in_notify handler. */
int lfsck_in_notify(const struct lu_env *env, struct dt_device *key,
struct lfsck_request *lr)
struct lfsck_instance *lfsck;
struct lfsck_component *com;
switch (lr->lr_event) {
case LE_PHASE1_DONE:
case LE_PHASE2_DONE:
RETURN(-EOPNOTSUPP);
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
com = lfsck_component_find(lfsck, lr->lr_active);
if (likely(com != NULL)) {
rc = com->lc_ops->lfsck_in_notify(env, com, lr);
lfsck_component_put(env, com);
lfsck_instance_put(env, lfsck);
EXPORT_SYMBOL(lfsck_in_notify);
/*
 * Query the status of the LFSCK component named by lr->lr_active on the
 * device @key, via the component's lc_ops->lfsck_query() method.
 *
 * \param[in] env	thread execution environment
 * \param[in] key	bottom dt_device identifying the LFSCK instance
 * \param[in] lr	request naming the component to query (lr_active)
 *
 * \retval component status / 0 on success; negative errno on failure
 */
1569 int lfsck_query(const struct lu_env *env, struct dt_device *key,
1570 struct lfsck_request *lr)
1572 struct lfsck_instance *lfsck;
1573 struct lfsck_component *com;
/* Reference the instance bound to @key (NULL if not registered). */
1577 lfsck = lfsck_instance_find(key, true, false);
1578 if (unlikely(lfsck == NULL))
/* Dispatch to the named component, then drop its reference. */
1581 com = lfsck_component_find(lfsck, lr->lr_active);
1582 if (likely(com != NULL)) {
1583 rc = com->lc_ops->lfsck_query(env, com);
1584 lfsck_component_put(env, com);
1589 lfsck_instance_put(env, lfsck);
1593 EXPORT_SYMBOL(lfsck_query);
/*
 * Create and register an LFSCK instance for the device @key.
 *
 * Allocates the lfsck_instance, initializes its locks/lists/refcounts,
 * sets up local OID storage and the otable-based iterator object, then
 * initializes the LFSCK components (bookmark, namespace, layout) and
 * finally publishes the instance and adopts any targets that were parked
 * on the global orphan lists before registration.
 *
 * \param[in] env		thread execution environment
 * \param[in] key		bottom dt_device the instance is bound to
 * \param[in] next		next (upper) dt_device in the stack
 * \param[in] obd		the obd_device owning this instance
 * \param[in] notify		callback for outbound LFSCK notifications
 * \param[in] notify_data	opaque cookie passed back to @notify
 * \param[in] master		whether this is the master (MDT) instance
 *
 * \retval 0 on success; -EEXIST if already registered (per the find at
 *	   entry); negative errno on other failure, with partial state
 *	   torn down by lfsck_instance_cleanup()
 */
1595 int lfsck_register(const struct lu_env *env, struct dt_device *key,
1596 struct dt_device *next, struct obd_device *obd,
1597 lfsck_out_notify notify, void *notify_data, bool master)
1599 struct lfsck_instance *lfsck;
1600 struct dt_object *root = NULL;
1601 struct dt_object *obj;
1602 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
/* Refuse double registration for the same device. */
1606 lfsck = lfsck_instance_find(key, false, false);
1607 if (unlikely(lfsck != NULL))
1610 OBD_ALLOC_PTR(lfsck);
/* Basic initialization: locks, component lists, refcounts, waitq. */
1614 mutex_init(&lfsck->li_mutex);
1615 spin_lock_init(&lfsck->li_lock);
1616 CFS_INIT_LIST_HEAD(&lfsck->li_link);
1617 CFS_INIT_LIST_HEAD(&lfsck->li_list_scan);
1618 CFS_INIT_LIST_HEAD(&lfsck->li_list_dir);
1619 CFS_INIT_LIST_HEAD(&lfsck->li_list_double_scan);
1620 CFS_INIT_LIST_HEAD(&lfsck->li_list_idle);
1621 atomic_set(&lfsck->li_ref, 1);
1622 atomic_set(&lfsck->li_double_scan_count, 0);
1623 init_waitqueue_head(&lfsck->li_thread.t_ctl_waitq);
1624 lfsck->li_out_notify = notify;
1625 lfsck->li_out_notify_data = notify_data;
1626 lfsck->li_next = next;
1627 lfsck->li_bottom = key;
1628 lfsck->li_obd = obd;
/* Target tables for OSTs and MDTs tracked by this instance. */
1630 rc = lfsck_tgt_descs_init(&lfsck->li_ost_descs);
1634 rc = lfsck_tgt_descs_init(&lfsck->li_mdt_descs);
/* Local OID storage for LFSCK's own on-disk objects. */
1638 fid->f_seq = FID_SEQ_LOCAL_NAME;
1641 rc = local_oid_storage_init(env, lfsck->li_bottom, fid, &lfsck->li_los);
/* Locate the backend root directory object. */
1645 rc = dt_root_get(env, key, fid);
1649 root = dt_locate(env, lfsck->li_bottom, fid);
1651 GOTO(out, rc = PTR_ERR(root));
1653 if (unlikely(!dt_try_as_dir(env, root)))
1654 GOTO(out, rc = -ENOTDIR);
1656 lfsck->li_local_root_fid = *fid;
1658 lfsck->li_master = 1;
/* On MDT0 resolve the global "ROOT" FID under the backend root. */
1659 if (lfsck_dev_idx(lfsck->li_bottom) == 0) {
1660 rc = dt_lookup(env, root,
1661 (struct dt_rec *)(&lfsck->li_global_root_fid),
1662 (const struct dt_key *)"ROOT", BYPASS_CAPA);
/* Locate the otable-it object used for the object-table iteration. */
1668 fid->f_seq = FID_SEQ_LOCAL_FILE;
1669 fid->f_oid = OTABLE_IT_OID;
1671 obj = dt_locate(env, lfsck->li_bottom, fid);
1673 GOTO(out, rc = PTR_ERR(obj));
1675 lfsck->li_obj_oit = obj;
1676 rc = obj->do_ops->do_index_try(env, obj, &dt_otable_features);
/* Backend without otable support cannot run LFSCK. */
1678 if (rc == -ENOTSUPP)
/* Component setup: bookmark first, then namespace and layout. */
1684 rc = lfsck_bookmark_setup(env, lfsck);
1689 rc = lfsck_namespace_setup(env, lfsck);
1694 rc = lfsck_layout_setup(env, lfsck);
1698 /* XXX: more LFSCK components initialization to be added here. */
/* Publish the instance, then adopt targets parked on orphan lists. */
1701 rc = lfsck_instance_add(lfsck);
1703 rc = lfsck_add_target_from_orphan(env, lfsck);
1705 if (root != NULL && !IS_ERR(root))
1706 lu_object_put(env, &root->do_lu);
/* Error path: tear down whatever was set up above. */
1708 lfsck_instance_cleanup(env, lfsck);
1711 EXPORT_SYMBOL(lfsck_register);
/*
 * Unregister the LFSCK instance bound to @key.
 *
 * The find below uses unlink=true (third argument), detaching the
 * instance from the global list; the final reference is then dropped
 * (NOTE(review): the NULL-check line between find and put is elided
 * from this excerpt — confirm against the full source).
 */
1713 void lfsck_degister(const struct lu_env *env, struct dt_device *key)
1715 struct lfsck_instance *lfsck;
1717 lfsck = lfsck_instance_find(key, false, true);
1719 lfsck_instance_put(env, lfsck);
1721 EXPORT_SYMBOL(lfsck_degister);
/*
 * Register a target (OST or MDT) with the LFSCK instance bound to @key.
 *
 * If the instance does not exist yet (target arrived before
 * lfsck_register()), the target descriptor is parked on the matching
 * global orphan list to be adopted later by
 * lfsck_add_target_from_orphan().
 *
 * \param[in] env	thread execution environment
 * \param[in] key	bottom dt_device identifying the LFSCK instance
 * \param[in] tgt	the target device being added
 * \param[in] exp	export for the target connection
 * \param[in] index	target index (OST/MDT index)
 * \param[in] for_ost	true for an OST target, false for an MDT target
 *
 * \retval 0 on success; negative errno on failure
 */
1723 int lfsck_add_target(const struct lu_env *env, struct dt_device *key,
1724 struct dt_device *tgt, struct obd_export *exp,
1725 __u32 index, bool for_ost)
1727 struct lfsck_instance *lfsck;
1728 struct lfsck_tgt_desc *ltd;
/* Initialize the freshly allocated descriptor (alloc lines elided). */
1739 INIT_LIST_HEAD(&ltd->ltd_orphan_list)
1740 INIT_LIST_HEAD(&ltd->ltd_layout_list)
1741 INIT_LIST_HEAD(&ltd->ltd_layout_phase_list)
1742 atomic_set(&ltd->ltd_ref, 1)
1743 ltd->ltd_index = index;
1745 spin_lock(&lfsck_instance_lock);
1746 lfsck = __lfsck_instance_find(key, true, false);
1747 if (lfsck == NULL) {
/* No instance yet: park the target on the global orphan list. */
1749 list_add_tail(&ltd->ltd_orphan_list,
1750 &lfsck_ost_orphan_list);
1752 list_add_tail(&ltd->ltd_orphan_list,
1753 &lfsck_mdt_orphan_list);
1754 spin_unlock(&lfsck_instance_lock);
1758 spin_unlock(&lfsck_instance_lock);
/* Instance exists: attach the target directly. */
1760 rc = __lfsck_add_target(env, lfsck, ltd, for_ost, false);
1764 lfsck_instance_put(env, lfsck);
1768 EXPORT_SYMBOL(lfsck_add_target);
/*
 * Remove a target (OST or MDT) from the LFSCK instance bound to @key.
 *
 * First checks the global orphan list (targets added before the instance
 * existed); otherwise removes the target from the instance's target
 * table, notifying a running master LFSCK so it can stop using the
 * target.
 *
 * \param[in] env	thread execution environment
 * \param[in] key	bottom dt_device identifying the LFSCK instance
 * \param[in] tgt	the target device being removed
 * \param[in] index	target index (OST/MDT index)
 * \param[in] for_ost	true for an OST target, false for an MDT target
 */
1770 void lfsck_del_target(const struct lu_env *env, struct dt_device *key,
1771 struct dt_device *tgt, __u32 index, bool for_ost)
1773 struct lfsck_instance *lfsck;
1774 struct lfsck_tgt_descs *ltds;
1775 struct lfsck_tgt_desc *ltd;
1776 struct list_head *head;
/* Pick the global orphan list matching the target type. */
1781 head = &lfsck_ost_orphan_list;
1783 head = &lfsck_mdt_orphan_list;
1785 spin_lock(&lfsck_instance_lock);
/* If the target never left the orphan list, just unlink it there. */
1786 list_for_each_entry(ltd, head, ltd_orphan_list) {
1787 if (ltd->ltd_tgt == tgt) {
1788 list_del_init(&ltd->ltd_orphan_list)
1789 spin_unlock(&lfsck_instance_lock);
1796 lfsck = __lfsck_instance_find(key, true, false);
1797 spin_unlock(&lfsck_instance_lock);
1798 if (unlikely(lfsck == NULL))
/* Select the per-instance target table matching the target type. */
1802 ltds = &lfsck->li_ost_descs;
1804 ltds = &lfsck->li_mdt_descs;
1806 down_write(&ltds->ltd_rw_sem)
1808 LASSERT(ltds->ltd_tgts_bitmap != NULL);
1810 if (unlikely(index >= ltds->ltd_tgts_bitmap->size))
1813 ltd = LTD_TGT(ltds, index);
1814 if (unlikely(ltd == NULL))
/* Detach the target from any in-flight layout scan lists. */
1818 spin_lock(&ltds->ltd_lock)
1820 if (!list_empty(&ltd->ltd_layout_list)) {
1821 list_del_init(&ltd->ltd_layout_phase_list)
1822 list_del_init(&ltd->ltd_layout_list)
1825 LASSERT(list_empty(&ltd->ltd_layout_phase_list))
1827 spin_unlock(&ltds->ltd_lock)
/* A running master scan must be told the target is going away. */
1829 if (stop && lfsck->li_master)
1830 lfsck_stop_notify(env, lfsck, ltds, ltd);
1832 LASSERT(ltds->ltd_tgtnr > 0);
1835 cfs_bitmap_clear(ltds->ltd_tgts_bitmap, index);
1836 LTD_TGT(ltds, index) = NULL;
/*
 * NOTE(review): both branches below take li_ost_descs.ltd_orphan;
 * the second one (line 1844, presumably the MDT/else branch) looks
 * like a copy-paste and should likely be li_mdt_descs.ltd_orphan —
 * confirm against upstream lfsck_lib.c.
 */
1842 head = &lfsck->li_ost_descs.ltd_orphan;
1844 head = &lfsck->li_ost_descs.ltd_orphan;
/* Also drop the target from the instance-local orphan list if present. */
1846 list_for_each_entry(ltd, head, ltd_orphan_list) {
1847 if (ltd->ltd_tgt == tgt) {
1848 list_del_init(&ltd->ltd_orphan_list)
1855 up_write(&ltds->ltd_rw_sem)
1856 lfsck_instance_put(env, lfsck);
1858 EXPORT_SYMBOL(lfsck_del_target);
/*
 * Module init: set up the global orphan lists and the per-thread
 * context key, then register the LFSCK entry points (start, in_notify,
 * query) with the target layer.  Returns the result of the context-key
 * registration (error handling lines are elided from this excerpt).
 */
1860 static int __init lfsck_init(void)
1864 INIT_LIST_HEAD(&lfsck_ost_orphan_list);
1865 INIT_LIST_HEAD(&lfsck_mdt_orphan_list);
1866 lfsck_key_init_generic(&lfsck_thread_key, NULL);
1867 rc = lu_context_key_register(&lfsck_thread_key);
1869 tgt_register_lfsck_start(lfsck_start);
1870 tgt_register_lfsck_in_notify(lfsck_in_notify);
1871 tgt_register_lfsck_query(lfsck_query);
/*
 * Module exit: all instances must already be degistered (asserted),
 * then any targets still parked on the global OST/MDT orphan lists are
 * unlinked (and presumably released — the put/free lines are elided
 * from this excerpt) before the thread context key is dropped.
 */
1877 static void __exit lfsck_exit(void)
1879 struct lfsck_tgt_desc *ltd;
1880 struct lfsck_tgt_desc *next;
1882 LASSERT(cfs_list_empty(&lfsck_instance_list));
/* Drain leftover OST orphans. */
1884 list_for_each_entry_safe(ltd, next, &lfsck_ost_orphan_list,
1886 list_del_init(&ltd->ltd_orphan_list)
/* Drain leftover MDT orphans. */
1890 list_for_each_entry_safe(ltd, next, &lfsck_mdt_orphan_list,
1892 list_del_init(&ltd->ltd_orphan_list)
1896 lu_context_key_degister(&lfsck_thread_key);
/* Kernel module metadata and init/exit registration. */
1899 MODULE_AUTHOR("Intel Corporation <http://www.intel.com/>");
1900 MODULE_DESCRIPTION("LFSCK");
1901 MODULE_LICENSE("GPL");
1903 cfs_module(lfsck, LUSTRE_VERSION_STRING, lfsck_init, lfsck_exit);