4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2012, 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_lib.c
28 * Author: Fan, Yong <fan.yong@intel.com>
31 #define DEBUG_SUBSYSTEM S_LFSCK
33 #include <libcfs/list.h>
34 #include <lu_object.h>
35 #include <dt_object.h>
36 #include <md_object.h>
37 #include <lustre_fld.h>
38 #include <lustre_lib.h>
39 #include <lustre_net.h>
40 #include <lustre_lfsck.h>
41 #include <lustre/lustre_lfsck_user.h>
43 #include "lfsck_internal.h"
45 /* define lfsck thread key */
46 LU_KEY_INIT(lfsck, struct lfsck_thread_info);
48 static void lfsck_key_fini(const struct lu_context *ctx,
49 struct lu_context_key *key, void *data)
51 struct lfsck_thread_info *info = data;
53 lu_buf_free(&info->lti_linkea_buf);
54 lu_buf_free(&info->lti_big_buf);
58 LU_CONTEXT_KEY_DEFINE(lfsck, LCT_MD_THREAD | LCT_DT_THREAD);
59 LU_KEY_INIT_GENERIC(lfsck);
61 static CFS_LIST_HEAD(lfsck_instance_list);
62 static struct list_head lfsck_ost_orphan_list;
63 static struct list_head lfsck_mdt_orphan_list;
64 static DEFINE_SPINLOCK(lfsck_instance_lock);
66 static const char *lfsck_status_names[] = {
68 [LS_SCANNING_PHASE1] = "scanning-phase1",
69 [LS_SCANNING_PHASE2] = "scanning-phase2",
70 [LS_COMPLETED] = "completed",
71 [LS_FAILED] = "failed",
72 [LS_STOPPED] = "stopped",
73 [LS_PAUSED] = "paused",
74 [LS_CRASHED] = "crashed",
75 [LS_PARTIAL] = "partial",
76 [LS_CO_FAILED] = "co-failed",
77 [LS_CO_STOPPED] = "co-stopped",
78 [LS_CO_PAUSED] = "co-paused"
81 const char *lfsck_flags_names[] = {
90 const char *lfsck_param_names[] = {
98 const char *lfsck_status2names(enum lfsck_status status)
100 if (unlikely(status < 0 || status >= LS_MAX))
103 return lfsck_status_names[status];
106 static int lfsck_tgt_descs_init(struct lfsck_tgt_descs *ltds)
108 spin_lock_init(<ds->ltd_lock);
109 init_rwsem(<ds->ltd_rw_sem);
110 INIT_LIST_HEAD(<ds->ltd_orphan);
111 ltds->ltd_tgts_bitmap = CFS_ALLOCATE_BITMAP(BITS_PER_LONG);
112 if (ltds->ltd_tgts_bitmap == NULL)
118 static void lfsck_tgt_descs_fini(struct lfsck_tgt_descs *ltds)
120 struct lfsck_tgt_desc *ltd;
121 struct lfsck_tgt_desc *next;
124 down_write(<ds->ltd_rw_sem);
126 list_for_each_entry_safe(ltd, next, <ds->ltd_orphan,
128 list_del_init(<d->ltd_orphan_list);
132 if (unlikely(ltds->ltd_tgts_bitmap == NULL)) {
133 up_write(<ds->ltd_rw_sem);
138 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
139 ltd = LTD_TGT(ltds, idx);
140 if (likely(ltd != NULL)) {
141 LASSERT(list_empty(<d->ltd_layout_list));
142 LASSERT(list_empty(<d->ltd_layout_phase_list));
145 cfs_bitmap_clear(ltds->ltd_tgts_bitmap, idx);
146 LTD_TGT(ltds, idx) = NULL;
151 LASSERTF(ltds->ltd_tgtnr == 0, "tgt count unmatched: %d\n",
154 for (idx = 0; idx < TGT_PTRS; idx++) {
155 if (ltds->ltd_tgts_idx[idx] != NULL) {
156 OBD_FREE_PTR(ltds->ltd_tgts_idx[idx]);
157 ltds->ltd_tgts_idx[idx] = NULL;
161 CFS_FREE_BITMAP(ltds->ltd_tgts_bitmap);
162 ltds->ltd_tgts_bitmap = NULL;
163 up_write(<ds->ltd_rw_sem);
166 static int __lfsck_add_target(const struct lu_env *env,
167 struct lfsck_instance *lfsck,
168 struct lfsck_tgt_desc *ltd,
169 bool for_ost, bool locked)
171 struct lfsck_tgt_descs *ltds;
172 __u32 index = ltd->ltd_index;
177 ltds = &lfsck->li_ost_descs;
179 ltds = &lfsck->li_mdt_descs;
182 down_write(<ds->ltd_rw_sem);
184 LASSERT(ltds->ltd_tgts_bitmap != NULL);
186 if (index >= ltds->ltd_tgts_bitmap->size) {
187 __u32 newsize = max((__u32)ltds->ltd_tgts_bitmap->size,
188 (__u32)BITS_PER_LONG);
189 cfs_bitmap_t *old_bitmap = ltds->ltd_tgts_bitmap;
190 cfs_bitmap_t *new_bitmap;
192 while (newsize < index + 1)
195 new_bitmap = CFS_ALLOCATE_BITMAP(newsize);
196 if (new_bitmap == NULL)
197 GOTO(unlock, rc = -ENOMEM);
199 if (ltds->ltd_tgtnr > 0)
200 cfs_bitmap_copy(new_bitmap, old_bitmap);
201 ltds->ltd_tgts_bitmap = new_bitmap;
202 CFS_FREE_BITMAP(old_bitmap);
205 if (cfs_bitmap_check(ltds->ltd_tgts_bitmap, index)) {
206 CERROR("%s: the device %s (%u) is registered already\n",
207 lfsck_lfsck2name(lfsck),
208 ltd->ltd_tgt->dd_lu_dev.ld_obd->obd_name, index);
209 GOTO(unlock, rc = -EEXIST);
212 if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL) {
213 OBD_ALLOC_PTR(ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK]);
214 if (ltds->ltd_tgts_idx[index / TGT_PTRS_PER_BLOCK] == NULL)
215 GOTO(unlock, rc = -ENOMEM);
218 LTD_TGT(ltds, index) = ltd;
219 cfs_bitmap_set(ltds->ltd_tgts_bitmap, index);
222 GOTO(unlock, rc = 0);
226 up_write(<ds->ltd_rw_sem);
231 static int lfsck_add_target_from_orphan(const struct lu_env *env,
232 struct lfsck_instance *lfsck)
234 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
235 struct lfsck_tgt_desc *ltd;
236 struct lfsck_tgt_desc *next;
237 struct list_head *head = &lfsck_ost_orphan_list;
242 spin_lock(&lfsck_instance_lock);
243 list_for_each_entry_safe(ltd, next, head, ltd_orphan_list) {
244 if (ltd->ltd_key == lfsck->li_bottom) {
245 list_del_init(<d->ltd_orphan_list);
246 list_add_tail(<d->ltd_orphan_list,
250 spin_unlock(&lfsck_instance_lock);
252 down_write(<ds->ltd_rw_sem);
253 while (!list_empty(<ds->ltd_orphan)) {
254 ltd = list_entry(ltds->ltd_orphan.next,
255 struct lfsck_tgt_desc,
257 list_del_init(<d->ltd_orphan_list);
258 rc = __lfsck_add_target(env, lfsck, ltd, for_ost, true);
259 /* Do not hold the semaphore for too long time. */
260 up_write(<ds->ltd_rw_sem);
264 down_write(<ds->ltd_rw_sem);
266 up_write(<ds->ltd_rw_sem);
269 ltds = &lfsck->li_mdt_descs;
270 head = &lfsck_mdt_orphan_list;
278 static inline struct lfsck_component *
279 __lfsck_component_find(struct lfsck_instance *lfsck, __u16 type, cfs_list_t *list)
281 struct lfsck_component *com;
283 cfs_list_for_each_entry(com, list, lc_link) {
284 if (com->lc_type == type)
290 struct lfsck_component *
291 lfsck_component_find(struct lfsck_instance *lfsck, __u16 type)
293 struct lfsck_component *com;
295 spin_lock(&lfsck->li_lock);
296 com = __lfsck_component_find(lfsck, type, &lfsck->li_list_scan);
300 com = __lfsck_component_find(lfsck, type,
301 &lfsck->li_list_double_scan);
305 com = __lfsck_component_find(lfsck, type, &lfsck->li_list_idle);
309 lfsck_component_get(com);
310 spin_unlock(&lfsck->li_lock);
314 void lfsck_component_cleanup(const struct lu_env *env,
315 struct lfsck_component *com)
317 if (!cfs_list_empty(&com->lc_link))
318 cfs_list_del_init(&com->lc_link);
319 if (!cfs_list_empty(&com->lc_link_dir))
320 cfs_list_del_init(&com->lc_link_dir);
322 lfsck_component_put(env, com);
325 int lfsck_fid_alloc(const struct lu_env *env, struct lfsck_instance *lfsck,
326 struct lu_fid *fid, bool locked)
328 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
333 mutex_lock(&lfsck->li_mutex);
335 rc = seq_client_alloc_fid(env, lfsck->li_seq, fid);
337 bk->lb_last_fid = *fid;
338 /* We do not care about whether the subsequent sub-operations
339 * failed or not. The worst case is that one FID is lost that
340 * is not a big issue for the LFSCK since it is relative rare
341 * for LFSCK create. */
342 rc = lfsck_bookmark_store(env, lfsck);
346 mutex_unlock(&lfsck->li_mutex);
351 static const char dot[] = ".";
352 static const char dotdot[] = "..";
354 static int lfsck_create_lpf_local(const struct lu_env *env,
355 struct lfsck_instance *lfsck,
356 struct dt_object *parent,
357 struct dt_object *child,
359 struct dt_object_format *dof,
362 struct dt_device *dev = lfsck->li_bottom;
363 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
364 struct dt_object *bk_obj = lfsck->li_bookmark_obj;
365 const struct lu_fid *cfid = lu_object_fid(&child->do_lu);
366 struct thandle *th = NULL;
368 int len = sizeof(struct lfsck_bookmark);
372 th = dt_trans_create(env, dev);
376 /* 1a. create child */
377 rc = dt_declare_create(env, child, la, NULL, dof, th);
381 /* 2a. increase child nlink */
382 rc = dt_declare_ref_add(env, child, th);
386 /* 3a. insert name into parent dir */
387 rc = dt_declare_insert(env, parent, (const struct dt_rec *)cfid,
388 (const struct dt_key *)name, th);
392 /* 4a. increase parent nlink */
393 rc = dt_declare_ref_add(env, parent, th);
397 /* 5a. update bookmark */
398 rc = dt_declare_record_write(env, bk_obj,
399 lfsck_buf_get(env, bk, len), 0, th);
403 rc = dt_trans_start_local(env, dev, th);
407 dt_write_lock(env, child, 0);
408 /* 1b.1 create child */
409 rc = dt_create(env, child, la, NULL, dof, th);
413 if (unlikely(!dt_try_as_dir(env, child)))
414 GOTO(unlock, rc = -ENOTDIR);
416 /* 1b.2 insert dot into child dir */
417 rc = dt_insert(env, child, (const struct dt_rec *)cfid,
418 (const struct dt_key *)dot, th, BYPASS_CAPA, 1);
422 /* 1b.3 insert dotdot into child dir */
423 rc = dt_insert(env, child, (const struct dt_rec *)&LU_LPF_FID,
424 (const struct dt_key *)dotdot, th, BYPASS_CAPA, 1);
428 /* 2b. increase child nlink */
429 rc = dt_ref_add(env, child, th);
430 dt_write_unlock(env, child);
434 /* 3b. insert name into parent dir */
435 rc = dt_insert(env, parent, (const struct dt_rec *)cfid,
436 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
440 dt_write_lock(env, parent, 0);
441 /* 4b. increase parent nlink */
442 rc = dt_ref_add(env, parent, th);
443 dt_write_unlock(env, parent);
447 bk->lb_lpf_fid = *cfid;
448 lfsck_bookmark_cpu_to_le(&lfsck->li_bookmark_disk, bk);
450 /* 5b. update bookmark */
451 rc = dt_record_write(env, bk_obj,
452 lfsck_buf_get(env, bk, len), &pos, th);
457 dt_write_unlock(env, child);
460 dt_trans_stop(env, dev, th);
465 static int lfsck_create_lpf_remote(const struct lu_env *env,
466 struct lfsck_instance *lfsck,
467 struct dt_object *parent,
468 struct dt_object *child,
470 struct dt_object_format *dof,
473 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
474 struct dt_object *bk_obj = lfsck->li_bookmark_obj;
475 const struct lu_fid *cfid = lu_object_fid(&child->do_lu);
476 struct thandle *th = NULL;
477 struct dt_device *dev;
479 int len = sizeof(struct lfsck_bookmark);
483 /* Create .lustre/lost+found/MDTxxxx. */
485 /* XXX: Currently, cross-MDT create operation needs to create the child
486 * object firstly, then insert name into the parent directory. For
487 * this case, the child object resides on current MDT (local), but
488 * the parent ".lustre/lost+found" may be on remote MDT. It is not
489 * easy to contain all the sub-modifications orderly within single
492 * To avoid more inconsistency, we split the create operation into
495 * 1) create the child locally.
496 * 2) insert the name "MDTXXXX" in the parent ".lustre/lost+found"
497 * remotely and update the lfsck_bookmark::lb_lpf_fid locally.
499 * If 1) done but 2) failed, then the worst case is that we lose
500 * one object locally, which is not a big issue. (can be repaird
501 * by LFSCK phase III) */
505 dev = lfsck->li_bottom;
506 th = dt_trans_create(env, dev);
510 /* 1a. create child locally. */
511 rc = dt_declare_create(env, child, la, NULL, dof, th);
515 /* 2a. increase child nlink locally. */
516 rc = dt_declare_ref_add(env, child, th);
520 rc = dt_trans_start_local(env, dev, th);
524 dt_write_lock(env, child, 0);
525 /* 1b. create child locally. */
526 rc = dt_create(env, child, la, NULL, dof, th);
530 if (unlikely(!dt_try_as_dir(env, child)))
531 GOTO(unlock, rc = -ENOTDIR);
533 /* 2b.1 insert dot into child dir locally. */
534 rc = dt_insert(env, child, (const struct dt_rec *)cfid,
535 (const struct dt_key *)dot, th, BYPASS_CAPA, 1);
539 /* 2b.2 insert dotdot into child dir locally. */
540 rc = dt_insert(env, child, (const struct dt_rec *)&LU_LPF_FID,
541 (const struct dt_key *)dotdot, th, BYPASS_CAPA, 1);
545 /* 2b.3 increase child nlink locally. */
546 rc = dt_ref_add(env, child, th);
547 dt_write_unlock(env, child);
548 dt_trans_stop(env, dev, th);
552 /* Transaction II: */
554 dev = lfsck->li_next;
555 th = dt_trans_create(env, dev);
559 /* 3a. insert name into parent dir remotely. */
560 rc = dt_declare_insert(env, parent, (const struct dt_rec *)cfid,
561 (const struct dt_key *)name, th);
565 /* 4a. increase parent nlink remotely. */
566 rc = dt_declare_ref_add(env, parent, th);
570 /* 5a. decrease child nlink for dotdot locally if former remote
572 rc = dt_declare_ref_del(env, child, th);
576 /* 6a. decrease child nlink for dot locally if former remote
578 rc = dt_declare_ref_del(env, child, th);
582 /* 7a. destroy child locally if former remote update failed. */
583 rc = dt_declare_destroy(env, child, th);
587 /* 8a. update bookmark locally. */
588 rc = dt_declare_record_write(env, bk_obj,
589 lfsck_buf_get(env, bk, len), 0, th);
593 rc = dt_trans_start(env, dev, th);
597 /* 3b. insert name into parent dir remotely. */
598 rc = dt_insert(env, parent, (const struct dt_rec *)cfid,
599 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
601 dt_write_lock(env, parent, 0);
602 /* 4b. increase parent nlink remotely. */
603 rc = dt_ref_add(env, parent, th);
604 dt_write_unlock(env, parent);
607 /* 5b. decrease child nlink for dotdot locally. */
608 dt_ref_del(env, child, th);
609 /* 6b. decrease child nlink for dot locally. */
610 dt_ref_del(env, child, th);
611 /* 7b. destroy child locally. */
612 dt_destroy(env, child, th);
616 bk->lb_lpf_fid = *cfid;
617 lfsck_bookmark_cpu_to_le(&lfsck->li_bookmark_disk, bk);
619 /* 8b. update bookmark locally. */
620 rc = dt_record_write(env, bk_obj,
621 lfsck_buf_get(env, bk, len), &pos, th);
626 dt_write_unlock(env, child);
628 dt_trans_stop(env, dev, th);
633 /* Do NOT create .lustre/lost+found/MDTxxxx when register the lfsck instance,
634 * because the MDT0 maybe not reaady for sequence allocation yet. We do that
635 * only when it is required, such as orphan OST-objects repairing. */
636 int lfsck_create_lpf(const struct lu_env *env, struct lfsck_instance *lfsck)
638 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
639 struct lfsck_thread_info *info = lfsck_env_info(env);
640 struct lu_fid *cfid = &info->lti_fid2;
641 struct lu_attr *la = &info->lti_la;
642 struct dt_object_format *dof = &info->lti_dof;
643 struct dt_object *parent = NULL;
644 struct dt_object *child = NULL;
646 int node = lfsck_dev_idx(lfsck->li_bottom);
650 LASSERT(lfsck->li_master);
652 sprintf(name, "MDT%04x", node);
654 parent = lfsck_object_find_by_dev(env, lfsck->li_bottom,
657 struct lfsck_tgt_desc *ltd;
659 ltd = lfsck_tgt_get(&lfsck->li_mdt_descs, 0);
660 if (unlikely(ltd == NULL))
663 parent = lfsck_object_find_by_dev(env, ltd->ltd_tgt,
668 RETURN(PTR_ERR(parent));
670 if (unlikely(!dt_try_as_dir(env, parent)))
671 GOTO(out, rc = -ENOTDIR);
673 mutex_lock(&lfsck->li_mutex);
674 if (lfsck->li_lpf_obj != NULL)
675 GOTO(unlock, rc = 0);
677 if (fid_is_zero(&bk->lb_lpf_fid)) {
678 /* There is corner case that: in former LFSCK scanning we have
679 * created the .lustre/lost+found/MDTxxxx but failed to update
680 * the lfsck_bookmark::lb_lpf_fid successfully. So need lookup
681 * it from MDT0 firstly. */
682 rc = dt_lookup(env, parent, (struct dt_rec *)cfid,
683 (const struct dt_key *)name, BYPASS_CAPA);
684 if (rc != 0 && rc != -ENOENT)
688 bk->lb_lpf_fid = *cfid;
689 rc = lfsck_bookmark_store(env, lfsck);
691 rc = lfsck_fid_alloc(env, lfsck, cfid, true);
696 *cfid = bk->lb_lpf_fid;
699 child = lfsck_object_find_by_dev(env, lfsck->li_bottom, cfid);
701 GOTO(unlock, rc = PTR_ERR(child));
703 if (dt_object_exists(child) != 0) {
704 if (unlikely(!dt_try_as_dir(env, child)))
705 GOTO(unlock, rc = -ENOTDIR);
707 lfsck->li_lpf_obj = child;
708 GOTO(unlock, rc = 0);
711 memset(la, 0, sizeof(*la));
712 la->la_atime = la->la_mtime = la->la_ctime = cfs_time_current_sec();
713 la->la_mode = S_IFDIR | S_IRWXU;
714 la->la_valid = LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
716 memset(dof, 0, sizeof(*dof));
717 dof->dof_type = dt_mode_to_dft(S_IFDIR);
720 rc = lfsck_create_lpf_local(env, lfsck, parent, child, la,
723 rc = lfsck_create_lpf_remote(env, lfsck, parent, child, la,
726 lfsck->li_lpf_obj = child;
731 mutex_unlock(&lfsck->li_mutex);
732 if (rc != 0 && child != NULL && !IS_ERR(child))
733 lu_object_put(env, &child->do_lu);
735 if (parent != NULL && !IS_ERR(parent))
736 lu_object_put(env, &parent->do_lu);
741 static int lfsck_fid_init(struct lfsck_instance *lfsck)
743 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
744 struct seq_server_site *ss;
749 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
750 if (unlikely(ss == NULL))
753 OBD_ALLOC_PTR(lfsck->li_seq);
754 if (lfsck->li_seq == NULL)
757 OBD_ALLOC(prefix, MAX_OBD_NAME + 7);
759 GOTO(out, rc = -ENOMEM);
761 snprintf(prefix, MAX_OBD_NAME + 7, "lfsck-%s", lfsck_lfsck2name(lfsck));
762 rc = seq_client_init(lfsck->li_seq, NULL, LUSTRE_SEQ_METADATA, prefix,
764 OBD_FREE(prefix, MAX_OBD_NAME + 7);
768 if (fid_is_sane(&bk->lb_last_fid))
769 lfsck->li_seq->lcs_fid = bk->lb_last_fid;
774 OBD_FREE_PTR(lfsck->li_seq);
775 lfsck->li_seq = NULL;
780 static void lfsck_fid_fini(struct lfsck_instance *lfsck)
782 if (lfsck->li_seq != NULL) {
783 seq_client_fini(lfsck->li_seq);
784 OBD_FREE_PTR(lfsck->li_seq);
785 lfsck->li_seq = NULL;
789 void lfsck_instance_cleanup(const struct lu_env *env,
790 struct lfsck_instance *lfsck)
792 struct ptlrpc_thread *thread = &lfsck->li_thread;
793 struct lfsck_component *com;
796 LASSERT(list_empty(&lfsck->li_link));
797 LASSERT(thread_is_init(thread) || thread_is_stopped(thread));
799 if (lfsck->li_obj_oit != NULL) {
800 lu_object_put_nocache(env, &lfsck->li_obj_oit->do_lu);
801 lfsck->li_obj_oit = NULL;
804 LASSERT(lfsck->li_obj_dir == NULL);
806 while (!cfs_list_empty(&lfsck->li_list_scan)) {
807 com = cfs_list_entry(lfsck->li_list_scan.next,
808 struct lfsck_component,
810 lfsck_component_cleanup(env, com);
813 LASSERT(cfs_list_empty(&lfsck->li_list_dir));
815 while (!cfs_list_empty(&lfsck->li_list_double_scan)) {
816 com = cfs_list_entry(lfsck->li_list_double_scan.next,
817 struct lfsck_component,
819 lfsck_component_cleanup(env, com);
822 while (!cfs_list_empty(&lfsck->li_list_idle)) {
823 com = cfs_list_entry(lfsck->li_list_idle.next,
824 struct lfsck_component,
826 lfsck_component_cleanup(env, com);
829 lfsck_tgt_descs_fini(&lfsck->li_ost_descs);
830 lfsck_tgt_descs_fini(&lfsck->li_mdt_descs);
832 if (lfsck->li_bookmark_obj != NULL) {
833 lu_object_put_nocache(env, &lfsck->li_bookmark_obj->do_lu);
834 lfsck->li_bookmark_obj = NULL;
837 if (lfsck->li_lpf_obj != NULL) {
838 lu_object_put(env, &lfsck->li_lpf_obj->do_lu);
839 lfsck->li_lpf_obj = NULL;
842 if (lfsck->li_los != NULL) {
843 local_oid_storage_fini(env, lfsck->li_los);
844 lfsck->li_los = NULL;
847 lfsck_fid_fini(lfsck);
852 static inline struct lfsck_instance *
853 __lfsck_instance_find(struct dt_device *key, bool ref, bool unlink)
855 struct lfsck_instance *lfsck;
857 cfs_list_for_each_entry(lfsck, &lfsck_instance_list, li_link) {
858 if (lfsck->li_bottom == key) {
860 lfsck_instance_get(lfsck);
862 list_del_init(&lfsck->li_link);
871 struct lfsck_instance *lfsck_instance_find(struct dt_device *key, bool ref,
874 struct lfsck_instance *lfsck;
876 spin_lock(&lfsck_instance_lock);
877 lfsck = __lfsck_instance_find(key, ref, unlink);
878 spin_unlock(&lfsck_instance_lock);
883 static inline int lfsck_instance_add(struct lfsck_instance *lfsck)
885 struct lfsck_instance *tmp;
887 spin_lock(&lfsck_instance_lock);
888 cfs_list_for_each_entry(tmp, &lfsck_instance_list, li_link) {
889 if (lfsck->li_bottom == tmp->li_bottom) {
890 spin_unlock(&lfsck_instance_lock);
895 cfs_list_add_tail(&lfsck->li_link, &lfsck_instance_list);
896 spin_unlock(&lfsck_instance_lock);
900 int lfsck_bits_dump(char **buf, int *len, int bits, const char *names[],
908 rc = snprintf(*buf, *len, "%s:%c", prefix, bits != 0 ? ' ' : '\n');
914 for (i = 0, flag = 1; bits != 0; i++, flag = 1 << i) {
917 if (names[i] != NULL) {
918 rc = snprintf(*buf, *len, "%s%c", names[i],
919 bits != 0 ? ',' : '\n');
931 int lfsck_time_dump(char **buf, int *len, __u64 time, const char *prefix)
936 rc = snprintf(*buf, *len, "%s: "LPU64" seconds\n", prefix,
937 cfs_time_current_sec() - time);
939 rc = snprintf(*buf, *len, "%s: N/A\n", prefix);
948 int lfsck_pos_dump(char **buf, int *len, struct lfsck_position *pos,
953 if (fid_is_zero(&pos->lp_dir_parent)) {
954 if (pos->lp_oit_cookie == 0)
955 rc = snprintf(*buf, *len, "%s: N/A, N/A, N/A\n",
958 rc = snprintf(*buf, *len, "%s: "LPU64", N/A, N/A\n",
959 prefix, pos->lp_oit_cookie);
961 rc = snprintf(*buf, *len, "%s: "LPU64", "DFID", "LPU64"\n",
962 prefix, pos->lp_oit_cookie,
963 PFID(&pos->lp_dir_parent), pos->lp_dir_cookie);
973 void lfsck_pos_fill(const struct lu_env *env, struct lfsck_instance *lfsck,
974 struct lfsck_position *pos, bool init)
976 const struct dt_it_ops *iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
978 if (unlikely(lfsck->li_di_oit == NULL)) {
979 memset(pos, 0, sizeof(*pos));
983 pos->lp_oit_cookie = iops->store(env, lfsck->li_di_oit);
984 if (!lfsck->li_current_oit_processed && !init)
985 pos->lp_oit_cookie--;
987 LASSERT(pos->lp_oit_cookie > 0);
989 if (lfsck->li_di_dir != NULL) {
990 struct dt_object *dto = lfsck->li_obj_dir;
992 pos->lp_dir_cookie = dto->do_index_ops->dio_it.store(env,
995 if (pos->lp_dir_cookie >= MDS_DIR_END_OFF) {
996 fid_zero(&pos->lp_dir_parent);
997 pos->lp_dir_cookie = 0;
999 pos->lp_dir_parent = *lfsck_dto2fid(dto);
1002 fid_zero(&pos->lp_dir_parent);
1003 pos->lp_dir_cookie = 0;
1007 static void __lfsck_set_speed(struct lfsck_instance *lfsck, __u32 limit)
1009 lfsck->li_bookmark_ram.lb_speed_limit = limit;
1010 if (limit != LFSCK_SPEED_NO_LIMIT) {
1012 lfsck->li_sleep_rate = limit / HZ;
1013 lfsck->li_sleep_jif = 1;
1015 lfsck->li_sleep_rate = 1;
1016 lfsck->li_sleep_jif = HZ / limit;
1019 lfsck->li_sleep_jif = 0;
1020 lfsck->li_sleep_rate = 0;
1024 void lfsck_control_speed(struct lfsck_instance *lfsck)
1026 struct ptlrpc_thread *thread = &lfsck->li_thread;
1027 struct l_wait_info lwi;
1029 if (lfsck->li_sleep_jif > 0 &&
1030 lfsck->li_new_scanned >= lfsck->li_sleep_rate) {
1031 lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
1032 LWI_ON_SIGNAL_NOOP, NULL);
1034 l_wait_event(thread->t_ctl_waitq,
1035 !thread_is_running(thread),
1037 lfsck->li_new_scanned = 0;
1041 void lfsck_control_speed_by_self(struct lfsck_component *com)
1043 struct lfsck_instance *lfsck = com->lc_lfsck;
1044 struct ptlrpc_thread *thread = &lfsck->li_thread;
1045 struct l_wait_info lwi;
1047 if (lfsck->li_sleep_jif > 0 &&
1048 com->lc_new_scanned >= lfsck->li_sleep_rate) {
1049 lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
1050 LWI_ON_SIGNAL_NOOP, NULL);
1052 l_wait_event(thread->t_ctl_waitq,
1053 !thread_is_running(thread),
1055 com->lc_new_scanned = 0;
1059 static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
1062 if (unlikely(!S_ISDIR(lfsck_object_type(obj)) ||
1063 !dt_try_as_dir(env, obj)))
1066 return dt_lookup(env, obj, (struct dt_rec *)fid,
1067 (const struct dt_key *)"..", BYPASS_CAPA);
1070 static int lfsck_needs_scan_dir(const struct lu_env *env,
1071 struct lfsck_instance *lfsck,
1072 struct dt_object *obj)
1074 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1078 if (!lfsck->li_master || !S_ISDIR(lfsck_object_type(obj)) ||
1079 cfs_list_empty(&lfsck->li_list_dir))
1083 /* XXX: Currently, we do not scan the "/REMOTE_PARENT_DIR",
1084 * which is the agent directory to manage the objects
1085 * which name entries reside on remote MDTs. Related
1086 * consistency verification will be processed in LFSCK
1088 if (lu_fid_eq(lfsck_dto2fid(obj), &lfsck->li_global_root_fid)) {
1090 lfsck_object_put(env, obj);
1094 /* .lustre doesn't contain "real" user objects, no need lfsck */
1095 if (fid_is_dot_lustre(lfsck_dto2fid(obj))) {
1097 lfsck_object_put(env, obj);
1101 dt_read_lock(env, obj, MOR_TGT_CHILD);
1102 if (unlikely(lfsck_is_dead_obj(obj))) {
1103 dt_read_unlock(env, obj);
1105 lfsck_object_put(env, obj);
1109 rc = dt_xattr_get(env, obj,
1110 lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK,
1112 dt_read_unlock(env, obj);
1115 lfsck_object_put(env, obj);
1119 if (rc < 0 && rc != -ENODATA) {
1121 lfsck_object_put(env, obj);
1125 rc = lfsck_parent_fid(env, obj, fid);
1127 lfsck_object_put(env, obj);
1131 if (unlikely(lu_fid_eq(fid, &lfsck->li_local_root_fid)))
1134 obj = lfsck_object_find(env, lfsck, fid);
1137 else if (IS_ERR(obj))
1138 return PTR_ERR(obj);
1140 if (!dt_object_exists(obj)) {
1141 lfsck_object_put(env, obj);
1145 /* Currently, only client visible directory can be remote. */
1146 if (dt_object_remote(obj)) {
1147 lfsck_object_put(env, obj);
1156 struct lfsck_thread_args *lfsck_thread_args_init(struct lfsck_instance *lfsck,
1157 struct lfsck_component *com,
1158 struct lfsck_start_param *lsp)
1160 struct lfsck_thread_args *lta;
1165 return ERR_PTR(-ENOMEM);
1167 rc = lu_env_init(<a->lta_env, LCT_MD_THREAD | LCT_DT_THREAD);
1173 lta->lta_lfsck = lfsck_instance_get(lfsck);
1175 lta->lta_com = lfsck_component_get(com);
1182 void lfsck_thread_args_fini(struct lfsck_thread_args *lta)
1184 if (lta->lta_com != NULL)
1185 lfsck_component_put(<a->lta_env, lta->lta_com);
1186 lfsck_instance_put(<a->lta_env, lta->lta_lfsck);
1187 lu_env_fini(<a->lta_env);
1191 /* LFSCK wrap functions */
1193 void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
1196 struct lfsck_component *com;
1198 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
1199 com->lc_ops->lfsck_fail(env, com, new_checked);
1203 int lfsck_checkpoint(const struct lu_env *env, struct lfsck_instance *lfsck)
1205 struct lfsck_component *com;
1209 if (likely(cfs_time_beforeq(cfs_time_current(),
1210 lfsck->li_time_next_checkpoint)))
1213 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, false);
1214 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
1215 rc = com->lc_ops->lfsck_checkpoint(env, com, false);
1220 lfsck->li_time_last_checkpoint = cfs_time_current();
1221 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
1222 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
1223 return rc1 != 0 ? rc1 : rc;
1226 int lfsck_prep(const struct lu_env *env, struct lfsck_instance *lfsck,
1227 struct lfsck_start_param *lsp)
1229 struct dt_object *obj = NULL;
1230 struct lfsck_component *com;
1231 struct lfsck_component *next;
1232 struct lfsck_position *pos = NULL;
1233 const struct dt_it_ops *iops =
1234 &lfsck->li_obj_oit->do_index_ops->dio_it;
1239 LASSERT(lfsck->li_obj_dir == NULL);
1240 LASSERT(lfsck->li_di_dir == NULL);
1242 lfsck->li_current_oit_processed = 0;
1243 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
1244 com->lc_new_checked = 0;
1245 if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
1246 com->lc_journal = 0;
1248 rc = com->lc_ops->lfsck_prep(env, com, lsp);
1252 if ((pos == NULL) ||
1253 (!lfsck_pos_is_zero(&com->lc_pos_start) &&
1254 lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
1255 pos = &com->lc_pos_start;
1258 /* Init otable-based iterator. */
1260 rc = iops->load(env, lfsck->li_di_oit, 0);
1262 lfsck->li_oit_over = 1;
1269 rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
1273 lfsck->li_oit_over = 1;
1275 if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
1278 /* Find the directory for namespace-based traverse. */
1279 obj = lfsck_object_find(env, lfsck, &pos->lp_dir_parent);
1282 else if (IS_ERR(obj))
1283 RETURN(PTR_ERR(obj));
1285 /* XXX: Currently, skip remote object, the consistency for
1286 * remote object will be processed in LFSCK phase III. */
1287 if (!dt_object_exists(obj) || dt_object_remote(obj) ||
1288 unlikely(!S_ISDIR(lfsck_object_type(obj))))
1291 if (unlikely(!dt_try_as_dir(env, obj)))
1292 GOTO(out, rc = -ENOTDIR);
1294 /* Init the namespace-based directory traverse. */
1295 iops = &obj->do_index_ops->dio_it;
1296 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
1298 GOTO(out, rc = PTR_ERR(di));
1300 LASSERT(pos->lp_dir_cookie < MDS_DIR_END_OFF);
1302 rc = iops->load(env, di, pos->lp_dir_cookie);
1303 if ((rc == 0) || (rc > 0 && pos->lp_dir_cookie > 0))
1304 rc = iops->next(env, di);
1310 iops->fini(env, di);
1314 lfsck->li_obj_dir = lfsck_object_get(obj);
1315 lfsck->li_cookie_dir = iops->store(env, di);
1316 spin_lock(&lfsck->li_lock);
1317 lfsck->li_di_dir = di;
1318 spin_unlock(&lfsck->li_lock);
1324 lfsck_object_put(env, obj);
1327 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
1329 com->lc_ops->lfsck_post(env, com, rc, true);
1335 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, true);
1336 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
1337 rc = com->lc_ops->lfsck_checkpoint(env, com, true);
1342 lfsck->li_time_last_checkpoint = cfs_time_current();
1343 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
1344 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
1348 int lfsck_exec_oit(const struct lu_env *env, struct lfsck_instance *lfsck,
1349 struct dt_object *obj)
1351 struct lfsck_component *com;
1352 const struct dt_it_ops *iops;
1357 LASSERT(lfsck->li_obj_dir == NULL);
1359 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
1360 rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
1365 rc = lfsck_needs_scan_dir(env, lfsck, obj);
1369 if (unlikely(!dt_try_as_dir(env, obj)))
1370 GOTO(out, rc = -ENOTDIR);
1372 iops = &obj->do_index_ops->dio_it;
1373 di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
1375 GOTO(out, rc = PTR_ERR(di));
1377 rc = iops->load(env, di, 0);
1379 rc = iops->next(env, di);
1385 iops->fini(env, di);
1389 lfsck->li_obj_dir = lfsck_object_get(obj);
1390 lfsck->li_cookie_dir = iops->store(env, di);
1391 spin_lock(&lfsck->li_lock);
1392 lfsck->li_di_dir = di;
1393 spin_unlock(&lfsck->li_lock);
1399 lfsck_fail(env, lfsck, false);
1400 return (rc > 0 ? 0 : rc);
1403 int lfsck_exec_dir(const struct lu_env *env, struct lfsck_instance *lfsck,
1404 struct dt_object *obj, struct lu_dirent *ent)
1406 struct lfsck_component *com;
1409 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
1410 rc = com->lc_ops->lfsck_exec_dir(env, com, obj, ent);
1417 int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
1420 struct lfsck_component *com;
1421 struct lfsck_component *next;
1425 lfsck_pos_fill(env, lfsck, &lfsck->li_pos_current, false);
1426 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
1427 rc = com->lc_ops->lfsck_post(env, com, result, false);
1432 lfsck->li_time_last_checkpoint = cfs_time_current();
1433 lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
1434 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
1436 /* Ignore some component post failure to make other can go ahead. */
1440 static void lfsck_interpret(const struct lu_env *env,
1441 struct lfsck_instance *lfsck,
1442 struct ptlrpc_request *req, void *args, int result)
1444 struct lfsck_async_interpret_args *laia = args;
1445 struct lfsck_component *com;
1447 LASSERT(laia->laia_shared);
1449 spin_lock(&lfsck->li_lock);
1450 list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
1451 if (com->lc_ops->lfsck_interpret != NULL) {
1452 laia->laia_com = com;
1453 com->lc_ops->lfsck_interpret(env, req, laia, result);
1457 list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
1458 if (com->lc_ops->lfsck_interpret != NULL) {
1459 laia->laia_com = com;
1460 com->lc_ops->lfsck_interpret(env, req, laia, result);
1463 spin_unlock(&lfsck->li_lock);
/* Run the second-stage (double) scan for every registered component, then
 * block until all asynchronous double-scan work has drained
 * (li_double_scan_count reaches zero).  Under LPF_DRYRUN no journaling
 * (repair) is performed. */
1466 int lfsck_double_scan(const struct lu_env *env, struct lfsck_instance *lfsck)
1468 struct lfsck_component *com;
1469 struct lfsck_component *next;
1470 struct l_wait_info lwi = { 0 };
1474 cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
1476 if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
1477 com->lc_journal = 0;
1479 rc = com->lc_ops->lfsck_double_scan(env, com);
1484 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1485 atomic_read(&lfsck->li_double_scan_count) == 0,
/* Prefer the first failure (rc1) over the last component's rc. */
1488 return rc1 != 0 ? rc1 : rc;
/* Notify the component of @type that the target @ltd is stopping: look the
 * component up on the scan list, then the double-scan list, take a reference,
 * and synchronously send its stop-notify RPC via a temporary request set.
 * NOTE(review): lines are elided here — the matching spin_unlock() calls and
 * the "not found" early return live on the elided lines; the second
 * spin_lock() below re-takes li_lock for the double-scan lookup. */
1491 static int lfsck_stop_notify(const struct lu_env *env,
1492 struct lfsck_instance *lfsck,
1493 struct lfsck_tgt_descs *ltds,
1494 struct lfsck_tgt_desc *ltd, __u16 type)
1496 struct ptlrpc_request_set *set;
1497 struct lfsck_component *com;
1501 spin_lock(&lfsck->li_lock);
1502 com = __lfsck_component_find(lfsck, type, &lfsck->li_list_scan);
1504 com = __lfsck_component_find(lfsck, type,
1505 &lfsck->li_list_double_scan);
1507 lfsck_component_get(com);
1508 spin_lock(&lfsck->li_lock);
1511 if (com->lc_ops->lfsck_stop_notify != NULL) {
1512 set = ptlrpc_prep_set();
/* Allocation failure: drop the reference taken above before returning. */
1514 lfsck_component_put(env, com);
1519 rc = com->lc_ops->lfsck_stop_notify(env, com, ltds,
/* Wait for the notification RPC(s) to complete, then free the set. */
1522 rc = ptlrpc_set_wait(set);
1524 ptlrpc_set_destroy(set);
1527 lfsck_component_put(env, com);
/* Abort notification: invoke the optional lfsck_quit() callback on every
 * component of both the active-scan and double-scan lists.  The _safe
 * iterators allow a callback to remove its component from the list. */
1533 void lfsck_quit(const struct lu_env *env, struct lfsck_instance *lfsck)
1535 struct lfsck_component *com;
1536 struct lfsck_component *next;
1538 list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
1540 if (com->lc_ops->lfsck_quit != NULL)
1541 com->lc_ops->lfsck_quit(env, com);
1544 list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
1546 if (com->lc_ops->lfsck_quit != NULL)
1547 com->lc_ops->lfsck_quit(env, com);
/* ptlrpc reply interpreter for broadcast LFSCK RPCs: recover the owning
 * instance from the target-descs pointer embedded in @args, fan the result
 * out to the components via lfsck_interpret(), drop the per-target
 * reference, and record the first "real" error (-EALREADY from one target
 * does not overwrite an earlier failure). */
1551 static int lfsck_async_interpret(const struct lu_env *env,
1552 struct ptlrpc_request *req,
1555 struct lfsck_async_interpret_args *laia = args;
1556 struct lfsck_instance *lfsck;
1558 lfsck = container_of0(laia->laia_ltds, struct lfsck_instance,
1560 lfsck_interpret(env, lfsck, req, laia, rc);
1561 lfsck_tgt_put(laia->laia_ltd);
1562 if (rc != 0 && laia->laia_result != -EALREADY)
1563 laia->laia_result = rc;
/* Build and queue one asynchronous LFSCK RPC (@request is the opcode,
 * selecting the NOTIFY or QUERY request format) on @set.  The peer must
 * advertise OBD_CONNECT_LFSCK.  A private copy of @args is stored in the
 * request's async-args area; a held component reference travels with it.
 * NOTE(review): the elided lines carry the RETURNs for the unsupported-peer
 * and unknown-opcode paths and the copy of *lr into the request buffer. */
1568 int lfsck_async_request(const struct lu_env *env, struct obd_export *exp,
1569 struct lfsck_request *lr,
1570 struct ptlrpc_request_set *set,
1571 ptlrpc_interpterer_t interpreter,
1572 void *args, int request)
1574 struct lfsck_async_interpret_args *laia;
1575 struct ptlrpc_request *req;
1576 struct lfsck_request *tmp;
1577 struct req_format *format;
/* Peers without the LFSCK connect flag cannot handle these RPCs. */
1580 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
1585 format = &RQF_LFSCK_NOTIFY;
1588 format = &RQF_LFSCK_QUERY;
1591 CERROR("%s: unknown async request: opc = %d\n",
1592 exp->exp_obd->obd_name, request);
1596 req = ptlrpc_request_alloc(class_exp2cliimp(exp), format);
1600 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, request);
1602 ptlrpc_request_free(req);
1607 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
1609 ptlrpc_request_set_replen(req);
1611 laia = ptlrpc_req_async_args(req);
1612 *laia = *(struct lfsck_async_interpret_args *)args;
/* Pin the component for the lifetime of the in-flight request. */
1613 if (laia->laia_com != NULL)
1614 lfsck_component_get(laia->laia_com);
1615 req->rq_interpret_reply = interpreter;
1616 ptlrpc_set_add_req(set, req);
1621 /* external interfaces */
/* External interface: format the current speed limit (objects/sec, 0 means
 * unlimited) of the LFSCK instance bound to @key into @buf.  Sets up a
 * temporary lu_env because callers (procfs handlers) have none. */
1623 int lfsck_get_speed(struct dt_device *key, void *buf, int len)
1626 struct lfsck_instance *lfsck;
1630 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1634 lfsck = lfsck_instance_find(key, true, false);
1635 if (likely(lfsck != NULL)) {
1636 rc = snprintf(buf, len, "%u\n",
1637 lfsck->li_bookmark_ram.lb_speed_limit);
1638 lfsck_instance_put(&env, lfsck);
1647 EXPORT_SYMBOL(lfsck_get_speed);
/* External interface: set the speed limit of the LFSCK instance bound to
 * @key and persist it via the bookmark file.  li_mutex serializes against
 * concurrent parameter updates and start/stop. */
1649 int lfsck_set_speed(struct dt_device *key, int val)
1652 struct lfsck_instance *lfsck;
1656 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1660 lfsck = lfsck_instance_find(key, true, false);
1661 if (likely(lfsck != NULL)) {
1662 mutex_lock(&lfsck->li_mutex);
1663 __lfsck_set_speed(lfsck, val);
1664 rc = lfsck_bookmark_store(&env, lfsck);
1665 mutex_unlock(&lfsck->li_mutex);
1666 lfsck_instance_put(&env, lfsck);
1675 EXPORT_SYMBOL(lfsck_set_speed);
/* External interface: format the async-request pipeline window size of the
 * LFSCK instance bound to @key into @buf (0 means unrestricted). */
1677 int lfsck_get_windows(struct dt_device *key, void *buf, int len)
1680 struct lfsck_instance *lfsck;
1684 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1688 lfsck = lfsck_instance_find(key, true, false);
1689 if (likely(lfsck != NULL)) {
1690 rc = snprintf(buf, len, "%u\n",
1691 lfsck->li_bookmark_ram.lb_async_windows);
1692 lfsck_instance_put(&env, lfsck);
1701 EXPORT_SYMBOL(lfsck_get_windows);
/* External interface: set the async-request window size, rejecting values
 * above LFSCK_ASYNC_WIN_MAX (to bound memory use) and persisting the new
 * value only when it actually changed. */
1703 int lfsck_set_windows(struct dt_device *key, int val)
1706 struct lfsck_instance *lfsck;
1710 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1714 lfsck = lfsck_instance_find(key, true, false);
1715 if (likely(lfsck != NULL)) {
1716 if (val > LFSCK_ASYNC_WIN_MAX) {
1717 CERROR("%s: Too large async windows size, which "
1718 "may cause memory issues. The valid range "
1719 "is [0 - %u]. If you do not want to restrict "
1720 "the windows size for async requests pipeline, "
1721 "just set it as 0.\n",
1722 lfsck_lfsck2name(lfsck), LFSCK_ASYNC_WIN_MAX);
1724 } else if (lfsck->li_bookmark_ram.lb_async_windows != val) {
1725 mutex_lock(&lfsck->li_mutex);
1726 lfsck->li_bookmark_ram.lb_async_windows = val;
1727 rc = lfsck_bookmark_store(&env, lfsck);
1728 mutex_unlock(&lfsck->li_mutex);
1730 lfsck_instance_put(&env, lfsck);
1739 EXPORT_SYMBOL(lfsck_set_windows);
/* External interface: dump the status of the component of @type (e.g.
 * namespace or layout) of the LFSCK instance bound to @key into @buf via
 * the component's lfsck_dump() method. */
1741 int lfsck_dump(struct dt_device *key, void *buf, int len, enum lfsck_type type)
1744 struct lfsck_instance *lfsck;
1745 struct lfsck_component *com;
1749 rc = lu_env_init(&env, LCT_MD_THREAD | LCT_DT_THREAD);
1753 lfsck = lfsck_instance_find(key, true, false);
1754 if (likely(lfsck != NULL)) {
1755 com = lfsck_component_find(lfsck, type);
1756 if (likely(com != NULL)) {
1757 rc = com->lc_ops->lfsck_dump(&env, com, buf, len);
1758 lfsck_component_put(&env, com);
1763 lfsck_instance_put(&env, lfsck);
1772 EXPORT_SYMBOL(lfsck_dump);
/* Broadcast an LE_STOP event to every known MDT and wait for all replies.
 * Only called for LPF_BROADCAST stop requests (see the LASSERT).  Failures
 * to reach individual targets are reported locally via lfsck_interpret()
 * and warned about, but the loop keeps going so the remaining targets are
 * still notified. */
1774 static int lfsck_stop_all(const struct lu_env *env,
1775 struct lfsck_instance *lfsck,
1776 struct lfsck_stop *stop)
1778 struct lfsck_thread_info *info = lfsck_env_info(env);
1779 struct lfsck_request *lr = &info->lti_lr;
1780 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1781 struct ptlrpc_request_set *set;
1782 struct lfsck_tgt_descs *ltds = &lfsck->li_mdt_descs;
1783 struct lfsck_tgt_desc *ltd;
1784 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1790 LASSERT(stop->ls_flags & LPF_BROADCAST);
1792 set = ptlrpc_prep_set();
1793 if (unlikely(set == NULL)) {
1794 CERROR("%s: cannot allocate memory for stop LFSCK on "
1795 "all targets\n", lfsck_lfsck2name(lfsck));
/* Build the LE_STOP request once; it is shared by every target RPC. */
1800 memset(lr, 0, sizeof(*lr));
1801 lr->lr_event = LE_STOP;
1802 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1803 lr->lr_status = stop->ls_status;
1804 lr->lr_version = bk->lb_version;
1805 lr->lr_active = LFSCK_TYPES_ALL;
1806 lr->lr_param = stop->ls_flags;
1808 laia->laia_com = NULL;
1809 laia->laia_ltds = ltds;
1811 laia->laia_result = 0;
1812 laia->laia_shared = 1;
/* Walk every registered MDT under the read lock and queue one RPC each. */
1814 down_read(&ltds->ltd_rw_sem);
1815 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1816 ltd = lfsck_tgt_get(ltds, idx);
1817 LASSERT(ltd != NULL);
1819 laia->laia_ltd = ltd;
1820 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1821 lfsck_async_interpret, laia,
/* Queueing failed: deliver the error to the components directly. */
1824 lfsck_interpret(env, lfsck, NULL, laia, rc);
1826 CWARN("%s: cannot notify MDT %x for LFSCK stop: "
1827 "rc = %d\n", lfsck_lfsck2name(lfsck), idx, rc);
1831 up_read(&ltds->ltd_rw_sem);
1833 rc = ptlrpc_set_wait(set);
1834 ptlrpc_set_destroy(set);
1837 rc = laia->laia_result;
/* -EALREADY (already stopped) is not an error for a stop broadcast. */
1839 if (rc == -EALREADY)
1843 CWARN("%s: fail to stop LFSCK on some MDTs: rc = %d\n",
1844 lfsck_lfsck2name(lfsck), rc);
1846 RETURN(rc != 0 ? rc : rc1);
/* Broadcast an LE_START event to every known MDT and wait for all replies.
 * Only called for LPF_BROADCAST start requests (see the LASSERT).  Error
 * policy depends on the failout mode: with LPF_FAILOUT any per-target
 * failure aborts (and, on RPC-level failure, triggers a broadcast stop);
 * without it the scan proceeds partially and only warns. */
1849 static int lfsck_start_all(const struct lu_env *env,
1850 struct lfsck_instance *lfsck,
1851 struct lfsck_start *start)
1853 struct lfsck_thread_info *info = lfsck_env_info(env);
1854 struct lfsck_request *lr = &info->lti_lr;
1855 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1856 struct ptlrpc_request_set *set;
1857 struct lfsck_tgt_descs *ltds = &lfsck->li_mdt_descs;
1858 struct lfsck_tgt_desc *ltd;
1859 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1864 LASSERT(start->ls_flags & LPF_BROADCAST);
1866 set = ptlrpc_prep_set();
1867 if (unlikely(set == NULL)) {
1868 if (bk->lb_param & LPF_FAILOUT) {
1869 CERROR("%s: cannot allocate memory for start LFSCK on "
1870 "all targets, failout.\n",
1871 lfsck_lfsck2name(lfsck));
1875 CWARN("%s: cannot allocate memory for start LFSCK on "
1876 "all targets, partly scan.\n",
1877 lfsck_lfsck2name(lfsck));
/* Build the LE_START request once; it is shared by every target RPC. */
1883 memset(lr, 0, sizeof(*lr));
1884 lr->lr_event = LE_START;
1885 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1886 lr->lr_speed = bk->lb_speed_limit;
1887 lr->lr_version = bk->lb_version;
1888 lr->lr_active = start->ls_active;
1889 lr->lr_param = start->ls_flags;
1890 lr->lr_async_windows = bk->lb_async_windows;
1891 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
1894 laia->laia_com = NULL;
1895 laia->laia_ltds = ltds;
1897 laia->laia_result = 0;
1898 laia->laia_shared = 1;
1900 down_read(&ltds->ltd_rw_sem);
1901 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1902 ltd = lfsck_tgt_get(ltds, idx);
1903 LASSERT(ltd != NULL);
1905 laia->laia_ltd = ltd;
/* Reset per-target layout progress before (re)starting. */
1906 ltd->ltd_layout_done = 0;
1907 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1908 lfsck_async_interpret, laia,
1911 lfsck_interpret(env, lfsck, NULL, laia, rc);
1913 if (bk->lb_param & LPF_FAILOUT) {
1914 CERROR("%s: cannot notify MDT %x for LFSCK "
1915 "start, failout: rc = %d\n",
1916 lfsck_lfsck2name(lfsck), idx, rc);
1919 CWARN("%s: cannot notify MDT %x for LFSCK "
1920 "start, partly scan: rc = %d\n",
1921 lfsck_lfsck2name(lfsck), idx, rc);
1926 up_read(&ltds->ltd_rw_sem);
/* NOTE(review): elided lines decide this early-destroy path (presumably
 * failout abort before waiting); the normal path waits below. */
1929 ptlrpc_set_destroy(set);
1934 rc = ptlrpc_set_wait(set);
1935 ptlrpc_set_destroy(set);
1938 rc = laia->laia_result;
1941 if (bk->lb_param & LPF_FAILOUT) {
1942 struct lfsck_stop *stop = &info->lti_stop;
1944 CERROR("%s: cannot start LFSCK on some MDTs, "
1945 "stop all: rc = %d\n",
1946 lfsck_lfsck2name(lfsck), rc);
/* Roll back: broadcast a stop so no MDT is left half-started. */
1947 if (rc != -EALREADY) {
1948 stop->ls_status = LS_FAILED;
1949 stop->ls_flags = LPF_ALL_TGT | LPF_BROADCAST;
1950 lfsck_stop_all(env, lfsck, stop);
1953 CWARN("%s: cannot start LFSCK on some MDTs, "
1954 "partly scan: rc = %d\n",
1955 lfsck_lfsck2name(lfsck), rc);
/* External interface: start (or auto-resume, when @lsp->lsp_start is NULL)
 * the LFSCK on the device bound to @key.
 *
 * Overall flow (many lines elided in this listing):
 *  1. If an LFSCK thread is already running, try to "join" the requested
 *     component types instead of failing.
 *  2. Merge the user-supplied parameters (speed, windows, failout, dryrun,
 *     all-targets, orphan) into the persistent bookmark.
 *  3. Move requested idle components onto the active-scan list, resetting
 *     them when LPF_RESET is set.
 *  4. Spawn the lfsck_master_engine kernel thread and wait for it to come
 *     up; for LPF_BROADCAST starts, also notify all MDTs (dropping li_mutex
 *     first to avoid deadlock) before unplugging the engine. */
1963 int lfsck_start(const struct lu_env *env, struct dt_device *key,
1964 struct lfsck_start_param *lsp)
1966 struct lfsck_start *start = lsp->lsp_start;
1967 struct lfsck_instance *lfsck;
1968 struct lfsck_bookmark *bk;
1969 struct ptlrpc_thread *thread;
1970 struct lfsck_component *com;
1971 struct l_wait_info lwi = { 0 };
1972 struct lfsck_thread_args *lta;
1980 lfsck = lfsck_instance_find(key, true, false);
1981 if (unlikely(lfsck == NULL))
1984 /* System is not ready, try again later. */
1985 if (unlikely(lfsck->li_namespace == NULL))
1986 GOTO(put, rc = -EAGAIN);
1988 /* start == NULL means auto trigger paused LFSCK. */
1989 if ((start == NULL) &&
1990 (cfs_list_empty(&lfsck->li_list_scan) ||
1991 OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_AUTO)))
1994 bk = &lfsck->li_bookmark_ram;
1995 thread = &lfsck->li_thread;
1996 mutex_lock(&lfsck->li_mutex);
1997 spin_lock(&lfsck->li_lock);
/* Engine already running: join the requested component types in place. */
1998 if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
2000 while (start->ls_active != 0) {
2001 if (!(type & start->ls_active)) {
2006 com = __lfsck_component_find(lfsck, type,
2007 &lfsck->li_list_scan);
2009 com = __lfsck_component_find(lfsck, type,
2010 &lfsck->li_list_double_scan);
2016 if (com->lc_ops->lfsck_join != NULL) {
2017 rc = com->lc_ops->lfsck_join( env, com, lsp);
2018 if (rc != 0 && rc != -EALREADY)
2021 start->ls_active &= ~type;
2024 spin_unlock(&lfsck->li_lock);
2027 spin_unlock(&lfsck->li_lock);
/* Fresh run: clear all transient per-run state. */
2029 lfsck->li_status = 0;
2030 lfsck->li_oit_over = 0;
2031 lfsck->li_start_unplug = 0;
2032 lfsck->li_drop_dryrun = 0;
2033 lfsck->li_new_scanned = 0;
2035 /* For auto trigger. */
/* Broadcast ('-A'/'-o') is only meaningful on the master (MDS). */
2039 if (start->ls_flags & LPF_BROADCAST && !lfsck->li_master) {
2040 CERROR("%s: only allow to specify '-A | -o' via MDS\n",
2041 lfsck_lfsck2name(lfsck));
2043 GOTO(out, rc = -EPERM);
/* Merge user-visible parameters into the persistent bookmark. */
2046 start->ls_version = bk->lb_version;
2047 if (start->ls_valid & LSV_SPEED_LIMIT) {
2048 __lfsck_set_speed(lfsck, start->ls_speed_limit);
2052 if (start->ls_valid & LSV_ASYNC_WINDOWS &&
2053 bk->lb_async_windows != start->ls_async_windows) {
2054 bk->lb_async_windows = start->ls_async_windows;
2058 if (start->ls_valid & LSV_ERROR_HANDLE) {
2059 valid |= DOIV_ERROR_HANDLE;
2060 if (start->ls_flags & LPF_FAILOUT)
2061 flags |= DOIF_FAILOUT;
2063 if ((start->ls_flags & LPF_FAILOUT) &&
2064 !(bk->lb_param & LPF_FAILOUT)) {
2065 bk->lb_param |= LPF_FAILOUT;
2067 } else if (!(start->ls_flags & LPF_FAILOUT) &&
2068 (bk->lb_param & LPF_FAILOUT)) {
2069 bk->lb_param &= ~LPF_FAILOUT;
2074 if (start->ls_valid & LSV_DRYRUN) {
2075 valid |= DOIV_DRYRUN;
2076 if (start->ls_flags & LPF_DRYRUN)
2077 flags |= DOIF_DRYRUN;
2079 if ((start->ls_flags & LPF_DRYRUN) &&
2080 !(bk->lb_param & LPF_DRYRUN)) {
2081 bk->lb_param |= LPF_DRYRUN;
2083 } else if (!(start->ls_flags & LPF_DRYRUN) &&
2084 (bk->lb_param & LPF_DRYRUN)) {
2085 bk->lb_param &= ~LPF_DRYRUN;
/* Leaving dryrun mode: remember to drop previously simulated repairs. */
2086 lfsck->li_drop_dryrun = 1;
2091 if (bk->lb_param & LPF_ALL_TGT &&
2092 !(start->ls_flags & LPF_ALL_TGT)) {
2093 bk->lb_param &= ~LPF_ALL_TGT;
2095 } else if (!(bk->lb_param & LPF_ALL_TGT) &&
2096 start->ls_flags & LPF_ALL_TGT) {
2097 bk->lb_param |= LPF_ALL_TGT;
2101 if (bk->lb_param & LPF_ORPHAN &&
2102 !(start->ls_flags & LPF_ORPHAN)) {
2103 bk->lb_param &= ~LPF_ORPHAN;
2105 } else if (!(bk->lb_param & LPF_ORPHAN) &&
2106 start->ls_flags & LPF_ORPHAN) {
2107 bk->lb_param |= LPF_ORPHAN;
2112 rc = lfsck_bookmark_store(env, lfsck);
2117 if (start->ls_flags & LPF_RESET)
2118 flags |= DOIF_RESET;
/* Select which components take part in this run. */
2120 if (start->ls_active != 0) {
2121 struct lfsck_component *next;
2123 if (start->ls_active == LFSCK_TYPES_ALL)
2124 start->ls_active = LFSCK_TYPES_SUPPORTED;
2126 if (start->ls_active & ~LFSCK_TYPES_SUPPORTED) {
2127 start->ls_active &= ~LFSCK_TYPES_SUPPORTED;
2128 GOTO(out, rc = -ENOTSUPP);
/* Retire currently-active components that were not requested. */
2131 cfs_list_for_each_entry_safe(com, next,
2132 &lfsck->li_list_scan, lc_link) {
2133 if (!(com->lc_type & start->ls_active)) {
2134 rc = com->lc_ops->lfsck_post(env, com, 0,
/* Activate requested components that are currently idle. */
2141 while (start->ls_active != 0) {
2142 if (type & start->ls_active) {
2143 com = __lfsck_component_find(lfsck, type,
2144 &lfsck->li_list_idle);
2146 /* The component status will be updated
2147 * when its prep() is called later by
2148 * the LFSCK main engine. */
2149 cfs_list_del_init(&com->lc_link);
2150 cfs_list_add_tail(&com->lc_link,
2151 &lfsck->li_list_scan);
2153 start->ls_active &= ~type;
2159 cfs_list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
2160 start->ls_active |= com->lc_type;
2161 if (flags & DOIF_RESET) {
2162 rc = com->lc_ops->lfsck_reset(env, com, false);
/* Translate the merged parameters into dir/OIT iterator arguments. */
2169 lfsck->li_args_dir = LUDA_64BITHASH | LUDA_VERIFY;
2170 if (bk->lb_param & LPF_DRYRUN) {
2171 lfsck->li_args_dir |= LUDA_VERIFY_DRYRUN;
2172 valid |= DOIV_DRYRUN;
2173 flags |= DOIF_DRYRUN;
2176 if (bk->lb_param & LPF_FAILOUT) {
2177 valid |= DOIV_ERROR_HANDLE;
2178 flags |= DOIF_FAILOUT;
2181 if (!cfs_list_empty(&lfsck->li_list_scan))
2182 flags |= DOIF_OUTUSED;
2184 lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
/* Launch the master engine thread and wait until it is running. */
2185 thread_set_flags(thread, 0);
2186 lta = lfsck_thread_args_init(lfsck, NULL, lsp);
2188 GOTO(out, rc = PTR_ERR(lta));
2190 rc = PTR_ERR(kthread_run(lfsck_master_engine, lta, "lfsck"));
2191 if (IS_ERR_VALUE(rc)) {
2192 CERROR("%s: cannot start LFSCK thread: rc = %ld\n",
2193 lfsck_lfsck2name(lfsck), rc);
2194 lfsck_thread_args_fini(lta);
2199 l_wait_event(thread->t_ctl_waitq,
2200 thread_is_running(thread) ||
2201 thread_is_stopped(thread),
/* Non-broadcast start: unplug the engine immediately. */
2203 if (start == NULL || !(start->ls_flags & LPF_BROADCAST)) {
2204 lfsck->li_start_unplug = 1;
2205 wake_up_all(&thread->t_ctl_waitq);
2210 /* release lfsck::li_mutex to avoid deadlock. */
2211 mutex_unlock(&lfsck->li_mutex);
2212 rc = lfsck_start_all(env, lfsck, start);
2214 spin_lock(&lfsck->li_lock);
2215 if (thread_is_stopped(thread)) {
2216 spin_unlock(&lfsck->li_lock);
/* Broadcast failed: mark the run failed and tear the engine down. */
2218 lfsck->li_status = LS_FAILED;
2219 lfsck->li_flags = 0;
2220 thread_set_flags(thread, SVC_STOPPING);
2221 spin_unlock(&lfsck->li_lock);
2223 lfsck->li_start_unplug = 1;
2224 wake_up_all(&thread->t_ctl_waitq);
2225 l_wait_event(thread->t_ctl_waitq,
2226 thread_is_stopped(thread),
2230 lfsck->li_start_unplug = 1;
2231 wake_up_all(&thread->t_ctl_waitq);
2237 mutex_unlock(&lfsck->li_mutex);
2240 lfsck_instance_put(env, lfsck);
2242 return rc < 0 ? rc : 0;
2244 EXPORT_SYMBOL(lfsck_start);
/* External interface: stop the LFSCK on the device bound to @key.  For
 * LPF_BROADCAST (master/MDS only) first notify all MDTs, without holding
 * li_mutex to avoid deadlock; then mark the local engine SVC_STOPPING and
 * wait for the thread to exit.  -EALREADY is returned if nothing runs. */
2246 int lfsck_stop(const struct lu_env *env, struct dt_device *key,
2247 struct lfsck_stop *stop)
2249 struct lfsck_instance *lfsck;
2250 struct ptlrpc_thread *thread;
2251 struct l_wait_info lwi = { 0 };
2256 lfsck = lfsck_instance_find(key, true, false);
2257 if (unlikely(lfsck == NULL))
2260 thread = &lfsck->li_thread;
2261 /* release lfsck::li_mutex to avoid deadlock. */
2262 if (stop != NULL && stop->ls_flags & LPF_BROADCAST) {
2263 if (!lfsck->li_master) {
2264 CERROR("%s: only allow to specify '-A' via MDS\n",
2265 lfsck_lfsck2name(lfsck));
2267 GOTO(out, rc = -EPERM);
2270 rc1 = lfsck_stop_all(env, lfsck, stop);
2273 mutex_lock(&lfsck->li_mutex);
2274 spin_lock(&lfsck->li_lock);
2275 if (thread_is_init(thread) || thread_is_stopped(thread)) {
2276 spin_unlock(&lfsck->li_lock);
2277 GOTO(out, rc = -EALREADY);
/* Record why we stopped: caller-supplied status, or plain LS_STOPPED. */
2281 lfsck->li_status = stop->ls_status;
2282 lfsck->li_flags = stop->ls_flags;
2284 lfsck->li_status = LS_STOPPED;
2285 lfsck->li_flags = 0;
2288 thread_set_flags(thread, SVC_STOPPING);
2289 spin_unlock(&lfsck->li_lock);
2291 wake_up_all(&thread->t_ctl_waitq);
2292 l_wait_event(thread->t_ctl_waitq,
2293 thread_is_stopped(thread),
2299 mutex_unlock(&lfsck->li_mutex);
2300 lfsck_instance_put(env, lfsck);
/* Local failure takes precedence over broadcast failure (rc1). */
2302 return rc != 0 ? rc : rc1;
2304 EXPORT_SYMBOL(lfsck_stop);
/* External interface: handle an incoming LFSCK event RPC on @key.
 * LE_START/LE_STOP are translated into local lfsck_start()/lfsck_stop()
 * calls (stripping LPF_BROADCAST so slaves do not re-broadcast); phase
 * completion and other events are routed to the matching component's
 * lfsck_in_notify() method.  Unknown events return -EOPNOTSUPP. */
2306 int lfsck_in_notify(const struct lu_env *env, struct dt_device *key,
2307 struct lfsck_request *lr)
2309 int rc = -EOPNOTSUPP;
2312 switch (lr->lr_event) {
2314 struct lfsck_start *start = &lfsck_env_info(env)->lti_start;
2315 struct lfsck_start_param lsp;
2317 memset(start, 0, sizeof(*start));
2318 start->ls_valid = lr->lr_valid;
2319 start->ls_speed_limit = lr->lr_speed;
2320 start->ls_version = lr->lr_version;
2321 start->ls_active = lr->lr_active;
2322 start->ls_flags = lr->lr_param & ~LPF_BROADCAST;
2323 start->ls_async_windows = lr->lr_async_windows;
2325 lsp.lsp_start = start;
2326 lsp.lsp_index = lr->lr_index;
2327 lsp.lsp_index_valid = 1;
2328 rc = lfsck_start(env, key, &lsp);
2332 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
2334 memset(stop, 0, sizeof(*stop));
2335 stop->ls_status = lr->lr_status;
2336 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
2337 rc = lfsck_stop(env, key, stop);
2340 case LE_PHASE1_DONE:
2341 case LE_PHASE2_DONE:
2342 case LE_FID_ACCESSED:
2344 case LE_CONDITIONAL_DESTROY: {
2345 struct lfsck_instance *lfsck;
2346 struct lfsck_component *com;
2348 lfsck = lfsck_instance_find(key, true, false);
2349 if (unlikely(lfsck == NULL))
2352 com = lfsck_component_find(lfsck, lr->lr_active);
2353 if (likely(com != NULL)) {
2354 rc = com->lc_ops->lfsck_in_notify(env, com, lr);
2355 lfsck_component_put(env, com);
2358 lfsck_instance_put(env, lfsck);
2367 EXPORT_SYMBOL(lfsck_in_notify);
/* External interface: query the state of the component selected by
 * @lr->lr_active on the instance bound to @key, via its lfsck_query()
 * method. */
2369 int lfsck_query(const struct lu_env *env, struct dt_device *key,
2370 struct lfsck_request *lr)
2372 struct lfsck_instance *lfsck;
2373 struct lfsck_component *com;
2377 lfsck = lfsck_instance_find(key, true, false);
2378 if (unlikely(lfsck == NULL))
2381 com = lfsck_component_find(lfsck, lr->lr_active);
2382 if (likely(com != NULL)) {
2383 rc = com->lc_ops->lfsck_query(env, com);
2384 lfsck_component_put(env, com);
2389 lfsck_instance_put(env, lfsck);
2393 EXPORT_SYMBOL(lfsck_query);
/* External interface: attach the LDLM namespace @ns to the instance bound
 * to @key.  Until this is done, lfsck_start() refuses with -EAGAIN
 * ("system not ready"). */
2395 int lfsck_register_namespace(const struct lu_env *env, struct dt_device *key,
2396 struct ldlm_namespace *ns)
2398 struct lfsck_instance *lfsck;
2401 lfsck = lfsck_instance_find(key, true, false);
2402 if (likely(lfsck != NULL)) {
2403 lfsck->li_namespace = ns;
2404 lfsck_instance_put(env, lfsck);
2410 EXPORT_SYMBOL(lfsck_register_namespace);
/* External interface: create and register the LFSCK instance for device
 * @key.  Allocates the instance, initializes its locks/lists/refcounts and
 * OST/MDT target tables, sets up local OID storage, locates the device
 * root and the OTABLE-IT object, initializes the bookmark, FID, namespace
 * and layout components, publishes the instance on the global list, and
 * finally adopts any targets parked on the global orphan lists.  Fails
 * with -EEXIST if an instance for @key is already registered. */
2412 int lfsck_register(const struct lu_env *env, struct dt_device *key,
2413 struct dt_device *next, struct obd_device *obd,
2414 lfsck_out_notify notify, void *notify_data, bool master)
2416 struct lfsck_instance *lfsck;
2417 struct dt_object *root = NULL;
2418 struct dt_object *obj;
2419 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
2423 lfsck = lfsck_instance_find(key, false, false);
2424 if (unlikely(lfsck != NULL))
2427 OBD_ALLOC_PTR(lfsck);
2431 mutex_init(&lfsck->li_mutex);
2432 spin_lock_init(&lfsck->li_lock);
2433 CFS_INIT_LIST_HEAD(&lfsck->li_link);
2434 CFS_INIT_LIST_HEAD(&lfsck->li_list_scan);
2435 CFS_INIT_LIST_HEAD(&lfsck->li_list_dir);
2436 CFS_INIT_LIST_HEAD(&lfsck->li_list_double_scan);
2437 CFS_INIT_LIST_HEAD(&lfsck->li_list_idle);
2438 atomic_set(&lfsck->li_ref, 1);
2439 atomic_set(&lfsck->li_double_scan_count, 0);
2440 init_waitqueue_head(&lfsck->li_thread.t_ctl_waitq);
2441 lfsck->li_out_notify = notify;
2442 lfsck->li_out_notify_data = notify_data;
2443 lfsck->li_next = next;
2444 lfsck->li_bottom = key;
2445 lfsck->li_obd = obd;
2447 rc = lfsck_tgt_descs_init(&lfsck->li_ost_descs);
2451 rc = lfsck_tgt_descs_init(&lfsck->li_mdt_descs);
2455 fid->f_seq = FID_SEQ_LOCAL_NAME;
2458 rc = local_oid_storage_init(env, lfsck->li_bottom, fid, &lfsck->li_los);
2462 rc = dt_root_get(env, key, fid);
2466 root = dt_locate(env, lfsck->li_bottom, fid);
2468 GOTO(out, rc = PTR_ERR(root));
2470 if (unlikely(!dt_try_as_dir(env, root)))
2471 GOTO(out, rc = -ENOTDIR);
2473 lfsck->li_local_root_fid = *fid;
2475 lfsck->li_master = 1;
/* On MDT0 resolve the global "ROOT" directory FID. */
2476 if (lfsck_dev_idx(lfsck->li_bottom) == 0) {
2477 rc = dt_lookup(env, root,
2478 (struct dt_rec *)(&lfsck->li_global_root_fid),
2479 (const struct dt_key *)"ROOT", BYPASS_CAPA);
/* Locate the object-table iterator (OTABLE-IT) used for full scans. */
2485 fid->f_seq = FID_SEQ_LOCAL_FILE;
2486 fid->f_oid = OTABLE_IT_OID;
2488 obj = dt_locate(env, lfsck->li_bottom, fid);
2490 GOTO(out, rc = PTR_ERR(obj));
2492 lfsck->li_obj_oit = obj;
2493 rc = obj->do_ops->do_index_try(env, obj, &dt_otable_features);
2495 if (rc == -ENOTSUPP)
2501 rc = lfsck_bookmark_setup(env, lfsck);
2506 rc = lfsck_fid_init(lfsck);
2510 rc = lfsck_namespace_setup(env, lfsck);
2515 rc = lfsck_layout_setup(env, lfsck);
2519 /* XXX: more LFSCK components initialization to be added here. */
2522 rc = lfsck_instance_add(lfsck);
2524 rc = lfsck_add_target_from_orphan(env, lfsck);
2526 if (root != NULL && !IS_ERR(root))
2527 lu_object_put(env, &root->do_lu);
/* NOTE(review): elided lines gate this cleanup to the error path. */
2529 lfsck_instance_cleanup(env, lfsck);
2532 EXPORT_SYMBOL(lfsck_register);
/* External interface: unregister the LFSCK instance of device @key.  The
 * find(..., unlink=true) call removes it from the global instance list;
 * dropping the returned reference frees it once the last user is gone. */
2534 void lfsck_degister(const struct lu_env *env, struct dt_device *key)
2536 struct lfsck_instance *lfsck;
2538 lfsck = lfsck_instance_find(key, false, true);
2540 lfsck_instance_put(env, lfsck);
2542 EXPORT_SYMBOL(lfsck_degister);
/* External interface: register an OST/MDT target @tgt (index @index) with
 * the LFSCK instance of device @key.  If the instance does not exist yet,
 * the new descriptor is parked on the corresponding global orphan list
 * (lfsck_ost_orphan_list / lfsck_mdt_orphan_list) and adopted later by
 * lfsck_add_target_from_orphan() when the instance registers. */
2544 int lfsck_add_target(const struct lu_env *env, struct dt_device *key,
2545 struct dt_device *tgt, struct obd_export *exp,
2546 __u32 index, bool for_ost)
2548 struct lfsck_instance *lfsck;
2549 struct lfsck_tgt_desc *ltd;
2560 INIT_LIST_HEAD(&ltd->ltd_orphan_list);
2561 INIT_LIST_HEAD(&ltd->ltd_layout_list);
2562 INIT_LIST_HEAD(&ltd->ltd_layout_phase_list);
2563 atomic_set(&ltd->ltd_ref, 1);
2564 ltd->ltd_index = index;
2566 spin_lock(&lfsck_instance_lock);
2567 lfsck = __lfsck_instance_find(key, true, false);
2568 if (lfsck == NULL) {
/* No instance yet: park the descriptor on the matching orphan list. */
2570 list_add_tail(&ltd->ltd_orphan_list,
2571 &lfsck_ost_orphan_list);
2573 list_add_tail(&ltd->ltd_orphan_list,
2574 &lfsck_mdt_orphan_list);
2575 spin_unlock(&lfsck_instance_lock);
2579 spin_unlock(&lfsck_instance_lock);
2581 rc = __lfsck_add_target(env, lfsck, ltd, for_ost, false);
2585 lfsck_instance_put(env, lfsck);
2589 EXPORT_SYMBOL(lfsck_add_target);
/* External interface: deregister the OST/MDT target @tgt (index @index)
 * from the LFSCK instance of device @key.  First check the global orphan
 * lists (targets added before the instance existed); otherwise remove the
 * descriptor from the instance's target table, clear its bitmap bit, and
 * notify the layout component that the target is going away.
 * FIX: the second per-instance orphan-list branch below selected
 * li_ost_descs in both arms (copy-paste error, cf. the global-list
 * selection above which correctly distinguishes OST vs MDT); the non-OST
 * arm now uses li_mdt_descs. */
2591 void lfsck_del_target(const struct lu_env *env, struct dt_device *key,
2592 struct dt_device *tgt, __u32 index, bool for_ost)
2594 struct lfsck_instance *lfsck;
2595 struct lfsck_tgt_descs *ltds;
2596 struct lfsck_tgt_desc *ltd = NULL;
2597 struct list_head *head;
/* Select the global orphan list by target type. */
2600 head = &lfsck_ost_orphan_list;
2602 head = &lfsck_mdt_orphan_list;
2604 spin_lock(&lfsck_instance_lock);
2605 list_for_each_entry(ltd, head, ltd_orphan_list) {
2606 if (ltd->ltd_tgt == tgt) {
2607 list_del_init(&ltd->ltd_orphan_list);
2608 spin_unlock(&lfsck_instance_lock);
2615 lfsck = __lfsck_instance_find(key, true, false);
2616 spin_unlock(&lfsck_instance_lock);
2617 if (unlikely(lfsck == NULL))
2621 ltds = &lfsck->li_ost_descs;
2623 ltds = &lfsck->li_mdt_descs;
2625 down_write(&ltds->ltd_rw_sem);
2626 LASSERT(ltds->ltd_tgts_bitmap != NULL);
2628 if (unlikely(index >= ltds->ltd_tgts_bitmap->size))
2631 ltd = LTD_TGT(ltds, index);
2632 if (unlikely(ltd == NULL))
2635 LASSERT(ltds->ltd_tgtnr > 0);
2638 cfs_bitmap_clear(ltds->ltd_tgts_bitmap, index);
2639 LTD_TGT(ltds, index) = NULL;
/* Fall back to the per-instance orphan list of the matching type. */
2644 head = &lfsck->li_ost_descs.ltd_orphan;
2646 head = &lfsck->li_mdt_descs.ltd_orphan;
2648 list_for_each_entry(ltd, head, ltd_orphan_list) {
2649 if (ltd->ltd_tgt == tgt) {
2650 list_del_init(&ltd->ltd_orphan_list);
2656 up_write(&ltds->ltd_rw_sem);
2658 spin_lock(&ltds->ltd_lock);
2660 spin_unlock(&ltds->ltd_lock);
/* Let the layout component stop any work targeting the removed target. */
2661 lfsck_stop_notify(env, lfsck, ltds, ltd, LT_LAYOUT);
2665 lfsck_instance_put(env, lfsck);
2667 EXPORT_SYMBOL(lfsck_del_target);
/* Module init: set up the global orphan lists, register the per-thread
 * context key, and hook the LFSCK notify/query callbacks into the target
 * (tgt) layer. */
2669 static int __init lfsck_init(void)
2673 INIT_LIST_HEAD(&lfsck_ost_orphan_list);
2674 INIT_LIST_HEAD(&lfsck_mdt_orphan_list);
2675 lfsck_key_init_generic(&lfsck_thread_key, NULL);
2676 rc = lu_context_key_register(&lfsck_thread_key);
2678 tgt_register_lfsck_in_notify(lfsck_in_notify);
2679 tgt_register_lfsck_query(lfsck_query);
/* Module exit: all instances must be gone (LASSERT); release any target
 * descriptors still parked on the global orphan lists, then deregister the
 * per-thread context key.  The descriptor free presumably happens on the
 * elided lines after each list_del_init(). */
2685 static void __exit lfsck_exit(void)
2687 struct lfsck_tgt_desc *ltd;
2688 struct lfsck_tgt_desc *next;
2690 LASSERT(cfs_list_empty(&lfsck_instance_list));
2692 list_for_each_entry_safe(ltd, next, &lfsck_ost_orphan_list,
2694 list_del_init(&ltd->ltd_orphan_list);
2698 list_for_each_entry_safe(ltd, next, &lfsck_mdt_orphan_list,
2700 list_del_init(&ltd->ltd_orphan_list);
2704 lu_context_key_degister(&lfsck_thread_key);
2707 MODULE_AUTHOR("Intel Corporation <http://www.intel.com/>");
2708 MODULE_DESCRIPTION("LFSCK");
2709 MODULE_LICENSE("GPL");
2711 cfs_module(lfsck, LUSTRE_VERSION_STRING, lfsck_init, lfsck_exit);