4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
38 #include <lustre/lustre_idl.h>
39 #include <lu_object.h>
40 #include <dt_object.h>
41 #include <lustre_linkea.h>
42 #include <lustre_fid.h>
43 #include <lustre_lib.h>
44 #include <lustre_net.h>
45 #include <lustre/lustre_user.h>
46 #include <md_object.h>
47 #include <obd_class.h>
49 #include "lfsck_internal.h"
51 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Name under which the layout LFSCK component is registered/identified. */
53 static const char lfsck_layout_name[] = "lfsck_layout";
/* Per-FID-sequence state on the LFSCK layout slave (OST side): linked into
 * lfsck_layout_slave_data::llsd_seq_list, caching the LAST_ID object of the
 * sequence.  NOTE(review): some members (and the closing brace) are elided
 * in this excerpt — do not treat the member list as complete. */
55 struct lfsck_layout_seq {
56 struct list_head lls_list;
/* highest object ID known for this sequence (from scanning). */
59 __u64 lls_lastid_known;
/* the LAST_ID file object for this sequence. */
60 struct dt_object *lls_lastid_obj;
/* set when the in-memory LAST_ID differs from the on-disk copy. */
61 unsigned int lls_dirty:1;
/* Slave-side descriptor of one master (MDT) involved in layout verification;
 * reference counted via llst_ref (used by lfsck_layout_llst_put() below). */
64 struct lfsck_layout_slave_target {
65 /* link into lfsck_layout_slave_data::llsd_master_list. */
66 struct list_head llst_list;
/* Private data of the layout LFSCK slave component: the FID sequences seen
 * locally and the masters taking part in the layout verification. */
72 struct lfsck_layout_slave_data {
73 /* list for lfsck_layout_seq */
74 struct list_head llsd_seq_list;
76 /* list for the masters involve layout verification. */
77 struct list_head llsd_master_list;
/* Reference-counted wrapper of a parent (MDT-side) object together with a
 * cached copy of its attributes, shared between layout verify requests. */
82 struct lfsck_layout_object {
83 struct dt_object *llo_obj;
84 struct lu_attr llo_attr;
/* One queued layout verification request: compare parent object's LOV EA
 * stripe @llr_lov_idx against the child OST-object. */
89 struct lfsck_layout_req {
90 struct list_head llr_list;
91 struct lfsck_layout_object *llr_parent;
92 struct dt_object *llr_child;
94 __u32 llr_lov_idx; /* offset in LOV EA */
/* Private data of the layout LFSCK master component (MDT side): the pending
 * request queue consumed by the assistant thread, the OST/MDT targets taking
 * part in the scan (split per scanning phase), and assistant control state.
 * NOTE(review): trailing members and closing brace are elided in this copy. */
97 struct lfsck_layout_master_data {
/* queue of lfsck_layout_req, processed by the assistant thread. */
99 struct list_head llmd_req_list;
101 /* list for the ost targets involve layout verification. */
102 struct list_head llmd_ost_list;
104 /* list for the ost targets in phase1 scanning. */
105 struct list_head llmd_ost_phase1_list;
107 /* list for the ost targets in phase2 scanning. */
108 struct list_head llmd_ost_phase2_list;
110 /* list for the mdt targets involve layout verification. */
111 struct list_head llmd_mdt_list;
113 /* list for the mdt targets in phase1 scanning. */
114 struct list_head llmd_mdt_phase1_list;
116 /* list for the mdt targets in phase2 scanning. */
117 struct list_head llmd_mdt_phase2_list;
119 struct ptlrpc_thread llmd_thread;
120 atomic_t llmd_rpcs_in_flight;
/* generation counter used to avoid touching a target twice per sweep. */
121 __u32 llmd_touch_gen;
123 int llmd_assistant_status;
124 int llmd_post_result;
125 unsigned int llmd_to_post:1,
126 llmd_to_double_scan:1,
127 llmd_in_double_scan:1,
/* Arguments carried by a slave-side async RPC to its interpret callback. */
131 struct lfsck_layout_slave_async_args {
132 struct obd_export *llsaa_exp;
133 struct lfsck_component *llsaa_com;
134 struct lfsck_layout_slave_target *llsaa_llst;
/* Drop one reference on @llst; on the last put the target must already be
 * unlinked from llsd_master_list (the freeing code is elided in this copy). */
138 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
140 if (atomic_dec_and_test(&llst->llst_ref)) {
141 LASSERT(list_empty(&llst->llst_list));
/* Register master @index with the slave: allocate a new slave-target entry
 * and link it into llsd_master_list under llsd_lock, unless an entry with the
 * same index already exists (the duplicate-found path is elided here). */
148 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
150 struct lfsck_layout_slave_target *llst;
151 struct lfsck_layout_slave_target *tmp;
158 INIT_LIST_HEAD(&llst->llst_list);
160 llst->llst_index = index;
/* initial reference owned by the list linkage. */
161 atomic_set(&llst->llst_ref, 1);
163 spin_lock(&llsd->llsd_lock);
/* scan for an existing entry with the same master index. */
164 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
165 if (tmp->llst_index == index) {
171 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
172 spin_unlock(&llsd->llsd_lock);
/* Unlink @llst from llsd_master_list (if still linked) and drop the list's
 * reference via lfsck_layout_llst_put(). */
181 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
182 struct lfsck_layout_slave_target *llst)
186 spin_lock(&llsd->llsd_lock);
187 if (!list_empty(&llst->llst_list)) {
188 list_del_init(&llst->llst_list);
191 spin_unlock(&llsd->llsd_lock);
/* presumably only called when the unlink above happened — the conditional
 * context around this put is elided in this copy; verify against upstream. */
194 lfsck_layout_llst_put(llst);
/* Find the slave-target with master @index, unlink it from llsd_master_list
 * and return it (caller inherits the list's reference); the not-found return
 * path is elided in this copy. */
197 static inline struct lfsck_layout_slave_target *
198 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
201 struct lfsck_layout_slave_target *llst;
203 spin_lock(&llsd->llsd_lock);
204 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
205 if (llst->llst_index == index) {
206 list_del_init(&llst->llst_list);
207 spin_unlock(&llsd->llsd_lock);
212 spin_unlock(&llsd->llsd_lock);
/* Drop one reference on @llo; on the last put release the wrapped object
 * (the freeing of @llo itself is elided in this copy). */
217 static inline void lfsck_layout_object_put(const struct lu_env *env,
218 struct lfsck_layout_object *llo)
220 if (atomic_dec_and_test(&llo->llo_ref)) {
221 lfsck_object_put(env, llo->llo_obj);
/* Release the resources held by a layout verify request: the child object
 * reference and the parent wrapper reference. */
226 static inline void lfsck_layout_req_fini(const struct lu_env *env,
227 struct lfsck_layout_req *llr)
229 lu_object_put(env, &llr->llr_child->do_lu);
230 lfsck_layout_object_put(env, llr->llr_parent);
/* Return whether the assistant request queue is empty; the check is made
 * under llmd_lock (the result variable handling is elided in this copy). */
234 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
238 spin_lock(&llmd->llmd_lock);
239 if (list_empty(&llmd->llmd_req_list))
241 spin_unlock(&llmd->llmd_lock);
/* Convert an on-disk (little-endian) lfsck_layout record @src into the
 * CPU-endian image @des, field by field. */
246 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
247 const struct lfsck_layout *src)
251 des->ll_magic = le32_to_cpu(src->ll_magic);
252 des->ll_status = le32_to_cpu(src->ll_status);
253 des->ll_flags = le32_to_cpu(src->ll_flags);
254 des->ll_success_count = le32_to_cpu(src->ll_success_count);
255 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
256 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
257 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
258 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
259 des->ll_time_last_checkpoint =
260 le64_to_cpu(src->ll_time_last_checkpoint);
261 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
262 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
263 des->ll_pos_first_inconsistent =
264 le64_to_cpu(src->ll_pos_first_inconsistent);
265 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
266 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
267 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
268 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
/* per-inconsistency-type repair counters. */
269 for (i = 0; i < LLIT_MAX; i++)
270 des->ll_objs_repaired[i] =
271 le64_to_cpu(src->ll_objs_repaired[i]);
272 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/* Convert a CPU-endian lfsck_layout image @src into the on-disk
 * (little-endian) record @des; exact inverse of lfsck_layout_le_to_cpu(). */
275 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
276 const struct lfsck_layout *src)
280 des->ll_magic = cpu_to_le32(src->ll_magic);
281 des->ll_status = cpu_to_le32(src->ll_status);
282 des->ll_flags = cpu_to_le32(src->ll_flags);
283 des->ll_success_count = cpu_to_le32(src->ll_success_count);
284 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
285 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
286 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
287 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
288 des->ll_time_last_checkpoint =
289 cpu_to_le64(src->ll_time_last_checkpoint);
290 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
291 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
292 des->ll_pos_first_inconsistent =
293 cpu_to_le64(src->ll_pos_first_inconsistent);
294 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
295 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
296 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
297 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
/* per-inconsistency-type repair counters. */
298 for (i = 0; i < LLIT_MAX; i++)
299 des->ll_objs_repaired[i] =
300 cpu_to_le64(src->ll_objs_repaired[i]);
301 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/* Load the persistent lfsck_layout trace file into com->lc_file_ram,
 * converting from little-endian and validating the magic.
 *
305 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
306 * \retval 0: succeed.
307 * \retval -ve: failed cases.
 */
309 static int lfsck_layout_load(const struct lu_env *env,
310 struct lfsck_component *com)
312 struct lfsck_layout *lo = com->lc_file_ram;
313 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
314 ssize_t size = com->lc_file_size;
/* read the raw on-disk image into the lc_file_disk staging buffer. */
318 rc = dbo->dbo_read(env, com->lc_obj,
319 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
324 CWARN("%s: failed to load lfsck_layout: rc = %d\n",
325 lfsck_lfsck2name(com->lc_lfsck), rc);
/* short read: on-disk record is truncated/corrupt — report for reset. */
327 } else if (rc != size) {
328 CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
329 lfsck_lfsck2name(com->lc_lfsck), rc);
333 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
334 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
335 CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
336 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
337 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/* Persist com->lc_file_ram to the trace file: convert to little-endian in
 * lc_file_disk, then declare + write the record inside a local transaction.
 * Error paths GOTO trans-stop/cleanup labels elided in this copy. */
344 static int lfsck_layout_store(const struct lu_env *env,
345 struct lfsck_component *com)
347 struct dt_object *obj = com->lc_obj;
348 struct lfsck_instance *lfsck = com->lc_lfsck;
349 struct lfsck_layout *lo = com->lc_file_disk;
350 struct thandle *handle;
351 ssize_t size = com->lc_file_size;
356 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
357 handle = dt_trans_create(env, lfsck->li_bottom);
358 if (IS_ERR(handle)) {
359 rc = PTR_ERR(handle);
360 CERROR("%s: fail to create trans for storing lfsck_layout: "
361 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
365 rc = dt_declare_record_write(env, obj, size, pos, handle);
367 CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
368 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
372 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
374 CERROR("%s: fail to start trans for storing lfsck_layout: "
375 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
379 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
382 CERROR("%s: fail to store lfsck_layout(1): size = %d, "
383 "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);
/* common exit: stop the transaction whether or not the write succeeded. */
388 dt_trans_stop(env, lfsck->li_bottom, handle);
/* Reset the in-RAM lfsck_layout to a pristine LS_INIT record and persist it,
 * serialized against other trace-file users by lc_sem. */
393 static int lfsck_layout_init(const struct lu_env *env,
394 struct lfsck_component *com)
396 struct lfsck_layout *lo = com->lc_file_ram;
399 memset(lo, 0, com->lc_file_size);
400 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
401 lo->ll_status = LS_INIT;
402 down_write(&com->lc_sem);
403 rc = lfsck_layout_store(env, com);
404 up_write(&com->lc_sem);
/* Decide whether @fid names an OST-object: first ask the FLD server for the
 * sequence's range type, then fall back to inspecting the object's LMA
 * xattr, and finally probe for the XATTR_NAME_FID xattr.  Returns 1 for an
 * OST-object, 0 otherwise (intermediate return paths elided in this copy). */
409 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
410 struct dt_object *obj, const struct lu_fid *fid)
412 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
413 struct lu_seq_range range = { 0 };
414 struct lustre_mdt_attrs *lma;
417 fld_range_set_any(&range);
418 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
420 if (fld_range_is_ost(&range))
/* FLD gave no answer; read the object's LMA to check its flags. */
426 lma = &lfsck_env_info(env)->lti_lma;
427 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
428 XATTR_NAME_LMA, BYPASS_CAPA);
429 if (rc == sizeof(*lma)) {
430 lustre_lma_swab(lma);
432 /* Generally, the low layer OSD create handler or OI scrub
433 * will set the LMAC_FID_ON_OST for all external visible
434 * OST-objects. But to make the otable-based iteration to
435 * be independent from OI scrub in spite of it got failure
436 * or not, we check the LMAC_FID_ON_OST here to guarantee
437 * that the LFSCK will not repair something by wrong. */
438 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
/* no usable LMA: probe for the "fid" xattr only (NULL buffer => size probe). */
441 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/* Find the lfsck_layout_seq for @seq in llsd_seq_list.  The list is kept
 * sorted by ascending lls_seq (see lfsck_layout_seq_insert), so the scan can
 * stop early once lls_seq exceeds @seq; return paths elided in this copy. */
446 static struct lfsck_layout_seq *
447 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
449 struct lfsck_layout_seq *lls;
451 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
452 if (lls->lls_seq == seq)
455 if (lls->lls_seq > seq)
/* Insert @lls into llsd_seq_list keeping the list sorted by ascending
 * lls_seq: link it just before the first entry with a larger sequence, or at
 * the tail if none is larger. */
463 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
464 struct lfsck_layout_seq *lls)
466 struct lfsck_layout_seq *tmp;
467 struct list_head *pos = &llsd->llsd_seq_list;
469 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
470 if (lls->lls_seq < tmp->lls_seq) {
471 pos = &tmp->lls_list;
/* list_add_tail before @pos == insert just ahead of the larger entry. */
475 list_add_tail(&lls->lls_list, pos);
/* (Re)create a LAST_ID file object for the sequence of @obj, writing an
 * initial last-id record, unless the LFSCK runs in dry-run mode.  Declare
 * + execute inside a local transaction; error GOTOs elided in this copy. */
479 lfsck_layout_lastid_create(const struct lu_env *env,
480 struct lfsck_instance *lfsck,
481 struct dt_object *obj)
483 struct lfsck_thread_info *info = lfsck_env_info(env);
484 struct lu_attr *la = &info->lti_la;
485 struct dt_object_format *dof = &info->lti_dof;
486 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
487 struct dt_device *dt = lfsck->li_bottom;
494 CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
495 fid_seq(lfsck_dto2fid(obj)));
/* dry-run: report only, never modify the device. */
497 if (bk->lb_param & LPF_DRYRUN)
500 memset(la, 0, sizeof(*la));
501 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
502 la->la_valid = LA_MODE | LA_UID | LA_GID;
503 dof->dof_type = dt_mode_to_dft(S_IFREG);
505 th = dt_trans_create(env, dt);
507 RETURN(rc = PTR_ERR(th));
509 rc = dt_declare_create(env, obj, la, NULL, dof, th);
513 rc = dt_declare_record_write(env, obj, sizeof(lastid), pos, th);
517 rc = dt_trans_start_local(env, dt, th);
/* create + write the initial record, unless someone raced us to it. */
521 dt_write_lock(env, obj, 0);
522 if (likely(!dt_object_exists(obj))) {
523 rc = dt_create(env, obj, la, NULL, dof, th);
525 rc = dt_record_write(env, obj,
526 lfsck_buf_get(env, &lastid, sizeof(lastid)),
529 dt_write_unlock(env, obj);
534 dt_trans_stop(env, dt, th);
/* Re-read the on-disk LAST_ID for @lls and reconcile it with the value the
 * LFSCK has learned from scanning: if the disk value lags behind the known
 * maximum, the LAST_ID file crashed — flag LF_CRASHED_LASTID and notify the
 * out-of-LFSCK listener so the rebuild can be driven. */
540 lfsck_layout_lastid_reload(const struct lu_env *env,
541 struct lfsck_component *com,
542 struct lfsck_layout_seq *lls)
548 dt_read_lock(env, lls->lls_lastid_obj, 0);
549 rc = dt_record_read(env, lls->lls_lastid_obj,
550 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
551 dt_read_unlock(env, lls->lls_lastid_obj);
552 if (unlikely(rc != 0))
/* on-disk record is little-endian. */
555 lastid = le64_to_cpu(lastid);
556 if (lastid < lls->lls_lastid_known) {
557 struct lfsck_instance *lfsck = com->lc_lfsck;
558 struct lfsck_layout *lo = com->lc_file_ram;
560 lls->lls_lastid = lls->lls_lastid_known;
/* only raise the crashed flag (and notify) once. */
562 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
563 LASSERT(lfsck->li_out_notify != NULL);
565 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
566 LE_LASTID_REBUILDING);
567 lo->ll_flags |= LF_CRASHED_LASTID;
569 } else if (lastid >= lls->lls_lastid) {
570 lls->lls_lastid = lastid;
/* Flush every dirty per-sequence LAST_ID back to disk.  Clean sequences are
 * first re-validated via lfsck_layout_lastid_reload() to catch OFD updates
 * that may hide a crash; dry-run mode skips the writes.  Error GOTOs and
 * loop-continue paths are elided in this copy. */
578 lfsck_layout_lastid_store(const struct lu_env *env,
579 struct lfsck_component *com)
581 struct lfsck_instance *lfsck = com->lc_lfsck;
582 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
583 struct dt_device *dt = lfsck->li_bottom;
584 struct lfsck_layout_slave_data *llsd = com->lc_data;
585 struct lfsck_layout_seq *lls;
591 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
594 /* XXX: Add the code back if we really found related
595 * inconsistent cases in the future. */
597 if (!lls->lls_dirty) {
598 /* In OFD, before the pre-creation, the LAST_ID
599 * file will be updated firstly, which may hide
600 * some potential crashed cases. For example:
602 * The old obj1's ID is higher than old LAST_ID
603 * but lower than the new LAST_ID, but the LFSCK
604 * have not touch the obj1 until the OFD updated
605 * the LAST_ID. So the LFSCK does not regard it
606 * as crashed case. But when OFD does not create
607 * successfully, it will set the LAST_ID as the
608 * real created objects' ID, then LFSCK needs to
609 * found related inconsistency. */
610 rc = lfsck_layout_lastid_reload(env, com, lls);
611 if (likely(!lls->lls_dirty))
616 CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
617 " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);
/* dry-run: mark nothing on disk. */
619 if (bk->lb_param & LPF_DRYRUN) {
624 th = dt_trans_create(env, dt);
627 CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
628 lfsck_lfsck2name(com->lc_lfsck),
633 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
634 sizeof(lastid), pos, th);
638 rc = dt_trans_start_local(env, dt, th);
/* write the LAST_ID in little-endian under the object write lock. */
642 lastid = cpu_to_le64(lls->lls_lastid);
643 dt_write_lock(env, lls->lls_lastid_obj, 0);
644 rc = dt_record_write(env, lls->lls_lastid_obj,
645 lfsck_buf_get(env, &lastid,
646 sizeof(lastid)), &pos, th);
647 dt_write_unlock(env, lls->lls_lastid_obj);
652 dt_trans_stop(env, dt, th);
655 CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
656 lfsck_lfsck2name(com->lc_lfsck),
/* Locate and load the LAST_ID object for lls->lls_seq.  If the file is
 * missing or empty the LAST_ID is considered crashed: flag
 * LF_CRASHED_LASTID, notify the listener, and (re)create the file.  On
 * success the object reference is cached in lls->lls_lastid_obj. */
665 lfsck_layout_lastid_load(const struct lu_env *env,
666 struct lfsck_component *com,
667 struct lfsck_layout_seq *lls)
669 struct lfsck_instance *lfsck = com->lc_lfsck;
670 struct lfsck_layout *lo = com->lc_file_ram;
671 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
672 struct dt_object *obj;
677 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
678 obj = dt_locate(env, lfsck->li_bottom, fid);
680 RETURN(PTR_ERR(obj));
682 /* LAST_ID crashed, to be rebuilt */
683 if (!dt_object_exists(obj)) {
684 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
685 LASSERT(lfsck->li_out_notify != NULL);
687 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
688 LE_LASTID_REBUILDING);
689 lo->ll_flags |= LF_CRASHED_LASTID;
/* fault-injection hook: optionally stall here (dropping lc_sem so
 * others can make progress) until the LFSCK thread stops. */
691 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
693 struct l_wait_info lwi = LWI_TIMEOUT(
694 cfs_time_seconds(cfs_fail_val),
697 up_write(&com->lc_sem);
698 l_wait_event(lfsck->li_thread.t_ctl_waitq,
699 !thread_is_running(&lfsck->li_thread),
701 down_write(&com->lc_sem);
705 rc = lfsck_layout_lastid_create(env, lfsck, obj);
/* file exists: read the current LAST_ID value (little-endian u64). */
707 dt_read_lock(env, obj, 0);
708 rc = dt_read(env, obj,
709 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
711 dt_read_unlock(env, obj);
712 if (rc != 0 && rc != sizeof(__u64))
713 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
/* rc == 0: empty file, treat as crashed LAST_ID as well. */
715 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
716 LASSERT(lfsck->li_out_notify != NULL);
718 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
719 LE_LASTID_REBUILDING);
720 lo->ll_flags |= LF_CRASHED_LASTID;
723 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
/* error path drops the reference; success path caches it. */
731 lfsck_object_put(env, obj);
733 lls->lls_lastid_obj = obj;
/* Interpret callback for the master's async LFSCK RPCs to OSTs/MDTs.
 * Dispatches on the event that was sent (lr_event): for LE_START, (re)link
 * the target onto the per-phase tracking lists; for LE_QUERY, examine the
 * peer's reply status and move the target between phase1/phase2 lists or
 * drop it entirely.  Error legs mark the scan LF_INCOMPLETE.  Several case
 * labels and GOTOs are elided in this copy. */
738 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
739 struct ptlrpc_request *req,
742 struct lfsck_async_interpret_args *laia = args;
743 struct lfsck_component *com = laia->laia_com;
744 struct lfsck_layout_master_data *llmd = com->lc_data;
745 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
746 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
747 struct lfsck_request *lr = laia->laia_lr;
749 switch (lr->lr_event) {
/* LE_START failure path: the target could not join — partial scan. */
752 struct lfsck_layout *lo = com->lc_file_ram;
754 lo->ll_flags |= LF_INCOMPLETE;
759 spin_lock(&ltds->ltd_lock);
760 if (ltd->ltd_dead || ltd->ltd_layout_done) {
761 spin_unlock(&ltds->ltd_lock);
/* link the target into the OST or MDT tracking lists per lr_flags. */
766 if (lr->lr_flags & LEF_TO_OST) {
767 if (list_empty(&ltd->ltd_layout_list))
768 list_add_tail(&ltd->ltd_layout_list,
769 &llmd->llmd_ost_list);
770 if (list_empty(&ltd->ltd_layout_phase_list))
771 list_add_tail(&ltd->ltd_layout_phase_list,
772 &llmd->llmd_ost_phase1_list);
774 if (list_empty(&ltd->ltd_layout_list))
775 list_add_tail(&ltd->ltd_layout_list,
776 &llmd->llmd_mdt_list);
777 if (list_empty(&ltd->ltd_layout_phase_list))
778 list_add_tail(&ltd->ltd_layout_phase_list,
779 &llmd->llmd_mdt_phase1_list);
781 spin_unlock(&ltds->ltd_lock);
788 CERROR("%s: fail to notify %s %x for layout: "
789 "event = %d, rc = %d\n",
790 lfsck_lfsck2name(com->lc_lfsck),
791 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
792 ltd->ltd_index, lr->lr_event, rc);
/* LE_QUERY handling: read the peer's reply and reclassify the target. */
795 struct lfsck_reply *reply;
/* RPC failed: forget this target. */
798 spin_lock(&ltds->ltd_lock);
799 list_del_init(&ltd->ltd_layout_phase_list);
800 list_del_init(&ltd->ltd_layout_list);
801 spin_unlock(&ltds->ltd_lock);
806 reply = req_capsule_server_get(&req->rq_pill,
810 CERROR("%s: invalid return value: rc = %d\n",
811 lfsck_lfsck2name(com->lc_lfsck), rc);
812 spin_lock(&ltds->ltd_lock);
813 list_del_init(&ltd->ltd_layout_phase_list);
814 list_del_init(&ltd->ltd_layout_list);
815 spin_unlock(&ltds->ltd_lock);
820 switch (reply->lr_status) {
821 case LS_SCANNING_PHASE1:
823 case LS_SCANNING_PHASE2:
/* peer moved to phase2: migrate it to the phase2 tracking list. */
824 spin_lock(&ltds->ltd_lock);
825 list_del_init(&ltd->ltd_layout_phase_list);
826 if (ltd->ltd_dead || ltd->ltd_layout_done) {
827 spin_unlock(&ltds->ltd_lock);
831 if (lr->lr_flags & LEF_TO_OST)
832 list_add_tail(&ltd->ltd_layout_phase_list,
833 &llmd->llmd_ost_phase2_list);
835 list_add_tail(&ltd->ltd_layout_phase_list,
836 &llmd->llmd_mdt_phase2_list);
837 spin_unlock(&ltds->ltd_lock);
/* any other status: the peer is done (or failed) — drop it. */
840 spin_lock(&ltds->ltd_lock);
841 list_del_init(&ltd->ltd_layout_phase_list);
842 list_del_init(&ltd->ltd_layout_list);
843 spin_unlock(&ltds->ltd_lock);
850 CERROR("%s: unexpected event: rc = %d\n",
851 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
/* balance the reference taken when the RPC was issued. */
855 lfsck_component_put(env, com);
/* Send LE_QUERY RPCs to every target still in a phase1 list — MDTs first,
 * then OSTs — and wait for the replies (handled by the async interpret
 * above).  llmd_touch_gen guards against visiting the same target twice in
 * one sweep, since handled targets are rotated to the list tail.  Loop
 * labels and the MDT->OST transition GOTO are elided in this copy. */
860 static int lfsck_layout_master_query_others(const struct lu_env *env,
861 struct lfsck_component *com)
863 struct lfsck_thread_info *info = lfsck_env_info(env);
864 struct lfsck_request *lr = &info->lti_lr;
865 struct lfsck_async_interpret_args *laia = &info->lti_laia;
866 struct lfsck_instance *lfsck = com->lc_lfsck;
867 struct lfsck_layout_master_data *llmd = com->lc_data;
868 struct ptlrpc_request_set *set;
869 struct lfsck_tgt_descs *ltds;
870 struct lfsck_tgt_desc *ltd;
871 struct list_head *head;
877 set = ptlrpc_prep_set();
/* new sweep generation: targets already stamped with it are skipped. */
881 llmd->llmd_touch_gen++;
882 memset(lr, 0, sizeof(*lr));
883 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
884 lr->lr_event = LE_QUERY;
885 lr->lr_active = LT_LAYOUT;
886 laia->laia_com = com;
/* query MDTs in phase1 first; otherwise fall through to the OSTs. */
889 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
890 ltds = &lfsck->li_mdt_descs;
892 head = &llmd->llmd_mdt_phase1_list;
896 ltds = &lfsck->li_ost_descs;
897 lr->lr_flags = LEF_TO_OST;
898 head = &llmd->llmd_ost_phase1_list;
901 laia->laia_ltds = ltds;
902 spin_lock(&ltds->ltd_lock);
903 while (!list_empty(head)) {
904 ltd = list_entry(head->next,
905 struct lfsck_tgt_desc,
906 ltd_layout_phase_list);
/* already visited in this sweep — the whole list has been walked. */
907 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
910 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
/* rotate to tail so the head always holds an unvisited target. */
911 list_del(&ltd->ltd_layout_phase_list);
912 list_add_tail(&ltd->ltd_layout_phase_list, head);
913 atomic_inc(&ltd->ltd_ref);
914 laia->laia_ltd = ltd;
/* drop the lock while issuing the (potentially blocking) request. */
915 spin_unlock(&ltds->ltd_lock);
916 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
917 lfsck_layout_master_async_interpret,
920 CERROR("%s: fail to query %s %x for layout: rc = %d\n",
921 lfsck_lfsck2name(lfsck),
922 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
929 spin_lock(&ltds->ltd_lock);
931 spin_unlock(&ltds->ltd_lock);
934 rc = ptlrpc_set_wait(set);
936 ptlrpc_set_destroy(set);
/* after the MDT pass, loop back for the OSTs if the MDT list drained. */
942 if (!(lr->lr_flags & LEF_TO_OST) &&
943 list_empty(&llmd->llmd_mdt_phase1_list))
946 ptlrpc_set_destroy(set);
/* report the first per-target failure (rc1) in preference to rc. */
948 RETURN(rc1 != 0 ? rc1 : rc);
/* The master may move on to orphan handling once no MDT is still in phase1
 * and the OSTs have either reached phase2 or all left phase1. */
952 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
954 return list_empty(&llmd->llmd_mdt_phase1_list) &&
955 (!list_empty(&llmd->llmd_ost_phase2_list) ||
956 list_empty(&llmd->llmd_ost_phase1_list));
/* Broadcast an LFSCK layout event (@lr->lr_event) from the master to the
 * involved targets.  LE_START notifies all OSTs and, when LPF_ALL_MDT is
 * set, other MDTs as well; LE_STOP/LE_PHASE2_DONE walk the tracked target
 * lists (MDTs then OSTs); LE_PHASE1_DONE notifies the MDTs still in phase1.
 * Replies are handled by lfsck_layout_master_async_interpret().  Case
 * labels, GOTO targets and several closing braces are elided in this copy —
 * consult upstream lfsck_layout.c before relying on exact control flow. */
959 static int lfsck_layout_master_notify_others(const struct lu_env *env,
960 struct lfsck_component *com,
961 struct lfsck_request *lr,
964 struct lfsck_thread_info *info = lfsck_env_info(env);
965 struct lfsck_async_interpret_args *laia = &info->lti_laia;
966 struct lfsck_instance *lfsck = com->lc_lfsck;
967 struct lfsck_layout_master_data *llmd = com->lc_data;
968 struct lfsck_layout *lo = com->lc_file_ram;
969 struct ptlrpc_request_set *set;
970 struct lfsck_tgt_descs *ltds;
971 struct lfsck_tgt_desc *ltd;
972 struct lfsck_tgt_desc *next;
973 struct list_head *head;
979 set = ptlrpc_prep_set();
983 lr->lr_active = LT_LAYOUT;
984 laia->laia_com = com;
987 switch (lr->lr_event) {
989 /* Notify OSTs firstly, then other MDTs if needed. */
990 lr->lr_flags |= LEF_TO_OST;
991 ltds = &lfsck->li_ost_descs;
994 laia->laia_ltds = ltds;
995 down_read(&ltds->ltd_rw_sem);
/* walk every registered target in the bitmap. */
996 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
997 ltd = lfsck_tgt_get(ltds, idx);
998 LASSERT(ltd != NULL);
1000 laia->laia_ltd = ltd;
1001 ltd->ltd_layout_done = 0;
1002 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1003 lfsck_layout_master_async_interpret,
1004 laia, LFSCK_NOTIFY);
1006 CERROR("%s: fail to notify %s %x for layout "
1008 lfsck_lfsck2name(lfsck),
1009 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* a target we could not notify makes the scan incomplete. */
1012 lo->ll_flags |= LF_INCOMPLETE;
1017 up_read(&ltds->ltd_rw_sem);
1021 rc = ptlrpc_set_wait(set);
1023 ptlrpc_set_destroy(set);
/* no other MDTs involved — done with the LE_START leg. */
1029 if (!(flags & LPF_ALL_MDT))
1032 ltds = &lfsck->li_mdt_descs;
1033 /* The sponsor broadcasts the request to other MDTs. */
1034 if (flags & LPF_BROADCAST) {
1035 flags &= ~LPF_ALL_MDT;
1036 lr->lr_flags &= ~LEF_TO_OST;
1040 /* non-sponsors link other MDT targets locallly. */
1041 spin_lock(&ltds->ltd_lock);
1042 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1043 ltd = LTD_TGT(ltds, idx);
1044 LASSERT(ltd != NULL);
1046 if (!list_empty(&ltd->ltd_layout_list))
1049 list_add_tail(&ltd->ltd_layout_list,
1050 &llmd->llmd_mdt_list);
1051 list_add_tail(&ltd->ltd_layout_phase_list,
1052 &llmd->llmd_mdt_phase1_list);
1054 spin_unlock(&ltds->ltd_lock);
/* LE_STOP leg: a broadcast stop forces the peers to stop too
 * (falls through into the PHASE2_DONE/stop notification walk). */
1058 if (flags & LPF_BROADCAST)
1059 lr->lr_flags |= LEF_FORCE_STOP;
1060 case LE_PHASE2_DONE:
1061 /* Notify other MDTs if needed, then the OSTs. */
1062 if (flags & LPF_ALL_MDT) {
1063 /* The sponsor broadcasts the request to other MDTs. */
1064 if (flags & LPF_BROADCAST) {
1065 lr->lr_flags &= ~LEF_TO_OST;
1066 head = &llmd->llmd_mdt_list;
1067 ltds = &lfsck->li_mdt_descs;
1071 /* non-sponsors unlink other MDT targets locallly. */
1072 ltds = &lfsck->li_mdt_descs;
1073 spin_lock(&ltds->ltd_lock);
1074 list_for_each_entry_safe(ltd, next,
1075 &llmd->llmd_mdt_list,
1077 list_del_init(&ltd->ltd_layout_phase_list);
1078 list_del_init(&ltd->ltd_layout_list);
1080 spin_unlock(&ltds->ltd_lock);
1084 lr->lr_flags |= LEF_TO_OST;
1085 head = &llmd->llmd_ost_list;
1086 ltds = &lfsck->li_ost_descs;
1089 laia->laia_ltds = ltds;
1090 spin_lock(&ltds->ltd_lock);
/* drain @head, notifying each target as it is unlinked. */
1091 while (!list_empty(head)) {
1092 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1094 if (!list_empty(&ltd->ltd_layout_phase_list))
1095 list_del_init(&ltd->ltd_layout_phase_list);
1096 list_del_init(&ltd->ltd_layout_list);
1097 laia->laia_ltd = ltd;
1098 spin_unlock(&ltds->ltd_lock);
1099 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1100 lfsck_layout_master_async_interpret,
1101 laia, LFSCK_NOTIFY);
1103 CERROR("%s: fail to notify %s %x for layout "
1104 "stop/phase2: rc = %d\n",
1105 lfsck_lfsck2name(lfsck),
1106 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1107 "MDT", ltd->ltd_index, rc);
1110 spin_lock(&ltds->ltd_lock);
1112 spin_unlock(&ltds->ltd_lock);
1114 if (!(flags & LPF_BROADCAST))
1119 rc = ptlrpc_set_wait(set);
1121 ptlrpc_set_destroy(set);
1127 flags &= ~LPF_BROADCAST;
1129 case LE_PHASE1_DONE:
/* tell the MDTs still in phase1 that local phase1 finished. */
1130 llmd->llmd_touch_gen++;
1131 lr->lr_flags &= ~LEF_TO_OST;
1132 ltds = &lfsck->li_mdt_descs;
1133 laia->laia_ltds = ltds;
1134 spin_lock(&ltds->ltd_lock);
1135 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1136 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1137 struct lfsck_tgt_desc,
1138 ltd_layout_phase_list);
/* generation stamp: every target already touched this sweep. */
1139 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1142 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1143 list_del_init(&ltd->ltd_layout_phase_list);
1144 list_add_tail(&ltd->ltd_layout_phase_list,
1145 &llmd->llmd_mdt_phase1_list);
1146 laia->laia_ltd = ltd;
1147 spin_unlock(&ltds->ltd_lock);
1148 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1149 lfsck_layout_master_async_interpret,
1150 laia, LFSCK_NOTIFY);
1152 CERROR("%s: fail to notify MDT %x for layout "
1153 "phase1 done: rc = %d\n",
1154 lfsck_lfsck2name(lfsck),
1155 ltd->ltd_index, rc);
1158 spin_lock(&ltds->ltd_lock);
1160 spin_unlock(&ltds->ltd_lock);
1163 CERROR("%s: unexpected LFSCK event: rc = %d\n",
1164 lfsck_lfsck2name(lfsck), lr->lr_event);
1170 rc = ptlrpc_set_wait(set);
1171 ptlrpc_set_destroy(set);
/* starting with zero OSTs means there is nothing to verify against. */
1173 if (rc == 0 && lr->lr_event == LE_START &&
1174 list_empty(&llmd->llmd_ost_list))
/* Record the outcome of the double-scan (phase2) into the trace file:
 * update phase2 run time / checked counters, derive the final ll_status
 * from @rc (elided condition; presumably rc > 0 => completed — confirm
 * against upstream), move the component to the idle list unless paused,
 * and persist the record under lc_sem. */
1180 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1181 struct lfsck_component *com,
1184 struct lfsck_instance *lfsck = com->lc_lfsck;
1185 struct lfsck_layout *lo = com->lc_file_ram;
1186 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1188 down_write(&com->lc_sem);
1190 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1191 HALF_SEC - lfsck->li_time_last_checkpoint);
1192 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1193 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1196 com->lc_journal = 0;
/* completed: PARTIAL if any target could not take part, else COMPLETED. */
1197 if (lo->ll_flags & LF_INCOMPLETE)
1198 lo->ll_status = LS_PARTIAL;
1200 lo->ll_status = LS_COMPLETED;
1201 if (!(bk->lb_param & LPF_DRYRUN))
1202 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1203 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1204 lo->ll_success_count++;
1205 } else if (rc == 0) {
/* stopped externally: inherit the instance status. */
1206 lo->ll_status = lfsck->li_status;
1207 if (lo->ll_status == 0)
1208 lo->ll_status = LS_STOPPED;
1210 lo->ll_status = LS_FAILED;
/* unless paused, the component goes back to the idle list. */
1213 if (lo->ll_status != LS_PAUSED) {
1214 spin_lock(&lfsck->li_lock);
1215 list_del_init(&com->lc_link);
1216 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
1217 spin_unlock(&lfsck->li_lock);
1220 rc = lfsck_layout_store(env, com);
1222 up_write(&com->lc_sem);
/* Placeholder: orphan OST-object scanning on @ltd is not implemented yet. */
1227 static int lfsck_layout_scan_orphan(const struct lu_env *env,
1228 struct lfsck_component *com,
1229 struct lfsck_tgt_desc *ltd)
1231 /* XXX: To be extended in other patch. */
/* Main loop of the layout LFSCK assistant thread on the master.  It
 * broadcasts LE_START, then consumes llmd_req_list (filled by the master
 * engine), throttled by the async-window; on post it broadcasts
 * LE_PHASE1_DONE; during double-scan it polls remote targets and drives
 * orphan scanning per OST.  NOTE(review): this function continues beyond
 * the end of this excerpt, and many interior lines (labels, braces, wait
 * conditions) are elided — treat the flow below as an outline only. */
1236 static int lfsck_layout_assistant(void *args)
1238 struct lfsck_thread_args *lta = args;
1239 struct lu_env *env = &lta->lta_env;
1240 struct lfsck_component *com = lta->lta_com;
1241 struct lfsck_instance *lfsck = lta->lta_lfsck;
1242 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1243 struct lfsck_position *pos = &com->lc_pos_start;
1244 struct lfsck_thread_info *info = lfsck_env_info(env);
1245 struct lfsck_request *lr = &info->lti_lr;
1246 struct lfsck_layout_master_data *llmd = com->lc_data;
1247 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1248 struct ptlrpc_thread *athread = &llmd->llmd_thread;
1249 struct lfsck_layout_req *llr;
1250 struct l_wait_info lwi = { 0 };
/* start flags come from the user request when present, else the bookmark. */
1256 if (lta->lta_lsp->lsp_start != NULL)
1257 flags = lta->lta_lsp->lsp_start->ls_flags;
1259 flags = bk->lb_param;
/* build and broadcast the LE_START request. */
1260 memset(lr, 0, sizeof(*lr));
1261 lr->lr_event = LE_START;
1262 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1263 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
1265 lr->lr_speed = bk->lb_speed_limit;
1266 lr->lr_version = bk->lb_version;
1267 lr->lr_param = bk->lb_param;
1268 lr->lr_async_windows = bk->lb_async_windows;
/* starting from the beginning implies a reset on the peers too. */
1269 if (pos->lp_oit_cookie <= 1)
1270 lr->lr_param |= LPF_RESET;
1272 rc = lfsck_layout_master_notify_others(env, com, lr, flags);
1274 CERROR("%s: fail to notify others for layout start: rc = %d\n",
1275 lfsck_lfsck2name(lfsck), rc);
/* announce the assistant as running and wake the master engine. */
1279 spin_lock(&llmd->llmd_lock);
1280 thread_set_flags(athread, SVC_RUNNING);
1281 spin_unlock(&llmd->llmd_lock);
1282 wake_up_all(&mthread->t_ctl_waitq);
/* drain the prefetched request queue, respecting the async window. */
1285 while (!list_empty(&llmd->llmd_req_list)) {
1286 bool wakeup = false;
1288 l_wait_event(athread->t_ctl_waitq,
1289 bk->lb_async_windows == 0 ||
1290 atomic_read(&llmd->llmd_rpcs_in_flight) <
1291 bk->lb_async_windows ||
1295 if (unlikely(llmd->llmd_exit))
1296 GOTO(cleanup1, rc = llmd->llmd_post_result);
1298 /* XXX: To be extended in other patch.
1300 * Compare the OST side attribute with local attribute,
1301 * and fix it if found inconsistency. */
1303 spin_lock(&llmd->llmd_lock);
1304 llr = list_entry(llmd->llmd_req_list.next,
1305 struct lfsck_layout_req,
1307 list_del_init(&llr->llr_list);
/* wake the producer once the queue dips below the window. */
1308 if (bk->lb_async_windows != 0 &&
1309 llmd->llmd_prefetched >= bk->lb_async_windows)
1312 llmd->llmd_prefetched--;
1313 spin_unlock(&llmd->llmd_lock);
1315 wake_up_all(&mthread->t_ctl_waitq);
1317 lfsck_layout_req_fini(env, llr);
1320 /* Wakeup the master engine if it is waiting in checkpoint. */
1321 if (atomic_read(&llmd->llmd_rpcs_in_flight) == 0)
1322 wake_up_all(&mthread->t_ctl_waitq);
/* idle until more requests arrive or a state transition is requested. */
1324 l_wait_event(athread->t_ctl_waitq,
1325 !lfsck_layout_req_empty(llmd) ||
1327 llmd->llmd_to_post ||
1328 llmd->llmd_to_double_scan,
1331 if (unlikely(llmd->llmd_exit))
1332 GOTO(cleanup1, rc = llmd->llmd_post_result);
1334 if (!list_empty(&llmd->llmd_req_list))
/* post: phase1 finished locally — tell the other MDTs. */
1337 if (llmd->llmd_to_post) {
1338 llmd->llmd_to_post = 0;
1339 LASSERT(llmd->llmd_post_result > 0);
1341 memset(lr, 0, sizeof(*lr));
1342 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1343 lr->lr_event = LE_PHASE1_DONE;
1344 lr->lr_status = llmd->llmd_post_result;
1345 rc = lfsck_layout_master_notify_others(env, com, lr, 0);
1347 CERROR("%s: failed to notify others "
1348 "for layout post: rc = %d\n",
1349 lfsck_lfsck2name(lfsck), rc);
1351 /* Wakeup the master engine to go ahead. */
1352 wake_up_all(&mthread->t_ctl_waitq);
/* double-scan: poll remote targets, then scan orphans per OST. */
1355 if (llmd->llmd_to_double_scan) {
1356 llmd->llmd_to_double_scan = 0;
1357 atomic_inc(&lfsck->li_double_scan_count);
1358 llmd->llmd_in_double_scan = 1;
1359 wake_up_all(&mthread->t_ctl_waitq);
1361 while (llmd->llmd_in_double_scan) {
1362 struct lfsck_tgt_descs *ltds =
1363 &lfsck->li_ost_descs;
1364 struct lfsck_tgt_desc *ltd;
1366 rc = lfsck_layout_master_query_others(env, com);
1367 if (lfsck_layout_master_to_orphan(llmd))
1373 /* Pull LFSCK status on related targets once
1374 * per 30 seconds if we are not notified. */
1375 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
1376 cfs_time_seconds(1),
1378 rc = l_wait_event(athread->t_ctl_waitq,
1379 lfsck_layout_master_to_orphan(llmd) ||
1381 !thread_is_running(mthread),
1384 if (unlikely(llmd->llmd_exit ||
1385 !thread_is_running(mthread)))
1386 GOTO(cleanup2, rc = 0);
/* timeout just means: poll the targets again. */
1388 if (rc == -ETIMEDOUT)
/* take the next OST that reached phase2 and scan its orphans. */
1395 spin_lock(&ltds->ltd_lock);
1397 &llmd->llmd_ost_phase2_list)) {
1399 llmd->llmd_ost_phase2_list.next,
1400 struct lfsck_tgt_desc,
1401 ltd_layout_phase_list);
1403 &ltd->ltd_layout_phase_list);
1404 spin_unlock(&ltds->ltd_lock);
1406 rc = lfsck_layout_scan_orphan(env, com,
1409 bk->lb_param & LPF_FAILOUT)
1412 if (unlikely(llmd->llmd_exit ||
1413 !thread_is_running(mthread)))
1414 GOTO(cleanup2, rc = 0);
1416 spin_lock(&ltds->ltd_lock);
/* all OSTs past phase1: double-scan is done (rc = 1). */
1419 if (list_empty(&llmd->llmd_ost_phase1_list)) {
1420 spin_unlock(&ltds->ltd_lock);
1421 GOTO(cleanup2, rc = 1);
1423 spin_unlock(&ltds->ltd_lock);
1429 /* Cleanup the unfinished requests. */
1430 spin_lock(&llmd->llmd_lock);
1431 while (!list_empty(&llmd->llmd_req_list)) {
1432 llr = list_entry(llmd->llmd_req_list.next,
1433 struct lfsck_layout_req,
1435 list_del_init(&llr->llr_list);
1436 llmd->llmd_prefetched--;
1437 spin_unlock(&llmd->llmd_lock);
1438 lfsck_layout_req_fini(env, llr);
1439 spin_lock(&llmd->llmd_lock);
1441 spin_unlock(&llmd->llmd_lock);
1443 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
1444 llmd->llmd_prefetched);
1446 l_wait_event(athread->t_ctl_waitq,
1447 atomic_read(&llmd->llmd_rpcs_in_flight) == 0,
1451 memset(lr, 0, sizeof(*lr));
1452 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1454 lr->lr_event = LE_PHASE2_DONE;
1457 } else if (rc == 0) {
1458 lr->lr_event = LE_STOP;
1459 if (lfsck->li_status == LS_PAUSED ||
1460 lfsck->li_status == LS_CO_PAUSED) {
1462 lr->lr_status = LS_CO_PAUSED;
1463 } else if (lfsck->li_status == LS_STOPPED ||
1464 lfsck->li_status == LS_CO_STOPPED) {
1465 flags = lfsck->li_flags;
1466 if (flags & LPF_BROADCAST)
1467 lr->lr_status = LS_STOPPED;
1469 lr->lr_status = LS_CO_STOPPED;
1474 lr->lr_event = LE_STOP;
1476 lr->lr_status = LS_CO_FAILED;
1479 rc1 = lfsck_layout_master_notify_others(env, com, lr, flags);
1481 CERROR("%s: failed to notify others for layout quit: rc = %d\n",
1482 lfsck_lfsck2name(lfsck), rc1);
1486 /* Under force exit case, some requests may be just freed without
1487 * verification, those objects should be re-handled when next run.
1488 * So not update the on-disk tracing file under such case. */
1489 if (!llmd->llmd_exit)
1490 rc1 = lfsck_layout_double_scan_result(env, com, rc);
1493 if (llmd->llmd_in_double_scan)
1494 atomic_dec(&lfsck->li_double_scan_count);
1496 spin_lock(&llmd->llmd_lock);
1497 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
1498 thread_set_flags(athread, SVC_STOPPED);
1499 wake_up_all(&mthread->t_ctl_waitq);
1500 spin_unlock(&llmd->llmd_lock);
1501 lfsck_thread_args_fini(lta);
/*
 * Reply interpreter for the async LFSCK query RPC that an OST (slave)
 * sends to an LFSCK master.  Based on the reply status it decides whether
 * the master can be dropped from the slave's master list, then releases
 * the target/component/export references taken when the RPC was packed.
 *
 * NOTE(review): parts of this listing are elided, so the exact branching
 * between the statements below is not fully visible — confirm against the
 * complete source before relying on the control flow.
 */
1507 lfsck_layout_slave_async_interpret(const struct lu_env *env,
1508 struct ptlrpc_request *req,
1511 struct lfsck_layout_slave_async_args *llsaa = args;
1512 struct obd_export *exp = llsaa->llsaa_exp;
1513 struct lfsck_component *com = llsaa->llsaa_com;
1514 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
1515 struct lfsck_layout_slave_data *llsd = com->lc_data;
1519 /* The failure is quite probably caused by a target crash;
1520 * to let the LFSCK go ahead, assume that the target has
1521 * finished the LFSCK processing. */
1524 struct lfsck_reply *lr;
1526 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
/* Master no longer scanning: stop tracking it on this slave. */
1527 if (lr->lr_status != LS_SCANNING_PHASE1 &&
1528 lr->lr_status != LS_SCANNING_PHASE2)
1532 lfsck_layout_llst_del(llsd, llst);
/* Drop the references taken in lfsck_layout_async_query(). */
1533 lfsck_layout_llst_put(llst);
1534 lfsck_component_put(env, com);
1535 class_export_put(exp);
/*
 * Pack an asynchronous LFSCK_QUERY RPC for the given export and add it to
 * the request set \a set.  The interpret callback takes its own reference
 * on \a com (and consumes the caller's references on \a exp and \a llst)
 * and will release them when the reply arrives.
 */
1540 static int lfsck_layout_async_query(const struct lu_env *env,
1541 struct lfsck_component *com,
1542 struct obd_export *exp,
1543 struct lfsck_layout_slave_target *llst,
1544 struct lfsck_request *lr,
1545 struct ptlrpc_request_set *set)
1547 struct lfsck_layout_slave_async_args *llsaa;
1548 struct ptlrpc_request *req;
1549 struct lfsck_request *tmp;
1553 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
1557 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
/* Packing failed: the request must be freed by hand. */
1559 ptlrpc_request_free(req);
/* Copy the caller's request body into the RPC buffer (assignment of
 * *tmp = *lr is on an elided line — TODO confirm). */
1563 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
1565 ptlrpc_request_set_replen(req);
/* Stash the context for the interpret callback in the request itself. */
1567 llsaa = ptlrpc_req_async_args(req);
1568 llsaa->llsaa_exp = exp;
1569 llsaa->llsaa_com = lfsck_component_get(com);
1570 llsaa->llsaa_llst = llst;
1571 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
1572 ptlrpc_set_add_req(set, req);
/*
 * Pack an asynchronous LFSCK_NOTIFY RPC for the given export and add it
 * to the request set \a set.  Unlike lfsck_layout_async_query() no
 * interpret callback or extra references are needed: the notification is
 * fire-and-forget from the caller's point of view.
 */
1577 static int lfsck_layout_async_notify(const struct lu_env *env,
1578 struct obd_export *exp,
1579 struct lfsck_request *lr,
1580 struct ptlrpc_request_set *set)
1582 struct ptlrpc_request *req;
1583 struct lfsck_request *tmp;
1587 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
1591 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
/* Packing failed: the request must be freed by hand. */
1593 ptlrpc_request_free(req);
/* Copy the caller's request body into the RPC buffer (assignment is on
 * an elided line — TODO confirm). */
1597 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
1599 ptlrpc_request_set_replen(req);
1600 ptlrpc_set_add_req(set, req);
/*
 * Slave-side: query every known LFSCK master about its layout-LFSCK
 * status.  Walks ::llsd_master_list under ::llsd_lock, sending one async
 * query RPC per master, then waits for all replies.
 *
 * The ::llsd_touch_gen counter guarantees each master is visited exactly
 * once per call: visited entries are stamped with the new generation and
 * moved to the list tail, so the loop stops when it meets an entry that
 * already carries the current generation.
 */
1606 lfsck_layout_slave_query_master(const struct lu_env *env,
1607 struct lfsck_component *com)
1609 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
1610 struct lfsck_instance *lfsck = com->lc_lfsck;
1611 struct lfsck_layout_slave_data *llsd = com->lc_data;
1612 struct lfsck_layout_slave_target *llst;
1613 struct obd_export *exp;
1614 struct ptlrpc_request_set *set;
1620 set = ptlrpc_prep_set();
1624 memset(lr, 0, sizeof(*lr));
1625 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1626 lr->lr_event = LE_QUERY;
1627 lr->lr_active = LT_LAYOUT;
/* New generation: marks the start of this round-trip over the list. */
1629 llsd->llsd_touch_gen++;
1630 spin_lock(&llsd->llsd_lock);
1631 while (!list_empty(&llsd->llsd_master_list)) {
1632 llst = list_entry(llsd->llsd_master_list.next,
1633 struct lfsck_layout_slave_target,
/* Already visited in this round: the whole list has been walked. */
1635 if (llst->llst_gen == llsd->llsd_touch_gen)
1638 llst->llst_gen = llsd->llsd_touch_gen;
1639 list_del(&llst->llst_list);
1640 list_add_tail(&llst->llst_list,
1641 &llsd->llsd_master_list);
/* Hold the target while the lock is dropped for the RPC. */
1642 atomic_inc(&llst->llst_ref);
1643 spin_unlock(&llsd->llsd_lock);
1645 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP connection to this master: forget it. */
1648 lfsck_layout_llst_del(llsd, llst);
1649 lfsck_layout_llst_put(llst);
1650 spin_lock(&llsd->llsd_lock);
1654 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
1656 CERROR("%s: slave fail to query %s for layout: "
1657 "rc = %d\n", lfsck_lfsck2name(lfsck),
1658 exp->exp_obd->obd_name, rc);
/* On failure drop the references the interpret callback
 * would have released. */
1660 lfsck_layout_llst_put(llst);
1661 class_export_put(exp);
1665 spin_lock(&llsd->llsd_lock);
1667 spin_unlock(&llsd->llsd_lock);
/* Wait for all queued query RPCs to complete. */
1670 rc = ptlrpc_set_wait(set);
1671 ptlrpc_set_destroy(set);
1673 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * Slave-side: notify every known LFSCK master about a local event (e.g.
 * LE_PHASE1_DONE) together with the local \a result.  Same generation-
 * stamped walk of ::llsd_master_list as lfsck_layout_slave_query_master(),
 * but uses fire-and-forget notify RPCs, so references are dropped here
 * after queueing rather than in an interpret callback.
 */
1677 lfsck_layout_slave_notify_master(const struct lu_env *env,
1678 struct lfsck_component *com,
1679 enum lfsck_events event, int result)
1681 struct lfsck_instance *lfsck = com->lc_lfsck;
1682 struct lfsck_layout_slave_data *llsd = com->lc_data;
1683 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
1684 struct lfsck_layout_slave_target *llst;
1685 struct obd_export *exp;
1686 struct ptlrpc_request_set *set;
1691 set = ptlrpc_prep_set();
1695 memset(lr, 0, sizeof(*lr));
1696 lr->lr_event = event;
1697 lr->lr_flags = LEF_FROM_OST;
1698 lr->lr_status = result;
1699 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1700 lr->lr_active = LT_LAYOUT;
/* New generation: marks the start of this round-trip over the list. */
1701 llsd->llsd_touch_gen++;
1702 spin_lock(&llsd->llsd_lock);
1703 while (!list_empty(&llsd->llsd_master_list)) {
1704 llst = list_entry(llsd->llsd_master_list.next,
1705 struct lfsck_layout_slave_target,
/* Already visited in this round: the whole list has been walked. */
1707 if (llst->llst_gen == llsd->llsd_touch_gen)
1710 llst->llst_gen = llsd->llsd_touch_gen;
1711 list_del(&llst->llst_list);
1712 list_add_tail(&llst->llst_list,
1713 &llsd->llsd_master_list);
/* Hold the target while the lock is dropped for the RPC. */
1714 atomic_inc(&llst->llst_ref);
1715 spin_unlock(&llsd->llsd_lock);
1717 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP connection to this master: forget it. */
1720 lfsck_layout_llst_del(llsd, llst);
1721 lfsck_layout_llst_put(llst);
1722 spin_lock(&llsd->llsd_lock);
1726 rc = lfsck_layout_async_notify(env, exp, lr, set);
1728 CERROR("%s: slave fail to notify %s for layout: "
1729 "rc = %d\n", lfsck_lfsck2name(lfsck),
1730 exp->exp_obd->obd_name, rc);
1733 lfsck_layout_llst_put(llst);
1734 class_export_put(exp);
1735 spin_lock(&llsd->llsd_lock);
1737 spin_unlock(&llsd->llsd_lock);
/* Wait for all queued notify RPCs to complete. */
1740 rc = ptlrpc_set_wait(set);
1742 ptlrpc_set_destroy(set);
/*
 * Reset the in-RAM layout-LFSCK trace file to a pristine LS_INIT state
 * and store it to disk.  When \a init is false, the accumulated success
 * count and the last-complete timestamp are preserved across the reset;
 * when \a init is true the whole record is zeroed (the init/else branch
 * boundaries sit on elided lines of this listing — TODO confirm).
 */
1749 static int lfsck_layout_reset(const struct lu_env *env,
1750 struct lfsck_component *com, bool init)
1752 struct lfsck_layout *lo = com->lc_file_ram;
1755 down_write(&com->lc_sem);
1757 memset(lo, 0, com->lc_file_size);
/* Preserve historical statistics across a non-initial reset. */
1759 __u32 count = lo->ll_success_count;
1760 __u64 last_time = lo->ll_time_last_complete;
1762 memset(lo, 0, com->lc_file_size);
1763 lo->ll_success_count = count;
1764 lo->ll_time_last_complete = last_time;
1767 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
1768 lo->ll_status = LS_INIT;
1770 rc = lfsck_layout_store(env, com);
1771 up_write(&com->lc_sem);
/*
 * Account one phase-1 failure in the layout trace file.  If this is the
 * first inconsistency seen, remember the current otable-based iterator
 * position so a later dry-run re-scan can restart from it.
 */
1776 static void lfsck_layout_fail(const struct lu_env *env,
1777 struct lfsck_component *com, bool new_checked)
1779 struct lfsck_layout *lo = com->lc_file_ram;
1781 down_write(&com->lc_sem);
1783 com->lc_new_checked++;
1784 lo->ll_objs_failed_phase1++;
1785 if (lo->ll_pos_first_inconsistent == 0) {
1786 struct lfsck_instance *lfsck = com->lc_lfsck;
/* Record the OIT cookie of the first inconsistent object. */
1788 lo->ll_pos_first_inconsistent =
1789 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
1792 up_write(&com->lc_sem);
/*
 * Master-side checkpoint: flush the current layout-LFSCK progress to the
 * on-disk trace file.  Before updating the record, wait until the
 * assistant thread has drained its request list and no RPCs are in
 * flight, so the stored position is consistent with the processed work.
 */
1795 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
1796 struct lfsck_component *com, bool init)
1798 struct lfsck_instance *lfsck = com->lc_lfsck;
1799 struct lfsck_layout *lo = com->lc_file_ram;
1800 struct lfsck_layout_master_data *llmd = com->lc_data;
1801 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1802 struct ptlrpc_thread *athread = &llmd->llmd_thread;
1803 struct l_wait_info lwi = { 0 };
/* Nothing new to record for a non-initial checkpoint. */
1806 if (com->lc_new_checked == 0 && !init)
/* Wait for the assistant pipeline to drain before snapshotting. */
1809 l_wait_event(mthread->t_ctl_waitq,
1810 (list_empty(&llmd->llmd_req_list) &&
1811 atomic_read(&llmd->llmd_rpcs_in_flight) == 0) ||
1812 !thread_is_running(mthread) ||
1813 thread_is_stopped(athread),
1816 if (!thread_is_running(mthread) || thread_is_stopped(athread))
1819 down_write(&com->lc_sem);
1821 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
1823 lo->ll_pos_last_checkpoint =
1824 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the duration to the nearest second. */
1825 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
1826 HALF_SEC - lfsck->li_time_last_checkpoint);
1827 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1828 lo->ll_objs_checked_phase1 += com->lc_new_checked;
1829 com->lc_new_checked = 0;
1832 rc = lfsck_layout_store(env, com);
1833 up_write(&com->lc_sem);
/*
 * Slave-side checkpoint: flush the current layout-LFSCK progress to the
 * on-disk trace file.  Same bookkeeping as the master variant, but there
 * is no assistant pipeline to drain first.
 */
1838 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
1839 struct lfsck_component *com, bool init)
1841 struct lfsck_instance *lfsck = com->lc_lfsck;
1842 struct lfsck_layout *lo = com->lc_file_ram;
/* Nothing new to record for a non-initial checkpoint. */
1845 if (com->lc_new_checked == 0 && !init)
1848 down_write(&com->lc_sem);
1851 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
1853 lo->ll_pos_last_checkpoint =
1854 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the duration to the nearest second. */
1855 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
1856 HALF_SEC - lfsck->li_time_last_checkpoint);
1857 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1858 lo->ll_objs_checked_phase1 += com->lc_new_checked;
1859 com->lc_new_checked = 0;
1862 rc = lfsck_layout_store(env, com);
1864 up_write(&com->lc_sem);
/*
 * Common preparation for a layout-LFSCK run (master and slave): reset the
 * trace file if the previous run completed, then choose the starting
 * status and OIT position based on prior flags:
 *   - LF_SCANNED_ONCE and not dropping dry-run results: go straight to
 *     phase 2;
 *   - LF_SCANNED_ONCE with dry-run results to re-verify: restart phase 1
 *     from the first recorded inconsistency, clearing the statistics;
 *   - otherwise: phase 1 from the last checkpoint (or from the first
 *     inconsistency when re-checking dry-run findings).
 */
1869 static int lfsck_layout_prep(const struct lu_env *env,
1870 struct lfsck_component *com)
1872 struct lfsck_instance *lfsck = com->lc_lfsck;
1873 struct lfsck_layout *lo = com->lc_file_ram;
1874 struct lfsck_position *pos = &com->lc_pos_start;
1876 fid_zero(&pos->lp_dir_parent);
1877 pos->lp_dir_cookie = 0;
/* Previous run finished: start from a clean trace file. */
1878 if (lo->ll_status == LS_COMPLETED ||
1879 lo->ll_status == LS_PARTIAL) {
1882 rc = lfsck_layout_reset(env, com, false);
1887 down_write(&com->lc_sem);
1889 lo->ll_time_latest_start = cfs_time_current_sec();
1891 spin_lock(&lfsck->li_lock);
1892 if (lo->ll_flags & LF_SCANNED_ONCE) {
1893 if (!lfsck->li_drop_dryrun ||
1894 lo->ll_pos_first_inconsistent == 0) {
/* Phase 1 already done: resume with double scan. */
1895 lo->ll_status = LS_SCANNING_PHASE2;
1896 list_del_init(&com->lc_link);
1897 list_add_tail(&com->lc_link,
1898 &lfsck->li_list_double_scan);
1899 pos->lp_oit_cookie = 0;
/* Re-check dry-run findings: restart phase 1 with fresh stats. */
1903 lo->ll_status = LS_SCANNING_PHASE1;
1904 lo->ll_run_time_phase1 = 0;
1905 lo->ll_run_time_phase2 = 0;
1906 lo->ll_objs_checked_phase1 = 0;
1907 lo->ll_objs_checked_phase2 = 0;
1908 lo->ll_objs_failed_phase1 = 0;
1909 lo->ll_objs_failed_phase2 = 0;
1910 for (i = 0; i < LLIT_MAX; i++)
1911 lo->ll_objs_repaired[i] = 0;
1913 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
/* First scan (or resumed scan): continue after last checkpoint. */
1916 lo->ll_status = LS_SCANNING_PHASE1;
1917 if (!lfsck->li_drop_dryrun ||
1918 lo->ll_pos_first_inconsistent == 0)
1919 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
1921 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
1923 spin_unlock(&lfsck->li_lock);
1925 up_write(&com->lc_sem);
/*
 * Slave-side preparation: run the common prep, and if phase-1 scanning
 * is about to start and the triggering master's index is known, register
 * that master in the slave's master list.
 */
1930 static int lfsck_layout_slave_prep(const struct lu_env *env,
1931 struct lfsck_component *com,
1932 struct lfsck_start_param *lsp)
1934 struct lfsck_layout *lo = com->lc_file_ram;
1935 struct lfsck_layout_slave_data *llsd = com->lc_data;
1938 /* XXX: For a new scanning, generate OST-objects
1939 * bitmap for orphan detection. */
1941 rc = lfsck_layout_prep(env, com);
1942 if (rc != 0 || lo->ll_status != LS_SCANNING_PHASE1 ||
1943 !lsp->lsp_index_valid)
/* Track the master identified by lsp_index for status exchange. */
1946 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
/*
 * Master-side preparation: run the common prep, reset the assistant
 * bookkeeping, and start the "lfsck_layout" assistant kthread, waiting
 * until it reports either running or stopped before returning.
 */
1951 static int lfsck_layout_master_prep(const struct lu_env *env,
1952 struct lfsck_component *com,
1953 struct lfsck_start_param *lsp)
1955 struct lfsck_instance *lfsck = com->lc_lfsck;
1956 struct lfsck_layout_master_data *llmd = com->lc_data;
1957 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1958 struct ptlrpc_thread *athread = &llmd->llmd_thread;
1959 struct lfsck_thread_args *lta;
1963 rc = lfsck_layout_prep(env, com);
/* Clear state left over from any previous assistant run. */
1967 llmd->llmd_assistant_status = 0;
1968 llmd->llmd_post_result = 0;
1969 llmd->llmd_to_post = 0;
1970 llmd->llmd_to_double_scan = 0;
1971 llmd->llmd_in_double_scan = 0;
1972 llmd->llmd_exit = 0;
1973 thread_set_flags(athread, 0);
1975 lta = lfsck_thread_args_init(lfsck, com, lsp);
1977 RETURN(PTR_ERR(lta));
1979 rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
1980 if (IS_ERR_VALUE(rc)) {
1981 CERROR("%s: Cannot start LFSCK layout assistant thread: "
1982 "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
/* kthread_run() failed, so the args are still ours to free. */
1983 lfsck_thread_args_fini(lta);
1985 struct l_wait_info lwi = { 0 };
/* Wait for the assistant to come up (or fail fast). */
1987 l_wait_event(mthread->t_ctl_waitq,
1988 thread_is_running(athread) ||
1989 thread_is_stopped(athread),
1991 if (unlikely(!thread_is_running(athread)))
1992 rc = llmd->llmd_assistant_status;
/*
 * Master-side per-object handler for the otable-based iteration.
 * Currently a placeholder: the real work (described in the comment
 * below) is added by later patches.
 */
2000 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
2001 struct lfsck_component *com,
2002 struct dt_object *obj)
2004 /* XXX: To be implemented in other patches.
2006 * For the given object, read its layout EA locally. For each stripe,
2007 * pre-fetch the OST-object's attribute and generate an structure
2008 * lfsck_layout_req on the list ::llmd_req_list.
2010 * For each request on the ::llmd_req_list, the lfsck_layout_assistant
2011 * thread will compare the OST side attribute with local attribute,
2012 * if inconsistent, then repair it.
2014 * All above processing is async mode with pipeline. */
/*
 * Slave-side per-object handler for the otable-based iteration.  For each
 * OST-object it tracks the highest object id seen per sequence and cross-
 * checks it against the sequence's LAST_ID file; an object id beyond the
 * recorded LAST_ID means the LAST_ID file crashed, which triggers the
 * LE_LASTID_REBUILDING notification and sets LF_CRASHED_LASTID.
 */
2019 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
2020 struct lfsck_component *com,
2021 struct dt_object *obj)
2023 struct lfsck_instance *lfsck = com->lc_lfsck;
2024 struct lfsck_layout *lo = com->lc_file_ram;
2025 const struct lu_fid *fid = lfsck_dto2fid(obj);
2026 struct lfsck_layout_slave_data *llsd = com->lc_data;
2027 struct lfsck_layout_seq *lls;
2033 /* XXX: Update OST-objects bitmap for orphan detection. */
2035 LASSERT(llsd != NULL);
2037 down_write(&com->lc_sem);
/* Only IDIF or normal OST-object FIDs are interesting here. */
2038 if (fid_is_idif(fid))
2040 else if (!fid_is_norm(fid) ||
2041 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
2042 GOTO(unlock, rc = 0);
2045 com->lc_new_checked++;
/* Find (or build) the per-sequence state for this FID's sequence. */
2047 lls = lfsck_layout_seq_lookup(llsd, seq);
2050 if (unlikely(lls == NULL))
2051 GOTO(unlock, rc = -ENOMEM);
2053 INIT_LIST_HEAD(&lls->lls_list);
2055 rc = lfsck_layout_lastid_load(env, com, lls);
2057 lo->ll_objs_failed_phase1++;
2062 lfsck_layout_seq_insert(llsd, lls);
/* The LAST_ID object itself is not a data object: skip. */
2065 if (unlikely(fid_is_last_id(fid)))
2066 GOTO(unlock, rc = 0);
2069 if (oid > lls->lls_lastid_known)
2070 lls->lls_lastid_known = oid;
2072 if (oid > lls->lls_lastid) {
2073 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
2074 /* OFD may create new objects during LFSCK scanning. */
2075 rc = lfsck_layout_lastid_reload(env, com, lls);
2076 if (unlikely(rc != 0))
2077 CWARN("%s: failed to reload LAST_ID for "LPX64
2079 lfsck_lfsck2name(com->lc_lfsck),
2081 if (oid <= lls->lls_lastid)
2082 GOTO(unlock, rc = 0);
/* Object id beyond LAST_ID: the LAST_ID file is corrupt;
 * ask the OFD to start rebuilding it. */
2084 LASSERT(lfsck->li_out_notify != NULL);
2086 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
2087 LE_LASTID_REBUILDING);
2088 lo->ll_flags |= LF_CRASHED_LASTID;
2091 lls->lls_lastid = oid;
2095 GOTO(unlock, rc = 0);
2098 up_write(&com->lc_sem);
/*
 * Directory-entry handler for layout LFSCK.  Layout verification is
 * driven by the otable-based iteration, not by namespace traversal, so
 * this is a no-op (the return statement sits on an elided line of this
 * listing — presumably "return 0;", TODO confirm).
 */
2103 static int lfsck_layout_exec_dir(const struct lu_env *env,
2104 struct lfsck_component *com,
2105 struct dt_object *obj,
2106 struct lu_dirent *ent)
/*
 * Master-side post-processing after phase-1 scanning ends with \a result.
 * Hands the result to the assistant thread (asking it to exit on
 * failure), waits for the pipeline to drain, then moves the component to
 * the double-scan or idle list and flushes the final phase-1 statistics
 * to the trace file.
 */
2111 static int lfsck_layout_master_post(const struct lu_env *env,
2112 struct lfsck_component *com,
2113 int result, bool init)
2115 struct lfsck_instance *lfsck = com->lc_lfsck;
2116 struct lfsck_layout *lo = com->lc_file_ram;
2117 struct lfsck_layout_master_data *llmd = com->lc_data;
2118 struct ptlrpc_thread *mthread = &lfsck->li_thread;
2119 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2120 struct l_wait_info lwi = { 0 };
2125 llmd->llmd_post_result = result;
2126 llmd->llmd_to_post = 1;
/* On failure/stop, tell the assistant to abandon queued requests. */
2127 if (llmd->llmd_post_result <= 0)
2128 llmd->llmd_exit = 1;
2130 wake_up_all(&athread->t_ctl_waitq);
2131 l_wait_event(mthread->t_ctl_waitq,
2132 (result > 0 && list_empty(&llmd->llmd_req_list) &&
2133 atomic_read(&llmd->llmd_rpcs_in_flight) == 0) ||
2134 thread_is_stopped(athread),
/* The assistant's failure overrides the scan result. */
2137 if (llmd->llmd_assistant_status < 0)
2138 result = llmd->llmd_assistant_status;
2140 down_write(&com->lc_sem);
2141 spin_lock(&lfsck->li_lock);
2142 /* When LFSCK failed, there may be some prefetched objects that
2143 * have not been processed yet; we do not know their exact
2144 * positions, so just restart from the last checkpoint next time. */
2145 if (!init && !llmd->llmd_exit)
2146 lo->ll_pos_last_checkpoint =
2147 lfsck->li_pos_current.lp_oit_cookie;
/* Phase 1 succeeded: queue the component for the double scan. */
2150 lo->ll_status = LS_SCANNING_PHASE2;
2151 lo->ll_flags |= LF_SCANNED_ONCE;
2152 lo->ll_flags &= ~LF_UPGRADE;
2153 list_del_init(&com->lc_link);
2154 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
2155 } else if (result == 0) {
2156 lo->ll_status = lfsck->li_status;
2157 if (lo->ll_status == 0)
2158 lo->ll_status = LS_STOPPED;
2159 if (lo->ll_status != LS_PAUSED) {
2160 list_del_init(&com->lc_link);
2161 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
2164 lo->ll_status = LS_FAILED;
2165 list_del_init(&com->lc_link);
2166 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
2168 spin_unlock(&lfsck->li_lock);
/* HALF_SEC rounds the duration to the nearest second. */
2171 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2172 HALF_SEC - lfsck->li_time_last_checkpoint);
2173 lo->ll_time_last_checkpoint = cfs_time_current_sec();
2174 lo->ll_objs_checked_phase1 += com->lc_new_checked;
2175 com->lc_new_checked = 0;
2178 rc = lfsck_layout_store(env, com);
2179 up_write(&com->lc_sem);
/*
 * Slave-side post-processing after phase-1 scanning ends with \a result.
 * Flushes the LAST_ID files, updates the trace-file status (clearing
 * LF_CRASHED_LASTID when the scan recovered the LAST_ID values), stores
 * the final phase-1 statistics, and notifies the masters that phase 1 is
 * done on this target.
 */
2184 static int lfsck_layout_slave_post(const struct lu_env *env,
2185 struct lfsck_component *com,
2186 int result, bool init)
2188 struct lfsck_instance *lfsck = com->lc_lfsck;
2189 struct lfsck_layout *lo = com->lc_file_ram;
/* Persist the rebuilt/verified LAST_ID values first. */
2193 rc = lfsck_layout_lastid_store(env, com);
2197 LASSERT(lfsck->li_out_notify != NULL);
2199 down_write(&com->lc_sem);
2201 spin_lock(&lfsck->li_lock);
2203 lo->ll_pos_last_checkpoint =
2204 lfsck->li_pos_current.lp_oit_cookie;
/* Phase 1 succeeded: queue the component for the double scan. */
2206 lo->ll_status = LS_SCANNING_PHASE2;
2207 lo->ll_flags |= LF_SCANNED_ONCE;
2208 if (lo->ll_flags & LF_CRASHED_LASTID) {
2210 lo->ll_flags &= ~LF_CRASHED_LASTID;
2212 lo->ll_flags &= ~LF_UPGRADE;
2213 list_del_init(&com->lc_link);
2214 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
2215 } else if (result == 0) {
2216 lo->ll_status = lfsck->li_status;
2217 if (lo->ll_status == 0)
2218 lo->ll_status = LS_STOPPED;
2219 if (lo->ll_status != LS_PAUSED) {
2220 list_del_init(&com->lc_link);
2221 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
2224 lo->ll_status = LS_FAILED;
2225 list_del_init(&com->lc_link);
2226 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
2228 spin_unlock(&lfsck->li_lock);
/* Tell the OFD the LAST_ID rebuild state changed (event value is on
 * an elided line — TODO confirm). */
2231 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
/* HALF_SEC rounds the duration to the nearest second. */
2235 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2236 HALF_SEC - lfsck->li_time_last_checkpoint);
2237 lo->ll_time_last_checkpoint = cfs_time_current_sec();
2238 lo->ll_objs_checked_phase1 += com->lc_new_checked;
2239 com->lc_new_checked = 0;
2242 rc = lfsck_layout_store(env, com);
2244 up_write(&com->lc_sem);
/* Report phase-1 completion (and its result) to the masters. */
2246 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
/*
 * Format the layout-LFSCK status into \a buf (at most \a len bytes) for
 * procfs-style reporting: general state, flags, parameters, timestamps,
 * positions, repair statistics, and — while phase 1 is running — live
 * speed figures plus the current iterator position.
 */
2251 static int lfsck_layout_dump(const struct lu_env *env,
2252 struct lfsck_component *com, char *buf, int len)
2254 struct lfsck_instance *lfsck = com->lc_lfsck;
2255 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2256 struct lfsck_layout *lo = com->lc_file_ram;
2261 down_read(&com->lc_sem);
2262 rc = snprintf(buf, len,
2263 "name: lfsck_layout\n"
2269 lfsck_status2names(lo->ll_status));
2275 rc = lfsck_bits_dump(&buf, &len, lo->ll_flags, lfsck_flags_names,
2280 rc = lfsck_bits_dump(&buf, &len, bk->lb_param, lfsck_param_names,
2285 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_complete,
2286 "time_since_last_completed");
2290 rc = lfsck_time_dump(&buf, &len, lo->ll_time_latest_start,
2291 "time_since_latest_start");
2295 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_checkpoint,
2296 "time_since_last_checkpoint");
2300 rc = snprintf(buf, len,
2301 "latest_start_position: "LPU64"\n"
2302 "last_checkpoint_position: "LPU64"\n"
2303 "first_failure_position: "LPU64"\n",
2304 lo->ll_pos_latest_start,
2305 lo->ll_pos_last_checkpoint,
2306 lo->ll_pos_first_inconsistent);
2313 rc = snprintf(buf, len,
2314 "success_count: %u\n"
2315 "repaired_dangling: "LPU64"\n"
2316 "repaired_unmatched_pair: "LPU64"\n"
2317 "repaired_multiple_referenced: "LPU64"\n"
2318 "repaired_orphan: "LPU64"\n"
2319 "repaired_inconsistent_owner: "LPU64"\n"
2320 "repaired_others: "LPU64"\n"
2321 "skipped: "LPU64"\n"
2322 "failed_phase1: "LPU64"\n"
2323 "failed_phase2: "LPU64"\n",
2324 lo->ll_success_count,
2325 lo->ll_objs_repaired[LLIT_DANGLING - 1],
2326 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
2327 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
2328 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
2329 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
2330 lo->ll_objs_repaired[LLIT_OTHERS - 1],
2331 lo->ll_objs_skipped,
2332 lo->ll_objs_failed_phase1,
2333 lo->ll_objs_failed_phase2);
/* Phase 1 in progress: compute live average and real-time speeds. */
2340 if (lo->ll_status == LS_SCANNING_PHASE1) {
2342 const struct dt_it_ops *iops;
2343 cfs_duration_t duration = cfs_time_current() -
2344 lfsck->li_time_last_checkpoint;
2345 __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
2346 __u64 speed = checked;
2347 __u64 new_checked = com->lc_new_checked * HZ;
2348 __u32 rtime = lo->ll_run_time_phase1 +
2349 cfs_duration_sec(duration + HALF_SEC);
/* do_div() is used because 64-bit division is not native
 * on 32-bit kernels. */
2352 do_div(new_checked, duration);
2354 do_div(speed, rtime);
2355 rc = snprintf(buf, len,
2356 "checked_phase1: "LPU64"\n"
2357 "checked_phase2: "LPU64"\n"
2358 "run_time_phase1: %u seconds\n"
2359 "run_time_phase2: %u seconds\n"
2360 "average_speed_phase1: "LPU64" items/sec\n"
2361 "average_speed_phase2: N/A\n"
2362 "real-time_speed_phase1: "LPU64" items/sec\n"
2363 "real-time_speed_phase2: N/A\n",
2365 lo->ll_objs_checked_phase2,
2367 lo->ll_run_time_phase2,
2376 LASSERT(lfsck->li_di_oit != NULL);
2378 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
2380 /* The low layer otable-based iteration position may NOT
2381 * exactly match the layout-based directory traversal
2382 * cookie. Generally, it is not a serious issue. But the
2383 * caller should NOT make assumption on that. */
2384 pos = iops->store(env, lfsck->li_di_oit);
2385 if (!lfsck->li_current_oit_processed)
2387 rc = snprintf(buf, len, "current_position: "LPU64"\n", pos);
2394 /* XXX: LS_SCANNING_PHASE2 will be handled in the future. */
2395 __u64 speed1 = lo->ll_objs_checked_phase1;
2396 __u64 speed2 = lo->ll_objs_checked_phase2;
2398 if (lo->ll_run_time_phase1 != 0)
2399 do_div(speed1, lo->ll_run_time_phase1);
2400 if (lo->ll_run_time_phase2 != 0)
2401 do_div(speed2, lo->ll_run_time_phase2);
2402 rc = snprintf(buf, len,
2403 "checked_phase1: "LPU64"\n"
2404 "checked_phase2: "LPU64"\n"
2405 "run_time_phase1: %u seconds\n"
2406 "run_time_phase2: %u seconds\n"
2407 "average_speed_phase1: "LPU64" items/sec\n"
2408 "average_speed_phase2: "LPU64" objs/sec\n"
2409 "real-time_speed_phase1: N/A\n"
2410 "real-time_speed_phase2: N/A\n"
2411 "current_position: N/A\n",
2412 lo->ll_objs_checked_phase1,
2413 lo->ll_objs_checked_phase2,
2414 lo->ll_run_time_phase1,
2415 lo->ll_run_time_phase2,
2427 up_read(&com->lc_sem);
/*
 * Master-side phase-2 entry: ask the assistant thread to start the
 * double scan, wait until it acknowledges (or stops), and propagate any
 * assistant failure to the caller.
 */
2432 static int lfsck_layout_master_double_scan(const struct lu_env *env,
2433 struct lfsck_component *com)
2435 struct lfsck_layout_master_data *llmd = com->lc_data;
2436 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
2437 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2438 struct lfsck_layout *lo = com->lc_file_ram;
2439 struct l_wait_info lwi = { 0 };
/* Nothing to do unless phase 1 actually finished. */
2441 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
2444 llmd->llmd_to_double_scan = 1;
2445 wake_up_all(&athread->t_ctl_waitq);
2446 l_wait_event(mthread->t_ctl_waitq,
2447 llmd->llmd_in_double_scan ||
2448 thread_is_stopped(athread),
2450 if (llmd->llmd_assistant_status < 0)
2451 return llmd->llmd_assistant_status;
/*
 * Slave-side phase-2: periodically query the masters (at most every 30
 * seconds, or sooner when notified) until the master list drains or the
 * local LFSCK thread stops, then record the double-scan result and wake
 * any waiter on li_double_scan_count.
 */
2456 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
2457 struct lfsck_component *com)
2459 struct lfsck_instance *lfsck = com->lc_lfsck;
2460 struct lfsck_layout_slave_data *llsd = com->lc_data;
2461 struct lfsck_layout *lo = com->lc_file_ram;
2462 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Nothing to do unless phase 1 actually finished. */
2466 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
2469 atomic_inc(&lfsck->li_double_scan_count);
2471 com->lc_new_checked = 0;
2472 com->lc_new_scanned = 0;
2473 com->lc_time_last_checkpoint = cfs_time_current();
2474 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
2475 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Poll the masters no more often than every 30 seconds. */
2478 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
2481 rc = lfsck_layout_slave_query_master(env, com);
2482 if (list_empty(&llsd->llsd_master_list)) {
2483 if (unlikely(!thread_is_running(thread)))
2494 rc = l_wait_event(thread->t_ctl_waitq,
2495 !thread_is_running(thread) ||
2496 list_empty(&llsd->llsd_master_list),
2498 if (unlikely(!thread_is_running(thread)))
/* Timeout just means "poll again"; anything else ends the loop. */
2501 if (rc == -ETIMEDOUT)
2504 GOTO(done, rc = (rc < 0 ? rc : 1));
2508 rc = lfsck_layout_double_scan_result(env, com, rc);
2510 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
2511 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/*
 * Release the master-side private data: verify the assistant thread has
 * fully stopped (or never started) and its pipeline is empty, then unhook
 * every OST/MDT target descriptor from the phase lists under the target-
 * descriptor lock.
 */
2516 static void lfsck_layout_master_data_release(const struct lu_env *env,
2517 struct lfsck_component *com)
2519 struct lfsck_layout_master_data *llmd = com->lc_data;
2520 struct lfsck_instance *lfsck = com->lc_lfsck;
2521 struct lfsck_tgt_descs *ltds;
2522 struct lfsck_tgt_desc *ltd;
2523 struct lfsck_tgt_desc *next;
2525 LASSERT(llmd != NULL);
2526 LASSERT(thread_is_init(&llmd->llmd_thread) ||
2527 thread_is_stopped(&llmd->llmd_thread));
2528 LASSERT(list_empty(&llmd->llmd_req_list));
2529 LASSERT(atomic_read(&llmd->llmd_rpcs_in_flight) == 0);
2531 com->lc_data = NULL;
/* Detach all OST descriptors from this component's lists. */
2533 ltds = &lfsck->li_ost_descs;
2534 spin_lock(&ltds->ltd_lock);
2535 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
2536 ltd_layout_phase_list) {
2537 list_del_init(&ltd->ltd_layout_phase_list);
2539 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
2540 ltd_layout_phase_list) {
2541 list_del_init(&ltd->ltd_layout_phase_list);
2543 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
2545 list_del_init(&ltd->ltd_layout_list);
/* And likewise for the MDT descriptors. */
2547 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
2548 ltd_layout_phase_list) {
2549 list_del_init(&ltd->ltd_layout_phase_list);
2551 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
2552 ltd_layout_phase_list) {
2553 list_del_init(&ltd->ltd_layout_phase_list);
2555 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
2557 list_del_init(&ltd->ltd_layout_list);
2559 spin_unlock(&ltds->ltd_lock);
/*
 * Release the slave-side private data: free every per-sequence record
 * (dropping its LAST_ID object reference) and every tracked master entry.
 */
2564 static void lfsck_layout_slave_data_release(const struct lu_env *env,
2565 struct lfsck_component *com)
2567 struct lfsck_layout_slave_data *llsd = com->lc_data;
2568 struct lfsck_layout_seq *lls;
2569 struct lfsck_layout_seq *next;
2570 struct lfsck_layout_slave_target *llst;
2571 struct lfsck_layout_slave_target *tmp;
2573 LASSERT(llsd != NULL);
2575 com->lc_data = NULL;
2577 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
2579 list_del_init(&lls->lls_list);
/* Drop the reference to the sequence's LAST_ID object. */
2580 lfsck_object_put(env, lls->lls_lastid_obj);
2584 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
2586 list_del_init(&llst->llst_list);
/*
 * Force the assistant thread to exit and wait synchronously until it is
 * back in the init or stopped state.
 */
2593 static void lfsck_layout_master_quit(const struct lu_env *env,
2594 struct lfsck_component *com)
2596 struct lfsck_layout_master_data *llmd = com->lc_data;
2597 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
2598 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2599 struct l_wait_info lwi = { 0 };
2601 llmd->llmd_exit = 1;
2602 wake_up_all(&athread->t_ctl_waitq);
2603 l_wait_event(mthread->t_ctl_waitq,
2604 thread_is_init(athread) ||
2605 thread_is_stopped(athread),
/*
 * Master-side handler for incoming notifications from OSTs/MDTs.  Only
 * LE_PHASE1_DONE, LE_PHASE2_DONE and LE_STOP are accepted.  Depending on
 * the event the sending target is moved to the phase-2 list, marked done,
 * or dropped entirely; an LEF_FORCE_STOP notification also stops the
 * whole local LFSCK, and a target moving to phase 2 wakes the assistant
 * so it can start orphan handling.
 */
2609 static int lfsck_layout_master_in_notify(const struct lu_env *env,
2610 struct lfsck_component *com,
2611 struct lfsck_request *lr)
2613 struct lfsck_instance *lfsck = com->lc_lfsck;
2614 struct lfsck_layout *lo = com->lc_file_ram;
2615 struct lfsck_layout_master_data *llmd = com->lc_data;
2616 struct lfsck_tgt_descs *ltds;
2617 struct lfsck_tgt_desc *ltd;
/* Ignore everything except the three lifecycle events. */
2620 if (lr->lr_event != LE_PHASE1_DONE &&
2621 lr->lr_event != LE_PHASE2_DONE &&
2622 lr->lr_event != LE_STOP)
2625 if (lr->lr_flags & LEF_FROM_OST)
2626 ltds = &lfsck->li_ost_descs;
2628 ltds = &lfsck->li_mdt_descs;
2629 spin_lock(&ltds->ltd_lock);
2630 ltd = LTD_TGT(ltds, lr->lr_index);
/* Unknown target index: nothing to update. */
2632 spin_unlock(&ltds->ltd_lock);
2637 list_del_init(&ltd->ltd_layout_phase_list);
2638 switch (lr->lr_event) {
2639 case LE_PHASE1_DONE:
/* Target's phase 1 failed: drop it and flag the run incomplete. */
2640 if (lr->lr_status <= 0) {
2641 ltd->ltd_layout_done = 1;
2642 list_del_init(&ltd->ltd_layout_list);
2643 lo->ll_flags |= LF_INCOMPLETE;
/* Target's phase 1 succeeded: move it to the phase-2 lists. */
2647 if (lr->lr_flags & LEF_FROM_OST) {
2648 if (list_empty(&ltd->ltd_layout_list))
2649 list_add_tail(&ltd->ltd_layout_list,
2650 &llmd->llmd_ost_list);
2651 list_add_tail(&ltd->ltd_layout_phase_list,
2652 &llmd->llmd_ost_phase2_list);
2654 if (list_empty(&ltd->ltd_layout_list))
2655 list_add_tail(&ltd->ltd_layout_list,
2656 &llmd->llmd_mdt_list);
2657 list_add_tail(&ltd->ltd_layout_phase_list,
2658 &llmd->llmd_mdt_phase2_list);
2661 case LE_PHASE2_DONE:
2662 ltd->ltd_layout_done = 1;
2663 list_del_init(&ltd->ltd_layout_list);
/* LE_STOP: the target quit; unless it is a forced stop of the
 * whole LFSCK, record the run as incomplete. */
2666 ltd->ltd_layout_done = 1;
2667 list_del_init(&ltd->ltd_layout_list);
2668 if (!(lr->lr_flags & LEF_FORCE_STOP))
2669 lo->ll_flags |= LF_INCOMPLETE;
2674 spin_unlock(&ltds->ltd_lock);
2676 if (lr->lr_flags & LEF_FORCE_STOP) {
2677 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
2679 memset(stop, 0, sizeof(*stop));
2680 stop->ls_status = lr->lr_status;
2681 stop->ls_flags = lr->lr_param;
2682 lfsck_stop(env, lfsck->li_bottom, stop);
2683 } else if (lfsck_layout_master_to_orphan(llmd)) {
/* Some target reached phase 2: wake the assistant for orphan
 * handling. */
2684 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Handle an incoming LFSCK event notification on a layout slave.
 *
 * Only LE_PHASE2_DONE and LE_STOP from a master are processed.  The
 * notifying master is removed from the slave's tracked master list; once
 * the last master has reported, the slave either wakes its local thread
 * (phase2 done) or stops the local LFSCK with the master's status.
 */
2690 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
2691 struct lfsck_component *com,
2692 struct lfsck_request *lr)
2694 struct lfsck_instance *lfsck = com->lc_lfsck;
2695 struct lfsck_layout_slave_data *llsd = com->lc_data;
2696 struct lfsck_layout_slave_target *llst;
2699 if (lr->lr_event != LE_PHASE2_DONE &&
2700 lr->lr_event != LE_STOP)
/* Drop the notifying master (by index) from the tracked list. */
2703 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index);
2707 lfsck_layout_llst_put(llst);
/* Act only when the last tracked master has checked in. */
2708 if (list_empty(&llsd->llsd_master_list)) {
2709 switch (lr->lr_event) {
2710 case LE_PHASE2_DONE:
2711 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/* Stop event: stop the local (slave) LFSCK, carrying over the
 * master's status and parameters. */
2714 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
2716 memset(stop, 0, sizeof(*stop));
2717 stop->ls_status = lr->lr_status;
2718 stop->ls_flags = lr->lr_param;
2719 lfsck_stop(env, lfsck->li_bottom, stop);
2730 static int lfsck_layout_query(const struct lu_env *env,
2731 struct lfsck_component *com)
2733 struct lfsck_layout *lo = com->lc_file_ram;
2735 return lo->ll_status;
/*
 * Asynchronously notify one target that the layout LFSCK master has
 * stopped.
 *
 * Builds an LE_STOP request with status LS_CO_STOPPED and queues it on
 * the given ptlrpc request set via lfsck_async_request(); a send failure
 * is logged with CERROR.  The target must not be on any layout list
 * (asserted).
 */
2738 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
2739 struct lfsck_component *com,
2740 struct lfsck_tgt_descs *ltds,
2741 struct lfsck_tgt_desc *ltd,
2742 struct ptlrpc_request_set *set)
2744 struct lfsck_thread_info *info = lfsck_env_info(env);
2745 struct lfsck_async_interpret_args *laia = &info->lti_laia;
2746 struct lfsck_request *lr = &info->lti_lr;
2747 struct lfsck_instance *lfsck = com->lc_lfsck;
2750 LASSERT(list_empty(<d->ltd_layout_list));
2751 LASSERT(list_empty(<d->ltd_layout_phase_list));
/* Build the co-stop request: our device index, LE_STOP for the layout
 * component. */
2753 memset(lr, 0, sizeof(*lr));
2754 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
2755 lr->lr_event = LE_STOP;
2756 lr->lr_active = LT_LAYOUT;
2757 if (ltds == &lfsck->li_ost_descs) {
2758 lr->lr_flags = LEF_TO_OST;
/* Do not notify the local device itself. */
2760 if (ltd->ltd_index == lfsck_dev_idx(lfsck->li_bottom))
2765 lr->lr_status = LS_CO_STOPPED;
2767 laia->laia_com = com;
2768 laia->laia_ltds = ltds;
2769 laia->laia_ltd = ltd;
/* Queue the async RPC; completion handled by the master interpret
 * callback. */
2772 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
2773 lfsck_layout_master_async_interpret,
2774 laia, LFSCK_NOTIFY);
2776 CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
2777 lfsck_lfsck2name(lfsck),
2778 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
2779 ltd->ltd_index, rc);
/*
 * Let a master join an already-running slave layout LFSCK.
 *
 * Registers the requesting master (lsp->lsp_index) in the slave's master
 * list; if the local LFSCK thread turns out not to be running, the just
 * added entry is removed again.  li_lock is temporarily dropped around
 * the list operations.
 */
2784 /* with lfsck::li_lock held */
2785 static int lfsck_layout_slave_join(const struct lu_env *env,
2786 struct lfsck_component *com,
2787 struct lfsck_start_param *lsp)
2789 struct lfsck_instance *lfsck = com->lc_lfsck;
2790 struct lfsck_layout_slave_data *llsd = com->lc_data;
2791 struct lfsck_layout_slave_target *llst;
2792 struct lfsck_start *start = lsp->lsp_start;
/* Join only for a valid sender index with the all-MDT flag set. */
2796 if (!lsp->lsp_index_valid || start == NULL ||
2797 !(start->ls_flags & LPF_ALL_MDT))
/* Drop li_lock across the (possibly allocating) list insertion. */
2800 spin_unlock(&lfsck->li_lock);
2801 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
2802 spin_lock(&lfsck->li_lock);
/* Undo the registration if the local thread is not actually running. */
2803 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
2804 spin_unlock(&lfsck->li_lock);
2805 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index);
2807 lfsck_layout_llst_put(llst);
2808 spin_lock(&lfsck->li_lock);
/* Operation table for the layout LFSCK component on the master side. */
2815 static struct lfsck_operations lfsck_layout_master_ops = {
2816 .lfsck_reset = lfsck_layout_reset,
2817 .lfsck_fail = lfsck_layout_fail,
2818 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
2819 .lfsck_prep = lfsck_layout_master_prep,
2820 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
2821 .lfsck_exec_dir = lfsck_layout_exec_dir,
2822 .lfsck_post = lfsck_layout_master_post,
2823 .lfsck_dump = lfsck_layout_dump,
2824 .lfsck_double_scan = lfsck_layout_master_double_scan,
2825 .lfsck_data_release = lfsck_layout_master_data_release,
2826 .lfsck_quit = lfsck_layout_master_quit,
2827 .lfsck_in_notify = lfsck_layout_master_in_notify,
2828 .lfsck_query = lfsck_layout_query,
2829 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/* Operation table for the layout LFSCK component on the slave side. */
2832 static struct lfsck_operations lfsck_layout_slave_ops = {
2833 .lfsck_reset = lfsck_layout_reset,
2834 .lfsck_fail = lfsck_layout_fail,
2835 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
2836 .lfsck_prep = lfsck_layout_slave_prep,
2837 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
2838 .lfsck_exec_dir = lfsck_layout_exec_dir,
2839 .lfsck_post = lfsck_layout_slave_post,
2840 .lfsck_dump = lfsck_layout_dump,
2841 .lfsck_double_scan = lfsck_layout_slave_double_scan,
2842 .lfsck_data_release = lfsck_layout_slave_data_release,
2843 .lfsck_in_notify = lfsck_layout_slave_in_notify,
2844 .lfsck_query = lfsck_layout_query,
2845 .lfsck_join = lfsck_layout_slave_join,
/*
 * Set up the layout LFSCK component for the given LFSCK instance.
 *
 * Allocates the component and its master- or slave-side private data,
 * allocates the in-RAM and on-disk state buffers, locates (or creates)
 * the lfsck_layout trace file under the local root, loads or
 * resets/initializes the persistent state, and links the component onto
 * the instance's idle or scan list according to the recorded status.
 *
 * \retval 0 on success, negative errno on failure (the error path cleans
 * up via lfsck_component_cleanup()).
 */
2848 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
2850 struct lfsck_component *com;
2851 struct lfsck_layout *lo;
2852 struct dt_object *root = NULL;
2853 struct dt_object *obj;
2861 INIT_LIST_HEAD(&com->lc_link);
2862 INIT_LIST_HEAD(&com->lc_link_dir);
2863 init_rwsem(&com->lc_sem);
2864 atomic_set(&com->lc_ref, 1);
2865 com->lc_lfsck = lfsck;
2866 com->lc_type = LT_LAYOUT;
/* Master side: allocate the assistant-thread data and its lists. */
2867 if (lfsck->li_master) {
2868 struct lfsck_layout_master_data *llmd;
2870 com->lc_ops = &lfsck_layout_master_ops;
2871 OBD_ALLOC_PTR(llmd);
2873 GOTO(out, rc = -ENOMEM);
2875 INIT_LIST_HEAD(&llmd->llmd_req_list);
2876 spin_lock_init(&llmd->llmd_lock);
2877 INIT_LIST_HEAD(&llmd->llmd_ost_list);
2878 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
2879 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
2880 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
2881 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
2882 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
2883 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
2884 atomic_set(&llmd->llmd_rpcs_in_flight, 0);
2885 com->lc_data = llmd;
/* Slave side: allocate the sequence/master tracking data. */
2887 struct lfsck_layout_slave_data *llsd;
2889 com->lc_ops = &lfsck_layout_slave_ops;
2890 OBD_ALLOC_PTR(llsd);
2892 GOTO(out, rc = -ENOMEM);
2894 INIT_LIST_HEAD(&llsd->llsd_seq_list);
2895 INIT_LIST_HEAD(&llsd->llsd_master_list);
2896 spin_lock_init(&llsd->llsd_lock);
2897 com->lc_data = llsd;
/* Allocate in-RAM and on-disk copies of the persistent state. */
2899 com->lc_file_size = sizeof(*lo);
2900 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
2901 if (com->lc_file_ram == NULL)
2902 GOTO(out, rc = -ENOMEM);
2904 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
2905 if (com->lc_file_disk == NULL)
2906 GOTO(out, rc = -ENOMEM);
/* Find or create the lfsck_layout trace file under the local root. */
2908 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
2910 GOTO(out, rc = PTR_ERR(root));
2912 if (unlikely(!dt_try_as_dir(env, root)))
2913 GOTO(out, rc = -ENOTDIR);
2915 obj = local_file_find_or_create(env, lfsck->li_los, root,
2917 S_IFREG | S_IRUGO | S_IWUSR);
2919 GOTO(out, rc = PTR_ERR(obj));
/* Load the saved state; reset on corruption, initialize if absent. */
2922 rc = lfsck_layout_load(env, com);
2924 rc = lfsck_layout_reset(env, com, true);
2925 else if (rc == -ENOENT)
2926 rc = lfsck_layout_init(env, com);
/* Link the component onto the idle or scan list per saved status. */
2931 lo = com->lc_file_ram;
2932 switch (lo->ll_status) {
2938 spin_lock(&lfsck->li_lock);
2939 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
2940 spin_unlock(&lfsck->li_lock);
2943 CERROR("%s: unknown lfsck_layout status: rc = %u\n",
2944 lfsck_lfsck2name(lfsck), lo->ll_status);
2946 case LS_SCANNING_PHASE1:
2947 case LS_SCANNING_PHASE2:
2948 /* No need to store the status to disk right now.
2949 * If the system crashed before the status stored,
2950 * it will be loaded back when next time. */
2951 lo->ll_status = LS_CRASHED;
2952 lo->ll_flags |= LF_INCOMPLETE;
2959 spin_lock(&lfsck->li_lock);
2960 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
2961 spin_unlock(&lfsck->li_lock);
/* A crashed LAST_ID needs rebuilding; tell the upper layer. */
2965 if (lo->ll_flags & LF_CRASHED_LASTID) {
2966 LASSERT(lfsck->li_out_notify != NULL);
2968 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
2969 LE_LASTID_REBUILDING);
2975 if (root != NULL && !IS_ERR(root))
2976 lu_object_put(env, &root->do_lu);
/* Error path: release everything allocated above. */
2979 lfsck_component_cleanup(env, com);