4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
38 #include <lustre/lustre_idl.h>
39 #include <lu_object.h>
40 #include <dt_object.h>
41 #include <lustre_linkea.h>
42 #include <lustre_fid.h>
43 #include <lustre_lib.h>
44 #include <lustre_net.h>
45 #include <lustre/lustre_user.h>
46 #include <md_object.h>
47 #include <obd_class.h>
49 #include "lfsck_internal.h"
/* Magic value stamped into the on-disk lfsck_layout trace file; checked on
 * load to detect corruption (see lfsck_layout_load below). */
51 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Name under which the layout component registers with the LFSCK framework. */
53 static const char lfsck_layout_name[] = "lfsck_layout";
/* Per-FID-sequence state tracked by the layout LFSCK slave (one entry per
 * object sequence, linked on llsd_seq_list). */
55 struct lfsck_layout_seq {
56 struct list_head lls_list;
/* Highest object ID known for this sequence; compared against the LAST_ID
 * file to detect a crashed LAST_ID. */
59 __u64 lls_lastid_known;
/* The LAST_ID object for this sequence (loaded by lfsck_layout_lastid_load). */
60 struct dt_object *lls_lastid_obj;
/* Set when lls_lastid needs to be written back to the LAST_ID file. */
61 unsigned int lls_dirty:1;
/* Reference-counted record of one master (MDT) that this slave is running
 * layout verification for. */
64 struct lfsck_layout_slave_target {
65 /* link into lfsck_layout_slave_data::llsd_master_list. */
66 struct list_head llst_list;
/* Private data of the layout LFSCK slave component (com->lc_data on OST side). */
72 struct lfsck_layout_slave_data {
73 /* list for lfsck_layout_seq */
74 struct list_head llsd_seq_list;
76 /* list of the masters involved in layout verification. */
77 struct list_head llsd_master_list;
/* Reference-counted wrapper around a parent (MDT-side) object plus a cached
 * copy of its attributes, shared by the requests generated from it. */
82 struct lfsck_layout_object {
83 struct dt_object *llo_obj;
84 struct lu_attr llo_attr;
/* One parent/child consistency-check request queued for the layout
 * assistant thread (linked on llmd_req_list). */
89 struct lfsck_layout_req {
90 struct list_head llr_list;
91 struct lfsck_layout_object *llr_parent;
92 struct dt_object *llr_child;
/* Which stripe of the parent's LOV EA this child corresponds to. */
94 __u32 llr_lov_idx; /* offset in LOV EA */
/* Private data of the layout LFSCK master component (com->lc_data on the
 * MDT side); owned by the master engine, consumed by the assistant thread. */
97 struct lfsck_layout_master_data {
/* Pending lfsck_layout_req entries for the assistant thread. */
99 struct list_head llmd_req_list;
101 /* list of the OST targets involved in layout verification. */
102 struct list_head llmd_ost_list;
104 /* list for the ost targets in phase1 scanning. */
105 struct list_head llmd_ost_phase1_list;
107 /* list for the ost targets in phase2 scanning. */
108 struct list_head llmd_ost_phase2_list;
/* The assistant thread (see lfsck_layout_assistant). */
110 struct ptlrpc_thread llmd_thread;
/* Number of async LFSCK RPCs not yet interpreted; bounded by the
 * bookmark's lb_async_windows. */
111 atomic_t llmd_rpcs_in_flight;
/* Generation bumped on each query round to mark already-visited targets. */
112 __u32 llmd_touch_gen;
/* Exit status of the assistant thread, published under llmd_lock. */
114 int llmd_assistant_status;
/* Result the master engine asks the assistant to report in post-processing. */
115 int llmd_post_result;
/* Handshake flags between master engine and assistant thread. */
116 unsigned int llmd_to_post:1,
117 llmd_to_double_scan:1,
118 llmd_in_double_scan:1,
/* Per-request context for the slave's async query RPCs; unpacked in
 * lfsck_layout_slave_async_interpret. Holds references on the export,
 * the component and the slave target that must be released there. */
122 struct lfsck_layout_slave_async_args {
123 struct obd_export *llsaa_exp;
124 struct lfsck_component *llsaa_com;
125 struct lfsck_layout_slave_target *llsaa_llst;
/* Drop one reference on @llst. On the final put the target must already be
 * unlinked from llsd_master_list (asserted); the actual free is in lines
 * elided from this excerpt. */
129 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
131 if (atomic_dec_and_test(&llst->llst_ref)) {
132 LASSERT(list_empty(&llst->llst_list));
/* Register master @index with the slave: allocate a new target record and
 * append it to llsd_master_list, unless one with the same index already
 * exists (duplicate handling is in lines elided from this excerpt). */
139 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
141 struct lfsck_layout_slave_target *llst;
142 struct lfsck_layout_slave_target *tmp;
149 INIT_LIST_HEAD(&llst->llst_list);
151 llst->llst_index = index;
/* New record starts with one reference, owned by the list. */
152 atomic_set(&llst->llst_ref, 1);
154 spin_lock(&llsd->llsd_lock);
/* Scan for an existing entry with the same master index. */
155 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
156 if (tmp->llst_index == index) {
162 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
163 spin_unlock(&llsd->llsd_lock);
/* Unlink @llst from llsd_master_list (if still linked) and drop the list's
 * reference. The put at line 185 presumably runs only when the unlink
 * happened — the guarding control flow is elided from this excerpt. */
172 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
173 struct lfsck_layout_slave_target *llst)
177 spin_lock(&llsd->llsd_lock);
178 if (!list_empty(&llst->llst_list)) {
179 list_del_init(&llst->llst_list);
182 spin_unlock(&llsd->llsd_lock);
185 lfsck_layout_llst_put(llst);
/* Find the slave target with the given master index, unlink it from
 * llsd_master_list and return it (caller inherits the list's reference);
 * the not-found return path is elided from this excerpt. */
188 static inline struct lfsck_layout_slave_target *
189 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
192 struct lfsck_layout_slave_target *llst;
194 spin_lock(&llsd->llsd_lock);
195 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
196 if (llst->llst_index == index) {
197 list_del_init(&llst->llst_list);
198 spin_unlock(&llsd->llsd_lock);
203 spin_unlock(&llsd->llsd_lock);
/* Drop one reference on @llo; on the last put, release the wrapped object
 * (the free of @llo itself is in lines elided from this excerpt). */
208 static inline void lfsck_layout_object_put(const struct lu_env *env,
209 struct lfsck_layout_object *llo)
211 if (atomic_dec_and_test(&llo->llo_ref)) {
212 lfsck_object_put(env, llo->llo_obj);
/* Release the resources held by a finished request: the child object's
 * reference and the parent wrapper's reference. @llr must already be off
 * llmd_req_list. */
217 static inline void lfsck_layout_req_fini(const struct lu_env *env,
218 struct lfsck_layout_req *llr)
220 lu_object_put(env, &llr->llr_child->do_lu);
221 lfsck_layout_object_put(env, llr->llr_parent);
/* Test under llmd_lock whether the assistant's request queue is empty;
 * the true/false returns are in lines elided from this excerpt. */
225 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
229 spin_lock(&llmd->llmd_lock);
230 if (list_empty(&llmd->llmd_req_list))
232 spin_unlock(&llmd->llmd_lock);
/* Convert an on-disk (little-endian) lfsck_layout record into CPU byte
 * order, field by field. Mirror of lfsck_layout_cpu_to_le below — keep the
 * two in sync when fields are added. */
237 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
238 const struct lfsck_layout *src)
242 des->ll_magic = le32_to_cpu(src->ll_magic);
243 des->ll_status = le32_to_cpu(src->ll_status);
244 des->ll_flags = le32_to_cpu(src->ll_flags);
245 des->ll_success_count = le32_to_cpu(src->ll_success_count);
246 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
247 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
248 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
249 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
250 des->ll_time_last_checkpoint =
251 le64_to_cpu(src->ll_time_last_checkpoint);
252 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
253 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
254 des->ll_pos_first_inconsistent =
255 le64_to_cpu(src->ll_pos_first_inconsistent);
256 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
257 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
258 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
259 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
/* Per-inconsistency-type repair counters. */
260 for (i = 0; i < LLIT_MAX; i++)
261 des->ll_objs_repaired[i] =
262 le64_to_cpu(src->ll_objs_repaired[i]);
263 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/* Convert a CPU-order lfsck_layout record into the on-disk little-endian
 * layout, field by field. Inverse of lfsck_layout_le_to_cpu above — keep
 * the two in sync when fields are added. */
266 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
267 const struct lfsck_layout *src)
271 des->ll_magic = cpu_to_le32(src->ll_magic);
272 des->ll_status = cpu_to_le32(src->ll_status);
273 des->ll_flags = cpu_to_le32(src->ll_flags);
274 des->ll_success_count = cpu_to_le32(src->ll_success_count);
275 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
276 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
277 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
278 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
279 des->ll_time_last_checkpoint =
280 cpu_to_le64(src->ll_time_last_checkpoint);
281 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
282 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
283 des->ll_pos_first_inconsistent =
284 cpu_to_le64(src->ll_pos_first_inconsistent);
285 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
286 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
287 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
288 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
/* Per-inconsistency-type repair counters. */
289 for (i = 0; i < LLIT_MAX; i++)
290 des->ll_objs_repaired[i] =
291 cpu_to_le64(src->ll_objs_repaired[i]);
292 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/* Load the lfsck_layout trace file from disk into com->lc_file_ram,
 * byte-swapping and validating the magic.
 *
 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
 * \retval 0: succeed.
 * \retval -ve: failed cases. */
296 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
297 * \retval 0: succeed.
298 * \retval -ve: failed cases.
300 static int lfsck_layout_load(const struct lu_env *env,
301 struct lfsck_component *com)
303 struct lfsck_layout *lo = com->lc_file_ram;
304 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
305 ssize_t size = com->lc_file_size;
/* Read the raw record into the disk-format staging buffer. */
309 rc = dbo->dbo_read(env, com->lc_obj,
310 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
315 CWARN("%s: failed to load lfsck_layout: rc = %d\n",
316 lfsck_lfsck2name(com->lc_lfsck), rc);
/* Short read: the trace file is truncated/corrupted, tell caller to reset. */
318 } else if (rc != size) {
319 CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
320 lfsck_lfsck2name(com->lc_lfsck), rc);
324 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
/* Wrong magic also means a corrupted trace file. */
325 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
326 CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
327 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
328 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/* Persist com->lc_file_ram to the on-disk trace file: convert to little
 * endian, then declare + start + write inside one local transaction on the
 * bottom device. Caller is expected to hold com->lc_sem for writing (as
 * lfsck_layout_init does). */
335 static int lfsck_layout_store(const struct lu_env *env,
336 struct lfsck_component *com)
338 struct dt_object *obj = com->lc_obj;
339 struct lfsck_instance *lfsck = com->lc_lfsck;
340 struct lfsck_layout *lo = com->lc_file_disk;
341 struct thandle *handle;
342 ssize_t size = com->lc_file_size;
347 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
348 handle = dt_trans_create(env, lfsck->li_bottom);
349 if (IS_ERR(handle)) {
350 rc = PTR_ERR(handle);
351 CERROR("%s: fail to create trans for storing lfsck_layout: "
352 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
356 rc = dt_declare_record_write(env, obj, size, pos, handle);
358 CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
359 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
363 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
365 CERROR("%s: fail to start trans for storing lfsck_layout: "
366 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
370 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
373 CERROR("%s: fail to store lfsck_layout(1): size = %d, "
374 "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);
/* Stop the transaction on both success and error paths. */
379 dt_trans_stop(env, lfsck->li_bottom, handle);
/* Reset the layout trace file to a pristine state (magic + LS_INIT, all
 * counters zero) and write it out under com->lc_sem. */
384 static int lfsck_layout_init(const struct lu_env *env,
385 struct lfsck_component *com)
387 struct lfsck_layout *lo = com->lc_file_ram;
390 memset(lo, 0, com->lc_file_size);
391 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
392 lo->ll_status = LS_INIT;
393 down_write(&com->lc_sem);
394 rc = lfsck_layout_store(env, com);
395 up_write(&com->lc_sem);
/* Decide whether @fid names an OST-object: first ask the FLD server for the
 * sequence's range type; if inconclusive, fall back to the object's LMA
 * xattr, and finally probe for the XATTR_NAME_FID filter fid. Returns 1 for
 * OST-object, 0 for not (error paths are partly elided from this excerpt). */
400 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
401 struct dt_object *obj, const struct lu_fid *fid)
403 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
404 struct lu_seq_range range = { 0 };
405 struct lustre_mdt_attrs *lma;
408 fld_range_set_any(&range);
409 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
411 if (fld_range_is_ost(&range))
/* FLD could not answer: inspect the object's LMA xattr directly. */
417 lma = &lfsck_env_info(env)->lti_lma;
418 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
419 XATTR_NAME_LMA, BYPASS_CAPA);
420 if (rc == sizeof(*lma)) {
421 lustre_lma_swab(lma);
423 /* Generally, the low layer OSD create handler or OI scrub
424 * will set the LMAC_FID_ON_OST for all external visible
425 * OST-objects. But to make the otable-based iteration to
426 * be independent from OI scrub in spite of it got failure
427 * or not, we check the LMAC_FID_ON_OST here to guarantee
428 * that the LFSCK will not repair something by wrong. */
429 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
/* Last resort: presence of the filter-fid xattr marks an OST-object. */
432 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/* Look up the per-sequence record for @seq on the slave's sorted
 * llsd_seq_list; the list is kept ascending by lls_seq (see
 * lfsck_layout_seq_insert), so the scan can stop early at the first
 * larger entry. Return paths are elided from this excerpt. */
437 static struct lfsck_layout_seq *
438 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
440 struct lfsck_layout_seq *lls;
442 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
443 if (lls->lls_seq == seq)
446 if (lls->lls_seq > seq)
/* Insert @lls into llsd_seq_list keeping the list sorted by ascending
 * lls_seq: find the first entry with a larger sequence and link before it
 * (or at the tail if none is larger). */
454 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
455 struct lfsck_layout_seq *lls)
457 struct lfsck_layout_seq *tmp;
458 struct list_head *pos = &llsd->llsd_seq_list;
460 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
461 if (lls->lls_seq < tmp->lls_seq) {
462 pos = &tmp->lls_list;
466 list_add_tail(&lls->lls_list, pos);
/* (Re)create a missing LAST_ID file for the sequence of @obj and write an
 * initial last-id value into it, all inside one local transaction. Skipped
 * entirely in dry-run mode. */
470 lfsck_layout_lastid_create(const struct lu_env *env,
471 struct lfsck_instance *lfsck,
472 struct dt_object *obj)
474 struct lfsck_thread_info *info = lfsck_env_info(env);
475 struct lu_attr *la = &info->lti_la;
476 struct dt_object_format *dof = &info->lti_dof;
477 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
478 struct dt_device *dt = lfsck->li_bottom;
485 CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
486 fid_seq(lfsck_dto2fid(obj)));
/* Dry-run must not modify anything on disk. */
488 if (bk->lb_param & LPF_DRYRUN)
491 memset(la, 0, sizeof(*la));
492 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
493 la->la_valid = LA_MODE | LA_UID | LA_GID;
494 dof->dof_type = dt_mode_to_dft(S_IFREG);
496 th = dt_trans_create(env, dt);
498 RETURN(rc = PTR_ERR(th));
500 rc = dt_declare_create(env, obj, la, NULL, dof, th);
504 rc = dt_declare_record_write(env, obj, sizeof(lastid), pos, th);
508 rc = dt_trans_start_local(env, dt, th);
/* Re-check existence under the write lock: another thread may have
 * created the object between the declaration and here. */
512 dt_write_lock(env, obj, 0);
513 if (likely(!dt_object_exists(obj))) {
514 rc = dt_create(env, obj, la, NULL, dof, th);
516 rc = dt_record_write(env, obj,
517 lfsck_buf_get(env, &lastid, sizeof(lastid)),
520 dt_write_unlock(env, obj);
525 dt_trans_stop(env, dt, th);
/* Re-read the LAST_ID value from disk for @lls and reconcile it with the
 * highest object ID the LFSCK has actually seen. If the on-disk value is
 * behind lls_lastid_known the LAST_ID file crashed: raise LF_CRASHED_LASTID
 * and notify the out-of-band listener so rebuilding can start. */
531 lfsck_layout_lastid_reload(const struct lu_env *env,
532 struct lfsck_component *com,
533 struct lfsck_layout_seq *lls)
539 dt_read_lock(env, lls->lls_lastid_obj, 0);
540 rc = dt_record_read(env, lls->lls_lastid_obj,
541 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
542 dt_read_unlock(env, lls->lls_lastid_obj);
543 if (unlikely(rc != 0))
546 lastid = le64_to_cpu(lastid);
547 if (lastid < lls->lls_lastid_known) {
548 struct lfsck_instance *lfsck = com->lc_lfsck;
549 struct lfsck_layout *lo = com->lc_file_ram;
/* Trust what the scan has observed over the crashed file. */
551 lls->lls_lastid = lls->lls_lastid_known;
/* Only notify once per crash: flag set guards re-notification. */
553 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
554 LASSERT(lfsck->li_out_notify != NULL);
556 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
557 LE_LASTID_REBUILDING);
558 lo->ll_flags |= LF_CRASHED_LASTID;
560 } else if (lastid >= lls->lls_lastid) {
561 lls->lls_lastid = lastid;
/* Flush every dirty per-sequence LAST_ID value back to its LAST_ID file,
 * one local transaction per sequence. Clean sequences are re-validated via
 * lfsck_layout_lastid_reload before being skipped. Dry-run mode does not
 * write. */
569 lfsck_layout_lastid_store(const struct lu_env *env,
570 struct lfsck_component *com)
572 struct lfsck_instance *lfsck = com->lc_lfsck;
573 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
574 struct dt_device *dt = lfsck->li_bottom;
575 struct lfsck_layout_slave_data *llsd = com->lc_data;
576 struct lfsck_layout_seq *lls;
582 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
585 /* XXX: Add the code back if we really found related
586 * inconsistent cases in the future. */
588 if (!lls->lls_dirty) {
589 /* In OFD, before the pre-creation, the LAST_ID
590 * file will be updated firstly, which may hide
591 * some potential crashed cases. For example:
593 * The old obj1's ID is higher than old LAST_ID
594 * but lower than the new LAST_ID, but the LFSCK
595 * have not touch the obj1 until the OFD updated
596 * the LAST_ID. So the LFSCK does not regard it
597 * as crashed case. But when OFD does not create
598 * successfully, it will set the LAST_ID as the
599 * real created objects' ID, then LFSCK needs to
600 * found related inconsistency. */
601 rc = lfsck_layout_lastid_reload(env, com, lls);
/* Still clean after the reload: nothing to sync for this seq. */
602 if (likely(!lls->lls_dirty))
607 CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
608 " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);
610 if (bk->lb_param & LPF_DRYRUN) {
615 th = dt_trans_create(env, dt);
618 CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
619 lfsck_lfsck2name(com->lc_lfsck),
624 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
625 sizeof(lastid), pos, th);
629 rc = dt_trans_start_local(env, dt, th);
633 lastid = cpu_to_le64(lls->lls_lastid);
634 dt_write_lock(env, lls->lls_lastid_obj, 0);
635 rc = dt_record_write(env, lls->lls_lastid_obj,
636 lfsck_buf_get(env, &lastid,
637 sizeof(lastid)), &pos, th);
638 dt_write_unlock(env, lls->lls_lastid_obj);
643 dt_trans_stop(env, dt, th);
646 CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
647 lfsck_lfsck2name(com->lc_lfsck),
/* Locate and read the LAST_ID file for @lls->lls_seq. A missing file or a
 * zero-length read marks the LAST_ID as crashed (notify once, set
 * LF_CRASHED_LASTID) and triggers re-creation. On success the loaded value
 * is converted from little endian and the object reference is kept in
 * lls_lastid_obj. */
656 lfsck_layout_lastid_load(const struct lu_env *env,
657 struct lfsck_component *com,
658 struct lfsck_layout_seq *lls)
660 struct lfsck_instance *lfsck = com->lc_lfsck;
661 struct lfsck_layout *lo = com->lc_file_ram;
662 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
663 struct dt_object *obj;
668 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
669 obj = dt_locate(env, lfsck->li_bottom, fid);
671 RETURN(PTR_ERR(obj));
673 /* LAST_ID crashed, to be rebuilt */
674 if (!dt_object_exists(obj)) {
675 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
676 LASSERT(lfsck->li_out_notify != NULL);
678 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
679 LE_LASTID_REBUILDING);
680 lo->ll_flags |= LF_CRASHED_LASTID;
/* Fault-injection hook: optionally stall here (releasing lc_sem so
 * others can make progress) until the LFSCK thread stops. */
682 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
684 struct l_wait_info lwi = LWI_TIMEOUT(
685 cfs_time_seconds(cfs_fail_val),
688 up_write(&com->lc_sem);
689 l_wait_event(lfsck->li_thread.t_ctl_waitq,
690 !thread_is_running(&lfsck->li_thread),
692 down_write(&com->lc_sem);
696 rc = lfsck_layout_lastid_create(env, lfsck, obj);
698 dt_read_lock(env, obj, 0);
699 rc = dt_read(env, obj,
700 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
702 dt_read_unlock(env, obj);
/* Partial read is corruption, not an I/O error: report -EFAULT. */
703 if (rc != 0 && rc != sizeof(__u64))
704 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
/* Empty file: treat as crashed LAST_ID as well. */
706 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
707 LASSERT(lfsck->li_out_notify != NULL);
709 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
710 LE_LASTID_REBUILDING);
711 lo->ll_flags |= LF_CRASHED_LASTID;
714 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
722 lfsck_object_put(env, obj);
724 lls->lls_lastid_obj = obj;
/* Interpret callback for the master's async LFSCK RPCs to OSTs. On a
 * successful LE_START(-like) event, (re)link the target onto the working
 * and phase-1 lists; on failure, mark the run LF_INCOMPLETE. For LE_QUERY
 * replies, move the target between phase-1/phase-2 lists (or drop it) based
 * on the slave's reported scanning status. Event constants for the first
 * case label are elided from this excerpt. */
729 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
730 struct ptlrpc_request *req,
733 struct lfsck_async_interpret_args *laia = args;
734 struct lfsck_component *com = laia->laia_com;
735 struct lfsck_layout_master_data *llmd = com->lc_data;
736 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
737 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
738 struct lfsck_request *lr = laia->laia_lr;
740 switch (lr->lr_event) {
743 spin_lock(&ltds->ltd_lock);
744 if (!ltd->ltd_dead && !ltd->ltd_layout_done) {
745 if (list_empty(&ltd->ltd_layout_list))
747 &ltd->ltd_layout_list,
748 &llmd->llmd_ost_list);
749 if (list_empty(&ltd->ltd_layout_phase_list))
751 &ltd->ltd_layout_phase_list,
752 &llmd->llmd_ost_phase1_list);
754 spin_unlock(&ltds->ltd_lock);
756 struct lfsck_layout *lo = com->lc_file_ram;
/* Target unreachable/failed: the scan result will be partial. */
758 lo->ll_flags |= LF_INCOMPLETE;
766 spin_lock(&ltds->ltd_lock);
767 if (rc == 0 && !ltd->ltd_dead && !ltd->ltd_layout_done) {
768 struct lfsck_reply *reply;
770 reply = req_capsule_server_get(&req->rq_pill,
772 switch (reply->lr_status) {
773 case LS_SCANNING_PHASE1:
/* Still in phase 1: leave the target where it is. */
775 case LS_SCANNING_PHASE2:
776 list_del(&ltd->ltd_layout_phase_list);
777 list_add_tail(&ltd->ltd_layout_phase_list,
778 &llmd->llmd_ost_phase2_list);
/* Any other status: the target is finished, unlink it. */
781 list_del_init(&ltd->ltd_layout_phase_list);
782 list_del_init(&ltd->ltd_layout_list);
786 spin_unlock(&ltds->ltd_lock);
790 CERROR("%s: unexpected event: rc = %d\n",
791 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
/* Send an LE_QUERY RPC to every OST still on the phase-1 list, collecting
 * their scanning status via lfsck_layout_master_async_interpret. The
 * llmd_touch_gen generation stamp stops the walk once every target has been
 * visited this round (each visited target is rotated to the list tail). */
798 static int lfsck_layout_master_query_others(const struct lu_env *env,
799 struct lfsck_component *com)
801 struct lfsck_thread_info *info = lfsck_env_info(env);
802 struct lfsck_request *lr = &info->lti_lr;
803 struct lfsck_async_interpret_args *laia = &info->lti_laia;
804 struct lfsck_instance *lfsck = com->lc_lfsck;
805 struct lfsck_layout_master_data *llmd = com->lc_data;
806 struct ptlrpc_request_set *set;
807 struct lfsck_tgt_descs *ltds;
808 struct lfsck_tgt_desc *ltd;
814 set = ptlrpc_prep_set();
/* New query round: bump the generation used to detect already-queried
 * targets. */
818 llmd->llmd_touch_gen++;
819 ltds = &lfsck->li_ost_descs;
820 memset(lr, 0, sizeof(*lr));
821 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
822 lr->lr_event = LE_QUERY;
823 lr->lr_active = LT_LAYOUT;
825 laia->laia_com = com;
826 laia->laia_ltds = ltds;
828 spin_lock(&ltds->ltd_lock);
829 while (!list_empty(&llmd->llmd_ost_phase1_list)) {
830 ltd = list_entry(llmd->llmd_ost_phase1_list.next,
831 struct lfsck_tgt_desc,
832 ltd_layout_phase_list);
/* Head already carries this round's generation: everyone visited. */
833 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
836 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
/* Rotate the visited target to the tail. */
837 list_del(&ltd->ltd_layout_phase_list);
838 list_add_tail(&ltd->ltd_layout_phase_list,
839 &llmd->llmd_ost_phase1_list);
/* Hold a target reference across the unlocked RPC submission. */
840 atomic_inc(&ltd->ltd_ref);
841 laia->laia_ltd = ltd;
842 spin_unlock(&ltds->ltd_lock);
843 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
844 lfsck_layout_master_async_interpret,
847 CERROR("%s: fail to query OST %x for layout: rc = %d\n",
848 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
854 spin_lock(&ltds->ltd_lock);
856 spin_unlock(&ltds->ltd_lock);
/* Wait for all queued queries to complete, then tear down the set. */
859 rc = ptlrpc_set_wait(set);
860 ptlrpc_set_destroy(set);
862 RETURN(rc1 != 0 ? rc1 : rc);
/* True when the master may move on to orphan handling: some OST already
 * reached phase 2, or no OST is left in phase 1. */
866 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
868 return !list_empty(&llmd->llmd_ost_phase2_list) ||
869 list_empty(&llmd->llmd_ost_phase1_list);
/* Broadcast the event in @lr to the relevant OSTs. For LE_START the whole
 * OST bitmap is walked (a failed notification marks the run LF_INCOMPLETE);
 * for stop/done events the targets are drained from llmd_ost_list. All RPCs
 * go through one request set and share
 * lfsck_layout_master_async_interpret. The switch's case labels are elided
 * from this excerpt. */
872 static int lfsck_layout_master_notify_others(const struct lu_env *env,
873 struct lfsck_component *com,
874 struct lfsck_request *lr)
876 struct lfsck_thread_info *info = lfsck_env_info(env);
877 struct lfsck_async_interpret_args *laia = &info->lti_laia;
878 struct lfsck_instance *lfsck = com->lc_lfsck;
879 struct lfsck_layout_master_data *llmd = com->lc_data;
880 struct lfsck_layout *lo = com->lc_file_ram;
881 struct ptlrpc_request_set *set;
882 struct lfsck_tgt_descs *ltds;
883 struct lfsck_tgt_desc *ltd;
889 set = ptlrpc_prep_set();
893 lr->lr_active = LT_LAYOUT;
894 laia->laia_com = com;
896 switch (lr->lr_event) {
898 ltds = &lfsck->li_ost_descs;
899 laia->laia_ltds = ltds;
/* Walk every registered OST under the targets' rw-sem. */
900 down_read(&ltds->ltd_rw_sem);
901 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
902 ltd = lfsck_tgt_get(ltds, idx);
903 LASSERT(ltd != NULL);
905 laia->laia_ltd = ltd;
906 ltd->ltd_layout_done = 0;
907 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
908 lfsck_layout_master_async_interpret,
911 CERROR("%s: fail to notify OST %x for layout "
913 lfsck_lfsck2name(lfsck), idx, rc);
/* Could not reach this OST: the run will be partial. */
915 lo->ll_flags |= LF_INCOMPLETE;
920 up_read(&ltds->ltd_rw_sem);
924 ltds = &lfsck->li_ost_descs;
925 laia->laia_ltds = ltds;
926 spin_lock(&ltds->ltd_lock);
/* Drain the working list; each target is unlinked before its RPC. */
927 while (!list_empty(&llmd->llmd_ost_list)) {
928 ltd = list_entry(llmd->llmd_ost_list.next,
929 struct lfsck_tgt_desc,
931 list_del_init(&ltd->ltd_layout_phase_list);
932 list_del_init(&ltd->ltd_layout_list);
933 laia->laia_ltd = ltd;
934 spin_unlock(&ltds->ltd_lock);
935 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
936 lfsck_layout_master_async_interpret,
939 CERROR("%s: fail to notify OST %x for layout "
940 "stop/done: rc = %d\n",
941 lfsck_lfsck2name(lfsck),
945 spin_lock(&ltds->ltd_lock);
947 spin_unlock(&ltds->ltd_lock);
952 CERROR("%s: unexpected LFSCK event: rc = %d\n",
953 lfsck_lfsck2name(lfsck), lr->lr_event);
/* Wait for all notifications, then release the set. */
959 rc = ptlrpc_set_wait(set);
960 ptlrpc_set_destroy(set);
962 if (rc == 0 && lr->lr_event == LE_START &&
963 list_empty(&llmd->llmd_ost_list))
/* Record the outcome of the double-scan (phase 2) into the trace file:
 * update run-time/checkpoint accounting, derive the final status from @rc
 * (completed/partial, stopped/paused, or failed), move the component onto
 * the idle list unless paused, and persist everything under lc_sem. The
 * positive-@rc branch header is elided from this excerpt. */
969 static int lfsck_layout_double_scan_result(const struct lu_env *env,
970 struct lfsck_component *com,
973 struct lfsck_instance *lfsck = com->lc_lfsck;
974 struct lfsck_layout *lo = com->lc_file_ram;
975 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
977 down_write(&com->lc_sem);
979 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
980 HALF_SEC - lfsck->li_time_last_checkpoint);
981 lo->ll_time_last_checkpoint = cfs_time_current_sec();
982 lo->ll_objs_checked_phase2 += com->lc_new_checked;
/* Success path: completed, or only partial if some OST was unreachable. */
986 if (lo->ll_flags & LF_INCOMPLETE)
987 lo->ll_status = LS_PARTIAL;
989 lo->ll_status = LS_COMPLETED;
990 if (!(bk->lb_param & LPF_DRYRUN))
991 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
992 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
993 lo->ll_success_count++;
994 } else if (rc == 0) {
/* Stopped without error: inherit the instance status. */
995 lo->ll_status = lfsck->li_status;
996 if (lo->ll_status == 0)
997 lo->ll_status = LS_STOPPED;
999 lo->ll_status = LS_FAILED;
/* Paused components stay on the active list for resumption. */
1002 if (lo->ll_status != LS_PAUSED) {
1003 spin_lock(&lfsck->li_lock);
1004 list_del_init(&com->lc_link);
1005 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
1006 spin_unlock(&lfsck->li_lock);
1009 rc = lfsck_layout_store(env, com);
1011 up_write(&com->lc_sem);
/* Placeholder for phase-2 orphan scanning on one OST target; the real
 * implementation lands in a later patch. */
1016 static int lfsck_layout_scan_orphan(const struct lu_env *env,
1017 struct lfsck_component *com,
1018 struct lfsck_tgt_desc *ltd)
1020 /* XXX: To be extended in other patch. */
/* Main loop of the layout LFSCK assistant thread on the master.
 *
 * Lifecycle: broadcast LE_START to all OSTs, mark itself SVC_RUNNING, then
 * repeatedly drain llmd_req_list (throttled by the async-window limit and
 * llmd_prefetched accounting), waking the master engine as requests
 * complete. When the engine flags llmd_to_post it broadcasts
 * LE_PHASE1_DONE; when flagged llmd_to_double_scan it polls the OSTs (via
 * lfsck_layout_master_query_others, at most every 30s) until some target
 * reaches phase 2, runs orphan scanning per target, and finishes when the
 * phase-1 list empties. Cleanup frees unfinished requests, waits for
 * in-flight RPCs, broadcasts the final LE_PHASE2_DONE/LE_STOP event, and
 * publishes the exit status under llmd_lock before freeing the thread
 * args. On llmd_exit the on-disk trace file is deliberately NOT updated so
 * unverified objects are re-handled on the next run. Several statements
 * (ENTRY/RETURN, some cleanup labels) are elided from this excerpt. */
1025 static int lfsck_layout_assistant(void *args)
1027 struct lfsck_thread_args *lta = args;
1028 struct lu_env *env = &lta->lta_env;
1029 struct lfsck_component *com = lta->lta_com;
1030 struct lfsck_instance *lfsck = lta->lta_lfsck;
1031 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1032 struct lfsck_position *pos = &com->lc_pos_start;
1033 struct lfsck_thread_info *info = lfsck_env_info(env);
1034 struct lfsck_request *lr = &info->lti_lr;
1035 struct lfsck_layout_master_data *llmd = com->lc_data;
1036 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1037 struct ptlrpc_thread *athread = &llmd->llmd_thread;
1038 struct lfsck_layout_req *llr;
1039 struct l_wait_info lwi = { 0 };
/* Build and broadcast the LE_START request to all OSTs. */
1044 memset(lr, 0, sizeof(*lr));
1045 lr->lr_event = LE_START;
1046 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1047 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
1049 lr->lr_speed = bk->lb_speed_limit;
1050 lr->lr_version = bk->lb_version;
1051 lr->lr_param = bk->lb_param;
1052 lr->lr_async_windows = bk->lb_async_windows;
/* Starting from (nearly) the beginning implies a full reset on slaves. */
1053 if (pos->lp_oit_cookie <= 1)
1054 lr->lr_param |= LPF_RESET;
1056 rc = lfsck_layout_master_notify_others(env, com, lr);
1058 CERROR("%s: fail to notify others for layout start: rc = %d\n",
1059 lfsck_lfsck2name(lfsck), rc);
/* Publish SVC_RUNNING and wake the master engine waiting on startup. */
1063 spin_lock(&llmd->llmd_lock);
1064 thread_set_flags(athread, SVC_RUNNING);
1065 spin_unlock(&llmd->llmd_lock);
1066 wake_up_all(&mthread->t_ctl_waitq);
/* Inner loop: consume queued requests, respecting the async window. */
1069 while (!list_empty(&llmd->llmd_req_list)) {
1070 bool wakeup = false;
1072 l_wait_event(athread->t_ctl_waitq,
1073 bk->lb_async_windows == 0 ||
1074 atomic_read(&llmd->llmd_rpcs_in_flight) <
1075 bk->lb_async_windows ||
1079 if (unlikely(llmd->llmd_exit))
1080 GOTO(cleanup1, rc = llmd->llmd_post_result);
1082 /* XXX: To be extended in other patch.
1084 * Compare the OST side attribute with local attribute,
1085 * and fix it if found inconsistency. */
1087 spin_lock(&llmd->llmd_lock);
1088 llr = list_entry(llmd->llmd_req_list.next,
1089 struct lfsck_layout_req,
1091 list_del_init(&llr->llr_list);
/* Dropping below the window frees the master to prefetch more. */
1092 if (bk->lb_async_windows != 0 &&
1093 llmd->llmd_prefetched >= bk->lb_async_windows)
1096 llmd->llmd_prefetched--;
1097 spin_unlock(&llmd->llmd_lock);
1099 wake_up_all(&mthread->t_ctl_waitq);
1101 lfsck_layout_req_fini(env, llr);
1104 /* Wakeup the master engine if it is waiting in checkpoint. */
1105 if (atomic_read(&llmd->llmd_rpcs_in_flight) == 0)
1106 wake_up_all(&mthread->t_ctl_waitq);
/* Idle until there is work, a post request, or a double-scan request. */
1108 l_wait_event(athread->t_ctl_waitq,
1109 !lfsck_layout_req_empty(llmd) ||
1111 llmd->llmd_to_post ||
1112 llmd->llmd_to_double_scan,
1115 if (unlikely(llmd->llmd_exit))
1116 GOTO(cleanup1, rc = llmd->llmd_post_result);
1118 if (!list_empty(&llmd->llmd_req_list))
/* Phase-1 post-processing: tell the OSTs phase 1 is done. */
1121 if (llmd->llmd_to_post) {
1122 llmd->llmd_to_post = 0;
1123 LASSERT(llmd->llmd_post_result > 0);
1125 memset(lr, 0, sizeof(*lr));
1126 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1127 lr->lr_event = LE_PHASE1_DONE;
1128 lr->lr_status = llmd->llmd_post_result;
1129 rc = lfsck_layout_master_notify_others(env, com, lr);
1131 CERROR("%s: failed to notify others "
1132 "for layout post: rc = %d\n",
1133 lfsck_lfsck2name(lfsck), rc);
1135 /* Wakeup the master engine to go ahead. */
1136 wake_up_all(&mthread->t_ctl_waitq);
1139 if (llmd->llmd_to_double_scan) {
1140 llmd->llmd_to_double_scan = 0;
1141 atomic_inc(&lfsck->li_double_scan_count);
1142 llmd->llmd_in_double_scan = 1;
1143 wake_up_all(&mthread->t_ctl_waitq);
1145 while (llmd->llmd_in_double_scan) {
1146 struct lfsck_tgt_descs *ltds =
1147 &lfsck->li_ost_descs;
1148 struct lfsck_tgt_desc *ltd;
1150 rc = lfsck_layout_master_query_others(env, com);
1151 if (lfsck_layout_master_to_orphan(llmd))
1157 /* Pull LFSCK status on related targets once
1158 * per 30 seconds if we are not notified. */
1159 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
1160 cfs_time_seconds(1),
1162 rc = l_wait_event(athread->t_ctl_waitq,
1163 lfsck_layout_master_to_orphan(llmd) ||
1165 !thread_is_running(mthread),
1168 if (unlikely(llmd->llmd_exit ||
1169 !thread_is_running(mthread)))
1170 GOTO(cleanup2, rc = 0);
/* Timeout just means "poll again"; not an error here. */
1172 if (rc == -ETIMEDOUT)
/* Process each OST that reached phase 2: scan its orphans. */
1179 spin_lock(&ltds->ltd_lock);
1181 &llmd->llmd_ost_phase2_list)) {
1183 llmd->llmd_ost_phase2_list.next,
1184 struct lfsck_tgt_desc,
1185 ltd_layout_phase_list);
1187 &ltd->ltd_layout_phase_list);
1188 spin_unlock(&ltds->ltd_lock);
1190 rc = lfsck_layout_scan_orphan(env, com,
1193 bk->lb_param & LPF_FAILOUT)
1196 if (unlikely(llmd->llmd_exit ||
1197 !thread_is_running(mthread)))
1198 GOTO(cleanup2, rc = 0);
1200 spin_lock(&ltds->ltd_lock);
/* All phase-1 targets gone: the double scan is complete. */
1203 if (list_empty(&llmd->llmd_ost_phase1_list)) {
1204 spin_unlock(&ltds->ltd_lock);
1205 GOTO(cleanup2, rc = 1);
1207 spin_unlock(&ltds->ltd_lock);
1213 /* Cleanup the unfinished requests. */
1214 spin_lock(&llmd->llmd_lock);
1215 while (!list_empty(&llmd->llmd_req_list)) {
1216 llr = list_entry(llmd->llmd_req_list.next,
1217 struct lfsck_layout_req,
1219 list_del_init(&llr->llr_list);
1220 llmd->llmd_prefetched--;
/* Drop the lock around req_fini: it may sleep releasing objects. */
1221 spin_unlock(&llmd->llmd_lock);
1222 lfsck_layout_req_fini(env, llr);
1223 spin_lock(&llmd->llmd_lock);
1225 spin_unlock(&llmd->llmd_lock);
1227 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
1228 llmd->llmd_prefetched);
/* Wait for every in-flight RPC before sending the final event. */
1230 l_wait_event(athread->t_ctl_waitq,
1231 atomic_read(&llmd->llmd_rpcs_in_flight) == 0,
/* Build the final event from the exit reason in @rc. */
1235 memset(lr, 0, sizeof(*lr));
1236 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1238 lr->lr_event = LE_PHASE2_DONE;
1240 } else if (rc == 0) {
1241 lr->lr_event = LE_STOP;
1242 if (lfsck->li_status == LS_PAUSED ||
1243 lfsck->li_status == LS_CO_PAUSED)
1244 lr->lr_status = LS_CO_PAUSED;
1245 else if (lfsck->li_status == LS_STOPPED ||
1246 lfsck->li_status == LS_CO_STOPPED)
1247 lr->lr_status = LS_CO_STOPPED;
1251 lr->lr_event = LE_STOP;
1252 lr->lr_status = LS_CO_FAILED;
1255 rc1 = lfsck_layout_master_notify_others(env, com, lr);
1257 CERROR("%s: failed to notify others for layout quit: rc = %d\n",
1258 lfsck_lfsck2name(lfsck), rc1);
1262 /* Under force exit case, some requests may be just freed without
1263 * verification, those objects should be re-handled when next run.
1264 * So not update the on-disk tracing file under such case. */
1265 if (!llmd->llmd_exit)
1266 rc1 = lfsck_layout_double_scan_result(env, com, rc);
1269 if (llmd->llmd_in_double_scan)
1270 atomic_dec(&lfsck->li_double_scan_count);
/* Publish the exit status and SVC_STOPPED before freeing the args. */
1272 spin_lock(&llmd->llmd_lock);
1273 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
1274 thread_set_flags(athread, SVC_STOPPED);
1275 wake_up_all(&mthread->t_ctl_waitq);
1276 spin_unlock(&llmd->llmd_lock);
1277 lfsck_thread_args_fini(lta);
/* Interpret callback for the slave's async LE_QUERY to a master. If the
 * master reports it is no longer scanning (or the RPC failed — assumed
 * master crash), remove that master's record from llsd_master_list. Always
 * releases the references taken in lfsck_layout_async_query. */
1283 lfsck_layout_slave_async_interpret(const struct lu_env *env,
1284 struct ptlrpc_request *req,
1287 struct lfsck_layout_slave_async_args *llsaa = args;
1288 struct obd_export *exp = llsaa->llsaa_exp;
1289 struct lfsck_component *com = llsaa->llsaa_com;
1290 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
1291 struct lfsck_layout_slave_data *llsd = com->lc_data;
1295 /* It is quite probably caused by target crash,
1296 * to make the LFSCK can go ahead, assume that
1297 * the target finished the LFSCK processing. */
1300 struct lfsck_reply *lr;
1302 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
1303 if (lr->lr_status != LS_SCANNING_PHASE1 &&
1304 lr->lr_status != LS_SCANNING_PHASE2)
1308 lfsck_layout_llst_del(llsd, llst);
/* Release the references held for the duration of the RPC. */
1309 lfsck_layout_llst_put(llst);
1310 lfsck_component_put(env, com);
1311 class_export_put(exp);
/* Queue an async LFSCK_QUERY RPC on @set. The request carries @lr in its
 * capsule and a lfsck_layout_slave_async_args context (taking a component
 * reference) that lfsck_layout_slave_async_interpret releases. The copy of
 * *lr into the capsule buffer is elided from this excerpt. */
1316 static int lfsck_layout_async_query(const struct lu_env *env,
1317 struct lfsck_component *com,
1318 struct obd_export *exp,
1319 struct lfsck_layout_slave_target *llst,
1320 struct lfsck_request *lr,
1321 struct ptlrpc_request_set *set)
1323 struct lfsck_layout_slave_async_args *llsaa;
1324 struct ptlrpc_request *req;
1325 struct lfsck_request *tmp;
1329 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
1333 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
1335 ptlrpc_request_free(req);
1339 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
1341 ptlrpc_request_set_replen(req);
/* Stash the interpret context in the request's async-args area. */
1343 llsaa = ptlrpc_req_async_args(req);
1344 llsaa->llsaa_exp = exp;
1345 llsaa->llsaa_com = lfsck_component_get(com);
1346 llsaa->llsaa_llst = llst;
1347 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
1348 ptlrpc_set_add_req(set, req);
/*
 * Pack and queue an async LFSCK_NOTIFY RPC (fire-and-forget, no interpret
 * callback) to @exp, adding it to @set.  @lr is copied into the request
 * buffer.  NOTE(review): alloc/pack error checks are elided in this
 * extract.
 */
1353 static int lfsck_layout_async_notify(const struct lu_env *env,
1354 struct obd_export *exp,
1355 struct lfsck_request *lr,
1356 struct ptlrpc_request_set *set)
1358 struct ptlrpc_request *req;
1359 struct lfsck_request *tmp;
1363 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
1367 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
/* pack failed: free the unsent request. */
1369 ptlrpc_request_free(req);
1373 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
1375 ptlrpc_request_set_replen(req);
1376 ptlrpc_set_add_req(set, req);
/*
 * Slave-side phase-2 helper: send an LE_QUERY to every master currently on
 * llsd_master_list and wait for all replies.  A per-scan generation counter
 * (llsd_touch_gen) marks already-visited entries so the move-to-tail walk
 * terminates even though entries are re-queued.  The list lock is dropped
 * around the RPC work and re-taken for each iteration.
 * NOTE(review): the return-type line, list_entry's member argument and
 * several error branches are elided in this extract.
 */
1382 lfsck_layout_slave_query_master(const struct lu_env *env,
1383 struct lfsck_component *com)
1385 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
1386 struct lfsck_instance *lfsck = com->lc_lfsck;
1387 struct lfsck_layout_slave_data *llsd = com->lc_data;
1388 struct lfsck_layout_slave_target *llst;
1389 struct obd_export *exp;
1390 struct ptlrpc_request_set *set;
1396 set = ptlrpc_prep_set();
/* Build the query once; it is reused for every master. */
1400 memset(lr, 0, sizeof(*lr));
1401 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1402 lr->lr_event = LE_QUERY;
1403 lr->lr_active = LT_LAYOUT;
1405 llsd->llsd_touch_gen++;
1406 spin_lock(&llsd->llsd_lock);
1407 while (!list_empty(&llsd->llsd_master_list)) {
1408 llst = list_entry(llsd->llsd_master_list.next,
1409 struct lfsck_layout_slave_target,
/* Head already carries the new generation: every entry was visited. */
1411 if (llst->llst_gen == llsd->llsd_touch_gen)
1414 llst->llst_gen = llsd->llsd_touch_gen;
/* Rotate the visited entry to the tail of the list. */
1415 list_del(&llst->llst_list);
1416 list_add_tail(&llst->llst_list,
1417 &llsd->llsd_master_list);
/* Hold a reference across the unlocked RPC section. */
1418 atomic_inc(&llst->llst_ref);
1419 spin_unlock(&llsd->llsd_lock);
1421 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* LWP lookup failed: this master is unreachable, forget it. */
1424 lfsck_layout_llst_del(llsd, llst);
1425 lfsck_layout_llst_put(llst);
1426 spin_lock(&llsd->llsd_lock);
1430 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
1432 CERROR("%s: slave fail to query %s for layout: "
1433 "rc = %d\n", lfsck_lfsck2name(lfsck),
1434 exp->exp_obd->obd_name, rc);
/* On queue failure, drop the references the RPC would have consumed. */
1436 lfsck_layout_llst_put(llst);
1437 class_export_put(exp);
1441 spin_lock(&llsd->llsd_lock);
1443 spin_unlock(&llsd->llsd_lock);
/* Wait for all queued queries, then destroy the set. */
1446 rc = ptlrpc_set_wait(set);
1447 ptlrpc_set_destroy(set);
1449 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * Slave-side helper: broadcast @event/@result (e.g. LE_PHASE1_DONE) to
 * every master on llsd_master_list.  Structure mirrors
 * lfsck_layout_slave_query_master(): generation-marked move-to-tail walk
 * with the list lock dropped around RPC work.  NOTE(review): return type
 * and some branches are elided in this extract.
 */
1453 lfsck_layout_slave_notify_master(const struct lu_env *env,
1454 struct lfsck_component *com,
1455 enum lfsck_events event, int result)
1457 struct lfsck_instance *lfsck = com->lc_lfsck;
1458 struct lfsck_layout_slave_data *llsd = com->lc_data;
1459 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
1460 struct lfsck_layout_slave_target *llst;
1461 struct obd_export *exp;
1462 struct ptlrpc_request_set *set;
1467 set = ptlrpc_prep_set();
/* One notification body, reused for all masters. */
1471 memset(lr, 0, sizeof(*lr));
1472 lr->lr_event = event;
1473 lr->lr_status = result;
1474 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1475 lr->lr_active = LT_LAYOUT;
1476 llsd->llsd_touch_gen++;
1477 spin_lock(&llsd->llsd_lock);
1478 while (!list_empty(&llsd->llsd_master_list)) {
1479 llst = list_entry(llsd->llsd_master_list.next,
1480 struct lfsck_layout_slave_target,
/* Generation already current: full pass complete. */
1482 if (llst->llst_gen == llsd->llsd_touch_gen)
1485 llst->llst_gen = llsd->llsd_touch_gen;
1486 list_del(&llst->llst_list);
1487 list_add_tail(&llst->llst_list,
1488 &llsd->llsd_master_list);
/* Pin the entry while the lock is dropped. */
1489 atomic_inc(&llst->llst_ref);
1490 spin_unlock(&llsd->llsd_lock);
1492 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP export for this master: drop it from the list. */
1495 lfsck_layout_llst_del(llsd, llst);
1496 lfsck_layout_llst_put(llst);
1497 spin_lock(&llsd->llsd_lock);
1501 rc = lfsck_layout_async_notify(env, exp, lr, set);
1503 CERROR("%s: slave fail to notify %s for layout: "
1504 "rc = %d\n", lfsck_lfsck2name(lfsck),
1505 exp->exp_obd->obd_name, rc);
/* Notify is fire-and-forget: release refs unconditionally here. */
1508 lfsck_layout_llst_put(llst);
1509 class_export_put(exp);
1510 spin_lock(&llsd->llsd_lock);
1512 spin_unlock(&llsd->llsd_lock);
1515 rc = ptlrpc_set_wait(set);
1517 ptlrpc_set_destroy(set);
/*
 * Reset the in-memory layout trace file and persist it.  With @init the
 * whole record is wiped; otherwise the historical success count and last
 * completion time are preserved across the reset.  The component returns
 * to LS_INIT in both cases.  NOTE(review): the if/else lines selecting
 * between the two memset branches are elided in this extract.
 */
1524 static int lfsck_layout_reset(const struct lu_env *env,
1525 struct lfsck_component *com, bool init)
1527 struct lfsck_layout *lo = com->lc_file_ram;
1530 down_write(&com->lc_sem);
/* init case: zero everything. */
1532 memset(lo, 0, com->lc_file_size);
/* non-init case: save statistics, zero, then restore them. */
1534 __u32 count = lo->ll_success_count;
1535 __u64 last_time = lo->ll_time_last_complete;
1537 memset(lo, 0, com->lc_file_size);
1538 lo->ll_success_count = count;
1539 lo->ll_time_last_complete = last_time;
1542 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
1543 lo->ll_status = LS_INIT;
1545 rc = lfsck_layout_store(env, com);
1546 up_write(&com->lc_sem);
/*
 * Account one phase-1 failure.  If this is the first inconsistency seen,
 * record the current otable-iterator cookie so a later dryrun re-scan can
 * restart from it.  @new_checked also bumps the new-checked counter.
 */
1551 static void lfsck_layout_fail(const struct lu_env *env,
1552 struct lfsck_component *com, bool new_checked)
1554 struct lfsck_layout *lo = com->lc_file_ram;
1556 down_write(&com->lc_sem);
1558 com->lc_new_checked++;
1559 lo->ll_objs_failed_phase1++;
/* First failure: remember where it happened. */
1560 if (lo->ll_pos_first_inconsistent == 0) {
1561 struct lfsck_instance *lfsck = com->lc_lfsck;
1563 lo->ll_pos_first_inconsistent =
1564 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
1567 up_write(&com->lc_sem);
/*
 * Master checkpoint: wait until the assistant pipeline is drained (request
 * list empty and no RPCs in flight), then snapshot position/time/counters
 * into the trace file and store it.  Skipped when nothing new was checked
 * and this is not the initial checkpoint.  NOTE(review): the init/else
 * branch lines around the position updates are elided in this extract.
 */
1570 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
1571 struct lfsck_component *com, bool init)
1573 struct lfsck_instance *lfsck = com->lc_lfsck;
1574 struct lfsck_layout *lo = com->lc_file_ram;
1575 struct lfsck_layout_master_data *llmd = com->lc_data;
1576 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1577 struct ptlrpc_thread *athread = &llmd->llmd_thread;
1578 struct l_wait_info lwi = { 0 };
1581 if (com->lc_new_checked == 0 && !init)
/* Drain the assistant: no queued requests and no in-flight RPCs, unless
 * either thread is going away. */
1584 l_wait_event(mthread->t_ctl_waitq,
1585 (list_empty(&llmd->llmd_req_list) &&
1586 atomic_read(&llmd->llmd_rpcs_in_flight) == 0) ||
1587 !thread_is_running(mthread) ||
1588 thread_is_stopped(athread),
1591 if (!thread_is_running(mthread) || thread_is_stopped(athread))
1594 down_write(&com->lc_sem);
/* init: record latest start; otherwise advance the checkpoint cursor. */
1596 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
1598 lo->ll_pos_last_checkpoint =
1599 lfsck->li_pos_current.lp_oit_cookie;
1600 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
1601 HALF_SEC - lfsck->li_time_last_checkpoint);
1602 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1603 lo->ll_objs_checked_phase1 += com->lc_new_checked;
1604 com->lc_new_checked = 0;
1607 rc = lfsck_layout_store(env, com);
1608 up_write(&com->lc_sem);
/*
 * Slave checkpoint: same bookkeeping as the master variant but with no
 * assistant pipeline to drain.  Skipped when nothing new was checked and
 * this is not the initial checkpoint.  NOTE(review): the init/else branch
 * lines are elided in this extract.
 */
1613 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
1614 struct lfsck_component *com, bool init)
1616 struct lfsck_instance *lfsck = com->lc_lfsck;
1617 struct lfsck_layout *lo = com->lc_file_ram;
1620 if (com->lc_new_checked == 0 && !init)
1623 down_write(&com->lc_sem);
1626 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
1628 lo->ll_pos_last_checkpoint =
1629 lfsck->li_pos_current.lp_oit_cookie;
1630 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
1631 HALF_SEC - lfsck->li_time_last_checkpoint);
1632 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1633 lo->ll_objs_checked_phase1 += com->lc_new_checked;
1634 com->lc_new_checked = 0;
1637 rc = lfsck_layout_store(env, com);
1639 up_write(&com->lc_sem);
/*
 * Common prep for master and slave: decide the scan phase and the start
 * position.  A completed/partial previous run is reset first.  If the
 * component was already scanned once (LF_SCANNED_ONCE) it normally goes
 * straight to phase 2, unless a dryrun result is being dropped, in which
 * case counters are cleared and phase 1 restarts from the first recorded
 * inconsistency.  NOTE(review): several brace/else lines are elided in
 * this extract.
 */
1644 static int lfsck_layout_prep(const struct lu_env *env,
1645 struct lfsck_component *com)
1647 struct lfsck_instance *lfsck = com->lc_lfsck;
1648 struct lfsck_layout *lo = com->lc_file_ram;
1649 struct lfsck_position *pos = &com->lc_pos_start;
1651 fid_zero(&pos->lp_dir_parent);
1652 pos->lp_dir_cookie = 0;
/* Previous run finished: start fresh (keep historical stats). */
1653 if (lo->ll_status == LS_COMPLETED ||
1654 lo->ll_status == LS_PARTIAL) {
1657 rc = lfsck_layout_reset(env, com, false);
1662 down_write(&com->lc_sem);
1664 lo->ll_time_latest_start = cfs_time_current_sec();
1666 spin_lock(&lfsck->li_lock);
1667 if (lo->ll_flags & LF_SCANNED_ONCE) {
/* Phase 1 done before: go to phase 2 unless re-running a dryrun. */
1668 if (!lfsck->li_drop_dryrun ||
1669 lo->ll_pos_first_inconsistent == 0) {
1670 lo->ll_status = LS_SCANNING_PHASE2;
1671 list_del_init(&com->lc_link);
1672 list_add_tail(&com->lc_link,
1673 &lfsck->li_list_double_scan);
1674 pos->lp_oit_cookie = 0;
/* Dropping dryrun results: restart phase 1 with zeroed counters. */
1678 lo->ll_status = LS_SCANNING_PHASE1;
1679 lo->ll_run_time_phase1 = 0;
1680 lo->ll_run_time_phase2 = 0;
1681 lo->ll_objs_checked_phase1 = 0;
1682 lo->ll_objs_checked_phase2 = 0;
1683 lo->ll_objs_failed_phase1 = 0;
1684 lo->ll_objs_failed_phase2 = 0;
1685 for (i = 0; i < LLIT_MAX; i++)
1686 lo->ll_objs_repaired[i] = 0;
1688 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
/* Never scanned fully: resume phase 1 after the last checkpoint, or at
 * the first inconsistency when dropping a dryrun. */
1691 lo->ll_status = LS_SCANNING_PHASE1;
1692 if (!lfsck->li_drop_dryrun ||
1693 lo->ll_pos_first_inconsistent == 0)
1694 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
1696 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
1698 spin_unlock(&lfsck->li_lock);
1700 up_write(&com->lc_sem);
/*
 * Slave prep: run the common prep, and if phase-1 scanning starts with a
 * valid initiator index, register that master on llsd_master_list so the
 * slave can report back to it later.
 */
1705 static int lfsck_layout_slave_prep(const struct lu_env *env,
1706 struct lfsck_component *com,
1707 struct lfsck_start_param *lsp)
1709 struct lfsck_layout *lo = com->lc_file_ram;
1710 struct lfsck_layout_slave_data *llsd = com->lc_data;
1713 /* XXX: For a new scanning, generate OST-objects
1714 * bitmap for orphan detection. */
1716 rc = lfsck_layout_prep(env, com);
1717 if (rc != 0 || lo->ll_status != LS_SCANNING_PHASE1 ||
1718 !lsp->lsp_index_valid)
1721 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
/*
 * Master prep: common prep, then reset the assistant state flags and
 * start the lfsck_layout assistant kthread, waiting until it is running
 * (or has already stopped, in which case its status is returned).
 * NOTE(review): the early-return after lfsck_layout_prep and the IS_ERR
 * check on lta are elided in this extract.
 */
1726 static int lfsck_layout_master_prep(const struct lu_env *env,
1727 struct lfsck_component *com,
1728 struct lfsck_start_param *lsp)
1730 struct lfsck_instance *lfsck = com->lc_lfsck;
1731 struct lfsck_layout_master_data *llmd = com->lc_data;
1732 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1733 struct ptlrpc_thread *athread = &llmd->llmd_thread;
1734 struct lfsck_thread_args *lta;
1738 rc = lfsck_layout_prep(env, com);
/* Clear all assistant-coordination state before (re)starting it. */
1742 llmd->llmd_assistant_status = 0;
1743 llmd->llmd_post_result = 0;
1744 llmd->llmd_to_post = 0;
1745 llmd->llmd_to_double_scan = 0;
1746 llmd->llmd_in_double_scan = 0;
1747 llmd->llmd_exit = 0;
1748 thread_set_flags(athread, 0);
1750 lta = lfsck_thread_args_init(lfsck, com, lsp);
1752 RETURN(PTR_ERR(lta));
1754 rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
1755 if (IS_ERR_VALUE(rc)) {
1756 CERROR("%s: Cannot start LFSCK layout assistant thread: "
1757 "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
1758 lfsck_thread_args_fini(lta);
1760 struct l_wait_info lwi = { 0 };
/* Wait for the assistant to come up (or die immediately). */
1762 l_wait_event(mthread->t_ctl_waitq,
1763 thread_is_running(athread) ||
1764 thread_is_stopped(athread),
1766 if (unlikely(!thread_is_running(athread)))
1767 rc = llmd->llmd_assistant_status;
/*
 * Master per-object hook for the otable-based iteration.  Currently a
 * placeholder: the real work (read layout EA, queue lfsck_layout_req
 * entries for the assistant) is described in the XXX comment below and
 * implemented by later patches.
 */
1775 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
1776 struct lfsck_component *com,
1777 struct dt_object *obj)
1779 /* XXX: To be implemented in other patches.
1781 * For the given object, read its layout EA locally. For each stripe,
1782 * pre-fetch the OST-object's attribute and generate an structure
1783 * lfsck_layout_req on the list ::llmd_req_list.
1785 * For each request on the ::llmd_req_list, the lfsck_layout_assistant
1786 * thread will compare the OST side attribute with local attribute,
1787 * if inconsistent, then repair it.
1789 * All above processing is async mode with pipeline. */
/*
 * Slave per-object hook: track the highest object id seen per sequence so
 * LAST_ID files can be verified/rebuilt.  Non-OST-object FIDs are skipped.
 * If an object id exceeds the recorded LAST_ID, the LAST_ID is reloaded
 * (OFD may create objects during the scan); if it is still behind, the
 * LAST_ID is considered crashed and a rebuild is announced via
 * li_out_notify.  NOTE(review): seq/oid extraction lines and several
 * braces are elided in this extract.
 */
1794 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
1795 struct lfsck_component *com,
1796 struct dt_object *obj)
1798 struct lfsck_instance *lfsck = com->lc_lfsck;
1799 struct lfsck_layout *lo = com->lc_file_ram;
1800 const struct lu_fid *fid = lfsck_dto2fid(obj);
1801 struct lfsck_layout_slave_data *llsd = com->lc_data;
1802 struct lfsck_layout_seq *lls;
1808 /* XXX: Update OST-objects bitmap for orphan detection. */
1810 LASSERT(llsd != NULL);
1812 down_write(&com->lc_sem);
/* Only IDIF and normal OST-object FIDs are interesting here. */
1813 if (fid_is_idif(fid))
1815 else if (!fid_is_norm(fid) ||
1816 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
1817 GOTO(unlock, rc = 0);
1820 com->lc_new_checked++;
1822 lls = lfsck_layout_seq_lookup(llsd, seq);
/* First object of this sequence: allocate and load its LAST_ID. */
1825 if (unlikely(lls == NULL))
1826 GOTO(unlock, rc = -ENOMEM);
1828 INIT_LIST_HEAD(&lls->lls_list);
1830 rc = lfsck_layout_lastid_load(env, com, lls);
1832 lo->ll_objs_failed_phase1++;
1837 lfsck_layout_seq_insert(llsd, lls);
/* The LAST_ID object itself carries no object id to track. */
1840 if (unlikely(fid_is_last_id(fid)))
1841 GOTO(unlock, rc = 0);
1844 if (oid > lls->lls_lastid_known)
1845 lls->lls_lastid_known = oid;
1847 if (oid > lls->lls_lastid) {
1848 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1849 /* OFD may create new objects during LFSCK scanning. */
1850 rc = lfsck_layout_lastid_reload(env, com, lls);
1851 if (unlikely(rc != 0))
1852 CWARN("%s: failed to reload LAST_ID for "LPX64
1854 lfsck_lfsck2name(com->lc_lfsck),
1856 if (oid <= lls->lls_lastid)
1857 GOTO(unlock, rc = 0);
1859 LASSERT(lfsck->li_out_notify != NULL);
/* LAST_ID lags behind real objects: flag it crashed and tell OFD. */
1861 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1862 LE_LASTID_REBUILDING);
1863 lo->ll_flags |= LF_CRASHED_LASTID;
1866 lls->lls_lastid = oid;
1870 GOTO(unlock, rc = 0);
1873 up_write(&com->lc_sem);
/*
 * Directory-entry hook shared by master and slave.  Layout LFSCK is
 * driven by the otable iteration, so directory traversal is a no-op here
 * (body elided in this extract — presumably returns 0; confirm against
 * the full source).
 */
1878 static int lfsck_layout_exec_dir(const struct lu_env *env,
1879 struct lfsck_component *com,
1880 struct dt_object *obj,
1881 struct lu_dirent *ent)
/*
 * Master post-scan: hand the phase-1 result to the assistant, wait for its
 * pipeline to drain (or the assistant to stop), then move the component to
 * the proper list and persist the final phase-1 state.  result > 0 means
 * phase 1 succeeded and phase 2 follows; result == 0 maps to the
 * instance-level stop status; result < 0 marks the run failed.
 * NOTE(review): some brace/else lines are elided in this extract.
 */
1886 static int lfsck_layout_master_post(const struct lu_env *env,
1887 struct lfsck_component *com,
1888 int result, bool init)
1890 struct lfsck_instance *lfsck = com->lc_lfsck;
1891 struct lfsck_layout *lo = com->lc_file_ram;
1892 struct lfsck_layout_master_data *llmd = com->lc_data;
1893 struct ptlrpc_thread *mthread = &lfsck->li_thread;
1894 struct ptlrpc_thread *athread = &llmd->llmd_thread;
1895 struct l_wait_info lwi = { 0 };
1900 llmd->llmd_post_result = result;
1901 llmd->llmd_to_post = 1;
/* A non-positive result aborts the assistant outright. */
1902 if (llmd->llmd_post_result <= 0)
1903 llmd->llmd_exit = 1;
1905 wake_up_all(&athread->t_ctl_waitq);
1906 l_wait_event(mthread->t_ctl_waitq,
1907 (result > 0 && list_empty(&llmd->llmd_req_list) &&
1908 atomic_read(&llmd->llmd_rpcs_in_flight) == 0) ||
1909 thread_is_stopped(athread),
/* Assistant failure overrides the caller's result. */
1912 if (llmd->llmd_assistant_status < 0)
1913 result = llmd->llmd_assistant_status;
1915 down_write(&com->lc_sem);
1916 spin_lock(&lfsck->li_lock);
1917 /* When LFSCK failed, there may be some prefetched objects those are
1918 * not been processed yet, we do not know the exactly position, then
1919 * just restart from last check-point next time. */
1920 if (!init && !llmd->llmd_exit)
1921 lo->ll_pos_last_checkpoint =
1922 lfsck->li_pos_current.lp_oit_cookie;
/* result > 0: phase 1 done, queue for double scan. */
1925 lo->ll_status = LS_SCANNING_PHASE2;
1926 lo->ll_flags |= LF_SCANNED_ONCE;
1927 lo->ll_flags &= ~LF_UPGRADE;
1928 list_del_init(&com->lc_link);
1929 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
1930 } else if (result == 0) {
1931 lo->ll_status = lfsck->li_status;
1932 if (lo->ll_status == 0)
1933 lo->ll_status = LS_STOPPED;
1934 if (lo->ll_status != LS_PAUSED) {
1935 list_del_init(&com->lc_link);
1936 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
/* result < 0: record the failure and idle the component. */
1939 lo->ll_status = LS_FAILED;
1940 list_del_init(&com->lc_link);
1941 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
1943 spin_unlock(&lfsck->li_lock);
1946 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
1947 HALF_SEC - lfsck->li_time_last_checkpoint);
1948 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1949 lo->ll_objs_checked_phase1 += com->lc_new_checked;
1950 com->lc_new_checked = 0;
1953 rc = lfsck_layout_store(env, com);
1954 up_write(&com->lc_sem);
/*
 * Slave post-scan: flush LAST_ID files, update the trace-file state per
 * @result (same three-way mapping as the master variant), clear the
 * crashed-LAST_ID flag when phase 1 succeeded, and finally notify all
 * registered masters with LE_PHASE1_DONE.  NOTE(review): some brace/else
 * lines are elided in this extract.
 */
1959 static int lfsck_layout_slave_post(const struct lu_env *env,
1960 struct lfsck_component *com,
1961 int result, bool init)
1963 struct lfsck_instance *lfsck = com->lc_lfsck;
1964 struct lfsck_layout *lo = com->lc_file_ram;
/* Persist any dirty LAST_ID files before deciding the final state. */
1968 rc = lfsck_layout_lastid_store(env, com);
1972 LASSERT(lfsck->li_out_notify != NULL);
1974 down_write(&com->lc_sem);
1976 spin_lock(&lfsck->li_lock);
1978 lo->ll_pos_last_checkpoint =
1979 lfsck->li_pos_current.lp_oit_cookie;
/* Phase 1 succeeded: move on to phase 2, drop the crashed-LAST_ID flag. */
1981 lo->ll_status = LS_SCANNING_PHASE2;
1982 lo->ll_flags |= LF_SCANNED_ONCE;
1983 if (lo->ll_flags & LF_CRASHED_LASTID) {
1985 lo->ll_flags &= ~LF_CRASHED_LASTID;
1987 lo->ll_flags &= ~LF_UPGRADE;
1988 list_del_init(&com->lc_link);
1989 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
1990 } else if (result == 0) {
1991 lo->ll_status = lfsck->li_status;
1992 if (lo->ll_status == 0)
1993 lo->ll_status = LS_STOPPED;
1994 if (lo->ll_status != LS_PAUSED) {
1995 list_del_init(&com->lc_link);
1996 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
1999 lo->ll_status = LS_FAILED;
2000 list_del_init(&com->lc_link);
2001 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
2003 spin_unlock(&lfsck->li_lock);
/* Tell OFD the LAST_ID rebuild state changed (event elided here). */
2006 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
2010 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2011 HALF_SEC - lfsck->li_time_last_checkpoint);
2012 lo->ll_time_last_checkpoint = cfs_time_current_sec();
2013 lo->ll_objs_checked_phase1 += com->lc_new_checked;
2014 com->lc_new_checked = 0;
2017 rc = lfsck_layout_store(env, com);
2019 up_write(&com->lc_sem);
/* Report phase-1 completion (or failure) to every registered master. */
2021 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
/*
 * Render the layout LFSCK state into @buf (for procfs).  Sections:
 * header/status, flags, params, timestamps, positions, repair statistics,
 * then phase-dependent speed figures.  Holds lc_sem for read throughout.
 * NOTE(review): buffer-advance arithmetic and several GOTO/brace lines
 * are elided in this extract.
 */
2026 static int lfsck_layout_dump(const struct lu_env *env,
2027 struct lfsck_component *com, char *buf, int len)
2029 struct lfsck_instance *lfsck = com->lc_lfsck;
2030 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2031 struct lfsck_layout *lo = com->lc_file_ram;
2036 down_read(&com->lc_sem);
2037 rc = snprintf(buf, len,
2038 "name: lfsck_layout\n"
2044 lfsck_status2names(lo->ll_status));
2050 rc = lfsck_bits_dump(&buf, &len, lo->ll_flags, lfsck_flags_names,
2055 rc = lfsck_bits_dump(&buf, &len, bk->lb_param, lfsck_param_names,
2060 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_complete,
2061 "time_since_last_completed");
2065 rc = lfsck_time_dump(&buf, &len, lo->ll_time_latest_start,
2066 "time_since_latest_start");
2070 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_checkpoint,
2071 "time_since_last_checkpoint");
2075 rc = snprintf(buf, len,
2076 "latest_start_position: "LPU64"\n"
2077 "last_checkpoint_position: "LPU64"\n"
2078 "first_failure_position: "LPU64"\n",
2079 lo->ll_pos_latest_start,
2080 lo->ll_pos_last_checkpoint,
2081 lo->ll_pos_first_inconsistent);
2088 rc = snprintf(buf, len,
2089 "success_count: %u\n"
2090 "repaired_dangling: "LPU64"\n"
2091 "repaired_unmatched_pair: "LPU64"\n"
2092 "repaired_multiple_referenced: "LPU64"\n"
2093 "repaired_orphan: "LPU64"\n"
2094 "repaired_inconsistent_owner: "LPU64"\n"
2095 "repaired_others: "LPU64"\n"
2096 "skipped: "LPU64"\n"
2097 "failed_phase1: "LPU64"\n"
2098 "failed_phase2: "LPU64"\n",
2099 lo->ll_success_count,
2100 lo->ll_objs_repaired[LLIT_DANGLING - 1],
2101 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
2102 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
2103 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
2104 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
2105 lo->ll_objs_repaired[LLIT_OTHERS - 1],
2106 lo->ll_objs_skipped,
2107 lo->ll_objs_failed_phase1,
2108 lo->ll_objs_failed_phase2);
/* Live phase-1 scan: compute average and real-time speeds. */
2115 if (lo->ll_status == LS_SCANNING_PHASE1) {
2117 const struct dt_it_ops *iops;
2118 cfs_duration_t duration = cfs_time_current() -
2119 lfsck->li_time_last_checkpoint;
2120 __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
2121 __u64 speed = checked;
2122 __u64 new_checked = com->lc_new_checked * HZ;
2123 __u32 rtime = lo->ll_run_time_phase1 +
2124 cfs_duration_sec(duration + HALF_SEC);
2127 do_div(new_checked, duration);
2129 do_div(speed, rtime);
2130 rc = snprintf(buf, len,
2131 "checked_phase1: "LPU64"\n"
2132 "checked_phase2: "LPU64"\n"
2133 "run_time_phase1: %u seconds\n"
2134 "run_time_phase2: %u seconds\n"
2135 "average_speed_phase1: "LPU64" items/sec\n"
2136 "average_speed_phase2: N/A\n"
2137 "real-time_speed_phase1: "LPU64" items/sec\n"
2138 "real-time_speed_phase2: N/A\n",
2140 lo->ll_objs_checked_phase2,
2142 lo->ll_run_time_phase2,
2151 LASSERT(lfsck->li_di_oit != NULL);
2153 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
2155 /* The low layer otable-based iteration position may NOT
2156 * exactly match the layout-based directory traversal
2157 * cookie. Generally, it is not a serious issue. But the
2158 * caller should NOT make assumption on that. */
2159 pos = iops->store(env, lfsck->li_di_oit);
2160 if (!lfsck->li_current_oit_processed)
2162 rc = snprintf(buf, len, "current_position: "LPU64"\n", pos);
/* Not scanning: report historical averages only. */
2169 /* XXX: LS_SCANNING_PHASE2 will be handled in the future. */
2170 __u64 speed1 = lo->ll_objs_checked_phase1;
2171 __u64 speed2 = lo->ll_objs_checked_phase2;
2173 if (lo->ll_run_time_phase1 != 0)
2174 do_div(speed1, lo->ll_run_time_phase1);
2175 if (lo->ll_run_time_phase2 != 0)
2176 do_div(speed2, lo->ll_run_time_phase2);
2177 rc = snprintf(buf, len,
2178 "checked_phase1: "LPU64"\n"
2179 "checked_phase2: "LPU64"\n"
2180 "run_time_phase1: %u seconds\n"
2181 "run_time_phase2: %u seconds\n"
2182 "average_speed_phase1: "LPU64" items/sec\n"
2183 "average_speed_phase2: "LPU64" objs/sec\n"
2184 "real-time_speed_phase1: N/A\n"
2185 "real-time_speed_phase2: N/A\n"
2186 "current_position: N/A\n",
2187 lo->ll_objs_checked_phase1,
2188 lo->ll_objs_checked_phase2,
2189 lo->ll_run_time_phase1,
2190 lo->ll_run_time_phase2,
2202 up_read(&com->lc_sem);
/*
 * Master double-scan entry: if phase 2 is due, ask the assistant to start
 * it and wait until the assistant is in double scan (or has stopped).
 * Returns the assistant's error status if it failed.
 */
2207 static int lfsck_layout_master_double_scan(const struct lu_env *env,
2208 struct lfsck_component *com)
2210 struct lfsck_layout_master_data *llmd = com->lc_data;
2211 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
2212 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2213 struct lfsck_layout *lo = com->lc_file_ram;
2214 struct l_wait_info lwi = { 0 };
2216 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
2219 llmd->llmd_to_double_scan = 1;
2220 wake_up_all(&athread->t_ctl_waitq);
2221 l_wait_event(mthread->t_ctl_waitq,
2222 llmd->llmd_in_double_scan ||
2223 thread_is_stopped(athread),
2225 if (llmd->llmd_assistant_status < 0)
2226 return llmd->llmd_assistant_status;
/*
 * Slave double-scan: poll the masters every 30s (LE_QUERY) until the
 * master list empties (all masters done or unreachable) or the local
 * thread stops.  The double-scan count on the instance gates instance
 * shutdown.  NOTE(review): loop braces and some GOTO targets are elided
 * in this extract.
 */
2231 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
2232 struct lfsck_component *com)
2234 struct lfsck_instance *lfsck = com->lc_lfsck;
2235 struct lfsck_layout_slave_data *llsd = com->lc_data;
2236 struct lfsck_layout *lo = com->lc_file_ram;
2237 struct ptlrpc_thread *thread = &lfsck->li_thread;
2241 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
2244 atomic_inc(&lfsck->li_double_scan_count);
2246 com->lc_new_checked = 0;
2247 com->lc_new_scanned = 0;
2248 com->lc_time_last_checkpoint = cfs_time_current();
2249 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
2250 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Poll loop: query masters, then sleep up to 30s or until woken. */
2253 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
2256 rc = lfsck_layout_slave_query_master(env, com);
2257 if (list_empty(&llsd->llsd_master_list)) {
2258 if (unlikely(!thread_is_running(thread)))
2269 rc = l_wait_event(thread->t_ctl_waitq,
2270 !thread_is_running(thread) ||
2271 list_empty(&llsd->llsd_master_list),
2273 if (unlikely(!thread_is_running(thread)))
/* Timeout just re-queries; any other outcome ends the loop. */
2276 if (rc == -ETIMEDOUT)
2279 GOTO(done, rc = (rc < 0 ? rc : 1));
2283 rc = lfsck_layout_double_scan_result(env, com, rc);
/* Last double-scanner out wakes the main LFSCK thread. */
2285 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
2286 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/*
 * Free the master-side private data.  Requires the assistant to be fully
 * stopped (asserted) with an empty pipeline; unlinks every OST target
 * from the three per-phase lists under the target-table lock.
 * NOTE(review): the trailing OBD_FREE of llmd is elided in this extract.
 */
2291 static void lfsck_layout_master_data_release(const struct lu_env *env,
2292 struct lfsck_component *com)
2294 struct lfsck_layout_master_data *llmd = com->lc_data;
2295 struct lfsck_instance *lfsck = com->lc_lfsck;
2296 struct lfsck_tgt_descs *ltds;
2297 struct lfsck_tgt_desc *ltd;
2298 struct lfsck_tgt_desc *next;
2300 LASSERT(llmd != NULL);
2301 LASSERT(thread_is_init(&llmd->llmd_thread) ||
2302 thread_is_stopped(&llmd->llmd_thread));
2303 LASSERT(list_empty(&llmd->llmd_req_list));
2304 LASSERT(atomic_read(&llmd->llmd_rpcs_in_flight) == 0);
2306 com->lc_data = NULL;
2308 ltds = &lfsck->li_ost_descs;
2309 spin_lock(&ltds->ltd_lock);
2310 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
2311 ltd_layout_phase_list) {
2312 list_del_init(&ltd->ltd_layout_phase_list);
2314 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
2315 ltd_layout_phase_list) {
2316 list_del_init(&ltd->ltd_layout_phase_list);
2318 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
2320 list_del_init(&ltd->ltd_layout_list);
2322 spin_unlock(&ltds->ltd_lock);
/*
 * Free the slave-side private data: release every cached sequence entry
 * (dropping its LAST_ID object reference) and every registered master
 * target.  NOTE(review): the per-entry OBD_FREE calls and the final free
 * of llsd are elided in this extract.
 */
2327 static void lfsck_layout_slave_data_release(const struct lu_env *env,
2328 struct lfsck_component *com)
2330 struct lfsck_layout_slave_data *llsd = com->lc_data;
2331 struct lfsck_layout_seq *lls;
2332 struct lfsck_layout_seq *next;
2333 struct lfsck_layout_slave_target *llst;
2334 struct lfsck_layout_slave_target *tmp;
2336 LASSERT(llsd != NULL);
2338 com->lc_data = NULL;
2340 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
2342 list_del_init(&lls->lls_list);
2343 lfsck_object_put(env, lls->lls_lastid_obj);
2347 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
2349 list_del_init(&llst->llst_list);
/*
 * Force the assistant thread to exit and wait until it is back in init
 * state or fully stopped.  Queued-but-unverified requests are discarded
 * by the assistant on this path.
 */
2356 static void lfsck_layout_master_quit(const struct lu_env *env,
2357 struct lfsck_component *com)
2359 struct lfsck_layout_master_data *llmd = com->lc_data;
2360 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
2361 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2362 struct l_wait_info lwi = { 0 };
2364 llmd->llmd_exit = 1;
2365 wake_up_all(&athread->t_ctl_waitq);
2366 l_wait_event(mthread->t_ctl_waitq,
2367 thread_is_init(athread) ||
2368 thread_is_stopped(athread),
/*
 * Master handler for an incoming LE_PHASE1_DONE from an OST.  On success
 * (lr_status > 0) the target moves to the phase-2 list; on failure it is
 * dropped and the whole run is marked LF_INCOMPLETE.  Wakes the assistant
 * if all targets have now reached phase 2 (orphan handling can start).
 * NOTE(review): the ltd == NULL branch is elided in this extract.
 */
2372 static int lfsck_layout_master_in_notify(const struct lu_env *env,
2373 struct lfsck_component *com,
2374 struct lfsck_request *lr)
2376 struct lfsck_instance *lfsck = com->lc_lfsck;
2377 struct lfsck_layout *lo = com->lc_file_ram;
2378 struct lfsck_layout_master_data *llmd = com->lc_data;
2379 struct lfsck_tgt_descs *ltds;
2380 struct lfsck_tgt_desc *ltd;
/* Only phase-1 completion events are handled here. */
2383 if (lr->lr_event != LE_PHASE1_DONE)
2386 ltds = &lfsck->li_ost_descs;
2387 spin_lock(&ltds->ltd_lock);
2388 ltd = LTD_TGT(ltds, lr->lr_index);
2390 spin_unlock(&ltds->ltd_lock);
2395 list_del_init(&ltd->ltd_layout_phase_list);
2396 if (lr->lr_status > 0) {
2397 if (list_empty(&ltd->ltd_layout_list))
2398 list_add_tail(&ltd->ltd_layout_list,
2399 &llmd->llmd_ost_list);
2400 list_add_tail(&ltd->ltd_layout_phase_list,
2401 &llmd->llmd_ost_phase2_list);
/* Target failed phase 1: the overall result can only be partial. */
2403 ltd->ltd_layout_done = 1;
2404 list_del_init(&ltd->ltd_layout_list);
2405 lo->ll_flags |= LF_INCOMPLETE;
2407 spin_unlock(&ltds->ltd_lock);
2409 if (lfsck_layout_master_to_orphan(llmd))
2410 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Slave handler for LE_PHASE2_DONE / LE_STOP from a master: remove that
 * master from the tracking list.  Once the list is empty, either wake the
 * local thread (phase-2 done) or propagate the stop to the local LFSCK
 * instance with the master's status/flags.  NOTE(review): the llst == NULL
 * branch and the switch's closing lines are elided in this extract.
 */
2415 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
2416 struct lfsck_component *com,
2417 struct lfsck_request *lr)
2419 struct lfsck_instance *lfsck = com->lc_lfsck;
2420 struct lfsck_layout_slave_data *llsd = com->lc_data;
2421 struct lfsck_layout_slave_target *llst;
2424 if (lr->lr_event != LE_PHASE2_DONE &&
2425 lr->lr_event != LE_STOP)
2428 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index);
2432 lfsck_layout_llst_put(llst);
2433 if (list_empty(&llsd->llsd_master_list)) {
2434 switch (lr->lr_event) {
2435 case LE_PHASE2_DONE:
2436 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/* LE_STOP: forward the stop request to the local instance. */
2439 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
2441 memset(stop, 0, sizeof(*stop));
2442 stop->ls_status = lr->lr_status;
2443 stop->ls_flags = lr->lr_param;
2444 lfsck_stop(env, lfsck->li_bottom, stop);
/* Return the component's current status (trace-file ll_status) for the
 * LFSCK_QUERY RPC.  Shared by master and slave operation tables. */
2455 static int lfsck_layout_query(const struct lu_env *env,
2456 struct lfsck_component *com)
2458 struct lfsck_layout *lo = com->lc_file_ram;
2460 return lo->ll_status;
/*
 * Master -> single OST stop notification: build an LE_STOP/LS_CO_STOPPED
 * request for @ltd (which must already be off the layout lists) and queue
 * it on @set via the generic async-request helper.  NOTE(review): some
 * laia field assignments are elided in this extract.
 */
2463 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
2464 struct lfsck_component *com,
2465 struct lfsck_tgt_descs *ltds,
2466 struct lfsck_tgt_desc *ltd,
2467 struct ptlrpc_request_set *set)
2469 struct lfsck_thread_info *info = lfsck_env_info(env);
2470 struct lfsck_async_interpret_args *laia = &info->lti_laia;
2471 struct lfsck_request *lr = &info->lti_lr;
2472 struct lfsck_instance *lfsck = com->lc_lfsck;
2475 LASSERT(list_empty(&ltd->ltd_layout_list));
2476 LASSERT(list_empty(&ltd->ltd_layout_phase_list));
2478 memset(lr, 0, sizeof(*lr));
2479 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
2480 lr->lr_event = LE_STOP;
2481 lr->lr_active = LT_LAYOUT;
2482 lr->lr_status = LS_CO_STOPPED;
2484 laia->laia_com = com;
2485 laia->laia_ltds = ltds;
2486 laia->laia_ltd = ltd;
2489 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
2490 lfsck_layout_master_async_interpret,
2491 laia, LFSCK_NOTIFY);
2493 CERROR("%s: Fail to notify OST %x for stop: rc = %d\n",
2494 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
/* Operation table used when this node runs the layout LFSCK as master
 * (MDT side): master-specific checkpoint/prep/post/double-scan plus the
 * shared reset/fail/exec_dir/dump/query handlers. */
2499 static struct lfsck_operations lfsck_layout_master_ops = {
2500 .lfsck_reset = lfsck_layout_reset,
2501 .lfsck_fail = lfsck_layout_fail,
2502 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
2503 .lfsck_prep = lfsck_layout_master_prep,
2504 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
2505 .lfsck_exec_dir = lfsck_layout_exec_dir,
2506 .lfsck_post = lfsck_layout_master_post,
2507 .lfsck_dump = lfsck_layout_dump,
2508 .lfsck_double_scan = lfsck_layout_master_double_scan,
2509 .lfsck_data_release = lfsck_layout_master_data_release,
2510 .lfsck_quit = lfsck_layout_master_quit,
2511 .lfsck_in_notify = lfsck_layout_master_in_notify,
2512 .lfsck_query = lfsck_layout_query,
2513 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/* Operation table used when this node runs the layout LFSCK as slave
 * (OST side).  No .lfsck_quit / .lfsck_stop_notify: the slave has no
 * assistant thread and does not drive other targets. */
2516 static struct lfsck_operations lfsck_layout_slave_ops = {
2517 .lfsck_reset = lfsck_layout_reset,
2518 .lfsck_fail = lfsck_layout_fail,
2519 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
2520 .lfsck_prep = lfsck_layout_slave_prep,
2521 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
2522 .lfsck_exec_dir = lfsck_layout_exec_dir,
2523 .lfsck_post = lfsck_layout_slave_post,
2524 .lfsck_dump = lfsck_layout_dump,
2525 .lfsck_double_scan = lfsck_layout_slave_double_scan,
2526 .lfsck_data_release = lfsck_layout_slave_data_release,
2527 .lfsck_in_notify = lfsck_layout_slave_in_notify,
2528 .lfsck_query = lfsck_layout_query,
2531 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
2533 struct lfsck_component *com;
2534 struct lfsck_layout *lo;
2535 struct dt_object *root = NULL;
2536 struct dt_object *obj;
2544 INIT_LIST_HEAD(&com->lc_link);
2545 INIT_LIST_HEAD(&com->lc_link_dir);
2546 init_rwsem(&com->lc_sem);
2547 atomic_set(&com->lc_ref, 1);
2548 com->lc_lfsck = lfsck;
2549 com->lc_type = LT_LAYOUT;
2550 if (lfsck->li_master) {
2551 struct lfsck_layout_master_data *llmd;
2553 com->lc_ops = &lfsck_layout_master_ops;
2554 OBD_ALLOC_PTR(llmd);
2556 GOTO(out, rc = -ENOMEM);
2558 INIT_LIST_HEAD(&llmd->llmd_req_list);
2559 INIT_LIST_HEAD(&llmd->llmd_ost_list);
2560 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
2561 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
2562 spin_lock_init(&llmd->llmd_lock);
2563 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
2564 atomic_set(&llmd->llmd_rpcs_in_flight, 0);
2565 com->lc_data = llmd;
2567 struct lfsck_layout_slave_data *llsd;
2569 com->lc_ops = &lfsck_layout_slave_ops;
2570 OBD_ALLOC_PTR(llsd);
2572 GOTO(out, rc = -ENOMEM);
2574 INIT_LIST_HEAD(&llsd->llsd_seq_list);
2575 INIT_LIST_HEAD(&llsd->llsd_master_list);
2576 spin_lock_init(&llsd->llsd_lock);
2577 com->lc_data = llsd;
2579 com->lc_file_size = sizeof(*lo);
2580 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
2581 if (com->lc_file_ram == NULL)
2582 GOTO(out, rc = -ENOMEM);
2584 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
2585 if (com->lc_file_disk == NULL)
2586 GOTO(out, rc = -ENOMEM);
2588 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
2590 GOTO(out, rc = PTR_ERR(root));
2592 if (unlikely(!dt_try_as_dir(env, root)))
2593 GOTO(out, rc = -ENOTDIR);
2595 obj = local_file_find_or_create(env, lfsck->li_los, root,
2597 S_IFREG | S_IRUGO | S_IWUSR);
2599 GOTO(out, rc = PTR_ERR(obj));
2602 rc = lfsck_layout_load(env, com);
2604 rc = lfsck_layout_reset(env, com, true);
2605 else if (rc == -ENOENT)
2606 rc = lfsck_layout_init(env, com);
2611 lo = com->lc_file_ram;
2612 switch (lo->ll_status) {
2618 spin_lock(&lfsck->li_lock);
2619 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
2620 spin_unlock(&lfsck->li_lock);
2623 CERROR("%s: unknown lfsck_layout status: rc = %u\n",
2624 lfsck_lfsck2name(lfsck), lo->ll_status);
2626 case LS_SCANNING_PHASE1:
2627 case LS_SCANNING_PHASE2:
2628 /* No need to store the status to disk right now.
2629 * If the system crashed before the status stored,
2630 * it will be loaded back when next time. */
2631 lo->ll_status = LS_CRASHED;
2632 lo->ll_flags |= LF_INCOMPLETE;
2639 spin_lock(&lfsck->li_lock);
2640 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
2641 spin_unlock(&lfsck->li_lock);
2645 if (lo->ll_flags & LF_CRASHED_LASTID) {
2646 LASSERT(lfsck->li_out_notify != NULL);
2648 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
2649 LE_LASTID_REBUILDING);
2655 if (root != NULL && !IS_ERR(root))
2656 lu_object_put(env, &root->do_lu);
2659 lfsck_component_cleanup(env, com);