4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_linkea.h>
43 #include <lustre_fid.h>
44 #include <lustre_lib.h>
45 #include <lustre_net.h>
46 #include <lustre/lustre_user.h>
47 #include <md_object.h>
48 #include <obd_class.h>
50 #include "lfsck_internal.h"
/* Magic value identifying the on-disk layout-LFSCK trace file format. */
52 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Component name used when registering the layout LFSCK component. */
54 static const char lfsck_layout_name[] = "lfsck_layout";
/*
 * Per-FID-sequence state on the slave (OST) side: tracks the LAST_ID
 * object for one object sequence during layout scanning.
 * NOTE(review): some members are elided in this excerpt (e.g. the
 * sequence value itself) -- confirm against the full source.
 */
56 struct lfsck_layout_seq {
57 struct list_head lls_list; /* link into llsd_seq_list */
60 __u64 lls_lastid_known; /* highest object ID seen by the scan */
61 struct dt_object *lls_lastid_obj; /* the LAST_ID file for this sequence */
62 unsigned int lls_dirty:1; /* LAST_ID needs to be written back */
/*
 * One MDT (master) that is participating in layout verification against
 * this slave; refcounted, linked on the slave's master list.
 */
65 struct lfsck_layout_slave_target {
66 /* link into lfsck_layout_slave_data::llsd_master_list. */
67 struct list_head llst_list;
/*
 * Slave-side (OST) private data for the layout LFSCK component:
 * the per-sequence list, the masters driving the scan, and the in-RAM
 * rbtree used to track known/accessed OST-objects for orphan handling.
 */
73 struct lfsck_layout_slave_data {
74 /* list of lfsck_layout_seq, ordered by sequence. */
75 struct list_head llsd_seq_list;
77 /* list of the masters involved in layout verification. */
78 struct list_head llsd_master_list;
81 struct dt_object *llsd_rb_obj; /* in-RAM object standing for the rbtree */
82 struct rb_root llsd_rb_root; /* nodes keyed by <seq, first_oid> */
83 rwlock_t llsd_rb_lock; /* protects the rbtree and its valid flag */
84 unsigned int llsd_rbtree_valid:1;
/*
 * Refcounted wrapper around a pre-fetched MDT-object (the file whose
 * layout is being verified) together with its cached attributes.
 */
87 struct lfsck_layout_object {
88 struct dt_object *llo_obj; /* holds a reference, see object_init/put */
89 struct lu_attr llo_attr; /* attributes snapshotted at pre-fetch time */
/*
 * One verification request queued for the assistant thread: checks the
 * consistency between a parent (MDT-object) and one OST-object child.
 */
94 struct lfsck_layout_req {
95 struct list_head llr_list; /* link into llmd_req_list */
96 struct lfsck_layout_object *llr_parent; /* pinned via llo_ref */
97 struct dt_object *llr_child; /* the OST-object to verify */
99 __u32 llr_lov_idx; /* offset in LOV EA */
/*
 * Master-side (MDT) private data for the layout LFSCK component: the
 * request queue consumed by the assistant thread, plus the per-target
 * lists tracking which OSTs/MDTs are in which scanning phase.
 */
102 struct lfsck_layout_master_data {
103 spinlock_t llmd_lock; /* protects llmd_req_list */
104 struct list_head llmd_req_list;
106 /* list for the ost targets involve layout verification. */
107 struct list_head llmd_ost_list;
109 /* list for the ost targets in phase1 scanning. */
110 struct list_head llmd_ost_phase1_list;
112 /* list for the ost targets in phase2 scanning. */
113 struct list_head llmd_ost_phase2_list;
115 /* list for the mdt targets involve layout verification. */
116 struct list_head llmd_mdt_list;
118 /* list for the mdt targets in phase1 scanning. */
119 struct list_head llmd_mdt_phase1_list;
121 /* list for the mdt targets in phase2 scanning. */
122 struct list_head llmd_mdt_phase2_list;
124 struct ptlrpc_thread llmd_thread; /* the assistant thread */
125 __u32 llmd_touch_gen; /* generation to avoid re-visiting targets */
127 int llmd_assistant_status;
128 int llmd_post_result;
129 unsigned int llmd_to_post:1,
130 llmd_to_double_scan:1,
131 llmd_in_double_scan:1,
/* Arguments carried through a slave-side async RPC to its interpret cb. */
135 struct lfsck_layout_slave_async_args {
136 struct obd_export *llsaa_exp;
137 struct lfsck_component *llsaa_com;
138 struct lfsck_layout_slave_target *llsaa_llst;
/*
 * Allocate and initialize an lfsck_layout_object wrapping @obj: snapshot
 * its attributes, take a reference on the underlying lu_object, and start
 * the wrapper's refcount at 1.  Returns ERR_PTR on failure.
 * NOTE(review): allocation and error-path lines are elided in this view.
 */
141 static struct lfsck_layout_object *
142 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
145 struct lfsck_layout_object *llo;
150 return ERR_PTR(-ENOMEM);
152 rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
159 lu_object_get(&obj->do_lu);
161 /* The gen can be used to check whether some others have changed the
162 * file layout after LFSCK pre-fetching but before real verification. */
164 atomic_set(&llo->llo_ref, 1);
/*
 * Drop a reference on a slave-target record; on the last put the record
 * must already be unlinked (freeing is elided in this view).
 */
170 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
172 if (atomic_dec_and_test(&llst->llst_ref)) {
173 LASSERT(list_empty(&llst->llst_list));
/*
 * Register the master with the given @index on the slave's master list.
 * Scans the list under llsd_lock to avoid duplicates; when @index is
 * already present, the new allocation is discarded (path elided here).
 */
180 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
182 struct lfsck_layout_slave_target *llst;
183 struct lfsck_layout_slave_target *tmp;
190 INIT_LIST_HEAD(&llst->llst_list);
192 llst->llst_index = index;
193 atomic_set(&llst->llst_ref, 1);
195 spin_lock(&llsd->llsd_lock);
196 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
197 if (tmp->llst_index == index) {
/* Not found: append the new record to the master list. */
203 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
204 spin_unlock(&llsd->llsd_lock);
/*
 * Unlink @llst from the slave's master list (if still linked) under
 * llsd_lock, then drop the list's reference on it.
 */
213 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
214 struct lfsck_layout_slave_target *llst)
218 spin_lock(&llsd->llsd_lock);
219 if (!list_empty(&llst->llst_list)) {
220 list_del_init(&llst->llst_list);
223 spin_unlock(&llsd->llsd_lock);
/* Release the reference that the list held. */
226 lfsck_layout_llst_put(llst);
/*
 * Find the master record with the given index, unlink it from the master
 * list under llsd_lock, and return it (caller inherits the reference);
 * returns NULL-equivalent when not found (tail elided in this view).
 */
229 static inline struct lfsck_layout_slave_target *
230 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
233 struct lfsck_layout_slave_target *llst;
235 spin_lock(&llsd->llsd_lock);
236 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
237 if (llst->llst_index == index) {
238 list_del_init(&llst->llst_list);
239 spin_unlock(&llsd->llsd_lock);
/* Not found: drop the lock before returning. */
244 spin_unlock(&llsd->llsd_lock);
/*
 * Drop a reference on a layout object wrapper; on the last put, release
 * the wrapped dt_object (freeing of the wrapper is elided in this view).
 */
249 static inline void lfsck_layout_object_put(const struct lu_env *env,
250 struct lfsck_layout_object *llo)
252 if (atomic_dec_and_test(&llo->llo_ref)) {
253 lfsck_object_put(env, llo->llo_obj);
/*
 * Allocate a verification request pairing @parent (MDT-object wrapper,
 * on which an extra reference is taken) with the OST-object @child at
 * LOV index @lov_idx on OST @ost_idx.  Returns ERR_PTR(-ENOMEM) when
 * allocation fails (allocation line elided in this view).
 */
258 static struct lfsck_layout_req *
259 lfsck_layout_req_init(struct lfsck_layout_object *parent,
260 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
262 struct lfsck_layout_req *llr;
266 return ERR_PTR(-ENOMEM);
268 INIT_LIST_HEAD(&llr->llr_list);
/* The request pins the parent wrapper until lfsck_layout_req_fini(). */
269 atomic_inc(&parent->llo_ref);
270 llr->llr_parent = parent;
271 llr->llr_child = child;
272 llr->llr_ost_idx = ost_idx;
273 llr->llr_lov_idx = lov_idx;
/*
 * Release a verification request: drop the child object's lu reference
 * and the parent wrapper reference taken in lfsck_layout_req_init()
 * (freeing of @llr itself is elided in this view).
 */
278 static inline void lfsck_layout_req_fini(const struct lu_env *env,
279 struct lfsck_layout_req *llr)
281 lu_object_put(env, &llr->llr_child->do_lu);
282 lfsck_layout_object_put(env, llr->llr_parent);
/*
 * Test, under llmd_lock, whether the assistant's request queue is empty.
 * NOTE(review): the branch bodies/return paths are elided in this view.
 */
286 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
290 spin_lock(&llmd->llmd_lock);
291 if (list_empty(&llmd->llmd_req_list))
293 spin_unlock(&llmd->llmd_lock);
/*
 * Fetch the LOV EA ("lov" xattr) of @obj into @buf, growing @buf as
 * needed: a first dt_xattr_get() may fail with a too-small buffer, in
 * which case the required size is probed with LU_BUF_NULL and the buffer
 * is (re)allocated before retrying.  *buflen tracks the buffer capacity.
 * NOTE(review): the retry/return logic between these lines is elided.
 */
298 static int lfsck_layout_get_lovea(const struct lu_env *env,
299 struct dt_object *obj,
300 struct lu_buf *buf, ssize_t *buflen)
305 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
/* Probe the real EA size (rc) with a NULL buffer, then grow @buf. */
307 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
312 lu_buf_realloc(buf, rc);
314 *buflen = buf->lb_len;
316 if (buf->lb_buf == NULL)
/* First call with an unallocated buffer: allocate to the probed size. */
328 if (unlikely(buf->lb_buf == NULL)) {
329 lu_buf_alloc(buf, rc);
331 *buflen = buf->lb_len;
333 if (buf->lb_buf == NULL)
/*
 * Sanity-check an on-disk LOV EA header: the magic must be V1 or V3 and
 * the stripe pattern must be RAID0 (the only supported pattern).
 * Return values for the failure branches are elided in this view.
 */
342 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
347 magic = le32_to_cpu(lmm->lmm_magic);
348 /* If magic crashed, keep it there. Sometime later, during OST-object
349 * orphan handling, if some OST-object(s) back-point to it, it can be
350 * verified and repaired. */
351 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
354 patten = le32_to_cpu(lmm->lmm_pattern);
355 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
356 if (patten != LOV_PATTERN_RAID0)
/* One page of bitmap per rbtree node; WIDTH is the bit count (size * 8),
 * MASK extracts the within-node offset from an object ID. */
362 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
363 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
364 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_SIZE - 1)
/*
 * One rbtree node covers LFSCK_RBTREE_BITMAP_WIDTH consecutive object
 * IDs within a single sequence; two bitmaps record which IDs are known
 * to exist and which have been accessed by clients, for orphan detection.
 */
366 struct lfsck_rbtree_node {
367 struct rb_node lrn_node;
370 atomic_t lrn_known_count; /* set bits in lrn_known_bitmap */
371 atomic_t lrn_accessed_count; /* set bits in lrn_accessed_bitmap */
372 void *lrn_known_bitmap;
373 void *lrn_accessed_bitmap;
/*
 * Three-way comparison of <seq, oid> against the ID range covered by
 * @lrn; an oid inside [lrn_first_oid, lrn_first_oid + WIDTH) matches.
 * The return statements are elided in this view; presumably negative /
 * positive / zero per branch -- confirm against the full source.
 */
376 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
377 __u64 seq, __u32 oid)
379 if (seq < lrn->lrn_seq)
382 if (seq > lrn->lrn_seq)
385 if (oid < lrn->lrn_first_oid)
388 if (oid >= lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH)
394 /* The caller should hold llsd_rb_lock (read or write). */
/*
 * Standard rbtree lookup: walk from the root comparing the FID's
 * <seq, oid> against each node's range; returns the covering node or
 * NULL (return lines elided in this view).
 */
395 static struct lfsck_rbtree_node *
396 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
397 const struct lu_fid *fid)
399 struct rb_node *node = llsd->llsd_rb_root.rb_node;
400 struct lfsck_rbtree_node *lrn;
403 while (node != NULL) {
404 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
405 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
407 node = node->rb_left;
409 node = node->rb_right;
/*
 * Allocate a new rbtree node (plus its two page-sized bitmaps) covering
 * the ID range containing @fid; counts start at zero.  On any allocation
 * failure, already-allocated pieces are freed and ERR_PTR(-ENOMEM) is
 * returned (the node allocation itself is elided in this view).
 */
417 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
418 const struct lu_fid *fid)
420 struct lfsck_rbtree_node *lrn;
424 return ERR_PTR(-ENOMEM);
426 OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
427 if (lrn->lrn_known_bitmap == NULL) {
430 return ERR_PTR(-ENOMEM);
433 OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
434 if (lrn->lrn_accessed_bitmap == NULL) {
435 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
438 return ERR_PTR(-ENOMEM);
441 rb_init_node(&lrn->lrn_node);
442 lrn->lrn_seq = fid_seq(fid);
/* Align the range start down to a bitmap-width boundary. */
443 lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
444 atomic_set(&lrn->lrn_known_count, 0);
445 atomic_set(&lrn->lrn_accessed_count, 0);
/* Free a node's two bitmaps (freeing of @lrn itself is elided here). */
450 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
452 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
453 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
457 /* The caller should hold llsd_rb_lock for write. */
/*
 * Insert @lrn into the rbtree; if an existing node already covers the
 * same range, that node is returned instead and the caller frees @lrn
 * (the duplicate-return path is elided in this view).
 */
458 static struct lfsck_rbtree_node *
459 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
460 struct lfsck_rbtree_node *lrn)
462 struct rb_node **pos = &(llsd->llsd_rb_root.rb_node);
463 struct rb_node *parent = NULL;
464 struct lfsck_rbtree_node *tmp;
469 tmp = rb_entry(*pos, struct lfsck_rbtree_node, lrn_node);
470 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
472 pos = &((*pos)->rb_left);
474 pos = &((*pos)->rb_right);
/* Found the insertion point: link and rebalance. */
479 rb_link_node(&lrn->lrn_node, parent, pos);
480 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
/*
 * Set up the slave's rbtree machinery: create an in-RAM dt_object (with
 * the reserved FID_SEQ_LAYOUT_RBTREE sequence) standing for the tree,
 * mark the tree valid, and ask the bottom device to record accessed FIDs.
 */
485 static int lfsck_rbtree_setup(const struct lu_env *env,
486 struct lfsck_component *com)
488 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
489 struct lfsck_instance *lfsck = com->lc_lfsck;
490 struct dt_device *dev = lfsck->li_bottom;
491 struct lfsck_layout_slave_data *llsd = com->lc_data;
492 struct dt_object *obj;
494 fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
495 fid->f_oid = lfsck_dev_idx(dev);
497 obj = dt_locate(env, dev, fid);
499 RETURN(PTR_ERR(obj));
501 /* XXX: Generate an in-RAM object to stand for the layout rbtree.
502 * Scanning the layout rbtree will be via the iteration over
503 * the object. In the future, the rbtree may be written onto
504 * disk with the object.
506 * Mark the object to be as exist. */
507 obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
508 llsd->llsd_rb_obj = obj;
509 llsd->llsd_rbtree_valid = 1;
/* From now on every FID access on this device updates the bitmaps. */
510 dev->dd_record_fid_accessed = 1;
/*
 * Tear down the slave's rbtree: stop FID-access recording, invalidate
 * the tree under the write lock so no new users appear, free every node,
 * and release the stand-in object.
 */
515 static void lfsck_rbtree_cleanup(const struct lu_env *env,
516 struct lfsck_component *com)
518 struct lfsck_instance *lfsck = com->lc_lfsck;
519 struct lfsck_layout_slave_data *llsd = com->lc_data;
520 struct rb_node *node = rb_first(&llsd->llsd_rb_root);
521 struct rb_node *next;
522 struct lfsck_rbtree_node *lrn;
524 lfsck->li_bottom->dd_record_fid_accessed = 0;
525 /* Invalidate the rbtree first, then no others will use it. */
526 write_lock(&llsd->llsd_rb_lock);
527 llsd->llsd_rbtree_valid = 0;
528 write_unlock(&llsd->llsd_rb_lock);
/* Safe to walk without the lock: the valid flag keeps others out. */
530 while (node != NULL) {
531 next = rb_next(node);
532 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
533 rb_erase(node, &llsd->llsd_rb_root);
534 lfsck_rbtree_free(lrn);
538 if (llsd->llsd_rb_obj != NULL) {
539 lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
540 llsd->llsd_rb_obj = NULL;
/*
 * Record @fid in the known (and, when the elided 'accessed' flag is set,
 * accessed) bitmap.  On a tree miss, drop the read lock, allocate a new
 * node, retake the lock for write and insert it -- rechecking validity
 * after each lock transition.  If the accessed update fails, orphan
 * handling would be unreliable, so mark the scan LF_INCOMPLETE and tear
 * the whole rbtree down.
 */
544 static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
545 struct lfsck_component *com,
546 const struct lu_fid *fid,
549 struct lfsck_layout_slave_data *llsd = com->lc_data;
550 struct lfsck_rbtree_node *lrn;
556 CDEBUG(D_LFSCK, "%s: update bitmap for "DFID"\n",
557 lfsck_lfsck2name(com->lc_lfsck), PFID(fid));
/* Only normal/IDIF object FIDs are tracked; skip special FIDs. */
559 if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
562 if (!fid_is_idif(fid) && !fid_is_norm(fid))
565 read_lock(&llsd->llsd_rb_lock);
566 if (!llsd->llsd_rbtree_valid)
567 GOTO(unlock, rc = 0);
569 lrn = lfsck_rbtree_search(llsd, fid);
571 struct lfsck_rbtree_node *tmp;
/* Miss: allocate outside the lock, then upgrade to write lock. */
575 read_unlock(&llsd->llsd_rb_lock);
576 tmp = lfsck_rbtree_new(env, fid);
578 GOTO(out, rc = PTR_ERR(tmp));
581 write_lock(&llsd->llsd_rb_lock);
582 if (!llsd->llsd_rbtree_valid) {
583 lfsck_rbtree_free(tmp);
584 GOTO(unlock, rc = 0);
/* Insert may return an existing node; then free our new one. */
587 lrn = lfsck_rbtree_insert(llsd, tmp);
589 lfsck_rbtree_free(tmp);
592 idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
593 /* Any accessed object must be a known object. */
594 if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
595 atomic_inc(&lrn->lrn_known_count);
597 if (!test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
598 atomic_inc(&lrn->lrn_accessed_count);
601 GOTO(unlock, rc = 0);
/* 'unlock' releases whichever lock mode this path holds. */
605 write_unlock(&llsd->llsd_rb_lock);
607 read_unlock(&llsd->llsd_rb_lock);
609 if (rc != 0 && accessed) {
610 struct lfsck_layout *lo = com->lc_file_ram;
612 CERROR("%s: Fail to update object accessed bitmap, will cause "
613 "incorrect LFSCK OST-object handling, so disable it to "
614 "cancel orphan handling for related device. rc = %d.\n",
615 lfsck_lfsck2name(com->lc_lfsck), rc);
616 lo->ll_flags |= LF_INCOMPLETE;
617 lfsck_rbtree_cleanup(env, com);
/*
 * Convert an on-disk (little-endian) lfsck_layout record @src into CPU
 * byte order in @des, field by field.  Keep in sync with
 * lfsck_layout_cpu_to_le() below.
 */
621 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
622 const struct lfsck_layout *src)
626 des->ll_magic = le32_to_cpu(src->ll_magic);
627 des->ll_status = le32_to_cpu(src->ll_status);
628 des->ll_flags = le32_to_cpu(src->ll_flags);
629 des->ll_success_count = le32_to_cpu(src->ll_success_count);
630 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
631 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
632 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
633 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
634 des->ll_time_last_checkpoint =
635 le64_to_cpu(src->ll_time_last_checkpoint);
636 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
637 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
638 des->ll_pos_first_inconsistent =
639 le64_to_cpu(src->ll_pos_first_inconsistent);
640 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
641 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
642 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
643 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
644 for (i = 0; i < LLIT_MAX; i++)
645 des->ll_objs_repaired[i] =
646 le64_to_cpu(src->ll_objs_repaired[i]);
647 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/*
 * Convert a CPU-order lfsck_layout record @src into the on-disk
 * (little-endian) layout in @des.  Mirror of lfsck_layout_le_to_cpu().
 */
650 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
651 const struct lfsck_layout *src)
655 des->ll_magic = cpu_to_le32(src->ll_magic);
656 des->ll_status = cpu_to_le32(src->ll_status);
657 des->ll_flags = cpu_to_le32(src->ll_flags);
658 des->ll_success_count = cpu_to_le32(src->ll_success_count);
659 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
660 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
661 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
662 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
663 des->ll_time_last_checkpoint =
664 cpu_to_le64(src->ll_time_last_checkpoint);
665 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
666 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
667 des->ll_pos_first_inconsistent =
668 cpu_to_le64(src->ll_pos_first_inconsistent);
669 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
670 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
671 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
672 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
673 for (i = 0; i < LLIT_MAX; i++)
674 des->ll_objs_repaired[i] =
675 cpu_to_le64(src->ll_objs_repaired[i]);
676 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/*
 * Load the layout trace file from disk into com->lc_file_ram, converting
 * from little-endian and validating the magic.
 */
680 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
681 * \retval 0: succeed.
682 * \retval -ve: failed cases.
684 static int lfsck_layout_load(const struct lu_env *env,
685 struct lfsck_component *com)
687 struct lfsck_layout *lo = com->lc_file_ram;
688 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
689 ssize_t size = com->lc_file_size;
693 rc = dbo->dbo_read(env, com->lc_obj,
694 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
/* rc < 0 is an I/O error; a short read means a crashed record. */
699 CWARN("%s: failed to load lfsck_layout: rc = %d\n",
700 lfsck_lfsck2name(com->lc_lfsck), rc);
702 } else if (rc != size) {
703 CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
704 lfsck_lfsck2name(com->lc_lfsck), rc);
708 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
709 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
710 CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
711 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
712 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/*
 * Write the in-RAM layout state back to the trace file: convert to
 * little-endian, then do a declare/start/write transaction on the
 * bottom device.  Error paths funnel through labels elided in this view.
 */
719 static int lfsck_layout_store(const struct lu_env *env,
720 struct lfsck_component *com)
722 struct dt_object *obj = com->lc_obj;
723 struct lfsck_instance *lfsck = com->lc_lfsck;
724 struct lfsck_layout *lo = com->lc_file_disk;
725 struct thandle *handle;
726 ssize_t size = com->lc_file_size;
731 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
732 handle = dt_trans_create(env, lfsck->li_bottom);
733 if (IS_ERR(handle)) {
734 rc = PTR_ERR(handle);
735 CERROR("%s: fail to create trans for storing lfsck_layout: "
736 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
740 rc = dt_declare_record_write(env, obj, size, pos, handle);
742 CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
743 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
747 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
749 CERROR("%s: fail to start trans for storing lfsck_layout: "
750 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
754 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
757 CERROR("%s: fail to store lfsck_layout(1): size = %d, "
758 "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);
/* Always stop the transaction, whether the write succeeded or not. */
763 dt_trans_stop(env, lfsck->li_bottom, handle);
/*
 * (Re)initialize the layout trace file: zero the in-RAM copy, stamp the
 * magic and LS_INIT status, and persist it under lc_sem.
 */
768 static int lfsck_layout_init(const struct lu_env *env,
769 struct lfsck_component *com)
771 struct lfsck_layout *lo = com->lc_file_ram;
774 memset(lo, 0, com->lc_file_size);
775 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
776 lo->ll_status = LS_INIT;
777 down_write(&com->lc_sem);
778 rc = lfsck_layout_store(env, com);
779 up_write(&com->lc_sem);
/*
 * Decide whether @fid names an OST-object: first consult the FLD server
 * for the sequence's range type; if inconclusive, read the object's LMA
 * xattr and test LMAC_FID_ON_OST; finally fall back to probing for the
 * "fid" (filter) xattr, which only OST-objects carry.
 */
784 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
785 struct dt_object *obj, const struct lu_fid *fid)
787 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
788 struct lu_seq_range range = { 0 };
789 struct lustre_mdt_attrs *lma;
792 fld_range_set_any(&range);
793 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
795 if (fld_range_is_ost(&range))
801 lma = &lfsck_env_info(env)->lti_lma;
802 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
803 XATTR_NAME_LMA, BYPASS_CAPA);
804 if (rc == sizeof(*lma)) {
/* LMA is stored little-endian; swab before testing flags. */
805 lustre_lma_swab(lma);
807 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
/* Size-probe only (LU_BUF_NULL): presence of the xattr is the answer. */
810 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/*
 * Find the per-sequence record for @seq in the slave's list, which is
 * kept sorted by sequence so the scan can stop early (return lines are
 * elided in this view).
 */
815 static struct lfsck_layout_seq *
816 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
818 struct lfsck_layout_seq *lls;
820 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
821 if (lls->lls_seq == seq)
/* List is sorted ascending: passing @seq means it is absent. */
824 if (lls->lls_seq > seq)
/*
 * Insert @lls into the slave's sequence list, preserving ascending
 * order of lls_seq: find the first entry with a larger sequence and
 * insert before it (or at the tail).
 */
832 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
833 struct lfsck_layout_seq *lls)
835 struct lfsck_layout_seq *tmp;
836 struct list_head *pos = &llsd->llsd_seq_list;
838 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
839 if (lls->lls_seq < tmp->lls_seq) {
840 pos = &tmp->lls_list;
844 list_add_tail(&lls->lls_list, pos);
/*
 * Rebuild a missing LAST_ID object: in one local transaction, create the
 * regular file and write the initial lastid record.  Skipped entirely in
 * dry-run mode.  Error paths funnel through labels elided in this view.
 */
848 lfsck_layout_lastid_create(const struct lu_env *env,
849 struct lfsck_instance *lfsck,
850 struct dt_object *obj)
852 struct lfsck_thread_info *info = lfsck_env_info(env);
853 struct lu_attr *la = &info->lti_la;
854 struct dt_object_format *dof = &info->lti_dof;
855 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
856 struct dt_device *dt = lfsck->li_bottom;
863 CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
864 fid_seq(lfsck_dto2fid(obj)));
866 if (bk->lb_param & LPF_DRYRUN)
869 memset(la, 0, sizeof(*la));
870 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
871 la->la_valid = LA_MODE | LA_UID | LA_GID;
872 dof->dof_type = dt_mode_to_dft(S_IFREG);
874 th = dt_trans_create(env, dt);
876 RETURN(rc = PTR_ERR(th));
/* Declare both operations before starting the transaction. */
878 rc = dt_declare_create(env, obj, la, NULL, dof, th);
882 rc = dt_declare_record_write(env, obj, sizeof(lastid), pos, th);
886 rc = dt_trans_start_local(env, dt, th);
/* Re-check existence under the object write lock to avoid a race. */
890 dt_write_lock(env, obj, 0);
891 if (likely(!dt_object_exists(obj))) {
892 rc = dt_create(env, obj, la, NULL, dof, th);
894 rc = dt_record_write(env, obj,
895 lfsck_buf_get(env, &lastid, sizeof(lastid)),
898 dt_write_unlock(env, obj);
903 dt_trans_stop(env, dt, th);
/*
 * Re-read the on-disk LAST_ID for @lls and reconcile it with what the
 * scan has observed: a stored value below lls_lastid_known means the
 * LAST_ID crashed -- adopt the known value, notify LE_LASTID_REBUILDING
 * once, and set LF_CRASHED_LASTID; otherwise adopt the larger on-disk
 * value.
 */
909 lfsck_layout_lastid_reload(const struct lu_env *env,
910 struct lfsck_component *com,
911 struct lfsck_layout_seq *lls)
917 dt_read_lock(env, lls->lls_lastid_obj, 0);
918 rc = dt_record_read(env, lls->lls_lastid_obj,
919 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
920 dt_read_unlock(env, lls->lls_lastid_obj);
921 if (unlikely(rc != 0))
924 lastid = le64_to_cpu(lastid);
925 if (lastid < lls->lls_lastid_known) {
926 struct lfsck_instance *lfsck = com->lc_lfsck;
927 struct lfsck_layout *lo = com->lc_file_ram;
929 lls->lls_lastid = lls->lls_lastid_known;
/* Only notify/flag the first time the crash is detected. */
931 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
932 LASSERT(lfsck->li_out_notify != NULL);
934 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
935 LE_LASTID_REBUILDING);
936 lo->ll_flags |= LF_CRASHED_LASTID;
938 } else if (lastid >= lls->lls_lastid) {
939 lls->lls_lastid = lastid;
/*
 * Persist every dirty per-sequence LAST_ID: for clean entries first
 * re-verify against disk (OFD may have bumped LAST_ID ahead of real
 * creations), then for dirty ones write the value back in a local
 * transaction.  Dry-run mode skips the writes.
 */
947 lfsck_layout_lastid_store(const struct lu_env *env,
948 struct lfsck_component *com)
950 struct lfsck_instance *lfsck = com->lc_lfsck;
951 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
952 struct dt_device *dt = lfsck->li_bottom;
953 struct lfsck_layout_slave_data *llsd = com->lc_data;
954 struct lfsck_layout_seq *lls;
960 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
963 /* XXX: Add the code back if we really found related
964 * inconsistent cases in the future. */
966 if (!lls->lls_dirty) {
967 /* In OFD, before the pre-creation, the LAST_ID
968 * file will be updated firstly, which may hide
969 * some potential crashed cases. For example:
971 * The old obj1's ID is higher than old LAST_ID
972 * but lower than the new LAST_ID, but the LFSCK
973 * have not touch the obj1 until the OFD updated
974 * the LAST_ID. So the LFSCK does not regard it
975 * as crashed case. But when OFD does not create
976 * successfully, it will set the LAST_ID as the
977 * real created objects' ID, then LFSCK needs to
978 * found related inconsistency. */
979 rc = lfsck_layout_lastid_reload(env, com, lls);
980 if (likely(!lls->lls_dirty))
985 CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
986 " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);
988 if (bk->lb_param & LPF_DRYRUN) {
993 th = dt_trans_create(env, dt);
996 CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
997 lfsck_lfsck2name(com->lc_lfsck),
1002 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
1003 sizeof(lastid), pos, th);
1007 rc = dt_trans_start_local(env, dt, th);
/* Convert to on-disk byte order and write under the object lock. */
1011 lastid = cpu_to_le64(lls->lls_lastid);
1012 dt_write_lock(env, lls->lls_lastid_obj, 0);
1013 rc = dt_record_write(env, lls->lls_lastid_obj,
1014 lfsck_buf_get(env, &lastid,
1015 sizeof(lastid)), &pos, th);
1016 dt_write_unlock(env, lls->lls_lastid_obj);
1021 dt_trans_stop(env, dt, th);
1024 CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
1025 lfsck_lfsck2name(com->lc_lfsck),
/*
 * Locate (and if necessary rebuild) the LAST_ID object for lls->lls_seq.
 * A missing object means LAST_ID crashed: notify LE_LASTID_REBUILDING,
 * set LF_CRASHED_LASTID, optionally delay under the fail-injection hook,
 * then recreate it.  Otherwise read the stored lastid; a zero-length
 * read also counts as crashed.  On success the object is kept in
 * lls->lls_lastid_obj; on failure it is put.
 */
1034 lfsck_layout_lastid_load(const struct lu_env *env,
1035 struct lfsck_component *com,
1036 struct lfsck_layout_seq *lls)
1038 struct lfsck_instance *lfsck = com->lc_lfsck;
1039 struct lfsck_layout *lo = com->lc_file_ram;
1040 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1041 struct dt_object *obj;
1046 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
1047 obj = dt_locate(env, lfsck->li_bottom, fid);
1049 RETURN(PTR_ERR(obj));
1051 /* LAST_ID crashed, to be rebuilt */
1052 if (!dt_object_exists(obj)) {
1053 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1054 LASSERT(lfsck->li_out_notify != NULL);
1056 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1057 LE_LASTID_REBUILDING);
1058 lo->ll_flags |= LF_CRASHED_LASTID;
/* Fail-injection: optionally pause here, dropping lc_sem so the
 * main thread can make progress while we wait. */
1060 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
1062 struct l_wait_info lwi = LWI_TIMEOUT(
1063 cfs_time_seconds(cfs_fail_val),
1066 up_write(&com->lc_sem);
1067 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1068 !thread_is_running(&lfsck->li_thread),
1070 down_write(&com->lc_sem);
1074 rc = lfsck_layout_lastid_create(env, lfsck, obj);
1076 dt_read_lock(env, obj, 0);
1077 rc = dt_read(env, obj,
1078 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
1080 dt_read_unlock(env, obj);
/* A partial read is corruption; map short positive rc to -EFAULT. */
1081 if (rc != 0 && rc != sizeof(__u64))
1082 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
1084 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
1085 LASSERT(lfsck->li_out_notify != NULL);
1087 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1088 LE_LASTID_REBUILDING);
1089 lo->ll_flags |= LF_CRASHED_LASTID;
1092 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
/* Error path: release the object; success keeps it cached. */
1100 lfsck_object_put(env, obj);
1102 lls->lls_lastid_obj = obj;
/*
 * Interpret callback for the master's async LFSCK RPCs to OSTs/MDTs.
 * Behavior depends on the original request's event:
 *  - start: on failure mark LF_INCOMPLETE; on success link the target
 *    onto the appropriate layout/phase1 lists (unless dead/done);
 *  - phase done / peer exit / stop: log failures and unlink the target;
 *  - query: read the peer's lfsck_reply and keep the target in phase1,
 *    move it to phase2, or drop it entirely based on its scan status.
 * Targets referenced by laia are released on the elided exit path.
 */
1107 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
1108 struct ptlrpc_request *req,
1111 struct lfsck_async_interpret_args *laia = args;
1112 struct lfsck_component *com = laia->laia_com;
1113 struct lfsck_layout_master_data *llmd = com->lc_data;
1114 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
1115 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
1116 struct lfsck_request *lr = laia->laia_lr;
1118 switch (lr->lr_event) {
1121 struct lfsck_layout *lo = com->lc_file_ram;
1123 CERROR("%s: fail to notify %s %x for layout start: "
1124 "rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
1125 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1126 ltd->ltd_index, rc);
1127 lo->ll_flags |= LF_INCOMPLETE;
/* Start succeeded: enlist the target, unless it went away. */
1131 spin_lock(&ltds->ltd_lock);
1132 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1133 spin_unlock(&ltds->ltd_lock);
1137 if (lr->lr_flags & LEF_TO_OST) {
1138 if (list_empty(&ltd->ltd_layout_list))
1139 list_add_tail(&ltd->ltd_layout_list,
1140 &llmd->llmd_ost_list);
1141 if (list_empty(&ltd->ltd_layout_phase_list))
1142 list_add_tail(&ltd->ltd_layout_phase_list,
1143 &llmd->llmd_ost_phase1_list);
1145 if (list_empty(&ltd->ltd_layout_list))
1146 list_add_tail(&ltd->ltd_layout_list,
1147 &llmd->llmd_mdt_list);
1148 if (list_empty(&ltd->ltd_layout_phase_list))
1149 list_add_tail(&ltd->ltd_layout_phase_list,
1150 &llmd->llmd_mdt_phase1_list);
1152 spin_unlock(&ltds->ltd_lock);
1155 case LE_PHASE1_DONE:
1156 case LE_PHASE2_DONE:
1158 if (rc != 0 && rc != -EALREADY)
1159 CWARN("%s: fail to notify %s %x for layout: "
1160 "event = %d, rc = %d\n",
1161 lfsck_lfsck2name(com->lc_lfsck),
1162 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1163 ltd->ltd_index, lr->lr_event, rc);
1166 struct lfsck_reply *reply;
/* Query failed outright: forget this target. */
1169 spin_lock(&ltds->ltd_lock);
1170 list_del_init(&ltd->ltd_layout_phase_list);
1171 list_del_init(&ltd->ltd_layout_list);
1172 spin_unlock(&ltds->ltd_lock);
1176 reply = req_capsule_server_get(&req->rq_pill,
1178 if (reply == NULL) {
1180 CERROR("%s: invalid return value: rc = %d\n",
1181 lfsck_lfsck2name(com->lc_lfsck), rc);
1182 spin_lock(&ltds->ltd_lock);
1183 list_del_init(&ltd->ltd_layout_phase_list);
1184 list_del_init(&ltd->ltd_layout_list);
1185 spin_unlock(&ltds->ltd_lock);
1189 switch (reply->lr_status) {
1190 case LS_SCANNING_PHASE1:
/* Still in phase1: leave it where it is. */
1192 case LS_SCANNING_PHASE2:
1193 spin_lock(&ltds->ltd_lock);
1194 list_del_init(&ltd->ltd_layout_phase_list);
1195 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1196 spin_unlock(&ltds->ltd_lock);
/* Move the target onto the matching phase2 list. */
1200 if (lr->lr_flags & LEF_TO_OST)
1201 list_add_tail(&ltd->ltd_layout_phase_list,
1202 &llmd->llmd_ost_phase2_list);
1204 list_add_tail(&ltd->ltd_layout_phase_list,
1205 &llmd->llmd_mdt_phase2_list);
1206 spin_unlock(&ltds->ltd_lock);
/* Any other peer status: done with this target. */
1209 spin_lock(&ltds->ltd_lock);
1210 list_del_init(&ltd->ltd_layout_phase_list);
1211 list_del_init(&ltd->ltd_layout_list);
1212 spin_unlock(&ltds->ltd_lock);
1218 CERROR("%s: unexpected event: rc = %d\n",
1219 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
1223 if (!laia->laia_shared) {
1225 lfsck_component_put(env, com);
/*
 * Send an LE_QUERY to every target still in phase1 -- MDTs first, then
 * OSTs -- collecting replies via a ptlrpc request set interpreted by
 * lfsck_layout_master_async_interpret().  llmd_touch_gen marks targets
 * already visited in this round so the list rotation terminates.
 */
1231 static int lfsck_layout_master_query_others(const struct lu_env *env,
1232 struct lfsck_component *com)
1234 struct lfsck_thread_info *info = lfsck_env_info(env);
1235 struct lfsck_request *lr = &info->lti_lr;
1236 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1237 struct lfsck_instance *lfsck = com->lc_lfsck;
1238 struct lfsck_layout_master_data *llmd = com->lc_data;
1239 struct ptlrpc_request_set *set;
1240 struct lfsck_tgt_descs *ltds;
1241 struct lfsck_tgt_desc *ltd;
1242 struct list_head *head;
1247 set = ptlrpc_prep_set();
1251 llmd->llmd_touch_gen++;
1252 memset(lr, 0, sizeof(*lr));
1253 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1254 lr->lr_event = LE_QUERY;
1255 lr->lr_active = LT_LAYOUT;
1256 laia->laia_com = com;
1258 laia->laia_shared = 0;
/* Query MDTs in phase1 first; once none remain, query the OSTs. */
1260 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1261 ltds = &lfsck->li_mdt_descs;
1263 head = &llmd->llmd_mdt_phase1_list;
1267 ltds = &lfsck->li_ost_descs;
1268 lr->lr_flags = LEF_TO_OST;
1269 head = &llmd->llmd_ost_phase1_list;
1272 laia->laia_ltds = ltds;
1273 spin_lock(&ltds->ltd_lock);
1274 while (!list_empty(head)) {
1275 ltd = list_entry(head->next,
1276 struct lfsck_tgt_desc,
1277 ltd_layout_phase_list);
/* Already visited in this generation: the round is complete. */
1278 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1281 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
/* Rotate to the tail so the loop advances through the list. */
1282 list_del(&ltd->ltd_layout_phase_list);
1283 list_add_tail(&ltd->ltd_layout_phase_list, head);
1284 atomic_inc(&ltd->ltd_ref);
1285 laia->laia_ltd = ltd;
1286 spin_unlock(&ltds->ltd_lock);
1287 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1288 lfsck_layout_master_async_interpret,
1291 CERROR("%s: fail to query %s %x for layout: rc = %d\n",
1292 lfsck_lfsck2name(lfsck),
1293 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1294 ltd->ltd_index, rc);
1298 spin_lock(&ltds->ltd_lock);
1300 spin_unlock(&ltds->ltd_lock);
/* Wait for all queries in this batch to complete. */
1302 rc = ptlrpc_set_wait(set);
1304 ptlrpc_set_destroy(set);
/* If MDT phase1 just drained, loop back to handle the OSTs. */
1308 if (!(lr->lr_flags & LEF_TO_OST) &&
1309 list_empty(&llmd->llmd_mdt_phase1_list))
1312 ptlrpc_set_destroy(set);
1314 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * True when the master may move on to orphan handling: all MDTs have
 * finished phase1, and either some OST has reached phase2 or no OST is
 * still in phase1.
 */
1318 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1320 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1321 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1322 list_empty(&llmd->llmd_ost_phase1_list));
1325 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1326 struct lfsck_component *com,
1327 struct lfsck_request *lr)
1329 struct lfsck_thread_info *info = lfsck_env_info(env);
1330 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1331 struct lfsck_instance *lfsck = com->lc_lfsck;
1332 struct lfsck_layout_master_data *llmd = com->lc_data;
1333 struct lfsck_layout *lo = com->lc_file_ram;
1334 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1335 struct ptlrpc_request_set *set;
1336 struct lfsck_tgt_descs *ltds;
1337 struct lfsck_tgt_desc *ltd;
1338 struct lfsck_tgt_desc *next;
1339 struct list_head *head;
1344 set = ptlrpc_prep_set();
1348 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1349 lr->lr_active = LT_LAYOUT;
1350 laia->laia_com = com;
1352 laia->laia_shared = 0;
1353 switch (lr->lr_event) {
1355 /* Notify OSTs firstly, then handle other MDTs if needed. */
1356 ltds = &lfsck->li_ost_descs;
1357 laia->laia_ltds = ltds;
1358 down_read(<ds->ltd_rw_sem);
1359 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1360 ltd = lfsck_tgt_get(ltds, idx);
1361 LASSERT(ltd != NULL);
1363 laia->laia_ltd = ltd;
1364 ltd->ltd_layout_done = 0;
1365 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1366 lfsck_layout_master_async_interpret,
1367 laia, LFSCK_NOTIFY);
1369 CERROR("%s: fail to notify %s %x for layout "
1371 lfsck_lfsck2name(lfsck),
1372 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1375 lo->ll_flags |= LF_INCOMPLETE;
1378 up_read(<ds->ltd_rw_sem);
1381 rc = ptlrpc_set_wait(set);
1383 ptlrpc_set_destroy(set);
1387 if (!(bk->lb_param & LPF_ALL_TGT))
1390 /* link other MDT targets locallly. */
1391 spin_lock(<ds->ltd_lock);
1392 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1393 ltd = LTD_TGT(ltds, idx);
1394 LASSERT(ltd != NULL);
1396 if (!list_empty(<d->ltd_layout_list))
1399 list_add_tail(<d->ltd_layout_list,
1400 &llmd->llmd_mdt_list);
1401 list_add_tail(<d->ltd_layout_phase_list,
1402 &llmd->llmd_mdt_phase1_list);
1404 spin_unlock(<ds->ltd_lock);
1407 case LE_PHASE2_DONE:
1408 case LE_PEER_EXIT: {
1409 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1410 if (bk->lb_param & LPF_ALL_TGT) {
1411 head = &llmd->llmd_mdt_list;
1412 ltds = &lfsck->li_mdt_descs;
1413 if (lr->lr_event == LE_STOP) {
1414 /* unlink other MDT targets locallly. */
1415 spin_lock(<ds->ltd_lock);
1416 list_for_each_entry_safe(ltd, next, head,
1418 list_del_init(<d->ltd_layout_phase_list);
1419 list_del_init(<d->ltd_layout_list);
1421 spin_unlock(<ds->ltd_lock);
1423 lr->lr_flags |= LEF_TO_OST;
1424 head = &llmd->llmd_ost_list;
1425 ltds = &lfsck->li_ost_descs;
1427 lr->lr_flags &= ~LEF_TO_OST;
1430 lr->lr_flags |= LEF_TO_OST;
1431 head = &llmd->llmd_ost_list;
1432 ltds = &lfsck->li_ost_descs;
1436 laia->laia_ltds = ltds;
1437 spin_lock(<ds->ltd_lock);
1438 while (!list_empty(head)) {
1439 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1441 if (!list_empty(<d->ltd_layout_phase_list))
1442 list_del_init(<d->ltd_layout_phase_list);
1443 list_del_init(<d->ltd_layout_list);
1444 atomic_inc(<d->ltd_ref);
1445 laia->laia_ltd = ltd;
1446 spin_unlock(<ds->ltd_lock);
1447 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1448 lfsck_layout_master_async_interpret,
1449 laia, LFSCK_NOTIFY);
1451 CERROR("%s: fail to notify %s %x for layout "
1452 "stop/phase2: rc = %d\n",
1453 lfsck_lfsck2name(lfsck),
1454 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1455 "MDT", ltd->ltd_index, rc);
1458 spin_lock(<ds->ltd_lock);
1460 spin_unlock(<ds->ltd_lock);
1462 rc = ptlrpc_set_wait(set);
1464 ptlrpc_set_destroy(set);
1468 if (!(lr->lr_flags & LEF_TO_OST)) {
1469 lr->lr_flags |= LEF_TO_OST;
1470 head = &llmd->llmd_ost_list;
1471 ltds = &lfsck->li_ost_descs;
1476 case LE_PHASE1_DONE:
1477 llmd->llmd_touch_gen++;
1478 ltds = &lfsck->li_mdt_descs;
1479 laia->laia_ltds = ltds;
1480 spin_lock(<ds->ltd_lock);
1481 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1482 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1483 struct lfsck_tgt_desc,
1484 ltd_layout_phase_list);
1485 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1488 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1489 list_del_init(<d->ltd_layout_phase_list);
1490 list_add_tail(<d->ltd_layout_phase_list,
1491 &llmd->llmd_mdt_phase1_list);
1492 atomic_inc(<d->ltd_ref);
1493 laia->laia_ltd = ltd;
1494 spin_unlock(<ds->ltd_lock);
1495 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1496 lfsck_layout_master_async_interpret,
1497 laia, LFSCK_NOTIFY);
1499 CERROR("%s: fail to notify MDT %x for layout "
1500 "phase1 done: rc = %d\n",
1501 lfsck_lfsck2name(lfsck),
1502 ltd->ltd_index, rc);
1505 spin_lock(<ds->ltd_lock);
1507 spin_unlock(<ds->ltd_lock);
1510 CERROR("%s: unexpected LFSCK event: rc = %d\n",
1511 lfsck_lfsck2name(lfsck), lr->lr_event);
1516 rc = ptlrpc_set_wait(set);
1517 ptlrpc_set_destroy(set);
1522 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1523 struct lfsck_component *com,
1526 struct lfsck_instance *lfsck = com->lc_lfsck;
1527 struct lfsck_layout *lo = com->lc_file_ram;
1528 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1530 down_write(&com->lc_sem);
1532 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1533 HALF_SEC - lfsck->li_time_last_checkpoint);
1534 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1535 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1538 com->lc_journal = 0;
1539 if (lo->ll_flags & LF_INCOMPLETE)
1540 lo->ll_status = LS_PARTIAL;
1542 lo->ll_status = LS_COMPLETED;
1543 if (!(bk->lb_param & LPF_DRYRUN))
1544 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1545 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1546 lo->ll_success_count++;
1547 } else if (rc == 0) {
1548 lo->ll_status = lfsck->li_status;
1549 if (lo->ll_status == 0)
1550 lo->ll_status = LS_STOPPED;
1552 lo->ll_status = LS_FAILED;
1555 if (lo->ll_status != LS_PAUSED) {
1556 spin_lock(&lfsck->li_lock);
1557 list_del_init(&com->lc_link);
1558 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
1559 spin_unlock(&lfsck->li_lock);
1562 rc = lfsck_layout_store(env, com);
1564 up_write(&com->lc_sem);
1569 static int lfsck_layout_lock(const struct lu_env *env,
1570 struct lfsck_component *com,
1571 struct dt_object *obj,
1572 struct lustre_handle *lh, __u64 bits)
1574 struct lfsck_thread_info *info = lfsck_env_info(env);
1575 ldlm_policy_data_t *policy = &info->lti_policy;
1576 struct ldlm_res_id *resid = &info->lti_resid;
1577 struct lfsck_instance *lfsck = com->lc_lfsck;
1578 __u64 flags = LDLM_FL_ATOMIC_CB;
1581 LASSERT(lfsck->li_namespace != NULL);
1583 memset(policy, 0, sizeof(*policy));
1584 policy->l_inodebits.bits = bits;
1585 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1586 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1587 policy, LCK_EX, &flags, ldlm_blocking_ast,
1588 ldlm_completion_ast, NULL, NULL, 0,
1589 LVB_T_NONE, NULL, lh);
1590 if (rc == ELDLM_OK) {
1593 memset(lh, 0, sizeof(*lh));
1600 static void lfsck_layout_unlock(struct lustre_handle *lh)
1602 if (lustre_handle_is_used(lh)) {
1603 ldlm_lock_decref(lh, LCK_EX);
1604 memset(lh, 0, sizeof(*lh));
1608 static int lfsck_layout_trans_stop(const struct lu_env *env,
1609 struct dt_device *dev,
1610 struct thandle *handle, int result)
1614 handle->th_result = result;
1615 rc = dt_trans_stop(env, dev, handle);
/* Scan the given OST target for orphan OST-objects during phase2.
 * Placeholder: always succeeds. */
static int lfsck_layout_scan_orphan(const struct lu_env *env,
                                    struct lfsck_component *com,
                                    struct lfsck_tgt_desc *ltd)
{
        /* XXX: To be extended in other patch. */
        return 0;
}
1633 /* For the MDT-object with dangling reference, we need to re-create
1634 * the missed OST-object with the known FID/owner information. */
1635 static int lfsck_layout_recreate_ostobj(const struct lu_env *env,
1636 struct lfsck_component *com,
1637 struct lfsck_layout_req *llr,
1640 struct lfsck_thread_info *info = lfsck_env_info(env);
1641 struct filter_fid *pfid = &info->lti_new_pfid;
1642 struct dt_allocation_hint *hint = &info->lti_hint;
1643 struct dt_object *parent = llr->llr_parent->llo_obj;
1644 struct dt_object *child = llr->llr_child;
1645 struct dt_device *dev = lfsck_obj2dt_dev(child);
1646 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1647 struct thandle *handle;
1649 struct lustre_handle lh = { 0 };
1653 CDEBUG(D_LFSCK, "Repair dangling reference for: parent "DFID
1654 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
1655 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
1656 llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid);
1658 rc = lfsck_layout_lock(env, com, parent, &lh,
1659 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
1663 handle = dt_trans_create(env, dev);
1665 GOTO(unlock1, rc = PTR_ERR(handle));
1667 hint->dah_parent = NULL;
1669 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1670 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1671 pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
1672 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1674 rc = dt_declare_create(env, child, la, hint, NULL, handle);
1678 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
1679 LU_XATTR_CREATE, handle);
1683 rc = dt_trans_start(env, dev, handle);
1687 dt_read_lock(env, parent, 0);
1688 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
1689 GOTO(unlock2, rc = 1);
1691 rc = dt_create(env, child, la, hint, NULL, handle);
1695 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
1696 handle, BYPASS_CAPA);
1701 dt_read_unlock(env, parent);
1704 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
1707 lfsck_layout_unlock(&lh);
1712 /* If the OST-object does not recognize the MDT-object as its parent, and
1713 * there is no other MDT-object claims as its parent, then just trust the
1714 * given MDT-object as its parent. So update the OST-object filter_fid. */
1715 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
1716 struct lfsck_component *com,
1717 struct lfsck_layout_req *llr,
1718 const struct lu_attr *pla)
1720 struct lfsck_thread_info *info = lfsck_env_info(env);
1721 struct filter_fid *pfid = &info->lti_new_pfid;
1722 struct lu_attr *tla = &info->lti_la3;
1723 struct dt_object *parent = llr->llr_parent->llo_obj;
1724 struct dt_object *child = llr->llr_child;
1725 struct dt_device *dev = lfsck_obj2dt_dev(child);
1726 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1727 struct thandle *handle;
1729 struct lustre_handle lh = { 0 };
1733 CDEBUG(D_LFSCK, "Repair unmatched MDT-OST pair for: parent "DFID
1734 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
1735 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
1736 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
1738 rc = lfsck_layout_lock(env, com, parent, &lh,
1739 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
1743 handle = dt_trans_create(env, dev);
1745 GOTO(unlock1, rc = PTR_ERR(handle));
1747 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1748 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1749 /* The ff_parent->f_ver is not the real parent fid->f_ver. Instead,
1750 * it is the OST-object index in the parent MDT-object layout. */
1751 pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
1752 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1754 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1758 tla->la_valid = LA_UID | LA_GID;
1759 tla->la_uid = pla->la_uid;
1760 tla->la_gid = pla->la_gid;
1761 rc = dt_declare_attr_set(env, child, tla, handle);
1765 rc = dt_trans_start(env, dev, handle);
1769 dt_write_lock(env, parent, 0);
1770 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
1771 GOTO(unlock2, rc = 1);
1773 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
1778 /* Get the latest parent's owner. */
1779 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
1783 tla->la_valid = LA_UID | LA_GID;
1784 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
1789 dt_write_unlock(env, parent);
1792 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
1795 lfsck_layout_unlock(&lh);
1800 /* If there are more than one MDT-objects claim as the OST-object's parent,
1801 * and the OST-object only recognizes one of them, then we need to generate
1802 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
1803 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
1804 struct lfsck_component *com,
1805 struct lfsck_layout_req *llr,
1809 struct lfsck_thread_info *info = lfsck_env_info(env);
1810 struct dt_allocation_hint *hint = &info->lti_hint;
1811 struct dt_object_format *dof = &info->lti_dof;
1812 struct dt_device *pdev = com->lc_lfsck->li_next;
1813 struct ost_id *oi = &info->lti_oi;
1814 struct dt_object *parent = llr->llr_parent->llo_obj;
1815 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
1816 struct dt_object *child = NULL;
1817 struct lu_device *d = &cdev->dd_lu_dev;
1818 struct lu_object *o = NULL;
1819 struct thandle *handle;
1820 struct lov_mds_md_v1 *lmm;
1821 struct lov_ost_data_v1 *objs;
1822 struct lustre_handle lh = { 0 };
1827 CDEBUG(D_LFSCK, "Repair multiple references for: parent "DFID
1828 ", OST-index %u, stripe-index %u, owner %u:%u\n",
1829 PFID(lfsck_dto2fid(parent)), llr->llr_ost_idx,
1830 llr->llr_lov_idx, la->la_uid, la->la_gid);
1832 rc = lfsck_layout_lock(env, com, parent, &lh,
1833 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
1837 handle = dt_trans_create(env, pdev);
1839 GOTO(unlock1, rc = PTR_ERR(handle));
1841 o = lu_object_anon(env, d, NULL);
1843 GOTO(stop, rc = PTR_ERR(o));
1845 child = container_of(o, struct dt_object, do_lu);
1846 o = lu_object_locate(o->lo_header, d->ld_type);
1847 if (unlikely(o == NULL))
1848 GOTO(stop, rc = -EINVAL);
1850 child = container_of(o, struct dt_object, do_lu);
1851 la->la_valid = LA_UID | LA_GID;
1852 hint->dah_parent = NULL;
1854 dof->dof_type = DFT_REGULAR;
1855 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
1859 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
1860 LU_XATTR_REPLACE, handle);
1864 rc = dt_trans_start(env, pdev, handle);
1868 dt_write_lock(env, parent, 0);
1869 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
1870 GOTO(unlock2, rc = 0);
1872 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
1873 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
1874 GOTO(unlock2, rc = 0);
1877 rc = lfsck_layout_verify_header(lmm);
1881 /* Someone change layout during the LFSCK, no need to repair then. */
1882 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
1883 GOTO(unlock2, rc = 0);
1885 rc = dt_create(env, child, la, hint, dof, handle);
1889 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
1890 * been verified in lfsck_layout_verify_header() already. If some
1891 * new magic introduced in the future, then layout LFSCK needs to
1892 * be updated also. */
1893 magic = le32_to_cpu(lmm->lmm_magic);
1894 if (magic == LOV_MAGIC_V1) {
1895 objs = &(lmm->lmm_objects[0]);
1897 LASSERT(magic == LOV_MAGIC_V3);
1898 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
1901 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
1902 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
1903 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
1904 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
1905 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
1906 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
1907 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
1909 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
1912 dt_write_unlock(env, parent);
1916 lu_object_put(env, &child->do_lu);
1918 dt_trans_stop(env, pdev, handle);
1921 lfsck_layout_unlock(&lh);
1926 /* If the MDT-object and the OST-object have different owner information,
1927 * then trust the MDT-object, because the normal chown/chgrp handle order
1928 * is from MDT to OST, and it is possible that some chown/chgrp operation
1929 * is partly done. */
1930 static int lfsck_layout_repair_owner(const struct lu_env *env,
1931 struct lfsck_component *com,
1932 struct lfsck_layout_req *llr,
1933 struct lu_attr *pla)
1935 struct lfsck_thread_info *info = lfsck_env_info(env);
1936 struct lu_attr *tla = &info->lti_la3;
1937 struct dt_object *parent = llr->llr_parent->llo_obj;
1938 struct dt_object *child = llr->llr_child;
1939 struct dt_device *dev = lfsck_obj2dt_dev(child);
1940 struct thandle *handle;
1944 CDEBUG(D_LFSCK, "Repair inconsistent file owner for: parent "DFID
1945 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
1946 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
1947 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
1949 handle = dt_trans_create(env, dev);
1951 RETURN(PTR_ERR(handle));
1953 tla->la_uid = pla->la_uid;
1954 tla->la_gid = pla->la_gid;
1955 tla->la_valid = LA_UID | LA_GID;
1956 rc = dt_declare_attr_set(env, child, tla, handle);
1960 rc = dt_trans_start(env, dev, handle);
1964 /* Use the dt_object lock to serialize with destroy and attr_set. */
1965 dt_read_lock(env, parent, 0);
1966 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
1967 GOTO(unlock, rc = 1);
1969 /* Get the latest parent's owner. */
1970 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
1972 CWARN("%s: fail to get the latest parent's ("DFID") owner, "
1973 "not sure whether some others chown/chgrp during the "
1974 "LFSCK: rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
1975 PFID(lfsck_dto2fid(parent)), rc);
1980 /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
1981 if (unlikely(tla->la_uid != pla->la_uid ||
1982 tla->la_gid != pla->la_gid))
1983 GOTO(unlock, rc = 1);
1985 tla->la_valid = LA_UID | LA_GID;
1986 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
1991 dt_read_unlock(env, parent);
1994 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
1999 /* Check whether the OST-object correctly back points to the
2000 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
2001 static int lfsck_layout_check_parent(const struct lu_env *env,
2002 struct lfsck_component *com,
2003 struct dt_object *parent,
2004 const struct lu_fid *pfid,
2005 const struct lu_fid *cfid,
2006 const struct lu_attr *pla,
2007 const struct lu_attr *cla,
2008 struct lfsck_layout_req *llr,
2009 struct lu_buf *lov_ea, __u32 idx)
2011 struct lfsck_thread_info *info = lfsck_env_info(env);
2012 struct lu_buf *buf = &info->lti_big_buf;
2013 struct dt_object *tobj;
2014 struct lov_mds_md_v1 *lmm;
2015 struct lov_ost_data_v1 *objs;
2022 if (fid_is_zero(pfid)) {
2023 /* client never wrote. */
2024 if (cla->la_size == 0 && cla->la_blocks == 0) {
2025 if (unlikely(cla->la_uid != pla->la_uid ||
2026 cla->la_gid != pla->la_gid))
2027 RETURN (LLIT_INCONSISTENT_OWNER);
2032 RETURN(LLIT_UNMATCHED_PAIR);
2035 if (unlikely(!fid_is_sane(pfid)))
2036 RETURN(LLIT_UNMATCHED_PAIR);
2038 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
2039 if (llr->llr_lov_idx == idx)
2042 RETURN(LLIT_UNMATCHED_PAIR);
2045 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
2047 RETURN(LLIT_UNMATCHED_PAIR);
2050 RETURN(PTR_ERR(tobj));
2052 if (!dt_object_exists(tobj))
2053 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
2055 /* Load the tobj's layout EA, in spite of it is a local MDT-object or
2056 * remote one on another MDT. Then check whether the given OST-object
2057 * is in such layout. If yes, it is multiple referenced, otherwise it
2058 * is unmatched referenced case. */
2059 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
2061 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
2067 rc = lfsck_layout_verify_header(lmm);
2071 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2072 * been verified in lfsck_layout_verify_header() already. If some
2073 * new magic introduced in the future, then layout LFSCK needs to
2074 * be updated also. */
2075 magic = le32_to_cpu(lmm->lmm_magic);
2076 if (magic == LOV_MAGIC_V1) {
2077 objs = &(lmm->lmm_objects[0]);
2079 LASSERT(magic == LOV_MAGIC_V3);
2080 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2083 count = le16_to_cpu(lmm->lmm_stripe_count);
2084 for (i = 0; i < count; i++, objs++) {
2085 struct lu_fid *tfid = &info->lti_fid2;
2086 struct ost_id *oi = &info->lti_oi;
2088 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2089 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
2090 if (lu_fid_eq(cfid, tfid)) {
2093 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
2097 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
2100 lfsck_object_put(env, tobj);
2105 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
2106 struct lfsck_component *com,
2107 struct lfsck_layout_req *llr)
2109 struct lfsck_layout *lo = com->lc_file_ram;
2110 struct lfsck_thread_info *info = lfsck_env_info(env);
2111 struct filter_fid_old *pea = &info->lti_old_pfid;
2112 struct lu_fid *pfid = &info->lti_fid;
2113 struct lu_buf *buf = NULL;
2114 struct dt_object *parent = llr->llr_parent->llo_obj;
2115 struct dt_object *child = llr->llr_child;
2116 struct lu_attr *pla = &info->lti_la;
2117 struct lu_attr *cla = &info->lti_la2;
2118 struct lfsck_instance *lfsck = com->lc_lfsck;
2119 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2120 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
2125 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
2127 if (lu_object_is_dying(parent->do_lu.lo_header))
2133 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
2134 if (rc == -ENOENT) {
2135 if (lu_object_is_dying(parent->do_lu.lo_header))
2138 type = LLIT_DANGLING;
2145 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
2146 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
2147 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
2148 rc != sizeof(struct filter_fid))) {
2149 type = LLIT_UNMATCHED_PAIR;
2153 if (rc < 0 && rc != -ENODATA)
2156 if (rc == -ENODATA) {
2159 fid_le_to_cpu(pfid, &pea->ff_parent);
2160 /* OST-object does not save parent FID::f_ver, instead,
2161 * the OST-object index in the parent MDT-object layout
2162 * EA reuses the pfid->f_ver. */
2167 rc = lfsck_layout_check_parent(env, com, parent, pfid,
2168 lu_object_fid(&child->do_lu),
2169 pla, cla, llr, buf, idx);
2178 if (unlikely(cla->la_uid != pla->la_uid ||
2179 cla->la_gid != pla->la_gid)) {
2180 type = LLIT_INCONSISTENT_OWNER;
2185 if (bk->lb_param & LPF_DRYRUN) {
2186 if (type != LLIT_NONE)
2194 memset(cla, 0, sizeof(*cla));
2195 cla->la_uid = pla->la_uid;
2196 cla->la_gid = pla->la_gid;
2197 cla->la_mode = S_IFREG | 0666;
2198 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
2199 LA_ATIME | LA_MTIME | LA_CTIME;
2200 rc = lfsck_layout_recreate_ostobj(env, com, llr, cla);
2202 case LLIT_UNMATCHED_PAIR:
2203 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
2205 case LLIT_MULTIPLE_REFERENCED:
2206 rc = lfsck_layout_repair_multiple_references(env, com, llr,
2209 case LLIT_INCONSISTENT_OWNER:
2210 rc = lfsck_layout_repair_owner(env, com, llr, pla);
2220 down_write(&com->lc_sem);
2222 /* If cannot touch the target server,
2223 * mark the LFSCK as INCOMPLETE. */
2224 if (rc == -ENOTCONN || rc == -ESHUTDOWN || rc == -ETIMEDOUT ||
2225 rc == -EHOSTDOWN || rc == -EHOSTUNREACH) {
2226 CERROR("%s: Fail to take with OST %x: rc = %d.\n",
2227 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
2228 lo->ll_flags |= LF_INCOMPLETE;
2229 lo->ll_objs_skipped++;
2232 lo->ll_objs_failed_phase1++;
2234 } else if (rc > 0) {
2235 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
2236 "unknown type = %d\n", type);
2238 lo->ll_objs_repaired[type - 1]++;
2240 up_write(&com->lc_sem);
2245 static int lfsck_layout_assistant(void *args)
2247 struct lfsck_thread_args *lta = args;
2248 struct lu_env *env = <a->lta_env;
2249 struct lfsck_component *com = lta->lta_com;
2250 struct lfsck_instance *lfsck = lta->lta_lfsck;
2251 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2252 struct lfsck_position *pos = &com->lc_pos_start;
2253 struct lfsck_thread_info *info = lfsck_env_info(env);
2254 struct lfsck_request *lr = &info->lti_lr;
2255 struct lfsck_layout_master_data *llmd = com->lc_data;
2256 struct ptlrpc_thread *mthread = &lfsck->li_thread;
2257 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2258 struct lfsck_layout_req *llr;
2259 struct l_wait_info lwi = { 0 };
2264 memset(lr, 0, sizeof(*lr));
2265 lr->lr_event = LE_START;
2266 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
2268 lr->lr_speed = bk->lb_speed_limit;
2269 lr->lr_version = bk->lb_version;
2270 lr->lr_param = bk->lb_param;
2271 lr->lr_async_windows = bk->lb_async_windows;
2272 lr->lr_flags = LEF_TO_OST;
2273 if (pos->lp_oit_cookie <= 1)
2274 lr->lr_param |= LPF_RESET;
2276 rc = lfsck_layout_master_notify_others(env, com, lr);
2278 CERROR("%s: fail to notify others for layout start: rc = %d\n",
2279 lfsck_lfsck2name(lfsck), rc);
2283 spin_lock(&llmd->llmd_lock);
2284 thread_set_flags(athread, SVC_RUNNING);
2285 spin_unlock(&llmd->llmd_lock);
2286 wake_up_all(&mthread->t_ctl_waitq);
2289 while (!list_empty(&llmd->llmd_req_list)) {
2290 bool wakeup = false;
2292 if (unlikely(llmd->llmd_exit))
2293 GOTO(cleanup1, rc = llmd->llmd_post_result);
2295 llr = list_entry(llmd->llmd_req_list.next,
2296 struct lfsck_layout_req,
2298 /* Only the lfsck_layout_assistant thread itself can
2299 * remove the "llr" from the head of the list, LFSCK
2300 * engine thread only inserts other new "lld" at the
2301 * end of the list. So it is safe to handle current
2302 * "llr" without the spin_lock. */
2303 rc = lfsck_layout_assistant_handle_one(env, com, llr);
2304 spin_lock(&llmd->llmd_lock);
2305 list_del_init(&llr->llr_list);
2306 if (bk->lb_async_windows != 0 &&
2307 llmd->llmd_prefetched >= bk->lb_async_windows)
2310 llmd->llmd_prefetched--;
2311 spin_unlock(&llmd->llmd_lock);
2313 wake_up_all(&mthread->t_ctl_waitq);
2315 lfsck_layout_req_fini(env, llr);
2316 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
2320 /* Wakeup the master engine if it is waiting in checkpoint. */
2321 wake_up_all(&mthread->t_ctl_waitq);
2323 l_wait_event(athread->t_ctl_waitq,
2324 !lfsck_layout_req_empty(llmd) ||
2326 llmd->llmd_to_post ||
2327 llmd->llmd_to_double_scan,
2330 if (unlikely(llmd->llmd_exit))
2331 GOTO(cleanup1, rc = llmd->llmd_post_result);
2333 if (!list_empty(&llmd->llmd_req_list))
2336 if (llmd->llmd_to_post) {
2337 llmd->llmd_to_post = 0;
2338 LASSERT(llmd->llmd_post_result > 0);
2340 memset(lr, 0, sizeof(*lr));
2341 lr->lr_event = LE_PHASE1_DONE;
2342 lr->lr_status = llmd->llmd_post_result;
2343 rc = lfsck_layout_master_notify_others(env, com, lr);
2345 CERROR("%s: failed to notify others "
2346 "for layout post: rc = %d\n",
2347 lfsck_lfsck2name(lfsck), rc);
2349 /* Wakeup the master engine to go ahead. */
2350 wake_up_all(&mthread->t_ctl_waitq);
2353 if (llmd->llmd_to_double_scan) {
2354 llmd->llmd_to_double_scan = 0;
2355 atomic_inc(&lfsck->li_double_scan_count);
2356 llmd->llmd_in_double_scan = 1;
2357 wake_up_all(&mthread->t_ctl_waitq);
2359 while (llmd->llmd_in_double_scan) {
2360 struct lfsck_tgt_descs *ltds =
2361 &lfsck->li_ost_descs;
2362 struct lfsck_tgt_desc *ltd;
2364 rc = lfsck_layout_master_query_others(env, com);
2365 if (lfsck_layout_master_to_orphan(llmd))
2371 /* Pull LFSCK status on related targets once
2372 * per 30 seconds if we are not notified. */
2373 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
2374 cfs_time_seconds(1),
2376 rc = l_wait_event(athread->t_ctl_waitq,
2377 lfsck_layout_master_to_orphan(llmd) ||
2379 !thread_is_running(mthread),
2382 if (unlikely(llmd->llmd_exit ||
2383 !thread_is_running(mthread)))
2384 GOTO(cleanup2, rc = 0);
2386 if (rc == -ETIMEDOUT)
2393 spin_lock(<ds->ltd_lock);
2395 &llmd->llmd_ost_phase2_list)) {
2397 llmd->llmd_ost_phase2_list.next,
2398 struct lfsck_tgt_desc,
2399 ltd_layout_phase_list);
2401 <d->ltd_layout_phase_list);
2402 spin_unlock(<ds->ltd_lock);
2404 rc = lfsck_layout_scan_orphan(env, com,
2407 bk->lb_param & LPF_FAILOUT)
2410 if (unlikely(llmd->llmd_exit ||
2411 !thread_is_running(mthread)))
2412 GOTO(cleanup2, rc = 0);
2414 spin_lock(<ds->ltd_lock);
2417 if (list_empty(&llmd->llmd_ost_phase1_list)) {
2418 spin_unlock(<ds->ltd_lock);
2419 GOTO(cleanup2, rc = 1);
2421 spin_unlock(<ds->ltd_lock);
2427 /* Cleanup the unfinished requests. */
2428 spin_lock(&llmd->llmd_lock);
2430 llmd->llmd_assistant_status = rc;
2432 while (!list_empty(&llmd->llmd_req_list)) {
2433 llr = list_entry(llmd->llmd_req_list.next,
2434 struct lfsck_layout_req,
2436 list_del_init(&llr->llr_list);
2437 llmd->llmd_prefetched--;
2438 spin_unlock(&llmd->llmd_lock);
2439 lfsck_layout_req_fini(env, llr);
2440 spin_lock(&llmd->llmd_lock);
2442 spin_unlock(&llmd->llmd_lock);
2444 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
2445 llmd->llmd_prefetched);
2448 memset(lr, 0, sizeof(*lr));
2450 lr->lr_event = LE_PHASE2_DONE;
2452 } else if (rc == 0) {
2453 if (lfsck->li_flags & LPF_ALL_TGT) {
2454 lr->lr_event = LE_STOP;
2455 lr->lr_status = LS_STOPPED;
2457 lr->lr_event = LE_PEER_EXIT;
2458 switch (lfsck->li_status) {
2461 lr->lr_status = LS_CO_PAUSED;
2465 lr->lr_status = LS_CO_STOPPED;
2468 CERROR("%s: unknown status: rc = %d\n",
2469 lfsck_lfsck2name(lfsck),
2471 lr->lr_status = LS_CO_FAILED;
2476 if (lfsck->li_flags & LPF_ALL_TGT) {
2477 lr->lr_event = LE_STOP;
2478 lr->lr_status = LS_FAILED;
2480 lr->lr_event = LE_PEER_EXIT;
2481 lr->lr_status = LS_CO_FAILED;
2485 rc1 = lfsck_layout_master_notify_others(env, com, lr);
2487 CERROR("%s: failed to notify others for layout quit: rc = %d\n",
2488 lfsck_lfsck2name(lfsck), rc1);
2492 /* Under force exit case, some requests may be just freed without
2493 * verification, those objects should be re-handled when next run.
2494 * So not update the on-disk tracing file under such case. */
2495 if (!llmd->llmd_exit)
2496 rc1 = lfsck_layout_double_scan_result(env, com, rc);
2499 if (llmd->llmd_in_double_scan)
2500 atomic_dec(&lfsck->li_double_scan_count);
2502 spin_lock(&llmd->llmd_lock);
2503 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
2504 thread_set_flags(athread, SVC_STOPPED);
2505 wake_up_all(&mthread->t_ctl_waitq);
2506 spin_unlock(&llmd->llmd_lock);
2507 lfsck_thread_args_fini(lta);
2513 lfsck_layout_slave_async_interpret(const struct lu_env *env,
2514 struct ptlrpc_request *req,
2517 struct lfsck_layout_slave_async_args *llsaa = args;
2518 struct obd_export *exp = llsaa->llsaa_exp;
2519 struct lfsck_component *com = llsaa->llsaa_com;
2520 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
2521 struct lfsck_layout_slave_data *llsd = com->lc_data;
2525 /* It is quite probably caused by target crash,
2526 * to make the LFSCK can go ahead, assume that
2527 * the target finished the LFSCK prcoessing. */
2530 struct lfsck_reply *lr;
2532 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
2533 if (lr->lr_status != LS_SCANNING_PHASE1 &&
2534 lr->lr_status != LS_SCANNING_PHASE2)
2538 lfsck_layout_llst_del(llsd, llst);
2539 lfsck_layout_llst_put(llst);
2540 lfsck_component_put(env, com);
2541 class_export_put(exp);
2546 static int lfsck_layout_async_query(const struct lu_env *env,
2547 struct lfsck_component *com,
2548 struct obd_export *exp,
2549 struct lfsck_layout_slave_target *llst,
2550 struct lfsck_request *lr,
2551 struct ptlrpc_request_set *set)
2553 struct lfsck_layout_slave_async_args *llsaa;
2554 struct ptlrpc_request *req;
2555 struct lfsck_request *tmp;
2559 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
2563 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
2565 ptlrpc_request_free(req);
2569 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2571 ptlrpc_request_set_replen(req);
2573 llsaa = ptlrpc_req_async_args(req);
2574 llsaa->llsaa_exp = exp;
2575 llsaa->llsaa_com = lfsck_component_get(com);
2576 llsaa->llsaa_llst = llst;
2577 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
2578 ptlrpc_set_add_req(set, req);
2583 static int lfsck_layout_async_notify(const struct lu_env *env,
2584 struct obd_export *exp,
2585 struct lfsck_request *lr,
2586 struct ptlrpc_request_set *set)
2588 struct ptlrpc_request *req;
2589 struct lfsck_request *tmp;
2593 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2597 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2599 ptlrpc_request_free(req);
2603 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2605 ptlrpc_request_set_replen(req);
2606 ptlrpc_set_add_req(set, req);
/* Slave-side: send an LE_QUERY request to every master currently linked on
 * llsd_master_list, asynchronously via a shared request set, then wait for
 * all replies.
 *
 * The touch-generation scheme (llsd_touch_gen) lets the loop walk a list
 * that is concurrently modified: each visited target is stamped with the
 * new generation and moved to the tail, so the loop terminates when the
 * head already carries the current generation.  llsd_lock is dropped
 * around the blocking lookup/send calls and re-taken for each iteration.
 */
2612 lfsck_layout_slave_query_master(const struct lu_env *env,
2613 struct lfsck_component *com)
2615 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
2616 struct lfsck_instance *lfsck = com->lc_lfsck;
2617 struct lfsck_layout_slave_data *llsd = com->lc_data;
2618 struct lfsck_layout_slave_target *llst;
2619 struct obd_export *exp;
2620 struct ptlrpc_request_set *set;
2625 set = ptlrpc_prep_set();
/* Build the query request once; it is shared by all targets below. */
2629 memset(lr, 0, sizeof(*lr));
2630 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
2631 lr->lr_event = LE_QUERY;
2632 lr->lr_active = LT_LAYOUT;
2634 llsd->llsd_touch_gen++;
2635 spin_lock(&llsd->llsd_lock);
2636 while (!list_empty(&llsd->llsd_master_list)) {
2637 llst = list_entry(llsd->llsd_master_list.next,
2638 struct lfsck_layout_slave_target,
/* Head already stamped with current generation => full pass done. */
2640 if (llst->llst_gen == llsd->llsd_touch_gen)
2643 llst->llst_gen = llsd->llsd_touch_gen;
/* Move to tail so unvisited targets stay at the head. */
2644 list_del(&llst->llst_list);
2645 list_add_tail(&llst->llst_list,
2646 &llsd->llsd_master_list);
/* Hold a ref across the unlocked section below. */
2647 atomic_inc(&llst->llst_ref);
2648 spin_unlock(&llsd->llsd_lock);
2650 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* LWP lookup failed (elided check): drop this target entirely. */
2653 lfsck_layout_llst_del(llsd, llst);
2654 lfsck_layout_llst_put(llst);
2655 spin_lock(&llsd->llsd_lock);
2659 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
2661 CERROR("%s: slave fail to query %s for layout: "
2662 "rc = %d\n", lfsck_lfsck2name(lfsck),
2663 exp->exp_obd->obd_name, rc);
2665 lfsck_layout_llst_put(llst);
2666 class_export_put(exp);
2668 spin_lock(&llsd->llsd_lock);
2670 spin_unlock(&llsd->llsd_lock);
/* Wait for all queued query RPCs, then tear the set down. */
2672 rc = ptlrpc_set_wait(set);
2673 ptlrpc_set_destroy(set);
/* rc1 (set prep / early failure, elided) takes precedence over rc. */
2675 RETURN(rc1 != 0 ? rc1 : rc);
/* Slave-side: broadcast an event (e.g. LE_PHASE1_DONE) with @result to every
 * master on llsd_master_list.  Structure mirrors
 * lfsck_layout_slave_query_master(): touch-generation list walk, lock dropped
 * around blocking calls, async RPCs collected on one set and waited at the
 * end.  Unlike the query path, the return value of ptlrpc_set_wait() is
 * intentionally ignored — notification is best-effort.
 *
 * \param event   lfsck event to report (LEF_FROM_OST is always set)
 * \param result  status code carried in lr_status
 */
2679 lfsck_layout_slave_notify_master(const struct lu_env *env,
2680 struct lfsck_component *com,
2681 enum lfsck_events event, int result)
2683 struct lfsck_instance *lfsck = com->lc_lfsck;
2684 struct lfsck_layout_slave_data *llsd = com->lc_data;
2685 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
2686 struct lfsck_layout_slave_target *llst;
2687 struct obd_export *exp;
2688 struct ptlrpc_request_set *set;
2692 set = ptlrpc_prep_set();
/* One shared notification payload for all masters. */
2696 memset(lr, 0, sizeof(*lr));
2697 lr->lr_event = event;
2698 lr->lr_flags = LEF_FROM_OST;
2699 lr->lr_status = result;
2700 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
2701 lr->lr_active = LT_LAYOUT;
2702 llsd->llsd_touch_gen++;
2703 spin_lock(&llsd->llsd_lock);
2704 while (!list_empty(&llsd->llsd_master_list)) {
2705 llst = list_entry(llsd->llsd_master_list.next,
2706 struct lfsck_layout_slave_target,
/* Generation match at head => every target visited once. */
2708 if (llst->llst_gen == llsd->llsd_touch_gen)
2711 llst->llst_gen = llsd->llsd_touch_gen;
2712 list_del(&llst->llst_list);
2713 list_add_tail(&llst->llst_list,
2714 &llsd->llsd_master_list);
2715 atomic_inc(&llst->llst_ref);
2716 spin_unlock(&llsd->llsd_lock);
2718 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP export for this index (elided check): drop the target. */
2721 lfsck_layout_llst_del(llsd, llst);
2722 lfsck_layout_llst_put(llst);
2723 spin_lock(&llsd->llsd_lock);
2727 rc = lfsck_layout_async_notify(env, exp, lr, set);
2729 CERROR("%s: slave fail to notify %s for layout: "
2730 "rc = %d\n", lfsck_lfsck2name(lfsck),
2731 exp->exp_obd->obd_name, rc);
2732 lfsck_layout_llst_put(llst);
2733 class_export_put(exp);
2734 spin_lock(&llsd->llsd_lock);
2736 spin_unlock(&llsd->llsd_lock);
/* Best-effort: wait for completion but discard the result. */
2738 ptlrpc_set_wait(set);
2739 ptlrpc_set_destroy(set);
/* Reset the on-disk/in-RAM layout LFSCK trace file to a pristine state.
 *
 * \param init  true for a first-time initialization (full wipe); false for a
 *              restart, which preserves the historical success count and the
 *              last-complete timestamp across the wipe.
 *
 * Writes the result back via lfsck_layout_store() under lc_sem.
 */
2746 static int lfsck_layout_reset(const struct lu_env *env,
2747 struct lfsck_component *com, bool init)
2749 struct lfsck_layout *lo = com->lc_file_ram;
2752 down_write(&com->lc_sem);
/* init branch: wipe everything. */
2754 memset(lo, 0, com->lc_file_size);
/* restart branch: save the two fields that must survive the wipe. */
2756 __u32 count = lo->ll_success_count;
2757 __u64 last_time = lo->ll_time_last_complete;
2759 memset(lo, 0, com->lc_file_size);
2760 lo->ll_success_count = count;
2761 lo->ll_time_last_complete = last_time;
2764 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
2765 lo->ll_status = LS_INIT;
2767 rc = lfsck_layout_store(env, com);
2768 up_write(&com->lc_sem);
/* Record a phase-1 failure: bump the failure counter (and the new-checked
 * counter when @new_checked is set), and remember the otable iterator cookie
 * of the first inconsistency so a dry-run restart can resume from there.
 */
2773 static void lfsck_layout_fail(const struct lu_env *env,
2774 struct lfsck_component *com, bool new_checked)
2776 struct lfsck_layout *lo = com->lc_file_ram;
2778 down_write(&com->lc_sem);
2780 com->lc_new_checked++;
2781 lo->ll_objs_failed_phase1++;
/* Only latch the position of the FIRST inconsistency. */
2782 if (lo->ll_pos_first_inconsistent == 0) {
2783 struct lfsck_instance *lfsck = com->lc_lfsck;
2785 lo->ll_pos_first_inconsistent =
2786 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
2789 up_write(&com->lc_sem);
/* Master-side checkpoint: wait until the assistant thread has drained
 * llmd_req_list (so the recorded position is consistent with what was
 * actually processed), then persist the current scan position, accumulated
 * run time, and checked-object count.
 *
 * \param init  true when recording the latest-start position instead of a
 *              regular last-checkpoint update.
 *
 * Returns early (no store) if nothing new was checked and !init, or if the
 * main/assistant thread is no longer running.
 */
2792 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
2793 struct lfsck_component *com, bool init)
2795 struct lfsck_instance *lfsck = com->lc_lfsck;
2796 struct lfsck_layout *lo = com->lc_file_ram;
2797 struct lfsck_layout_master_data *llmd = com->lc_data;
2798 struct ptlrpc_thread *mthread = &lfsck->li_thread;
2799 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2800 struct l_wait_info lwi = { 0 };
/* Nothing to checkpoint on a regular (non-init) call. */
2803 if (com->lc_new_checked == 0 && !init)
/* Wait for the pipeline to drain before snapshotting the position. */
2806 l_wait_event(mthread->t_ctl_waitq,
2807 list_empty(&llmd->llmd_req_list) ||
2808 !thread_is_running(mthread) ||
2809 thread_is_stopped(athread),
2812 if (!thread_is_running(mthread) || thread_is_stopped(athread))
2815 down_write(&com->lc_sem);
2817 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
2819 lo->ll_pos_last_checkpoint =
2820 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the elapsed-jiffies -> seconds conversion. */
2821 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2822 HALF_SEC - lfsck->li_time_last_checkpoint);
2823 lo->ll_time_last_checkpoint = cfs_time_current_sec();
2824 lo->ll_objs_checked_phase1 += com->lc_new_checked;
2825 com->lc_new_checked = 0;
2828 rc = lfsck_layout_store(env, com);
2829 up_write(&com->lc_sem);
/* Slave-side checkpoint: same bookkeeping as the master variant, but without
 * an assistant pipeline to drain — persist position, run time and counters
 * directly under lc_sem.
 */
2834 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
2835 struct lfsck_component *com, bool init)
2837 struct lfsck_instance *lfsck = com->lc_lfsck;
2838 struct lfsck_layout *lo = com->lc_file_ram;
2841 if (com->lc_new_checked == 0 && !init)
2844 down_write(&com->lc_sem);
2847 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
2849 lo->ll_pos_last_checkpoint =
2850 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the jiffies -> seconds conversion. */
2851 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2852 HALF_SEC - lfsck->li_time_last_checkpoint);
2853 lo->ll_time_last_checkpoint = cfs_time_current_sec();
2854 lo->ll_objs_checked_phase1 += com->lc_new_checked;
2855 com->lc_new_checked = 0;
2858 rc = lfsck_layout_store(env, com);
2860 up_write(&com->lc_sem);
/* Common preparation for starting (or restarting) a layout scan: decide the
 * initial status and the otable iterator cookie to resume from.
 *
 * - A completed/partial previous run, or an orphan-handling request, forces
 *   a full reset and a scan from the beginning.
 * - If the object was already scanned once (LF_SCANNED_ONCE) we normally go
 *   straight to phase 2; a dropped dry-run with a recorded first
 *   inconsistency instead re-runs phase 1 from that position with all
 *   phase counters cleared.
 * - Otherwise phase 1 resumes after the last checkpoint (or from the first
 *   inconsistency for a dropped dry-run).
 */
2865 static int lfsck_layout_prep(const struct lu_env *env,
2866 struct lfsck_component *com,
2867 struct lfsck_start *start)
2869 struct lfsck_instance *lfsck = com->lc_lfsck;
2870 struct lfsck_layout *lo = com->lc_file_ram;
2871 struct lfsck_position *pos = &com->lc_pos_start;
2873 fid_zero(&pos->lp_dir_parent);
2874 pos->lp_dir_cookie = 0;
2875 if (lo->ll_status == LS_COMPLETED ||
2876 lo->ll_status == LS_PARTIAL ||
2877 /* To handle orphan, must scan from the beginning. */
2878 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
2881 rc = lfsck_layout_reset(env, com, false);
2886 down_write(&com->lc_sem);
2887 lo->ll_time_latest_start = cfs_time_current_sec();
2888 spin_lock(&lfsck->li_lock);
2889 if (lo->ll_flags & LF_SCANNED_ONCE) {
2890 if (!lfsck->li_drop_dryrun ||
2891 lo->ll_pos_first_inconsistent == 0) {
/* Phase 1 already done once: jump directly to phase 2. */
2892 lo->ll_status = LS_SCANNING_PHASE2;
2893 list_del_init(&com->lc_link);
2894 list_add_tail(&com->lc_link,
2895 &lfsck->li_list_double_scan);
2896 pos->lp_oit_cookie = 0;
/* Dropped dry-run: redo phase 1 with fresh counters, resuming
 * from the first recorded inconsistency. */
2900 lo->ll_status = LS_SCANNING_PHASE1;
2901 lo->ll_run_time_phase1 = 0;
2902 lo->ll_run_time_phase2 = 0;
2903 lo->ll_objs_checked_phase1 = 0;
2904 lo->ll_objs_checked_phase2 = 0;
2905 lo->ll_objs_failed_phase1 = 0;
2906 lo->ll_objs_failed_phase2 = 0;
2907 for (i = 0; i < LLIT_MAX; i++)
2908 lo->ll_objs_repaired[i] = 0;
2910 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
2913 lo->ll_status = LS_SCANNING_PHASE1;
2914 if (!lfsck->li_drop_dryrun ||
2915 lo->ll_pos_first_inconsistent == 0)
/* Resume just past the last checkpointed position. */
2916 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
2918 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
2920 spin_unlock(&lfsck->li_lock);
2921 up_write(&com->lc_sem);
/* Slave-side prep: run the common lfsck_layout_prep(), then register the
 * requesting MDT (lsp_index) as a master target.  For orphan handling,
 * additionally build the FID rbtree used to track accessed OST-objects
 * (under llsd_rb_lock).
 */
2926 static int lfsck_layout_slave_prep(const struct lu_env *env,
2927 struct lfsck_component *com,
2928 struct lfsck_start_param *lsp)
2930 struct lfsck_layout_slave_data *llsd = com->lc_data;
2931 struct lfsck_start *start = lsp->lsp_start;
2934 rc = lfsck_layout_prep(env, com, start);
2935 if (rc != 0 || !lsp->lsp_index_valid)
2938 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
2939 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
2940 LASSERT(!llsd->llsd_rbtree_valid);
2942 write_lock(&llsd->llsd_rb_lock);
2943 rc = lfsck_rbtree_setup(env, com);
2944 write_unlock(&llsd->llsd_rb_lock);
/* Master-side prep: run the common prep, reset all assistant-thread state
 * flags, then spawn the "lfsck_layout" assistant kthread and wait for it to
 * reach running (or stopped) state.  If the assistant failed to start, its
 * status is propagated as this function's result.
 */
2950 static int lfsck_layout_master_prep(const struct lu_env *env,
2951 struct lfsck_component *com,
2952 struct lfsck_start_param *lsp)
2954 struct lfsck_instance *lfsck = com->lc_lfsck;
2955 struct lfsck_layout_master_data *llmd = com->lc_data;
2956 struct ptlrpc_thread *mthread = &lfsck->li_thread;
2957 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2958 struct lfsck_thread_args *lta;
2962 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
/* Clear all assistant coordination state before (re)starting it. */
2966 llmd->llmd_assistant_status = 0;
2967 llmd->llmd_post_result = 0;
2968 llmd->llmd_to_post = 0;
2969 llmd->llmd_to_double_scan = 0;
2970 llmd->llmd_in_double_scan = 0;
2971 llmd->llmd_exit = 0;
2972 thread_set_flags(athread, 0);
2974 lta = lfsck_thread_args_init(lfsck, com, lsp);
2976 RETURN(PTR_ERR(lta));
2978 rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
2979 if (IS_ERR_VALUE(rc)) {
2980 CERROR("%s: Cannot start LFSCK layout assistant thread: "
2981 "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
/* kthread_run failed, so the assistant will not free lta. */
2982 lfsck_thread_args_fini(lta);
2984 struct l_wait_info lwi = { 0 };
/* Wait until the assistant is demonstrably running or stopped. */
2986 l_wait_event(mthread->t_ctl_waitq,
2987 thread_is_running(athread) ||
2988 thread_is_stopped(athread),
2990 if (unlikely(!thread_is_running(athread)))
2991 rc = llmd->llmd_assistant_status;
/* Pre-fetch the attribute for each stripe in the given layout EA. */
/* For every stripe in @lmm (LOV_MAGIC_V1 or V3 only), locate the owning OST
 * target, find the OST-object by FID, declare the attr/xattr gets that the
 * assistant will perform, and queue an lfsck_layout_req on llmd_req_list.
 * Throttled by bk->lb_async_windows: blocks until the number of prefetched
 * requests drops below the window (0 = unlimited).  The assistant thread is
 * woken whenever the list transitions from empty.
 */
3000 static int lfsck_layout_scan_stripes(const struct lu_env *env,
3001 struct lfsck_component *com,
3002 struct dt_object *parent,
3003 struct lov_mds_md_v1 *lmm)
3005 struct lfsck_thread_info *info = lfsck_env_info(env);
3006 struct lfsck_instance *lfsck = com->lc_lfsck;
3007 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3008 struct lfsck_layout *lo = com->lc_file_ram;
3009 struct lfsck_layout_master_data *llmd = com->lc_data;
3010 struct lfsck_layout_object *llo = NULL;
3011 struct lov_ost_data_v1 *objs;
3012 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
3013 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3014 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3015 struct l_wait_info lwi = { 0 };
3024 buf = lfsck_buf_get(env, &info->lti_old_pfid,
3025 sizeof(struct filter_fid_old));
/* Layout EA fields are little-endian on disk. */
3026 count = le16_to_cpu(lmm->lmm_stripe_count);
3027 gen = le16_to_cpu(lmm->lmm_layout_gen);
3028 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3029 * been verified in lfsck_layout_verify_header() already. If some
3030 * new magic introduced in the future, then layout LFSCK needs to
3031 * be updated also. */
3032 magic = le32_to_cpu(lmm->lmm_magic);
3033 if (magic == LOV_MAGIC_V1) {
3034 objs = &(lmm->lmm_objects[0]);
3036 LASSERT(magic == LOV_MAGIC_V3);
/* V3 differs from V1 only by the pool name before lmm_objects. */
3037 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
3040 for (i = 0; i < count; i++, objs++) {
3041 struct lu_fid *fid = &info->lti_fid;
3042 struct ost_id *oi = &info->lti_oi;
3043 struct lfsck_layout_req *llr;
3044 struct lfsck_tgt_desc *tgt = NULL;
3045 struct dt_object *cobj = NULL;
3047 le32_to_cpu(objs->l_ost_idx);
3048 bool wakeup = false;
/* Async-window throttle: wait while the pipeline is full. */
3050 l_wait_event(mthread->t_ctl_waitq,
3051 bk->lb_async_windows == 0 ||
3052 llmd->llmd_prefetched < bk->lb_async_windows ||
3053 !thread_is_running(mthread) ||
3054 thread_is_stopped(athread),
3057 if (unlikely(!thread_is_running(mthread)) ||
3058 thread_is_stopped(athread))
3061 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3062 ostid_to_fid(fid, oi, index);
3063 tgt = lfsck_tgt_get(ltds, index);
3064 if (unlikely(tgt == NULL)) {
3065 CERROR("%s: Cannot talk with OST %x which is not join "
3066 "the layout LFSCK.\n",
3067 lfsck_lfsck2name(lfsck), index);
/* Mark the scan incomplete rather than failing outright. */
3068 lo->ll_flags |= LF_INCOMPLETE;
3072 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
/* Declare the gets the assistant will issue on this OST-object. */
3078 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
3082 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
/* One lfsck_layout_object per parent, created lazily on first use. */
3088 llo = lfsck_layout_object_init(env, parent, gen);
3095 llr = lfsck_layout_req_init(llo, cobj, index, i);
3102 spin_lock(&llmd->llmd_lock);
3103 if (llmd->llmd_assistant_status < 0) {
/* Assistant already failed: discard the request and bail. */
3104 spin_unlock(&llmd->llmd_lock);
3105 lfsck_layout_req_fini(env, llr);
3107 RETURN(llmd->llmd_assistant_status);
3110 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
/* Wake the assistant only on the empty->non-empty transition. */
3111 if (llmd->llmd_prefetched == 0)
3114 llmd->llmd_prefetched++;
3115 spin_unlock(&llmd->llmd_lock);
3117 wake_up_all(&athread->t_ctl_waitq);
3120 down_write(&com->lc_sem);
3121 com->lc_new_checked++;
3123 lo->ll_objs_failed_phase1++;
3124 up_write(&com->lc_sem);
3126 if (cobj != NULL && !IS_ERR(cobj))
3127 lu_object_put(env, &cobj->do_lu);
3129 if (likely(tgt != NULL))
3132 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
3139 if (llo != NULL && !IS_ERR(llo))
3140 lfsck_layout_object_put(env, llo);
3145 /* For the given object, read its layout EA locally. For each stripe, pre-fetch
3146 * the OST-object's attribute and generate an structure lfsck_layout_req on the
3147 * list ::llmd_req_list.
3149 * For each request on above list, the lfsck_layout_assistant thread compares
3150 * the OST side attribute with local attribute, if inconsistent, then repair it.
3152 * All above processing is async mode with pipeline. */
/* Master-side per-object handler for the otable iteration.  Only regular
 * files are processed.  Reads the LOV EA under a read lock and verifies the
 * lmm_oi against the object's own FID; on mismatch (and not dry-run) it
 * upgrades to a layout/xattr ldlm lock plus a write lock and rewrites the
 * EA in a local transaction (LLIT_OTHERS repair).  Finally hands the layout
 * to lfsck_layout_scan_stripes() for per-stripe prefetch.
 */
3153 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
3154 struct lfsck_component *com,
3155 struct dt_object *obj)
3157 struct lfsck_thread_info *info = lfsck_env_info(env);
3158 struct ost_id *oi = &info->lti_oi;
3159 struct lfsck_layout *lo = com->lc_file_ram;
3160 struct lfsck_layout_master_data *llmd = com->lc_data;
3161 struct lfsck_instance *lfsck = com->lc_lfsck;
3162 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3163 struct thandle *handle = NULL;
3164 struct lu_buf *buf = &info->lti_big_buf;
3165 struct lov_mds_md_v1 *lmm = NULL;
3166 struct dt_device *dev = lfsck->li_bottom;
3167 struct lustre_handle lh = { 0 };
/* Remember the buffer length; it is restored before return. */
3168 ssize_t buflen = buf->lb_len;
3170 bool locked = false;
3171 bool stripe = false;
/* Only regular files carry a LOV layout EA. */
3174 if (!S_ISREG(lfsck_object_type(obj)))
3177 if (llmd->llmd_assistant_status < 0)
3178 GOTO(out, rc = -ESRCH);
/* Expected lmm_oi, derived from the object's FID, little-endian. */
3180 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
3181 lmm_oi_cpu_to_le(oi, oi);
3182 dt_read_lock(env, obj, 0);
3186 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
3192 rc = lfsck_layout_verify_header(lmm);
/* lmm_oi matches => layout header consistent, go scan stripes. */
3196 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
3197 GOTO(out, stripe = true);
3199 /* Inconsistent lmm_oi, should be repaired. */
3200 CDEBUG(D_LFSCK, "Repair bad lmm_oi for "DFID"\n",
3201 PFID(lfsck_dto2fid(obj)));
3203 if (bk->lb_param & LPF_DRYRUN) {
/* Dry-run: count the would-be repair, do not modify anything. */
3204 down_write(&com->lc_sem);
3205 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
3206 up_write(&com->lc_sem);
3208 GOTO(out, stripe = true);
3211 if (!lustre_handle_is_used(&lh)) {
/* Drop the read lock before taking the ldlm layout/xattr lock. */
3212 dt_read_unlock(env, obj);
3214 buf->lb_len = buflen;
3215 rc = lfsck_layout_lock(env, com, obj, &lh,
3216 MDS_INODELOCK_LAYOUT |
3217 MDS_INODELOCK_XATTR);
3221 handle = dt_trans_create(env, dev);
3223 GOTO(out, rc = PTR_ERR(handle));
3225 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
3226 LU_XATTR_REPLACE, handle);
3230 rc = dt_trans_start_local(env, dev, handle);
3234 dt_write_lock(env, obj, 0);
/* Replace the LOV EA with the corrected lmm_oi. */
3241 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
3242 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
3246 down_write(&com->lc_sem);
3247 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
3248 up_write(&com->lc_sem);
3250 GOTO(out, stripe = true);
/* Cleanup: lh in use implies we hold the write lock path. */
3254 if (lustre_handle_is_used(&lh))
3255 dt_write_unlock(env, obj);
3257 dt_read_unlock(env, obj);
3260 if (handle != NULL && !IS_ERR(handle))
3261 dt_trans_stop(env, dev, handle);
3263 lfsck_layout_unlock(&lh);
3265 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
3267 down_write(&com->lc_sem);
3268 com->lc_new_checked++;
3270 lo->ll_objs_failed_phase1++;
3271 up_write(&com->lc_sem);
/* Restore the shared big buffer's recorded length. */
3273 buf->lb_len = buflen;
/* Slave-side per-object handler for the otable iteration on an OST.
 * Records the FID in the orphan rbtree bitmap, then tracks per-sequence
 * LAST_ID consistency: for each visited OST-object, remember the largest
 * object id seen (lls_lastid_known) and, if the id exceeds the on-disk
 * LAST_ID, reload it (the OFD may have created objects during the scan) and
 * otherwise declare the LAST_ID file crashed (LF_CRASHED_LASTID) and notify
 * the layer above via li_out_notify.
 */
3278 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
3279 struct lfsck_component *com,
3280 struct dt_object *obj)
3282 struct lfsck_instance *lfsck = com->lc_lfsck;
3283 struct lfsck_layout *lo = com->lc_file_ram;
3284 const struct lu_fid *fid = lfsck_dto2fid(obj);
3285 struct lfsck_layout_slave_data *llsd = com->lc_data;
3286 struct lfsck_layout_seq *lls;
3292 LASSERT(llsd != NULL);
/* Mark this FID as seen for later orphan detection. */
3294 lfsck_rbtree_update_bitmap(env, com, fid, false);
3296 down_write(&com->lc_sem);
3297 if (fid_is_idif(fid))
/* Skip FIDs that are neither IDIF nor normal OST-object FIDs. */
3299 else if (!fid_is_norm(fid) ||
3300 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
3301 GOTO(unlock, rc = 0);
3304 com->lc_new_checked++;
3306 lls = lfsck_layout_seq_lookup(llsd, seq);
/* First object in this sequence: allocate (elided) and load LAST_ID. */
3309 if (unlikely(lls == NULL))
3310 GOTO(unlock, rc = -ENOMEM);
3312 INIT_LIST_HEAD(&lls->lls_list);
3314 rc = lfsck_layout_lastid_load(env, com, lls);
3316 lo->ll_objs_failed_phase1++;
3321 lfsck_layout_seq_insert(llsd, lls);
/* The LAST_ID object itself is not a data object. */
3324 if (unlikely(fid_is_last_id(fid)))
3325 GOTO(unlock, rc = 0);
3328 if (oid > lls->lls_lastid_known)
3329 lls->lls_lastid_known = oid;
3331 if (oid > lls->lls_lastid) {
3332 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
3333 /* OFD may create new objects during LFSCK scanning. */
3334 rc = lfsck_layout_lastid_reload(env, com, lls);
3335 if (unlikely(rc != 0))
3336 CWARN("%s: failed to reload LAST_ID for "LPX64
3338 lfsck_lfsck2name(com->lc_lfsck),
3340 if (oid <= lls->lls_lastid)
3341 GOTO(unlock, rc = 0);
3343 LASSERT(lfsck->li_out_notify != NULL);
/* LAST_ID is genuinely behind: flag it crashed and tell OFD. */
3345 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
3346 LE_LASTID_REBUILDING);
3347 lo->ll_flags |= LF_CRASHED_LASTID;
3350 lls->lls_lastid = oid;
3354 GOTO(unlock, rc = 0);
3357 up_write(&com->lc_sem);
/* Directory-entry callback for the layout component.  Layout LFSCK drives
 * off the otable iteration, so this hook is effectively a no-op (body
 * elided in this view — presumably just returns 0; confirm in full source).
 */
3362 static int lfsck_layout_exec_dir(const struct lu_env *env,
3363 struct lfsck_component *com,
3364 struct dt_object *obj,
3365 struct lu_dirent *ent)
/* Master-side post-processing after phase-1 scanning ends with @result.
 * Publishes the result to the assistant (llmd_post_result/llmd_to_post,
 * setting llmd_exit on failure), waits for the request list to drain or the
 * assistant to stop, then updates the trace file: status transition
 * (PHASE2 / STOPPED / PAUSED / FAILED), component list membership, run time
 * and counters, all persisted via lfsck_layout_store().
 */
3370 static int lfsck_layout_master_post(const struct lu_env *env,
3371 struct lfsck_component *com,
3372 int result, bool init)
3374 struct lfsck_instance *lfsck = com->lc_lfsck;
3375 struct lfsck_layout *lo = com->lc_file_ram;
3376 struct lfsck_layout_master_data *llmd = com->lc_data;
3377 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3378 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3379 struct l_wait_info lwi = { 0 };
3384 llmd->llmd_post_result = result;
3385 llmd->llmd_to_post = 1;
/* Failure/stop: tell the assistant to abandon queued requests. */
3386 if (llmd->llmd_post_result <= 0)
3387 llmd->llmd_exit = 1;
3389 wake_up_all(&athread->t_ctl_waitq);
3390 l_wait_event(mthread->t_ctl_waitq,
3391 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
3392 thread_is_stopped(athread),
3395 if (llmd->llmd_assistant_status < 0)
3396 result = llmd->llmd_assistant_status;
3398 down_write(&com->lc_sem);
3399 spin_lock(&lfsck->li_lock);
3400 /* When LFSCK failed, there may be some prefetched objects those are
3401 * not been processed yet, we do not know the exactly position, then
3402 * just restart from last check-point next time. */
3403 if (!init && !llmd->llmd_exit)
3404 lo->ll_pos_last_checkpoint =
3405 lfsck->li_pos_current.lp_oit_cookie;
/* result > 0: phase 1 succeeded, move to double-scan (phase 2). */
3408 lo->ll_status = LS_SCANNING_PHASE2;
3409 lo->ll_flags |= LF_SCANNED_ONCE;
3410 lo->ll_flags &= ~LF_UPGRADE;
3411 list_del_init(&com->lc_link);
3412 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
3413 } else if (result == 0) {
/* result == 0: stopped/paused; inherit the instance status. */
3414 lo->ll_status = lfsck->li_status;
3415 if (lo->ll_status == 0)
3416 lo->ll_status = LS_STOPPED;
3417 if (lo->ll_status != LS_PAUSED) {
3418 list_del_init(&com->lc_link);
3419 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
/* result < 0: hard failure. */
3422 lo->ll_status = LS_FAILED;
3423 list_del_init(&com->lc_link);
3424 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
3426 spin_unlock(&lfsck->li_lock);
3429 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
3430 HALF_SEC - lfsck->li_time_last_checkpoint);
3431 lo->ll_time_last_checkpoint = cfs_time_current_sec();
3432 lo->ll_objs_checked_phase1 += com->lc_new_checked;
3433 com->lc_new_checked = 0;
3436 rc = lfsck_layout_store(env, com);
3437 up_write(&com->lc_sem);
/* Slave-side post-processing after phase-1 scanning ends with @result.
 * Persists LAST_ID files, performs the same status transition bookkeeping
 * as the master variant (PHASE2 / STOPPED / PAUSED / FAILED), clears
 * LF_CRASHED_LASTID once rebuilt, notifies the OFD via li_out_notify, and
 * finally reports LE_PHASE1_DONE to all registered masters.  The rbtree is
 * torn down unless phase 2 will need it (condition elided).
 */
3442 static int lfsck_layout_slave_post(const struct lu_env *env,
3443 struct lfsck_component *com,
3444 int result, bool init)
3446 struct lfsck_instance *lfsck = com->lc_lfsck;
3447 struct lfsck_layout *lo = com->lc_file_ram;
/* Flush the per-sequence LAST_ID files first. */
3451 rc = lfsck_layout_lastid_store(env, com);
3455 LASSERT(lfsck->li_out_notify != NULL);
3457 down_write(&com->lc_sem);
3459 spin_lock(&lfsck->li_lock);
3461 lo->ll_pos_last_checkpoint =
3462 lfsck->li_pos_current.lp_oit_cookie;
/* Success: advance to phase 2. */
3464 lo->ll_status = LS_SCANNING_PHASE2;
3465 lo->ll_flags |= LF_SCANNED_ONCE;
3466 if (lo->ll_flags & LF_CRASHED_LASTID) {
/* LAST_ID rebuilt during this run (notify elided above). */
3468 lo->ll_flags &= ~LF_CRASHED_LASTID;
3470 lo->ll_flags &= ~LF_UPGRADE;
3471 list_del_init(&com->lc_link);
3472 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
3473 } else if (result == 0) {
3474 lo->ll_status = lfsck->li_status;
3475 if (lo->ll_status == 0)
3476 lo->ll_status = LS_STOPPED;
3477 if (lo->ll_status != LS_PAUSED) {
3478 list_del_init(&com->lc_link);
3479 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
3482 lo->ll_status = LS_FAILED;
3483 list_del_init(&com->lc_link);
3484 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
3486 spin_unlock(&lfsck->li_lock);
3489 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
3493 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
3494 HALF_SEC - lfsck->li_time_last_checkpoint);
3495 lo->ll_time_last_checkpoint = cfs_time_current_sec();
3496 lo->ll_objs_checked_phase1 += com->lc_new_checked;
3497 com->lc_new_checked = 0;
3500 rc = lfsck_layout_store(env, com);
3502 up_write(&com->lc_sem);
/* Tell every registered master that this slave finished phase 1. */
3504 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
3507 lfsck_rbtree_cleanup(env, com);
/* Format the layout LFSCK state into @buf (max @len bytes) for procfs-style
 * consumption: status, flags, params, timestamps, positions, per-type repair
 * counters, and — when phase 1 is running — live speed/position figures
 * computed from jiffies since the last checkpoint.  Reads under lc_sem.
 * Buffer-exhaustion handling between snprintf calls is elided in this view.
 */
3512 static int lfsck_layout_dump(const struct lu_env *env,
3513 struct lfsck_component *com, char *buf, int len)
3515 struct lfsck_instance *lfsck = com->lc_lfsck;
3516 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3517 struct lfsck_layout *lo = com->lc_file_ram;
3522 down_read(&com->lc_sem);
3523 rc = snprintf(buf, len,
3524 "name: lfsck_layout\n"
3530 lfsck_status2names(lo->ll_status));
3536 rc = lfsck_bits_dump(&buf, &len, lo->ll_flags, lfsck_flags_names,
3541 rc = lfsck_bits_dump(&buf, &len, bk->lb_param, lfsck_param_names,
3546 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_complete,
3547 "time_since_last_completed");
3551 rc = lfsck_time_dump(&buf, &len, lo->ll_time_latest_start,
3552 "time_since_latest_start");
3556 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_checkpoint,
3557 "time_since_last_checkpoint");
3561 rc = snprintf(buf, len,
3562 "latest_start_position: "LPU64"\n"
3563 "last_checkpoint_position: "LPU64"\n"
3564 "first_failure_position: "LPU64"\n",
3565 lo->ll_pos_latest_start,
3566 lo->ll_pos_last_checkpoint,
3567 lo->ll_pos_first_inconsistent);
3574 rc = snprintf(buf, len,
3575 "success_count: %u\n"
3576 "repaired_dangling: "LPU64"\n"
3577 "repaired_unmatched_pair: "LPU64"\n"
3578 "repaired_multiple_referenced: "LPU64"\n"
3579 "repaired_orphan: "LPU64"\n"
3580 "repaired_inconsistent_owner: "LPU64"\n"
3581 "repaired_others: "LPU64"\n"
3582 "skipped: "LPU64"\n"
3583 "failed_phase1: "LPU64"\n"
3584 "failed_phase2: "LPU64"\n",
3585 lo->ll_success_count,
3586 lo->ll_objs_repaired[LLIT_DANGLING - 1],
3587 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
3588 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
3589 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
3590 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
3591 lo->ll_objs_repaired[LLIT_OTHERS - 1],
3592 lo->ll_objs_skipped,
3593 lo->ll_objs_failed_phase1,
3594 lo->ll_objs_failed_phase2);
3601 if (lo->ll_status == LS_SCANNING_PHASE1) {
3603 const struct dt_it_ops *iops;
3604 cfs_duration_t duration = cfs_time_current() -
3605 lfsck->li_time_last_checkpoint;
3606 __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
3607 __u64 speed = checked;
/* new_checked * HZ / duration-in-jiffies => objs/sec since ckpt. */
3608 __u64 new_checked = com->lc_new_checked * HZ;
3609 __u32 rtime = lo->ll_run_time_phase1 +
3610 cfs_duration_sec(duration + HALF_SEC);
3613 do_div(new_checked, duration);
3615 do_div(speed, rtime);
3616 rc = snprintf(buf, len,
3617 "checked_phase1: "LPU64"\n"
3618 "checked_phase2: "LPU64"\n"
3619 "run_time_phase1: %u seconds\n"
3620 "run_time_phase2: %u seconds\n"
3621 "average_speed_phase1: "LPU64" items/sec\n"
3622 "average_speed_phase2: N/A\n"
3623 "real-time_speed_phase1: "LPU64" items/sec\n"
3624 "real-time_speed_phase2: N/A\n",
3626 lo->ll_objs_checked_phase2,
3628 lo->ll_run_time_phase2,
3637 LASSERT(lfsck->li_di_oit != NULL);
3639 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
3641 /* The low layer otable-based iteration position may NOT
3642 * exactly match the layout-based directory traversal
3643 * cookie. Generally, it is not a serious issue. But the
3644 * caller should NOT make assumption on that. */
3645 pos = iops->store(env, lfsck->li_di_oit);
3646 if (!lfsck->li_current_oit_processed)
3648 rc = snprintf(buf, len, "current_position: "LPU64"\n", pos);
3655 /* XXX: LS_SCANNING_PHASE2 will be handled in the future. */
3656 __u64 speed1 = lo->ll_objs_checked_phase1;
3657 __u64 speed2 = lo->ll_objs_checked_phase2;
/* Guard do_div against zero run time. */
3659 if (lo->ll_run_time_phase1 != 0)
3660 do_div(speed1, lo->ll_run_time_phase1);
3661 if (lo->ll_run_time_phase2 != 0)
3662 do_div(speed2, lo->ll_run_time_phase2);
3663 rc = snprintf(buf, len,
3664 "checked_phase1: "LPU64"\n"
3665 "checked_phase2: "LPU64"\n"
3666 "run_time_phase1: %u seconds\n"
3667 "run_time_phase2: %u seconds\n"
3668 "average_speed_phase1: "LPU64" items/sec\n"
3669 "average_speed_phase2: "LPU64" objs/sec\n"
3670 "real-time_speed_phase1: N/A\n"
3671 "real-time_speed_phase2: N/A\n"
3672 "current_position: N/A\n",
3673 lo->ll_objs_checked_phase1,
3674 lo->ll_objs_checked_phase2,
3675 lo->ll_run_time_phase1,
3676 lo->ll_run_time_phase2,
3688 up_read(&com->lc_sem);
/* Master-side phase-2 entry: hand control to the assistant thread by
 * setting llmd_to_double_scan, then wait until the assistant has either
 * entered double scan or stopped.  Propagates an assistant failure status.
 */
3693 static int lfsck_layout_master_double_scan(const struct lu_env *env,
3694 struct lfsck_component *com)
3696 struct lfsck_layout_master_data *llmd = com->lc_data;
3697 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
3698 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3699 struct lfsck_layout *lo = com->lc_file_ram;
3700 struct l_wait_info lwi = { 0 };
/* Nothing to do unless phase 1 ended in the phase-2 state. */
3702 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
3705 llmd->llmd_to_double_scan = 1;
3706 wake_up_all(&athread->t_ctl_waitq);
3707 l_wait_event(mthread->t_ctl_waitq,
3708 llmd->llmd_in_double_scan ||
3709 thread_is_stopped(athread),
3711 if (llmd->llmd_assistant_status < 0)
3712 return llmd->llmd_assistant_status;
/* Slave-side phase-2: poll the registered masters (every 30s) until they
 * have all finished and removed themselves from llsd_master_list, or the
 * LFSCK thread is stopped.  On exit: compute the final double-scan result,
 * tear down the orphan rbtree, report LE_PHASE2_DONE to the masters, and
 * wake anyone waiting on li_double_scan_count reaching zero.
 */
3717 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
3718 struct lfsck_component *com)
3720 struct lfsck_instance *lfsck = com->lc_lfsck;
3721 struct lfsck_layout_slave_data *llsd = com->lc_data;
3722 struct lfsck_layout *lo = com->lc_file_ram;
3723 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Not entering phase 2: clean up and tell masters we are done. */
3727 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
3728 lfsck_rbtree_cleanup(env, com);
3729 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
3733 atomic_inc(&lfsck->li_double_scan_count);
3735 com->lc_new_checked = 0;
3736 com->lc_new_scanned = 0;
3737 com->lc_time_last_checkpoint = cfs_time_current();
3738 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
3739 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Poll loop: re-query masters every 30 seconds. */
3742 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
3745 rc = lfsck_layout_slave_query_master(env, com);
3746 if (list_empty(&llsd->llsd_master_list)) {
3747 if (unlikely(!thread_is_running(thread)))
3758 rc = l_wait_event(thread->t_ctl_waitq,
3759 !thread_is_running(thread) ||
3760 list_empty(&llsd->llsd_master_list),
3762 if (unlikely(!thread_is_running(thread)))
/* Timeout just re-arms the loop for another query round. */
3765 if (rc == -ETIMEDOUT)
3768 GOTO(done, rc = (rc < 0 ? rc : 1));
3772 rc = lfsck_layout_double_scan_result(env, com, rc);
3774 lfsck_rbtree_cleanup(env, com);
3775 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
3776 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
3777 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/* Release master-side component data.  Requires the assistant thread to be
 * init/stopped and the request list drained (asserted).  Unhooks every OST
 * and MDT target descriptor from all six llmd lists under ltd_lock, then
 * (elided) frees llmd.  NOTE(review): the MDT lists are walked while holding
 * the OST descs' ltd_lock — presumably intentional/sufficient here; confirm
 * against the full source.
 */
3782 static void lfsck_layout_master_data_release(const struct lu_env *env,
3783 struct lfsck_component *com)
3785 struct lfsck_layout_master_data *llmd = com->lc_data;
3786 struct lfsck_instance *lfsck = com->lc_lfsck;
3787 struct lfsck_tgt_descs *ltds;
3788 struct lfsck_tgt_desc *ltd;
3789 struct lfsck_tgt_desc *next;
3791 LASSERT(llmd != NULL);
3792 LASSERT(thread_is_init(&llmd->llmd_thread) ||
3793 thread_is_stopped(&llmd->llmd_thread));
3794 LASSERT(list_empty(&llmd->llmd_req_list));
3796 com->lc_data = NULL;
3798 ltds = &lfsck->li_ost_descs;
3799 spin_lock(&ltds->ltd_lock);
3800 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
3801 ltd_layout_phase_list) {
3802 list_del_init(&ltd->ltd_layout_phase_list);
3804 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
3805 ltd_layout_phase_list) {
3806 list_del_init(&ltd->ltd_layout_phase_list);
3808 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
3810 list_del_init(&ltd->ltd_layout_list);
3812 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
3813 ltd_layout_phase_list) {
3814 list_del_init(&ltd->ltd_layout_phase_list);
3816 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
3817 ltd_layout_phase_list) {
3818 list_del_init(&ltd->ltd_layout_phase_list);
3820 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
3822 list_del_init(&ltd->ltd_layout_list);
3824 spin_unlock(&ltds->ltd_lock);
/* Release slave-side component data: drop every cached sequence (putting
 * its LAST_ID object), release every registered master target, tear down
 * the orphan rbtree, and detach llsd from the component (freeing elided).
 */
3829 static void lfsck_layout_slave_data_release(const struct lu_env *env,
3830 struct lfsck_component *com)
3832 struct lfsck_layout_slave_data *llsd = com->lc_data;
3833 struct lfsck_layout_seq *lls;
3834 struct lfsck_layout_seq *next;
3835 struct lfsck_layout_slave_target *llst;
3836 struct lfsck_layout_slave_target *tmp;
3838 LASSERT(llsd != NULL);
3840 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
3842 list_del_init(&lls->lls_list);
3843 lfsck_object_put(env, lls->lls_lastid_obj);
3847 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
3849 list_del_init(&llst->llst_list);
3853 lfsck_rbtree_cleanup(env, com);
3854 com->lc_data = NULL;
/* Master-side quit: signal the assistant thread to exit (llmd_exit), wake
 * it, and block until it reports init or stopped state.
 */
3858 static void lfsck_layout_master_quit(const struct lu_env *env,
3859 struct lfsck_component *com)
3861 struct lfsck_layout_master_data *llmd = com->lc_data;
3862 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
3863 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3864 struct l_wait_info lwi = { 0 };
3866 llmd->llmd_exit = 1;
3867 wake_up_all(&athread->t_ctl_waitq);
3868 l_wait_event(mthread->t_ctl_waitq,
3869 thread_is_init(athread) ||
3870 thread_is_stopped(athread),
/* Slave-side quit: only the orphan rbtree needs tearing down. */
3874 static void lfsck_layout_slave_quit(const struct lu_env *env,
3875 struct lfsck_component *com)
3877 lfsck_rbtree_cleanup(env, com);
/* Master-side handler for incoming LFSCK events from peers (OSTs/MDTs).
 * Accepts only LE_PHASE1_DONE, LE_PHASE2_DONE and LE_PEER_EXIT.  Looks up
 * the sender's target descriptor and, under ltd_lock, moves it between the
 * phase lists: a successful PHASE1_DONE advances it to the phase-2 list;
 * a failed PHASE1_DONE, PHASE2_DONE or PEER_EXIT marks it done and unhooks
 * it (setting LF_INCOMPLETE where the peer failed).  With LPF_FAILOUT set,
 * a peer failure stops the whole LFSCK; otherwise the assistant is woken if
 * orphan handling can now proceed.
 */
3880 static int lfsck_layout_master_in_notify(const struct lu_env *env,
3881 struct lfsck_component *com,
3882 struct lfsck_request *lr)
3884 struct lfsck_instance *lfsck = com->lc_lfsck;
3885 struct lfsck_layout *lo = com->lc_file_ram;
3886 struct lfsck_layout_master_data *llmd = com->lc_data;
3887 struct lfsck_tgt_descs *ltds;
3888 struct lfsck_tgt_desc *ltd;
/* Only these three events are meaningful to the master. */
3892 if (lr->lr_event != LE_PHASE1_DONE &&
3893 lr->lr_event != LE_PHASE2_DONE &&
3894 lr->lr_event != LE_PEER_EXIT)
3897 if (lr->lr_flags & LEF_FROM_OST)
3898 ltds = &lfsck->li_ost_descs;
3900 ltds = &lfsck->li_mdt_descs;
3901 spin_lock(&ltds->ltd_lock);
3902 ltd = LTD_TGT(ltds, lr->lr_index);
/* Unknown target index (check elided): bail out. */
3904 spin_unlock(&ltds->ltd_lock);
3909 list_del_init(&ltd->ltd_layout_phase_list);
3910 switch (lr->lr_event) {
3911 case LE_PHASE1_DONE:
3912 if (lr->lr_status <= 0) {
/* Peer failed or stopped in phase 1: drop it, mark scan
 * incomplete. */
3913 ltd->ltd_layout_done = 1;
3914 list_del_init(&ltd->ltd_layout_list);
3915 CWARN("%s: %s %x failed/stopped at phase1: rc = %d.\n",
3916 lfsck_lfsck2name(lfsck),
3917 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
3918 ltd->ltd_index, lr->lr_status);
3919 lo->ll_flags |= LF_INCOMPLETE;
/* Peer finished phase 1 OK: queue it for phase 2. */
3924 if (lr->lr_flags & LEF_FROM_OST) {
3925 if (list_empty(&ltd->ltd_layout_list))
3926 list_add_tail(&ltd->ltd_layout_list,
3927 &llmd->llmd_ost_list);
3928 list_add_tail(&ltd->ltd_layout_phase_list,
3929 &llmd->llmd_ost_phase2_list);
3931 if (list_empty(&ltd->ltd_layout_list))
3932 list_add_tail(&ltd->ltd_layout_list,
3933 &llmd->llmd_mdt_list);
3934 list_add_tail(&ltd->ltd_layout_phase_list,
3935 &llmd->llmd_mdt_phase2_list);
3938 case LE_PHASE2_DONE:
3939 ltd->ltd_layout_done = 1;
3940 list_del_init(&ltd->ltd_layout_list);
/* LE_PEER_EXIT (case label elided). */
3944 ltd->ltd_layout_done = 1;
3945 list_del_init(&ltd->ltd_layout_list);
3946 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT)) {
3947 CWARN("%s: the peer %s %x exit layout LFSCK.\n",
3948 lfsck_lfsck2name(lfsck),
3949 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
3951 lo->ll_flags |= LF_INCOMPLETE;
3957 spin_unlock(&ltds->ltd_lock);
3959 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
/* Fail-out policy: propagate the peer failure by stopping the
 * local LFSCK (without re-broadcasting). */
3960 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
3962 memset(stop, 0, sizeof(*stop));
3963 stop->ls_status = lr->lr_status;
3964 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
3965 lfsck_stop(env, lfsck->li_bottom, stop);
3966 } else if (lfsck_layout_master_to_orphan(llmd)) {
3967 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
3973 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
3974 struct lfsck_component *com,
3975 struct lfsck_request *lr)
3977 struct lfsck_instance *lfsck = com->lc_lfsck;
3978 struct lfsck_layout_slave_data *llsd = com->lc_data;
3979 struct lfsck_layout_slave_target *llst;
3982 if (lr->lr_event == LE_FID_ACCESSED) {
3983 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
3988 if (lr->lr_event != LE_PHASE2_DONE && lr->lr_event != LE_PEER_EXIT)
3991 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index);
3995 lfsck_layout_llst_put(llst);
3996 if (list_empty(&llsd->llsd_master_list))
3997 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
3999 if (lr->lr_event == LE_PEER_EXIT &&
4000 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
4001 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
4003 memset(stop, 0, sizeof(*stop));
4004 stop->ls_status = lr->lr_status;
4005 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
4006 lfsck_stop(env, lfsck->li_bottom, stop);
4012 static int lfsck_layout_query(const struct lu_env *env,
4013 struct lfsck_component *com)
4015 struct lfsck_layout *lo = com->lc_file_ram;
4017 return lo->ll_status;
4020 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
4021 struct lfsck_component *com,
4022 struct lfsck_tgt_descs *ltds,
4023 struct lfsck_tgt_desc *ltd,
4024 struct ptlrpc_request_set *set)
4026 struct lfsck_thread_info *info = lfsck_env_info(env);
4027 struct lfsck_async_interpret_args *laia = &info->lti_laia;
4028 struct lfsck_request *lr = &info->lti_lr;
4029 struct lfsck_instance *lfsck = com->lc_lfsck;
4032 spin_lock(<ds->ltd_lock);
4033 if (list_empty(<d->ltd_layout_list)) {
4034 LASSERT(list_empty(<d->ltd_layout_phase_list));
4035 spin_unlock(<ds->ltd_lock);
4040 list_del_init(<d->ltd_layout_phase_list);
4041 list_del_init(<d->ltd_layout_list);
4042 spin_unlock(<ds->ltd_lock);
4044 memset(lr, 0, sizeof(*lr));
4045 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
4046 lr->lr_event = LE_PEER_EXIT;
4047 lr->lr_active = LT_LAYOUT;
4048 lr->lr_status = LS_CO_PAUSED;
4049 if (ltds == &lfsck->li_ost_descs)
4050 lr->lr_flags = LEF_TO_OST;
4052 laia->laia_com = com;
4053 laia->laia_ltds = ltds;
4054 atomic_inc(<d->ltd_ref);
4055 laia->laia_ltd = ltd;
4057 laia->laia_shared = 0;
4059 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
4060 lfsck_layout_master_async_interpret,
4061 laia, LFSCK_NOTIFY);
4063 CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
4064 lfsck_lfsck2name(lfsck),
4065 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
4066 ltd->ltd_index, rc);
4073 /* with lfsck::li_lock held */
4074 static int lfsck_layout_slave_join(const struct lu_env *env,
4075 struct lfsck_component *com,
4076 struct lfsck_start_param *lsp)
4078 struct lfsck_instance *lfsck = com->lc_lfsck;
4079 struct lfsck_layout_slave_data *llsd = com->lc_data;
4080 struct lfsck_layout_slave_target *llst;
4081 struct lfsck_start *start = lsp->lsp_start;
4085 if (!lsp->lsp_index_valid || start == NULL ||
4086 !(start->ls_flags & LPF_ALL_TGT) ||
4087 !(lfsck->li_bookmark_ram.lb_param & LPF_ALL_TGT))
4090 spin_unlock(&lfsck->li_lock);
4091 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
4092 spin_lock(&lfsck->li_lock);
4093 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
4094 spin_unlock(&lfsck->li_lock);
4095 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index);
4097 lfsck_layout_llst_put(llst);
4098 spin_lock(&lfsck->li_lock);
4105 static struct lfsck_operations lfsck_layout_master_ops = {
4106 .lfsck_reset = lfsck_layout_reset,
4107 .lfsck_fail = lfsck_layout_fail,
4108 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
4109 .lfsck_prep = lfsck_layout_master_prep,
4110 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
4111 .lfsck_exec_dir = lfsck_layout_exec_dir,
4112 .lfsck_post = lfsck_layout_master_post,
4113 .lfsck_interpret = lfsck_layout_master_async_interpret,
4114 .lfsck_dump = lfsck_layout_dump,
4115 .lfsck_double_scan = lfsck_layout_master_double_scan,
4116 .lfsck_data_release = lfsck_layout_master_data_release,
4117 .lfsck_quit = lfsck_layout_master_quit,
4118 .lfsck_in_notify = lfsck_layout_master_in_notify,
4119 .lfsck_query = lfsck_layout_query,
4120 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
4123 static struct lfsck_operations lfsck_layout_slave_ops = {
4124 .lfsck_reset = lfsck_layout_reset,
4125 .lfsck_fail = lfsck_layout_fail,
4126 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
4127 .lfsck_prep = lfsck_layout_slave_prep,
4128 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
4129 .lfsck_exec_dir = lfsck_layout_exec_dir,
4130 .lfsck_post = lfsck_layout_slave_post,
4131 .lfsck_dump = lfsck_layout_dump,
4132 .lfsck_double_scan = lfsck_layout_slave_double_scan,
4133 .lfsck_data_release = lfsck_layout_slave_data_release,
4134 .lfsck_quit = lfsck_layout_slave_quit,
4135 .lfsck_in_notify = lfsck_layout_slave_in_notify,
4136 .lfsck_query = lfsck_layout_query,
4137 .lfsck_join = lfsck_layout_slave_join,
4140 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
4142 struct lfsck_component *com;
4143 struct lfsck_layout *lo;
4144 struct dt_object *root = NULL;
4145 struct dt_object *obj;
4153 INIT_LIST_HEAD(&com->lc_link);
4154 INIT_LIST_HEAD(&com->lc_link_dir);
4155 init_rwsem(&com->lc_sem);
4156 atomic_set(&com->lc_ref, 1);
4157 com->lc_lfsck = lfsck;
4158 com->lc_type = LT_LAYOUT;
4159 if (lfsck->li_master) {
4160 struct lfsck_layout_master_data *llmd;
4162 com->lc_ops = &lfsck_layout_master_ops;
4163 OBD_ALLOC_PTR(llmd);
4165 GOTO(out, rc = -ENOMEM);
4167 INIT_LIST_HEAD(&llmd->llmd_req_list);
4168 spin_lock_init(&llmd->llmd_lock);
4169 INIT_LIST_HEAD(&llmd->llmd_ost_list);
4170 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
4171 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
4172 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
4173 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
4174 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
4175 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
4176 com->lc_data = llmd;
4178 struct lfsck_layout_slave_data *llsd;
4180 com->lc_ops = &lfsck_layout_slave_ops;
4181 OBD_ALLOC_PTR(llsd);
4183 GOTO(out, rc = -ENOMEM);
4185 INIT_LIST_HEAD(&llsd->llsd_seq_list);
4186 INIT_LIST_HEAD(&llsd->llsd_master_list);
4187 spin_lock_init(&llsd->llsd_lock);
4188 llsd->llsd_rb_root = RB_ROOT;
4189 rwlock_init(&llsd->llsd_rb_lock);
4190 com->lc_data = llsd;
4192 com->lc_file_size = sizeof(*lo);
4193 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
4194 if (com->lc_file_ram == NULL)
4195 GOTO(out, rc = -ENOMEM);
4197 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
4198 if (com->lc_file_disk == NULL)
4199 GOTO(out, rc = -ENOMEM);
4201 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
4203 GOTO(out, rc = PTR_ERR(root));
4205 if (unlikely(!dt_try_as_dir(env, root)))
4206 GOTO(out, rc = -ENOTDIR);
4208 obj = local_file_find_or_create(env, lfsck->li_los, root,
4210 S_IFREG | S_IRUGO | S_IWUSR);
4212 GOTO(out, rc = PTR_ERR(obj));
4215 rc = lfsck_layout_load(env, com);
4217 rc = lfsck_layout_reset(env, com, true);
4218 else if (rc == -ENOENT)
4219 rc = lfsck_layout_init(env, com);
4224 lo = com->lc_file_ram;
4225 switch (lo->ll_status) {
4231 spin_lock(&lfsck->li_lock);
4232 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4233 spin_unlock(&lfsck->li_lock);
4236 CERROR("%s: unknown lfsck_layout status: rc = %u\n",
4237 lfsck_lfsck2name(lfsck), lo->ll_status);
4239 case LS_SCANNING_PHASE1:
4240 case LS_SCANNING_PHASE2:
4241 /* No need to store the status to disk right now.
4242 * If the system crashed before the status stored,
4243 * it will be loaded back when next time. */
4244 lo->ll_status = LS_CRASHED;
4245 lo->ll_flags |= LF_INCOMPLETE;
4252 spin_lock(&lfsck->li_lock);
4253 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
4254 spin_unlock(&lfsck->li_lock);
4258 if (lo->ll_flags & LF_CRASHED_LASTID) {
4259 LASSERT(lfsck->li_out_notify != NULL);
4261 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4262 LE_LASTID_REBUILDING);
4268 if (root != NULL && !IS_ERR(root))
4269 lu_object_put(env, &root->do_lu);
4272 lfsck_component_cleanup(env, com);