4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_linkea.h>
43 #include <lustre_fid.h>
44 #include <lustre_lib.h>
45 #include <lustre_net.h>
46 #include <lustre/lustre_user.h>
47 #include <md_object.h>
48 #include <obd_class.h>
50 #include "lfsck_internal.h"
52 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Name under which the layout LFSCK component is identified/registered. */
54 static const char lfsck_layout_name[] = "lfsck_layout";
/* Per-FID-sequence LAST_ID tracking state on the LFSCK slave (OST) side.
 * Linked via lls_list (presumably into lfsck_layout_slave_data::llsd_seq_list
 * — confirm; this excerpt is missing lines, e.g. the lls_seq/lls_lastid
 * members referenced elsewhere in this file are not visible here). */
56 struct lfsck_layout_seq {
57 struct list_head lls_list;
/* Highest object ID known for this sequence from scanning. */
60 __u64 lls_lastid_known;
/* The on-disk LAST_ID file object for this sequence. */
61 struct dt_object *lls_lastid_obj;
/* Set when the in-memory lastid differs from what is stored on disk. */
62 unsigned int lls_dirty:1;
/* One master (MDT) target entry tracked by the LFSCK slave.  Ref-counted
 * (llst_ref, manipulated by lfsck_layout_llst_* helpers below). */
65 struct lfsck_layout_slave_target {
66 /* link into lfsck_layout_slave_data::llsd_master_list. */
67 struct list_head llst_list;
68 /* The position for next record in the rbtree for iteration. */
69 struct lu_fid llst_fid;
70 /* Dummy hash for iteration against the rbtree. */
/* Private data of the layout LFSCK slave (OST-side) component: the list of
 * per-sequence LAST_ID state, the list of interested masters, and an in-RAM
 * rbtree recording known/accessed OST-objects (guarded by llsd_rb_lock). */
77 struct lfsck_layout_slave_data {
78 /* list for lfsck_layout_seq */
79 struct list_head llsd_seq_list;
81 /* list for the masters involve layout verification. */
82 struct list_head llsd_master_list;
/* In-RAM object standing for the rbtree, see lfsck_rbtree_setup(). */
85 struct dt_object *llsd_rb_obj;
86 struct rb_root llsd_rb_root;
87 rwlock_t llsd_rb_lock;
/* Cleared under write-lock by lfsck_rbtree_cleanup() to invalidate tree. */
88 unsigned int llsd_rbtree_valid:1;
/* Ref-counted wrapper for a parent (MDT-side) object whose layout is being
 * verified; llo_attr caches its attributes (fetched at init time).  The
 * llo_ref counter used by get/put helpers is not visible in this excerpt. */
91 struct lfsck_layout_object {
92 struct dt_object *llo_obj;
93 struct lu_attr llo_attr;
/* One layout-verification request: a (parent MDT object, child OST object)
 * pair queued (via llr_list) for the assistant thread to process. */
98 struct lfsck_layout_req {
99 struct list_head llr_list;
100 struct lfsck_layout_object *llr_parent;
101 struct dt_object *llr_child;
103 __u32 llr_lov_idx; /* offset in LOV EA */
/* Private data of the layout LFSCK master (MDT-side) component: the queue of
 * verification requests, the per-phase OST/MDT target lists walked during
 * query/notify, and the assistant thread state.  llmd_touch_gen is bumped per
 * sweep so already-visited targets can be detected while rotating a list. */
106 struct lfsck_layout_master_data {
107 spinlock_t llmd_lock;
108 struct list_head llmd_req_list;
110 /* list for the ost targets involve layout verification. */
111 struct list_head llmd_ost_list;
113 /* list for the ost targets in phase1 scanning. */
114 struct list_head llmd_ost_phase1_list;
/* NOTE(review): comment below says phase1 but the list is phase2 —
 * likely a stale copy/paste in the original. */
116 /* list for the ost targets in phase1 scanning. */
117 struct list_head llmd_ost_phase2_list;
119 /* list for the mdt targets involve layout verification. */
120 struct list_head llmd_mdt_list;
122 /* list for the mdt targets in phase1 scanning. */
123 struct list_head llmd_mdt_phase1_list;
/* Same stale-comment remark as for the OST phase2 list above. */
125 /* list for the mdt targets in phase1 scanning. */
126 struct list_head llmd_mdt_phase2_list;
128 struct ptlrpc_thread llmd_thread;
129 __u32 llmd_touch_gen;
131 int llmd_assistant_status;
132 int llmd_post_result;
133 unsigned int llmd_to_post:1,
134 llmd_to_double_scan:1,
135 llmd_in_double_scan:1,
/* Argument bundle carried through slave-side async RPCs: the export the
 * request went to, the component, and the master-target entry involved. */
139 struct lfsck_layout_slave_async_args {
140 struct obd_export *llsaa_exp;
141 struct lfsck_component *llsaa_com;
142 struct lfsck_layout_slave_target *llsaa_llst;
/* Allocate and initialize an lfsck_layout_object for @obj: fetch its
 * attributes, take a reference on the underlying lu_object, and start the
 * refcount at 1.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): the allocation and error-path lines are missing from this
 * excerpt; behavior between the visible statements is inferred. */
145 static struct lfsck_layout_object *
146 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
149 struct lfsck_layout_object *llo;
154 return ERR_PTR(-ENOMEM);
156 rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
163 lu_object_get(&obj->do_lu);
165 /* The gen can be used to check whether some others have changed the
166 * file layout after LFSCK pre-fetching but before real verification. */
168 atomic_set(&llo->llo_ref, 1);
/* Drop a reference on @llst; on the last put the entry must already be
 * delinked from llsd_master_list (asserted) and is then freed (the free
 * statement is not visible in this excerpt). */
174 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
176 if (atomic_dec_and_test(&llst->llst_ref)) {
177 LASSERT(list_empty(&llst->llst_list));
/* Register master target @index with the slave: allocate an entry with
 * refcount 1 and append it to llsd_master_list under llsd_lock, unless an
 * entry with the same index is already present (duplicate-handling lines
 * are missing from this excerpt — presumably the new entry is discarded). */
184 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
186 struct lfsck_layout_slave_target *llst;
187 struct lfsck_layout_slave_target *tmp;
194 INIT_LIST_HEAD(&llst->llst_list);
196 llst->llst_index = index;
197 atomic_set(&llst->llst_ref, 1);
199 spin_lock(&llsd->llsd_lock);
200 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
201 if (tmp->llst_index == index) {
207 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
208 spin_unlock(&llsd->llsd_lock);
/* Remove @llst from the master list (if still linked) under llsd_lock and
 * drop the list's reference via lfsck_layout_llst_put().
 * NOTE(review): lines between unlock and put are missing; the put is
 * presumably conditional on the entry having been delinked here. */
217 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
218 struct lfsck_layout_slave_target *llst)
222 spin_lock(&llsd->llsd_lock);
223 if (!list_empty(&llst->llst_list)) {
224 list_del_init(&llst->llst_list);
227 spin_unlock(&llsd->llsd_lock);
230 lfsck_layout_llst_put(llst);
/* Look up the master-target entry with @index under llsd_lock; if found,
 * optionally delink it (the "unlink" test line is missing from this
 * excerpt), take a reference, and return it.  Falls through to returning
 * not-found after the final unlock (return statements not visible). */
233 static inline struct lfsck_layout_slave_target *
234 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
235 __u32 index, bool unlink)
237 struct lfsck_layout_slave_target *llst;
239 spin_lock(&llsd->llsd_lock);
240 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
241 if (llst->llst_index == index) {
243 list_del_init(&llst->llst_list);
245 atomic_inc(&llst->llst_ref);
246 spin_unlock(&llsd->llsd_lock);
251 spin_unlock(&llsd->llsd_lock);
/* Drop a reference on @llo; on the last put release the wrapped object
 * (and presumably free llo — the free line is not visible here). */
256 static inline void lfsck_layout_object_put(const struct lu_env *env,
257 struct lfsck_layout_object *llo)
259 if (atomic_dec_and_test(&llo->llo_ref)) {
260 lfsck_object_put(env, llo->llo_obj);
/* Build a layout-verification request pairing @parent with OST-object
 * @child at stripe slot @lov_idx on OST @ost_idx.  Takes a reference on
 * @parent; ownership of @child's reference passes to the request (released
 * by lfsck_layout_req_fini()).  Returns ERR_PTR(-ENOMEM) on alloc failure. */
265 static struct lfsck_layout_req *
266 lfsck_layout_req_init(struct lfsck_layout_object *parent,
267 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
269 struct lfsck_layout_req *llr;
273 return ERR_PTR(-ENOMEM);
275 INIT_LIST_HEAD(&llr->llr_list);
276 atomic_inc(&parent->llo_ref);
277 llr->llr_parent = parent;
278 llr->llr_child = child;
279 llr->llr_ost_idx = ost_idx;
280 llr->llr_lov_idx = lov_idx;
/* Release a request: drop the child object reference and the parent
 * wrapper reference (freeing of llr itself is not visible here). */
285 static inline void lfsck_layout_req_fini(const struct lu_env *env,
286 struct lfsck_layout_req *llr)
288 lu_object_put(env, &llr->llr_child->do_lu);
289 lfsck_layout_object_put(env, llr->llr_parent);
/* Test under llmd_lock whether the assistant's request queue is empty.
 * NOTE(review): the lines setting/returning the result are missing. */
293 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
297 spin_lock(&llmd->llmd_lock);
298 if (list_empty(&llmd->llmd_req_list))
300 spin_unlock(&llmd->llmd_lock);
/* Read @obj's LOV EA into @buf, growing the buffer and retrying when it is
 * too small: on -ERANGE (presumably — the rc checks are missing from this
 * excerpt) the required size is probed with LU_BUF_NULL, @buf is
 * (re)allocated to it, and *buflen updated.  -ENOMEM if allocation fails. */
305 static int lfsck_layout_get_lovea(const struct lu_env *env,
306 struct dt_object *obj,
307 struct lu_buf *buf, ssize_t *buflen)
312 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
314 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
319 lu_buf_realloc(buf, rc);
321 *buflen = buf->lb_len;
323 if (buf->lb_buf == NULL)
335 if (unlikely(buf->lb_buf == NULL)) {
336 lu_buf_alloc(buf, rc);
338 *buflen = buf->lb_len;
340 if (buf->lb_buf == NULL)
/* Sanity-check a LOV EA header: accept only LOV_MAGIC_V1/V3 magics and the
 * LOV_PATTERN_RAID0 striping pattern; unsupported values are logged (with
 * the object id swabbed back for display) and rejected with a negative rc
 * (the return statements are not visible in this excerpt). */
349 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
354 magic = le32_to_cpu(lmm->lmm_magic);
355 /* If magic crashed, keep it there. Sometime later, during OST-object
356 * orphan handling, if some OST-object(s) back-point to it, it can be
357 * verified and repaired. */
358 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3) {
362 lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
363 if ((magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
368 CDEBUG(D_LFSCK, "%s LOV EA magic %u on "DOSTID"\n",
369 rc == -EINVAL ? "Unknown" : "Unsupported",
375 pattern = le32_to_cpu(lmm->lmm_pattern);
376 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
377 if (lov_pattern(pattern) != LOV_PATTERN_RAID0) {
380 lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
381 CDEBUG(D_LFSCK, "Unsupported LOV EA pattern %u on "DOSTID"\n",
382 pattern, POSTID(&oi));
/* Each rbtree node covers LFSCK_RBTREE_BITMAP_WIDTH consecutive object IDs
 * of one FID sequence, with one bit per OID in two page-sized bitmaps:
 * "known" (object seen during scan) and "accessed" (object referenced).
 * WIDTH = SIZE * 8 bits; MASK extracts the in-node OID offset. */
390 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
391 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
392 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1)
394 struct lfsck_rbtree_node {
395 struct rb_node lrn_node;
398 atomic_t lrn_known_count;
399 atomic_t lrn_accessed_count;
400 void *lrn_known_bitmap;
401 void *lrn_accessed_bitmap;
/* Three-way compare of (@seq, @oid) against the OID range covered by @lrn:
 * negative when the key sorts before the node, positive when after, zero
 * when the OID falls inside the node's bitmap window (the return-value
 * lines themselves are missing from this excerpt). */
404 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
405 __u64 seq, __u32 oid)
407 if (seq < lrn->lrn_seq)
410 if (seq > lrn->lrn_seq)
413 if (oid < lrn->lrn_first_oid)
416 if (oid - lrn->lrn_first_oid >= LFSCK_RBTREE_BITMAP_WIDTH)
/* Find the rbtree node whose range contains @fid.  On an exact hit *exact
 * is presumably set (the line is missing); otherwise the walk remembers the
 * last visited node and returns the next valid node in order, or indicates
 * an empty tree / end of tree.  Caller must hold llsd->llsd_rb_lock. */
422 /* The caller should hold llsd->llsd_rb_lock. */
423 static struct lfsck_rbtree_node *
424 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
425 const struct lu_fid *fid, bool *exact)
427 struct rb_node *node = llsd->llsd_rb_root.rb_node;
428 struct rb_node *prev = NULL;
429 struct lfsck_rbtree_node *lrn = NULL;
435 while (node != NULL) {
437 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
438 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
440 node = node->rb_left;
442 node = node->rb_right;
450 /* If there is no exactly matched one, then to the next valid one. */
453 /* The rbtree is empty. */
460 node = rb_next(prev);
462 /* The end of the rbtree. */
466 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
/* Allocate a new rbtree node for @fid's bitmap window: node struct plus the
 * two page-sized bitmaps, with counters zeroed and lrn_first_oid aligned
 * down to the window boundary.  Every failed allocation unwinds what was
 * already allocated and returns ERR_PTR(-ENOMEM). */
471 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
472 const struct lu_fid *fid)
474 struct lfsck_rbtree_node *lrn;
478 return ERR_PTR(-ENOMEM);
480 OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
481 if (lrn->lrn_known_bitmap == NULL) {
484 return ERR_PTR(-ENOMEM);
487 OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
488 if (lrn->lrn_accessed_bitmap == NULL) {
489 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
492 return ERR_PTR(-ENOMEM);
495 RB_CLEAR_NODE(&lrn->lrn_node);
496 lrn->lrn_seq = fid_seq(fid);
497 lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
498 atomic_set(&lrn->lrn_known_count, 0);
499 atomic_set(&lrn->lrn_accessed_count, 0);
/* Free a node's two bitmaps (the freeing of lrn itself is not visible in
 * this excerpt but must follow). */
504 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
506 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
507 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
/* Standard rbtree insertion of @lrn keyed by (lrn_seq, lrn_first_oid).
 * Caller holds the write lock.  If a node covering the same window already
 * exists it is presumably returned instead (the rc==0 branch is missing
 * from this excerpt); otherwise @lrn is linked and recolored. */
511 /* The caller should hold lock. */
512 static struct lfsck_rbtree_node *
513 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
514 struct lfsck_rbtree_node *lrn)
516 struct rb_node **pos = &llsd->llsd_rb_root.rb_node;
517 struct rb_node *parent = NULL;
518 struct lfsck_rbtree_node *tmp;
521 while (*pos != NULL) {
523 tmp = rb_entry(parent, struct lfsck_rbtree_node, lrn_node);
524 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
526 pos = &(*pos)->rb_left;
528 pos = &(*pos)->rb_right;
533 rb_link_node(&lrn->lrn_node, parent, pos);
534 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
539 extern const struct dt_index_operations lfsck_orphan_index_ops;
/* Prepare the orphan-tracking rbtree: locate/create an in-RAM dt_object
 * (FID_SEQ_LAYOUT_RBTREE / device index) that stands for the tree so that
 * it can be scanned through the normal index-iteration interface, mark the
 * tree valid, and turn on FID-accessed recording on the bottom device. */
541 static int lfsck_rbtree_setup(const struct lu_env *env,
542 struct lfsck_component *com)
544 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
545 struct lfsck_instance *lfsck = com->lc_lfsck;
546 struct dt_device *dev = lfsck->li_bottom;
547 struct lfsck_layout_slave_data *llsd = com->lc_data;
548 struct dt_object *obj;
550 fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
551 fid->f_oid = lfsck_dev_idx(dev);
553 obj = dt_locate(env, dev, fid);
555 RETURN(PTR_ERR(obj));
557 /* Generate an in-RAM object to stand for the layout rbtree.
558 * Scanning the layout rbtree will be via the iteration over
559 * the object. In the future, the rbtree may be written onto
560 * disk with the object.
562 * Mark the object to be as exist. */
563 obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
564 obj->do_index_ops = &lfsck_orphan_index_ops;
565 llsd->llsd_rb_obj = obj;
566 llsd->llsd_rbtree_valid = 1;
567 dev->dd_record_fid_accessed = 1;
/* Tear down the orphan rbtree: stop FID-accessed recording, invalidate the
 * tree under the write lock so concurrent users back off, then erase and
 * free every node and drop the stand-in object reference. */
572 static void lfsck_rbtree_cleanup(const struct lu_env *env,
573 struct lfsck_component *com)
575 struct lfsck_instance *lfsck = com->lc_lfsck;
576 struct lfsck_layout_slave_data *llsd = com->lc_data;
577 struct rb_node *node = rb_first(&llsd->llsd_rb_root);
578 struct rb_node *next;
579 struct lfsck_rbtree_node *lrn;
581 lfsck->li_bottom->dd_record_fid_accessed = 0;
582 /* Invalid the rbtree, then no others will use it. */
583 write_lock(&llsd->llsd_rb_lock);
584 llsd->llsd_rbtree_valid = 0;
585 write_unlock(&llsd->llsd_rb_lock);
587 while (node != NULL) {
588 next = rb_next(node);
589 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
590 rb_erase(node, &llsd->llsd_rb_root);
591 lfsck_rbtree_free(lrn);
595 if (llsd->llsd_rb_obj != NULL) {
596 lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
597 llsd->llsd_rb_obj = NULL;
/* Record that OST-object @fid is known (and, when @accessed — a parameter
 * not visible in this excerpt — also accessed) in the rbtree bitmaps.
 * Ignores insane/LAST_ID fids and non-IDIF/non-normal sequences.  If no
 * node covers the fid, drops the read lock, allocates one, retakes the
 * write lock revalidating the tree, and inserts (freeing the spare on
 * collision).  On failure while @accessed, orphan handling is unreliable,
 * so the device is flagged LF_INCOMPLETE and the rbtree torn down. */
601 static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
602 struct lfsck_component *com,
603 const struct lu_fid *fid,
606 struct lfsck_layout_slave_data *llsd = com->lc_data;
607 struct lfsck_rbtree_node *lrn;
613 CDEBUG(D_LFSCK, "%s: update bitmap for "DFID"\n",
614 lfsck_lfsck2name(com->lc_lfsck), PFID(fid));
616 if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
619 if (!fid_is_idif(fid) && !fid_is_norm(fid))
622 read_lock(&llsd->llsd_rb_lock);
623 if (!llsd->llsd_rbtree_valid)
624 GOTO(unlock, rc = 0);
626 lrn = lfsck_rbtree_search(llsd, fid, NULL);
628 struct lfsck_rbtree_node *tmp;
632 read_unlock(&llsd->llsd_rb_lock);
633 tmp = lfsck_rbtree_new(env, fid);
635 GOTO(out, rc = PTR_ERR(tmp));
638 write_lock(&llsd->llsd_rb_lock);
639 if (!llsd->llsd_rbtree_valid) {
640 lfsck_rbtree_free(tmp);
641 GOTO(unlock, rc = 0);
644 lrn = lfsck_rbtree_insert(llsd, tmp);
646 lfsck_rbtree_free(tmp);
649 idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
650 /* Any accessed object must be a known object. */
651 if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
652 atomic_inc(&lrn->lrn_known_count);
653 if (accessed && !test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
654 atomic_inc(&lrn->lrn_accessed_count);
656 GOTO(unlock, rc = 0);
660 write_unlock(&llsd->llsd_rb_lock);
662 read_unlock(&llsd->llsd_rb_lock);
664 if (rc != 0 && accessed) {
665 struct lfsck_layout *lo = com->lc_file_ram;
667 CERROR("%s: Fail to update object accessed bitmap, will cause "
668 "incorrect LFSCK OST-object handling, so disable it to "
669 "cancel orphan handling for related device. rc = %d.\n",
670 lfsck_lfsck2name(com->lc_lfsck), rc);
671 lo->ll_flags |= LF_INCOMPLETE;
672 lfsck_rbtree_cleanup(env, com);
/* Field-by-field conversion of an on-disk (little-endian) lfsck_layout
 * record into host byte order.  Must mirror lfsck_layout_cpu_to_le(). */
676 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
677 const struct lfsck_layout *src)
681 des->ll_magic = le32_to_cpu(src->ll_magic);
682 des->ll_status = le32_to_cpu(src->ll_status);
683 des->ll_flags = le32_to_cpu(src->ll_flags);
684 des->ll_success_count = le32_to_cpu(src->ll_success_count);
685 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
686 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
687 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
688 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
689 des->ll_time_last_checkpoint =
690 le64_to_cpu(src->ll_time_last_checkpoint);
691 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
692 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
693 des->ll_pos_first_inconsistent =
694 le64_to_cpu(src->ll_pos_first_inconsistent);
695 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
696 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
697 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
698 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
699 for (i = 0; i < LLIT_MAX; i++)
700 des->ll_objs_repaired[i] =
701 le64_to_cpu(src->ll_objs_repaired[i]);
702 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/* Inverse of lfsck_layout_le_to_cpu(): convert a host-order lfsck_layout
 * record to on-disk little-endian form before writing it out. */
705 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
706 const struct lfsck_layout *src)
710 des->ll_magic = cpu_to_le32(src->ll_magic);
711 des->ll_status = cpu_to_le32(src->ll_status);
712 des->ll_flags = cpu_to_le32(src->ll_flags);
713 des->ll_success_count = cpu_to_le32(src->ll_success_count);
714 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
715 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
716 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
717 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
718 des->ll_time_last_checkpoint =
719 cpu_to_le64(src->ll_time_last_checkpoint);
720 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
721 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
722 des->ll_pos_first_inconsistent =
723 cpu_to_le64(src->ll_pos_first_inconsistent);
724 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
725 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
726 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
727 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
728 for (i = 0; i < LLIT_MAX; i++)
729 des->ll_objs_repaired[i] =
730 cpu_to_le64(src->ll_objs_repaired[i]);
731 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/* Load the persistent lfsck_layout trace file into RAM: read it from the
 * component object, convert from little-endian, and validate the magic. */
735 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
736 * \retval 0: succeed.
737 * \retval -ve: failed cases.
739 static int lfsck_layout_load(const struct lu_env *env,
740 struct lfsck_component *com)
742 struct lfsck_layout *lo = com->lc_file_ram;
743 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
744 ssize_t size = com->lc_file_size;
748 rc = dbo->dbo_read(env, com->lc_obj,
749 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
754 CWARN("%s: failed to load lfsck_layout: rc = %d\n",
755 lfsck_lfsck2name(com->lc_lfsck), rc);
/* Short read: the record is corrupted; report "broken" to caller. */
757 } else if (rc != size) {
758 CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
759 lfsck_lfsck2name(com->lc_lfsck), rc);
763 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
764 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
765 CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
766 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
767 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/* Persist the in-RAM lfsck_layout state: convert it to little-endian into
 * lc_file_disk, then write it to the trace file inside a local transaction
 * (create / declare / start / write / stop), logging each failure stage. */
774 static int lfsck_layout_store(const struct lu_env *env,
775 struct lfsck_component *com)
777 struct dt_object *obj = com->lc_obj;
778 struct lfsck_instance *lfsck = com->lc_lfsck;
779 struct lfsck_layout *lo = com->lc_file_disk;
780 struct thandle *handle;
781 ssize_t size = com->lc_file_size;
786 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
787 handle = dt_trans_create(env, lfsck->li_bottom);
788 if (IS_ERR(handle)) {
789 rc = PTR_ERR(handle);
790 CERROR("%s: fail to create trans for storing lfsck_layout: "
791 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
795 rc = dt_declare_record_write(env, obj, lfsck_buf_get(env, lo, size),
798 CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
799 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
803 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
805 CERROR("%s: fail to start trans for storing lfsck_layout: "
806 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
810 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
813 CERROR("%s: fail to store lfsck_layout(1): size = %d, "
814 "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);
819 dt_trans_stop(env, lfsck->li_bottom, handle);
/* Reset the in-RAM lfsck_layout record to a pristine LS_INIT state and
 * persist it, serialized against other users via lc_sem. */
824 static int lfsck_layout_init(const struct lu_env *env,
825 struct lfsck_component *com)
827 struct lfsck_layout *lo = com->lc_file_ram;
830 memset(lo, 0, com->lc_file_size);
831 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
832 lo->ll_status = LS_INIT;
833 down_write(&com->lc_sem);
834 rc = lfsck_layout_store(env, com);
835 up_write(&com->lc_sem);
/* Decide whether @fid names an OST-object: first ask the FLD server for the
 * sequence's range type; if that is inconclusive, read the object's LMA and
 * test LMAC_FID_ON_OST; finally probe for the XATTR_NAME_FID EA (the
 * interpretation of that last probe's rc is not visible in this excerpt).
 * Returns 1 for OST-object, 0 otherwise (presumably). */
840 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
841 struct dt_object *obj, const struct lu_fid *fid)
843 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
844 struct lu_seq_range range = { 0 };
845 struct lustre_mdt_attrs *lma;
848 fld_range_set_any(&range);
849 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
851 if (fld_range_is_ost(&range))
857 lma = &lfsck_env_info(env)->lti_lma;
858 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
859 XATTR_NAME_LMA, BYPASS_CAPA);
860 if (rc == sizeof(*lma)) {
861 lustre_lma_swab(lma);
863 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
866 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/* Linear search of the (sorted ascending by lls_seq) sequence list for
 * @seq; the early-out on lls_seq > seq relies on that ordering.  Return
 * statements are not visible in this excerpt (presumably the match or
 * NULL). */
871 static struct lfsck_layout_seq *
872 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
874 struct lfsck_layout_seq *lls;
876 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
877 if (lls->lls_seq == seq)
880 if (lls->lls_seq > seq)
/* Insert @lls into llsd_seq_list keeping it sorted ascending by lls_seq:
 * find the first entry with a larger seq and link before it (or at tail). */
888 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
889 struct lfsck_layout_seq *lls)
891 struct lfsck_layout_seq *tmp;
892 struct list_head *pos = &llsd->llsd_seq_list;
894 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
895 if (lls->lls_seq < tmp->lls_seq) {
896 pos = &tmp->lls_list;
900 list_add_tail(&lls->lls_list, pos);
/* Rebuild a missing LAST_ID file for @obj's sequence: within one local
 * transaction declare+create the regular file and write the initial lastid
 * value (the lastid initialization line is not visible in this excerpt).
 * Skipped entirely under LPF_DRYRUN.  The create is performed under the
 * object write lock and only if the object still does not exist. */
904 lfsck_layout_lastid_create(const struct lu_env *env,
905 struct lfsck_instance *lfsck,
906 struct dt_object *obj)
908 struct lfsck_thread_info *info = lfsck_env_info(env);
909 struct lu_attr *la = &info->lti_la;
910 struct dt_object_format *dof = &info->lti_dof;
911 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
912 struct dt_device *dt = lfsck->li_bottom;
919 CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
920 fid_seq(lfsck_dto2fid(obj)));
922 if (bk->lb_param & LPF_DRYRUN)
925 memset(la, 0, sizeof(*la));
926 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
927 la->la_valid = LA_MODE | LA_UID | LA_GID;
928 dof->dof_type = dt_mode_to_dft(S_IFREG);
930 th = dt_trans_create(env, dt);
932 RETURN(rc = PTR_ERR(th));
934 rc = dt_declare_create(env, obj, la, NULL, dof, th);
938 rc = dt_declare_record_write(env, obj,
939 lfsck_buf_get(env, &lastid,
945 rc = dt_trans_start_local(env, dt, th);
949 dt_write_lock(env, obj, 0);
950 if (likely(!dt_object_exists(obj))) {
951 rc = dt_create(env, obj, la, NULL, dof, th);
953 rc = dt_record_write(env, obj,
954 lfsck_buf_get(env, &lastid, sizeof(lastid)),
957 dt_write_unlock(env, obj);
962 dt_trans_stop(env, dt, th);
/* Re-read the on-disk LAST_ID for @lls and reconcile with the value LFSCK
 * learned during scanning: if the stored value is smaller than the known
 * highest OID, the LAST_ID file crashed — keep the known value, flag
 * LF_CRASHED_LASTID and notify the out-of-LFSCK handler (once); otherwise
 * adopt the stored value when it is at least the current lls_lastid. */
968 lfsck_layout_lastid_reload(const struct lu_env *env,
969 struct lfsck_component *com,
970 struct lfsck_layout_seq *lls)
976 dt_read_lock(env, lls->lls_lastid_obj, 0);
977 rc = dt_record_read(env, lls->lls_lastid_obj,
978 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
979 dt_read_unlock(env, lls->lls_lastid_obj);
980 if (unlikely(rc != 0))
983 lastid = le64_to_cpu(lastid);
984 if (lastid < lls->lls_lastid_known) {
985 struct lfsck_instance *lfsck = com->lc_lfsck;
986 struct lfsck_layout *lo = com->lc_file_ram;
988 lls->lls_lastid = lls->lls_lastid_known;
990 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
991 LASSERT(lfsck->li_out_notify != NULL);
993 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
994 LE_LASTID_REBUILDING);
995 lo->ll_flags |= LF_CRASHED_LASTID;
997 } else if (lastid >= lls->lls_lastid) {
998 lls->lls_lastid = lastid;
/* Flush dirty LAST_ID values back to disk: walk every tracked sequence,
 * re-validating clean-looking ones via lfsck_layout_lastid_reload() (the
 * OFD may have advanced LAST_ID ahead of creation, hiding crashes), then
 * for each still-dirty sequence write the little-endian lastid in its own
 * local transaction.  Dry-run mode skips the write.  Failures are logged
 * per stage; the aggregated rc handling is partly missing here. */
1006 lfsck_layout_lastid_store(const struct lu_env *env,
1007 struct lfsck_component *com)
1009 struct lfsck_instance *lfsck = com->lc_lfsck;
1010 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1011 struct dt_device *dt = lfsck->li_bottom;
1012 struct lfsck_layout_slave_data *llsd = com->lc_data;
1013 struct lfsck_layout_seq *lls;
1019 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
1022 /* XXX: Add the code back if we really found related
1023 * inconsistent cases in the future. */
1025 if (!lls->lls_dirty) {
1026 /* In OFD, before the pre-creation, the LAST_ID
1027 * file will be updated firstly, which may hide
1028 * some potential crashed cases. For example:
1030 * The old obj1's ID is higher than old LAST_ID
1031 * but lower than the new LAST_ID, but the LFSCK
1032 * have not touch the obj1 until the OFD updated
1033 * the LAST_ID. So the LFSCK does not regard it
1034 * as crashed case. But when OFD does not create
1035 * successfully, it will set the LAST_ID as the
1036 * real created objects' ID, then LFSCK needs to
1037 * found related inconsistency. */
1038 rc = lfsck_layout_lastid_reload(env, com, lls);
1039 if (likely(!lls->lls_dirty))
1044 CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
1045 " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);
1047 if (bk->lb_param & LPF_DRYRUN) {
1052 th = dt_trans_create(env, dt);
1055 CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
1056 lfsck_lfsck2name(com->lc_lfsck),
1061 lastid = cpu_to_le64(lls->lls_lastid);
1062 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
1063 lfsck_buf_get(env, &lastid,
1069 rc = dt_trans_start_local(env, dt, th);
1073 dt_write_lock(env, lls->lls_lastid_obj, 0);
1074 rc = dt_record_write(env, lls->lls_lastid_obj,
1075 lfsck_buf_get(env, &lastid,
1076 sizeof(lastid)), &pos, th);
1077 dt_write_unlock(env, lls->lls_lastid_obj);
1082 dt_trans_stop(env, dt, th);
1085 CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
1086 lfsck_lfsck2name(com->lc_lfsck),
/* Locate and read the LAST_ID object for @lls->lls_seq.  If the file does
 * not exist, mark LF_CRASHED_LASTID (notifying once), optionally pause for
 * fault-injection (OBD_FAIL_LFSCK_DELAY4, releasing lc_sem while waiting),
 * and recreate it via lfsck_layout_lastid_create().  Otherwise read the
 * stored value; a short/zero read also marks the LAST_ID as crashed.  On
 * success the object reference is kept in lls->lls_lastid_obj; on failure
 * it is dropped (the branching between L582/L583 is not fully visible). */
1095 lfsck_layout_lastid_load(const struct lu_env *env,
1096 struct lfsck_component *com,
1097 struct lfsck_layout_seq *lls)
1099 struct lfsck_instance *lfsck = com->lc_lfsck;
1100 struct lfsck_layout *lo = com->lc_file_ram;
1101 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1102 struct dt_object *obj;
1107 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
1108 obj = dt_locate(env, lfsck->li_bottom, fid);
1110 RETURN(PTR_ERR(obj));
1112 /* LAST_ID crashed, to be rebuilt */
1113 if (!dt_object_exists(obj)) {
1114 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1115 LASSERT(lfsck->li_out_notify != NULL);
1117 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1118 LE_LASTID_REBUILDING);
1119 lo->ll_flags |= LF_CRASHED_LASTID;
1121 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
1123 struct l_wait_info lwi = LWI_TIMEOUT(
1124 cfs_time_seconds(cfs_fail_val),
1127 up_write(&com->lc_sem);
1128 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1129 !thread_is_running(&lfsck->li_thread),
1131 down_write(&com->lc_sem);
1135 rc = lfsck_layout_lastid_create(env, lfsck, obj);
1137 dt_read_lock(env, obj, 0);
1138 rc = dt_read(env, obj,
1139 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
1141 dt_read_unlock(env, obj);
1142 if (rc != 0 && rc != sizeof(__u64))
1143 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
1145 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
1146 LASSERT(lfsck->li_out_notify != NULL);
1148 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1149 LE_LASTID_REBUILDING);
1150 lo->ll_flags |= LF_CRASHED_LASTID;
1153 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
1161 lfsck_object_put(env, obj);
1163 lls->lls_lastid_obj = obj;
/* Account one phase-1 verification failure and, if this is the first
 * inconsistency seen, remember the current OI-iterator position so a later
 * run can resume/inspect from there. */
1168 static void lfsck_layout_record_failure(const struct lu_env *env,
1169 struct lfsck_instance *lfsck,
1170 struct lfsck_layout *lo)
1172 lo->ll_objs_failed_phase1++;
1173 if (unlikely(lo->ll_pos_first_inconsistent == 0))
1174 lo->ll_pos_first_inconsistent =
1175 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
/* Completion callback for the master's async RPCs to OST/MDT targets.
 * Behavior depends on the request event (lr_event):
 *  - start (LE_START, presumably — the case label is missing): on failure
 *    flag LF_INCOMPLETE; on success link the target onto the relevant
 *    llmd_{ost,mdt}{,_phase1}_list under ltd_lock unless it is dead/done;
 *  - LE_PHASE1_DONE / LE_PHASE2_DONE: just warn on failure;
 *  - query (LE_QUERY, presumably): on RPC failure or a bad reply delink
 *    the target from both layout lists; otherwise move targets still in
 *    LS_SCANNING_PHASE2 to the phase2 list, and delink targets in any
 *    other state;
 *  - default: unexpected event, log it.
 * Finally, for non-shared args, drop the component reference (and
 * presumably the target reference — the line is not visible).
 * NOTE(review): several "&ltd"/"&ltds" tokens below appear mangled to
 * "<d"/"<ds" by the extraction; the original text is preserved as-is. */
1179 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
1180 struct ptlrpc_request *req,
1183 struct lfsck_async_interpret_args *laia = args;
1184 struct lfsck_component *com = laia->laia_com;
1185 struct lfsck_layout_master_data *llmd = com->lc_data;
1186 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
1187 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
1188 struct lfsck_request *lr = laia->laia_lr;
1190 switch (lr->lr_event) {
1193 struct lfsck_layout *lo = com->lc_file_ram;
1195 CERROR("%s: fail to notify %s %x for layout start: "
1196 "rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
1197 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1198 ltd->ltd_index, rc);
1199 lo->ll_flags |= LF_INCOMPLETE;
1203 spin_lock(<ds->ltd_lock);
1204 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1205 spin_unlock(<ds->ltd_lock);
1209 if (lr->lr_flags & LEF_TO_OST) {
1210 if (list_empty(<d->ltd_layout_list))
1211 list_add_tail(<d->ltd_layout_list,
1212 &llmd->llmd_ost_list);
1213 if (list_empty(<d->ltd_layout_phase_list))
1214 list_add_tail(<d->ltd_layout_phase_list,
1215 &llmd->llmd_ost_phase1_list);
1217 if (list_empty(<d->ltd_layout_list))
1218 list_add_tail(<d->ltd_layout_list,
1219 &llmd->llmd_mdt_list);
1220 if (list_empty(<d->ltd_layout_phase_list))
1221 list_add_tail(<d->ltd_layout_phase_list,
1222 &llmd->llmd_mdt_phase1_list);
1224 spin_unlock(<ds->ltd_lock);
1227 case LE_PHASE1_DONE:
1228 case LE_PHASE2_DONE:
1230 if (rc != 0 && rc != -EALREADY)
1231 CWARN("%s: fail to notify %s %x for layout: "
1232 "event = %d, rc = %d\n",
1233 lfsck_lfsck2name(com->lc_lfsck),
1234 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1235 ltd->ltd_index, lr->lr_event, rc);
1238 struct lfsck_reply *reply;
1241 spin_lock(<ds->ltd_lock);
1242 list_del_init(<d->ltd_layout_phase_list);
1243 list_del_init(<d->ltd_layout_list);
1244 spin_unlock(<ds->ltd_lock);
1248 reply = req_capsule_server_get(&req->rq_pill,
1250 if (reply == NULL) {
1252 CERROR("%s: invalid return value: rc = %d\n",
1253 lfsck_lfsck2name(com->lc_lfsck), rc);
1254 spin_lock(<ds->ltd_lock);
1255 list_del_init(<d->ltd_layout_phase_list);
1256 list_del_init(<d->ltd_layout_list);
1257 spin_unlock(<ds->ltd_lock);
1261 switch (reply->lr_status) {
1262 case LS_SCANNING_PHASE1:
1264 case LS_SCANNING_PHASE2:
1265 spin_lock(<ds->ltd_lock);
1266 list_del_init(<d->ltd_layout_phase_list);
1267 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1268 spin_unlock(<ds->ltd_lock);
1272 if (lr->lr_flags & LEF_TO_OST)
1273 list_add_tail(<d->ltd_layout_phase_list,
1274 &llmd->llmd_ost_phase2_list);
1276 list_add_tail(<d->ltd_layout_phase_list,
1277 &llmd->llmd_mdt_phase2_list);
1278 spin_unlock(<ds->ltd_lock);
1281 spin_lock(<ds->ltd_lock);
1282 list_del_init(<d->ltd_layout_phase_list);
1283 list_del_init(<d->ltd_layout_list);
1284 spin_unlock(<ds->ltd_lock);
1290 CERROR("%s: unexpected event: rc = %d\n",
1291 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
1295 if (!laia->laia_shared) {
1297 lfsck_component_put(env, com);
/* Poll the status of all targets still in phase-1: build an LE_QUERY
 * request and send it asynchronously to every target on the MDT phase-1
 * list first (then, after the set completes, the OST phase-1 list — the
 * loop-back control flow around L713-L714 is only partly visible).  Each
 * target is rotated to the list tail and stamped with llmd_touch_gen so
 * one sweep visits it exactly once; a reference is taken per RPC and the
 * reply is handled by lfsck_layout_master_async_interpret().
 * NOTE(review): "&ltd"/"&ltds" tokens appear mangled to "<d"/"<ds" by the
 * extraction; original text preserved as-is. */
1303 static int lfsck_layout_master_query_others(const struct lu_env *env,
1304 struct lfsck_component *com)
1306 struct lfsck_thread_info *info = lfsck_env_info(env);
1307 struct lfsck_request *lr = &info->lti_lr;
1308 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1309 struct lfsck_instance *lfsck = com->lc_lfsck;
1310 struct lfsck_layout_master_data *llmd = com->lc_data;
1311 struct ptlrpc_request_set *set;
1312 struct lfsck_tgt_descs *ltds;
1313 struct lfsck_tgt_desc *ltd;
1314 struct list_head *head;
1319 set = ptlrpc_prep_set();
1323 llmd->llmd_touch_gen++;
1324 memset(lr, 0, sizeof(*lr));
1325 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1326 lr->lr_event = LE_QUERY;
1327 lr->lr_active = LFSCK_TYPE_LAYOUT;
1328 laia->laia_com = com;
1330 laia->laia_shared = 0;
1332 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1333 ltds = &lfsck->li_mdt_descs;
1335 head = &llmd->llmd_mdt_phase1_list;
1339 ltds = &lfsck->li_ost_descs;
1340 lr->lr_flags = LEF_TO_OST;
1341 head = &llmd->llmd_ost_phase1_list;
1344 laia->laia_ltds = ltds;
1345 spin_lock(<ds->ltd_lock);
1346 while (!list_empty(head)) {
1347 ltd = list_entry(head->next,
1348 struct lfsck_tgt_desc,
1349 ltd_layout_phase_list);
1350 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1353 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1354 list_del(<d->ltd_layout_phase_list);
1355 list_add_tail(<d->ltd_layout_phase_list, head);
1356 atomic_inc(<d->ltd_ref);
1357 laia->laia_ltd = ltd;
1358 spin_unlock(<ds->ltd_lock);
1359 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1360 lfsck_layout_master_async_interpret,
1363 CERROR("%s: fail to query %s %x for layout: rc = %d\n",
1364 lfsck_lfsck2name(lfsck),
1365 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1366 ltd->ltd_index, rc);
1370 spin_lock(<ds->ltd_lock);
1372 spin_unlock(<ds->ltd_lock);
1374 rc = ptlrpc_set_wait(set);
1376 ptlrpc_set_destroy(set);
1380 if (!(lr->lr_flags & LEF_TO_OST) &&
1381 list_empty(&llmd->llmd_mdt_phase1_list))
1384 ptlrpc_set_destroy(set);
1386 RETURN(rc1 != 0 ? rc1 : rc);
/* The master may move on to orphan handling once no MDT is still in
 * phase-1 and the OSTs have either reached phase-2 or finished entirely
 * (both OST phase lists drained). */
1390 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1392 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1393 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1394 list_empty(&llmd->llmd_ost_phase1_list));
/*
 * Broadcast the LFSCK event carried in @lr (lr->lr_event) from the layout
 * master to the other targets via async LFSCK_NOTIFY RPCs collected in a
 * ptlrpc request set.  The switch below handles: start-style events (notify
 * OSTs first, then link other MDTs locally when LPF_ALL_TGT is set),
 * LE_STOP/LE_PHASE2_DONE/LE_PEER_EXIT (drain the MDT list if needed, then
 * the OST list), and LE_PHASE1_DONE (walk llmd_mdt_phase1_list using the
 * llmd_touch_gen generation counter to visit each target once).
 *
 * NOTE(review): this listing is an elided extraction — many original lines
 * (case labels, error branches, closing braces) are missing, and the tokens
 * "<ds->" / "<d->" below appear to be HTML-entity mangling of "&ltds->" /
 * "&ltd->" (i.e. "&" followed by "ltds"/"ltd"); confirm against upstream
 * lustre/lfsck/lfsck_layout.c before relying on this text.
 */
1397 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1398 struct lfsck_component *com,
1399 struct lfsck_request *lr)
1401 struct lfsck_thread_info *info = lfsck_env_info(env);
1402 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1403 struct lfsck_instance *lfsck = com->lc_lfsck;
1404 struct lfsck_layout_master_data *llmd = com->lc_data;
1405 struct lfsck_layout *lo = com->lc_file_ram;
1406 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1407 struct ptlrpc_request_set *set;
1408 struct lfsck_tgt_descs *ltds;
1409 struct lfsck_tgt_desc *ltd;
1410 struct lfsck_tgt_desc *next;
1411 struct list_head *head;
1416 set = ptlrpc_prep_set();
1420 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1421 lr->lr_active = LFSCK_TYPE_LAYOUT;
1422 laia->laia_com = com;
1424 laia->laia_shared = 0;
1425 switch (lr->lr_event) {
1427 /* Notify OSTs firstly, then handle other MDTs if needed. */
1428 ltds = &lfsck->li_ost_descs;
1429 laia->laia_ltds = ltds;
1430 down_read(<ds->ltd_rw_sem);
1431 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1432 ltd = lfsck_tgt_get(ltds, idx);
1433 LASSERT(ltd != NULL);
1435 laia->laia_ltd = ltd;
1436 ltd->ltd_layout_done = 0;
1437 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1438 lfsck_layout_master_async_interpret,
1439 laia, LFSCK_NOTIFY);
1441 CERROR("%s: fail to notify %s %x for layout "
1443 lfsck_lfsck2name(lfsck),
1444 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* A failed notification leaves the scan result only partial. */
1447 lo->ll_flags |= LF_INCOMPLETE;
1450 up_read(<ds->ltd_rw_sem);
1453 rc = ptlrpc_set_wait(set);
1455 ptlrpc_set_destroy(set);
1459 if (!(bk->lb_param & LPF_ALL_TGT))
1462 /* link other MDT targets locallly. */
1463 ltds = &lfsck->li_mdt_descs;
1464 spin_lock(<ds->ltd_lock);
1465 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1466 ltd = LTD_TGT(ltds, idx);
1467 LASSERT(ltd != NULL);
1469 if (!list_empty(<d->ltd_layout_list))
1472 list_add_tail(<d->ltd_layout_list,
1473 &llmd->llmd_mdt_list);
1474 list_add_tail(<d->ltd_layout_phase_list,
1475 &llmd->llmd_mdt_phase1_list);
1477 spin_unlock(<ds->ltd_lock);
1480 case LE_PHASE2_DONE:
1481 case LE_PEER_EXIT: {
1482 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1483 if (bk->lb_param & LPF_ALL_TGT) {
1484 head = &llmd->llmd_mdt_list;
1485 ltds = &lfsck->li_mdt_descs;
1486 if (lr->lr_event == LE_STOP) {
1487 /* unlink other MDT targets locallly. */
1488 spin_lock(<ds->ltd_lock);
1489 list_for_each_entry_safe(ltd, next, head,
1491 list_del_init(<d->ltd_layout_phase_list);
1492 list_del_init(<d->ltd_layout_list);
1494 spin_unlock(<ds->ltd_lock);
1496 lr->lr_flags |= LEF_TO_OST;
1497 head = &llmd->llmd_ost_list;
1498 ltds = &lfsck->li_ost_descs;
1500 lr->lr_flags &= ~LEF_TO_OST;
1503 lr->lr_flags |= LEF_TO_OST;
1504 head = &llmd->llmd_ost_list;
1505 ltds = &lfsck->li_ost_descs;
1509 laia->laia_ltds = ltds;
/* Drop the spinlock around each async RPC; the target reference
 * (atomic_inc of ltd_ref) keeps @ltd alive across the unlocked span. */
1510 spin_lock(<ds->ltd_lock);
1511 while (!list_empty(head)) {
1512 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1514 if (!list_empty(<d->ltd_layout_phase_list))
1515 list_del_init(<d->ltd_layout_phase_list);
1516 list_del_init(<d->ltd_layout_list);
1517 atomic_inc(<d->ltd_ref);
1518 laia->laia_ltd = ltd;
1519 spin_unlock(<ds->ltd_lock);
1520 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1521 lfsck_layout_master_async_interpret,
1522 laia, LFSCK_NOTIFY);
1524 CERROR("%s: fail to notify %s %x for layout "
1525 "stop/phase2: rc = %d\n",
1526 lfsck_lfsck2name(lfsck),
1527 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1528 "MDT", ltd->ltd_index, rc);
1531 spin_lock(<ds->ltd_lock);
1533 spin_unlock(<ds->ltd_lock);
1535 rc = ptlrpc_set_wait(set);
1537 ptlrpc_set_destroy(set);
/* After draining the MDT list, repeat the loop for the OSTs. */
1541 if (!(lr->lr_flags & LEF_TO_OST)) {
1542 lr->lr_flags |= LEF_TO_OST;
1543 head = &llmd->llmd_ost_list;
1544 ltds = &lfsck->li_ost_descs;
1549 case LE_PHASE1_DONE:
/* Bump the generation so each MDT in phase1 is touched exactly once. */
1550 llmd->llmd_touch_gen++;
1551 ltds = &lfsck->li_mdt_descs;
1552 laia->laia_ltds = ltds;
1553 spin_lock(<ds->ltd_lock);
1554 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1555 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1556 struct lfsck_tgt_desc,
1557 ltd_layout_phase_list);
1558 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1561 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1562 list_del_init(<d->ltd_layout_phase_list);
1563 list_add_tail(<d->ltd_layout_phase_list,
1564 &llmd->llmd_mdt_phase1_list);
1565 atomic_inc(<d->ltd_ref);
1566 laia->laia_ltd = ltd;
1567 spin_unlock(<ds->ltd_lock);
1568 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1569 lfsck_layout_master_async_interpret,
1570 laia, LFSCK_NOTIFY);
1572 CERROR("%s: fail to notify MDT %x for layout "
1573 "phase1 done: rc = %d\n",
1574 lfsck_lfsck2name(lfsck),
1575 ltd->ltd_index, rc);
1578 spin_lock(<ds->ltd_lock);
1580 spin_unlock(<ds->ltd_lock);
1583 CERROR("%s: unexpected LFSCK event: rc = %d\n",
1584 lfsck_lfsck2name(lfsck), lr->lr_event);
1589 rc = ptlrpc_set_wait(set);
1590 ptlrpc_set_destroy(set);
/*
 * Record the outcome of the layout LFSCK phase2 ("double scan") in the
 * component's on-disk trace file: accumulate phase2 run time and checked
 * count under com->lc_sem, pick the final status (LS_PARTIAL when
 * LF_INCOMPLETE was raised, LS_COMPLETED on success, LS_STOPPED/LS_FAILED
 * otherwise), then persist via lfsck_layout_store().
 *
 * NOTE(review): this listing elides original lines (e.g. the "rc" parameter
 * line, the "if (rc > 0)" branch header and closing braces) — the branch
 * structure here is incomplete; confirm against upstream source.
 */
1595 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1596 struct lfsck_component *com,
1599 struct lfsck_instance *lfsck = com->lc_lfsck;
1600 struct lfsck_layout *lo = com->lc_file_ram;
1601 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1603 down_write(&com->lc_sem);
1604 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1605 HALF_SEC - lfsck->li_time_last_checkpoint);
1606 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1607 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1610 com->lc_journal = 0;
1611 if (lo->ll_flags & LF_INCOMPLETE)
1612 lo->ll_status = LS_PARTIAL;
1614 lo->ll_status = LS_COMPLETED;
/* In dry-run mode keep the inconsistency flags for a later real repair. */
1615 if (!(bk->lb_param & LPF_DRYRUN))
1616 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1617 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1618 lo->ll_success_count++;
1619 } else if (rc == 0) {
1620 lo->ll_status = lfsck->li_status;
1621 if (lo->ll_status == 0)
1622 lo->ll_status = LS_STOPPED;
1624 lo->ll_status = LS_FAILED;
1627 rc = lfsck_layout_store(env, com);
1628 up_write(&com->lc_sem);
/*
 * Take a local LDLM EX ibits lock (the bits given by @bits) on @obj,
 * storing the lock handle in @lh.  Uses the per-thread policy/res_id
 * scratch buffers from lfsck_env_info(env).
 *
 * NOTE(review): the tail of this function (success return and the failure
 * path around the memset of *lh) is elided in this listing; the visible
 * memset clears the handle, presumably on the non-ELDLM_OK path — confirm
 * against upstream source.
 */
1633 static int lfsck_layout_lock(const struct lu_env *env,
1634 struct lfsck_component *com,
1635 struct dt_object *obj,
1636 struct lustre_handle *lh, __u64 bits)
1638 struct lfsck_thread_info *info = lfsck_env_info(env);
1639 ldlm_policy_data_t *policy = &info->lti_policy;
1640 struct ldlm_res_id *resid = &info->lti_resid;
1641 struct lfsck_instance *lfsck = com->lc_lfsck;
1642 __u64 flags = LDLM_FL_ATOMIC_CB;
1645 LASSERT(lfsck->li_namespace != NULL);
1647 memset(policy, 0, sizeof(*policy));
1648 policy->l_inodebits.bits = bits;
1649 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1650 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1651 policy, LCK_EX, &flags, ldlm_blocking_ast,
1652 ldlm_completion_ast, NULL, NULL, 0,
1653 LVB_T_NONE, NULL, lh);
1654 if (rc == ELDLM_OK) {
1657 memset(lh, 0, sizeof(*lh));
/*
 * Release the EX lock taken by lfsck_layout_lock() and invalidate the
 * handle so a repeated call is a no-op.  Safe to call with an unused
 * (already-cleared) handle.
 */
1664 static void lfsck_layout_unlock(struct lustre_handle *lh)
1666 if (lustre_handle_is_used(lh)) {
1667 ldlm_lock_decref(lh, LCK_EX);
1668 memset(lh, 0, sizeof(*lh));
/*
 * Stop transaction @handle, propagating @result through th_result so the
 * commit callbacks see the caller's verdict.  The final return value
 * combination is on lines elided from this listing.
 */
1672 static int lfsck_layout_trans_stop(const struct lu_env *env,
1673 struct dt_device *dev,
1674 struct thandle *handle, int result)
1678 handle->th_result = result;
1679 rc = dt_trans_stop(env, dev, handle);
1689 * Get the system default stripe size.
1691 * \param[in] env pointer to the thread context
1692 * \param[in] lfsck pointer to the lfsck instance
1693 * \param[out] size pointer to the default stripe size
1695 * \retval 0 for success
1696 * \retval negative error number on failure
/*
 * Reads the default LOV EA from the backend root object located via
 * lfsck->li_local_root_fid; the stored stripe size stays little-endian
 * (see comment below).  The "size" output-parameter line and several
 * error-handling lines are elided in this listing.
 */
1698 static int lfsck_layout_get_def_stripesize(const struct lu_env *env,
1699 struct lfsck_instance *lfsck,
1702 struct lov_user_md *lum = &lfsck_env_info(env)->lti_lum;
1703 struct dt_object *root;
1706 root = dt_locate(env, lfsck->li_next, &lfsck->li_local_root_fid);
1708 return PTR_ERR(root);
1710 /* Get the default stripe size via xattr_get on the backend root. */
1711 rc = dt_xattr_get(env, root, lfsck_buf_get(env, lum, sizeof(*lum)),
1712 XATTR_NAME_LOV, BYPASS_CAPA);
1714 /* The lum->lmm_stripe_size is LE mode. The *size also
1715 * should be LE mode. So it is unnecessary to convert. */
1716 *size = lum->lmm_stripe_size;
1718 } else if (unlikely(rc == 0)) {
1722 lfsck_object_put(env, root);
/*
 * Fill one (dummy) slot of the parent MDT-object's LOV EA with the child
 * OST-object's id/index, and clear LOV_PATTERN_F_HOLE from lmm_pattern
 * when the refilled slot was the last remaining dummy.  Finishes with a
 * dt_xattr_set() of the whole buffer under the caller's transaction.
 */
1728 * \retval +1: repaired
1729 * \retval 0: did nothing
1730 * \retval -ve: on error
1732 static int lfsck_layout_refill_lovea(const struct lu_env *env,
1733 struct thandle *handle,
1734 struct dt_object *parent,
1735 struct lu_fid *cfid,
1737 struct lov_ost_data_v1 *slot,
1738 int fl, __u32 ost_idx)
1740 struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
1741 struct lov_mds_md_v1 *lmm = buf->lb_buf;
/* Write the child object id into the target slot (on-disk LE format). */
1744 fid_to_ostid(cfid, oi);
1745 ostid_cpu_to_le(oi, &slot->l_ost_oi);
1746 slot->l_ost_gen = cpu_to_le32(0);
1747 slot->l_ost_idx = cpu_to_le32(ost_idx);
1749 if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE) {
1750 struct lov_ost_data_v1 *objs;
1754 count = le16_to_cpu(lmm->lmm_stripe_count);
1755 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
1756 objs = &lmm->lmm_objects[0];
1758 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Scan for any remaining dummy slot other than the one just filled. */
1759 for (i = 0; i < count; i++, objs++) {
1760 if (objs != slot && lovea_slot_is_dummy(objs))
1764 /* If the @slot is the last dummy slot to be refilled,
1765 * then drop LOV_PATTERN_F_HOLE from lmm::lmm_pattern. */
1767 lmm->lmm_pattern &= ~cpu_to_le32(LOV_PATTERN_F_HOLE);
1770 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
/*
 * Create (LU_XATTR_CREATE or @reset) or extend the parent MDT-object's
 * LOV EA so that stripe slot @ea_off exists, marking any skipped slots
 * as holes (LOV_PATTERN_F_HOLE + zeroed entries), bumping the layout
 * generation, and finally delegating the slot fill and xattr write to
 * lfsck_layout_refill_lovea().
 *
 * NOTE(review): this listing elides lines (e.g. "count = ea_off + 1",
 * error checks after lfsck_layout_get_def_stripesize, "if (gap > 0)")
 * so the visible flow is incomplete; confirm against upstream source.
 */
1779 * \retval +1: repaired
1780 * \retval 0: did nothing
1781 * \retval -ve: on error
1783 static int lfsck_layout_extend_lovea(const struct lu_env *env,
1784 struct lfsck_instance *lfsck,
1785 struct thandle *handle,
1786 struct dt_object *parent,
1787 struct lu_fid *cfid,
1788 struct lu_buf *buf, int fl,
1789 __u32 ost_idx, __u32 ea_off, bool reset)
1791 struct lov_mds_md_v1 *lmm = buf->lb_buf;
1792 struct lov_ost_data_v1 *objs;
1797 if (fl == LU_XATTR_CREATE || reset) {
1798 __u32 pattern = LOV_PATTERN_RAID0;
1801 LASSERT(buf->lb_len == lov_mds_md_size(count, LOV_MAGIC_V1));
/* A fresh EA that does not start at stripe 0 necessarily has holes. */
1803 if (ea_off != 0 || reset)
1804 pattern |= LOV_PATTERN_F_HOLE;
1806 memset(lmm, 0, buf->lb_len);
1807 lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
1808 lmm->lmm_pattern = cpu_to_le32(pattern);
1809 fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
1810 lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
1812 rc = lfsck_layout_get_def_stripesize(env, lfsck,
1813 &lmm->lmm_stripe_size);
1817 objs = &lmm->lmm_objects[ea_off];
1819 __u32 magic = le32_to_cpu(lmm->lmm_magic);
1822 count = le16_to_cpu(lmm->lmm_stripe_count);
1823 if (magic == LOV_MAGIC_V1)
1824 objs = &lmm->lmm_objects[count];
1826 objs = &((struct lov_mds_md_v3 *)lmm)->
1829 gap = ea_off - count;
1832 LASSERT(buf->lb_len == lov_mds_md_size(count, magic));
/* Zero the hole slots between the old end and the new slot. */
1835 memset(objs, 0, gap * sizeof(*objs));
1836 lmm->lmm_pattern |= cpu_to_le32(LOV_PATTERN_F_HOLE);
1839 lmm->lmm_layout_gen =
1840 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
1844 lmm->lmm_stripe_count = cpu_to_le16(count);
1845 rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
/*
 * Repair the child OST-object's parent-FID ("filter_fid") xattr so that it
 * points back at @parent with stripe index @ea_off: locate the child on
 * device @cdev, then declare + execute a dt_xattr_set of XATTR_NAME_FID
 * inside a local transaction.
 */
1852 * \retval +1: repaired
1853 * \retval 0: did nothing
1854 * \retval -ve: on error
1856 static int lfsck_layout_update_pfid(const struct lu_env *env,
1857 struct lfsck_component *com,
1858 struct dt_object *parent,
1859 struct lu_fid *cfid,
1860 struct dt_device *cdev, __u32 ea_off)
1862 struct filter_fid *pfid = &lfsck_env_info(env)->lti_new_pfid;
1863 struct dt_object *child;
1864 struct thandle *handle;
1865 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1870 child = lfsck_object_find_by_dev(env, cdev, cfid);
1872 RETURN(PTR_ERR(child));
1874 handle = dt_trans_create(env, cdev);
1876 GOTO(out, rc = PTR_ERR(handle));
1878 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1879 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1880 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
1881 * MDT-object's FID::f_ver, instead it is the OST-object index in its
1882 * parent MDT-object's layout EA. */
1883 pfid->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
1884 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1886 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1890 rc = dt_trans_start(env, cdev, handle);
1894 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
/* Map success (0) to +1 = "repaired" per this file's retval convention. */
1897 GOTO(stop, rc = (rc == 0 ? 1 : rc));
1900 dt_trans_stop(env, cdev, handle);
1903 lu_object_put(env, &child->do_lu);
1909 * This function will create the MDT-object will the given (partial) LOV EA.
1911 * Under some data corruption cases, the MDT-object of the file may be lost,
1912 * but its OST-objects, or some of them are there. The layout LFSCK needs to
1913 * re-create the MDT-object with the orphan OST-object(s) information.
1915 * On the other hand, the LFSCK may has created some OST-object for repairing
1916 * dangling LOV EA reference, but as the LFSCK processing, it may find that
1917 * the old OST-object is there and should replace the former new created OST
1918 * object. Unfortunately, some others have modified such newly created object.
1919 * To keep the data (both new and old), the LFSCK will create MDT-object with
1920 * new FID to reference the original OST-object.
1922 * \param[in] env pointer to the thread context
1923 * \param[in] com pointer to the lfsck component
1924 * \param[in] ltd pointer to target device descriptor
1925 * \param[in] rec pointer to the record for the orphan OST-object
1926 * \param[in] cfid pointer to FID for the orphan OST-object
1927 * \param[in] infix additional information, such as the FID for original
1928 * MDT-object and the stripe offset in the LOV EA
1929 * \param[in] type the type for describing why the orphan MDT-object is
1930 * created. The rules are as following:
1932 * type "C": Multiple OST-objects claim the same MDT-object and the
1933 * same slot in the layout EA. Then the LFSCK will create
1934 * new MDT-object(s) to hold the conflict OST-object(s).
1936 * type "N": The orphan OST-object does not know which one was the
1937 * real parent MDT-object, so the LFSCK uses new FID for
1938 * its parent MDT-object.
1940 * type "R": The orphan OST-object knows its parent MDT-object FID,
1941 * but does not know the position (the file name) in the
1944 * The orphan name will be like:
1945 * ${FID}-${infix}-${type}-${conflict_version}
1947 * \param[in] ea_off the stripe offset in the LOV EA
1949 * \retval positive on repaired something
1950 * \retval if needs to repair nothing
1951 * \retval negative error number on failure
/*
 * NOTE(review): the "infix"/"type" parameter lines and various error
 * branches are elided from this listing (gaps in embedded line numbers);
 * consult upstream source for the complete declare/execute sequence.
 */
1953 static int lfsck_layout_recreate_parent(const struct lu_env *env,
1954 struct lfsck_component *com,
1955 struct lfsck_tgt_desc *ltd,
1956 struct lu_orphan_rec *rec,
1957 struct lu_fid *cfid,
1962 struct lfsck_thread_info *info = lfsck_env_info(env);
1963 char *name = info->lti_key;
1964 struct lu_attr *la = &info->lti_la;
1965 struct dt_object_format *dof = &info->lti_dof;
1966 struct lfsck_instance *lfsck = com->lc_lfsck;
1967 struct lu_fid *pfid = &rec->lor_fid;
1968 struct lu_fid *tfid = &info->lti_fid3;
1969 struct dt_device *next = lfsck->li_next;
1970 struct dt_object *pobj = NULL;
1971 struct dt_object *cobj = NULL;
1972 struct thandle *th = NULL;
1973 struct lu_buf *pbuf = NULL;
1974 struct lu_buf *ea_buf = &info->lti_big_buf;
1975 struct lustre_handle lh = { 0 };
1976 int buflen = ea_buf->lb_len;
1981 /* Create .lustre/lost+found/MDTxxxx when needed. */
1982 if (unlikely(lfsck->li_lpf_obj == NULL)) {
1983 rc = lfsck_create_lpf(env, lfsck);
/* No parent FID known: allocate a new one and prepare the child's
 * filter_fid so the OST-object can be pointed at the new parent. */
1988 if (fid_is_zero(pfid)) {
1989 struct filter_fid *ff = &info->lti_new_pfid;
1991 rc = lfsck_fid_alloc(env, lfsck, pfid, false);
1995 ff->ff_parent.f_seq = cpu_to_le64(pfid->f_seq);
1996 ff->ff_parent.f_oid = cpu_to_le32(pfid->f_oid);
1997 /* Currently, the filter_fid::ff_parent::f_ver is not the
1998 * real parent MDT-object's FID::f_ver, instead it is the
1999 * OST-object index in its parent MDT-object's layout EA. */
2000 ff->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
2001 pbuf = lfsck_buf_get(env, ff, sizeof(struct filter_fid));
2002 cobj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, cfid);
2004 RETURN(PTR_ERR(cobj));
2007 CDEBUG(D_LFSCK, "Re-create the lost MDT-object: parent "DFID", child "
2008 DFID", OST-index %u, stripe-index %u, infix %s, type %s\n",
2009 PFID(pfid), PFID(cfid), ltd->ltd_index, ea_off, infix, type);
2011 pobj = lfsck_object_find_by_dev(env, lfsck->li_bottom, pfid);
2013 GOTO(put, rc = PTR_ERR(pobj));
2015 LASSERT(infix != NULL);
2016 LASSERT(type != NULL);
/* Probe lost+found names until an unused one is found (loop lines
 * around the lookup are elided in this listing). */
2019 snprintf(name, NAME_MAX, DFID"%s-%s-%d", PFID(pfid), infix,
2021 rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
2022 (const struct dt_key *)name, BYPASS_CAPA);
2023 if (rc != 0 && rc != -ENOENT)
2027 memset(la, 0, sizeof(*la));
2028 la->la_uid = rec->lor_uid;
2029 la->la_gid = rec->lor_gid;
2030 la->la_mode = S_IFREG | S_IRUSR;
2031 la->la_valid = LA_MODE | LA_UID | LA_GID;
2033 memset(dof, 0, sizeof(*dof));
2034 dof->dof_type = dt_mode_to_dft(S_IFREG);
/* Size the LOV EA buffer for ea_off + 1 stripes; restore lb_len at exit. */
2036 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2038 lu_buf_realloc(ea_buf, rc);
2039 buflen = ea_buf->lb_len;
2040 if (ea_buf->lb_buf == NULL)
2041 GOTO(put, rc = -ENOMEM);
2043 ea_buf->lb_len = rc;
2046 /* Hold update lock on the .lustre/lost+found/MDTxxxx/.
2048 * XXX: Currently, we do not grab the PDO lock as normal create cases,
2049 * because creating MDT-object for orphan OST-object is rare, we
2050 * do not much care about the performance. It can be improved in
2051 * the future when needed. */
2052 rc = lfsck_layout_lock(env, com, lfsck->li_lpf_obj, &lh,
2053 MDS_INODELOCK_UPDATE);
2057 th = dt_trans_create(env, next);
2059 GOTO(unlock, rc = PTR_ERR(th));
2061 /* 1a. Update OST-object's parent information remotely.
2063 * If other subsequent modifications failed, then next LFSCK scanning
2064 * will process the OST-object as orphan again with known parent FID. */
2066 rc = dt_declare_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th);
2071 /* 2a. Create the MDT-object locally. */
2072 rc = dt_declare_create(env, pobj, la, NULL, dof, th);
2076 /* 3a. Add layout EA for the MDT-object. */
2077 rc = dt_declare_xattr_set(env, pobj, ea_buf, XATTR_NAME_LOV,
2078 LU_XATTR_CREATE, th);
2082 /* 4a. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2083 rc = dt_declare_insert(env, lfsck->li_lpf_obj,
2084 (const struct dt_rec *)pfid,
2085 (const struct dt_key *)name, th);
2089 rc = dt_trans_start(env, next, th);
2093 /* 1b. Update OST-object's parent information remotely. */
2095 rc = dt_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th,
2101 dt_write_lock(env, pobj, 0);
2102 /* 2b. Create the MDT-object locally. */
2103 rc = dt_create(env, pobj, la, NULL, dof, th);
2105 /* 3b. Add layout EA for the MDT-object. */
2106 rc = lfsck_layout_extend_lovea(env, lfsck, th, pobj, cfid,
2107 ea_buf, LU_XATTR_CREATE,
2108 ltd->ltd_index, ea_off, false);
2109 dt_write_unlock(env, pobj);
2113 /* 4b. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2114 rc = dt_insert(env, lfsck->li_lpf_obj,
2115 (const struct dt_rec *)pfid,
2116 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
2121 dt_trans_stop(env, next, th);
2124 lfsck_layout_unlock(&lh);
2127 if (cobj != NULL && !IS_ERR(cobj))
2128 lu_object_put(env, &cobj->do_lu);
2129 if (pobj != NULL && !IS_ERR(pobj))
2130 lu_object_put(env, &pobj->do_lu);
2131 ea_buf->lb_len = buflen;
2133 return rc >= 0 ? 1 : rc;
/*
 * Ask the OST holding @fid (OST @index) to conditionally destroy that
 * OST-object: build an LE_CONDITIONAL_DESTROY LFSCK_NOTIFY request and
 * send it synchronously via ptlrpc_queue_wait().  Requires the export to
 * support OBD_CONNECT_LFSCK.
 *
 * NOTE(review): the "index" parameter line, the assignment of "exp", the
 * copy of *lr into the request capsule, and the tail (reply handling,
 * lfsck_tgt_put) are elided from this listing.
 */
2136 static int lfsck_layout_master_conditional_destroy(const struct lu_env *env,
2137 struct lfsck_component *com,
2138 const struct lu_fid *fid,
2141 struct lfsck_thread_info *info = lfsck_env_info(env);
2142 struct lfsck_request *lr = &info->lti_lr;
2143 struct lfsck_instance *lfsck = com->lc_lfsck;
2144 struct lfsck_tgt_desc *ltd;
2145 struct ptlrpc_request *req;
2146 struct lfsck_request *tmp;
2147 struct obd_export *exp;
2151 ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
2152 if (unlikely(ltd == NULL))
2156 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
2157 GOTO(put, rc = -EOPNOTSUPP);
2159 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2161 GOTO(put, rc = -ENOMEM);
2163 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2165 ptlrpc_request_free(req);
2170 memset(lr, 0, sizeof(*lr));
2171 lr->lr_event = LE_CONDITIONAL_DESTROY;
2172 lr->lr_active = LFSCK_TYPE_LAYOUT;
2175 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2177 ptlrpc_request_set_replen(req);
2179 rc = ptlrpc_queue_wait(req);
2180 ptlrpc_req_finished(req);
/*
 * OST-side handler for LE_CONDITIONAL_DESTROY: destroy the local
 * OST-object named by lr->lr_fid only if it still looks like an
 * LFSCK-created placeholder (la_ctime == 0 and no S_ISUID — checked once
 * without the extent lock and re-checked under it).  An EX extent lock on
 * [0, EOF] serializes against in-flight writes before the ref_del +
 * destroy transaction.
 *
 * NOTE(review): the "flags" declaration, several error branches, and the
 * "GOTO" failure checks between declare/start/execute steps are elided
 * from this listing.
 */
2190 static int lfsck_layout_slave_conditional_destroy(const struct lu_env *env,
2191 struct lfsck_component *com,
2192 struct lfsck_request *lr)
2194 struct lfsck_thread_info *info = lfsck_env_info(env);
2195 struct lu_attr *la = &info->lti_la;
2196 ldlm_policy_data_t *policy = &info->lti_policy;
2197 struct ldlm_res_id *resid = &info->lti_resid;
2198 struct lfsck_instance *lfsck = com->lc_lfsck;
2199 struct dt_device *dev = lfsck->li_bottom;
2200 struct lu_fid *fid = &lr->lr_fid;
2201 struct dt_object *obj;
2202 struct thandle *th = NULL;
2203 struct lustre_handle lh = { 0 };
2208 obj = lfsck_object_find_by_dev(env, dev, fid);
2210 RETURN(PTR_ERR(obj));
2212 dt_read_lock(env, obj, 0);
2213 if (dt_object_exists(obj) == 0) {
2214 dt_read_unlock(env, obj);
2216 GOTO(put, rc = -ENOENT);
2219 /* Get obj's attr without lock firstly. */
2220 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2221 dt_read_unlock(env, obj);
/* Non-zero ctime or S_ISUID means the object has been touched since
 * creation: refuse destruction with -ETXTBSY. */
2225 if (likely(la->la_ctime != 0 || la->la_mode & S_ISUID))
2226 GOTO(put, rc = -ETXTBSY);
2228 /* Acquire extent lock on [0, EOF] to sync with all possible written. */
2229 LASSERT(lfsck->li_namespace != NULL);
2231 memset(policy, 0, sizeof(*policy));
2232 policy->l_extent.end = OBD_OBJECT_EOF;
2233 ost_fid_build_resid(fid, resid);
2234 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_EXTENT,
2235 policy, LCK_EX, &flags, ldlm_blocking_ast,
2236 ldlm_completion_ast, NULL, NULL, 0,
2237 LVB_T_NONE, NULL, &lh);
2239 GOTO(put, rc = -EIO);
2241 dt_write_lock(env, obj, 0);
2242 /* Get obj's attr within lock again. */
2243 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2247 if (la->la_ctime != 0)
2248 GOTO(unlock, rc = -ETXTBSY);
2250 th = dt_trans_create(env, dev);
2252 GOTO(unlock, rc = PTR_ERR(th));
2254 rc = dt_declare_ref_del(env, obj, th);
2258 rc = dt_declare_destroy(env, obj, th);
2262 rc = dt_trans_start_local(env, dev, th);
2266 rc = dt_ref_del(env, obj, th);
2270 rc = dt_destroy(env, obj, th);
2272 CDEBUG(D_LFSCK, "Destroy the empty OST-object "DFID" which "
2273 "was created for reparing dangling referenced case. "
2274 "But the original missed OST-object is found now.\n",
2280 dt_trans_stop(env, dev, th);
2283 dt_write_unlock(env, obj);
2284 ldlm_lock_decref(&lh, LCK_EX);
2287 lu_object_put(env, &obj->do_lu);
2293 * Some OST-object has occupied the specified layout EA slot.
2294 * Such OST-object may be generated by the LFSCK when repair
2295 * dangling referenced MDT-object, which can be indicated by
2296 * attr::la_ctime == 0 but without S_ISUID in la_mode. If it
2297 * is true and such OST-object has not been modified yet, we
2298 * will replace it with the orphan OST-object; otherwise the
2299 * LFSCK will create new MDT-object to reference the orphan.
2301 * \retval +1: repaired
2302 * \retval 0: did nothing
2303 * \retval -ve: on error
/*
 * NOTE(review): error branches between declare/start/refill steps and the
 * closing braces are elided from this listing; the "<d"/"<ds" mangling
 * seen elsewhere in this extraction does not appear in this function, but
 * "obejct"/"reparing" typos in nearby comments are from the original.
 */
2305 static int lfsck_layout_conflict_create(const struct lu_env *env,
2306 struct lfsck_component *com,
2307 struct lfsck_tgt_desc *ltd,
2308 struct lu_orphan_rec *rec,
2309 struct dt_object *parent,
2310 struct lu_fid *cfid,
2311 struct lu_buf *ea_buf,
2312 struct lov_ost_data_v1 *slot,
2313 __u32 ea_off, __u32 ori_len)
2315 struct lfsck_thread_info *info = lfsck_env_info(env);
2316 struct lu_fid *cfid2 = &info->lti_fid2;
2317 struct ost_id *oi = &info->lti_oi;
2318 char *infix = info->lti_tmpbuf;
2319 struct lov_mds_md_v1 *lmm = ea_buf->lb_buf;
2320 struct dt_device *dev = com->lc_lfsck->li_bottom;
2321 struct thandle *th = NULL;
2322 struct lustre_handle lh = { 0 };
2323 __u32 ost_idx2 = le32_to_cpu(slot->l_ost_idx);
/* Work out the FID of the OST-object currently occupying the slot. */
2327 ostid_le_to_cpu(&slot->l_ost_oi, oi);
2328 ostid_to_fid(cfid2, oi, ost_idx2);
2330 CDEBUG(D_LFSCK, "Handle layout EA conflict: parent "DFID
2331 ", cur-child "DFID" on the OST %u, orphan-child "
2332 DFID" on the OST %u, stripe-index %u\n",
2333 PFID(lfsck_dto2fid(parent)), PFID(cfid2), ost_idx2,
2334 PFID(cfid), ltd->ltd_index, ea_off);
2336 /* Hold layout lock on the parent to prevent others to access. */
2337 rc = lfsck_layout_lock(env, com, parent, &lh,
2338 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2342 rc = lfsck_layout_master_conditional_destroy(env, com, cfid2, ost_idx2);
2344 /* If the conflict OST-obejct is not created for fixing dangling
2345 * referenced MDT-object in former LFSCK check/repair, or it has
2346 * been modified by others, then we cannot destroy it. Re-create
2347 * a new MDT-object for the orphan OST-object. */
2348 if (rc == -ETXTBSY) {
2349 /* No need the layout lock on the original parent. */
2350 lfsck_layout_unlock(&lh);
2351 ea_buf->lb_len = ori_len;
/* Zero parent FID => lfsck_layout_recreate_parent() allocates a new
 * FID; infix records the old parent and slot, type "C" = conflict. */
2353 fid_zero(&rec->lor_fid);
2354 snprintf(infix, LFSCK_TMPBUF_LEN, "-"DFID"-%x",
2355 PFID(lu_object_fid(&parent->do_lu)), ea_off);
2356 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2357 infix, "C", ea_off);
2362 if (rc != 0 && rc != -ENOENT)
2365 th = dt_trans_create(env, dev);
2367 GOTO(unlock, rc = PTR_ERR(th));
2369 rc = dt_declare_xattr_set(env, parent, ea_buf, XATTR_NAME_LOV,
2370 LU_XATTR_REPLACE, th);
2374 rc = dt_trans_start_local(env, dev, th);
2378 dt_write_lock(env, parent, 0);
2379 lmm->lmm_layout_gen = cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2380 rc = lfsck_layout_refill_lovea(env, th, parent, cfid, ea_buf, slot,
2381 LU_XATTR_REPLACE, ltd->ltd_index);
2382 dt_write_unlock(env, parent);
2387 dt_trans_stop(env, dev, th);
2390 lfsck_layout_unlock(&lh);
2393 ea_buf->lb_len = ori_len;
2395 return rc >= 0 ? 1 : rc;
/*
 * Repair the parent MDT-object's LOV EA so that orphan OST-object @cfid
 * occupies stripe slot @ea_off.  Under the parent's layout+xattr lock it:
 * reads the current LOV EA (with a re-try path when the declared buffer
 * is too small), then either creates/rebuilds the EA, extends it past the
 * current stripe count, refills a dummy slot, updates the child's parent
 * FID when the slot already references @cfid with a different index, or
 * falls through to lfsck_layout_conflict_create() when a different
 * OST-object occupies the slot.  Dry-run mode (LPF_DRYRUN) returns 1
 * instead of modifying anything.
 *
 * NOTE(review): this listing elides many lines (the "again:" label, GOTO
 * error checks, some else-branches and closing braces), so the visible
 * control flow is incomplete; confirm details against upstream source.
 */
2399 * \retval +1: repaired
2400 * \retval 0: did nothing
2401 * \retval -ve: on error
2403 static int lfsck_layout_recreate_lovea(const struct lu_env *env,
2404 struct lfsck_component *com,
2405 struct lfsck_tgt_desc *ltd,
2406 struct lu_orphan_rec *rec,
2407 struct dt_object *parent,
2408 struct lu_fid *cfid,
2409 __u32 ost_idx, __u32 ea_off)
2411 struct lfsck_thread_info *info = lfsck_env_info(env);
2412 struct lu_buf *buf = &info->lti_big_buf;
2413 struct lu_fid *fid = &info->lti_fid2;
2414 struct ost_id *oi = &info->lti_oi;
2415 struct lfsck_instance *lfsck = com->lc_lfsck;
2416 struct dt_device *dt = lfsck->li_bottom;
2417 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2418 struct thandle *handle = NULL;
2419 size_t buflen = buf->lb_len;
2420 struct lov_mds_md_v1 *lmm;
2421 struct lov_ost_data_v1 *objs;
2422 struct lustre_handle lh = { 0 };
2429 bool locked = false;
2432 CDEBUG(D_LFSCK, "Re-create the crashed layout EA: parent "
2433 DFID", child "DFID", OST-index %u, stripe-index %u\n",
2434 PFID(lfsck_dto2fid(parent)), PFID(cfid), ost_idx, ea_off);
2436 rc = lfsck_layout_lock(env, com, parent, &lh,
2437 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
/* Retry entry point: drop the write lock and any open transaction
 * before re-sizing the buffer and re-declaring. */
2443 dt_write_unlock(env, parent);
2447 if (handle != NULL) {
2448 dt_trans_stop(env, dt, handle);
2453 GOTO(unlock_layout, rc);
2455 if (buf->lb_len < rc) {
2456 lu_buf_realloc(buf, rc);
2457 buflen = buf->lb_len;
2458 if (buf->lb_buf == NULL)
2459 GOTO(unlock_layout, rc = -ENOMEM);
2462 if (!(bk->lb_param & LPF_DRYRUN)) {
2463 handle = dt_trans_create(env, dt);
2465 GOTO(unlock_layout, rc = PTR_ERR(handle));
2467 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2472 rc = dt_trans_start_local(env, dt, handle);
2477 dt_write_lock(env, parent, 0);
2479 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2480 if (rc == -ERANGE) {
/* Buffer too small: query the real size with LU_BUF_NULL. */
2481 rc = dt_xattr_get(env, parent, &LU_BUF_NULL, XATTR_NAME_LOV,
2485 } else if (rc == -ENODATA || rc == 0) {
2486 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2487 /* If the declared is not big enough, re-try. */
2488 if (buf->lb_len < rc)
2491 fl = LU_XATTR_CREATE;
2492 } else if (rc < 0) {
2493 GOTO(unlock_parent, rc);
2494 } else if (unlikely(buf->lb_len == 0)) {
2497 fl = LU_XATTR_REPLACE;
2500 if (fl == LU_XATTR_CREATE) {
2501 if (bk->lb_param & LPF_DRYRUN)
2502 GOTO(unlock_parent, rc = 1);
2504 LASSERT(buf->lb_len >= rc);
2507 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2508 buf, fl, ost_idx, ea_off, false);
2510 GOTO(unlock_parent, rc);
2514 rc1 = lfsck_layout_verify_header(lmm);
2516 /* If the LOV EA crashed, the rebuild it. */
2517 if (rc1 == -EINVAL) {
2518 if (bk->lb_param & LPF_DRYRUN)
2519 GOTO(unlock_parent, rc = 1);
2521 LASSERT(buf->lb_len >= rc);
2524 memset(lmm, 0, buf->lb_len);
2525 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2526 buf, fl, ost_idx, ea_off, true);
2528 GOTO(unlock_parent, rc);
2531 /* For other unknown magic/pattern, keep the current LOV EA. */
2533 GOTO(unlock_parent, rc = rc1);
2535 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2536 * been verified in lfsck_layout_verify_header() already. If some
2537 * new magic introduced in the future, then layout LFSCK needs to
2538 * be updated also. */
2539 magic = le32_to_cpu(lmm->lmm_magic);
2540 if (magic == LOV_MAGIC_V1) {
2541 objs = &lmm->lmm_objects[0];
2543 LASSERT(magic == LOV_MAGIC_V3);
2544 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2547 count = le16_to_cpu(lmm->lmm_stripe_count);
2549 GOTO(unlock_parent, rc = -EINVAL);
2552 /* Exceed the current end of MDT-object layout EA. Then extend it. */
2553 if (count <= ea_off) {
2554 if (bk->lb_param & LPF_DRYRUN)
2555 GOTO(unlock_parent, rc = 1);
2557 rc = lov_mds_md_size(ea_off + 1, magic);
2558 /* If the declared is not big enough, re-try. */
2559 if (buf->lb_len < rc)
2563 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2564 buf, fl, ost_idx, ea_off, false);
2566 GOTO(unlock_parent, rc);
2569 LASSERTF(rc > 0, "invalid rc = %d\n", rc);
2572 for (i = 0; i < count; i++, objs++) {
2573 /* The MDT-object was created via lfsck_layout_recover_create()
2574 * by others before, and we fill the dummy layout EA. */
2575 if (lovea_slot_is_dummy(objs)) {
2579 if (bk->lb_param & LPF_DRYRUN)
2580 GOTO(unlock_parent, rc = 1);
2582 lmm->lmm_layout_gen =
2583 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2584 rc = lfsck_layout_refill_lovea(env, handle, parent,
2585 cfid, buf, objs, fl,
2587 GOTO(unlock_parent, rc);
2590 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2591 ostid_to_fid(fid, oi, le32_to_cpu(objs->l_ost_idx));
2592 /* It should be rare case, the slot is there, but the LFSCK
2593 * does not handle it during the first-phase cycle scanning. */
2594 if (unlikely(lu_fid_eq(fid, cfid))) {
2596 GOTO(unlock_parent, rc = 0);
2598 /* Rare case that the OST-object index
2599 * does not match the parent MDT-object
2600 * layout EA. We trust the later one. */
2601 if (bk->lb_param & LPF_DRYRUN)
2602 GOTO(unlock_parent, rc = 1);
/* Drop all locks/transaction before the remote pfid update. */
2604 dt_write_unlock(env, parent);
2606 dt_trans_stop(env, dt, handle);
2607 lfsck_layout_unlock(&lh);
2608 buf->lb_len = buflen;
2609 rc = lfsck_layout_update_pfid(env, com, parent,
2610 cfid, ltd->ltd_tgt, i);
2617 /* The MDT-object exists, but related layout EA slot is occupied
2619 if (bk->lb_param & LPF_DRYRUN)
2620 GOTO(unlock_parent, rc = 1);
2622 dt_write_unlock(env, parent);
2624 dt_trans_stop(env, dt, handle);
2625 lfsck_layout_unlock(&lh);
2626 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
2627 objs = &lmm->lmm_objects[ea_off];
2629 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
2630 rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
2631 buf, objs, ea_off, buflen);
2637 dt_write_unlock(env, parent);
2641 dt_trans_stop(env, dt, handle);
2644 lfsck_layout_unlock(&lh);
2645 buf->lb_len = buflen;
/*
 * Process one orphan OST-object record from target @ltd: validate the
 * child/parent FIDs, then either re-create a lost parent MDT-object
 * (zero or non-existent parent) or repair the existing parent's LOV EA
 * via lfsck_layout_recreate_lovea().  Updates the per-component phase2
 * repaired/failed counters under com->lc_sem.
 *
 * NOTE(review): the infix/type arguments of the elided recreate calls,
 * the out:/put: labels, and several branch closers are missing from this
 * listing (gaps in the embedded line numbers).
 */
2650 static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
2651 struct lfsck_component *com,
2652 struct lfsck_tgt_desc *ltd,
2653 struct lu_orphan_rec *rec,
2654 struct lu_fid *cfid)
2656 struct lfsck_layout *lo = com->lc_file_ram;
2657 struct lu_fid *pfid = &rec->lor_fid;
2658 struct dt_object *parent = NULL;
/* The record's parent FID carries the stripe index in f_stripe_idx. */
2659 __u32 ea_off = pfid->f_stripe_idx;
2663 if (!fid_is_sane(cfid))
2664 GOTO(out, rc = -EINVAL);
2666 if (fid_is_zero(pfid)) {
2667 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2673 if (!fid_is_sane(pfid))
2674 GOTO(out, rc = -EINVAL);
2676 parent = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
2678 GOTO(out, rc = PTR_ERR(parent));
2680 if (unlikely(dt_object_remote(parent) != 0))
2681 GOTO(put, rc = -EXDEV);
2683 if (dt_object_exists(parent) == 0) {
2684 lu_object_put(env, &parent->do_lu);
2685 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2690 if (!S_ISREG(lu_object_attr(&parent->do_lu)))
2691 GOTO(put, rc = -EISDIR);
2693 rc = lfsck_layout_recreate_lovea(env, com, ltd, rec, parent, cfid,
2694 ltd->ltd_index, ea_off);
2700 lu_object_put(env, &parent->do_lu);
2702 /* The layout EA is changed, need to be reloaded next time. */
2703 lu_object_put_nocache(env, &parent->do_lu);
2706 down_write(&com->lc_sem);
2707 com->lc_new_scanned++;
2708 com->lc_new_checked++;
2710 lo->ll_objs_repaired[LLIT_ORPHAN - 1]++;
2712 } else if (rc < 0) {
2713 lo->ll_objs_failed_phase2++;
2715 up_write(&com->lc_sem);
/* Scan all orphan OST-objects exported by OST @ltd through the special
 * dt_lfsck_orphan_features index, feeding each record to
 * lfsck_layout_scan_orphan_one(). Returns 0 on success (a positive
 * iterator result just means the scan reached its end) or a negative
 * errno on failure. */
2720 static int lfsck_layout_scan_orphan(const struct lu_env *env,
2721 struct lfsck_component *com,
2722 struct lfsck_tgt_desc *ltd)
2724 struct lfsck_layout *lo = com->lc_file_ram;
2725 struct lfsck_instance *lfsck = com->lc_lfsck;
2726 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2727 struct lfsck_thread_info *info = lfsck_env_info(env);
2728 struct ost_id *oi = &info->lti_oi;
2729 struct lu_fid *fid = &info->lti_fid;
2730 struct dt_object *obj;
2731 const struct dt_it_ops *iops;
2736 CDEBUG(D_LFSCK, "%s: start the orphan scanning for OST%04x\n",
2737 lfsck_lfsck2name(lfsck), ltd->ltd_index);
/* Build the FID of the per-OST orphan index object: IDIF sequence,
 * object id 0, with the OST index encoded. */
2739 ostid_set_seq(oi, FID_SEQ_IDIF);
2740 ostid_set_id(oi, 0);
2741 ostid_to_fid(fid, oi, ltd->ltd_index);
2742 obj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, fid);
2743 if (unlikely(IS_ERR(obj)))
2744 RETURN(PTR_ERR(obj));
2746 rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_orphan_features);
2750 iops = &obj->do_index_ops->dio_it;
2751 di = iops->init(env, obj, 0, BYPASS_CAPA);
2753 GOTO(put, rc = PTR_ERR(di));
2755 rc = iops->load(env, di, 0);
2757 /* -ESRCH means that the orphan OST-objects rbtree has been
2758 * cleaned up because of the OSS server restart or other errors. */
2759 lo->ll_flags |= LF_INCOMPLETE;
2764 rc = iops->next(env, di);
2776 struct lu_orphan_rec *rec = &info->lti_rec;
/* Fault-injection hook: stall here to widen race windows in tests. */
2778 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY3) &&
2780 struct ptlrpc_thread *thread = &lfsck->li_thread;
2781 struct l_wait_info lwi;
2783 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
2785 l_wait_event(thread->t_ctl_waitq,
2786 !thread_is_running(thread),
/* Remember the current key so a checkpoint can resume from here. */
2790 key = iops->key(env, di);
2791 com->lc_fid_latest_scanned_phase2 = *(struct lu_fid *)key;
2792 rc = iops->rec(env, di, (struct dt_rec *)rec, 0);
2794 rc = lfsck_layout_scan_orphan_one(env, com, ltd, rec,
2795 &com->lc_fid_latest_scanned_phase2);
2796 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Honor the configured speed limit between records. */
2799 lfsck_control_speed_by_self(com);
2801 rc = iops->next(env, di);
2802 } while (rc < 0 && !(bk->lb_param & LPF_FAILOUT));
2809 iops->fini(env, di);
2811 lu_object_put(env, &obj->do_lu);
2813 CDEBUG(D_LFSCK, "%s: finish the orphan scanning for OST%04x, rc = %d\n",
2814 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
/* A positive rc only signals end-of-iteration; report success. */
2816 return rc > 0 ? 0 : rc;
2819 /* For the MDT-object with dangling reference, we need to repair the
2820 * inconsistency according to the LFSCK sponsor's requirement:
2822 * 1) Keep the inconsistency there and report the inconsistency case,
2823 * then give the application the chance to find related issues,
2824 * and the users can make the decision about how to handle it with
2825 * more human knowledge. (by default)
2827 * 2) Re-create the missed OST-object with the FID/owner information. */
/* Repair one dangling reference (see the comment above this function):
 * by default only report it; when LPF_CREATE_OSTOBJ was requested,
 * re-create the lost OST-object @llr->llr_child owned as the parent
 * (@pla), stamping its filter_fid back-pointer in the same transaction. */
2828 static int lfsck_layout_repair_dangling(const struct lu_env *env,
2829 struct lfsck_component *com,
2830 struct lfsck_layout_req *llr,
2831 const struct lu_attr *pla)
2833 struct lfsck_thread_info *info = lfsck_env_info(env);
2834 struct filter_fid *pfid = &info->lti_new_pfid;
2835 struct dt_allocation_hint *hint = &info->lti_hint;
2836 struct lu_attr *cla = &info->lti_la2;
2837 struct dt_object *parent = llr->llr_parent->llo_obj;
2838 struct dt_object *child = llr->llr_child;
2839 struct dt_device *dev = lfsck_obj2dt_dev(child);
2840 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2841 struct thandle *handle;
2843 struct lustre_handle lh = { 0 };
/* Re-creation only happens when the sponsor set LPF_CREATE_OSTOBJ. */
2848 if (com->lc_lfsck->li_bookmark_ram.lb_param & LPF_CREATE_OSTOBJ)
2853 CDEBUG(D_LFSCK, "Found dangling reference for: parent "DFID
2854 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u. %s",
2855 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2856 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid,
2857 create ? "Create the lost OST-object as required.\n" :
2858 "Keep the MDT-object there by default.\n");
/* The new OST-object inherits the parent's owner; plain 0666 regular
 * file mode, timestamps to be filled in. */
2863 memset(cla, 0, sizeof(*cla));
2864 cla->la_uid = pla->la_uid;
2865 cla->la_gid = pla->la_gid;
2866 cla->la_mode = S_IFREG | 0666;
2867 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
2868 LA_ATIME | LA_MTIME | LA_CTIME;
/* Serialize with other layout/xattr changes on the parent. */
2870 rc = lfsck_layout_lock(env, com, parent, &lh,
2871 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2875 handle = dt_trans_create(env, dev);
2877 GOTO(unlock1, rc = PTR_ERR(handle));
2879 hint->dah_parent = NULL;
/* Stamp the child's filter_fid so it back-points to the parent. */
2881 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2882 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2883 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2884 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2885 * parent MDT-object's layout EA. */
2886 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2887 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
2889 rc = dt_declare_create(env, child, cla, hint, NULL, handle);
2893 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
2894 LU_XATTR_CREATE, handle);
2898 rc = dt_trans_start(env, dev, handle);
/* Re-check the parent under its read lock: bail out (rc = 1, nothing
 * to repair) if it is being destroyed concurrently. */
2902 dt_read_lock(env, parent, 0);
2903 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2904 GOTO(unlock2, rc = 1);
2906 rc = dt_create(env, child, cla, hint, NULL, handle);
2910 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
2911 handle, BYPASS_CAPA);
2916 dt_read_unlock(env, parent);
2919 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2922 lfsck_layout_unlock(&lh);
2927 /* If the OST-object does not recognize the MDT-object as its parent, and
2928 * no other MDT-object claims to be its parent, then just trust the given
2929 * MDT-object as its parent. So update the OST-object filter_fid. */
/* Repair an unmatched MDT/OST pair (see the comment above): rewrite the
 * child's XATTR_NAME_FID back-pointer to @parent and sync the child's
 * UID/GID with the parent's latest owner, in a single transaction under
 * the parent's layout lock. */
2930 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
2931 struct lfsck_component *com,
2932 struct lfsck_layout_req *llr,
2933 const struct lu_attr *pla)
2935 struct lfsck_thread_info *info = lfsck_env_info(env);
2936 struct filter_fid *pfid = &info->lti_new_pfid;
2937 struct lu_attr *tla = &info->lti_la3;
2938 struct dt_object *parent = llr->llr_parent->llo_obj;
2939 struct dt_object *child = llr->llr_child;
2940 struct dt_device *dev = lfsck_obj2dt_dev(child);
2941 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2942 struct thandle *handle;
2944 struct lustre_handle lh = { 0 };
2948 CDEBUG(D_LFSCK, "Repair unmatched MDT-OST pair for: parent "DFID
2949 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
2950 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2951 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
/* Serialize with other layout/xattr changes on the parent. */
2953 rc = lfsck_layout_lock(env, com, parent, &lh,
2954 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2958 handle = dt_trans_create(env, dev);
2960 GOTO(unlock1, rc = PTR_ERR(handle));
/* New back-pointer: parent FID plus the stripe index. */
2962 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2963 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2964 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2965 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2966 * parent MDT-object's layout EA. */
2967 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2968 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
2970 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
2974 tla->la_valid = LA_UID | LA_GID;
2975 tla->la_uid = pla->la_uid;
2976 tla->la_gid = pla->la_gid;
2977 rc = dt_declare_attr_set(env, child, tla, handle);
2981 rc = dt_trans_start(env, dev, handle);
/* Hold the parent write-locked so its owner cannot change under us;
 * a dying parent means nothing left to repair (rc = 1). */
2985 dt_write_lock(env, parent, 0);
2986 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2987 GOTO(unlock2, rc = 1);
2989 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
2994 /* Get the latest parent's owner. */
2995 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
/* Propagate only UID/GID; dt_attr_get may have filled other fields. */
2999 tla->la_valid = LA_UID | LA_GID;
3000 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3005 dt_write_unlock(env, parent);
3008 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3011 lfsck_layout_unlock(&lh);
3016 /* If more than one MDT-object claims to be the OST-object's parent,
3017 * and the OST-object only recognizes one of them, then we need to generate
3018 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
/* Repair a multiply-referenced OST-object (see the comment above):
 * allocate a brand-new anonymous OST-object on the child's device and
 * swap it into slot llr->llr_lov_idx of @parent's layout EA, bumping
 * lmm_layout_gen, all under the parent's layout lock. */
3019 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
3020 struct lfsck_component *com,
3021 struct lfsck_layout_req *llr,
3025 struct lfsck_thread_info *info = lfsck_env_info(env);
3026 struct dt_allocation_hint *hint = &info->lti_hint;
3027 struct dt_object_format *dof = &info->lti_dof;
3028 struct dt_device *pdev = com->lc_lfsck->li_next;
3029 struct ost_id *oi = &info->lti_oi;
3030 struct dt_object *parent = llr->llr_parent->llo_obj;
3031 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
3032 struct dt_object *child = NULL;
3033 struct lu_device *d = &cdev->dd_lu_dev;
3034 struct lu_object *o = NULL;
3035 struct thandle *handle;
3036 struct lov_mds_md_v1 *lmm;
3037 struct lov_ost_data_v1 *objs;
3038 struct lustre_handle lh = { 0 };
3043 CDEBUG(D_LFSCK, "Repair multiple references for: parent "DFID
3044 ", OST-index %u, stripe-index %u, owner %u:%u\n",
3045 PFID(lfsck_dto2fid(parent)), llr->llr_ost_idx,
3046 llr->llr_lov_idx, la->la_uid, la->la_gid);
3048 rc = lfsck_layout_lock(env, com, parent, &lh,
3049 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
3053 handle = dt_trans_create(env, pdev);
3055 GOTO(unlock1, rc = PTR_ERR(handle));
/* Anonymous allocation: the replacement child gets a fresh FID. */
3057 o = lu_object_anon(env, d, NULL);
3059 GOTO(stop, rc = PTR_ERR(o));
/* First reference kept so the cleanup path can release the object
 * even if lu_object_locate() below fails -- presumed, the intervening
 * lines are elided here; confirm against the full source. */
3061 child = container_of(o, struct dt_object, do_lu);
3062 o = lu_object_locate(o->lo_header, d->ld_type);
3063 if (unlikely(o == NULL))
3064 GOTO(stop, rc = -EINVAL);
3066 child = container_of(o, struct dt_object, do_lu);
3067 la->la_valid = LA_UID | LA_GID;
3068 hint->dah_parent = NULL;
3070 dof->dof_type = DFT_REGULAR;
3071 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
3075 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3076 LU_XATTR_REPLACE, handle);
3080 rc = dt_trans_start(env, pdev, handle);
3084 dt_write_lock(env, parent, 0);
3085 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3086 GOTO(unlock2, rc = 0);
/* Reload the parent's layout EA under the write lock. */
3088 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
3089 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
3090 GOTO(unlock2, rc = 0);
3093 /* Someone change layout during the LFSCK, no need to repair then. */
3094 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
3095 GOTO(unlock2, rc = 0);
3097 rc = dt_create(env, child, la, hint, dof, handle);
3101 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3102 * been verified in lfsck_layout_verify_header() already. If some
3103 * new magic introduced in the future, then layout LFSCK needs to
3104 * be updated also. */
3105 magic = le32_to_cpu(lmm->lmm_magic);
3106 if (magic == LOV_MAGIC_V1) {
3107 objs = &lmm->lmm_objects[0];
3109 LASSERT(magic == LOV_MAGIC_V3);
3110 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Fill the slot: new object id, zero generation, target OST index;
 * bump the layout generation to invalidate cached layouts. */
3113 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
3114 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
3115 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
3116 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
3117 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
3118 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3119 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
/* rc == 0 maps to +1: one inconsistency repaired. */
3121 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
3124 dt_write_unlock(env, parent);
3128 lu_object_put(env, &child->do_lu);
3130 dt_trans_stop(env, pdev, handle);
3133 lfsck_layout_unlock(&lh);
3138 /* If the MDT-object and the OST-object have different owner information,
3139 * then trust the MDT-object, because the normal chown/chgrp handle order
3140 * is from MDT to OST, and it is possible that some chown/chgrp operation
3141 * is partly done. */
/* Repair an owner mismatch (see the comment above): copy the parent
 * MDT-object's current UID/GID onto the child OST-object in one
 * transaction. Returns a positive value when no repair was needed
 * (parent dying, or someone changed the owner concurrently), 0/negative
 * otherwise -- tail return paths are elided in this view. */
3142 static int lfsck_layout_repair_owner(const struct lu_env *env,
3143 struct lfsck_component *com,
3144 struct lfsck_layout_req *llr,
3145 struct lu_attr *pla)
3147 struct lfsck_thread_info *info = lfsck_env_info(env);
3148 struct lu_attr *tla = &info->lti_la3;
3149 struct dt_object *parent = llr->llr_parent->llo_obj;
3150 struct dt_object *child = llr->llr_child;
3151 struct dt_device *dev = lfsck_obj2dt_dev(child);
3152 struct thandle *handle;
3156 CDEBUG(D_LFSCK, "Repair inconsistent file owner for: parent "DFID
3157 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
3158 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
3159 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
3161 handle = dt_trans_create(env, dev);
3163 RETURN(PTR_ERR(handle));
3165 tla->la_uid = pla->la_uid;
3166 tla->la_gid = pla->la_gid;
3167 tla->la_valid = LA_UID | LA_GID;
3168 rc = dt_declare_attr_set(env, child, tla, handle);
3172 rc = dt_trans_start(env, dev, handle);
3176 /* Use the dt_object lock to serialize with destroy and attr_set. */
3177 dt_read_lock(env, parent, 0);
3178 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3179 GOTO(unlock, rc = 1);
3181 /* Get the latest parent's owner. */
3182 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3184 CWARN("%s: fail to get the latest parent's ("DFID") owner, "
3185 "not sure whether some others chown/chgrp during the "
3186 "LFSCK: rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
3187 PFID(lfsck_dto2fid(parent)), rc);
3192 /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
3193 if (unlikely(tla->la_uid != pla->la_uid ||
3194 tla->la_gid != pla->la_gid))
3195 GOTO(unlock, rc = 1);
/* Owner confirmed unchanged: apply it to the child. */
3197 tla->la_valid = LA_UID | LA_GID;
3198 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3203 dt_read_unlock(env, parent);
3206 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3211 /* Check whether the OST-object correctly back points to the
3212 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
/* Verify that OST-object @cfid's stored back-pointer @pfid really names
 * @parent at stripe @idx. Returns a positive LLIT_* inconsistency type
 * when repair is needed, 0 when consistent (not all return paths are
 * visible in this view), or a negative errno on lookup failure. */
3213 static int lfsck_layout_check_parent(const struct lu_env *env,
3214 struct lfsck_component *com,
3215 struct dt_object *parent,
3216 const struct lu_fid *pfid,
3217 const struct lu_fid *cfid,
3218 const struct lu_attr *pla,
3219 const struct lu_attr *cla,
3220 struct lfsck_layout_req *llr,
3221 struct lu_buf *lov_ea, __u32 idx)
3223 struct lfsck_thread_info *info = lfsck_env_info(env);
3224 struct lu_buf *buf = &info->lti_big_buf;
3225 struct dt_object *tobj;
3226 struct lov_mds_md_v1 *lmm;
3227 struct lov_ost_data_v1 *objs;
/* No back-pointer stored at all. */
3234 if (fid_is_zero(pfid)) {
3235 /* client never wrote. */
3236 if (cla->la_size == 0 && cla->la_blocks == 0) {
3237 if (unlikely(cla->la_uid != pla->la_uid ||
3238 cla->la_gid != pla->la_gid))
3239 RETURN (LLIT_INCONSISTENT_OWNER);
3244 RETURN(LLIT_UNMATCHED_PAIR);
/* A corrupted stored parent FID counts as an unmatched pair. */
3247 if (unlikely(!fid_is_sane(pfid)))
3248 RETURN(LLIT_UNMATCHED_PAIR);
/* Back-pointer names this parent: consistent only when the stripe
 * index matches too. */
3250 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
3251 if (llr->llr_lov_idx == idx)
3254 RETURN(LLIT_UNMATCHED_PAIR);
/* Some other MDT-object claims this OST-object: examine it. */
3257 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
3259 RETURN(LLIT_UNMATCHED_PAIR);
3262 RETURN(PTR_ERR(tobj));
3264 if (!dt_object_exists(tobj))
3265 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3267 /* Load the tobj's layout EA, in spite of it is a local MDT-object or
3268 * remote one on another MDT. Then check whether the given OST-object
3269 * is in such layout. If yes, it is multiple referenced, otherwise it
3270 * is unmatched referenced case. */
3271 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
3273 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3279 magic = le32_to_cpu(lmm->lmm_magic);
3280 if (magic == LOV_MAGIC_V1) {
3281 objs = &lmm->lmm_objects[0];
3283 LASSERT(magic == LOV_MAGIC_V3);
3284 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Scan the other object's stripes for @cfid. */
3287 count = le16_to_cpu(lmm->lmm_stripe_count);
3288 for (i = 0; i < count; i++, objs++) {
3289 struct lu_fid *tfid = &info->lti_fid2;
3290 struct ost_id *oi = &info->lti_oi;
3292 if (lovea_slot_is_dummy(objs))
3295 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3296 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
3297 if (lu_fid_eq(cfid, tfid)) {
3300 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
3304 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3307 lfsck_object_put(env, tobj);
/* Check one MDT-object/OST-object pair produced by the first-phase
 * scan: fetch both objects' attributes, classify the inconsistency
 * (dangling, unmatched pair, multiple reference, owner mismatch) via
 * lfsck_layout_check_parent(), dispatch the matching repair unless in
 * dryrun mode, then update the statistics under com->lc_sem. */
3312 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
3313 struct lfsck_component *com,
3314 struct lfsck_layout_req *llr)
3316 struct lfsck_layout *lo = com->lc_file_ram;
3317 struct lfsck_thread_info *info = lfsck_env_info(env);
3318 struct filter_fid_old *pea = &info->lti_old_pfid;
3319 struct lu_fid *pfid = &info->lti_fid;
3320 struct lu_buf *buf = NULL;
3321 struct dt_object *parent = llr->llr_parent->llo_obj;
3322 struct dt_object *child = llr->llr_child;
3323 struct lu_attr *pla = &info->lti_la;
3324 struct lu_attr *cla = &info->lti_la2;
3325 struct lfsck_instance *lfsck = com->lc_lfsck;
3326 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3327 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
3332 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
3334 if (lu_object_is_dying(parent->do_lu.lo_header))
/* -ENOENT on the child: the MDT-object references a missing
 * OST-object, i.e. a dangling reference. */
3340 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
3341 if (rc == -ENOENT) {
3342 if (lu_object_is_dying(parent->do_lu.lo_header))
3345 type = LLIT_DANGLING;
/* Read the child's back-pointer xattr; both the old (larger) and the
 * new filter_fid sizes are acceptable, anything else is unmatched. */
3352 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
3353 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
3354 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
3355 rc != sizeof(struct filter_fid))) {
3356 type = LLIT_UNMATCHED_PAIR;
3360 if (rc < 0 && rc != -ENODATA)
3363 if (rc == -ENODATA) {
3366 fid_le_to_cpu(pfid, &pea->ff_parent);
3367 /* Currently, the filter_fid::ff_parent::f_ver is not the
3368 * real parent MDT-object's FID::f_ver, instead it is the
3369 * OST-object index in its parent MDT-object's layout EA. */
3370 idx = pfid->f_stripe_idx;
3374 rc = lfsck_layout_check_parent(env, com, parent, pfid,
3375 lu_object_fid(&child->do_lu),
3376 pla, cla, llr, buf, idx);
/* Even a matched pair must agree on ownership. */
3385 if (unlikely(cla->la_uid != pla->la_uid ||
3386 cla->la_gid != pla->la_gid)) {
3387 type = LLIT_INCONSISTENT_OWNER;
/* Dryrun: record but do not touch anything. */
3392 if (bk->lb_param & LPF_DRYRUN) {
3393 if (type != LLIT_NONE)
/* Dispatch the repair matching the classified type. */
3401 rc = lfsck_layout_repair_dangling(env, com, llr, pla);
3403 case LLIT_UNMATCHED_PAIR:
3404 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
3406 case LLIT_MULTIPLE_REFERENCED:
3407 rc = lfsck_layout_repair_multiple_references(env, com, llr,
3410 case LLIT_INCONSISTENT_OWNER:
3411 rc = lfsck_layout_repair_owner(env, com, llr, pla);
/* Publish the result into the shared statistics. */
3421 down_write(&com->lc_sem);
3423 struct lfsck_layout_master_data *llmd = com->lc_data;
3425 if (unlikely(llmd->llmd_exit)) {
3427 } else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
3428 rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
3429 rc == -EHOSTUNREACH) {
3430 /* If cannot touch the target server,
3431 * mark the LFSCK as INCOMPLETE. */
3432 CERROR("%s: Fail to talk with OST %x: rc = %d.\n",
3433 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
3434 lo->ll_flags |= LF_INCOMPLETE;
3435 lo->ll_objs_skipped++;
3438 lfsck_layout_record_failure(env, lfsck, lo);
3440 } else if (rc > 0) {
3441 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
3442 "unknown type = %d\n", type);
3444 lo->ll_objs_repaired[type - 1]++;
3445 if (bk->lb_param & LPF_DRYRUN &&
3446 unlikely(lo->ll_pos_first_inconsistent == 0))
3447 lo->ll_pos_first_inconsistent =
3448 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
3451 up_write(&com->lc_sem);
/* Main loop of the layout LFSCK assistant thread on the master: announce
 * LE_START to all targets, consume the lfsck_layout_req items prefetched
 * by the engine thread (phase 1), drive the phase-1 post and the phase-2
 * orphan scanning, then broadcast the final event before exiting.
 *
 * Fix: restore tokens corrupted by HTML-entity decoding -- every "&lt"
 * prefix had been eaten, turning "&lta->", "&ltds->" and "&ltd->" into
 * "<a->", "<ds->" and "<d->".
 *
 * NOTE(review): many lines are elided in this view; the inserted comments
 * describe only what the visible code shows. */
3456 static int lfsck_layout_assistant(void *args)
3458 struct lfsck_thread_args *lta = args;
3459 struct lu_env *env = &lta->lta_env;
3460 struct lfsck_component *com = lta->lta_com;
3461 struct lfsck_instance *lfsck = lta->lta_lfsck;
3462 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3463 struct lfsck_position *pos = &com->lc_pos_start;
3464 struct lfsck_thread_info *info = lfsck_env_info(env);
3465 struct lfsck_request *lr = &info->lti_lr;
3466 struct lfsck_layout_master_data *llmd = com->lc_data;
3467 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3468 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3469 struct lfsck_layout_req *llr;
3470 struct l_wait_info lwi = { 0 };
/* Tell every target to start, carrying the tunables from the bookmark. */
3475 memset(lr, 0, sizeof(*lr));
3476 lr->lr_event = LE_START;
3477 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
3478 LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
3479 lr->lr_speed = bk->lb_speed_limit;
3480 lr->lr_version = bk->lb_version;
3481 lr->lr_param = bk->lb_param;
3482 lr->lr_async_windows = bk->lb_async_windows;
3483 lr->lr_flags = LEF_TO_OST;
/* Scanning from the very beginning: ask the targets to reset too. */
3484 if (pos->lp_oit_cookie <= 1)
3485 lr->lr_param |= LPF_RESET;
3487 rc = lfsck_layout_master_notify_others(env, com, lr);
3489 CERROR("%s: fail to notify others for layout start: rc = %d\n",
3490 lfsck_lfsck2name(lfsck), rc);
/* Report ourselves running and wake the master engine thread. */
3494 spin_lock(&llmd->llmd_lock);
3495 thread_set_flags(athread, SVC_RUNNING);
3496 spin_unlock(&llmd->llmd_lock);
3497 wake_up_all(&mthread->t_ctl_waitq);
/* Phase 1: drain the request list filled by the engine thread. */
3500 while (!list_empty(&llmd->llmd_req_list)) {
3501 bool wakeup = false;
3503 if (unlikely(llmd->llmd_exit ||
3504 !thread_is_running(mthread)))
3505 GOTO(cleanup1, rc = llmd->llmd_post_result);
3507 llr = list_entry(llmd->llmd_req_list.next,
3508 struct lfsck_layout_req,
3510 /* Only the lfsck_layout_assistant thread itself can
3511 * remove the "llr" from the head of the list, LFSCK
3512 * engine thread only inserts other new "lld" at the
3513 * end of the list. So it is safe to handle current
3514 * "llr" without the spin_lock. */
3515 rc = lfsck_layout_assistant_handle_one(env, com, llr);
3516 spin_lock(&llmd->llmd_lock);
3517 list_del_init(&llr->llr_list);
3518 llmd->llmd_prefetched--;
3519 /* Wake up the main engine thread only when the list
3520 * is empty or half of the prefetched items have been
3521 * handled to avoid too frequent thread schedule. */
3522 if (llmd->llmd_prefetched == 0 ||
3523 (bk->lb_async_windows != 0 &&
3524 bk->lb_async_windows / 2 ==
3525 llmd->llmd_prefetched))
3527 spin_unlock(&llmd->llmd_lock);
3529 wake_up_all(&mthread->t_ctl_waitq);
3531 lfsck_layout_req_fini(env, llr);
3532 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* Idle: wait for more requests, a post request, or double-scan. */
3536 l_wait_event(athread->t_ctl_waitq,
3537 !lfsck_layout_req_empty(llmd) ||
3539 llmd->llmd_to_post ||
3540 llmd->llmd_to_double_scan,
3543 if (unlikely(llmd->llmd_exit))
3544 GOTO(cleanup1, rc = llmd->llmd_post_result);
3546 if (!list_empty(&llmd->llmd_req_list))
/* Phase-1 post: forward the engine's result to all targets. */
3549 if (llmd->llmd_to_post) {
3550 llmd->llmd_to_post = 0;
3551 LASSERT(llmd->llmd_post_result > 0);
3553 memset(lr, 0, sizeof(*lr));
3554 lr->lr_event = LE_PHASE1_DONE;
3555 lr->lr_status = llmd->llmd_post_result;
3556 rc = lfsck_layout_master_notify_others(env, com, lr);
3558 CERROR("%s: failed to notify others "
3559 "for layout post: rc = %d\n",
3560 lfsck_lfsck2name(lfsck), rc);
3562 /* Wakeup the master engine to go ahead. */
3563 wake_up_all(&mthread->t_ctl_waitq);
/* Phase 2: query targets and scan the orphan OST-objects they report. */
3566 if (llmd->llmd_to_double_scan) {
3567 llmd->llmd_to_double_scan = 0;
3568 atomic_inc(&lfsck->li_double_scan_count);
3569 llmd->llmd_in_double_scan = 1;
3570 wake_up_all(&mthread->t_ctl_waitq);
3572 com->lc_new_checked = 0;
3573 com->lc_new_scanned = 0;
3574 com->lc_time_last_checkpoint = cfs_time_current();
3575 com->lc_time_next_checkpoint =
3576 com->lc_time_last_checkpoint +
3577 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
3579 /* flush all async updating before handling orphan. */
3580 dt_sync(env, lfsck->li_next);
3582 while (llmd->llmd_in_double_scan) {
3583 struct lfsck_tgt_descs *ltds =
3584 &lfsck->li_ost_descs;
3585 struct lfsck_tgt_desc *ltd;
3587 rc = lfsck_layout_master_query_others(env, com);
3588 if (lfsck_layout_master_to_orphan(llmd))
3594 /* Pull LFSCK status on related targets once
3595 * per 30 seconds if we are not notified. */
3596 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
3597 cfs_time_seconds(1),
3599 rc = l_wait_event(athread->t_ctl_waitq,
3600 lfsck_layout_master_to_orphan(llmd) ||
3602 !thread_is_running(mthread),
3605 if (unlikely(llmd->llmd_exit ||
3606 !thread_is_running(mthread)))
3607 GOTO(cleanup2, rc = 0);
3609 if (rc == -ETIMEDOUT)
/* Pop OSTs that reached phase 2 and scan their orphans. */
3616 spin_lock(&ltds->ltd_lock);
3618 &llmd->llmd_ost_phase2_list)) {
3620 llmd->llmd_ost_phase2_list.next,
3621 struct lfsck_tgt_desc,
3622 ltd_layout_phase_list);
3624 &ltd->ltd_layout_phase_list);
3625 spin_unlock(&ltds->ltd_lock);
3627 if (bk->lb_param & LPF_ALL_TGT) {
3628 rc = lfsck_layout_scan_orphan(
3631 bk->lb_param & LPF_FAILOUT)
3635 if (unlikely(llmd->llmd_exit ||
3636 !thread_is_running(mthread)))
3637 GOTO(cleanup2, rc = 0);
3639 spin_lock(&ltds->ltd_lock);
/* All targets done with phase 1: double scan completed. */
3642 if (list_empty(&llmd->llmd_ost_phase1_list)) {
3643 spin_unlock(&ltds->ltd_lock);
3644 GOTO(cleanup2, rc = 1);
3646 spin_unlock(&ltds->ltd_lock);
3652 /* Cleanup the unfinished requests. */
3653 spin_lock(&llmd->llmd_lock);
3655 llmd->llmd_assistant_status = rc;
3657 while (!list_empty(&llmd->llmd_req_list)) {
3658 llr = list_entry(llmd->llmd_req_list.next,
3659 struct lfsck_layout_req,
3661 list_del_init(&llr->llr_list);
3662 llmd->llmd_prefetched--;
3663 spin_unlock(&llmd->llmd_lock);
3664 lfsck_layout_req_fini(env, llr);
3665 spin_lock(&llmd->llmd_lock);
3667 spin_unlock(&llmd->llmd_lock);
3669 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
3670 llmd->llmd_prefetched);
/* Compose the final event according to how we are exiting. */
3673 memset(lr, 0, sizeof(*lr));
3675 lr->lr_event = LE_PHASE2_DONE;
3677 } else if (rc == 0) {
3678 if (lfsck->li_flags & LPF_ALL_TGT) {
3679 lr->lr_event = LE_STOP;
3680 lr->lr_status = LS_STOPPED;
3682 lr->lr_event = LE_PEER_EXIT;
3683 switch (lfsck->li_status) {
3686 lr->lr_status = LS_CO_PAUSED;
3690 lr->lr_status = LS_CO_STOPPED;
3693 CERROR("%s: unknown status: rc = %d\n",
3694 lfsck_lfsck2name(lfsck),
3696 lr->lr_status = LS_CO_FAILED;
3701 if (lfsck->li_flags & LPF_ALL_TGT) {
3702 lr->lr_event = LE_STOP;
3703 lr->lr_status = LS_FAILED;
3705 lr->lr_event = LE_PEER_EXIT;
3706 lr->lr_status = LS_CO_FAILED;
3710 rc1 = lfsck_layout_master_notify_others(env, com, lr);
3712 CERROR("%s: failed to notify others for layout quit: rc = %d\n",
3713 lfsck_lfsck2name(lfsck), rc1);
3717 /* Under force exit case, some requests may be just freed without
3718 * verification, those objects should be re-handled when next run.
3719 * So not update the on-disk tracing file under such case. */
3720 if (llmd->llmd_in_double_scan && !llmd->llmd_exit)
3721 rc1 = lfsck_layout_double_scan_result(env, com, rc);
3724 if (llmd->llmd_in_double_scan)
3725 atomic_dec(&lfsck->li_double_scan_count);
/* Publish the final status and signal the master engine thread. */
3727 spin_lock(&llmd->llmd_lock);
3728 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
3729 thread_set_flags(athread, SVC_STOPPED);
3730 wake_up_all(&mthread->t_ctl_waitq);
3731 spin_unlock(&llmd->llmd_lock);
3732 lfsck_thread_args_fini(lta);
/* Interpret callback for the async LFSCK_QUERY RPC a slave sends to a
 * layout master: when the RPC failed (presumably the master crashed) or
 * the master is no longer in a scanning phase, drop the target from the
 * slave's tracked master list so the local LFSCK can proceed. */
3738 lfsck_layout_slave_async_interpret(const struct lu_env *env,
3739 struct ptlrpc_request *req,
3742 struct lfsck_layout_slave_async_args *llsaa = args;
3743 struct obd_export *exp = llsaa->llsaa_exp;
3744 struct lfsck_component *com = llsaa->llsaa_com;
3745 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
3746 struct lfsck_layout_slave_data *llsd = com->lc_data;
3750 /* It is quite probably caused by target crash,
3751 * to make the LFSCK can go ahead, assume that
3752 * the target finished the LFSCK processing. */
3755 struct lfsck_reply *lr;
3757 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
3758 if (lr->lr_status != LS_SCANNING_PHASE1 &&
3759 lr->lr_status != LS_SCANNING_PHASE2)
/* Release the references taken when the query was queued. */
3763 lfsck_layout_llst_del(llsd, llst);
3764 lfsck_layout_llst_put(llst);
3765 lfsck_component_put(env, com);
3766 class_export_put(exp);
/* Pack and queue an asynchronous LFSCK_QUERY request carrying @lr to the
 * export @exp, registering lfsck_layout_slave_async_interpret() as the
 * completion callback. Returns 0 on success, negative errno on packing
 * failure. */
3771 static int lfsck_layout_async_query(const struct lu_env *env,
3772 struct lfsck_component *com,
3773 struct obd_export *exp,
3774 struct lfsck_layout_slave_target *llst,
3775 struct lfsck_request *lr,
3776 struct ptlrpc_request_set *set)
3778 struct lfsck_layout_slave_async_args *llsaa;
3779 struct ptlrpc_request *req;
3780 struct lfsck_request *tmp;
3784 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
3788 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
3790 ptlrpc_request_free(req);
3794 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3796 ptlrpc_request_set_replen(req);
/* Stash the context needed by the interpret callback. */
3798 llsaa = ptlrpc_req_async_args(req);
3799 llsaa->llsaa_exp = exp;
3800 llsaa->llsaa_com = lfsck_component_get(com);
3801 llsaa->llsaa_llst = llst;
3802 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
3803 ptlrpc_set_add_req(set, req);
/* Pack and queue a one-way asynchronous LFSCK_NOTIFY request carrying
 * @lr to the export @exp; unlike lfsck_layout_async_query() no interpret
 * callback is installed. Returns 0 on success, negative errno on packing
 * failure. */
3808 static int lfsck_layout_async_notify(const struct lu_env *env,
3809 struct obd_export *exp,
3810 struct lfsck_request *lr,
3811 struct ptlrpc_request_set *set)
3813 struct ptlrpc_request *req;
3814 struct lfsck_request *tmp;
3818 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
3822 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
3824 ptlrpc_request_free(req);
3828 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3830 ptlrpc_request_set_replen(req);
3831 ptlrpc_set_add_req(set, req);
/* Query every layout-LFSCK master tracked in llsd_master_list about its
 * current status. The llst_gen/llsd_touch_gen rotation allows dropping
 * llsd_lock while sending: each visited entry is stamped with the new
 * generation and moved to the tail, so the loop terminates when it meets
 * an already-stamped entry. */
3837 lfsck_layout_slave_query_master(const struct lu_env *env,
3838 struct lfsck_component *com)
3840 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3841 struct lfsck_instance *lfsck = com->lc_lfsck;
3842 struct lfsck_layout_slave_data *llsd = com->lc_data;
3843 struct lfsck_layout_slave_target *llst;
3844 struct obd_export *exp;
3845 struct ptlrpc_request_set *set;
3850 set = ptlrpc_prep_set();
3854 memset(lr, 0, sizeof(*lr));
3855 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3856 lr->lr_event = LE_QUERY;
3857 lr->lr_active = LFSCK_TYPE_LAYOUT;
/* New touch generation for this pass over the master list. */
3859 llsd->llsd_touch_gen++;
3860 spin_lock(&llsd->llsd_lock);
3861 while (!list_empty(&llsd->llsd_master_list)) {
3862 llst = list_entry(llsd->llsd_master_list.next,
3863 struct lfsck_layout_slave_target,
3865 if (llst->llst_gen == llsd->llsd_touch_gen)
3868 llst->llst_gen = llsd->llsd_touch_gen;
3869 list_del(&llst->llst_list);
3870 list_add_tail(&llst->llst_list,
3871 &llsd->llsd_master_list);
3872 atomic_inc(&llst->llst_ref);
3873 spin_unlock(&llsd->llsd_lock);
/* Find the LWP export to this master; drop the entry if gone. */
3875 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
3878 lfsck_layout_llst_del(llsd, llst);
3879 lfsck_layout_llst_put(llst);
3880 spin_lock(&llsd->llsd_lock);
3884 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
3886 CERROR("%s: slave fail to query %s for layout: "
3887 "rc = %d\n", lfsck_lfsck2name(lfsck),
3888 exp->exp_obd->obd_name, rc);
3890 lfsck_layout_llst_put(llst);
3891 class_export_put(exp);
3893 spin_lock(&llsd->llsd_lock);
3895 spin_unlock(&llsd->llsd_lock);
/* Wait for every queued query to complete. */
3897 rc = ptlrpc_set_wait(set);
3898 ptlrpc_set_destroy(set);
3900 RETURN(rc1 != 0 ? rc1 : rc);
/* Broadcast @event with status @result to every layout-LFSCK master
 * tracked in llsd_master_list, using the same touch-generation rotation
 * as lfsck_layout_slave_query_master() to iterate safely while dropping
 * llsd_lock around the RPC work. */
3904 lfsck_layout_slave_notify_master(const struct lu_env *env,
3905 struct lfsck_component *com,
3906 enum lfsck_events event, int result)
3908 struct lfsck_instance *lfsck = com->lc_lfsck;
3909 struct lfsck_layout_slave_data *llsd = com->lc_data;
3910 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3911 struct lfsck_layout_slave_target *llst;
3912 struct obd_export *exp;
3913 struct ptlrpc_request_set *set;
3917 set = ptlrpc_prep_set();
3921 memset(lr, 0, sizeof(*lr));
3922 lr->lr_event = event;
3923 lr->lr_flags = LEF_FROM_OST;
3924 lr->lr_status = result;
3925 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3926 lr->lr_active = LFSCK_TYPE_LAYOUT;
/* New touch generation for this pass over the master list. */
3927 llsd->llsd_touch_gen++;
3928 spin_lock(&llsd->llsd_lock);
3929 while (!list_empty(&llsd->llsd_master_list)) {
3930 llst = list_entry(llsd->llsd_master_list.next,
3931 struct lfsck_layout_slave_target,
3933 if (llst->llst_gen == llsd->llsd_touch_gen)
3936 llst->llst_gen = llsd->llsd_touch_gen;
3937 list_del(&llst->llst_list);
3938 list_add_tail(&llst->llst_list,
3939 &llsd->llsd_master_list);
3940 atomic_inc(&llst->llst_ref);
3941 spin_unlock(&llsd->llsd_lock);
/* Find the LWP export to this master; drop the entry if gone. */
3943 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
3946 lfsck_layout_llst_del(llsd, llst);
3947 lfsck_layout_llst_put(llst);
3948 spin_lock(&llsd->llsd_lock);
3952 rc = lfsck_layout_async_notify(env, exp, lr, set);
3954 CERROR("%s: slave fail to notify %s for layout: "
3955 "rc = %d\n", lfsck_lfsck2name(lfsck),
3956 exp->exp_obd->obd_name, rc);
3957 lfsck_layout_llst_put(llst);
3958 class_export_put(exp);
3959 spin_lock(&llsd->llsd_lock);
3961 spin_unlock(&llsd->llsd_lock);
/* Best effort: wait for the notifications, ignore individual errors. */
3963 ptlrpc_set_wait(set);
3964 ptlrpc_set_destroy(set);
3970 * \ret -ENODATA: unrecognized stripe
3971 * \ret = 0 : recognized stripe
3972 * \ret < 0 : other failures
/* On the MDT, verify that OST-object @cfid really is stripe @idx of the
 * MDT-object @pfid, by matching cfid's ostid against each slot of pfid's
 * LOV EA. Visible results: 0 when found at the claimed index, -ENODATA when
 * not found / wrong index / no layout, -ENOENT when the parent is missing.
 * NOTE(review): some original lines are elided in this extract. */
3974 static int lfsck_layout_master_check_pairs(const struct lu_env *env,
3975 struct lfsck_component *com,
3976 struct lu_fid *cfid,
3977 struct lu_fid *pfid)
3979 struct lfsck_thread_info *info = lfsck_env_info(env);
3980 struct lu_buf *buf = &info->lti_big_buf;
3981 struct ost_id *oi = &info->lti_oi;
3982 struct dt_object *obj;
3983 struct lov_mds_md_v1 *lmm;
3984 struct lov_ost_data_v1 *objs;
/* The claimed stripe index is carried inside the parent FID. */
3985 __u32 idx = pfid->f_stripe_idx;
3993 obj = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
3995 RETURN(PTR_ERR(obj));
3997 dt_read_lock(env, obj, 0);
3998 if (unlikely(!dt_object_exists(obj)))
3999 GOTO(unlock, rc = -ENOENT);
4001 rc = lfsck_layout_get_lovea(env, obj, buf, NULL);
4006 GOTO(unlock, rc = -ENODATA);
4009 rc = lfsck_layout_verify_header(lmm);
4013 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4014 * been verified in lfsck_layout_verify_header() already. If some
4015 * new magic introduced in the future, then layout LFSCK needs to
4016 * be updated also. */
4017 magic = le32_to_cpu(lmm->lmm_magic);
4018 if (magic == LOV_MAGIC_V1) {
4019 objs = &lmm->lmm_objects[0];
4021 LASSERT(magic == LOV_MAGIC_V3);
4022 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
4025 fid_to_ostid(cfid, oi);
4026 count = le16_to_cpu(lmm->lmm_stripe_count);
4027 for (i = 0; i < count; i++, objs++) {
4030 ostid_le_to_cpu(&objs->l_ost_oi, &oi2);
/* Same ostid: the pair is valid only at the claimed stripe index. */
4031 if (memcmp(oi, &oi2, sizeof(*oi)) == 0)
4032 GOTO(unlock, rc = (i != idx ? -ENODATA : 0));
4035 GOTO(unlock, rc = -ENODATA);
4038 dt_read_unlock(env, obj);
4039 lu_object_put(env, &obj->do_lu);
4045 * The LFSCK-on-OST will ask the LFSCK-on-MDT to check whether the given
4046 * MDT-object/OST-object pairs match or not to avoid transferring MDT-object
4047 * layout EA from MDT to OST. On one hand, the OST no need to understand
4048 * the layout EA structure; on the other hand, it may cause trouble when
4049 * transfer large layout EA from MDT to OST via normal OUT RPC.
4051 * \ret > 0: unrecognized stripe
4052 * \ret = 0: recognized stripe
4053 * \ret < 0: other failures
/* From the OST (slave), ask the MDT owning @pfid to verify the
 * (child @cfid, parent @pfid) pairing via an LE_PAIRS_VERIFY RPC, so the
 * OST never needs to parse layout EAs itself. Visible results: 1 for
 * "unrecognized" cases (IDIF sequence, sequence not on any MDT, remote
 * -ENOENT/-ENODATA), 0 on success, < 0 on other failures.
 * NOTE(review): some original lines are elided in this extract. */
4055 static int lfsck_layout_slave_check_pairs(const struct lu_env *env,
4056 struct lfsck_component *com,
4057 struct lu_fid *cfid,
4058 struct lu_fid *pfid)
4060 struct lfsck_instance *lfsck = com->lc_lfsck;
4061 struct obd_device *obd = lfsck->li_obd;
4062 struct seq_server_site *ss =
4063 lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
4064 struct obd_export *exp = NULL;
4065 struct ptlrpc_request *req = NULL;
4066 struct lfsck_request *lr;
4067 struct lu_seq_range range = { 0 };
/* IDIF FIDs carry no MDT information; cannot be verified remotely. */
4071 if (unlikely(fid_is_idif(pfid)))
/* Map the parent FID's sequence to its home MDT index via FLD. */
4074 fld_range_set_any(&range);
4075 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(pfid), &range);
4077 RETURN(rc == -ENOENT ? 1 : rc);
4079 if (unlikely(!fld_range_is_mdt(&range)))
4082 exp = lustre_find_lwp_by_index(obd->obd_name, range.lsr_index);
4083 if (unlikely(exp == NULL))
/* The peer must advertise LFSCK support before we send the RPC. */
4086 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
4087 GOTO(out, rc = -EOPNOTSUPP);
4089 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
4091 GOTO(out, rc = -ENOMEM);
4093 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
4095 ptlrpc_request_free(req);
4100 lr = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
4101 memset(lr, 0, sizeof(*lr));
4102 lr->lr_event = LE_PAIRS_VERIFY;
4103 lr->lr_active = LFSCK_TYPE_LAYOUT;
4104 lr->lr_fid = *cfid; /* OST-object itself FID. */
4105 lr->lr_fid2 = *pfid; /* The claimed parent FID. */
4107 ptlrpc_request_set_replen(req);
4108 rc = ptlrpc_queue_wait(req);
4109 ptlrpc_req_finished(req);
/* Remote "not found / no such pairing" means unrecognized, not fatal. */
4111 if (rc == -ENOENT || rc == -ENODATA)
4118 class_export_put(exp);
/* Rewrite the XATTR_NAME_FID ("filter fid") of the local OST-object named
 * by lr->lr_fid so its stored parent FID becomes lr->lr_fid2, inside one
 * local transaction. An already-removed object is not an error (rc = 0).
 * NOTE(review): some original lines are elided in this extract. */
4123 static int lfsck_layout_slave_repair_pfid(const struct lu_env *env,
4124 struct lfsck_component *com,
4125 struct lfsck_request *lr)
4127 struct lfsck_thread_info *info = lfsck_env_info(env);
4128 struct filter_fid *ff = &info->lti_new_pfid;
4130 struct dt_device *dev = com->lc_lfsck->li_bottom;
4131 struct dt_object *obj;
4132 struct thandle *th = NULL;
4136 obj = lfsck_object_find_by_dev(env, dev, &lr->lr_fid);
4138 RETURN(PTR_ERR(obj));
/* On-disk filter_fid stores the parent in little-endian. */
4140 fid_cpu_to_le(&ff->ff_parent, &lr->lr_fid2);
4141 buf = lfsck_buf_get(env, ff, sizeof(*ff));
4142 dt_write_lock(env, obj, 0);
4143 if (unlikely(!dt_object_exists(obj)))
4144 GOTO(unlock, rc = 0);
/* Standard declare/start/execute sequence for a local transaction. */
4146 th = dt_trans_create(env, dev);
4148 GOTO(unlock, rc = PTR_ERR(th));
4150 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th);
4154 rc = dt_trans_start_local(env, dev, th);
4158 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th, BYPASS_CAPA);
4163 dt_trans_stop(env, dev, th);
4166 dt_write_unlock(env, obj);
4167 lu_object_put(env, &obj->do_lu);
/* Reset the layout LFSCK trace file: clear the in-RAM copy entirely on
 * @init, otherwise preserve the success count and last-completion time
 * across the wipe; then re-stamp magic/status and persist it. */
4174 static int lfsck_layout_reset(const struct lu_env *env,
4175 struct lfsck_component *com, bool init)
4177 struct lfsck_layout *lo = com->lc_file_ram;
4180 down_write(&com->lc_sem);
4182 memset(lo, 0, com->lc_file_size);
/* Non-init reset: carry historical statistics across the wipe. */
4184 __u32 count = lo->ll_success_count;
4185 __u64 last_time = lo->ll_time_last_complete;
4187 memset(lo, 0, com->lc_file_size);
4188 lo->ll_success_count = count;
4189 lo->ll_time_last_complete = last_time;
4192 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
4193 lo->ll_status = LS_INIT;
4195 rc = lfsck_layout_store(env, com);
4196 up_write(&com->lc_sem);
/* Record a failure against the layout component; when @new_checked is set
 * (check elided in this extract) the current object is also counted as
 * newly checked. */
4201 static void lfsck_layout_fail(const struct lu_env *env,
4202 struct lfsck_component *com, bool new_checked)
4204 struct lfsck_layout *lo = com->lc_file_ram;
4206 down_write(&com->lc_sem);
4208 com->lc_new_checked++;
4209 lfsck_layout_record_failure(env, com->lc_lfsck, lo);
4210 up_write(&com->lc_sem);
/* Master-side checkpoint: wait until the assistant thread has drained
 * ::llmd_req_list (or either thread stops), then fold the elapsed phase-1
 * statistics and current OIT position into the trace file and store it.
 * No-op when nothing new was checked and this is not the init checkpoint. */
4213 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
4214 struct lfsck_component *com, bool init)
4216 struct lfsck_instance *lfsck = com->lc_lfsck;
4217 struct lfsck_layout *lo = com->lc_file_ram;
4218 struct lfsck_layout_master_data *llmd = com->lc_data;
4219 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4220 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4221 struct l_wait_info lwi = { 0 };
4224 if (com->lc_new_checked == 0 && !init)
/* Only checkpoint a quiescent state: all queued requests processed. */
4227 l_wait_event(mthread->t_ctl_waitq,
4228 list_empty(&llmd->llmd_req_list) ||
4229 !thread_is_running(mthread) ||
4230 thread_is_stopped(athread),
4233 if (!thread_is_running(mthread) || thread_is_stopped(athread))
4236 down_write(&com->lc_sem);
4238 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4240 lo->ll_pos_last_checkpoint =
4241 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the jiffies delta to the nearest second. */
4242 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4243 HALF_SEC - lfsck->li_time_last_checkpoint);
4244 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4245 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4246 com->lc_new_checked = 0;
4249 rc = lfsck_layout_store(env, com);
4250 up_write(&com->lc_sem);
/* Slave-side checkpoint: same statistics/position fold as the master
 * variant, but with no assistant thread to synchronize with. No-op when
 * nothing new was checked and this is not the init checkpoint. */
4255 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
4256 struct lfsck_component *com, bool init)
4258 struct lfsck_instance *lfsck = com->lc_lfsck;
4259 struct lfsck_layout *lo = com->lc_file_ram;
4262 if (com->lc_new_checked == 0 && !init)
4265 down_write(&com->lc_sem);
4268 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4270 lo->ll_pos_last_checkpoint =
4271 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the jiffies delta to the nearest second. */
4272 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4273 HALF_SEC - lfsck->li_time_last_checkpoint);
4274 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4275 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4276 com->lc_new_checked = 0;
4279 rc = lfsck_layout_store(env, com);
4281 up_write(&com->lc_sem);
/* Common preparation for a layout LFSCK run: reset the trace file when the
 * previous run completed / was partial, or when orphan handling is
 * requested; then derive the start position and status (phase 1 vs
 * phase 2) from the stored flags and checkpoints.
 * NOTE(review): some original lines are elided in this extract. */
4286 static int lfsck_layout_prep(const struct lu_env *env,
4287 struct lfsck_component *com,
4288 struct lfsck_start *start)
4290 struct lfsck_instance *lfsck = com->lc_lfsck;
4291 struct lfsck_layout *lo = com->lc_file_ram;
4292 struct lfsck_position *pos = &com->lc_pos_start;
4294 fid_zero(&pos->lp_dir_parent);
4295 pos->lp_dir_cookie = 0;
4296 if (lo->ll_status == LS_COMPLETED ||
4297 lo->ll_status == LS_PARTIAL ||
4298 /* To handle orphan, must scan from the beginning. */
4299 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
4302 rc = lfsck_layout_reset(env, com, false);
4304 rc = lfsck_set_param(env, lfsck, start, true);
4310 down_write(&com->lc_sem);
4311 lo->ll_time_latest_start = cfs_time_current_sec();
4312 spin_lock(&lfsck->li_lock);
/* A full pass was already completed: normally continue with phase 2
 * on the double-scan list, unless a dryrun is being replayed from the
 * first recorded inconsistency. */
4313 if (lo->ll_flags & LF_SCANNED_ONCE) {
4314 if (!lfsck->li_drop_dryrun ||
4315 lo->ll_pos_first_inconsistent == 0) {
4316 lo->ll_status = LS_SCANNING_PHASE2;
4317 list_del_init(&com->lc_link);
4318 list_add_tail(&com->lc_link,
4319 &lfsck->li_list_double_scan);
4320 pos->lp_oit_cookie = 0;
/* Re-run phase 1 from the first inconsistency: wipe statistics. */
4324 lo->ll_status = LS_SCANNING_PHASE1;
4325 lo->ll_run_time_phase1 = 0;
4326 lo->ll_run_time_phase2 = 0;
4327 lo->ll_objs_checked_phase1 = 0;
4328 lo->ll_objs_checked_phase2 = 0;
4329 lo->ll_objs_failed_phase1 = 0;
4330 lo->ll_objs_failed_phase2 = 0;
4331 for (i = 0; i < LLIT_MAX; i++)
4332 lo->ll_objs_repaired[i] = 0;
4334 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4335 fid_zero(&com->lc_fid_latest_scanned_phase2);
/* No completed pass yet: resume phase 1 after the last checkpoint,
 * or from the first inconsistency when replaying a dryrun. */
4338 lo->ll_status = LS_SCANNING_PHASE1;
4339 if (!lfsck->li_drop_dryrun ||
4340 lo->ll_pos_first_inconsistent == 0)
4341 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
4343 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4345 spin_unlock(&lfsck->li_lock);
4346 up_write(&com->lc_sem);
/* Slave-side preparation: run the common prep, re-announce LAST_ID
 * rebuilding if a previous crash was recorded and no master is registered
 * yet, register the requesting MDT on ::llsd_master_list, and build the
 * orphan-tracking rbtree when LPF_ORPHAN is requested. */
4351 static int lfsck_layout_slave_prep(const struct lu_env *env,
4352 struct lfsck_component *com,
4353 struct lfsck_start_param *lsp)
4355 struct lfsck_layout_slave_data *llsd = com->lc_data;
4356 struct lfsck_instance *lfsck = com->lc_lfsck;
4357 struct lfsck_layout *lo = com->lc_file_ram;
4358 struct lfsck_start *start = lsp->lsp_start;
4361 rc = lfsck_layout_prep(env, com, start);
4365 if (lo->ll_flags & LF_CRASHED_LASTID &&
4366 list_empty(&llsd->llsd_master_list)) {
4367 LASSERT(lfsck->li_out_notify != NULL);
4369 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4370 LE_LASTID_REBUILDING);
/* Without a valid requester index there is no master to register. */
4373 if (!lsp->lsp_index_valid)
4376 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
4377 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
4378 LASSERT(!llsd->llsd_rbtree_valid);
4380 write_lock(&llsd->llsd_rb_lock);
4381 rc = lfsck_rbtree_setup(env, com);
4382 write_unlock(&llsd->llsd_rb_lock);
/* Master-side preparation: run the common prep, reset all assistant state
 * flags, then spawn the "lfsck_layout" assistant kthread and wait until it
 * is running (or surface its startup failure status). */
4388 static int lfsck_layout_master_prep(const struct lu_env *env,
4389 struct lfsck_component *com,
4390 struct lfsck_start_param *lsp)
4392 struct lfsck_instance *lfsck = com->lc_lfsck;
4393 struct lfsck_layout_master_data *llmd = com->lc_data;
4394 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4395 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4396 struct lfsck_thread_args *lta;
4397 struct task_struct *task;
4401 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
/* Clear leftover assistant state from any previous run. */
4405 llmd->llmd_assistant_status = 0;
4406 llmd->llmd_post_result = 0;
4407 llmd->llmd_to_post = 0;
4408 llmd->llmd_to_double_scan = 0;
4409 llmd->llmd_in_double_scan = 0;
4410 llmd->llmd_exit = 0;
4411 thread_set_flags(athread, 0);
4413 lta = lfsck_thread_args_init(lfsck, com, lsp);
4415 RETURN(PTR_ERR(lta));
4417 task = kthread_run(lfsck_layout_assistant, lta, "lfsck_layout");
4420 CERROR("%s: Cannot start LFSCK layout assistant thread: "
4421 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
4422 lfsck_thread_args_fini(lta);
4424 struct l_wait_info lwi = { 0 };
/* Block until the assistant reports running (or died at startup). */
4426 l_wait_event(mthread->t_ctl_waitq,
4427 thread_is_running(athread) ||
4428 thread_is_stopped(athread),
4430 if (unlikely(!thread_is_running(athread)))
4431 rc = llmd->llmd_assistant_status;
4439 /* Pre-fetch the attribute for each stripe in the given layout EA. */
/* For each stripe in @parent's layout EA: locate the owning OST, declare
 * pre-fetch of the OST-object's attributes and filter-fid xattr, and queue
 * an lfsck_layout_req on ::llmd_req_list for the assistant thread to
 * verify. Queueing is throttled by the bookmark's async window.
 * NOTE(review): some original lines are elided in this extract. */
4440 static int lfsck_layout_scan_stripes(const struct lu_env *env,
4441 struct lfsck_component *com,
4442 struct dt_object *parent,
4443 struct lov_mds_md_v1 *lmm)
4445 struct lfsck_thread_info *info = lfsck_env_info(env);
4446 struct lfsck_instance *lfsck = com->lc_lfsck;
4447 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4448 struct lfsck_layout *lo = com->lc_file_ram;
4449 struct lfsck_layout_master_data *llmd = com->lc_data;
4450 struct lfsck_layout_object *llo = NULL;
4451 struct lov_ost_data_v1 *objs;
4452 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
4453 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4454 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4455 struct l_wait_info lwi = { 0 };
4464 buf = lfsck_buf_get(env, &info->lti_old_pfid,
4465 sizeof(struct filter_fid_old));
4466 count = le16_to_cpu(lmm->lmm_stripe_count);
4467 gen = le16_to_cpu(lmm->lmm_layout_gen);
4468 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4469 * been verified in lfsck_layout_verify_header() already. If some
4470 * new magic introduced in the future, then layout LFSCK needs to
4471 * be updated also. */
4472 magic = le32_to_cpu(lmm->lmm_magic);
4473 if (magic == LOV_MAGIC_V1) {
4474 objs = &lmm->lmm_objects[0];
4476 LASSERT(magic == LOV_MAGIC_V3);
4477 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
4480 for (i = 0; i < count; i++, objs++) {
4481 struct lu_fid *fid = &info->lti_fid;
4482 struct ost_id *oi = &info->lti_oi;
4483 struct lfsck_layout_req *llr;
4484 struct lfsck_tgt_desc *tgt = NULL;
4485 struct dt_object *cobj = NULL;
4487 le32_to_cpu(objs->l_ost_idx);
4488 bool wakeup = false;
/* Skip holes in a sparse (released/dummy) layout slot. */
4490 if (unlikely(lovea_slot_is_dummy(objs)))
/* Throttle: keep at most lb_async_windows requests in flight
 * (0 means unlimited). */
4493 l_wait_event(mthread->t_ctl_waitq,
4494 bk->lb_async_windows == 0 ||
4495 llmd->llmd_prefetched < bk->lb_async_windows ||
4496 !thread_is_running(mthread) ||
4497 thread_is_stopped(athread),
4500 if (unlikely(!thread_is_running(mthread)) ||
4501 thread_is_stopped(athread))
4504 ostid_le_to_cpu(&objs->l_ost_oi, oi);
4505 ostid_to_fid(fid, oi, index);
4506 tgt = lfsck_tgt_get(ltds, index);
/* OST never joined this LFSCK run: the scan cannot be complete. */
4507 if (unlikely(tgt == NULL)) {
4508 CERROR("%s: Cannot talk with OST %x which did not join "
4509 "the layout LFSCK.\n",
4510 lfsck_lfsck2name(lfsck), index);
4511 lo->ll_flags |= LF_INCOMPLETE;
4515 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
/* Declare (pre-fetch) what the assistant will read later. */
4521 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
4525 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
4531 llo = lfsck_layout_object_init(env, parent, gen);
4538 llr = lfsck_layout_req_init(llo, cobj, index, i);
4545 spin_lock(&llmd->llmd_lock);
/* Assistant already failed: unwind this request and stop. */
4546 if (llmd->llmd_assistant_status < 0) {
4547 spin_unlock(&llmd->llmd_lock);
4548 lfsck_layout_req_fini(env, llr);
4550 RETURN(llmd->llmd_assistant_status);
4553 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
/* First queued item: wake the assistant (flag set on elided line). */
4554 if (llmd->llmd_prefetched == 0)
4557 llmd->llmd_prefetched++;
4558 spin_unlock(&llmd->llmd_lock);
4560 wake_up_all(&athread->t_ctl_waitq);
4563 down_write(&com->lc_sem);
4564 com->lc_new_checked++;
4566 lfsck_layout_record_failure(env, lfsck, lo);
4567 up_write(&com->lc_sem);
4569 if (cobj != NULL && !IS_ERR(cobj))
4570 lu_object_put(env, &cobj->do_lu);
4572 if (likely(tgt != NULL))
4575 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
4582 if (llo != NULL && !IS_ERR(llo))
4583 lfsck_layout_object_put(env, llo);
4588 /* For the given object, read its layout EA locally. For each stripe, pre-fetch
4589 * the OST-object's attribute and generate a struct lfsck_layout_req on the
4590 * list ::llmd_req_list.
4592 * For each request on above list, the lfsck_layout_assistant thread compares
4593 * the OST side attribute with local attribute, if inconsistent, then repair it.
4595 * All above processing is async mode with pipeline. */
/* Master OIT callback for one regular file: read and verify its LOV EA,
 * repair an inconsistent lmm_oi in place (under layout/xattr ldlm lock and
 * a local transaction, honoring LPF_DRYRUN), then queue its stripes for
 * verification via lfsck_layout_scan_stripes().
 * NOTE(review): some original lines are elided in this extract. */
4596 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
4597 struct lfsck_component *com,
4598 struct dt_object *obj)
4600 struct lfsck_thread_info *info = lfsck_env_info(env);
4601 struct ost_id *oi = &info->lti_oi;
4602 struct lfsck_layout *lo = com->lc_file_ram;
4603 struct lfsck_layout_master_data *llmd = com->lc_data;
4604 struct lfsck_instance *lfsck = com->lc_lfsck;
4605 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4606 struct thandle *handle = NULL;
4607 struct lu_buf *buf = &info->lti_big_buf;
4608 struct lov_mds_md_v1 *lmm = NULL;
4609 struct dt_device *dev = lfsck->li_bottom;
4610 struct lustre_handle lh = { 0 };
4611 ssize_t buflen = buf->lb_len;
4613 bool locked = false;
4614 bool stripe = false;
/* Only regular files carry a LOV layout. */
4617 if (!S_ISREG(lfsck_object_type(obj)))
4620 if (llmd->llmd_assistant_status < 0)
4621 GOTO(out, rc = -ESRCH);
/* Expected lmm_oi is derived from the object's own FID (LE on disk). */
4623 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
4624 lmm_oi_cpu_to_le(oi, oi);
4625 dt_read_lock(env, obj, 0);
4629 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
4635 rc = lfsck_layout_verify_header(lmm);
4636 /* If the LOV EA crashed, then it is possible to be rebuilt later
4637 * when handle orphan OST-objects. */
4641 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
4642 GOTO(out, stripe = true);
4644 /* Inconsistent lmm_oi, should be repaired. */
4645 CDEBUG(D_LFSCK, "Repair bad lmm_oi for "DFID"\n",
4646 PFID(lfsck_dto2fid(obj)));
/* Dryrun: only count the would-be repair, still scan the stripes. */
4648 if (bk->lb_param & LPF_DRYRUN) {
4649 down_write(&com->lc_sem);
4650 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4651 up_write(&com->lc_sem);
4653 GOTO(out, stripe = true);
/* Upgrade to an exclusive layout/xattr lock before rewriting the EA. */
4656 if (!lustre_handle_is_used(&lh)) {
4657 dt_read_unlock(env, obj);
4659 buf->lb_len = buflen;
4660 rc = lfsck_layout_lock(env, com, obj, &lh,
4661 MDS_INODELOCK_LAYOUT |
4662 MDS_INODELOCK_XATTR);
4666 handle = dt_trans_create(env, dev);
4668 GOTO(out, rc = PTR_ERR(handle));
4670 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4671 LU_XATTR_REPLACE, handle);
4675 rc = dt_trans_start_local(env, dev, handle);
4679 dt_write_lock(env, obj, 0);
4686 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4687 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
4691 down_write(&com->lc_sem);
4692 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4693 up_write(&com->lc_sem);
4695 GOTO(out, stripe = true);
4699 if (lustre_handle_is_used(&lh))
4700 dt_write_unlock(env, obj);
4702 dt_read_unlock(env, obj);
4705 if (handle != NULL && !IS_ERR(handle))
4706 dt_trans_stop(env, dev, handle);
4708 lfsck_layout_unlock(&lh);
/* Valid layout (possibly just repaired): queue stripes for checking. */
4710 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
4712 down_write(&com->lc_sem);
4713 com->lc_new_checked++;
4715 lfsck_layout_record_failure(env, lfsck, lo);
4716 up_write(&com->lc_sem);
/* Restore the shared big buffer's length for the next user. */
4718 buf->lb_len = buflen;
/* Slave OIT callback for one local OST-object: record its FID in the
 * orphan rbtree bitmap, track the highest object id seen per sequence, and
 * if an id exceeds the on-disk LAST_ID, reload LAST_ID and — when it is
 * genuinely behind — raise LF_CRASHED_LASTID and trigger a rebuild notify.
 * NOTE(review): some original lines are elided in this extract. */
4723 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
4724 struct lfsck_component *com,
4725 struct dt_object *obj)
4727 struct lfsck_instance *lfsck = com->lc_lfsck;
4728 struct lfsck_layout *lo = com->lc_file_ram;
4729 const struct lu_fid *fid = lfsck_dto2fid(obj);
4730 struct lfsck_layout_slave_data *llsd = com->lc_data;
4731 struct lfsck_layout_seq *lls;
4737 LASSERT(llsd != NULL);
/* Fault-injection hook: hold this OST's scan while the test runs. */
4739 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
4740 cfs_fail_val == lfsck_dev_idx(lfsck->li_bottom)) {
4741 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
4743 struct ptlrpc_thread *thread = &lfsck->li_thread;
4745 l_wait_event(thread->t_ctl_waitq,
4746 !thread_is_running(thread),
/* Mark this FID as locally present for later orphan detection. */
4750 lfsck_rbtree_update_bitmap(env, com, fid, false);
4752 down_write(&com->lc_sem);
/* Only IDIF or normal OST-object FIDs take part in LAST_ID tracking. */
4753 if (fid_is_idif(fid))
4755 else if (!fid_is_norm(fid) ||
4756 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
4757 GOTO(unlock, rc = 0);
4760 com->lc_new_checked++;
4762 lls = lfsck_layout_seq_lookup(llsd, seq);
4765 if (unlikely(lls == NULL))
4766 GOTO(unlock, rc = -ENOMEM);
/* First object of this sequence: load its LAST_ID and cache it. */
4768 INIT_LIST_HEAD(&lls->lls_list);
4770 rc = lfsck_layout_lastid_load(env, com, lls);
4772 lo->ll_objs_failed_phase1++;
4777 lfsck_layout_seq_insert(llsd, lls);
/* The LAST_ID object itself is not a data object. */
4780 if (unlikely(fid_is_last_id(fid)))
4781 GOTO(unlock, rc = 0);
4784 if (oid > lls->lls_lastid_known)
4785 lls->lls_lastid_known = oid;
4787 if (oid > lls->lls_lastid) {
4788 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
4789 /* OFD may create new objects during LFSCK scanning. */
4790 rc = lfsck_layout_lastid_reload(env, com, lls);
4791 if (unlikely(rc != 0))
4792 CWARN("%s: failed to reload LAST_ID for "LPX64
4794 lfsck_lfsck2name(com->lc_lfsck),
4796 if (oid <= lls->lls_lastid)
4797 GOTO(unlock, rc = 0);
/* LAST_ID really is behind live objects: declare it crashed. */
4799 LASSERT(lfsck->li_out_notify != NULL);
4801 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4802 LE_LASTID_REBUILDING);
4803 lo->ll_flags |= LF_CRASHED_LASTID;
4806 lls->lls_lastid = oid;
4810 GOTO(unlock, rc = 0);
4813 up_write(&com->lc_sem);
/* Directory-entry callback for the layout component. Only the signature is
 * visible in this extract (body elided); from what is shown it takes no
 * action on namespace entries. */
4818 static int lfsck_layout_exec_dir(const struct lu_env *env,
4819 struct lfsck_component *com,
4820 struct dt_object *obj,
4821 struct lu_dirent *ent)
/* Master-side post-processing after phase 1: hand @result to the assistant
 * (asking it to exit on failure), wait for the request list to drain, then
 * update status (SCANNING_PHASE2 / STOPPED / PAUSED / FAILED), move the
 * component to the double-scan or idle list, fold phase-1 statistics, and
 * store the trace file. */
4826 static int lfsck_layout_master_post(const struct lu_env *env,
4827 struct lfsck_component *com,
4828 int result, bool init)
4830 struct lfsck_instance *lfsck = com->lc_lfsck;
4831 struct lfsck_layout *lo = com->lc_file_ram;
4832 struct lfsck_layout_master_data *llmd = com->lc_data;
4833 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4834 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4835 struct l_wait_info lwi = { 0 };
4840 llmd->llmd_post_result = result;
4841 llmd->llmd_to_post = 1;
/* Non-positive result: tell the assistant to abandon queued work. */
4842 if (llmd->llmd_post_result <= 0)
4843 llmd->llmd_exit = 1;
4845 wake_up_all(&athread->t_ctl_waitq);
4846 l_wait_event(mthread->t_ctl_waitq,
4847 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
4848 thread_is_stopped(athread),
4851 if (llmd->llmd_assistant_status < 0)
4852 result = llmd->llmd_assistant_status;
4854 down_write(&com->lc_sem);
4855 spin_lock(&lfsck->li_lock);
4856 /* When the LFSCK failed, some prefetched objects may not have been
4857 * processed yet; since the exact position is unknown, simply restart
4858 * from the last checkpoint next time. */
4859 if (!init && !llmd->llmd_exit)
4860 lo->ll_pos_last_checkpoint =
4861 lfsck->li_pos_current.lp_oit_cookie;
/* Success: move on to phase 2 (double scan). */
4864 lo->ll_status = LS_SCANNING_PHASE2;
4865 lo->ll_flags |= LF_SCANNED_ONCE;
4866 lo->ll_flags &= ~LF_UPGRADE;
4867 list_del_init(&com->lc_link);
4868 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
4869 } else if (result == 0) {
4870 lo->ll_status = lfsck->li_status;
4871 if (lo->ll_status == 0)
4872 lo->ll_status = LS_STOPPED;
4873 if (lo->ll_status != LS_PAUSED) {
4874 list_del_init(&com->lc_link);
4875 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4878 lo->ll_status = LS_FAILED;
4879 list_del_init(&com->lc_link);
4880 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4882 spin_unlock(&lfsck->li_lock);
/* HALF_SEC rounds the jiffies delta to the nearest second. */
4885 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4886 HALF_SEC - lfsck->li_time_last_checkpoint);
4887 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4888 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4889 com->lc_new_checked = 0;
4892 rc = lfsck_layout_store(env, com);
4893 up_write(&com->lc_sem);
/* Slave-side post-processing after phase 1: persist LAST_ID files, update
 * status/flags and list membership exactly like the master variant, fold
 * phase-1 statistics, store the trace file, then notify the master(s) with
 * LE_PHASE1_DONE. The rbtree cleanup at the end runs on the elided
 * failure path (context lines missing in this extract). */
4898 static int lfsck_layout_slave_post(const struct lu_env *env,
4899 struct lfsck_component *com,
4900 int result, bool init)
4902 struct lfsck_instance *lfsck = com->lc_lfsck;
4903 struct lfsck_layout *lo = com->lc_file_ram;
4907 rc = lfsck_layout_lastid_store(env, com);
4911 LASSERT(lfsck->li_out_notify != NULL);
4913 down_write(&com->lc_sem);
4915 spin_lock(&lfsck->li_lock);
4917 lo->ll_pos_last_checkpoint =
4918 lfsck->li_pos_current.lp_oit_cookie;
/* Success: move on to phase 2; a rebuilt LAST_ID clears the crash flag. */
4920 lo->ll_status = LS_SCANNING_PHASE2;
4921 lo->ll_flags |= LF_SCANNED_ONCE;
4922 if (lo->ll_flags & LF_CRASHED_LASTID) {
4924 lo->ll_flags &= ~LF_CRASHED_LASTID;
4926 lo->ll_flags &= ~LF_UPGRADE;
4927 list_del_init(&com->lc_link);
4928 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
4929 } else if (result == 0) {
4930 lo->ll_status = lfsck->li_status;
4931 if (lo->ll_status == 0)
4932 lo->ll_status = LS_STOPPED;
4933 if (lo->ll_status != LS_PAUSED) {
4934 list_del_init(&com->lc_link);
4935 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4938 lo->ll_status = LS_FAILED;
4939 list_del_init(&com->lc_link);
4940 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4942 spin_unlock(&lfsck->li_lock);
4945 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
/* HALF_SEC rounds the jiffies delta to the nearest second. */
4949 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4950 HALF_SEC - lfsck->li_time_last_checkpoint);
4951 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4952 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4953 com->lc_new_checked = 0;
4956 rc = lfsck_layout_store(env, com);
4958 up_write(&com->lc_sem);
4960 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
4963 lfsck_rbtree_cleanup(env, com);
/* Dump layout LFSCK state and statistics into the given seq_file: status,
 * flags, params, timestamps, positions, repair counters, then per-phase
 * run times and speeds. During phase 1 the current position is read from
 * the low-level otable-based iterator. */
4968 static int lfsck_layout_dump(const struct lu_env *env,
4969 struct lfsck_component *com, struct seq_file *m)
4971 struct lfsck_instance *lfsck = com->lc_lfsck;
4972 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4973 struct lfsck_layout *lo = com->lc_file_ram;
4976 down_read(&com->lc_sem);
4977 seq_printf(m, "name: lfsck_layout\n"
4983 lfsck_status2names(lo->ll_status));
4985 rc = lfsck_bits_dump(m, lo->ll_flags, lfsck_flags_names, "flags");
4989 rc = lfsck_bits_dump(m, bk->lb_param, lfsck_param_names, "param");
4993 rc = lfsck_time_dump(m, lo->ll_time_last_complete,
4994 "time_since_last_completed");
4998 rc = lfsck_time_dump(m, lo->ll_time_latest_start,
4999 "time_since_latest_start");
5003 rc = lfsck_time_dump(m, lo->ll_time_last_checkpoint,
5004 "time_since_last_checkpoint");
5008 seq_printf(m, "latest_start_position: "LPU64"\n"
5009 "last_checkpoint_position: "LPU64"\n"
5010 "first_failure_position: "LPU64"\n",
5011 lo->ll_pos_latest_start,
5012 lo->ll_pos_last_checkpoint,
5013 lo->ll_pos_first_inconsistent);
5015 seq_printf(m, "success_count: %u\n"
5016 "repaired_dangling: "LPU64"\n"
5017 "repaired_unmatched_pair: "LPU64"\n"
5018 "repaired_multiple_referenced: "LPU64"\n"
5019 "repaired_orphan: "LPU64"\n"
5020 "repaired_inconsistent_owner: "LPU64"\n"
5021 "repaired_others: "LPU64"\n"
5022 "skipped: "LPU64"\n"
5023 "failed_phase1: "LPU64"\n"
5024 "failed_phase2: "LPU64"\n",
5025 lo->ll_success_count,
5026 lo->ll_objs_repaired[LLIT_DANGLING - 1],
5027 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
5028 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
5029 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
5030 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
5031 lo->ll_objs_repaired[LLIT_OTHERS - 1],
5032 lo->ll_objs_skipped,
5033 lo->ll_objs_failed_phase1,
5034 lo->ll_objs_failed_phase2);
5036 if (lo->ll_status == LS_SCANNING_PHASE1) {
5038 const struct dt_it_ops *iops;
5039 cfs_duration_t duration = cfs_time_current() -
5040 lfsck->li_time_last_checkpoint;
5041 __u64 checked = lo->ll_objs_checked_phase1 +
5042 com->lc_new_checked;
5043 __u64 speed = checked;
/* Scale by HZ because 'duration' is measured in jiffies. */
5044 __u64 new_checked = com->lc_new_checked * HZ;
5045 __u32 rtime = lo->ll_run_time_phase1 +
5046 cfs_duration_sec(duration + HALF_SEC);
5049 do_div(new_checked, duration);
5051 do_div(speed, rtime);
5052 seq_printf(m, "checked_phase1: "LPU64"\n"
5053 "checked_phase2: "LPU64"\n"
5054 "run_time_phase1: %u seconds\n"
5055 "run_time_phase2: %u seconds\n"
5056 "average_speed_phase1: "LPU64" items/sec\n"
5057 "average_speed_phase2: N/A\n"
5058 "real-time_speed_phase1: "LPU64" items/sec\n"
5059 "real-time_speed_phase2: N/A\n",
5061 lo->ll_objs_checked_phase2,
5063 lo->ll_run_time_phase2,
5067 LASSERT(lfsck->li_di_oit != NULL);
5069 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
5071 /* The low layer otable-based iteration position may NOT
5072 * exactly match the layout-based directory traversal
5073 * cookie. Generally, it is not a serious issue. But the
5074 * caller should NOT make assumption on that. */
5075 pos = iops->store(env, lfsck->li_di_oit);
5076 if (!lfsck->li_current_oit_processed)
5078 seq_printf(m, "current_position: "LPU64"\n", pos);
5080 } else if (lo->ll_status == LS_SCANNING_PHASE2) {
5081 cfs_duration_t duration = cfs_time_current() -
5082 lfsck->li_time_last_checkpoint;
5083 __u64 checked = lo->ll_objs_checked_phase2 +
5084 com->lc_new_checked;
5085 __u64 speed1 = lo->ll_objs_checked_phase1;
5086 __u64 speed2 = checked;
/* Scale by HZ because 'duration' is measured in jiffies. */
5087 __u64 new_checked = com->lc_new_checked * HZ;
5088 __u32 rtime = lo->ll_run_time_phase2 +
5089 cfs_duration_sec(duration + HALF_SEC);
5092 do_div(new_checked, duration);
5093 if (lo->ll_run_time_phase1 != 0)
5094 do_div(speed1, lo->ll_run_time_phase1);
5096 do_div(speed2, rtime);
5097 rc = seq_printf(m, "checked_phase1: "LPU64"\n"
5098 "checked_phase2: "LPU64"\n"
5099 "run_time_phase1: %u seconds\n"
5100 "run_time_phase2: %u seconds\n"
5101 "average_speed_phase1: "LPU64" items/sec\n"
5102 "average_speed_phase2: "LPU64" items/sec\n"
5103 "real-time_speed_phase1: N/A\n"
5104 "real-time_speed_phase2: "LPU64" items/sec\n"
5105 "current_position: "DFID"\n",
5106 lo->ll_objs_checked_phase1,
5108 lo->ll_run_time_phase1,
5113 PFID(&com->lc_fid_latest_scanned_phase2));
/* Not scanning: report stored totals only; position is meaningless. */
5118 __u64 speed1 = lo->ll_objs_checked_phase1;
5119 __u64 speed2 = lo->ll_objs_checked_phase2;
5121 if (lo->ll_run_time_phase1 != 0)
5122 do_div(speed1, lo->ll_run_time_phase1);
5123 if (lo->ll_run_time_phase2 != 0)
5124 do_div(speed2, lo->ll_run_time_phase2);
5125 seq_printf(m, "checked_phase1: "LPU64"\n"
5126 "checked_phase2: "LPU64"\n"
5127 "run_time_phase1: %u seconds\n"
5128 "run_time_phase2: %u seconds\n"
5129 "average_speed_phase1: "LPU64" items/sec\n"
5130 "average_speed_phase2: "LPU64" objs/sec\n"
5131 "real-time_speed_phase1: N/A\n"
5132 "real-time_speed_phase2: N/A\n"
5133 "current_position: N/A\n",
5134 lo->ll_objs_checked_phase1,
5135 lo->ll_objs_checked_phase2,
5136 lo->ll_run_time_phase1,
5137 lo->ll_run_time_phase2,
5142 up_read(&com->lc_sem);
/* Kick the assistant thread into phase 2 (double scan) and wait until it
 * has entered it or stopped; surface the assistant's failure status.
 * No-op unless the component is in LS_SCANNING_PHASE2. */
5147 static int lfsck_layout_master_double_scan(const struct lu_env *env,
5148 struct lfsck_component *com)
5150 struct lfsck_layout_master_data *llmd = com->lc_data;
5151 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5152 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5153 struct lfsck_layout *lo = com->lc_file_ram;
5154 struct l_wait_info lwi = { 0 };
5156 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
5159 llmd->llmd_to_double_scan = 1;
5160 wake_up_all(&athread->t_ctl_waitq);
5161 l_wait_event(mthread->t_ctl_waitq,
5162 llmd->llmd_in_double_scan ||
5163 thread_is_stopped(athread),
5165 if (llmd->llmd_assistant_status < 0)
5166 return llmd->llmd_assistant_status;
/* Slave-side phase 2: periodically (30s timeout) query the master(s) via
 * lfsck_layout_slave_query_master() until ::llsd_master_list drains or the
 * local thread stops; then compute the double-scan result, clean up the
 * orphan rbtree, and send LE_PHASE2_DONE to the master(s).
 * NOTE(review): some original lines are elided in this extract. */
5171 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
5172 struct lfsck_component *com)
5174 struct lfsck_instance *lfsck = com->lc_lfsck;
5175 struct lfsck_layout_slave_data *llsd = com->lc_data;
5176 struct lfsck_layout *lo = com->lc_file_ram;
5177 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Not entering phase 2: report completion immediately. */
5181 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
5182 lfsck_rbtree_cleanup(env, com);
5183 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
5187 atomic_inc(&lfsck->li_double_scan_count);
5189 com->lc_new_checked = 0;
5190 com->lc_new_scanned = 0;
5191 com->lc_time_last_checkpoint = cfs_time_current();
5192 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
5193 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
5196 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
5199 rc = lfsck_layout_slave_query_master(env, com);
5200 if (list_empty(&llsd->llsd_master_list)) {
5201 if (unlikely(!thread_is_running(thread)))
/* Wait for either shutdown or all masters reporting done;
 * -ETIMEDOUT just triggers another query round. */
5212 rc = l_wait_event(thread->t_ctl_waitq,
5213 !thread_is_running(thread) ||
5214 list_empty(&llsd->llsd_master_list),
5216 if (unlikely(!thread_is_running(thread)))
5219 if (rc == -ETIMEDOUT)
5222 GOTO(done, rc = (rc < 0 ? rc : 1));
5226 rc = lfsck_layout_double_scan_result(env, com, rc);
5228 lfsck_rbtree_cleanup(env, com);
5229 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
/* Wake anyone waiting for all components to finish double scan. */
5230 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
5231 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/*
 * Release the master's private layout-LFSCK data: detach every OST and
 * MDT target descriptor from the phase1/phase2/layout lists (under the
 * respective ltd_lock) and clear com->lc_data. Only legal once the
 * assistant thread is init or stopped and the request list is empty
 * (asserted below).
 *
 * NOTE(review): tokens such as "<ds->ltd_lock" and "<d->ltd_layout..."
 * below look entity-mangled from "&ltds->..."/"&ltd->..." — verify
 * against upstream before use. Loop-closing braces, some second list
 * arguments and the final OBD_FREE of llmd are elided by the extraction.
 */
5236 static void lfsck_layout_master_data_release(const struct lu_env *env,
5237 struct lfsck_component *com)
5239 struct lfsck_layout_master_data *llmd = com->lc_data;
5240 struct lfsck_instance *lfsck = com->lc_lfsck;
5241 struct lfsck_tgt_descs *ltds;
5242 struct lfsck_tgt_desc *ltd;
5243 struct lfsck_tgt_desc *next;
5245 LASSERT(llmd != NULL);
5246 LASSERT(thread_is_init(&llmd->llmd_thread) ||
5247 thread_is_stopped(&llmd->llmd_thread));
5248 LASSERT(list_empty(&llmd->llmd_req_list));
5250 com->lc_data = NULL;
/* Drain OST targets from all three OST-side lists. */
5252 ltds = &lfsck->li_ost_descs;
5253 spin_lock(<ds->ltd_lock);
5254 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
5255 ltd_layout_phase_list) {
5256 list_del_init(<d->ltd_layout_phase_list);
5258 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
5259 ltd_layout_phase_list) {
5260 list_del_init(<d->ltd_layout_phase_list);
5262 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
5264 list_del_init(<d->ltd_layout_list);
5266 spin_unlock(<ds->ltd_lock);
/* Same drain for the MDT-side lists. */
5268 ltds = &lfsck->li_mdt_descs;
5269 spin_lock(<ds->ltd_lock);
5270 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
5271 ltd_layout_phase_list) {
5272 list_del_init(<d->ltd_layout_phase_list);
5274 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
5275 ltd_layout_phase_list) {
5276 list_del_init(<d->ltd_layout_phase_list);
5278 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
5280 list_del_init(<d->ltd_layout_list);
5282 spin_unlock(<ds->ltd_lock);
/*
 * Release the slave's private layout-LFSCK data: drop every cached
 * per-sequence LAST_ID object, every tracked master target, clean up
 * the FID-accessed rbtree and clear com->lc_data.
 *
 * NOTE(review): the extraction elides the loop-closing braces, the
 * per-entry frees (OBD_FREE of lls / llst put) and the final OBD_FREE
 * of llsd — the text below is not a complete body.
 */
5287 static void lfsck_layout_slave_data_release(const struct lu_env *env,
5288 struct lfsck_component *com)
5290 struct lfsck_layout_slave_data *llsd = com->lc_data;
5291 struct lfsck_layout_seq *lls;
5292 struct lfsck_layout_seq *next;
5293 struct lfsck_layout_slave_target *llst;
5294 struct lfsck_layout_slave_target *tmp;
5296 LASSERT(llsd != NULL);
/* Drop cached LAST_ID objects, one per object sequence. */
5298 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
5300 list_del_init(&lls->lls_list);
5301 lfsck_object_put(env, lls->lls_lastid_obj);
/* Forget any masters still registered for orphan iteration. */
5305 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
5307 list_del_init(&llst->llst_list);
5311 lfsck_rbtree_cleanup(env, com);
5312 com->lc_data = NULL;
/*
 * Ask the master's assistant thread to exit and wait (no timeout,
 * lwi = {0}) until it is back in init state or fully stopped.
 *
 * NOTE(review): opening brace and the l_wait_event tail (the lwi
 * argument / closing parenthesis) are elided by the extraction.
 */
5316 static void lfsck_layout_master_quit(const struct lu_env *env,
5317 struct lfsck_component *com)
5319 struct lfsck_layout_master_data *llmd = com->lc_data;
5320 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5321 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5322 struct l_wait_info lwi = { 0 };
5324 llmd->llmd_exit = 1;
5325 wake_up_all(&athread->t_ctl_waitq);
5326 l_wait_event(mthread->t_ctl_waitq,
5327 thread_is_init(athread) ||
5328 thread_is_stopped(athread),
/*
 * Slave-side quit handler: the only teardown needed is to release the
 * FID-accessed rbtree. (Opening/closing braces elided by extraction.)
 */
5332 static void lfsck_layout_slave_quit(const struct lu_env *env,
5333 struct lfsck_component *com)
5335 lfsck_rbtree_cleanup(env, com);
/*
 * Handle an inbound LFSCK event on the master.
 *
 * LE_PAIRS_VERIFY is serviced directly via lfsck_layout_master_check_pairs;
 * otherwise only LE_PHASE1_DONE / LE_PHASE2_DONE / LE_PEER_EXIT are
 * accepted. The sender (OST vs MDT, per LEF_FROM_OST) is looked up in
 * the matching target table and moved between the phase1/phase2 lists;
 * failures mark the scan LF_INCOMPLETE. With LPF_FAILOUT set, a peer
 * failure stops the whole LFSCK; otherwise the assistant thread may be
 * woken for orphan handling.
 *
 * NOTE(review): "<ds"/"<d" tokens below look entity-mangled from
 * "&ltds"/"&ltd"; many lines (braces, early returns, the "fail" local,
 * case terminators, final return) are elided by the extraction.
 */
5338 static int lfsck_layout_master_in_notify(const struct lu_env *env,
5339 struct lfsck_component *com,
5340 struct lfsck_request *lr)
5342 struct lfsck_instance *lfsck = com->lc_lfsck;
5343 struct lfsck_layout *lo = com->lc_file_ram;
5344 struct lfsck_layout_master_data *llmd = com->lc_data;
5345 struct lfsck_tgt_descs *ltds;
5346 struct lfsck_tgt_desc *ltd;
/* Pair-verification requests are handled inline, not via the lists. */
5350 if (lr->lr_event == LE_PAIRS_VERIFY) {
5353 rc = lfsck_layout_master_check_pairs(env, com, &lr->lr_fid,
/* Ignore everything except the three peer progress/exit events. */
5359 if (lr->lr_event != LE_PHASE1_DONE &&
5360 lr->lr_event != LE_PHASE2_DONE &&
5361 lr->lr_event != LE_PEER_EXIT)
5364 if (lr->lr_flags & LEF_FROM_OST)
5365 ltds = &lfsck->li_ost_descs;
5367 ltds = &lfsck->li_mdt_descs;
5368 spin_lock(<ds->ltd_lock);
5369 ltd = LTD_TGT(ltds, lr->lr_index);
5371 spin_unlock(<ds->ltd_lock);
5376 list_del_init(<d->ltd_layout_phase_list);
5377 switch (lr->lr_event) {
5378 case LE_PHASE1_DONE:
/* Negative/zero status: the peer failed or stopped during phase 1. */
5379 if (lr->lr_status <= 0) {
5380 ltd->ltd_layout_done = 1;
5381 list_del_init(<d->ltd_layout_list);
5382 CWARN("%s: %s %x failed/stopped at phase1: rc = %d.\n",
5383 lfsck_lfsck2name(lfsck),
5384 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5385 ltd->ltd_index, lr->lr_status);
5386 lo->ll_flags |= LF_INCOMPLETE;
/* Phase 1 succeeded: queue the target for phase 2. */
5391 if (lr->lr_flags & LEF_FROM_OST) {
5392 if (list_empty(<d->ltd_layout_list))
5393 list_add_tail(<d->ltd_layout_list,
5394 &llmd->llmd_ost_list);
5395 list_add_tail(<d->ltd_layout_phase_list,
5396 &llmd->llmd_ost_phase2_list);
5398 if (list_empty(<d->ltd_layout_list))
5399 list_add_tail(<d->ltd_layout_list,
5400 &llmd->llmd_mdt_list);
5401 list_add_tail(<d->ltd_layout_phase_list,
5402 &llmd->llmd_mdt_phase2_list);
5405 case LE_PHASE2_DONE:
5406 ltd->ltd_layout_done = 1;
5407 list_del_init(<d->ltd_layout_list);
/* LE_PEER_EXIT (case label elided): peer abandoned the scan. */
5411 ltd->ltd_layout_done = 1;
5412 list_del_init(<d->ltd_layout_list);
5413 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT)) {
5414 CWARN("%s: the peer %s %x exit layout LFSCK.\n",
5415 lfsck_lfsck2name(lfsck),
5416 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5418 lo->ll_flags |= LF_INCOMPLETE;
5424 spin_unlock(<ds->ltd_lock);
/* In failout mode a peer failure aborts the whole LFSCK run. */
5426 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5427 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5429 memset(stop, 0, sizeof(*stop));
5430 stop->ls_status = lr->lr_status;
5431 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5432 lfsck_stop(env, lfsck->li_bottom, stop);
5433 } else if (lfsck_layout_master_to_orphan(llmd)) {
5434 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Handle an inbound LFSCK event on the slave (OST).
 *
 * LE_FID_ACCESSED marks a FID in the rbtree bitmap; LE_CONDITIONAL_DESTROY
 * conditionally destroys an orphan; LE_PAIRS_VERIFY runs the two-step
 * PFID consistency check (local xattr first, then the client-supplied
 * PFID) and repairs the local PFID xattr when the client's claim wins.
 * LE_PHASE2_DONE / peer-exit removes the master from llsd_master_list,
 * waking the local thread when the list drains; LE_PEER_EXIT with
 * LPF_FAILOUT stops the local LFSCK.
 *
 * NOTE(review): break statements, several case labels, brace lines and
 * the final return are elided by the extraction.
 */
5440 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
5441 struct lfsck_component *com,
5442 struct lfsck_request *lr)
5444 struct lfsck_instance *lfsck = com->lc_lfsck;
5445 struct lfsck_layout_slave_data *llsd = com->lc_data;
5446 struct lfsck_layout_slave_target *llst;
5450 switch (lr->lr_event) {
5451 case LE_FID_ACCESSED:
5452 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
5454 case LE_CONDITIONAL_DESTROY:
5455 rc = lfsck_layout_slave_conditional_destroy(env, com, lr);
5457 case LE_PAIRS_VERIFY: {
5458 lr->lr_status = LPVS_INIT;
5459 /* Firstly, if the MDT-object which is claimed via OST-object
5460 * local stored PFID xattr recognizes the OST-object, then it
5461 * must be that the client given PFID is wrong. */
5462 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5467 lr->lr_status = LPVS_INCONSISTENT;
5468 /* The OST-object local stored PFID xattr is stale. We need to
5469 * check whether the MDT-object that is claimed via the client
5470 * given PFID information recognizes the OST-object or not. If
5471 * matches, then need to update the OST-object's PFID xattr. */
5472 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5475 * We are not sure whether the client given PFID information
5476 * is correct or not, do nothing to avoid improper fixing.
5479 * The client given PFID information is also invalid, we can
5480 * NOT fix the OST-object inconsistency.
5485 lr->lr_status = LPVS_INCONSISTENT_TOFIX;
5486 rc = lfsck_layout_slave_repair_pfid(env, com, lr);
5490 case LE_PHASE2_DONE:
/* Drop the notifying master from the tracked-master list. */
5497 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
5501 lfsck_layout_llst_put(llst);
5502 if (list_empty(&llsd->llsd_master_list))
5503 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/* A peer exit under failout policy stops the local LFSCK too. */
5505 if (lr->lr_event == LE_PEER_EXIT &&
5506 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5507 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5509 memset(stop, 0, sizeof(*stop));
5510 stop->ls_status = lr->lr_status;
5511 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5512 lfsck_stop(env, lfsck->li_bottom, stop);
/*
 * Report the current layout LFSCK status (the in-RAM ll_status value).
 * Shared by master and slave operation tables. (Braces elided by the
 * extraction.)
 */
5518 static int lfsck_layout_query(const struct lu_env *env,
5519 struct lfsck_component *com)
5521 struct lfsck_layout *lo = com->lc_file_ram;
5523 return lo->ll_status;
/*
 * Asynchronously notify one target that the master is co-stopping:
 * unlink the target from the layout lists, build an LE_PEER_EXIT /
 * LS_CO_PAUSED request (LEF_TO_OST when the target table is the OST
 * set) and queue it on the given ptlrpc request set via
 * lfsck_async_request; the target reference taken here is expected to
 * be released by the interpret callback.
 *
 * NOTE(review): "<ds"/"<d" below look entity-mangled from
 * "&ltds"/"&ltd"; braces, "int rc", the early RETURN and the error-path
 * tgt_put / final return are elided by the extraction.
 */
5526 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
5527 struct lfsck_component *com,
5528 struct lfsck_tgt_descs *ltds,
5529 struct lfsck_tgt_desc *ltd,
5530 struct ptlrpc_request_set *set)
5532 struct lfsck_thread_info *info = lfsck_env_info(env);
5533 struct lfsck_async_interpret_args *laia = &info->lti_laia;
5534 struct lfsck_request *lr = &info->lti_lr;
5535 struct lfsck_instance *lfsck = com->lc_lfsck;
/* Target already off the lists: nothing to notify. */
5538 spin_lock(<ds->ltd_lock);
5539 if (list_empty(<d->ltd_layout_list)) {
5540 LASSERT(list_empty(<d->ltd_layout_phase_list));
5541 spin_unlock(<ds->ltd_lock);
5546 list_del_init(<d->ltd_layout_phase_list);
5547 list_del_init(<d->ltd_layout_list);
5548 spin_unlock(<ds->ltd_lock);
/* Build the co-pause notification request. */
5550 memset(lr, 0, sizeof(*lr));
5551 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
5552 lr->lr_event = LE_PEER_EXIT;
5553 lr->lr_active = LFSCK_TYPE_LAYOUT;
5554 lr->lr_status = LS_CO_PAUSED;
5555 if (ltds == &lfsck->li_ost_descs)
5556 lr->lr_flags = LEF_TO_OST;
5558 laia->laia_com = com;
5559 laia->laia_ltds = ltds;
5560 atomic_inc(<d->ltd_ref);
5561 laia->laia_ltd = ltd;
5563 laia->laia_shared = 0;
5565 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
5566 lfsck_layout_master_async_interpret,
5567 laia, LFSCK_NOTIFY);
5569 CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
5570 lfsck_lfsck2name(lfsck),
5571 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5572 ltd->ltd_index, rc);
5579 /* with lfsck::li_lock held */
/*
 * Join an already-running layout LFSCK for orphan handling on the slave:
 * only for LPF_ORPHAN starts with a valid sender index and a live
 * FID-accessed rbtree. Registers the requesting master via
 * lfsck_layout_llst_add (temporarily dropping li_lock), and rolls the
 * registration back if the local thread stopped in the meantime.
 *
 * NOTE(review): braces, early RETURNs and the final return are elided
 * by the extraction.
 */
5580 static int lfsck_layout_slave_join(const struct lu_env *env,
5581 struct lfsck_component *com,
5582 struct lfsck_start_param *lsp)
5584 struct lfsck_instance *lfsck = com->lc_lfsck;
5585 struct lfsck_layout_slave_data *llsd = com->lc_data;
5586 struct lfsck_layout_slave_target *llst;
5587 struct lfsck_start *start = lsp->lsp_start;
5591 if (start == NULL || !(start->ls_flags & LPF_ORPHAN))
5594 if (!lsp->lsp_index_valid)
5597 /* If someone is running the LFSCK without orphan handling,
5598 * it will not maintain the object accessing rbtree. So we
5599 * cannot join it for orphan handling. */
5600 if (!llsd->llsd_rbtree_valid)
/* llst_add may sleep/allocate: drop the caller-held li_lock around it. */
5603 spin_unlock(&lfsck->li_lock);
5604 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
5605 spin_lock(&lfsck->li_lock);
/* Thread stopped while unlocked: undo the registration. */
5606 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
5607 spin_unlock(&lfsck->li_lock);
5608 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index,
5611 lfsck_layout_llst_put(llst);
5612 spin_lock(&lfsck->li_lock);
/*
 * Operation table for the layout LFSCK component on the master (MDT)
 * side. (Closing "};" elided by the extraction.)
 */
5619 static struct lfsck_operations lfsck_layout_master_ops = {
5620 .lfsck_reset = lfsck_layout_reset,
5621 .lfsck_fail = lfsck_layout_fail,
5622 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
5623 .lfsck_prep = lfsck_layout_master_prep,
5624 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
5625 .lfsck_exec_dir = lfsck_layout_exec_dir,
5626 .lfsck_post = lfsck_layout_master_post,
5627 .lfsck_interpret = lfsck_layout_master_async_interpret,
5628 .lfsck_dump = lfsck_layout_dump,
5629 .lfsck_double_scan = lfsck_layout_master_double_scan,
5630 .lfsck_data_release = lfsck_layout_master_data_release,
5631 .lfsck_quit = lfsck_layout_master_quit,
5632 .lfsck_in_notify = lfsck_layout_master_in_notify,
5633 .lfsck_query = lfsck_layout_query,
5634 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/*
 * Operation table for the layout LFSCK component on the slave (OST)
 * side; supports join (for orphan handling) instead of stop_notify.
 * (Closing "};" elided by the extraction.)
 */
5637 static struct lfsck_operations lfsck_layout_slave_ops = {
5638 .lfsck_reset = lfsck_layout_reset,
5639 .lfsck_fail = lfsck_layout_fail,
5640 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
5641 .lfsck_prep = lfsck_layout_slave_prep,
5642 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
5643 .lfsck_exec_dir = lfsck_layout_exec_dir,
5644 .lfsck_post = lfsck_layout_slave_post,
5645 .lfsck_dump = lfsck_layout_dump,
5646 .lfsck_double_scan = lfsck_layout_slave_double_scan,
5647 .lfsck_data_release = lfsck_layout_slave_data_release,
5648 .lfsck_quit = lfsck_layout_slave_quit,
5649 .lfsck_in_notify = lfsck_layout_slave_in_notify,
5650 .lfsck_query = lfsck_layout_query,
5651 .lfsck_join = lfsck_layout_slave_join,
/*
 * Set up the layout LFSCK component for an LFSCK instance.
 *
 * Allocates the component, picks the master or slave operation table
 * and private data (per lfsck->li_master), allocates the RAM and disk
 * copies of the trace file, locates/creates the "lfsck_layout" file
 * under the local root, loads (or resets/initializes) the persistent
 * state, and links the component onto the idle or scan list depending
 * on the loaded status; a crashed-LASTID flag triggers an out-notify
 * of LE_LASTID_REBUILDING.
 *
 * NOTE(review): many lines are elided by the extraction (allocation of
 * "com", NULL checks, several case labels of the status switch,
 * com->lc_obj assignment, GOTO/RETURN tails, error-path cleanup) — the
 * text below is far from a complete function body.
 */
5654 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
5656 struct lfsck_component *com;
5657 struct lfsck_layout *lo;
5658 struct dt_object *root = NULL;
5659 struct dt_object *obj;
5667 INIT_LIST_HEAD(&com->lc_link);
5668 INIT_LIST_HEAD(&com->lc_link_dir);
5669 init_rwsem(&com->lc_sem);
5670 atomic_set(&com->lc_ref, 1);
5671 com->lc_lfsck = lfsck;
5672 com->lc_type = LFSCK_TYPE_LAYOUT;
/* Master (MDT) side: assistant-thread lists and wait queue. */
5673 if (lfsck->li_master) {
5674 struct lfsck_layout_master_data *llmd;
5676 com->lc_ops = &lfsck_layout_master_ops;
5677 OBD_ALLOC_PTR(llmd);
5679 GOTO(out, rc = -ENOMEM);
5681 INIT_LIST_HEAD(&llmd->llmd_req_list);
5682 spin_lock_init(&llmd->llmd_lock);
5683 INIT_LIST_HEAD(&llmd->llmd_ost_list);
5684 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
5685 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
5686 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
5687 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
5688 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
5689 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
5690 com->lc_data = llmd;
/* Slave (OST) side: sequence list, master list and rbtree. */
5692 struct lfsck_layout_slave_data *llsd;
5694 com->lc_ops = &lfsck_layout_slave_ops;
5695 OBD_ALLOC_PTR(llsd);
5697 GOTO(out, rc = -ENOMEM);
5699 INIT_LIST_HEAD(&llsd->llsd_seq_list);
5700 INIT_LIST_HEAD(&llsd->llsd_master_list);
5701 spin_lock_init(&llsd->llsd_lock);
5702 llsd->llsd_rb_root = RB_ROOT;
5703 rwlock_init(&llsd->llsd_rb_lock);
5704 com->lc_data = llsd;
5706 com->lc_file_size = sizeof(*lo);
5707 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
5708 if (com->lc_file_ram == NULL)
5709 GOTO(out, rc = -ENOMEM);
5711 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
5712 if (com->lc_file_disk == NULL)
5713 GOTO(out, rc = -ENOMEM);
/* Find (or create) the persistent lfsck_layout trace file. */
5715 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
5717 GOTO(out, rc = PTR_ERR(root));
5719 if (unlikely(!dt_try_as_dir(env, root)))
5720 GOTO(out, rc = -ENOTDIR);
5722 obj = local_file_find_or_create(env, lfsck->li_los, root,
5724 S_IFREG | S_IRUGO | S_IWUSR);
5726 GOTO(out, rc = PTR_ERR(obj));
/* Load the stored state; reset on corruption, init when absent. */
5729 rc = lfsck_layout_load(env, com);
5731 rc = lfsck_layout_reset(env, com, true);
5732 else if (rc == -ENOENT)
5733 rc = lfsck_layout_init(env, com);
5738 lo = com->lc_file_ram;
5739 switch (lo->ll_status) {
/* Completed/stopped states: park the component on the idle list. */
5745 spin_lock(&lfsck->li_lock);
5746 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5747 spin_unlock(&lfsck->li_lock);
5750 CERROR("%s: unknown lfsck_layout status: rc = %u\n",
5751 lfsck_lfsck2name(lfsck), lo->ll_status);
5753 case LS_SCANNING_PHASE1:
5754 case LS_SCANNING_PHASE2:
5755 /* No need to store the status to disk right now.
5756 * If the system crashed before the status stored,
5757 * it will be loaded back when next time. */
5758 lo->ll_status = LS_CRASHED;
5759 lo->ll_flags |= LF_INCOMPLETE;
5766 spin_lock(&lfsck->li_lock);
5767 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
5768 spin_unlock(&lfsck->li_lock);
/* A crashed LAST_ID file needs the OSP layer told to rebuild. */
5772 if (lo->ll_flags & LF_CRASHED_LASTID) {
5773 LASSERT(lfsck->li_out_notify != NULL);
5775 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5776 LE_LASTID_REBUILDING);
5782 if (root != NULL && !IS_ERR(root))
5783 lu_object_put(env, &root->do_lu);
5786 lfsck_component_cleanup(env, com);
/*
 * Iterator state for handing orphan OST-objects back to the requesting
 * MDT: owning component, current rbtree node, the master target being
 * served, the current key FID and the record to return.
 * NOTE(review): at least one member (the iteration hash, used as
 * loi_hash elsewhere in this file) and the closing brace are elided by
 * the extraction.
 */
5791 struct lfsck_orphan_it {
5792 struct lfsck_component *loi_com;
5793 struct lfsck_rbtree_node *loi_lrn;
5794 struct lfsck_layout_slave_target *loi_llst;
5795 struct lu_fid loi_key;
5796 struct lu_orphan_rec loi_rec;
5798 unsigned int loi_over:1;
/*
 * Decide whether the given FID belongs to the MDT with index @idx by
 * looking its sequence up in the FLD. Non-normal FIDs (all abnormal
 * cases) are attributed to MDT0. Visible logic suggests: match ->
 * positive, no match -> 0/negative — exact return values are in elided
 * lines; verify against upstream.
 *
 * NOTE(review): braces, the abnormal-case body, error RETURNs and the
 * final returns are elided by the extraction.
 */
5801 static int lfsck_fid_match_idx(const struct lu_env *env,
5802 struct lfsck_instance *lfsck,
5803 const struct lu_fid *fid, int idx)
5805 struct seq_server_site *ss;
5806 struct lu_server_fld *sf;
5807 struct lu_seq_range range = { 0 };
5810 /* All abnormal cases will be returned to MDT0. */
5811 if (!fid_is_norm(fid)) {
5818 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
5819 if (unlikely(ss == NULL))
5822 sf = ss->ss_server_fld;
5823 LASSERT(sf != NULL);
5825 fld_range_set_any(&range);
5826 rc = fld_server_lookup(env, sf, fid_seq(fid), &range);
5830 if (!fld_range_is_mdt(&range))
5833 if (range.lsr_index == idx)
/*
 * Destroy one orphan OST-object in a single local transaction:
 * declare ref_del + destroy, start the transaction, then under the
 * object write lock drop the nlink reference and destroy the object;
 * the transaction is always stopped on the way out.
 *
 * NOTE(review): braces, IS_ERR/rc checks between the visible steps and
 * the RETURN tail are elided by the extraction.
 */
5839 static void lfsck_layout_destroy_orphan(const struct lu_env *env,
5840 struct dt_device *dev,
5841 struct dt_object *obj)
5843 struct thandle *handle;
5847 handle = dt_trans_create(env, dev);
5851 rc = dt_declare_ref_del(env, obj, handle);
5855 rc = dt_declare_destroy(env, obj, handle);
5859 rc = dt_trans_start_local(env, dev, handle);
5863 dt_write_lock(env, obj, 0);
5864 rc = dt_ref_del(env, obj, handle);
5866 rc = dt_destroy(env, obj, handle);
5867 dt_write_unlock(env, obj);
5872 dt_trans_stop(env, dev, handle);
/*
 * Stub dt_index lookup for the virtual orphan index: the orphan "index"
 * is only iterable, not point-queried. Body (presumably an error
 * return) and one parameter line are elided by the extraction.
 */
5877 static int lfsck_orphan_index_lookup(const struct lu_env *env,
5878 struct dt_object *dt,
5880 const struct dt_key *key,
5881 struct lustre_capa *capa)
/*
 * Stub: inserts into the virtual orphan index are not supported.
 * Body (error return) elided by the extraction.
 */
5886 static int lfsck_orphan_index_declare_insert(const struct lu_env *env,
5887 struct dt_object *dt,
5888 const struct dt_rec *rec,
5889 const struct dt_key *key,
5890 struct thandle *handle)
/*
 * Stub: inserts into the virtual orphan index are not supported.
 * Body (error return) elided by the extraction.
 */
5895 static int lfsck_orphan_index_insert(const struct lu_env *env,
5896 struct dt_object *dt,
5897 const struct dt_rec *rec,
5898 const struct dt_key *key,
5899 struct thandle *handle,
5900 struct lustre_capa *capa,
/*
 * Stub: deletes from the virtual orphan index are not supported.
 * Body (error return) elided by the extraction.
 */
5906 static int lfsck_orphan_index_declare_delete(const struct lu_env *env,
5907 struct dt_object *dt,
5908 const struct dt_key *key,
5909 struct thandle *handle)
/*
 * Stub: deletes from the virtual orphan index are not supported.
 * Body (error return) elided by the extraction.
 */
5914 static int lfsck_orphan_index_delete(const struct lu_env *env,
5915 struct dt_object *dt,
5916 const struct dt_key *key,
5917 struct thandle *handle,
5918 struct lustre_capa *capa)
/*
 * Start an orphan iteration for one requesting MDT (identified by
 * @attr, used as the target index). Finds the LFSCK instance and the
 * layout component, requires a valid FID-accessed rbtree, claims the
 * caller's slave-target slot, and — on the first iteration only, while
 * dd_record_fid_accessed is still set — prunes rbtree nodes whose every
 * known object was also accessed. The rbtree read lock taken at the end
 * is deliberately held until lfsck_orphan_it_fini().
 *
 * NOTE(review): the OBD_ALLOC of "it", some GOTO targets, the success
 * RETURN and parts of the error path are elided by the extraction.
 */
5923 static struct dt_it *lfsck_orphan_it_init(const struct lu_env *env,
5924 struct dt_object *dt,
5926 struct lustre_capa *capa)
5928 struct dt_device *dev = lu2dt_dev(dt->do_lu.lo_dev);
5929 struct lfsck_instance *lfsck;
5930 struct lfsck_component *com = NULL;
5931 struct lfsck_layout_slave_data *llsd;
5932 struct lfsck_orphan_it *it = NULL;
5936 lfsck = lfsck_instance_find(dev, true, false);
5937 if (unlikely(lfsck == NULL))
5938 RETURN(ERR_PTR(-ENXIO));
5940 com = lfsck_component_find(lfsck, LFSCK_TYPE_LAYOUT);
5941 if (unlikely(com == NULL))
5942 GOTO(out, rc = -ENOENT);
5944 llsd = com->lc_data;
5945 if (!llsd->llsd_rbtree_valid)
5946 GOTO(out, rc = -ESRCH);
5950 GOTO(out, rc = -ENOMEM);
5952 it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
5953 if (it->loi_llst == NULL)
5954 GOTO(out, rc = -ENXIO);
5956 if (dev->dd_record_fid_accessed) {
5957 /* The first iteration against the rbtree, scan the whole rbtree
5958 * to remove the nodes which do NOT need to be handled. */
5959 write_lock(&llsd->llsd_rb_lock);
/* Re-check under the write lock: another thread may have raced us. */
5960 if (dev->dd_record_fid_accessed) {
5961 struct rb_node *node;
5962 struct rb_node *next;
5963 struct lfsck_rbtree_node *lrn;
5965 /* No need to record the fid accessing anymore. */
5966 dev->dd_record_fid_accessed = 0;
5968 node = rb_first(&llsd->llsd_rb_root);
5969 while (node != NULL) {
5970 next = rb_next(node);
5971 lrn = rb_entry(node, struct lfsck_rbtree_node,
/* Every known object in this node was accessed: nothing orphaned. */
5973 if (atomic_read(&lrn->lrn_known_count) <=
5974 atomic_read(&lrn->lrn_accessed_count)) {
5975 rb_erase(node, &llsd->llsd_rb_root);
5976 lfsck_rbtree_free(lrn);
5981 write_unlock(&llsd->llsd_rb_lock);
5984 /* read lock the rbtree when init, and unlock when fini */
5985 read_lock(&llsd->llsd_rb_lock);
5993 lfsck_component_put(env, com);
5994 lfsck_instance_put(env, lfsck);
5999 it = (struct lfsck_orphan_it *)ERR_PTR(rc);
6002 return (struct dt_it *)it;
/*
 * Finish an orphan iteration: release the rbtree read lock taken in
 * lfsck_orphan_it_init(), stash the current key FID and hash back into
 * the slave-target slot so a later ->load() can resume, and drop the
 * target/component references.
 *
 * NOTE(review): the "di" parameter line, NULL guard on com, the
 * instance put and the final free of "it" are elided by the extraction.
 */
6005 static void lfsck_orphan_it_fini(const struct lu_env *env,
6008 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6009 struct lfsck_component *com = it->loi_com;
6010 struct lfsck_layout_slave_data *llsd;
6011 struct lfsck_layout_slave_target *llst;
6014 llsd = com->lc_data;
6015 read_unlock(&llsd->llsd_rb_lock);
6016 llst = it->loi_llst;
6017 LASSERT(llst != NULL);
6019 /* Save the key and hash for iterate next. */
6020 llst->llst_fid = it->loi_key;
6021 llst->llst_hash = it->loi_hash;
6022 lfsck_layout_llst_put(llst);
6023 lfsck_component_put(env, com);
/*
 * Advance to the next orphan candidate.
 *
 * Walks the rbtree of "known" OST-object bitmaps looking for bits set
 * in lrn_known_bitmap but clear in lrn_accessed_bitmap; for each such
 * FID, loads the object and reads its PFID (XATTR_NAME_FID) to decide
 * whether the requesting MDT (idx) owns it: ENODATA distinguishes
 * pre-created objects (mark accessed), dangling-repair leftovers
 * (destroy now) and parentless orphans attributed to MDT0; otherwise
 * the stored parent FID is matched against idx via
 * lfsck_fid_match_idx() before filling loi_rec.
 *
 * NOTE(review): this body is heavily elided in the extraction —
 * gotos/labels, the "pos"/"rc"/"exact"/"save" declarations, several
 * branch and loop tails, and the final returns are missing. Treat the
 * remaining text as an outline only.
 */
6029 * \retval +1: the iteration finished
6030 * \retval 0: on success, not finished
6031 * \retval -ve: on error
6033 static int lfsck_orphan_it_next(const struct lu_env *env,
6036 struct lfsck_thread_info *info = lfsck_env_info(env);
6037 struct filter_fid_old *pfid = &info->lti_old_pfid;
6038 struct lu_attr *la = &info->lti_la;
6039 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6040 struct lu_fid *key = &it->loi_key;
6041 struct lu_orphan_rec *rec = &it->loi_rec;
6042 struct lfsck_component *com = it->loi_com;
6043 struct lfsck_instance *lfsck = com->lc_lfsck;
6044 struct lfsck_layout_slave_data *llsd = com->lc_data;
6045 struct dt_object *obj;
6046 struct lfsck_rbtree_node *lrn;
6050 __u32 idx = it->loi_llst->llst_index;
/* Locate the rbtree node covering (or following) the current key. */
6060 lrn = lfsck_rbtree_search(llsd, key, &exact);
6068 key->f_seq = lrn->lrn_seq;
6069 key->f_oid = lrn->lrn_first_oid;
6074 if (unlikely(key->f_oid == 0)) {
6081 lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH) {
/* Node fully accessed: skip forward to the next useful node. */
6087 if (unlikely(atomic_read(&lrn->lrn_known_count) <=
6088 atomic_read(&lrn->lrn_accessed_count))) {
6089 struct rb_node *next = rb_next(&lrn->lrn_node);
6091 while (next != NULL) {
6092 lrn = rb_entry(next, struct lfsck_rbtree_node,
6094 if (atomic_read(&lrn->lrn_known_count) >
6095 atomic_read(&lrn->lrn_accessed_count))
6097 next = rb_next(next);
6106 key->f_seq = lrn->lrn_seq;
6107 key->f_oid = lrn->lrn_first_oid;
6111 pos = key->f_oid - lrn->lrn_first_oid;
/* Scan the known bitmap for the next candidate bit. */
6114 pos = find_next_bit(lrn->lrn_known_bitmap,
6115 LFSCK_RBTREE_BITMAP_WIDTH, pos);
6116 if (pos >= LFSCK_RBTREE_BITMAP_WIDTH) {
6117 key->f_oid = lrn->lrn_first_oid + pos;
6118 if (unlikely(key->f_oid < lrn->lrn_first_oid)) {
/* Already accessed: not an orphan, keep scanning. */
6126 if (test_bit(pos, lrn->lrn_accessed_bitmap)) {
6131 key->f_oid = lrn->lrn_first_oid + pos;
6132 obj = lfsck_object_find(env, lfsck, key);
6135 if (rc == -ENOENT) {
6142 dt_read_lock(env, obj, 0);
6143 if (!dt_object_exists(obj)) {
6144 dt_read_unlock(env, obj);
6145 lfsck_object_put(env, obj);
6150 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
6154 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, pfid, sizeof(*pfid)),
6155 XATTR_NAME_FID, BYPASS_CAPA);
6156 if (rc == -ENODATA) {
6157 /* For the pre-created OST-object, update the bitmap to avoid
6158 * others LFSCK (second phase) iteration to touch it again. */
6159 if (la->la_ctime == 0) {
6160 if (!test_and_set_bit(pos, lrn->lrn_accessed_bitmap))
6161 atomic_inc(&lrn->lrn_accessed_count);
6163 /* For the race between repairing dangling referenced
6164 * MDT-object and unlink the file, it may left orphan
6165 * OST-object there. Destroy it now! */
6166 if (unlikely(!(la->la_mode & S_ISUID))) {
6167 dt_read_unlock(env, obj);
6168 lfsck_layout_destroy_orphan(env,
6171 lfsck_object_put(env, obj);
6175 } else if (idx == 0) {
6176 /* If the orphan OST-object has no parent information,
6177 * regard it as referenced by the MDT-object on MDT0. */
6178 fid_zero(&rec->lor_fid);
6179 rec->lor_uid = la->la_uid;
6180 rec->lor_gid = la->la_gid;
6184 dt_read_unlock(env, obj);
6185 lfsck_object_put(env, obj);
/* PFID xattr must be one of the two known filter_fid layouts. */
6193 if (rc != sizeof(struct filter_fid) &&
6194 rc != sizeof(struct filter_fid_old))
6195 GOTO(out, rc = -EINVAL);
6197 fid_le_to_cpu(&rec->lor_fid, &pfid->ff_parent);
6198 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
6199 * MDT-object's FID::f_ver, instead it is the OST-object index in its
6200 * parent MDT-object's layout EA. */
6201 save = rec->lor_fid.f_stripe_idx;
6202 rec->lor_fid.f_ver = 0;
6203 rc = lfsck_fid_match_idx(env, lfsck, &rec->lor_fid, idx);
6204 /* If the orphan OST-object does not claim the MDT, then next.
6206 * If we do not know whether it matches or not, then return it
6207 * to the MDT for further check. */
6209 dt_read_unlock(env, obj);
6210 lfsck_object_put(env, obj);
6215 rec->lor_fid.f_stripe_idx = save;
6216 rec->lor_uid = la->la_uid;
6217 rec->lor_gid = la->la_gid;
6219 CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u\n",
6220 lfsck_lfsck2name(com->lc_lfsck), PFID(key), PFID(&rec->lor_fid),
6221 rec->lor_uid, rec->lor_gid);
6226 dt_read_unlock(env, obj);
6227 lfsck_object_put(env, obj);
/*
 * Position the iterator at @key by copying it in and delegating to
 * lfsck_orphan_it_next(). (The "di" parameter line, "int rc", the
 * result mapping and closing brace are elided by the extraction.)
 */
6235 * \retval +1: locate to the exactly position
6236 * \retval 0: cannot locate to the exactly position,
6237 * call next() to move to a valid position.
6238 * \retval -ve: on error
6240 static int lfsck_orphan_it_get(const struct lu_env *env,
6242 const struct dt_key *key)
6244 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6247 it->loi_key = *(struct lu_fid *)key;
6248 rc = lfsck_orphan_it_next(env, di);
/*
 * dt_it ->put() for the orphan iterator. The parameter tail and
 * (apparently empty) body are elided by the extraction.
 */
6258 static void lfsck_orphan_it_put(const struct lu_env *env,
/*
 * Return the current iteration key (the FID in loi_key).
 * (Braces elided by the extraction.)
 */
6263 static struct dt_key *lfsck_orphan_it_key(const struct lu_env *env,
6264 const struct dt_it *di)
6266 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6268 return (struct dt_key *)&it->loi_key;
/*
 * Orphan-iterator keys are always full lu_fid values.
 * (Braces elided by the extraction.)
 */
6271 static int lfsck_orphan_it_key_size(const struct lu_env *env,
6272 const struct dt_it *di)
6274 return sizeof(struct lu_fid);
/*
 * Copy out the current orphan record (parent FID + owner) prepared by
 * lfsck_orphan_it_next(). (The rec/attr parameter lines, braces and
 * return are elided by the extraction.)
 */
6277 static int lfsck_orphan_it_rec(const struct lu_env *env,
6278 const struct dt_it *di,
6282 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6284 *(struct lu_orphan_rec *)rec = it->loi_rec;
/*
 * Return the iteration cookie (loi_hash) used by ->load() to resume.
 * (Braces elided by the extraction.)
 */
6289 static __u64 lfsck_orphan_it_store(const struct lu_env *env,
6290 const struct dt_it *di)
6292 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6294 return it->loi_hash;
/*
 * Resume iteration from the position saved in the slave-target slot at
 * fini time. A cookie mismatch means the caller's resume point is
 * stale, so the saved key/hash are reset and iteration restarts from
 * the beginning; otherwise the saved key/hash are restored and
 * lfsck_orphan_it_next() advances to the next valid entry.
 *
 * NOTE(review): the "hash" parameter line, braces and the result
 * mapping after the next() call are elided by the extraction.
 */
6298 * \retval +1: locate to the exactly position
6299 * \retval 0: cannot locate to the exactly position,
6300 * call next() to move to a valid position.
6301 * \retval -ve: on error
6303 static int lfsck_orphan_it_load(const struct lu_env *env,
6304 const struct dt_it *di,
6307 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6308 struct lfsck_layout_slave_target *llst = it->loi_llst;
6311 LASSERT(llst != NULL);
6313 if (hash != llst->llst_hash) {
6314 CWARN("%s: the given hash "LPU64" for orphan iteration does "
6315 "not match the one when fini "LPU64", to be reset.\n",
6316 lfsck_lfsck2name(it->loi_com->lc_lfsck), hash,
6318 fid_zero(&llst->llst_fid);
6319 llst->llst_hash = 0;
6322 it->loi_key = llst->llst_fid;
6323 it->loi_hash = llst->llst_hash;
6324 rc = lfsck_orphan_it_next(env, (struct dt_it *)di);
/*
 * dt_it ->key_rec() for the orphan iterator. The remaining parameters
 * and the (apparently trivial) body are elided by the extraction.
 */
6334 static int lfsck_orphan_it_key_rec(const struct lu_env *env,
6335 const struct dt_it *di,
6341 const struct dt_index_operations lfsck_orphan_index_ops = {
6342 .dio_lookup = lfsck_orphan_index_lookup,
6343 .dio_declare_insert = lfsck_orphan_index_declare_insert,
6344 .dio_insert = lfsck_orphan_index_insert,
6345 .dio_declare_delete = lfsck_orphan_index_declare_delete,
6346 .dio_delete = lfsck_orphan_index_delete,
6348 .init = lfsck_orphan_it_init,
6349 .fini = lfsck_orphan_it_fini,
6350 .get = lfsck_orphan_it_get,
6351 .put = lfsck_orphan_it_put,
6352 .next = lfsck_orphan_it_next,
6353 .key = lfsck_orphan_it_key,
6354 .key_size = lfsck_orphan_it_key_size,
6355 .rec = lfsck_orphan_it_rec,
6356 .store = lfsck_orphan_it_store,
6357 .load = lfsck_orphan_it_load,
6358 .key_rec = lfsck_orphan_it_key_rec,