4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_linkea.h>
43 #include <lustre_fid.h>
44 #include <lustre_lib.h>
45 #include <lustre_net.h>
46 #include <lustre/lustre_user.h>
47 #include <md_object.h>
48 #include <obd_class.h>
50 #include "lfsck_internal.h"
52 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
54 static const char lfsck_layout_name[] = "lfsck_layout";
/*
 * Per-FID-sequence state tracked by the layout LFSCK slave (OST side):
 * the largest object ID known for the sequence and the backing LAST_ID
 * file object.
 * NOTE(review): some members (e.g. lls_seq/lls_lastid, referenced by
 * lfsck_layout_lastid_reload()) and the closing brace are not visible
 * in this copy of the file.
 */
struct lfsck_layout_seq {
	struct list_head lls_list;	/* link in llsd_seq_list */
	__u64 lls_lastid_known;		/* largest object ID the scan has seen */
	struct dt_object *lls_lastid_obj;	/* LAST_ID file for this sequence */
	unsigned int lls_dirty:1;	/* LAST_ID needs to be written back */
/*
 * One layout LFSCK master (MDT) registered on this slave (OST).
 * NOTE(review): trailing members (reference count llst_ref and index
 * llst_index are used elsewhere in this file) and the closing brace are
 * not visible in this copy.
 */
struct lfsck_layout_slave_target {
	/* link into lfsck_layout_slave_data::llsd_master_list. */
	struct list_head llst_list;
	/* The position for next record in the rbtree for iteration. */
	struct lu_fid llst_fid;
	/* Dummy hash for iteration against the rbtree. */
/*
 * Slave-side (OST) private data of the layout LFSCK component.
 * NOTE(review): some members (e.g. the llsd_lock spinlock used by the
 * llst helpers) and the closing brace are not visible in this copy.
 */
struct lfsck_layout_slave_data {
	/* list for lfsck_layout_seq */
	struct list_head llsd_seq_list;
	/* list for the masters involve layout verification. */
	struct list_head llsd_master_list;
	/* in-RAM object standing for the rbtree; see lfsck_rbtree_setup() */
	struct dt_object *llsd_rb_obj;
	/* root of the known/accessed OST-object bitmap tree */
	struct rb_root llsd_rb_root;
	/* protects llsd_rb_root and llsd_rbtree_valid */
	rwlock_t llsd_rb_lock;
	unsigned int llsd_rbtree_valid:1;
/*
 * In-RAM wrapper for a pre-fetched MDT-object (file) whose layout is
 * being verified.
 * NOTE(review): the reference counter llo_ref (used by
 * lfsck_layout_object_put()) and the closing brace are not visible in
 * this copy.
 */
struct lfsck_layout_object {
	struct dt_object *llo_obj;	/* referenced; released on last put */
	struct lu_attr llo_attr;	/* attributes cached at init time */
/*
 * One verification request: a parent (MDT-object) / child (OST-object)
 * pair queued for the layout LFSCK assistant thread.
 * NOTE(review): llr_ost_idx (assigned by lfsck_layout_req_init()) and
 * the closing brace are not visible in this copy.
 */
struct lfsck_layout_req {
	struct list_head llr_list;	/* link in llmd_req_list */
	struct lfsck_layout_object *llr_parent;	/* holds a reference */
	struct dt_object *llr_child;	/* OST-object to be verified */
	__u32 llr_lov_idx; /* offset in LOV EA */
/*
 * Master-side (MDT) private data of the layout LFSCK component: the
 * request queue shared with the assistant thread plus per-phase lists
 * of OST/MDT targets participating in the scan.
 * NOTE(review): trailing bitfields and the closing brace are not
 * visible in this copy.
 */
struct lfsck_layout_master_data {
	/* protects llmd_req_list */
	spinlock_t llmd_lock;
	struct list_head llmd_req_list;
	/* list for the ost targets involve layout verification. */
	struct list_head llmd_ost_list;
	/* list for the ost targets in phase1 scanning. */
	struct list_head llmd_ost_phase1_list;
	/* list for the ost targets in phase2 scanning. */
	struct list_head llmd_ost_phase2_list;
	/* list for the mdt targets involve layout verification. */
	struct list_head llmd_mdt_list;
	/* list for the mdt targets in phase1 scanning. */
	struct list_head llmd_mdt_phase1_list;
	/* list for the mdt targets in phase2 scanning. */
	struct list_head llmd_mdt_phase2_list;
	/* the assistant thread handling llmd_req_list */
	struct ptlrpc_thread llmd_thread;
	/* bumped per query pass; see lfsck_layout_master_query_others() */
	__u32 llmd_touch_gen;
	int llmd_assistant_status;
	int llmd_post_result;
	unsigned int llmd_to_post:1,
		     llmd_to_double_scan:1,
		     llmd_in_double_scan:1,
/*
 * Arguments carried by slave-side async RPC interpret callbacks.
 * NOTE(review): the closing brace is not visible in this copy.
 */
struct lfsck_layout_slave_async_args {
	struct obd_export *llsaa_exp;	/* export the RPC was sent through */
	struct lfsck_component *llsaa_com;	/* the layout component */
	struct lfsck_layout_slave_target *llsaa_llst;	/* target answered */
/*
 * Allocate and initialize an lfsck_layout_object wrapper for @obj:
 * cache the object attributes, take a reference on the underlying
 * lu_object, and start with a single wrapper reference (released via
 * lfsck_layout_object_put()).
 * NOTE(review): the allocation, error paths and closing lines are not
 * visible in this copy of the file.
 */
static struct lfsck_layout_object *
lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
	struct lfsck_layout_object *llo;
	return ERR_PTR(-ENOMEM);
	rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
	lu_object_get(&obj->do_lu);
	/* The gen can be used to check whether some others have changed the
	 * file layout after LFSCK pre-fetching but before real verification. */
	atomic_set(&llo->llo_ref, 1);
/*
 * Drop a reference on @llst; on the last put the target must already be
 * unlinked from llsd_master_list (asserted below) and is freed.
 * NOTE(review): the return-type line and the freeing code are not
 * visible in this copy.
 */
lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
	if (atomic_dec_and_test(&llst->llst_ref)) {
		LASSERT(list_empty(&llst->llst_list));
/*
 * Register the master with @index on the slave's llsd_master_list; a
 * duplicate index is detected under llsd_lock (the handling lines for
 * that case are not visible here).
 * NOTE(review): the return-type line, the allocation and several
 * closing lines are missing from this copy.
 */
lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
	struct lfsck_layout_slave_target *llst;
	struct lfsck_layout_slave_target *tmp;
	INIT_LIST_HEAD(&llst->llst_list);
	llst->llst_index = index;
	atomic_set(&llst->llst_ref, 1);
	spin_lock(&llsd->llsd_lock);
	list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
		if (tmp->llst_index == index) {
	list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
	spin_unlock(&llsd->llsd_lock);
/*
 * Unlink @llst from llsd_master_list (if still linked) under llsd_lock
 * and drop a reference via lfsck_layout_llst_put().
 * NOTE(review): the return-type line and a few lines (presumably the
 * conditional put of the list reference) are missing from this copy.
 */
lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
		      struct lfsck_layout_slave_target *llst)
	spin_lock(&llsd->llsd_lock);
	if (!list_empty(&llst->llst_list)) {
		list_del_init(&llst->llst_list);
	spin_unlock(&llsd->llsd_lock);
	lfsck_layout_llst_put(llst);
/*
 * Find the registered master with @index under llsd_lock; on a match
 * take a reference and return it — the list_del_init() is presumably
 * gated by @unlink (the gating line is not visible here). Returns NULL
 * when no such index is registered.
 * NOTE(review): return statements and closing braces are missing from
 * this copy.
 */
static inline struct lfsck_layout_slave_target *
lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
			       __u32 index, bool unlink)
	struct lfsck_layout_slave_target *llst;
	spin_lock(&llsd->llsd_lock);
	list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
		if (llst->llst_index == index) {
			list_del_init(&llst->llst_list);
			atomic_inc(&llst->llst_ref);
			spin_unlock(&llsd->llsd_lock);
	spin_unlock(&llsd->llsd_lock);
256 static inline void lfsck_layout_object_put(const struct lu_env *env,
257 struct lfsck_layout_object *llo)
259 if (atomic_dec_and_test(&llo->llo_ref)) {
260 lfsck_object_put(env, llo->llo_obj);
265 static struct lfsck_layout_req *
266 lfsck_layout_req_init(struct lfsck_layout_object *parent,
267 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
269 struct lfsck_layout_req *llr;
273 return ERR_PTR(-ENOMEM);
275 INIT_LIST_HEAD(&llr->llr_list);
276 atomic_inc(&parent->llo_ref);
277 llr->llr_parent = parent;
278 llr->llr_child = child;
279 llr->llr_ost_idx = ost_idx;
280 llr->llr_lov_idx = lov_idx;
285 static inline void lfsck_layout_req_fini(const struct lu_env *env,
286 struct lfsck_layout_req *llr)
288 lu_object_put(env, &llr->llr_child->do_lu);
289 lfsck_layout_object_put(env, llr->llr_parent);
293 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
297 spin_lock(&llmd->llmd_lock);
298 if (list_empty(&llmd->llmd_req_list))
300 spin_unlock(&llmd->llmd_lock);
/*
 * Read the LOV EA of @obj into @buf, (re)allocating the buffer when it
 * is too small; *buflen tracks the buffer size after (re)allocation.
 * NOTE(review): several lines (braces, retry/return paths, the second
 * dt_xattr_get argument tail) are missing from this copy, so the exact
 * control flow cannot be stated from here.
 */
static int lfsck_layout_get_lovea(const struct lu_env *env,
				  struct dt_object *obj,
				  struct lu_buf *buf, ssize_t *buflen)
	rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
	rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
	lu_buf_realloc(buf, rc);
	*buflen = buf->lb_len;
	if (buf->lb_buf == NULL)
	if (unlikely(buf->lb_buf == NULL)) {
		lu_buf_alloc(buf, rc);
		*buflen = buf->lb_len;
		if (buf->lb_buf == NULL)
/*
 * Sanity-check an LOV EA header: the magic must be LOV_MAGIC_V1/V3 and
 * the stripe pattern RAID0. A crashed magic is deliberately left as-is
 * so later OST-object orphan handling can verify and repair it.
 * NOTE(review): declarations, rc assignments and return statements are
 * missing from this copy.
 */
static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
	magic = le32_to_cpu(lmm->lmm_magic);
	/* If magic crashed, keep it there. Sometime later, during OST-object
	 * orphan handling, if some OST-object(s) back-point to it, it can be
	 * verified and repaired. */
	if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3) {
		lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
		if ((magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
		CDEBUG(D_LFSCK, "%s LOV EA magic %u on "DOSTID"\n",
		       rc == -EINVAL ? "Unknown" : "Unsupported",
	pattern = le32_to_cpu(lmm->lmm_pattern);
	/* XXX: currently, we only support LOV_PATTERN_RAID0. */
	if (lov_pattern(pattern) != LOV_PATTERN_RAID0) {
		lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
		CDEBUG(D_LFSCK, "Unsupported LOV EA pattern %u on "DOSTID"\n",
		       pattern, POSTID(&oi));
390 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
391 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
392 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1)
/*
 * One rbtree node covering a window of LFSCK_RBTREE_BITMAP_WIDTH object
 * IDs within a single FID sequence: one bitmap of "known" OST-objects
 * and one of "accessed" ones, each with a set-bit counter.
 * NOTE(review): the lrn_seq/lrn_first_oid key members (used by
 * lfsck_rbtree_cmp()) and the closing brace are not visible in this
 * copy.
 */
struct lfsck_rbtree_node {
	struct rb_node lrn_node;
	atomic_t lrn_known_count;	/* bits set in lrn_known_bitmap */
	atomic_t lrn_accessed_count;	/* bits set in lrn_accessed_bitmap */
	void *lrn_known_bitmap;
	void *lrn_accessed_bitmap;
404 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
405 __u64 seq, __u32 oid)
407 if (seq < lrn->lrn_seq)
410 if (seq > lrn->lrn_seq)
413 if (oid < lrn->lrn_first_oid)
416 if (oid - lrn->lrn_first_oid >= LFSCK_RBTREE_BITMAP_WIDTH)
/* The caller should hold llsd->llsd_rb_lock. */
/*
 * Look up the rbtree node covering @fid. When no exact cover exists
 * (and per the visible comments) the search falls through to the next
 * valid node for iteration purposes; NULL is returned for an empty tree
 * or when iteration runs off the end.
 * NOTE(review): the equal-case branch, the *exact handling and several
 * return statements are missing from this copy.
 */
static struct lfsck_rbtree_node *
lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
		    const struct lu_fid *fid, bool *exact)
	struct rb_node *node = llsd->llsd_rb_root.rb_node;
	struct rb_node *prev = NULL;
	struct lfsck_rbtree_node *lrn = NULL;
	while (node != NULL) {
		lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
		rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
		node = node->rb_left;
		node = node->rb_right;
	/* If there is no exactly matched one, then to the next valid one. */
	/* The rbtree is empty. */
	node = rb_next(prev);
	/* The end of the rbtree. */
	lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
/*
 * Allocate a new rbtree node plus its two bitmaps for the ID window
 * containing @fid; partially allocated memory is released on failure
 * and ERR_PTR(-ENOMEM) returned.
 * NOTE(review): the node allocation itself, the node free on the error
 * paths and the final return are missing from this copy.
 */
static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
						  const struct lu_fid *fid)
	struct lfsck_rbtree_node *lrn;
	return ERR_PTR(-ENOMEM);
	OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
	if (lrn->lrn_known_bitmap == NULL) {
		return ERR_PTR(-ENOMEM);
	OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
	if (lrn->lrn_accessed_bitmap == NULL) {
		OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
		return ERR_PTR(-ENOMEM);
	rb_init_node(&lrn->lrn_node);
	lrn->lrn_seq = fid_seq(fid);
	lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
	atomic_set(&lrn->lrn_known_count, 0);
	atomic_set(&lrn->lrn_accessed_count, 0);
504 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
506 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
507 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
511 /* The caller should hold lock. */
512 static struct lfsck_rbtree_node *
513 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
514 struct lfsck_rbtree_node *lrn)
516 struct rb_node **pos = &llsd->llsd_rb_root.rb_node;
517 struct rb_node *parent = NULL;
518 struct lfsck_rbtree_node *tmp;
521 while (*pos != NULL) {
523 tmp = rb_entry(parent, struct lfsck_rbtree_node, lrn_node);
524 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
526 pos = &(*pos)->rb_left;
528 pos = &(*pos)->rb_right;
533 rb_link_node(&lrn->lrn_node, parent, pos);
534 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
539 extern const struct dt_index_operations lfsck_orphan_index_ops;
/*
 * Create the in-RAM index object standing for the layout rbtree on this
 * device, wire it to the orphan index operations, and enable recording
 * of accessed FIDs on the bottom device.
 * NOTE(review): declarations (rc), braces and the final RETURN are
 * missing from this copy.
 */
static int lfsck_rbtree_setup(const struct lu_env *env,
			      struct lfsck_component *com)
	struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
	struct lfsck_instance *lfsck = com->lc_lfsck;
	struct dt_device *dev = lfsck->li_bottom;
	struct lfsck_layout_slave_data *llsd = com->lc_data;
	struct dt_object *obj;
	fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
	fid->f_oid = lfsck_dev_idx(dev);
	obj = dt_locate(env, dev, fid);
	RETURN(PTR_ERR(obj));
	/* Generate an in-RAM object to stand for the layout rbtree.
	 * Scanning the layout rbtree will be via the iteration over
	 * the object. In the future, the rbtree may be written onto
	 * disk with the object.
	 *
	 * Mark the object to be as exist. */
	obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
	obj->do_index_ops = &lfsck_orphan_index_ops;
	llsd->llsd_rb_obj = obj;
	llsd->llsd_rbtree_valid = 1;
	dev->dd_record_fid_accessed = 1;
/*
 * Tear the layout rbtree down: stop FID-accessed recording, invalidate
 * the tree under the write lock so no new users enter, erase and free
 * every node, then release the in-RAM index object.
 * NOTE(review): the loop-advance/closing lines are missing from this
 * copy.
 */
static void lfsck_rbtree_cleanup(const struct lu_env *env,
				 struct lfsck_component *com)
	struct lfsck_instance *lfsck = com->lc_lfsck;
	struct lfsck_layout_slave_data *llsd = com->lc_data;
	struct rb_node *node = rb_first(&llsd->llsd_rb_root);
	struct rb_node *next;
	struct lfsck_rbtree_node *lrn;
	lfsck->li_bottom->dd_record_fid_accessed = 0;
	/* Invalid the rbtree, then no others will use it. */
	write_lock(&llsd->llsd_rb_lock);
	llsd->llsd_rbtree_valid = 0;
	write_unlock(&llsd->llsd_rb_lock);
	while (node != NULL) {
		next = rb_next(node);
		lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
		rb_erase(node, &llsd->llsd_rb_root);
		lfsck_rbtree_free(lrn);
	if (llsd->llsd_rb_obj != NULL) {
		lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
		llsd->llsd_rb_obj = NULL;
/*
 * Mark @fid as known (and, when the missing "accessed" parameter is
 * set, as accessed) in the per-sequence bitmap rbtree, creating the
 * covering node on demand with lock upgrade (read -> write). If
 * recording an access fails, flag the scan LF_INCOMPLETE and tear the
 * rbtree down so orphan handling is cancelled for this device.
 * NOTE(review): the final parameter line, local declarations and some
 * branch lines are missing from this copy.
 */
static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
				       struct lfsck_component *com,
				       const struct lu_fid *fid,
	struct lfsck_layout_slave_data *llsd = com->lc_data;
	struct lfsck_rbtree_node *lrn;
	CDEBUG(D_LFSCK, "%s: update bitmap for "DFID"\n",
	       lfsck_lfsck2name(com->lc_lfsck), PFID(fid));
	if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
	if (!fid_is_idif(fid) && !fid_is_norm(fid))
	read_lock(&llsd->llsd_rb_lock);
	if (!llsd->llsd_rbtree_valid)
		GOTO(unlock, rc = 0);
	lrn = lfsck_rbtree_search(llsd, fid, NULL);
		struct lfsck_rbtree_node *tmp;
		read_unlock(&llsd->llsd_rb_lock);
		tmp = lfsck_rbtree_new(env, fid);
			GOTO(out, rc = PTR_ERR(tmp));
		write_lock(&llsd->llsd_rb_lock);
		if (!llsd->llsd_rbtree_valid) {
			lfsck_rbtree_free(tmp);
			GOTO(unlock, rc = 0);
		lrn = lfsck_rbtree_insert(llsd, tmp);
			lfsck_rbtree_free(tmp);
	idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
	/* Any accessed object must be a known object. */
	if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
		atomic_inc(&lrn->lrn_known_count);
	if (accessed && !test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
		atomic_inc(&lrn->lrn_accessed_count);
	GOTO(unlock, rc = 0);
	write_unlock(&llsd->llsd_rb_lock);
	read_unlock(&llsd->llsd_rb_lock);
	if (rc != 0 && accessed) {
		struct lfsck_layout *lo = com->lc_file_ram;
		CERROR("%s: Fail to update object accessed bitmap, will cause "
		       "incorrect LFSCK OST-object handling, so disable it to "
		       "cancel orphan handling for related device. rc = %d.\n",
		       lfsck_lfsck2name(com->lc_lfsck), rc);
		lo->ll_flags |= LF_INCOMPLETE;
		lfsck_rbtree_cleanup(env, com);
676 static inline bool is_dummy_lov_ost_data(struct lov_ost_data_v1 *obj)
678 if (fid_is_zero(&obj->l_ost_oi.oi_fid) &&
679 obj->l_ost_gen == 0 && obj->l_ost_idx == 0)
685 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
686 const struct lfsck_layout *src)
690 des->ll_magic = le32_to_cpu(src->ll_magic);
691 des->ll_status = le32_to_cpu(src->ll_status);
692 des->ll_flags = le32_to_cpu(src->ll_flags);
693 des->ll_success_count = le32_to_cpu(src->ll_success_count);
694 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
695 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
696 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
697 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
698 des->ll_time_last_checkpoint =
699 le64_to_cpu(src->ll_time_last_checkpoint);
700 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
701 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
702 des->ll_pos_first_inconsistent =
703 le64_to_cpu(src->ll_pos_first_inconsistent);
704 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
705 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
706 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
707 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
708 for (i = 0; i < LLIT_MAX; i++)
709 des->ll_objs_repaired[i] =
710 le64_to_cpu(src->ll_objs_repaired[i]);
711 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
714 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
715 const struct lfsck_layout *src)
719 des->ll_magic = cpu_to_le32(src->ll_magic);
720 des->ll_status = cpu_to_le32(src->ll_status);
721 des->ll_flags = cpu_to_le32(src->ll_flags);
722 des->ll_success_count = cpu_to_le32(src->ll_success_count);
723 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
724 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
725 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
726 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
727 des->ll_time_last_checkpoint =
728 cpu_to_le64(src->ll_time_last_checkpoint);
729 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
730 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
731 des->ll_pos_first_inconsistent =
732 cpu_to_le64(src->ll_pos_first_inconsistent);
733 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
734 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
735 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
736 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
737 for (i = 0; i < LLIT_MAX; i++)
738 des->ll_objs_repaired[i] =
739 cpu_to_le64(src->ll_objs_repaired[i]);
740 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/**
 * Load the on-disk lfsck_layout trace file into com->lc_file_ram,
 * converting byte order and validating the magic.
 *
 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
 * \retval 0: succeed.
 * \retval -ve: failed cases.
 *
 * NOTE(review): declarations (rc, pos), braces and return statements
 * are missing from this copy.
 */
static int lfsck_layout_load(const struct lu_env *env,
			     struct lfsck_component *com)
	struct lfsck_layout *lo = com->lc_file_ram;
	const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
	ssize_t size = com->lc_file_size;
	rc = dbo->dbo_read(env, com->lc_obj,
			   lfsck_buf_get(env, com->lc_file_disk, size), &pos,
	CWARN("%s: failed to load lfsck_layout: rc = %d\n",
	      lfsck_lfsck2name(com->lc_lfsck), rc);
	} else if (rc != size) {
		CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
		      lfsck_lfsck2name(com->lc_lfsck), rc);
	lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
	if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
		CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
		      "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
		      lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/*
 * Write the in-RAM lfsck_layout (converted to little-endian into
 * com->lc_file_disk) back to the trace file under a local transaction.
 * NOTE(review): declarations (rc, pos), GOTO labels and the final
 * return are missing from this copy.
 */
static int lfsck_layout_store(const struct lu_env *env,
			      struct lfsck_component *com)
	struct dt_object *obj = com->lc_obj;
	struct lfsck_instance *lfsck = com->lc_lfsck;
	struct lfsck_layout *lo = com->lc_file_disk;
	struct thandle *handle;
	ssize_t size = com->lc_file_size;
	lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
	handle = dt_trans_create(env, lfsck->li_bottom);
	if (IS_ERR(handle)) {
		rc = PTR_ERR(handle);
		CERROR("%s: fail to create trans for storing lfsck_layout: "
		       "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
	rc = dt_declare_record_write(env, obj, lfsck_buf_get(env, lo, size),
	CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
	       "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
	rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
	CERROR("%s: fail to start trans for storing lfsck_layout: "
	       "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
	rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
	CERROR("%s: fail to store lfsck_layout(1): size = %d, "
	       "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);
	dt_trans_stop(env, lfsck->li_bottom, handle);
833 static int lfsck_layout_init(const struct lu_env *env,
834 struct lfsck_component *com)
836 struct lfsck_layout *lo = com->lc_file_ram;
839 memset(lo, 0, com->lc_file_size);
840 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
841 lo->ll_status = LS_INIT;
842 down_write(&com->lc_sem);
843 rc = lfsck_layout_store(env, com);
844 up_write(&com->lc_sem);
/*
 * Decide whether @fid names an OST-object: first consult the FLD server
 * for the sequence; when that is inconclusive fall back to the object's
 * LMA xattr (LMAC_FID_ON_OST flag) and finally to the presence of the
 * FID xattr.
 * NOTE(review): declarations (rc), braces and several return lines are
 * missing from this copy.
 */
static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
			     struct dt_object *obj, const struct lu_fid *fid)
	struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
	struct lu_seq_range range = { 0 };
	struct lustre_mdt_attrs *lma;
	fld_range_set_any(&range);
	rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
	if (fld_range_is_ost(&range))
	lma = &lfsck_env_info(env)->lti_lma;
	rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
			  XATTR_NAME_LMA, BYPASS_CAPA);
	if (rc == sizeof(*lma)) {
		lustre_lma_swab(lma);
		return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
	rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
880 static struct lfsck_layout_seq *
881 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
883 struct lfsck_layout_seq *lls;
885 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
886 if (lls->lls_seq == seq)
889 if (lls->lls_seq > seq)
897 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
898 struct lfsck_layout_seq *lls)
900 struct lfsck_layout_seq *tmp;
901 struct list_head *pos = &llsd->llsd_seq_list;
903 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
904 if (lls->lls_seq < tmp->lls_seq) {
905 pos = &tmp->lls_list;
909 list_add_tail(&lls->lls_list, pos);
/*
 * (Re)create the LAST_ID file for a sequence and write an initial
 * last-id value under a single local transaction; skipped entirely in
 * dryrun mode. Create and write happen under the object write lock,
 * guarded by a racy existence re-check.
 * NOTE(review): the return-type line, declarations (th, lastid, rc),
 * several error-path lines and the final return are missing from this
 * copy.
 */
lfsck_layout_lastid_create(const struct lu_env *env,
			   struct lfsck_instance *lfsck,
			   struct dt_object *obj)
	struct lfsck_thread_info *info = lfsck_env_info(env);
	struct lu_attr *la = &info->lti_la;
	struct dt_object_format *dof = &info->lti_dof;
	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
	struct dt_device *dt = lfsck->li_bottom;
	CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
	       fid_seq(lfsck_dto2fid(obj)));
	if (bk->lb_param & LPF_DRYRUN)
	memset(la, 0, sizeof(*la));
	la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
	la->la_valid = LA_MODE | LA_UID | LA_GID;
	dof->dof_type = dt_mode_to_dft(S_IFREG);
	th = dt_trans_create(env, dt);
	RETURN(rc = PTR_ERR(th));
	rc = dt_declare_create(env, obj, la, NULL, dof, th);
	rc = dt_declare_record_write(env, obj,
				     lfsck_buf_get(env, &lastid,
	rc = dt_trans_start_local(env, dt, th);
	dt_write_lock(env, obj, 0);
	if (likely(!dt_object_exists(obj))) {
		rc = dt_create(env, obj, la, NULL, dof, th);
		rc = dt_record_write(env, obj,
			lfsck_buf_get(env, &lastid, sizeof(lastid)),
	dt_write_unlock(env, obj);
	dt_trans_stop(env, dt, th);
/*
 * Re-read the on-disk LAST_ID for @lls and reconcile it with the
 * largest object ID the scan has observed: a stored value that went
 * backwards means the LAST_ID file crashed — raise LF_CRASHED_LASTID
 * and notify the layer above that rebuilding started.
 * NOTE(review): the return-type line, declarations (lastid, pos, rc)
 * and the closing lines are missing from this copy.
 */
lfsck_layout_lastid_reload(const struct lu_env *env,
			   struct lfsck_component *com,
			   struct lfsck_layout_seq *lls)
	dt_read_lock(env, lls->lls_lastid_obj, 0);
	rc = dt_record_read(env, lls->lls_lastid_obj,
			    lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
	dt_read_unlock(env, lls->lls_lastid_obj);
	if (unlikely(rc != 0))
	lastid = le64_to_cpu(lastid);
	if (lastid < lls->lls_lastid_known) {
		struct lfsck_instance *lfsck = com->lc_lfsck;
		struct lfsck_layout *lo = com->lc_file_ram;
		lls->lls_lastid = lls->lls_lastid_known;
		if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
			LASSERT(lfsck->li_out_notify != NULL);
			lfsck->li_out_notify(env, lfsck->li_out_notify_data,
					     LE_LASTID_REBUILDING);
			lo->ll_flags |= LF_CRASHED_LASTID;
	} else if (lastid >= lls->lls_lastid) {
		lls->lls_lastid = lastid;
/*
 * Flush every dirty per-sequence LAST_ID value back to disk, one local
 * transaction per sequence. Non-dirty sequences are first re-checked
 * via lfsck_layout_lastid_reload() to catch concurrent OFD updates;
 * dryrun mode only clears the dirty flags.
 * NOTE(review): the return-type line, declarations (th, lastid, pos,
 * rc, rc1), loop-closing braces and several error-path lines are
 * missing from this copy.
 */
lfsck_layout_lastid_store(const struct lu_env *env,
			  struct lfsck_component *com)
	struct lfsck_instance *lfsck = com->lc_lfsck;
	struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
	struct dt_device *dt = lfsck->li_bottom;
	struct lfsck_layout_slave_data *llsd = com->lc_data;
	struct lfsck_layout_seq *lls;
	list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
		/* XXX: Add the code back if we really found related
		 * inconsistent cases in the future. */
		if (!lls->lls_dirty) {
			/* In OFD, before the pre-creation, the LAST_ID
			 * file will be updated firstly, which may hide
			 * some potential crashed cases. For example:
			 *
			 * The old obj1's ID is higher than old LAST_ID
			 * but lower than the new LAST_ID, but the LFSCK
			 * have not touch the obj1 until the OFD updated
			 * the LAST_ID. So the LFSCK does not regard it
			 * as crashed case. But when OFD does not create
			 * successfully, it will set the LAST_ID as the
			 * real created objects' ID, then LFSCK needs to
			 * found related inconsistency. */
			rc = lfsck_layout_lastid_reload(env, com, lls);
			if (likely(!lls->lls_dirty))
		CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
		       " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);
		if (bk->lb_param & LPF_DRYRUN) {
		th = dt_trans_create(env, dt);
			CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
			       lfsck_lfsck2name(com->lc_lfsck),
		lastid = cpu_to_le64(lls->lls_lastid);
		rc = dt_declare_record_write(env, lls->lls_lastid_obj,
					     lfsck_buf_get(env, &lastid,
		rc = dt_trans_start_local(env, dt, th);
		dt_write_lock(env, lls->lls_lastid_obj, 0);
		rc = dt_record_write(env, lls->lls_lastid_obj,
				     lfsck_buf_get(env, &lastid,
						   sizeof(lastid)), &pos, th);
		dt_write_unlock(env, lls->lls_lastid_obj);
		dt_trans_stop(env, dt, th);
		CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
		       lfsck_lfsck2name(com->lc_lfsck),
/*
 * Locate the LAST_ID file for lls->lls_seq and read the current
 * last-id (converted to CPU order). A missing file — or a short read —
 * means LAST_ID crashed: raise LF_CRASHED_LASTID, notify the layer
 * above, and (for the missing-file case) rebuild via
 * lfsck_layout_lastid_create(). An OBD_FAIL_LFSCK_DELAY4 fault point
 * can stall here (dropping com->lc_sem while waiting).
 * NOTE(review): the return-type line, declarations (rc, pos), several
 * condition tails, GOTO labels and returns are missing from this copy.
 */
lfsck_layout_lastid_load(const struct lu_env *env,
			 struct lfsck_component *com,
			 struct lfsck_layout_seq *lls)
	struct lfsck_instance *lfsck = com->lc_lfsck;
	struct lfsck_layout *lo = com->lc_file_ram;
	struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
	struct dt_object *obj;
	lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
	obj = dt_locate(env, lfsck->li_bottom, fid);
	RETURN(PTR_ERR(obj));
	/* LAST_ID crashed, to be rebuilt */
	if (!dt_object_exists(obj)) {
		if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
			LASSERT(lfsck->li_out_notify != NULL);
			lfsck->li_out_notify(env, lfsck->li_out_notify_data,
					     LE_LASTID_REBUILDING);
			lo->ll_flags |= LF_CRASHED_LASTID;
			if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
				struct l_wait_info lwi = LWI_TIMEOUT(
					cfs_time_seconds(cfs_fail_val),
				up_write(&com->lc_sem);
				l_wait_event(lfsck->li_thread.t_ctl_waitq,
					     !thread_is_running(&lfsck->li_thread),
				down_write(&com->lc_sem);
		rc = lfsck_layout_lastid_create(env, lfsck, obj);
		dt_read_lock(env, obj, 0);
		rc = dt_read(env, obj,
			     lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
		dt_read_unlock(env, obj);
		if (rc != 0 && rc != sizeof(__u64))
			GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
		if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
			LASSERT(lfsck->li_out_notify != NULL);
			lfsck->li_out_notify(env, lfsck->li_out_notify_data,
					     LE_LASTID_REBUILDING);
			lo->ll_flags |= LF_CRASHED_LASTID;
		lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
	lfsck_object_put(env, obj);
	lls->lls_lastid_obj = obj;
1177 static void lfsck_layout_record_failure(const struct lu_env *env,
1178 struct lfsck_instance *lfsck,
1179 struct lfsck_layout *lo)
1181 lo->ll_objs_failed_phase1++;
1182 if (unlikely(lo->ll_pos_first_inconsistent == 0))
1183 lo->ll_pos_first_inconsistent =
1184 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
/*
 * Interpret callback for async layout-LFSCK RPCs sent by the master to
 * OST/MDT targets. On a failed start notification the scan is flagged
 * LF_INCOMPLETE; on success the target is linked onto the matching
 * llmd_{ost,mdt}[_phaseN] lists. For LE_QUERY replies the target is
 * moved to the phase2 list or dropped from tracking according to the
 * reported scan status.
 * NOTE(review): the args/rc parameters, case labels (LE_START,
 * LE_QUERY, default), braces and break/GOTO lines are missing from
 * this copy; `&ltd`/`&ltds` tokens below were un-mangled from an
 * HTML-entity corruption in this copy.
 */
static int lfsck_layout_master_async_interpret(const struct lu_env *env,
					       struct ptlrpc_request *req,
	struct lfsck_async_interpret_args *laia = args;
	struct lfsck_component *com = laia->laia_com;
	struct lfsck_layout_master_data *llmd = com->lc_data;
	struct lfsck_tgt_descs *ltds = laia->laia_ltds;
	struct lfsck_tgt_desc *ltd = laia->laia_ltd;
	struct lfsck_request *lr = laia->laia_lr;
	switch (lr->lr_event) {
		struct lfsck_layout *lo = com->lc_file_ram;
		CERROR("%s: fail to notify %s %x for layout start: "
		       "rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
		       (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
		       ltd->ltd_index, rc);
		lo->ll_flags |= LF_INCOMPLETE;
		spin_lock(&ltds->ltd_lock);
		if (ltd->ltd_dead || ltd->ltd_layout_done) {
			spin_unlock(&ltds->ltd_lock);
		if (lr->lr_flags & LEF_TO_OST) {
			if (list_empty(&ltd->ltd_layout_list))
				list_add_tail(&ltd->ltd_layout_list,
					      &llmd->llmd_ost_list);
			if (list_empty(&ltd->ltd_layout_phase_list))
				list_add_tail(&ltd->ltd_layout_phase_list,
					      &llmd->llmd_ost_phase1_list);
			if (list_empty(&ltd->ltd_layout_list))
				list_add_tail(&ltd->ltd_layout_list,
					      &llmd->llmd_mdt_list);
			if (list_empty(&ltd->ltd_layout_phase_list))
				list_add_tail(&ltd->ltd_layout_phase_list,
					      &llmd->llmd_mdt_phase1_list);
		spin_unlock(&ltds->ltd_lock);
	case LE_PHASE1_DONE:
	case LE_PHASE2_DONE:
		if (rc != 0 && rc != -EALREADY)
			CWARN("%s: fail to notify %s %x for layout: "
			      "event = %d, rc = %d\n",
			      lfsck_lfsck2name(com->lc_lfsck),
			      (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
			      ltd->ltd_index, lr->lr_event, rc);
		struct lfsck_reply *reply;
		spin_lock(&ltds->ltd_lock);
		list_del_init(&ltd->ltd_layout_phase_list);
		list_del_init(&ltd->ltd_layout_list);
		spin_unlock(&ltds->ltd_lock);
		reply = req_capsule_server_get(&req->rq_pill,
		if (reply == NULL) {
			CERROR("%s: invalid return value: rc = %d\n",
			       lfsck_lfsck2name(com->lc_lfsck), rc);
			spin_lock(&ltds->ltd_lock);
			list_del_init(&ltd->ltd_layout_phase_list);
			list_del_init(&ltd->ltd_layout_list);
			spin_unlock(&ltds->ltd_lock);
		switch (reply->lr_status) {
		case LS_SCANNING_PHASE1:
		case LS_SCANNING_PHASE2:
			spin_lock(&ltds->ltd_lock);
			list_del_init(&ltd->ltd_layout_phase_list);
			if (ltd->ltd_dead || ltd->ltd_layout_done) {
				spin_unlock(&ltds->ltd_lock);
			if (lr->lr_flags & LEF_TO_OST)
				list_add_tail(&ltd->ltd_layout_phase_list,
					      &llmd->llmd_ost_phase2_list);
				list_add_tail(&ltd->ltd_layout_phase_list,
					      &llmd->llmd_mdt_phase2_list);
			spin_unlock(&ltds->ltd_lock);
			spin_lock(&ltds->ltd_lock);
			list_del_init(&ltd->ltd_layout_phase_list);
			list_del_init(&ltd->ltd_layout_list);
			spin_unlock(&ltds->ltd_lock);
		CERROR("%s: unexpected event: rc = %d\n",
		       lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
	if (!laia->laia_shared) {
		lfsck_component_put(env, com);
/*
 * Send LE_QUERY RPCs to every target still in phase1 — MDTs first, then
 * OSTs — collecting replies through
 * lfsck_layout_master_async_interpret(). llmd_touch_gen guarantees each
 * target is visited exactly once per pass even though entries are
 * rotated within the list under ltd_lock.
 * NOTE(review): declarations (rc, rc1), labels/braces, some error
 * handling and the loop between the MDT and OST passes are missing
 * from this copy; `&ltd`/`&ltds` tokens below were un-mangled from an
 * HTML-entity corruption in this copy.
 */
static int lfsck_layout_master_query_others(const struct lu_env *env,
					    struct lfsck_component *com)
	struct lfsck_thread_info *info = lfsck_env_info(env);
	struct lfsck_request *lr = &info->lti_lr;
	struct lfsck_async_interpret_args *laia = &info->lti_laia;
	struct lfsck_instance *lfsck = com->lc_lfsck;
	struct lfsck_layout_master_data *llmd = com->lc_data;
	struct ptlrpc_request_set *set;
	struct lfsck_tgt_descs *ltds;
	struct lfsck_tgt_desc *ltd;
	struct list_head *head;
	set = ptlrpc_prep_set();
	llmd->llmd_touch_gen++;
	memset(lr, 0, sizeof(*lr));
	lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
	lr->lr_event = LE_QUERY;
	lr->lr_active = LT_LAYOUT;
	laia->laia_com = com;
	laia->laia_shared = 0;
	if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
		ltds = &lfsck->li_mdt_descs;
		head = &llmd->llmd_mdt_phase1_list;
		ltds = &lfsck->li_ost_descs;
		lr->lr_flags = LEF_TO_OST;
		head = &llmd->llmd_ost_phase1_list;
	laia->laia_ltds = ltds;
	spin_lock(&ltds->ltd_lock);
	while (!list_empty(head)) {
		ltd = list_entry(head->next,
				 struct lfsck_tgt_desc,
				 ltd_layout_phase_list);
		if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
		ltd->ltd_layout_gen = llmd->llmd_touch_gen;
		list_del(&ltd->ltd_layout_phase_list);
		list_add_tail(&ltd->ltd_layout_phase_list, head);
		atomic_inc(&ltd->ltd_ref);
		laia->laia_ltd = ltd;
		spin_unlock(&ltds->ltd_lock);
		rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
					 lfsck_layout_master_async_interpret,
		CERROR("%s: fail to query %s %x for layout: rc = %d\n",
		       lfsck_lfsck2name(lfsck),
		       (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
		       ltd->ltd_index, rc);
		spin_lock(&ltds->ltd_lock);
	spin_unlock(&ltds->ltd_lock);
	rc = ptlrpc_set_wait(set);
	ptlrpc_set_destroy(set);
	if (!(lr->lr_flags & LEF_TO_OST) &&
	    list_empty(&llmd->llmd_mdt_phase1_list))
	ptlrpc_set_destroy(set);
	RETURN(rc1 != 0 ? rc1 : rc);
/*
 * Whether the layout LFSCK master may move on to orphan OST-object
 * handling: all MDTs have finished phase1 scanning, and either some OST
 * has already entered phase2 or no OST remains in phase1.
 */
1399 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1401 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1402 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1403 list_empty(&llmd->llmd_ost_phase1_list));
/*
 * Broadcast an LFSCK event (in @lr->lr_event) from the layout master to
 * the other targets:
 *
 *   LE_START (first case):  notify all OSTs via async RPCs, then (when
 *       LPF_ALL_TGT is set) link the other MDT targets onto the local
 *       llmd_mdt_list/llmd_mdt_phase1_list for later tracking.
 *   LE_STOP / LE_PHASE2_DONE / LE_PEER_EXIT:  walk the MDT list first
 *       (when LPF_ALL_TGT), unlinking entries for LE_STOP, then repeat
 *       the same notification loop for the OST list (LEF_TO_OST).
 *   LE_PHASE1_DONE:  notify the MDTs on llmd_mdt_phase1_list, using
 *       llmd_touch_gen to visit each entry exactly once.
 *
 * A send failure marks the trace file LF_INCOMPLETE rather than
 * aborting the broadcast.
 *
 * NOTE(review): this copy of the file is missing some original lines
 * (case labels, error paths, loop closings); only the visible text has
 * been corrected — '&lt' sequences that were HTML-decoded to '<' have
 * been restored (e.g. "&ltds->ltd_rw_sem", "&ltd->ltd_layout_list").
 */
1406 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1407 struct lfsck_component *com,
1408 struct lfsck_request *lr)
1410 struct lfsck_thread_info *info = lfsck_env_info(env);
1411 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1412 struct lfsck_instance *lfsck = com->lc_lfsck;
1413 struct lfsck_layout_master_data *llmd = com->lc_data;
1414 struct lfsck_layout *lo = com->lc_file_ram;
1415 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1416 struct ptlrpc_request_set *set;
1417 struct lfsck_tgt_descs *ltds;
1418 struct lfsck_tgt_desc *ltd;
1419 struct lfsck_tgt_desc *next;
1420 struct list_head *head;
1425 set = ptlrpc_prep_set();
1429 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1430 lr->lr_active = LT_LAYOUT;
1431 laia->laia_com = com;
1433 laia->laia_shared = 0;
1434 switch (lr->lr_event) {
1436 /* Notify OSTs firstly, then handle other MDTs if needed. */
1437 ltds = &lfsck->li_ost_descs;
1438 laia->laia_ltds = ltds;
1439 down_read(&ltds->ltd_rw_sem);
1440 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1441 ltd = lfsck_tgt_get(ltds, idx);
1442 LASSERT(ltd != NULL);
1444 laia->laia_ltd = ltd;
1445 ltd->ltd_layout_done = 0;
1446 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1447 lfsck_layout_master_async_interpret,
1448 laia, LFSCK_NOTIFY);
1450 CERROR("%s: fail to notify %s %x for layout "
1452 lfsck_lfsck2name(lfsck),
1453 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* A target we could not notify leaves the scan incomplete. */
1456 lo->ll_flags |= LF_INCOMPLETE;
1459 up_read(&ltds->ltd_rw_sem);
1462 rc = ptlrpc_set_wait(set);
1464 ptlrpc_set_destroy(set);
/* Without LPF_ALL_TGT there are no other MDTs to track locally. */
1468 if (!(bk->lb_param & LPF_ALL_TGT))
1471 /* link other MDT targets locally. */
1472 spin_lock(&ltds->ltd_lock);
1473 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1474 ltd = LTD_TGT(ltds, idx);
1475 LASSERT(ltd != NULL);
/* Already linked by a previous run: skip. */
1477 if (!list_empty(&ltd->ltd_layout_list))
1480 list_add_tail(&ltd->ltd_layout_list,
1481 &llmd->llmd_mdt_list);
1482 list_add_tail(&ltd->ltd_layout_phase_list,
1483 &llmd->llmd_mdt_phase1_list);
1485 spin_unlock(&ltds->ltd_lock);
1488 case LE_PHASE2_DONE:
1489 case LE_PEER_EXIT: {
1490 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1491 if (bk->lb_param & LPF_ALL_TGT) {
1492 head = &llmd->llmd_mdt_list;
1493 ltds = &lfsck->li_mdt_descs;
1494 if (lr->lr_event == LE_STOP) {
1495 /* unlink other MDT targets locally. */
1496 spin_lock(&ltds->ltd_lock);
1497 list_for_each_entry_safe(ltd, next, head,
1499 list_del_init(&ltd->ltd_layout_phase_list);
1500 list_del_init(&ltd->ltd_layout_list);
1502 spin_unlock(&ltds->ltd_lock);
1504 lr->lr_flags |= LEF_TO_OST;
1505 head = &llmd->llmd_ost_list;
1506 ltds = &lfsck->li_ost_descs;
1508 lr->lr_flags &= ~LEF_TO_OST;
1511 lr->lr_flags |= LEF_TO_OST;
1512 head = &llmd->llmd_ost_list;
1513 ltds = &lfsck->li_ost_descs;
1517 laia->laia_ltds = ltds;
1518 spin_lock(&ltds->ltd_lock);
/* Drain @head, notifying and unlinking one target at a time. */
1519 while (!list_empty(head)) {
1520 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1522 if (!list_empty(&ltd->ltd_layout_phase_list))
1523 list_del_init(&ltd->ltd_layout_phase_list);
1524 list_del_init(&ltd->ltd_layout_list);
/* Hold a reference across the async RPC; dropped by the interpret. */
1525 atomic_inc(&ltd->ltd_ref);
1526 laia->laia_ltd = ltd;
1527 spin_unlock(&ltds->ltd_lock);
1528 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1529 lfsck_layout_master_async_interpret,
1530 laia, LFSCK_NOTIFY);
1532 CERROR("%s: fail to notify %s %x for layout "
1533 "stop/phase2: rc = %d\n",
1534 lfsck_lfsck2name(lfsck),
1535 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1536 "MDT", ltd->ltd_index, rc);
1539 spin_lock(&ltds->ltd_lock);
1541 spin_unlock(&ltds->ltd_lock);
1543 rc = ptlrpc_set_wait(set);
1545 ptlrpc_set_destroy(set);
/* MDT list done: repeat the same loop for the OST list. */
1549 if (!(lr->lr_flags & LEF_TO_OST)) {
1550 lr->lr_flags |= LEF_TO_OST;
1551 head = &llmd->llmd_ost_list;
1552 ltds = &lfsck->li_ost_descs;
1557 case LE_PHASE1_DONE:
/* Generation stamp: visit each phase1 MDT exactly once. */
1558 llmd->llmd_touch_gen++;
1559 ltds = &lfsck->li_mdt_descs;
1560 laia->laia_ltds = ltds;
1561 spin_lock(&ltds->ltd_lock);
1562 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1563 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1564 struct lfsck_tgt_desc,
1565 ltd_layout_phase_list);
1566 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1569 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
/* Rotate the entry to the list tail so the scan terminates. */
1570 list_del_init(&ltd->ltd_layout_phase_list);
1571 list_add_tail(&ltd->ltd_layout_phase_list,
1572 &llmd->llmd_mdt_phase1_list);
1573 atomic_inc(&ltd->ltd_ref);
1574 laia->laia_ltd = ltd;
1575 spin_unlock(&ltds->ltd_lock);
1576 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1577 lfsck_layout_master_async_interpret,
1578 laia, LFSCK_NOTIFY);
1580 CERROR("%s: fail to notify MDT %x for layout "
1581 "phase1 done: rc = %d\n",
1582 lfsck_lfsck2name(lfsck),
1583 ltd->ltd_index, rc);
1586 spin_lock(&ltds->ltd_lock);
1588 spin_unlock(&ltds->ltd_lock);
1591 CERROR("%s: unexpected LFSCK event: rc = %d\n",
1592 lfsck_lfsck2name(lfsck), lr->lr_event);
1597 rc = ptlrpc_set_wait(set);
1598 ptlrpc_set_destroy(set);
/*
 * Record the result of the layout LFSCK second-phase (double) scan in
 * the in-RAM trace file and persist it.
 *
 * Under com->lc_sem: accumulate the phase2 run time and checked-object
 * count, then translate @rc into a status — positive means the scan
 * finished (LS_COMPLETED, or LS_PARTIAL when LF_INCOMPLETE is set; a
 * non-dryrun completion also clears the SCANNED_ONCE/INCONSISTENT
 * flags and bumps the success count), zero means the instance status
 * is kept (defaulting to LS_STOPPED), negative means LS_FAILED.
 * Finally the trace file is written out via lfsck_layout_store().
 */
1603 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1604 struct lfsck_component *com,
1607 struct lfsck_instance *lfsck = com->lc_lfsck;
1608 struct lfsck_layout *lo = com->lc_file_ram;
1609 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1611 down_write(&com->lc_sem);
1612 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1613 HALF_SEC - lfsck->li_time_last_checkpoint);
1614 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1615 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1618 com->lc_journal = 0;
/* Some target could not be reached: the result is only partial. */
1619 if (lo->ll_flags & LF_INCOMPLETE)
1620 lo->ll_status = LS_PARTIAL;
1622 lo->ll_status = LS_COMPLETED;
1623 if (!(bk->lb_param & LPF_DRYRUN))
1624 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1625 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1626 lo->ll_success_count++;
1627 } else if (rc == 0) {
1628 lo->ll_status = lfsck->li_status;
1629 if (lo->ll_status == 0)
1630 lo->ll_status = LS_STOPPED;
1632 lo->ll_status = LS_FAILED;
1635 rc = lfsck_layout_store(env, com);
1636 up_write(&com->lc_sem);
/*
 * Acquire a local server-side EX inodebits DLM lock on @obj covering
 * the ibits in @bits (e.g. MDS_INODELOCK_LAYOUT), storing the lock
 * handle in @lh for a later lfsck_layout_unlock().
 *
 * The resource name is built from the object FID; LDLM_FL_ATOMIC_CB is
 * requested.  On enqueue failure the handle is cleared so callers can
 * safely pass it to lfsck_layout_unlock() regardless of the outcome.
 */
1641 static int lfsck_layout_lock(const struct lu_env *env,
1642 struct lfsck_component *com,
1643 struct dt_object *obj,
1644 struct lustre_handle *lh, __u64 bits)
1646 struct lfsck_thread_info *info = lfsck_env_info(env);
1647 ldlm_policy_data_t *policy = &info->lti_policy;
1648 struct ldlm_res_id *resid = &info->lti_resid;
1649 struct lfsck_instance *lfsck = com->lc_lfsck;
1650 __u64 flags = LDLM_FL_ATOMIC_CB;
1653 LASSERT(lfsck->li_namespace != NULL);
1655 memset(policy, 0, sizeof(*policy));
1656 policy->l_inodebits.bits = bits;
1657 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1658 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1659 policy, LCK_EX, &flags, ldlm_blocking_ast,
1660 ldlm_completion_ast, NULL, NULL, 0,
1661 LVB_T_NONE, NULL, lh);
1662 if (rc == ELDLM_OK) {
/* Invalidate the handle on failure so unlock becomes a no-op. */
1665 memset(lh, 0, sizeof(*lh));
/*
 * Release the EX lock taken by lfsck_layout_lock() — if the handle is
 * actually in use — and clear the handle so a double release is safe.
 */
1672 static void lfsck_layout_unlock(struct lustre_handle *lh)
1674 if (lustre_handle_is_used(lh)) {
1675 ldlm_lock_decref(lh, LCK_EX);
1676 memset(lh, 0, sizeof(*lh));
/*
 * Stop transaction @handle, first recording @result in th_result so the
 * storage layer knows whether the declared changes should be applied.
 * Returns the dt_trans_stop() result.
 */
1680 static int lfsck_layout_trans_stop(const struct lu_env *env,
1681 struct dt_device *dev,
1682 struct thandle *handle, int result)
1686 handle->th_result = result;
1687 rc = dt_trans_stop(env, dev, handle);
1697 * \retval +1: repaired
1698 * \retval 0: did nothing
1699 * \retval -ve: on error
/*
 * Fill one layout EA slot of @parent with the OST-object @cfid:
 * the FID is converted to an ostid, the generation is set to 0, the
 * OST index to @ost_idx, and the whole (already prepared) LOV EA in
 * @buf is written back with flag @fl (LU_XATTR_CREATE/REPLACE) inside
 * the caller's transaction @handle.
 */
1701 static int lfsck_layout_refill_lovea(const struct lu_env *env,
1702 struct thandle *handle,
1703 struct dt_object *parent,
1704 struct lu_fid *cfid,
1706 struct lov_ost_data_v1 *slot,
1707 int fl, __u32 ost_idx)
1709 struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
1712 fid_to_ostid(cfid, oi);
1713 ostid_cpu_to_le(oi, &slot->l_ost_oi);
1714 slot->l_ost_gen = cpu_to_le32(0);
1715 slot->l_ost_idx = cpu_to_le32(ost_idx);
1716 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
1725 * \retval +1: repaired
1726 * \retval 0: did nothing
1727 * \retval -ve: on error
/*
 * Create or extend @parent's LOV EA so that it covers stripe slot
 * @ea_off, then fill that slot with @cfid via
 * lfsck_layout_refill_lovea().
 *
 * For LU_XATTR_CREATE (or @reset) a fresh LOV_MAGIC_V1 header is built:
 * RAID0 pattern, lmm_oi taken from the parent FID, and the default
 * stripe size (the real one cannot be known here).  Otherwise any gap
 * between the current stripe count and @ea_off is zero-filled (dummy
 * slots) and the layout generation is bumped.  In both cases the
 * stripe count becomes ea_off + 1; @buf must already be sized to
 * lov_mds_md_size(ea_off + 1, magic) (asserted).
 */
1729 static int lfsck_layout_extend_lovea(const struct lu_env *env,
1730 struct thandle *handle,
1731 struct dt_object *parent,
1732 struct lu_fid *cfid,
1733 struct lu_buf *buf, int fl,
1734 __u32 ost_idx, __u32 ea_off, bool reset)
1736 struct lov_mds_md_v1 *lmm = buf->lb_buf;
1737 struct lov_ost_data_v1 *objs;
1741 if (fl == LU_XATTR_CREATE || reset) {
1742 LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1,
1745 memset(lmm, 0, buf->lb_len);
1746 lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
1747 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
1748 lmm->lmm_pattern = cpu_to_le32(LOV_PATTERN_RAID0);
1749 fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
1750 lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
1751 /* XXX: We cannot know the stripe size,
1752 * then use the default value (1 MB). */
1753 lmm->lmm_stripe_size =
1754 cpu_to_le32(LOV_DESC_STRIPE_SIZE_DEFAULT);
1755 objs = &(lmm->lmm_objects[ea_off]);
1757 __u16 count = le16_to_cpu(lmm->lmm_stripe_count);
1758 int gap = ea_off - count;
1759 __u32 magic = le32_to_cpu(lmm->lmm_magic);
1761 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3
1762 * which has been verified in lfsck_layout_verify_header()
1763 * already. If some new magic introduced in the future,
1764 * then layout LFSCK needs to be updated also. */
1765 if (magic == LOV_MAGIC_V1) {
1766 objs = &(lmm->lmm_objects[count]);
1768 LASSERT(magic == LOV_MAGIC_V3);
1769 objs = &((struct lov_mds_md_v3 *)lmm)->
/* Zero the slots between the old count and ea_off (dummies). */
1774 memset(objs, 0, gap * sizeof(*objs));
1775 lmm->lmm_layout_gen =
1776 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
1779 LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1, magic));
1782 lmm->lmm_stripe_count = cpu_to_le16(ea_off + 1);
1783 rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
1790 * \retval +1: repaired
1791 * \retval 0: did nothing
1792 * \retval -ve: on error
/*
 * Repair an OST-object's parent-FID xattr (filter_fid): look up the
 * child @cfid on device @cdev and, inside its own transaction, write
 * XATTR_NAME_FID recording @parent's FID plus the stripe index
 * @ea_off (stored in f_stripe_idx — see the comment below about
 * f_ver reuse).
 */
1794 static int lfsck_layout_update_pfid(const struct lu_env *env,
1795 struct lfsck_component *com,
1796 struct dt_object *parent,
1797 struct lu_fid *cfid,
1798 struct dt_device *cdev, __u32 ea_off)
1800 struct filter_fid *pfid = &lfsck_env_info(env)->lti_new_pfid;
1801 struct dt_object *child;
1802 struct thandle *handle;
1803 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1808 child = lfsck_object_find_by_dev(env, cdev, cfid);
1810 RETURN(PTR_ERR(child));
1812 handle = dt_trans_create(env, cdev);
1814 GOTO(out, rc = PTR_ERR(handle));
1816 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1817 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1818 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
1819 * MDT-object's FID::f_ver, instead it is the OST-object index in its
1820 * parent MDT-object's layout EA. */
1821 pfid->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
1822 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1824 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1828 rc = dt_trans_start(env, cdev, handle);
1832 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
/* Success is reported as +1 ("repaired") per the contract above. */
1835 GOTO(stop, rc = (rc == 0 ? 1 : rc));
1838 dt_trans_stop(env, cdev, handle);
1841 lu_object_put(env, &child->do_lu);
1847 * \retval +1: repaired
1848 * \retval 0: did nothing
1849 * \retval -ve: on error
/*
 * Re-create a lost MDT-object for an orphan OST-object @cfid under
 * .lustre/lost+found/MDTxxxx.
 *
 * If the orphan record has no parent FID, a new FID is allocated and a
 * filter_fid buffer is prepared so the OST-object can be updated with
 * its new parent.  The name is "<prefix><parent-FID><postfix>" (see the
 * naming-convention comment below); on a name collision the FID
 * version is appended.  All changes — remote pfid update (1a/1b),
 * local create (2a/2b), layout EA (3a/3b) and the lost+found insert
 * (4a/4b) — are declared and then executed inside one transaction,
 * serialized by an UPDATE ibits lock on the lost+found directory.
 *
 * Returns +1 on success per the repair contract above.
 */
1851 static int lfsck_layout_recreate_parent(const struct lu_env *env,
1852 struct lfsck_component *com,
1853 struct lfsck_tgt_desc *ltd,
1854 struct lu_orphan_rec *rec,
1855 struct lu_fid *cfid,
1857 const char *postfix,
1860 struct lfsck_thread_info *info = lfsck_env_info(env);
1861 char *name = info->lti_key;
1862 struct lu_attr *la = &info->lti_la;
1863 struct dt_object_format *dof = &info->lti_dof;
1864 struct lfsck_instance *lfsck = com->lc_lfsck;
1865 struct lu_fid *pfid = &rec->lor_fid;
1866 struct lu_fid *tfid = &info->lti_fid3;
1867 struct dt_device *next = lfsck->li_next;
1868 struct dt_object *pobj = NULL;
1869 struct dt_object *cobj = NULL;
1870 struct thandle *th = NULL;
1871 struct lu_buf *pbuf = NULL;
1872 struct lu_buf *ea_buf = &info->lti_big_buf;
1873 struct lustre_handle lh = { 0 };
1874 int buflen = ea_buf->lb_len;
1879 /* Create .lustre/lost+found/MDTxxxx when needed. */
1880 if (unlikely(lfsck->li_lpf_obj == NULL)) {
1881 rc = lfsck_create_lpf(env, lfsck);
/* No known parent: allocate a new FID and prepare the child pfid EA. */
1886 if (fid_is_zero(pfid)) {
1887 struct filter_fid *ff = &info->lti_new_pfid;
1889 rc = lfsck_fid_alloc(env, lfsck, pfid, false);
1893 ff->ff_parent.f_seq = cpu_to_le64(pfid->f_seq);
1894 ff->ff_parent.f_oid = cpu_to_le32(pfid->f_oid);
1895 /* Currently, the filter_fid::ff_parent::f_ver is not the
1896 * real parent MDT-object's FID::f_ver, instead it is the
1897 * OST-object index in its parent MDT-object's layout EA. */
1898 ff->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
1899 pbuf = lfsck_buf_get(env, ff, sizeof(struct filter_fid));
1900 cobj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, cfid);
1902 RETURN(PTR_ERR(cobj));
1905 CDEBUG(D_LFSCK, "Re-create the lost MDT-object: parent "
1906 DFID", child "DFID", OST-index %u, stripe-index %u, "
1907 "prefix %s, postfix %s\n",
1908 PFID(pfid), PFID(cfid), ltd->ltd_index, ea_off, prefix, postfix);
1910 pobj = lfsck_object_find_by_dev(env, lfsck->li_bottom, pfid);
1912 GOTO(put, rc = PTR_ERR(pobj));
1914 LASSERT(prefix != NULL);
1915 LASSERT(postfix != NULL);
1919 * 1. Use the MDT-object's FID as the name with prefix and postfix.
1921 * 1.1 prefix "C-": More than one OST-objects claim the same
1922 * MDT-object and the same slot in the layout EA.
1923 * It may be created for dangling referenced MDT
1924 * object or may be not.
1925 * 1.2 prefix "N-": The orphan OST-object does not know which one
1926 * is the real parent, so the LFSCK assign a new
1927 * FID as its parent.
1928 * 1.3 prefix "R-": The orphan OST-object know its parent FID but
1929 * does not know the position in the namespace.
1931 * 2. If there is name conflict, append more index for new name. */
1932 sprintf(name, "%s"DFID"%s", prefix, PFID(pfid), postfix);
1934 rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
1935 (const struct dt_key *)name, BYPASS_CAPA);
1936 if (rc != 0 && rc != -ENOENT)
/* rc == 0 means the name is taken: disambiguate with the version. */
1939 if (unlikely(rc == 0)) {
1940 CWARN("%s: The name %s under lost+found has been used "
1941 "by the "DFID". Try to increase the FID version "
1942 "for the new file name.\n",
1943 lfsck_lfsck2name(lfsck), name, PFID(tfid));
1944 sprintf(name, "%s"DFID"%s-%d", prefix, PFID(pfid),
/* Owner comes from the orphan record; mode is a plain 0600 regular file. */
1949 memset(la, 0, sizeof(*la));
1950 la->la_uid = rec->lor_uid;
1951 la->la_gid = rec->lor_gid;
1952 la->la_mode = S_IFREG | S_IRUSR | S_IWUSR;
1953 la->la_valid = LA_MODE | LA_UID | LA_GID;
1955 memset(dof, 0, sizeof(*dof));
1956 dof->dof_type = dt_mode_to_dft(S_IFREG);
1958 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
1960 lu_buf_realloc(ea_buf, rc);
1961 buflen = ea_buf->lb_len;
1962 if (ea_buf->lb_buf == NULL)
1963 GOTO(put, rc = -ENOMEM);
1965 ea_buf->lb_len = rc;
1968 /* Hold update lock on the .lustre/lost+found/MDTxxxx/.
1970 * XXX: Currently, we do not grab the PDO lock as normal create cases,
1971 * because creating MDT-object for orphan OST-object is rare, we
1972 * do not much care about the performance. It can be improved in
1973 * the future when needed. */
1974 rc = lfsck_layout_lock(env, com, lfsck->li_lpf_obj, &lh,
1975 MDS_INODELOCK_UPDATE);
1979 th = dt_trans_create(env, next);
1981 GOTO(unlock, rc = PTR_ERR(th));
1983 /* 1a. Update OST-object's parent information remotely.
1985 * If other subsequent modifications failed, then next LFSCK scanning
1986 * will process the OST-object as orphan again with known parent FID. */
1988 rc = dt_declare_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th);
1993 /* 2a. Create the MDT-object locally. */
1994 rc = dt_declare_create(env, pobj, la, NULL, dof, th);
1998 /* 3a. Add layout EA for the MDT-object. */
1999 rc = dt_declare_xattr_set(env, pobj, ea_buf, XATTR_NAME_LOV,
2000 LU_XATTR_CREATE, th);
2004 /* 4a. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2005 rc = dt_declare_insert(env, lfsck->li_lpf_obj,
2006 (const struct dt_rec *)pfid,
2007 (const struct dt_key *)name, th);
2011 rc = dt_trans_start(env, next, th);
2015 /* 1b. Update OST-object's parent information remotely. */
2017 rc = dt_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th,
2023 dt_write_lock(env, pobj, 0);
2024 /* 2b. Create the MDT-object locally. */
2025 rc = dt_create(env, pobj, la, NULL, dof, th);
2027 /* 3b. Add layout EA for the MDT-object. */
2028 rc = lfsck_layout_extend_lovea(env, th, pobj, cfid, ea_buf,
2029 LU_XATTR_CREATE, ltd->ltd_index,
2031 dt_write_unlock(env, pobj);
2035 /* 4b. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2036 rc = dt_insert(env, lfsck->li_lpf_obj,
2037 (const struct dt_rec *)pfid,
2038 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
2043 dt_trans_stop(env, next, th);
2046 lfsck_layout_unlock(&lh);
2049 if (cobj != NULL && !IS_ERR(cobj))
2050 lu_object_put(env, &cobj->do_lu);
2051 if (pobj != NULL && !IS_ERR(pobj))
2052 lu_object_put(env, &pobj->do_lu);
/* Restore the shared big buffer's recorded length for later users. */
2053 ea_buf->lb_len = buflen;
2055 return rc >= 0 ? 1 : rc;
/*
 * Ask the OST at @index to conditionally destroy the OST-object @fid:
 * builds an LE_CONDITIONAL_DESTROY lfsck_request and sends it as a
 * synchronous LFSCK_NOTIFY RPC.  The target must support
 * OBD_CONNECT_LFSCK (otherwise -EOPNOTSUPP).  The destroy decision
 * itself is made on the OST side — see
 * lfsck_layout_slave_conditional_destroy().
 */
2058 static int lfsck_layout_master_conditional_destroy(const struct lu_env *env,
2059 struct lfsck_component *com,
2060 const struct lu_fid *fid,
2063 struct lfsck_thread_info *info = lfsck_env_info(env);
2064 struct lfsck_request *lr = &info->lti_lr;
2065 struct lfsck_instance *lfsck = com->lc_lfsck;
2066 struct lfsck_tgt_desc *ltd;
2067 struct ptlrpc_request *req;
2068 struct lfsck_request *tmp;
2069 struct obd_export *exp;
2073 ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
2074 if (unlikely(ltd == NULL))
2078 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
2079 GOTO(put, rc = -EOPNOTSUPP);
2081 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2083 GOTO(put, rc = -ENOMEM);
2085 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2087 ptlrpc_request_free(req);
2092 memset(lr, 0, sizeof(*lr));
2093 lr->lr_event = LE_CONDITIONAL_DESTROY;
2094 lr->lr_active = LT_LAYOUT;
2097 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2099 ptlrpc_request_set_replen(req);
2101 rc = ptlrpc_queue_wait(req);
2102 ptlrpc_req_finished(req);
/*
 * OST-side handler for LE_CONDITIONAL_DESTROY: destroy the OST-object
 * named by @lr->lr_fid only if it is still the untouched placeholder
 * that a former LFSCK dangling-reference repair created.
 *
 * A placeholder is recognized by la_ctime == 0 (and the absence of
 * S_ISUID per the pre-lock check below); any other object returns
 * -ETXTBSY.  The check is done twice: a cheap one under a read lock,
 * then again under the write lock after taking an EX extent lock on
 * [0, EOF] to flush/serialize against any in-flight writes.  The
 * ref_del + destroy run in one local transaction.
 */
2112 static int lfsck_layout_slave_conditional_destroy(const struct lu_env *env,
2113 struct lfsck_component *com,
2114 struct lfsck_request *lr)
2116 struct lfsck_thread_info *info = lfsck_env_info(env);
2117 struct lu_attr *la = &info->lti_la;
2118 ldlm_policy_data_t *policy = &info->lti_policy;
2119 struct ldlm_res_id *resid = &info->lti_resid;
2120 struct lfsck_instance *lfsck = com->lc_lfsck;
2121 struct dt_device *dev = lfsck->li_bottom;
2122 struct lu_fid *fid = &lr->lr_fid;
2123 struct dt_object *obj;
2124 struct thandle *th = NULL;
2125 struct lustre_handle lh = { 0 };
2130 obj = lfsck_object_find_by_dev(env, dev, fid);
2132 RETURN(PTR_ERR(obj));
2134 dt_read_lock(env, obj, 0);
2135 if (dt_object_exists(obj) == 0) {
2136 dt_read_unlock(env, obj);
2138 GOTO(put, rc = -ENOENT);
2141 /* Get obj's attr without lock firstly. */
2142 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2143 dt_read_unlock(env, obj);
/* Modified (ctime set) or not a repair placeholder: refuse to destroy. */
2147 if (likely(la->la_ctime != 0 || la->la_mode & S_ISUID))
2148 GOTO(put, rc = -ETXTBSY);
2150 /* Acquire extent lock on [0, EOF] to sync with all possible written. */
2151 LASSERT(lfsck->li_namespace != NULL);
2153 memset(policy, 0, sizeof(*policy));
2154 policy->l_extent.end = OBD_OBJECT_EOF;
2155 ost_fid_build_resid(fid, resid);
2156 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_EXTENT,
2157 policy, LCK_EX, &flags, ldlm_blocking_ast,
2158 ldlm_completion_ast, NULL, NULL, 0,
2159 LVB_T_NONE, NULL, &lh);
2161 GOTO(put, rc = -EIO);
2163 dt_write_lock(env, obj, 0);
2164 /* Get obj's attr within lock again. */
2165 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Re-check: someone may have written the object while unlocked. */
2169 if (la->la_ctime != 0)
2170 GOTO(unlock, rc = -ETXTBSY);
2172 th = dt_trans_create(env, dev);
2174 GOTO(unlock, rc = PTR_ERR(th));
2176 rc = dt_declare_ref_del(env, obj, th);
2180 rc = dt_declare_destroy(env, obj, th);
2184 rc = dt_trans_start_local(env, dev, th);
2188 rc = dt_ref_del(env, obj, th);
2192 rc = dt_destroy(env, obj, th);
2194 CDEBUG(D_LFSCK, "Destroy the empty OST-object "DFID" which "
2195 "was created for reparing dangling referenced case. "
2196 "But the original missed OST-object is found now.\n",
2202 dt_trans_stop(env, dev, th);
2205 dt_write_unlock(env, obj);
2206 ldlm_lock_decref(&lh, LCK_EX);
2209 lu_object_put(env, &obj->do_lu);
2215 * Some OST-object has occupied the specified layout EA slot.
2216 * Such OST-object may be generated by the LFSCK when repair
2217 * dangling referenced MDT-object, which can be indicated by
2218 * attr::la_ctime == 0 but without S_ISUID in la_mode. If it
2219 * is true and such OST-object has not been modified yet, we
2220 * will replace it with the orphan OST-object; otherwise the
2221 * LFSCK will create new MDT-object to reference the orphan.
2223 * \retval +1: repaired
2224 * \retval 0: did nothing
2225 * \retval -ve: on error
/*
 * Resolve a layout EA slot conflict between the current occupant
 * (@slot, decoded to cfid2/ost_idx2) and the orphan OST-object @cfid.
 * Under the parent's LAYOUT|XATTR lock, the occupant is asked to
 * conditionally self-destroy; -ETXTBSY means it is in real use, in
 * which case the lock is dropped and a brand-new "C-"-prefixed
 * MDT-object is created for the orphan instead.  Otherwise the slot is
 * refilled with @cfid (layout generation bumped) in a local
 * transaction.  @ori_len restores ea_buf->lb_len on every exit path.
 */
2227 static int lfsck_layout_conflict_create(const struct lu_env *env,
2228 struct lfsck_component *com,
2229 struct lfsck_tgt_desc *ltd,
2230 struct lu_orphan_rec *rec,
2231 struct dt_object *parent,
2232 struct lu_fid *cfid,
2233 struct lu_buf *ea_buf,
2234 struct lov_ost_data_v1 *slot,
2235 __u32 ea_off, __u32 ori_len)
2237 struct lfsck_thread_info *info = lfsck_env_info(env);
2238 struct lu_fid *cfid2 = &info->lti_fid2;
2239 struct ost_id *oi = &info->lti_oi;
2240 char *postfix = info->lti_tmpbuf;
2241 struct lov_mds_md_v1 *lmm = ea_buf->lb_buf;
2242 struct dt_device *dev = com->lc_lfsck->li_bottom;
2243 struct thandle *th = NULL;
2244 struct lustre_handle lh = { 0 };
2245 __u32 ost_idx2 = le32_to_cpu(slot->l_ost_idx);
2249 ostid_le_to_cpu(&slot->l_ost_oi, oi);
2250 ostid_to_fid(cfid2, oi, ost_idx2);
2252 CDEBUG(D_LFSCK, "Handle layout EA conflict: parent "DFID
2253 ", cur-child "DFID" on the OST %u, orphan-child "
2254 DFID" on the OST %u, stripe-index %u\n",
2255 PFID(lfsck_dto2fid(parent)), PFID(cfid2), ost_idx2,
2256 PFID(cfid), ltd->ltd_index, ea_off);
2258 /* Hold layout lock on the parent to prevent others to access. */
2259 rc = lfsck_layout_lock(env, com, parent, &lh,
2260 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2264 rc = lfsck_layout_master_conditional_destroy(env, com, cfid2, ost_idx2);
2266 /* If the conflict OST-obejct is not created for fixing dangling
2267 * referenced MDT-object in former LFSCK check/repair, or it has
2268 * been modified by others, then we cannot destroy it. Re-create
2269 * a new MDT-object for the orphan OST-object. */
2270 if (rc == -ETXTBSY) {
2271 /* No need the layout lock on the original parent. */
2272 lfsck_layout_unlock(&lh);
2273 ea_buf->lb_len = ori_len;
/* Force a fresh parent FID; postfix encodes the old parent + slot. */
2275 fid_zero(&rec->lor_fid);
2276 snprintf(postfix, LFSCK_TMPBUF_LEN, "-"DFID"-%x",
2277 PFID(lu_object_fid(&parent->do_lu)), ea_off);
2278 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2279 "C-", postfix, ea_off);
2284 if (rc != 0 && rc != -ENOENT)
2287 th = dt_trans_create(env, dev);
2289 GOTO(unlock, rc = PTR_ERR(th));
2291 rc = dt_declare_xattr_set(env, parent, ea_buf, XATTR_NAME_LOV,
2292 LU_XATTR_REPLACE, th);
2296 rc = dt_trans_start_local(env, dev, th);
2300 dt_write_lock(env, parent, 0);
/* Occupant destroyed (or already gone): take over the slot. */
2301 lmm->lmm_layout_gen = cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2302 rc = lfsck_layout_refill_lovea(env, th, parent, cfid, ea_buf, slot,
2303 LU_XATTR_REPLACE, ltd->ltd_index);
2304 dt_write_unlock(env, parent);
2309 dt_trans_stop(env, dev, th);
2312 lfsck_layout_unlock(&lh);
2315 ea_buf->lb_len = ori_len;
2317 return rc >= 0 ? 1 : rc;
2321 * \retval +1: repaired
2322 * \retval 0: did nothing
2323 * \retval -ve: on error
/*
 * Repair @parent's LOV EA so that it references the orphan OST-object
 * @cfid at stripe slot @ea_off.
 *
 * Under the parent's LAYOUT|XATTR DLM lock and a local transaction,
 * the current EA is fetched and classified:
 *   - missing/empty           -> create a fresh EA (LU_XATTR_CREATE);
 *   - crashed header (-EINVAL)-> rebuild it from scratch (reset);
 *   - slot beyond stripe count-> extend via lfsck_layout_extend_lovea();
 *   - dummy slot at ea_off    -> refill it in place;
 *   - slot already holds cfid -> mismatched index means the child's
 *                                pfid is fixed instead
 *                                (lfsck_layout_update_pfid());
 *   - slot held by another    -> delegate to
 *                                lfsck_layout_conflict_create().
 * Dry-run mode (LPF_DRYRUN) returns +1 without modifying anything.
 * When the transaction was declared too small the sequence re-tries
 * (the retry/"again" logic is partly outside the visible lines).
 */
2325 static int lfsck_layout_recreate_lovea(const struct lu_env *env,
2326 struct lfsck_component *com,
2327 struct lfsck_tgt_desc *ltd,
2328 struct lu_orphan_rec *rec,
2329 struct dt_object *parent,
2330 struct lu_fid *cfid,
2331 __u32 ost_idx, __u32 ea_off)
2333 struct lfsck_thread_info *info = lfsck_env_info(env);
2334 struct lu_buf *buf = &info->lti_big_buf;
2335 struct lu_fid *fid = &info->lti_fid2;
2336 struct ost_id *oi = &info->lti_oi;
2337 struct lfsck_instance *lfsck = com->lc_lfsck;
2338 struct dt_device *dt = lfsck->li_bottom;
2339 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2340 struct thandle *handle = NULL;
2341 size_t buflen = buf->lb_len;
2342 struct lov_mds_md_v1 *lmm;
2343 struct lov_ost_data_v1 *objs;
2344 struct lustre_handle lh = { 0 };
2351 bool locked = false;
2354 CDEBUG(D_LFSCK, "Re-create the crashed layout EA: parent "
2355 DFID", child "DFID", OST-index %u, stripe-index %u\n",
2356 PFID(lfsck_dto2fid(parent)), PFID(cfid), ost_idx, ea_off);
2358 rc = lfsck_layout_lock(env, com, parent, &lh,
2359 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2365 dt_write_unlock(env, parent);
2369 if (handle != NULL) {
2370 dt_trans_stop(env, dt, handle);
2375 GOTO(unlock_layout, rc);
2377 if (buf->lb_len < rc) {
2378 lu_buf_realloc(buf, rc);
2379 buflen = buf->lb_len;
2380 if (buf->lb_buf == NULL)
2381 GOTO(unlock_layout, rc = -ENOMEM);
2384 if (!(bk->lb_param & LPF_DRYRUN)) {
2385 handle = dt_trans_create(env, dt);
2387 GOTO(unlock_layout, rc = PTR_ERR(handle));
2389 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2394 rc = dt_trans_start_local(env, dt, handle);
2399 dt_write_lock(env, parent, 0);
2401 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2402 if (rc == -ERANGE) {
/* Buffer too small: probe the real EA size with LU_BUF_NULL. */
2403 rc = dt_xattr_get(env, parent, &LU_BUF_NULL, XATTR_NAME_LOV,
2407 } else if (rc == -ENODATA || rc == 0) {
2408 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2409 /* If the declared is not big enough, re-try. */
2410 if (buf->lb_len < rc)
2413 fl = LU_XATTR_CREATE;
2414 } else if (rc < 0) {
2415 GOTO(unlock_parent, rc);
2416 } else if (unlikely(buf->lb_len == 0)) {
2419 fl = LU_XATTR_REPLACE;
2422 if (fl == LU_XATTR_CREATE) {
2423 if (bk->lb_param & LPF_DRYRUN)
2424 GOTO(unlock_parent, rc = 1);
2426 LASSERT(buf->lb_len >= rc);
2429 rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
2430 fl, ost_idx, ea_off, false);
2432 GOTO(unlock_parent, rc);
2436 rc1 = lfsck_layout_verify_header(lmm);
2438 /* If the LOV EA crashed, the rebuild it. */
2439 if (rc1 == -EINVAL) {
2440 if (bk->lb_param & LPF_DRYRUN)
2441 GOTO(unlock_parent, rc = 1);
2443 LASSERT(buf->lb_len >= rc);
2446 memset(lmm, 0, buf->lb_len);
2447 rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
2448 fl, ost_idx, ea_off, true);
2450 GOTO(unlock_parent, rc);
2453 /* For other unknown magic/pattern, keep the current LOV EA. */
2455 GOTO(unlock_parent, rc = rc1);
2457 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2458 * been verified in lfsck_layout_verify_header() already. If some
2459 * new magic introduced in the future, then layout LFSCK needs to
2460 * be updated also. */
2461 magic = le32_to_cpu(lmm->lmm_magic);
2462 if (magic == LOV_MAGIC_V1) {
2463 objs = &(lmm->lmm_objects[0]);
2465 LASSERT(magic == LOV_MAGIC_V3);
2466 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2469 count = le16_to_cpu(lmm->lmm_stripe_count);
2471 GOTO(unlock_parent, rc = -EINVAL);
2474 /* Exceed the current end of MDT-object layout EA. Then extend it. */
2475 if (count <= ea_off) {
2476 if (bk->lb_param & LPF_DRYRUN)
2477 GOTO(unlock_parent, rc = 1);
2479 rc = lov_mds_md_size(ea_off + 1, magic);
2480 /* If the declared is not big enough, re-try. */
2481 if (buf->lb_len < rc)
2485 rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
2486 fl, ost_idx, ea_off, false);
2487 GOTO(unlock_parent, rc);
2490 LASSERTF(rc > 0, "invalid rc = %d\n", rc);
/* Walk the existing slots looking for ea_off / cfid. */
2493 for (i = 0; i < count; i++, objs++) {
2494 /* The MDT-object was created via lfsck_layout_recover_create()
2495 * by others before, and we fill the dummy layout EA. */
2496 if (is_dummy_lov_ost_data(objs)) {
2500 if (bk->lb_param & LPF_DRYRUN)
2501 GOTO(unlock_parent, rc = 1);
2503 lmm->lmm_layout_gen =
2504 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2505 rc = lfsck_layout_refill_lovea(env, handle, parent,
2506 cfid, buf, objs, fl,
2508 GOTO(unlock_parent, rc);
2511 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2512 ostid_to_fid(fid, oi, le32_to_cpu(objs->l_ost_idx));
2513 /* It should be rare case, the slot is there, but the LFSCK
2514 * does not handle it during the first-phase cycle scanning. */
2515 if (unlikely(lu_fid_eq(fid, cfid))) {
2517 GOTO(unlock_parent, rc = 0);
2519 /* Rare case that the OST-object index
2520 * does not match the parent MDT-object
2521 * layout EA. We trust the later one. */
2522 if (bk->lb_param & LPF_DRYRUN)
2523 GOTO(unlock_parent, rc = 1);
/* Drop locks/transaction before touching the remote child. */
2525 dt_write_unlock(env, parent);
2527 dt_trans_stop(env, dt, handle);
2528 lfsck_layout_unlock(&lh);
2529 buf->lb_len = buflen;
2530 rc = lfsck_layout_update_pfid(env, com, parent,
2531 cfid, ltd->ltd_tgt, i);
2538 /* The MDT-object exists, but related layout EA slot is occupied
2540 if (bk->lb_param & LPF_DRYRUN)
2541 GOTO(unlock_parent, rc = 1);
2543 dt_write_unlock(env, parent);
2545 dt_trans_stop(env, dt, handle);
2546 lfsck_layout_unlock(&lh);
2547 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
2548 objs = &(lmm->lmm_objects[ea_off]);
2550 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
2551 rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
2552 buf, objs, ea_off, buflen);
2558 dt_write_unlock(env, parent);
2562 dt_trans_stop(env, dt, handle);
2565 lfsck_layout_unlock(&lh);
/* Restore the shared big buffer's recorded length for later users. */
2566 buf->lb_len = buflen;
/*
 * Handle one orphan OST-object record @rec/@cfid returned by the OST's
 * orphan iterator: sanity-check both FIDs, then either re-create a
 * missing MDT parent (zero/unknown pfid, or parent no longer exists —
 * via lfsck_layout_recreate_parent()) or repair the existing parent's
 * layout EA (lfsck_layout_recreate_lovea()).  Remote parents are
 * rejected with -EXDEV, non-regular parents with -EISDIR.  Finally the
 * phase2 statistics (repaired/failed counters) are updated under
 * lc_sem.
 */
2571 static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
2572 struct lfsck_component *com,
2573 struct lfsck_tgt_desc *ltd,
2574 struct lu_orphan_rec *rec,
2575 struct lu_fid *cfid)
2577 struct lfsck_layout *lo = com->lc_file_ram;
2578 struct lu_fid *pfid = &rec->lor_fid;
2579 struct dt_object *parent = NULL;
/* Stripe index is carried in the orphan record's pfid f_stripe_idx. */
2580 __u32 ea_off = pfid->f_stripe_idx;
2584 if (!fid_is_sane(cfid))
2585 GOTO(out, rc = -EINVAL);
2587 if (fid_is_zero(pfid)) {
2588 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2594 if (!fid_is_sane(pfid))
2595 GOTO(out, rc = -EINVAL);
2597 parent = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
2599 GOTO(out, rc = PTR_ERR(parent));
2601 if (unlikely(dt_object_remote(parent) != 0))
2602 GOTO(put, rc = -EXDEV);
2604 if (dt_object_exists(parent) == 0) {
2605 lu_object_put(env, &parent->do_lu);
2606 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2611 if (!S_ISREG(lu_object_attr(&parent->do_lu)))
2612 GOTO(put, rc = -EISDIR);
2614 rc = lfsck_layout_recreate_lovea(env, com, ltd, rec, parent, cfid,
2615 ltd->ltd_index, ea_off);
2621 lu_object_put(env, &parent->do_lu);
2623 /* The layout EA is changed, need to be reloaded next time. */
2624 lu_object_put_nocache(env, &parent->do_lu);
2627 down_write(&com->lc_sem);
2628 com->lc_new_scanned++;
2629 com->lc_new_checked++;
2631 lo->ll_objs_repaired[LLIT_ORPHAN - 1]++;
2633 } else if (rc < 0) {
2634 lo->ll_objs_failed_phase2++;
2636 up_write(&com->lc_sem);
/*
 * Scan the orphan OST-objects exported by the OST @ltd during the
 * layout LFSCK second phase.
 *
 * The per-OST "orphan index" is reached through a pseudo object whose
 * FID is built from (FID_SEQ_IDIF, id 0, ost index); its iterator
 * (dt_lfsck_orphan_features) yields lu_orphan_rec entries, each of
 * which is fed to lfsck_layout_scan_orphan_one().  -ESRCH from load()
 * means the OST's orphan rbtree was discarded (e.g. OSS restart), so
 * the trace file is marked LF_INCOMPLETE.  The loop honors the
 * OBD_FAIL_LFSCK_DELAY3 fault-injection delay, records the last
 * scanned FID for checkpointing, throttles itself via
 * lfsck_control_speed_by_self(), and stops early on error when
 * LPF_FAILOUT is set.  Positive rc (iterator end) is reported as 0.
 */
2641 static int lfsck_layout_scan_orphan(const struct lu_env *env,
2642 struct lfsck_component *com,
2643 struct lfsck_tgt_desc *ltd)
2645 struct lfsck_layout *lo = com->lc_file_ram;
2646 struct lfsck_instance *lfsck = com->lc_lfsck;
2647 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2648 struct lfsck_thread_info *info = lfsck_env_info(env);
2649 struct ost_id *oi = &info->lti_oi;
2650 struct lu_fid *fid = &info->lti_fid;
2651 struct dt_object *obj;
2652 const struct dt_it_ops *iops;
2657 CDEBUG(D_LFSCK, "%s: start the orphan scanning for OST%04x\n",
2658 lfsck_lfsck2name(lfsck), ltd->ltd_index);
2660 ostid_set_seq(oi, FID_SEQ_IDIF);
2661 ostid_set_id(oi, 0);
2662 ostid_to_fid(fid, oi, ltd->ltd_index);
2663 obj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, fid);
2664 if (unlikely(IS_ERR(obj)))
2665 RETURN(PTR_ERR(obj));
2667 rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_orphan_features);
2671 iops = &obj->do_index_ops->dio_it;
2672 di = iops->init(env, obj, 0, BYPASS_CAPA);
2674 GOTO(put, rc = PTR_ERR(di));
2676 rc = iops->load(env, di, 0);
2678 /* -ESRCH means that the orphan OST-objects rbtree has been
2679 * cleanup because of the OSS server restart or other errors. */
2680 lo->ll_flags |= LF_INCOMPLETE;
2685 rc = iops->next(env, di);
2697 struct lu_orphan_rec *rec = &info->lti_rec;
/* Fault injection: optionally delay each step while still running. */
2699 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY3) &&
2701 struct ptlrpc_thread *thread = &lfsck->li_thread;
2702 struct l_wait_info lwi;
2704 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
2706 l_wait_event(thread->t_ctl_waitq,
2707 !thread_is_running(thread),
/* Remember the last scanned FID so phase2 can checkpoint/resume. */
2711 key = iops->key(env, di);
2712 com->lc_fid_latest_scanned_phase2 = *(struct lu_fid *)key;
2713 rc = iops->rec(env, di, (struct dt_rec *)rec, 0);
2715 rc = lfsck_layout_scan_orphan_one(env, com, ltd, rec,
2716 &com->lc_fid_latest_scanned_phase2);
2717 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
2720 lfsck_control_speed_by_self(com);
2722 rc = iops->next(env, di);
2723 } while (rc < 0 && !(bk->lb_param & LPF_FAILOUT));
2730 iops->fini(env, di);
2732 lu_object_put(env, &obj->do_lu);
2734 CDEBUG(D_LFSCK, "%s: finish the orphan scanning for OST%04x, rc = %d\n",
2735 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
2737 return rc > 0 ? 0 : rc;
2740 /* For the MDT-object with dangling reference, we need to repair the
2741 * inconsistency according to the LFSCK sponsor's requirement:
2743 * 1) Keep the inconsistency there and report the inconsistency case,
2744 * then give the chance to the application to find related issues,
2745 * and the users can make the decision about how to handle it with
2746 * more human knowledge. (by default)
2748 * 2) Re-create the missed OST-object with the FID/owner information. */
/*
 * Repair a dangling layout reference: the parent MDT-object's layout EA
 * points at an OST-object that does not exist.
 *
 * By default the inconsistency is only reported; when the bookmark has
 * LPF_CREATE_OSTOBJ set, the lost OST-object is re-created with the
 * parent's owner information and a filter_fid xattr pointing back at
 * the parent.
 *
 * NOTE(review): intermediate lines (rc/buf/create declarations, error
 * checks after declare/start calls, the RETURN) are elided in this
 * listing.
 *
 * \param[in] env	execution environment
 * \param[in] com	layout LFSCK component
 * \param[in] llr	request describing the parent/child pair to repair
 * \param[in] pla	attributes of the parent MDT-object (owner source)
 */
2749 static int lfsck_layout_repair_dangling(const struct lu_env *env,
2750 struct lfsck_component *com,
2751 struct lfsck_layout_req *llr,
2752 const struct lu_attr *pla)
2754 struct lfsck_thread_info *info = lfsck_env_info(env);
2755 struct filter_fid *pfid = &info->lti_new_pfid;
2756 struct dt_allocation_hint *hint = &info->lti_hint;
2757 struct lu_attr *cla = &info->lti_la2;
2758 struct dt_object *parent = llr->llr_parent->llo_obj;
2759 struct dt_object *child = llr->llr_child;
2760 struct dt_device *dev = lfsck_obj2dt_dev(child);
2761 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2762 struct thandle *handle;
2764 struct lustre_handle lh = { 0 };
/* Only re-create the lost OST-object when the sponsor asked for it. */
2769 if (com->lc_lfsck->li_bookmark_ram.lb_param & LPF_CREATE_OSTOBJ)
2774 CDEBUG(D_LFSCK, "Found dangling reference for: parent "DFID
2775 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u. %s",
2776 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2777 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid,
2778 create ? "Create the lost OST-object as required.\n" :
2779 "Keep the MDT-object there by default.\n");
/* Child attributes: inherit owner from the parent; mode is a plain
 * regular file since OST-objects carry no meaningful mode bits. */
2784 memset(cla, 0, sizeof(*cla));
2785 cla->la_uid = pla->la_uid;
2786 cla->la_gid = pla->la_gid;
2787 cla->la_mode = S_IFREG | 0666;
2788 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
2789 LA_ATIME | LA_MTIME | LA_CTIME;
/* Serialize with layout changes on the parent while repairing. */
2791 rc = lfsck_layout_lock(env, com, parent, &lh,
2792 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2796 handle = dt_trans_create(env, dev);
2798 GOTO(unlock1, rc = PTR_ERR(handle));
2800 hint->dah_parent = NULL;
/* Back-pointer xattr for the new child: parent FID plus stripe index. */
2802 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2803 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2804 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2805 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2806 * parent MDT-object's layout EA. */
2807 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2808 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
2810 rc = dt_declare_create(env, child, cla, hint, NULL, handle);
2814 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
2815 LU_XATTR_CREATE, handle);
2819 rc = dt_trans_start(env, dev, handle);
/* Hold the parent's dt read lock so a concurrent destroy is detected
 * via the dying check below before the child is created. */
2823 dt_read_lock(env, parent, 0);
2824 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2825 GOTO(unlock2, rc = 1);
2827 rc = dt_create(env, child, cla, hint, NULL, handle);
2831 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
2832 handle, BYPASS_CAPA);
2837 dt_read_unlock(env, parent);
2840 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2843 lfsck_layout_unlock(&lh);
2848 /* If the OST-object does not recognize the MDT-object as its parent, and
2849 * there is no other MDT-object claims as its parent, then just trust the
2850 * given MDT-object as its parent. So update the OST-object filter_fid. */
/*
 * Repair an unmatched MDT-OST pair: the OST-object's filter_fid does
 * not reference the given parent MDT-object and no other MDT-object
 * claims it, so trust the given parent and rewrite the OST-object's
 * filter_fid (XATTR_NAME_FID) plus its owner attributes.
 *
 * NOTE(review): intermediate lines (rc/buf declarations, error checks,
 * RETURN) are elided in this listing.
 *
 * \param[in] env	execution environment
 * \param[in] com	layout LFSCK component
 * \param[in] llr	request describing the parent/child pair
 * \param[in] pla	parent MDT-object attributes (fallback owner)
 */
2851 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
2852 struct lfsck_component *com,
2853 struct lfsck_layout_req *llr,
2854 const struct lu_attr *pla)
2856 struct lfsck_thread_info *info = lfsck_env_info(env);
2857 struct filter_fid *pfid = &info->lti_new_pfid;
2858 struct lu_attr *tla = &info->lti_la3;
2859 struct dt_object *parent = llr->llr_parent->llo_obj;
2860 struct dt_object *child = llr->llr_child;
2861 struct dt_device *dev = lfsck_obj2dt_dev(child);
2862 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2863 struct thandle *handle;
2865 struct lustre_handle lh = { 0 };
2869 CDEBUG(D_LFSCK, "Repair unmatched MDT-OST pair for: parent "DFID
2870 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
2871 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2872 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
/* Serialize with concurrent layout/xattr changes on the parent. */
2874 rc = lfsck_layout_lock(env, com, parent, &lh,
2875 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2879 handle = dt_trans_create(env, dev);
2881 GOTO(unlock1, rc = PTR_ERR(handle));
/* New back-pointer for the child: parent FID plus stripe index. */
2883 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2884 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2885 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2886 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2887 * parent MDT-object's layout EA. */
2888 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2889 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
2891 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
2895 tla->la_valid = LA_UID | LA_GID;
2896 tla->la_uid = pla->la_uid;
2897 tla->la_gid = pla->la_gid;
2898 rc = dt_declare_attr_set(env, child, tla, handle);
2902 rc = dt_trans_start(env, dev, handle);
/* Write lock on the parent: detect concurrent destroy and keep the
 * owner re-read below consistent with the attr_set on the child. */
2906 dt_write_lock(env, parent, 0);
2907 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2908 GOTO(unlock2, rc = 1);
2910 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
2915 /* Get the latest parent's owner. */
2916 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
/* Re-assert the valid mask: dt_attr_get may have rewritten it. */
2920 tla->la_valid = LA_UID | LA_GID;
2921 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
2926 dt_write_unlock(env, parent);
2929 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2932 lfsck_layout_unlock(&lh);
2937 /* If there are more than one MDT-objects claim as the OST-object's parent,
2938 * and the OST-object only recognizes one of them, then we need to generate
2939 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
/*
 * Repair a multiply-referenced OST-object: more than one MDT-object
 * claims it but it recognizes a different parent, so create a brand-new
 * anonymous OST-object for this parent and splice it into the parent's
 * layout EA at the conflicting stripe index.
 *
 * The layout generation is checked against the cached value and bumped
 * on success, so a concurrent layout change aborts the repair.
 *
 * NOTE(review): this listing elides several lines, including the
 * parameter list tail (la, buf — see the CDEBUG using la, and buf used
 * for the LOV EA), error checks, and the RETURN.
 */
2940 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
2941 struct lfsck_component *com,
2942 struct lfsck_layout_req *llr,
2946 struct lfsck_thread_info *info = lfsck_env_info(env);
2947 struct dt_allocation_hint *hint = &info->lti_hint;
2948 struct dt_object_format *dof = &info->lti_dof;
2949 struct dt_device *pdev = com->lc_lfsck->li_next;
2950 struct ost_id *oi = &info->lti_oi;
2951 struct dt_object *parent = llr->llr_parent->llo_obj;
2952 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
2953 struct dt_object *child = NULL;
2954 struct lu_device *d = &cdev->dd_lu_dev;
2955 struct lu_object *o = NULL;
2956 struct thandle *handle;
2957 struct lov_mds_md_v1 *lmm;
2958 struct lov_ost_data_v1 *objs;
2959 struct lustre_handle lh = { 0 };
2964 CDEBUG(D_LFSCK, "Repair multiple references for: parent "DFID
2965 ", OST-index %u, stripe-index %u, owner %u:%u\n",
2966 PFID(lfsck_dto2fid(parent)), llr->llr_ost_idx,
2967 llr->llr_lov_idx, la->la_uid, la->la_gid);
2969 rc = lfsck_layout_lock(env, com, parent, &lh,
2970 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2974 handle = dt_trans_create(env, pdev);
2976 GOTO(unlock1, rc = PTR_ERR(handle));
/* Allocate an anonymous object on the child's device; the new object
 * gets a fresh FID assigned by the OSP layer. */
2978 o = lu_object_anon(env, d, NULL);
2980 GOTO(stop, rc = PTR_ERR(o));
/* Remember the top object for the final lu_object_put(), then locate
 * the device-local slice; child is re-assigned to that slice below. */
2982 child = container_of(o, struct dt_object, do_lu);
2983 o = lu_object_locate(o->lo_header, d->ld_type);
2984 if (unlikely(o == NULL))
2985 GOTO(stop, rc = -EINVAL);
2987 child = container_of(o, struct dt_object, do_lu);
2988 la->la_valid = LA_UID | LA_GID;
2989 hint->dah_parent = NULL;
2991 dof->dof_type = DFT_REGULAR;
2992 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
2996 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2997 LU_XATTR_REPLACE, handle);
3001 rc = dt_trans_start(env, pdev, handle);
3005 dt_write_lock(env, parent, 0);
3006 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3007 GOTO(unlock2, rc = 0);
/* Re-read the parent's LOV EA under lock; a missing/short EA means
 * someone removed or replaced the layout — nothing to repair. */
3009 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
3010 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
3011 GOTO(unlock2, rc = 0);
3014 /* Someone change layout during the LFSCK, no need to repair then. */
3015 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
3016 GOTO(unlock2, rc = 0);
3018 rc = dt_create(env, child, la, hint, dof, handle);
3022 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3023 * been verified in lfsck_layout_verify_header() already. If some
3024 * new magic introduced in the future, then layout LFSCK needs to
3025 * be updated also. */
3026 magic = le32_to_cpu(lmm->lmm_magic);
3027 if (magic == LOV_MAGIC_V1) {
3028 objs = &(lmm->lmm_objects[0]);
3030 LASSERT(magic == LOV_MAGIC_V3);
3031 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Bump the layout generation and point the conflicting stripe slot at
 * the newly created OST-object. */
3034 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
3035 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
3036 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
3037 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
3038 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
3039 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3040 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
/* Positive return signals "repaired" to the caller. */
3042 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
3045 dt_write_unlock(env, parent);
3049 lu_object_put(env, &child->do_lu);
3051 dt_trans_stop(env, pdev, handle);
3054 lfsck_layout_unlock(&lh);
3059 /* If the MDT-object and the OST-object have different owner information,
3060 * then trust the MDT-object, because the normal chown/chgrp handle order
3061 * is from MDT to OST, and it is possible that some chown/chgrp operation
3062 * is partly done. */
/*
 * Repair inconsistent ownership between MDT-object and OST-object by
 * copying the parent MDT-object's uid/gid onto the OST-object — the
 * MDT is authoritative because chown/chgrp flows from MDT to OST.
 *
 * The parent's owner is re-read under lock and compared against the
 * cached value; if it changed concurrently, the repair is skipped.
 *
 * NOTE(review): intermediate lines (rc declaration, error checks,
 * RETURN) are elided in this listing.
 *
 * \param[in] env	execution environment
 * \param[in] com	layout LFSCK component
 * \param[in] llr	request describing the parent/child pair
 * \param[in] pla	previously fetched parent attributes
 */
3063 static int lfsck_layout_repair_owner(const struct lu_env *env,
3064 struct lfsck_component *com,
3065 struct lfsck_layout_req *llr,
3066 struct lu_attr *pla)
3068 struct lfsck_thread_info *info = lfsck_env_info(env);
3069 struct lu_attr *tla = &info->lti_la3;
3070 struct dt_object *parent = llr->llr_parent->llo_obj;
3071 struct dt_object *child = llr->llr_child;
3072 struct dt_device *dev = lfsck_obj2dt_dev(child);
3073 struct thandle *handle;
3077 CDEBUG(D_LFSCK, "Repair inconsistent file owner for: parent "DFID
3078 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
3079 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
3080 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
3082 handle = dt_trans_create(env, dev);
3084 RETURN(PTR_ERR(handle));
3086 tla->la_uid = pla->la_uid;
3087 tla->la_gid = pla->la_gid;
3088 tla->la_valid = LA_UID | LA_GID;
3089 rc = dt_declare_attr_set(env, child, tla, handle);
3093 rc = dt_trans_start(env, dev, handle);
3097 /* Use the dt_object lock to serialize with destroy and attr_set. */
3098 dt_read_lock(env, parent, 0);
3099 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3100 GOTO(unlock, rc = 1);
3102 /* Get the latest parent's owner. */
3103 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3105 CWARN("%s: fail to get the latest parent's ("DFID") owner, "
3106 "not sure whether some others chown/chgrp during the "
3107 "LFSCK: rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
3108 PFID(lfsck_dto2fid(parent)), rc);
3113 /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
3114 if (unlikely(tla->la_uid != pla->la_uid ||
3115 tla->la_gid != pla->la_gid))
3116 GOTO(unlock, rc = 1);
/* Re-assert the valid mask since dt_attr_get may have rewritten it. */
3118 tla->la_valid = LA_UID | LA_GID;
3119 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3124 dt_read_unlock(env, parent);
3127 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3132 /* Check whether the OST-object correctly back points to the
3133 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
/*
 * Classify the relationship between an OST-object and the MDT-object
 * it back-points to via its filter_fid (@pfid).
 *
 * Returns one of the LLIT_* inconsistency codes (positive) when a
 * problem is detected, 0 when the pair is consistent, or a negative
 * error code. Handles: zero/invalid pfid, self-reference with right
 * or wrong stripe index, and a pfid pointing at some other MDT-object
 * whose layout EA must be searched for the child FID.
 *
 * NOTE(review): intermediate lines (rc/i/count/magic declarations,
 * several checks, RETURNs) are elided in this listing.
 */
3134 static int lfsck_layout_check_parent(const struct lu_env *env,
3135 struct lfsck_component *com,
3136 struct dt_object *parent,
3137 const struct lu_fid *pfid,
3138 const struct lu_fid *cfid,
3139 const struct lu_attr *pla,
3140 const struct lu_attr *cla,
3141 struct lfsck_layout_req *llr,
3142 struct lu_buf *lov_ea, __u32 idx)
3144 struct lfsck_thread_info *info = lfsck_env_info(env);
3145 struct lu_buf *buf = &info->lti_big_buf;
3146 struct dt_object *tobj;
3147 struct lov_mds_md_v1 *lmm;
3148 struct lov_ost_data_v1 *objs;
3155 if (fid_is_zero(pfid)) {
3156 /* client never wrote. */
3157 if (cla->la_size == 0 && cla->la_blocks == 0) {
/* Never-written object: only the owner can disagree. */
3158 if (unlikely(cla->la_uid != pla->la_uid ||
3159 cla->la_gid != pla->la_gid))
3160 RETURN (LLIT_INCONSISTENT_OWNER);
3165 RETURN(LLIT_UNMATCHED_PAIR);
3168 if (unlikely(!fid_is_sane(pfid)))
3169 RETURN(LLIT_UNMATCHED_PAIR);
/* Back-pointer names this parent: consistent only if the stripe index
 * in the filter_fid matches the slot in the layout EA. */
3171 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
3172 if (llr->llr_lov_idx == idx)
3175 RETURN(LLIT_UNMATCHED_PAIR);
/* Back-pointer names a different MDT-object: load it and search its
 * layout EA for the child FID. */
3178 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
3180 RETURN(LLIT_UNMATCHED_PAIR);
3183 RETURN(PTR_ERR(tobj));
3185 if (!dt_object_exists(tobj))
3186 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3188 /* Load the tobj's layout EA, in spite of it is a local MDT-object or
3189 * remote one on another MDT. Then check whether the given OST-object
3190 * is in such layout. If yes, it is multiple referenced, otherwise it
3191 * is unmatched referenced case. */
3192 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
3194 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3200 magic = le32_to_cpu(lmm->lmm_magic);
3201 if (magic == LOV_MAGIC_V1) {
3202 objs = &(lmm->lmm_objects[0]);
3204 LASSERT(magic == LOV_MAGIC_V3);
3205 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
3208 count = le16_to_cpu(lmm->lmm_stripe_count);
3209 for (i = 0; i < count; i++, objs++) {
3210 struct lu_fid *tfid = &info->lti_fid2;
3211 struct ost_id *oi = &info->lti_oi;
/* Skip holes in the layout (unassigned stripe slots). */
3213 if (is_dummy_lov_ost_data(objs))
3216 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3217 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
3218 if (lu_fid_eq(cfid, tfid)) {
3221 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
3225 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3228 lfsck_object_put(env, tobj);
/*
 * Handle one layout LFSCK request from the assistant queue: fetch
 * parent and child attributes, read the child's filter_fid, classify
 * the inconsistency (dangling, unmatched pair, multiple references,
 * inconsistent owner), then dispatch the matching repair routine and
 * account the result in the per-component statistics.
 *
 * NOTE(review): intermediate lines (rc/idx declarations, GOTO targets,
 * some branches, RETURN) are elided in this listing.
 */
3233 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
3234 struct lfsck_component *com,
3235 struct lfsck_layout_req *llr)
3237 struct lfsck_layout *lo = com->lc_file_ram;
3238 struct lfsck_thread_info *info = lfsck_env_info(env);
3239 struct filter_fid_old *pea = &info->lti_old_pfid;
3240 struct lu_fid *pfid = &info->lti_fid;
3241 struct lu_buf *buf = NULL;
3242 struct dt_object *parent = llr->llr_parent->llo_obj;
3243 struct dt_object *child = llr->llr_child;
3244 struct lu_attr *pla = &info->lti_la;
3245 struct lu_attr *cla = &info->lti_la2;
3246 struct lfsck_instance *lfsck = com->lc_lfsck;
3247 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3248 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
3253 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
/* A dying parent means the MDT-object is being destroyed — skip. */
3255 if (lu_object_is_dying(parent->do_lu.lo_header))
3261 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
3262 if (rc == -ENOENT) {
3263 if (lu_object_is_dying(parent->do_lu.lo_header))
/* Child missing but parent alive: dangling layout reference. */
3266 type = LLIT_DANGLING;
/* Read the child's back-pointer xattr; both the old (larger) and the
 * current filter_fid sizes are accepted. */
3273 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
3274 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
3275 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
3276 rc != sizeof(struct filter_fid))) {
3277 type = LLIT_UNMATCHED_PAIR;
3281 if (rc < 0 && rc != -ENODATA)
3284 if (rc == -ENODATA) {
3287 fid_le_to_cpu(pfid, &pea->ff_parent);
3288 /* Currently, the filter_fid::ff_parent::f_ver is not the
3289 * real parent MDT-object's FID::f_ver, instead it is the
3290 * OST-object index in its parent MDT-object's layout EA. */
3291 idx = pfid->f_stripe_idx;
3295 rc = lfsck_layout_check_parent(env, com, parent, pfid,
3296 lu_object_fid(&child->do_lu),
3297 pla, cla, llr, buf, idx);
3306 if (unlikely(cla->la_uid != pla->la_uid ||
3307 cla->la_gid != pla->la_gid)) {
3308 type = LLIT_INCONSISTENT_OWNER;
/* Dry-run: record the inconsistency but perform no repair. */
3313 if (bk->lb_param & LPF_DRYRUN) {
3314 if (type != LLIT_NONE)
/* Dispatch the repair routine matching the classified type. */
3322 rc = lfsck_layout_repair_dangling(env, com, llr, pla);
3324 case LLIT_UNMATCHED_PAIR:
3325 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
3327 case LLIT_MULTIPLE_REFERENCED:
3328 rc = lfsck_layout_repair_multiple_references(env, com, llr,
3331 case LLIT_INCONSISTENT_OWNER:
3332 rc = lfsck_layout_repair_owner(env, com, llr, pla);
/* Statistics update below is protected by lc_sem. */
3342 down_write(&com->lc_sem);
3344 struct lfsck_layout_master_data *llmd = com->lc_data;
3346 if (unlikely(llmd->llmd_exit)) {
3348 } else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
3349 rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
3350 rc == -EHOSTUNREACH) {
3351 /* If cannot touch the target server,
3352 * mark the LFSCK as INCOMPLETE. */
3353 CERROR("%s: Fail to talk with OST %x: rc = %d.\n",
3354 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
3355 lo->ll_flags |= LF_INCOMPLETE;
3356 lo->ll_objs_skipped++;
3359 lfsck_layout_record_failure(env, lfsck, lo);
3361 } else if (rc > 0) {
3362 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
3363 "unknown type = %d\n", type);
3365 lo->ll_objs_repaired[type - 1]++;
3366 if (bk->lb_param & LPF_DRYRUN &&
3367 unlikely(lo->ll_pos_first_inconsistent == 0))
3368 lo->ll_pos_first_inconsistent =
3369 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
3372 up_write(&com->lc_sem);
/*
 * Main body of the layout LFSCK assistant thread on the MDT master.
 *
 * Phase 1: notify all targets of LE_START, mark itself SVC_RUNNING,
 * then drain lfsck_layout_req items queued by the engine thread,
 * handling each via lfsck_layout_assistant_handle_one().
 * Phase 2 (double scan): poll slave targets, and for each OST that
 * reaches phase 2, run the orphan scan. On exit, broadcast the final
 * event (LE_PHASE2_DONE / LE_STOP / LE_PEER_EXIT) and mark itself
 * SVC_STOPPED.
 *
 * NOTE(review): this listing has many lines elided (rc/rc1
 * declarations, braces, GOTO labels) and several "&lt..." sequences
 * were corrupted into "<..." by an HTML-encoding pass — e.g.
 * "<a->lta_env" should read "&lta->lta_env" and "<ds->ltd_lock" /
 * "<d->ltd_layout_phase_list" should read "&ltds->..." / "&ltd->...".
 * These are transcription artifacts, flagged here but left byte-intact.
 */
3377 static int lfsck_layout_assistant(void *args)
3379 struct lfsck_thread_args *lta = args;
3380 struct lu_env *env = <a->lta_env;
3381 struct lfsck_component *com = lta->lta_com;
3382 struct lfsck_instance *lfsck = lta->lta_lfsck;
3383 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3384 struct lfsck_position *pos = &com->lc_pos_start;
3385 struct lfsck_thread_info *info = lfsck_env_info(env);
3386 struct lfsck_request *lr = &info->lti_lr;
3387 struct lfsck_layout_master_data *llmd = com->lc_data;
3388 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3389 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3390 struct lfsck_layout_req *llr;
3391 struct l_wait_info lwi = { 0 };
/* Build the LE_START request broadcast to all involved targets. */
3396 memset(lr, 0, sizeof(*lr));
3397 lr->lr_event = LE_START;
3398 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
3399 LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
3400 lr->lr_speed = bk->lb_speed_limit;
3401 lr->lr_version = bk->lb_version;
3402 lr->lr_param = bk->lb_param;
3403 lr->lr_async_windows = bk->lb_async_windows;
3404 lr->lr_flags = LEF_TO_OST;
/* A near-zero OIT cookie means we start from scratch: ask the slaves
 * to reset their state as well. */
3405 if (pos->lp_oit_cookie <= 1)
3406 lr->lr_param |= LPF_RESET;
3408 rc = lfsck_layout_master_notify_others(env, com, lr);
3410 CERROR("%s: fail to notify others for layout start: rc = %d\n",
3411 lfsck_lfsck2name(lfsck), rc);
/* Announce the assistant is up; the master engine waits for this. */
3415 spin_lock(&llmd->llmd_lock);
3416 thread_set_flags(athread, SVC_RUNNING);
3417 spin_unlock(&llmd->llmd_lock);
3418 wake_up_all(&mthread->t_ctl_waitq);
/* Phase 1 main loop: drain the request list filled by the engine. */
3421 while (!list_empty(&llmd->llmd_req_list)) {
3422 bool wakeup = false;
3424 if (unlikely(llmd->llmd_exit ||
3425 !thread_is_running(mthread)))
3426 GOTO(cleanup1, rc = llmd->llmd_post_result);
3428 llr = list_entry(llmd->llmd_req_list.next,
3429 struct lfsck_layout_req,
3431 /* Only the lfsck_layout_assistant thread itself can
3432 * remove the "llr" from the head of the list, LFSCK
3433 * engine thread only inserts other new "lld" at the
3434 * end of the list. So it is safe to handle current
3435 * "llr" without the spin_lock. */
3436 rc = lfsck_layout_assistant_handle_one(env, com, llr);
3437 spin_lock(&llmd->llmd_lock);
3438 list_del_init(&llr->llr_list);
3439 llmd->llmd_prefetched--;
3440 /* Wake up the main engine thread only when the list
3441 * is empty or half of the prefetched items have been
3442 * handled to avoid too frequent thread schedule. */
3443 if (llmd->llmd_prefetched == 0 ||
3444 (bk->lb_async_windows != 0 &&
3445 bk->lb_async_windows / 2 ==
3446 llmd->llmd_prefetched))
3448 spin_unlock(&llmd->llmd_lock);
3450 wake_up_all(&mthread->t_ctl_waitq);
3452 lfsck_layout_req_fini(env, llr);
3453 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* Sleep until new work, a post request, or a double-scan request. */
3457 l_wait_event(athread->t_ctl_waitq,
3458 !lfsck_layout_req_empty(llmd) ||
3460 llmd->llmd_to_post ||
3461 llmd->llmd_to_double_scan,
3464 if (unlikely(llmd->llmd_exit))
3465 GOTO(cleanup1, rc = llmd->llmd_post_result);
3467 if (!list_empty(&llmd->llmd_req_list))
/* Phase 1 complete: broadcast LE_PHASE1_DONE with the result. */
3470 if (llmd->llmd_to_post) {
3471 llmd->llmd_to_post = 0;
3472 LASSERT(llmd->llmd_post_result > 0);
3474 memset(lr, 0, sizeof(*lr));
3475 lr->lr_event = LE_PHASE1_DONE;
3476 lr->lr_status = llmd->llmd_post_result;
3477 rc = lfsck_layout_master_notify_others(env, com, lr);
3479 CERROR("%s: failed to notify others "
3480 "for layout post: rc = %d\n",
3481 lfsck_lfsck2name(lfsck), rc);
3483 /* Wakeup the master engine to go ahead. */
3484 wake_up_all(&mthread->t_ctl_waitq);
/* Phase 2 (double scan): track slave progress and run orphan scans. */
3487 if (llmd->llmd_to_double_scan) {
3488 llmd->llmd_to_double_scan = 0;
3489 atomic_inc(&lfsck->li_double_scan_count);
3490 llmd->llmd_in_double_scan = 1;
3491 wake_up_all(&mthread->t_ctl_waitq);
3493 com->lc_new_checked = 0;
3494 com->lc_new_scanned = 0;
3495 com->lc_time_last_checkpoint = cfs_time_current();
3496 com->lc_time_next_checkpoint =
3497 com->lc_time_last_checkpoint +
3498 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
3500 while (llmd->llmd_in_double_scan) {
3501 struct lfsck_tgt_descs *ltds =
3502 &lfsck->li_ost_descs;
3503 struct lfsck_tgt_desc *ltd;
3505 rc = lfsck_layout_master_query_others(env, com);
3506 if (lfsck_layout_master_to_orphan(llmd))
3512 /* Pull LFSCK status on related targets once
3513 * per 30 seconds if we are not notified. */
3514 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
3515 cfs_time_seconds(1),
3517 rc = l_wait_event(athread->t_ctl_waitq,
3518 lfsck_layout_master_to_orphan(llmd) ||
3520 !thread_is_running(mthread),
3523 if (unlikely(llmd->llmd_exit ||
3524 !thread_is_running(mthread)))
3525 GOTO(cleanup2, rc = 0);
3527 if (rc == -ETIMEDOUT)
/* Pop OSTs that have reached phase 2 and scan their orphans. */
3534 spin_lock(<ds->ltd_lock);
3536 &llmd->llmd_ost_phase2_list)) {
3538 llmd->llmd_ost_phase2_list.next,
3539 struct lfsck_tgt_desc,
3540 ltd_layout_phase_list);
3542 <d->ltd_layout_phase_list);
3543 spin_unlock(<ds->ltd_lock);
3545 if (bk->lb_param & LPF_ALL_TGT) {
3546 rc = lfsck_layout_scan_orphan(
3549 bk->lb_param & LPF_FAILOUT)
3553 if (unlikely(llmd->llmd_exit ||
3554 !thread_is_running(mthread)))
3555 GOTO(cleanup2, rc = 0);
3557 spin_lock(<ds->ltd_lock);
/* All OSTs past phase 1: the double scan is finished (rc = 1). */
3560 if (list_empty(&llmd->llmd_ost_phase1_list)) {
3561 spin_unlock(<ds->ltd_lock);
3562 GOTO(cleanup2, rc = 1);
3564 spin_unlock(<ds->ltd_lock);
3570 /* Cleanup the unfinished requests. */
3571 spin_lock(&llmd->llmd_lock);
3573 llmd->llmd_assistant_status = rc;
3575 while (!list_empty(&llmd->llmd_req_list)) {
3576 llr = list_entry(llmd->llmd_req_list.next,
3577 struct lfsck_layout_req,
3579 list_del_init(&llr->llr_list);
3580 llmd->llmd_prefetched--;
/* Drop the spinlock around req_fini, which may block. */
3581 spin_unlock(&llmd->llmd_lock);
3582 lfsck_layout_req_fini(env, llr);
3583 spin_lock(&llmd->llmd_lock);
3585 spin_unlock(&llmd->llmd_lock);
3587 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
3588 llmd->llmd_prefetched);
/* Build the final broadcast depending on how the assistant exits. */
3591 memset(lr, 0, sizeof(*lr));
3593 lr->lr_event = LE_PHASE2_DONE;
3595 } else if (rc == 0) {
3596 if (lfsck->li_flags & LPF_ALL_TGT) {
3597 lr->lr_event = LE_STOP;
3598 lr->lr_status = LS_STOPPED;
3600 lr->lr_event = LE_PEER_EXIT;
3601 switch (lfsck->li_status) {
3604 lr->lr_status = LS_CO_PAUSED;
3608 lr->lr_status = LS_CO_STOPPED;
3611 CERROR("%s: unknown status: rc = %d\n",
3612 lfsck_lfsck2name(lfsck),
3614 lr->lr_status = LS_CO_FAILED;
3619 if (lfsck->li_flags & LPF_ALL_TGT) {
3620 lr->lr_event = LE_STOP;
3621 lr->lr_status = LS_FAILED;
3623 lr->lr_event = LE_PEER_EXIT;
3624 lr->lr_status = LS_CO_FAILED;
3628 rc1 = lfsck_layout_master_notify_others(env, com, lr);
3630 CERROR("%s: failed to notify others for layout quit: rc = %d\n",
3631 lfsck_lfsck2name(lfsck), rc1);
3635 /* Under force exit case, some requests may be just freed without
3636 * verification, those objects should be re-handled when next run.
3637 * So not update the on-disk tracing file under such case. */
3638 if (llmd->llmd_in_double_scan && !llmd->llmd_exit)
3639 rc1 = lfsck_layout_double_scan_result(env, com, rc);
3642 if (llmd->llmd_in_double_scan)
3643 atomic_dec(&lfsck->li_double_scan_count);
/* Announce termination; the master engine waits on this transition. */
3645 spin_lock(&llmd->llmd_lock);
3646 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
3647 thread_set_flags(athread, SVC_STOPPED);
3648 wake_up_all(&mthread->t_ctl_waitq);
3649 spin_unlock(&llmd->llmd_lock);
3650 lfsck_thread_args_fini(lta);
/*
 * Interpret callback for the slave's async LFSCK_QUERY RPC to a master.
 *
 * If the master reports it is no longer scanning (or the RPC failed —
 * presumed target crash), the master is dropped from the slave's
 * llsd_master_list. Always releases the references taken when the
 * request was set up (llst, com, export).
 *
 * NOTE(review): the return-type line and some branch lines are elided
 * in this listing.
 */
3656 lfsck_layout_slave_async_interpret(const struct lu_env *env,
3657 struct ptlrpc_request *req,
3660 struct lfsck_layout_slave_async_args *llsaa = args;
3661 struct obd_export *exp = llsaa->llsaa_exp;
3662 struct lfsck_component *com = llsaa->llsaa_com;
3663 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
3664 struct lfsck_layout_slave_data *llsd = com->lc_data;
3668 /* It is quite probably caused by target crash,
3669 * to make the LFSCK can go ahead, assume that
3670 * the target finished the LFSCK processing. */
3673 struct lfsck_reply *lr;
3675 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
/* Master still scanning: keep it on the list; otherwise fall through
 * to remove it below. */
3676 if (lr->lr_status != LS_SCANNING_PHASE1 &&
3677 lr->lr_status != LS_SCANNING_PHASE2)
3681 lfsck_layout_llst_del(llsd, llst);
/* Release references taken at request-issue time. */
3682 lfsck_layout_llst_put(llst);
3683 lfsck_component_put(env, com);
3684 class_export_put(exp);
/*
 * Build and enqueue an async LFSCK_QUERY RPC to one master (MDT),
 * to be interpreted by lfsck_layout_slave_async_interpret().
 *
 * Takes a reference on @com (released by the interpret callback);
 * the caller's llst/exp references are likewise consumed on the
 * success path via the callback.
 *
 * NOTE(review): some lines (rc declaration, NULL checks, *tmp = *lr
 * copy, RETURN) are elided in this listing.
 */
3689 static int lfsck_layout_async_query(const struct lu_env *env,
3690 struct lfsck_component *com,
3691 struct obd_export *exp,
3692 struct lfsck_layout_slave_target *llst,
3693 struct lfsck_request *lr,
3694 struct ptlrpc_request_set *set)
3696 struct lfsck_layout_slave_async_args *llsaa;
3697 struct ptlrpc_request *req;
3698 struct lfsck_request *tmp;
3702 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
3706 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
3708 ptlrpc_request_free(req);
3712 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3714 ptlrpc_request_set_replen(req);
/* Stash the arguments the interpret callback will need. */
3716 llsaa = ptlrpc_req_async_args(req);
3717 llsaa->llsaa_exp = exp;
3718 llsaa->llsaa_com = lfsck_component_get(com);
3719 llsaa->llsaa_llst = llst;
3720 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
3721 ptlrpc_set_add_req(set, req);
/*
 * Build and enqueue a one-way async LFSCK_NOTIFY RPC on @set; no
 * interpret callback is installed, the caller waits on the set.
 *
 * NOTE(review): some lines (rc declaration, NULL check, *tmp = *lr
 * copy, RETURN) are elided in this listing.
 */
3726 static int lfsck_layout_async_notify(const struct lu_env *env,
3727 struct obd_export *exp,
3728 struct lfsck_request *lr,
3729 struct ptlrpc_request_set *set)
3731 struct ptlrpc_request *req;
3732 struct lfsck_request *tmp;
3736 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
3740 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
3742 ptlrpc_request_free(req);
3746 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3748 ptlrpc_request_set_replen(req);
3749 ptlrpc_set_add_req(set, req);
/*
 * From the OST (slave) side, asynchronously query every known master
 * MDT on llsd_master_list for its LFSCK status, then wait for all
 * replies. Masters that cannot be reached are removed from the list.
 *
 * Uses llsd_touch_gen to visit each master exactly once per pass:
 * visited entries are stamped with the new generation and rotated to
 * the list tail.
 *
 * NOTE(review): the return-type line and some lines (rc/rc1
 * declarations, error branches, RETURN) are elided in this listing.
 */
3755 lfsck_layout_slave_query_master(const struct lu_env *env,
3756 struct lfsck_component *com)
3758 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3759 struct lfsck_instance *lfsck = com->lc_lfsck;
3760 struct lfsck_layout_slave_data *llsd = com->lc_data;
3761 struct lfsck_layout_slave_target *llst;
3762 struct obd_export *exp;
3763 struct ptlrpc_request_set *set;
3768 set = ptlrpc_prep_set();
3772 memset(lr, 0, sizeof(*lr));
3773 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3774 lr->lr_event = LE_QUERY;
3775 lr->lr_active = LT_LAYOUT;
/* New generation: a stamped entry means "already visited this pass". */
3777 llsd->llsd_touch_gen++;
3778 spin_lock(&llsd->llsd_lock);
3779 while (!list_empty(&llsd->llsd_master_list)) {
3780 llst = list_entry(llsd->llsd_master_list.next,
3781 struct lfsck_layout_slave_target,
3783 if (llst->llst_gen == llsd->llsd_touch_gen)
3786 llst->llst_gen = llsd->llsd_touch_gen;
3787 list_del(&llst->llst_list);
3788 list_add_tail(&llst->llst_list,
3789 &llsd->llsd_master_list);
3790 atomic_inc(&llst->llst_ref);
/* Drop the spinlock while issuing the (potentially blocking) RPC. */
3791 spin_unlock(&llsd->llsd_lock);
3793 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP export for this master: drop it from the list. */
3796 lfsck_layout_llst_del(llsd, llst);
3797 lfsck_layout_llst_put(llst);
3798 spin_lock(&llsd->llsd_lock);
3802 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
3804 CERROR("%s: slave fail to query %s for layout: "
3805 "rc = %d\n", lfsck_lfsck2name(lfsck),
3806 exp->exp_obd->obd_name, rc);
3808 lfsck_layout_llst_put(llst);
3809 class_export_put(exp);
3811 spin_lock(&llsd->llsd_lock);
3813 spin_unlock(&llsd->llsd_lock);
3815 rc = ptlrpc_set_wait(set);
3816 ptlrpc_set_destroy(set);
3818 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * From the OST (slave) side, asynchronously notify every known master
 * MDT of @event with status @result, then wait for all requests to
 * complete. Structure mirrors lfsck_layout_slave_query_master(): the
 * llsd_touch_gen stamp guarantees one visit per master per pass.
 *
 * NOTE(review): the return-type line and some lines (rc declaration,
 * error branches, RETURN) are elided in this listing.
 *
 * \param[in] env	execution environment
 * \param[in] com	layout LFSCK component
 * \param[in] event	LFSCK event to send (e.g. phase transitions)
 * \param[in] result	status code to carry in the notification
 */
3822 lfsck_layout_slave_notify_master(const struct lu_env *env,
3823 struct lfsck_component *com,
3824 enum lfsck_events event, int result)
3826 struct lfsck_instance *lfsck = com->lc_lfsck;
3827 struct lfsck_layout_slave_data *llsd = com->lc_data;
3828 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3829 struct lfsck_layout_slave_target *llst;
3830 struct obd_export *exp;
3831 struct ptlrpc_request_set *set;
3835 set = ptlrpc_prep_set();
3839 memset(lr, 0, sizeof(*lr));
3840 lr->lr_event = event;
3841 lr->lr_flags = LEF_FROM_OST;
3842 lr->lr_status = result;
3843 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3844 lr->lr_active = LT_LAYOUT;
/* New generation: a stamped entry means "already visited this pass". */
3845 llsd->llsd_touch_gen++;
3846 spin_lock(&llsd->llsd_lock);
3847 while (!list_empty(&llsd->llsd_master_list)) {
3848 llst = list_entry(llsd->llsd_master_list.next,
3849 struct lfsck_layout_slave_target,
3851 if (llst->llst_gen == llsd->llsd_touch_gen)
3854 llst->llst_gen = llsd->llsd_touch_gen;
3855 list_del(&llst->llst_list);
3856 list_add_tail(&llst->llst_list,
3857 &llsd->llsd_master_list);
3858 atomic_inc(&llst->llst_ref);
/* Drop the spinlock while issuing the (potentially blocking) RPC. */
3859 spin_unlock(&llsd->llsd_lock);
3861 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP export for this master: drop it from the list. */
3864 lfsck_layout_llst_del(llsd, llst);
3865 lfsck_layout_llst_put(llst);
3866 spin_lock(&llsd->llsd_lock);
3870 rc = lfsck_layout_async_notify(env, exp, lr, set);
3872 CERROR("%s: slave fail to notify %s for layout: "
3873 "rc = %d\n", lfsck_lfsck2name(lfsck),
3874 exp->exp_obd->obd_name, rc);
3875 lfsck_layout_llst_put(llst);
3876 class_export_put(exp);
3877 spin_lock(&llsd->llsd_lock);
3879 spin_unlock(&llsd->llsd_lock);
3881 ptlrpc_set_wait(set);
3882 ptlrpc_set_destroy(set);
3888 * \ret -ENODATA: unrecognized stripe
3889 * \ret = 0 : recognized stripe
3890 * \ret < 0 : other failures
/*
 * On the MDT (master) side, check whether OST-object @cfid is a
 * recognized stripe of MDT-object @pfid at the stripe index encoded
 * in pfid->f_stripe_idx.
 *
 * NOTE(review): some lines (rc/i/count/magic declarations, error
 * checks, the final RETURN) are elided in this listing.
 *
 * \retval -ENODATA	unrecognized stripe (not found, or wrong index)
 * \retval 0		recognized stripe at the expected index
 * \retval negative	other failures
 */
3892 static int lfsck_layout_master_check_pairs(const struct lu_env *env,
3893 struct lfsck_component *com,
3894 struct lu_fid *cfid,
3895 struct lu_fid *pfid)
3897 struct lfsck_thread_info *info = lfsck_env_info(env);
3898 struct lu_buf *buf = &info->lti_big_buf;
3899 struct ost_id *oi = &info->lti_oi;
3900 struct dt_object *obj;
3901 struct lov_mds_md_v1 *lmm;
3902 struct lov_ost_data_v1 *objs;
/* f_stripe_idx carries the stripe index, not a real FID version. */
3903 __u32 idx = pfid->f_stripe_idx;
3911 obj = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
3913 RETURN(PTR_ERR(obj));
3915 dt_read_lock(env, obj, 0);
3916 if (unlikely(!dt_object_exists(obj)))
3917 GOTO(unlock, rc = -ENOENT);
3919 rc = lfsck_layout_get_lovea(env, obj, buf, NULL);
3924 GOTO(unlock, rc = -ENODATA);
3927 rc = lfsck_layout_verify_header(lmm);
3931 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3932 * been verified in lfsck_layout_verify_header() already. If some
3933 * new magic introduced in the future, then layout LFSCK needs to
3934 * be updated also. */
3935 magic = le32_to_cpu(lmm->lmm_magic);
3936 if (magic == LOV_MAGIC_V1) {
3937 objs = &(lmm->lmm_objects[0]);
3939 LASSERT(magic == LOV_MAGIC_V3);
3940 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Compare by ost_id: convert the child FID once, then scan stripes. */
3943 fid_to_ostid(cfid, oi);
3944 count = le16_to_cpu(lmm->lmm_stripe_count);
3945 for (i = 0; i < count; i++, objs++) {
3948 ostid_le_to_cpu(&objs->l_ost_oi, &oi2);
/* Found the object: recognized only if it sits at the expected
 * stripe index. */
3949 if (memcmp(oi, &oi2, sizeof(*oi)) == 0)
3950 GOTO(unlock, rc = (i != idx ? -ENODATA : 0));
3953 GOTO(unlock, rc = -ENODATA);
3956 dt_read_unlock(env, obj);
3957 lu_object_put(env, &obj->do_lu);
3963 * The LFSCK-on-OST will ask the LFSCK-on-MDT to check whether the given
3964 * MDT-object/OST-object pairs match or not to aviod transfer MDT-object
3965 * layout EA from MDT to OST. On one hand, the OST no need to understand
3966 * the layout EA structure; on the other hand, it may cause trouble when
3967 * transfer large layout EA from MDT to OST via normal OUT RPC.
3969 * \ret > 0: unrecognized stripe
3970 * \ret = 0: recognized stripe
3971 * \ret < 0: other failures
/*
 * OST-side half of MDT/OST pair verification: locate the MDT that owns the
 * claimed parent FID via the FLD, then send it an LE_PAIRS_VERIFY LFSCK
 * notify RPC carrying both FIDs so the MDT can check them against its
 * layout EA (see lfsck_layout_master_check_pairs()).
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] cfid	FID of the OST-object itself
 * \param[in] pfid	FID of the claimed parent MDT-object
 *
 * Per the preceding block comment: > 0 means unrecognized stripe,
 * 0 means recognized, < 0 means other failure.
 */
3973 static int lfsck_layout_slave_check_pairs(const struct lu_env *env,
3974 struct lfsck_component *com,
3975 struct lu_fid *cfid,
3976 struct lu_fid *pfid)
3978 struct lfsck_instance *lfsck = com->lc_lfsck;
3979 struct obd_device *obd = lfsck->li_obd;
3980 struct seq_server_site *ss =
3981 lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
3982 struct obd_export *exp = NULL;
3983 struct ptlrpc_request *req = NULL;
3984 struct lfsck_request *lr;
3985 struct lu_seq_range range = { 0 };
/* IDIF FIDs are OST-local and carry no MDT parent mapping to verify. */
3989 if (unlikely(fid_is_idif(pfid)))
/* Resolve which MDT serves the parent FID's sequence. */
3992 fld_range_set_any(&range);
3993 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(pfid), &range);
3995 RETURN(rc == -ENOENT ? 1 : rc);
3997 if (unlikely(!fld_range_is_mdt(&range)))
4000 exp = lustre_find_lwp_by_index(obd->obd_name, range.lsr_index);
4001 if (unlikely(exp == NULL))
/* The target MDT must advertise LFSCK support on its connection. */
4004 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
4005 GOTO(out, rc = -EOPNOTSUPP);
4007 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
4009 GOTO(out, rc = -ENOMEM);
4011 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
4013 ptlrpc_request_free(req);
4018 lr = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
4019 memset(lr, 0, sizeof(*lr));
4020 lr->lr_event = LE_PAIRS_VERIFY;
4021 lr->lr_active = LT_LAYOUT;
4022 lr->lr_fid = *cfid; /* OST-object itself FID. */
4023 lr->lr_fid2 = *pfid; /* The claimed parent FID. */
4025 ptlrpc_request_set_replen(req);
4026 rc = ptlrpc_queue_wait(req);
4027 ptlrpc_req_finished(req);
/* The MDT answering "no such pair" maps to the positive
 * "unrecognized stripe" result for the caller. */
4029 if (rc == -ENOENT || rc == -ENODATA)
4036 class_export_put(exp);
/*
 * Repair the parent-FID ("filter fid") xattr of a local OST-object:
 * rewrite XATTR_NAME_FID on the object named by lr->lr_fid so that its
 * ff_parent records lr->lr_fid2, inside a single local transaction.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] lr	LFSCK request carrying the object FID (lr_fid) and
 *			the correct parent FID (lr_fid2)
 *
 * An object that no longer exists is silently treated as success (rc = 0).
 */
4041 static int lfsck_layout_slave_repair_pfid(const struct lu_env *env,
4042 struct lfsck_component *com,
4043 struct lfsck_request *lr)
4045 struct lfsck_thread_info *info = lfsck_env_info(env);
4046 struct filter_fid *ff = &info->lti_new_pfid;
4048 struct dt_device *dev = com->lc_lfsck->li_bottom;
4049 struct dt_object *obj;
4050 struct thandle *th = NULL;
4054 obj = lfsck_object_find_by_dev(env, dev, &lr->lr_fid);
4056 RETURN(PTR_ERR(obj));
/* On-disk filter_fid stores the parent FID in little-endian form. */
4058 fid_cpu_to_le(&ff->ff_parent, &lr->lr_fid2);
4059 buf = lfsck_buf_get(env, ff, sizeof(*ff));
4060 dt_write_lock(env, obj, 0);
4061 if (unlikely(!dt_object_exists(obj)))
4062 GOTO(unlock, rc = 0);
/* declare + start + set follows the standard dt transaction pattern. */
4064 th = dt_trans_create(env, dev);
4066 GOTO(unlock, rc = PTR_ERR(th));
4068 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th);
4072 rc = dt_trans_start_local(env, dev, th);
4076 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th, BYPASS_CAPA);
4081 dt_trans_stop(env, dev, th);
4084 dt_write_unlock(env, obj);
4085 lu_object_put(env, &obj->do_lu);
/*
 * Reset the in-RAM layout LFSCK trace file to a pristine LS_INIT state and
 * persist it.  When @init is false the cumulative success count and the
 * last-completion timestamp survive the reset; a full (re)initialization
 * clears everything.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] init	true for first-time initialization, false for a
 *			reset that preserves historical statistics
 */
4092 static int lfsck_layout_reset(const struct lu_env *env,
4093 struct lfsck_component *com, bool init)
4095 struct lfsck_layout *lo = com->lc_file_ram;
4098 down_write(&com->lc_sem);
4100 memset(lo, 0, com->lc_file_size);
/* Non-init reset: keep the historical counters across the wipe. */
4102 __u32 count = lo->ll_success_count;
4103 __u64 last_time = lo->ll_time_last_complete;
4105 memset(lo, 0, com->lc_file_size);
4106 lo->ll_success_count = count;
4107 lo->ll_time_last_complete = last_time;
4110 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
4111 lo->ll_status = LS_INIT;
4113 rc = lfsck_layout_store(env, com);
4114 up_write(&com->lc_sem);
/*
 * Account a failure during layout scanning: bump the per-checkpoint
 * checked counter when this failure also represents a newly scanned
 * object, and record the failure position/statistics in the trace file.
 *
 * \param[in] env		thread execution environment
 * \param[in] com		the layout LFSCK component
 * \param[in] new_checked	whether the failed object should also be
 *				counted as newly checked
 */
4119 static void lfsck_layout_fail(const struct lu_env *env,
4120 struct lfsck_component *com, bool new_checked)
4122 struct lfsck_layout *lo = com->lc_file_ram;
4124 down_write(&com->lc_sem);
4126 com->lc_new_checked++;
4127 lfsck_layout_record_failure(env, com->lc_lfsck, lo);
4128 up_write(&com->lc_sem);
/*
 * Master-side checkpoint: wait until the assistant thread has drained the
 * prefetch request list (so the OIT cookie reflects fully-processed
 * objects), then record the current scan position, accumulated phase-1 run
 * time and checked-object counts into the trace file and store it.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] init	true when recording the latest-start position at
 *			scan startup rather than a periodic checkpoint
 */
4131 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
4132 struct lfsck_component *com, bool init)
4134 struct lfsck_instance *lfsck = com->lc_lfsck;
4135 struct lfsck_layout *lo = com->lc_file_ram;
4136 struct lfsck_layout_master_data *llmd = com->lc_data;
4137 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4138 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4139 struct l_wait_info lwi = { 0 };
/* Nothing new to checkpoint unless this is the initial record. */
4142 if (com->lc_new_checked == 0 && !init)
/* Let the assistant catch up before snapshotting the position. */
4145 l_wait_event(mthread->t_ctl_waitq,
4146 list_empty(&llmd->llmd_req_list) ||
4147 !thread_is_running(mthread) ||
4148 thread_is_stopped(athread),
4151 if (!thread_is_running(mthread) || thread_is_stopped(athread))
4154 down_write(&com->lc_sem);
4156 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4158 lo->ll_pos_last_checkpoint =
4159 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the elapsed jiffies to the nearest second. */
4160 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4161 HALF_SEC - lfsck->li_time_last_checkpoint);
4162 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4163 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4164 com->lc_new_checked = 0;
4167 rc = lfsck_layout_store(env, com);
4168 up_write(&com->lc_sem);
/*
 * Slave-side checkpoint: same bookkeeping as the master variant but with
 * no assistant thread to synchronize with — record the current OIT
 * position, phase-1 run time and checked counts, then persist the trace
 * file.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] init	true when recording the latest-start position
 */
4173 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
4174 struct lfsck_component *com, bool init)
4176 struct lfsck_instance *lfsck = com->lc_lfsck;
4177 struct lfsck_layout *lo = com->lc_file_ram;
4180 if (com->lc_new_checked == 0 && !init)
4183 down_write(&com->lc_sem);
4186 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4188 lo->ll_pos_last_checkpoint =
4189 lfsck->li_pos_current.lp_oit_cookie;
4190 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4191 HALF_SEC - lfsck->li_time_last_checkpoint);
4192 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4193 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4194 com->lc_new_checked = 0;
4197 rc = lfsck_layout_store(env, com);
4199 up_write(&com->lc_sem);
/*
 * Common preparation for starting a layout LFSCK run (master or slave):
 * decide the starting state and OIT scan position based on the previous
 * run's status, resetting the trace file when the last run completed or
 * when orphan handling forces a full rescan.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] start	optional user start parameters (may be NULL);
 *			LPF_ORPHAN forces scanning from the beginning
 */
4204 static int lfsck_layout_prep(const struct lu_env *env,
4205 struct lfsck_component *com,
4206 struct lfsck_start *start)
4208 struct lfsck_instance *lfsck = com->lc_lfsck;
4209 struct lfsck_layout *lo = com->lc_file_ram;
4210 struct lfsck_position *pos = &com->lc_pos_start;
4212 fid_zero(&pos->lp_dir_parent);
4213 pos->lp_dir_cookie = 0;
4214 if (lo->ll_status == LS_COMPLETED ||
4215 lo->ll_status == LS_PARTIAL ||
4216 /* To handle orphan, must scan from the beginning. */
4217 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
4220 rc = lfsck_layout_reset(env, com, false);
4222 rc = lfsck_set_param(env, lfsck, start, true);
4228 down_write(&com->lc_sem);
4229 lo->ll_time_latest_start = cfs_time_current_sec();
4230 spin_lock(&lfsck->li_lock);
4231 if (lo->ll_flags & LF_SCANNED_ONCE) {
/* Phase 1 already finished once: go straight to phase 2 unless a
 * dropped dryrun requires re-scanning from the first inconsistency. */
4232 if (!lfsck->li_drop_dryrun ||
4233 lo->ll_pos_first_inconsistent == 0) {
4234 lo->ll_status = LS_SCANNING_PHASE2;
4235 list_del_init(&com->lc_link);
4236 list_add_tail(&com->lc_link,
4237 &lfsck->li_list_double_scan);
4238 pos->lp_oit_cookie = 0;
/* Re-run phase 1 with all statistics cleared. */
4242 lo->ll_status = LS_SCANNING_PHASE1;
4243 lo->ll_run_time_phase1 = 0;
4244 lo->ll_run_time_phase2 = 0;
4245 lo->ll_objs_checked_phase1 = 0;
4246 lo->ll_objs_checked_phase2 = 0;
4247 lo->ll_objs_failed_phase1 = 0;
4248 lo->ll_objs_failed_phase2 = 0;
4249 for (i = 0; i < LLIT_MAX; i++)
4250 lo->ll_objs_repaired[i] = 0;
4252 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4253 fid_zero(&com->lc_fid_latest_scanned_phase2);
/* Fresh or resumed phase 1: continue after the last checkpoint,
 * or from the first recorded inconsistency after a dropped dryrun. */
4256 lo->ll_status = LS_SCANNING_PHASE1;
4257 if (!lfsck->li_drop_dryrun ||
4258 lo->ll_pos_first_inconsistent == 0)
4259 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
4261 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4263 spin_unlock(&lfsck->li_lock);
4264 up_write(&com->lc_sem);
/*
 * Slave-side (OST) preparation: run the common prep, notify the OFD layer
 * if LAST_ID rebuilding is needed, register the requesting MDT in the
 * master list, and set up the rbtree used for orphan handling when the
 * start flags ask for it.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] lsp	start parameters, including the requesting MDT
 *			index (lsp_index) when lsp_index_valid is set
 */
4269 static int lfsck_layout_slave_prep(const struct lu_env *env,
4270 struct lfsck_component *com,
4271 struct lfsck_start_param *lsp)
4273 struct lfsck_layout_slave_data *llsd = com->lc_data;
4274 struct lfsck_instance *lfsck = com->lc_lfsck;
4275 struct lfsck_layout *lo = com->lc_file_ram;
4276 struct lfsck_start *start = lsp->lsp_start;
4279 rc = lfsck_layout_prep(env, com, start);
/* A previously detected LAST_ID crash with no master registered yet:
 * tell the OFD to start LAST_ID rebuilding. */
4283 if (lo->ll_flags & LF_CRASHED_LASTID &&
4284 list_empty(&llsd->llsd_master_list)) {
4285 LASSERT(lfsck->li_out_notify != NULL);
4287 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4288 LE_LASTID_REBUILDING);
4291 if (!lsp->lsp_index_valid)
4294 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
/* Orphan handling needs the in-memory rbtree of known OST-objects. */
4295 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
4296 LASSERT(!llsd->llsd_rbtree_valid);
4298 write_lock(&llsd->llsd_rb_lock);
4299 rc = lfsck_rbtree_setup(env, com);
4300 write_unlock(&llsd->llsd_rb_lock);
/*
 * Master-side (MDT) preparation: run the common prep, reset the assistant
 * bookkeeping, and spawn the "lfsck_layout" assistant kthread, waiting
 * until it reports running (or stopped, in which case its status becomes
 * the return value).
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] lsp	start parameters forwarded to the common prep and
 *			to the assistant thread arguments
 */
4306 static int lfsck_layout_master_prep(const struct lu_env *env,
4307 struct lfsck_component *com,
4308 struct lfsck_start_param *lsp)
4310 struct lfsck_instance *lfsck = com->lc_lfsck;
4311 struct lfsck_layout_master_data *llmd = com->lc_data;
4312 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4313 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4314 struct lfsck_thread_args *lta;
4315 struct task_struct *task;
4319 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
/* Clear all assistant state left over from any previous run. */
4323 llmd->llmd_assistant_status = 0;
4324 llmd->llmd_post_result = 0;
4325 llmd->llmd_to_post = 0;
4326 llmd->llmd_to_double_scan = 0;
4327 llmd->llmd_in_double_scan = 0;
4328 llmd->llmd_exit = 0;
4329 thread_set_flags(athread, 0);
4331 lta = lfsck_thread_args_init(lfsck, com, lsp);
4333 RETURN(PTR_ERR(lta));
4335 task = kthread_run(lfsck_layout_assistant, lta, "lfsck_layout");
4338 CERROR("%s: Cannot start LFSCK layout assistant thread: "
4339 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
4340 lfsck_thread_args_fini(lta);
4342 struct l_wait_info lwi = { 0 };
/* Block until the assistant is actually running (or failed early). */
4344 l_wait_event(mthread->t_ctl_waitq,
4345 thread_is_running(athread) ||
4346 thread_is_stopped(athread),
4348 if (unlikely(!thread_is_running(athread)))
4349 rc = llmd->llmd_assistant_status;
4357 /* Pre-fetch the attribute for each stripe in the given layout EA. */
/*
 * For each stripe in @parent's layout EA @lmm, locate the OST-object,
 * declare pre-fetch of its attributes and parent-FID xattr, and queue an
 * lfsck_layout_req on llmd_req_list for the assistant thread to verify.
 * Queueing is throttled by the bookmark's async window so the prefetch
 * pipeline cannot grow without bound.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] parent	the MDT-object whose stripes are scanned
 * \param[in] lmm	the parent's layout EA (LOV_MAGIC_V1 or V3, already
 *			verified by lfsck_layout_verify_header())
 */
4358 static int lfsck_layout_scan_stripes(const struct lu_env *env,
4359 struct lfsck_component *com,
4360 struct dt_object *parent,
4361 struct lov_mds_md_v1 *lmm)
4363 struct lfsck_thread_info *info = lfsck_env_info(env);
4364 struct lfsck_instance *lfsck = com->lc_lfsck;
4365 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4366 struct lfsck_layout *lo = com->lc_file_ram;
4367 struct lfsck_layout_master_data *llmd = com->lc_data;
4368 struct lfsck_layout_object *llo = NULL;
4369 struct lov_ost_data_v1 *objs;
4370 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
4371 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4372 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4373 struct l_wait_info lwi = { 0 };
4382 buf = lfsck_buf_get(env, &info->lti_old_pfid,
4383 sizeof(struct filter_fid_old));
4384 count = le16_to_cpu(lmm->lmm_stripe_count);
4385 gen = le16_to_cpu(lmm->lmm_layout_gen);
4386 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4387 * been verified in lfsck_layout_verify_header() already. If some
4388 * new magic introduced in the future, then layout LFSCK needs to
4389 * be updated also. */
4390 magic = le32_to_cpu(lmm->lmm_magic);
4391 if (magic == LOV_MAGIC_V1) {
4392 objs = &(lmm->lmm_objects[0]);
4394 LASSERT(magic == LOV_MAGIC_V3);
4395 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
4398 for (i = 0; i < count; i++, objs++) {
4399 struct lu_fid *fid = &info->lti_fid;
4400 struct ost_id *oi = &info->lti_oi;
4401 struct lfsck_layout_req *llr;
4402 struct lfsck_tgt_desc *tgt = NULL;
4403 struct dt_object *cobj = NULL;
4405 le32_to_cpu(objs->l_ost_idx);
4406 bool wakeup = false;
/* Holes in a released/sparse layout carry no real OST-object. */
4408 if (is_dummy_lov_ost_data(objs))
/* Throttle: wait while the prefetch pipeline is at the window
 * limit (lb_async_windows == 0 means "unlimited"). */
4411 l_wait_event(mthread->t_ctl_waitq,
4412 bk->lb_async_windows == 0 ||
4413 llmd->llmd_prefetched < bk->lb_async_windows ||
4414 !thread_is_running(mthread) ||
4415 thread_is_stopped(athread),
4418 if (unlikely(!thread_is_running(mthread)) ||
4419 thread_is_stopped(athread))
4422 ostid_le_to_cpu(&objs->l_ost_oi, oi);
4423 ostid_to_fid(fid, oi, index);
4424 tgt = lfsck_tgt_get(ltds, index);
4425 if (unlikely(tgt == NULL)) {
4426 CERROR("%s: Cannot talk with OST %x which did not join "
4427 "the layout LFSCK.\n",
4428 lfsck_lfsck2name(lfsck), index);
/* An unreachable OST means this scan cannot be complete. */
4429 lo->ll_flags |= LF_INCOMPLETE;
4433 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
/* Declare the remote reads so they can be batched/prefetched. */
4439 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
4443 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
4449 llo = lfsck_layout_object_init(env, parent, gen);
4456 llr = lfsck_layout_req_init(llo, cobj, index, i);
4463 spin_lock(&llmd->llmd_lock);
4464 if (llmd->llmd_assistant_status < 0) {
4465 spin_unlock(&llmd->llmd_lock);
4466 lfsck_layout_req_fini(env, llr);
4468 RETURN(llmd->llmd_assistant_status);
4471 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
4472 if (llmd->llmd_prefetched == 0)
4475 llmd->llmd_prefetched++;
4476 spin_unlock(&llmd->llmd_lock);
4478 wake_up_all(&athread->t_ctl_waitq);
4481 down_write(&com->lc_sem);
4482 com->lc_new_checked++;
4484 lfsck_layout_record_failure(env, lfsck, lo);
4485 up_write(&com->lc_sem);
4487 if (cobj != NULL && !IS_ERR(cobj))
4488 lu_object_put(env, &cobj->do_lu);
4490 if (likely(tgt != NULL))
4493 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
4500 if (llo != NULL && !IS_ERR(llo))
4501 lfsck_layout_object_put(env, llo);
4506 /* For the given object, read its layout EA locally. For each stripe, pre-fetch
4507 * the OST-object's attribute and generate an structure lfsck_layout_req on the
4508 * list ::llmd_req_list.
4510 * For each request on above list, the lfsck_layout_assistant thread compares
4511 * the OST side attribute with local attribute, if inconsistent, then repair it.
4513 * All above processing is async mode with pipeline. */
/*
 * OIT callback on the master: for a regular file, read its LOV EA, verify
 * the header, repair an inconsistent lmm_oi in place (under layout/xattr
 * ldlm lock and a local transaction, unless in dryrun mode), and finally
 * hand the layout to lfsck_layout_scan_stripes() to queue per-stripe
 * verification requests.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] obj	the MDT-object currently visited by the OIT scan
 */
4514 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
4515 struct lfsck_component *com,
4516 struct dt_object *obj)
4518 struct lfsck_thread_info *info = lfsck_env_info(env);
4519 struct ost_id *oi = &info->lti_oi;
4520 struct lfsck_layout *lo = com->lc_file_ram;
4521 struct lfsck_layout_master_data *llmd = com->lc_data;
4522 struct lfsck_instance *lfsck = com->lc_lfsck;
4523 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4524 struct thandle *handle = NULL;
4525 struct lu_buf *buf = &info->lti_big_buf;
4526 struct lov_mds_md_v1 *lmm = NULL;
4527 struct dt_device *dev = lfsck->li_bottom;
4528 struct lustre_handle lh = { 0 };
4529 ssize_t buflen = buf->lb_len;
4531 bool locked = false;
4532 bool stripe = false;
/* Only regular files carry LOV layout EAs. */
4535 if (!S_ISREG(lfsck_object_type(obj)))
4538 if (llmd->llmd_assistant_status < 0)
4539 GOTO(out, rc = -ESRCH);
/* Expected on-disk lmm_oi derived from the object's own FID. */
4541 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
4542 lmm_oi_cpu_to_le(oi, oi);
4543 dt_read_lock(env, obj, 0);
4547 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
4553 rc = lfsck_layout_verify_header(lmm);
4554 /* If the LOV EA crashed, then it is possible to be rebuilt later
4555 * when handle orphan OST-objects. */
4559 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
4560 GOTO(out, stripe = true);
4562 /* Inconsistent lmm_oi, should be repaired. */
4563 CDEBUG(D_LFSCK, "Repair bad lmm_oi for "DFID"\n",
4564 PFID(lfsck_dto2fid(obj)));
4566 if (bk->lb_param & LPF_DRYRUN) {
/* Dryrun: count the would-be repair, do not touch the EA. */
4567 down_write(&com->lc_sem);
4568 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4569 up_write(&com->lc_sem);
4571 GOTO(out, stripe = true);
/* Take the ldlm layout+xattr lock before rewriting the EA; the
 * dt read lock must be dropped first to respect lock ordering. */
4574 if (!lustre_handle_is_used(&lh)) {
4575 dt_read_unlock(env, obj);
4577 buf->lb_len = buflen;
4578 rc = lfsck_layout_lock(env, com, obj, &lh,
4579 MDS_INODELOCK_LAYOUT |
4580 MDS_INODELOCK_XATTR);
4584 handle = dt_trans_create(env, dev);
4586 GOTO(out, rc = PTR_ERR(handle));
4588 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4589 LU_XATTR_REPLACE, handle);
4593 rc = dt_trans_start_local(env, dev, handle);
4597 dt_write_lock(env, obj, 0);
4604 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4605 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
4609 down_write(&com->lc_sem);
4610 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4611 up_write(&com->lc_sem);
4613 GOTO(out, stripe = true);
/* Cleanup: drop whichever dt lock is held, stop the transaction,
 * release the ldlm lock, then scan stripes if the EA is usable. */
4617 if (lustre_handle_is_used(&lh))
4618 dt_write_unlock(env, obj);
4620 dt_read_unlock(env, obj);
4623 if (handle != NULL && !IS_ERR(handle))
4624 dt_trans_stop(env, dev, handle);
4626 lfsck_layout_unlock(&lh);
4628 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
4630 down_write(&com->lc_sem);
4631 com->lc_new_checked++;
4633 lfsck_layout_record_failure(env, lfsck, lo);
4634 up_write(&com->lc_sem);
4636 buf->lb_len = buflen;
/*
 * OIT callback on the slave (OST): record the visited OST-object in the
 * orphan rbtree bitmap, then track the maximum object id seen per
 * sequence.  If an object id exceeds the on-disk LAST_ID, reload LAST_ID
 * (the OFD may have created objects during the scan) and, if still
 * exceeded, mark LF_CRASHED_LASTID and ask the OFD to rebuild it.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] obj	the OST-object currently visited by the OIT scan
 */
4641 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
4642 struct lfsck_component *com,
4643 struct dt_object *obj)
4645 struct lfsck_instance *lfsck = com->lc_lfsck;
4646 struct lfsck_layout *lo = com->lc_file_ram;
4647 const struct lu_fid *fid = lfsck_dto2fid(obj);
4648 struct lfsck_layout_slave_data *llsd = com->lc_data;
4649 struct lfsck_layout_seq *lls;
4655 LASSERT(llsd != NULL);
4657 lfsck_rbtree_update_bitmap(env, com, fid, false);
4659 down_write(&com->lc_sem);
/* Only IDIF and normal OST-object FIDs participate in LAST_ID checks. */
4660 if (fid_is_idif(fid))
4662 else if (!fid_is_norm(fid) ||
4663 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
4664 GOTO(unlock, rc = 0);
4667 com->lc_new_checked++;
4669 lls = lfsck_layout_seq_lookup(llsd, seq)
4672 if (unlikely(lls == NULL))
4673 GOTO(unlock, rc = -ENOMEM);
4675 INIT_LIST_HEAD(&lls->lls_list);
4677 rc = lfsck_layout_lastid_load(env, com, lls);
4679 lo->ll_objs_failed_phase1++;
4684 lfsck_layout_seq_insert(llsd, lls);
/* The LAST_ID object itself is not a data object to track. */
4687 if (unlikely(fid_is_last_id(fid)))
4688 GOTO(unlock, rc = 0);
4691 if (oid > lls->lls_lastid_known)
4692 lls->lls_lastid_known = oid;
4694 if (oid > lls->lls_lastid) {
4695 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
4696 /* OFD may create new objects during LFSCK scanning. */
4697 rc = lfsck_layout_lastid_reload(env, com, lls);
4698 if (unlikely(rc != 0))
4699 CWARN("%s: failed to reload LAST_ID for "LPX64
4701 lfsck_lfsck2name(com->lc_lfsck),
4703 if (oid <= lls->lls_lastid)
4704 GOTO(unlock, rc = 0);
/* LAST_ID really is behind: flag the crash and notify OFD. */
4706 LASSERT(lfsck->li_out_notify != NULL);
4708 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4709 LE_LASTID_REBUILDING);
4710 lo->ll_flags |= LF_CRASHED_LASTID;
4713 lls->lls_lastid = oid;
4717 GOTO(unlock, rc = 0);
4720 up_write(&com->lc_sem);
/*
 * Directory-entry callback for the layout component.  Layout LFSCK works
 * on the object-index (OIT) scan, so directory traversal entries need no
 * per-entry handling here (the body is a trivial success return).
 */
4725 static int lfsck_layout_exec_dir(const struct lu_env *env,
4726 struct lfsck_component *com,
4727 struct dt_object *obj,
4728 struct lu_dirent *ent)
/*
 * Master-side post-processing after phase 1 finishes (or fails): signal
 * the assistant with the result, wait for it to drain or stop, then move
 * the component to the proper state list (double-scan on success, idle on
 * stop/failure), fold the final phase-1 statistics into the trace file
 * and persist it.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] result	phase-1 result: > 0 success, 0 stopped/paused,
 *			< 0 failure
 * \param[in] init	true when called during (re)initialization, in
 *			which case the checkpoint position is not advanced
 */
4733 static int lfsck_layout_master_post(const struct lu_env *env,
4734 struct lfsck_component *com,
4735 int result, bool init)
4737 struct lfsck_instance *lfsck = com->lc_lfsck;
4738 struct lfsck_layout *lo = com->lc_file_ram;
4739 struct lfsck_layout_master_data *llmd = com->lc_data;
4740 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4741 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4742 struct l_wait_info lwi = { 0 };
4747 llmd->llmd_post_result = result;
4748 llmd->llmd_to_post = 1;
4749 if (llmd->llmd_post_result <= 0)
4750 llmd->llmd_exit = 1;
4752 wake_up_all(&athread->t_ctl_waitq);
/* On success wait for the request list to drain; otherwise wait for
 * the assistant to stop. */
4753 l_wait_event(mthread->t_ctl_waitq,
4754 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
4755 thread_is_stopped(athread),
4758 if (llmd->llmd_assistant_status < 0)
4759 result = llmd->llmd_assistant_status;
4761 down_write(&com->lc_sem);
4762 spin_lock(&lfsck->li_lock);
4763 /* When LFSCK failed, there may be some prefetched objects those are
4764 * not been processed yet, we do not know the exactly position, then
4765 * just restart from last check-point next time. */
4766 if (!init && !llmd->llmd_exit)
4767 lo->ll_pos_last_checkpoint =
4768 lfsck->li_pos_current.lp_oit_cookie;
4771 lo->ll_status = LS_SCANNING_PHASE2;
4772 lo->ll_flags |= LF_SCANNED_ONCE;
4773 lo->ll_flags &= ~LF_UPGRADE;
4774 list_del_init(&com->lc_link);
4775 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
4776 } else if (result == 0) {
/* li_status == 0 means a plain stop rather than a pause. */
4777 lo->ll_status = lfsck->li_status;
4778 if (lo->ll_status == 0)
4779 lo->ll_status = LS_STOPPED;
4780 if (lo->ll_status != LS_PAUSED) {
4781 list_del_init(&com->lc_link);
4782 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4785 lo->ll_status = LS_FAILED;
4786 list_del_init(&com->lc_link);
4787 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4789 spin_unlock(&lfsck->li_lock);
4792 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4793 HALF_SEC - lfsck->li_time_last_checkpoint);
4794 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4795 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4796 com->lc_new_checked = 0;
4799 rc = lfsck_layout_store(env, com);
4800 up_write(&com->lc_sem);
/*
 * Slave-side post-processing after phase 1: flush the in-memory LAST_ID
 * files, update status/flags and the component's state-list membership
 * exactly like the master variant, persist the trace file, then notify
 * the master MDT(s) that phase 1 is done.  The orphan rbtree is torn
 * down unless phase 2 will need it.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 * \param[in] result	phase-1 result: > 0 success, 0 stopped/paused,
 *			< 0 failure
 * \param[in] init	true during (re)initialization; the checkpoint
 *			position is then left untouched
 */
4805 static int lfsck_layout_slave_post(const struct lu_env *env,
4806 struct lfsck_component *com,
4807 int result, bool init)
4809 struct lfsck_instance *lfsck = com->lc_lfsck;
4810 struct lfsck_layout *lo = com->lc_file_ram;
4814 rc = lfsck_layout_lastid_store(env, com);
4818 LASSERT(lfsck->li_out_notify != NULL);
4820 down_write(&com->lc_sem);
4822 spin_lock(&lfsck->li_lock);
4824 lo->ll_pos_last_checkpoint =
4825 lfsck->li_pos_current.lp_oit_cookie;
4827 lo->ll_status = LS_SCANNING_PHASE2;
4828 lo->ll_flags |= LF_SCANNED_ONCE;
/* A completed phase 1 clears any previously flagged LAST_ID crash. */
4829 if (lo->ll_flags & LF_CRASHED_LASTID) {
4831 lo->ll_flags &= ~LF_CRASHED_LASTID;
4833 lo->ll_flags &= ~LF_UPGRADE;
4834 list_del_init(&com->lc_link);
4835 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
4836 } else if (result == 0) {
4837 lo->ll_status = lfsck->li_status;
4838 if (lo->ll_status == 0)
4839 lo->ll_status = LS_STOPPED;
4840 if (lo->ll_status != LS_PAUSED) {
4841 list_del_init(&com->lc_link);
4842 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4845 lo->ll_status = LS_FAILED;
4846 list_del_init(&com->lc_link);
4847 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4849 spin_unlock(&lfsck->li_lock);
4852 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4856 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4857 HALF_SEC - lfsck->li_time_last_checkpoint);
4858 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4859 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4860 com->lc_new_checked = 0;
4863 rc = lfsck_layout_store(env, com);
4865 up_write(&com->lc_sem);
/* Tell the master MDT that this OST finished phase 1. */
4867 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
4870 lfsck_rbtree_cleanup(env, com);
/*
 * Format the layout LFSCK status into @buf for procfs-style reporting:
 * general state (name/magic/status/flags/params), timestamps, scan
 * positions, repair statistics, and — depending on the current status —
 * phase-1 or phase-2 progress including average and real-time speeds.
 *
 * \param[in]  env	thread execution environment
 * \param[in]  com	the layout LFSCK component
 * \param[out] buf	output buffer
 * \param[in]  len	size of @buf in bytes
 *
 * NOTE(review): the elided lines between snprintf calls appear to advance
 * buf/len by each snprintf's return value — confirm against the full file.
 */
4875 static int lfsck_layout_dump(const struct lu_env *env,
4876 struct lfsck_component *com, char *buf, int len)
4878 struct lfsck_instance *lfsck = com->lc_lfsck;
4879 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4880 struct lfsck_layout *lo = com->lc_file_ram;
4885 down_read(&com->lc_sem);
4886 rc = snprintf(buf, len,
4887 "name: lfsck_layout\n"
4893 lfsck_status2names(lo->ll_status));
4899 rc = lfsck_bits_dump(&buf, &len, lo->ll_flags, lfsck_flags_names,
4904 rc = lfsck_bits_dump(&buf, &len, bk->lb_param, lfsck_param_names,
4909 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_complete,
4910 "time_since_last_completed");
4914 rc = lfsck_time_dump(&buf, &len, lo->ll_time_latest_start,
4915 "time_since_latest_start");
4919 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_checkpoint,
4920 "time_since_last_checkpoint");
4924 rc = snprintf(buf, len,
4925 "latest_start_position: "LPU64"\n"
4926 "last_checkpoint_position: "LPU64"\n"
4927 "first_failure_position: "LPU64"\n",
4928 lo->ll_pos_latest_start,
4929 lo->ll_pos_last_checkpoint,
4930 lo->ll_pos_first_inconsistent);
4937 rc = snprintf(buf, len,
4938 "success_count: %u\n"
4939 "repaired_dangling: "LPU64"\n"
4940 "repaired_unmatched_pair: "LPU64"\n"
4941 "repaired_multiple_referenced: "LPU64"\n"
4942 "repaired_orphan: "LPU64"\n"
4943 "repaired_inconsistent_owner: "LPU64"\n"
4944 "repaired_others: "LPU64"\n"
4945 "skipped: "LPU64"\n"
4946 "failed_phase1: "LPU64"\n"
4947 "failed_phase2: "LPU64"\n",
4948 lo->ll_success_count,
4949 lo->ll_objs_repaired[LLIT_DANGLING - 1],
4950 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
4951 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
4952 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
4953 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
4954 lo->ll_objs_repaired[LLIT_OTHERS - 1],
4955 lo->ll_objs_skipped,
4956 lo->ll_objs_failed_phase1,
4957 lo->ll_objs_failed_phase2);
/* Phase 1 in progress: report live speeds and the OIT position. */
4964 if (lo->ll_status == LS_SCANNING_PHASE1) {
4966 const struct dt_it_ops *iops;
4967 cfs_duration_t duration = cfs_time_current() -
4968 lfsck->li_time_last_checkpoint;
4969 __u64 checked = lo->ll_objs_checked_phase1 +
4970 com->lc_new_checked;
4971 __u64 speed = checked;
4972 __u64 new_checked = com->lc_new_checked * HZ;
4973 __u32 rtime = lo->ll_run_time_phase1 +
4974 cfs_duration_sec(duration + HALF_SEC);
4977 do_div(new_checked, duration);
4979 do_div(speed, rtime);
4980 rc = snprintf(buf, len,
4981 "checked_phase1: "LPU64"\n"
4982 "checked_phase2: "LPU64"\n"
4983 "run_time_phase1: %u seconds\n"
4984 "run_time_phase2: %u seconds\n"
4985 "average_speed_phase1: "LPU64" items/sec\n"
4986 "average_speed_phase2: N/A\n"
4987 "real-time_speed_phase1: "LPU64" items/sec\n"
4988 "real-time_speed_phase2: N/A\n",
4990 lo->ll_objs_checked_phase2,
4992 lo->ll_run_time_phase2,
5001 LASSERT(lfsck->li_di_oit != NULL);
5003 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
5005 /* The low layer otable-based iteration position may NOT
5006 * exactly match the layout-based directory traversal
5007 * cookie. Generally, it is not a serious issue. But the
5008 * caller should NOT make assumption on that. */
5009 pos = iops->store(env, lfsck->li_di_oit);
5010 if (!lfsck->li_current_oit_processed)
5012 rc = snprintf(buf, len, "current_position: "LPU64"\n", pos);
/* Phase 2 in progress: both phases' speeds, FID-based position. */
5018 } else if (lo->ll_status == LS_SCANNING_PHASE2) {
5019 cfs_duration_t duration = cfs_time_current() -
5020 lfsck->li_time_last_checkpoint;
5021 __u64 checked = lo->ll_objs_checked_phase2 +
5022 com->lc_new_checked;
5023 __u64 speed1 = lo->ll_objs_checked_phase1;
5024 __u64 speed2 = checked;
5025 __u64 new_checked = com->lc_new_checked * HZ;
5026 __u32 rtime = lo->ll_run_time_phase2 +
5027 cfs_duration_sec(duration + HALF_SEC);
5030 do_div(new_checked, duration);
5031 if (lo->ll_run_time_phase1 != 0)
5032 do_div(speed1, lo->ll_run_time_phase1);
5034 do_div(speed2, rtime);
5035 rc = snprintf(buf, len,
5036 "checked_phase1: "LPU64"\n"
5037 "checked_phase2: "LPU64"\n"
5038 "run_time_phase1: %u seconds\n"
5039 "run_time_phase2: %u seconds\n"
5040 "average_speed_phase1: "LPU64" items/sec\n"
5041 "average_speed_phase2: "LPU64" items/sec\n"
5042 "real-time_speed_phase1: N/A\n"
5043 "real-time_speed_phase2: "LPU64" items/sec\n"
5044 "current_position: "DFID"\n",
5045 lo->ll_objs_checked_phase1,
5047 lo->ll_run_time_phase1,
5052 PFID(&com->lc_fid_latest_scanned_phase2));
/* Not scanning: historical averages only, no live position. */
5059 __u64 speed1 = lo->ll_objs_checked_phase1;
5060 __u64 speed2 = lo->ll_objs_checked_phase2;
5062 if (lo->ll_run_time_phase1 != 0)
5063 do_div(speed1, lo->ll_run_time_phase1);
5064 if (lo->ll_run_time_phase2 != 0)
5065 do_div(speed2, lo->ll_run_time_phase2);
5066 rc = snprintf(buf, len,
5067 "checked_phase1: "LPU64"\n"
5068 "checked_phase2: "LPU64"\n"
5069 "run_time_phase1: %u seconds\n"
5070 "run_time_phase2: %u seconds\n"
5071 "average_speed_phase1: "LPU64" items/sec\n"
5072 "average_speed_phase2: "LPU64" objs/sec\n"
5073 "real-time_speed_phase1: N/A\n"
5074 "real-time_speed_phase2: N/A\n"
5075 "current_position: N/A\n",
5076 lo->ll_objs_checked_phase1,
5077 lo->ll_objs_checked_phase2,
5078 lo->ll_run_time_phase1,
5079 lo->ll_run_time_phase2,
5091 up_read(&com->lc_sem);
/*
 * Kick the assistant thread into its double-scan (phase 2) mode and wait
 * until it acknowledges entering double scan or stops.  A no-op unless
 * the component is actually in LS_SCANNING_PHASE2.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 */
5096 static int lfsck_layout_master_double_scan(const struct lu_env *env,
5097 struct lfsck_component *com)
5099 struct lfsck_layout_master_data *llmd = com->lc_data;
5100 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5101 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5102 struct lfsck_layout *lo = com->lc_file_ram;
5103 struct l_wait_info lwi = { 0 };
5105 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
5108 llmd->llmd_to_double_scan = 1;
5109 wake_up_all(&athread->t_ctl_waitq);
5110 l_wait_event(mthread->t_ctl_waitq,
5111 llmd->llmd_in_double_scan ||
5112 thread_is_stopped(athread),
/* An assistant failure becomes this function's result. */
5114 if (llmd->llmd_assistant_status < 0)
5115 return llmd->llmd_assistant_status;
/*
 * Slave-side phase 2: periodically (30s timeout) query the master about
 * phase-2 progress and wait until every registered master has been
 * removed from llsd_master_list (or the local thread stops).  Finally
 * record the double-scan result, clean up the orphan rbtree and notify
 * the master(s) that phase 2 is done.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 */
5120 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
5121 struct lfsck_component *com)
5123 struct lfsck_instance *lfsck = com->lc_lfsck;
5124 struct lfsck_layout_slave_data *llsd = com->lc_data;
5125 struct lfsck_layout *lo = com->lc_file_ram;
5126 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Not in phase 2: just tear down and report completion immediately. */
5130 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
5131 lfsck_rbtree_cleanup(env, com);
5132 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
5136 atomic_inc(&lfsck->li_double_scan_count);
5138 com->lc_new_checked = 0;
5139 com->lc_new_scanned = 0;
5140 com->lc_time_last_checkpoint = cfs_time_current();
5141 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
5142 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Poll the master every 30 seconds until it finishes or we stop. */
5145 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
5148 rc = lfsck_layout_slave_query_master(env, com);
5149 if (list_empty(&llsd->llsd_master_list)) {
5150 if (unlikely(!thread_is_running(thread)))
5161 rc = l_wait_event(thread->t_ctl_waitq,
5162 !thread_is_running(thread) ||
5163 list_empty(&llsd->llsd_master_list),
5165 if (unlikely(!thread_is_running(thread)))
5168 if (rc == -ETIMEDOUT)
5171 GOTO(done, rc = (rc < 0 ? rc : 1));
5175 rc = lfsck_layout_double_scan_result(env, com, rc);
5177 lfsck_rbtree_cleanup(env, com);
5178 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
/* Wake anyone waiting for all double scans to drain. */
5179 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
5180 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/*
 * Release the master-side private data: the assistant thread must already
 * be init or stopped and the request list empty.  Detach every OST/MDT
 * target descriptor from the component's phase and target lists under the
 * target-descriptor lock, then free the structure (freeing is in elided
 * trailing lines).
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component being torn down
 */
5185 static void lfsck_layout_master_data_release(const struct lu_env *env,
5186 struct lfsck_component *com)
5188 struct lfsck_layout_master_data *llmd = com->lc_data;
5189 struct lfsck_instance *lfsck = com->lc_lfsck;
5190 struct lfsck_tgt_descs *ltds;
5191 struct lfsck_tgt_desc *ltd;
5192 struct lfsck_tgt_desc *next;
5194 LASSERT(llmd != NULL);
5195 LASSERT(thread_is_init(&llmd->llmd_thread) ||
5196 thread_is_stopped(&llmd->llmd_thread));
5197 LASSERT(list_empty(&llmd->llmd_req_list));
5199 com->lc_data = NULL;
/* Drain all six per-target membership lists (OST then MDT side). */
5201 ltds = &lfsck->li_ost_descs;
5202 spin_lock(&ltds->ltd_lock);
5203 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
5204 ltd_layout_phase_list) {
5205 list_del_init(&ltd->ltd_layout_phase_list);
5207 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
5208 ltd_layout_phase_list) {
5209 list_del_init(&ltd->ltd_layout_phase_list);
5211 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
5213 list_del_init(&ltd->ltd_layout_list);
5215 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
5216 ltd_layout_phase_list) {
5217 list_del_init(&ltd->ltd_layout_phase_list);
5219 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
5220 ltd_layout_phase_list) {
5221 list_del_init(&ltd->ltd_layout_phase_list);
5223 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
5225 list_del_init(&ltd->ltd_layout_list);
5227 spin_unlock(&ltds->ltd_lock);
/*
 * Release the slave-side private data: drop every cached per-sequence
 * LAST_ID object, release all registered master targets, clean up the
 * orphan rbtree and detach the data from the component.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component being torn down
 */
5232 static void lfsck_layout_slave_data_release(const struct lu_env *env,
5233 struct lfsck_component *com)
5235 struct lfsck_layout_slave_data *llsd = com->lc_data;
5236 struct lfsck_layout_seq *lls;
5237 struct lfsck_layout_seq *next;
5238 struct lfsck_layout_slave_target *llst;
5239 struct lfsck_layout_slave_target *tmp;
5241 LASSERT(llsd != NULL);
5243 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
5245 list_del_init(&lls->lls_list);
5246 lfsck_object_put(env, lls->lls_lastid_obj);
5250 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
5252 list_del_init(&llst->llst_list);
5256 lfsck_rbtree_cleanup(env, com);
5257 com->lc_data = NULL;
/*
 * Ask the assistant thread to exit and block until it is back in the
 * init state or fully stopped.
 *
 * \param[in] env	thread execution environment
 * \param[in] com	the layout LFSCK component
 */
5261 static void lfsck_layout_master_quit(const struct lu_env *env,
5262 struct lfsck_component *com)
5264 struct lfsck_layout_master_data *llmd = com->lc_data;
5265 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5266 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5267 struct l_wait_info lwi = { 0 };
5269 llmd->llmd_exit = 1;
5270 wake_up_all(&athread->t_ctl_waitq);
5271 l_wait_event(mthread->t_ctl_waitq,
5272 thread_is_init(athread) ||
5273 thread_is_stopped(athread),
/*
 * Slave-side quit: only needs to release the "FID accessed" rbtree;
 * there is no assistant thread on the slave side to stop here.
 */
5277 static void lfsck_layout_slave_quit(const struct lu_env *env,
5278 struct lfsck_component *com)
5280 lfsck_rbtree_cleanup(env, com);
/*
 * Handle an incoming LFSCK notification on the master: LE_PAIRS_VERIFY
 * is re-checked directly; LE_PHASE1_DONE / LE_PHASE2_DONE / LE_PEER_EXIT
 * move the sending target between the phase lists (under ltd_lock),
 * mark LF_INCOMPLETE on failure, and may stop the whole LFSCK when the
 * bookmark carries LPF_FAILOUT.
 *
 * NOTE(review): the "<d->" / "<ds->" tokens below are extraction
 * damage -- the "&lt" of "&ltd"/"&ltds" was decoded as "<"; the code
 * should read "&ltd->..." / "&ltds->...". Several original lines
 * (case labels, early returns, braces) are also missing here.
 */
5283 static int lfsck_layout_master_in_notify(const struct lu_env *env,
5284 struct lfsck_component *com,
5285 struct lfsck_request *lr)
5287 struct lfsck_instance *lfsck = com->lc_lfsck;
5288 struct lfsck_layout *lo = com->lc_file_ram;
5289 struct lfsck_layout_master_data *llmd = com->lc_data;
5290 struct lfsck_tgt_descs *ltds;
5291 struct lfsck_tgt_desc *ltd;
/* Pairs-verify requests carry a FID to re-check against the MDT-object. */
5295 if (lr->lr_event == LE_PAIRS_VERIFY) {
5298 rc = lfsck_layout_master_check_pairs(env, com, &lr->lr_fid,
/* Only the three peer lifecycle events are handled past this point. */
5304 if (lr->lr_event != LE_PHASE1_DONE &&
5305 lr->lr_event != LE_PHASE2_DONE &&
5306 lr->lr_event != LE_PEER_EXIT)
/* Select the target table the sender belongs to (OST vs MDT). */
5309 if (lr->lr_flags & LEF_FROM_OST)
5310 ltds = &lfsck->li_ost_descs;
5312 ltds = &lfsck->li_mdt_descs;
5313 spin_lock(<ds->ltd_lock);
5314 ltd = LTD_TGT(ltds, lr->lr_index);
5316 spin_unlock(<ds->ltd_lock);
5321 list_del_init(<d->ltd_layout_phase_list);
5322 switch (lr->lr_event) {
5323 case LE_PHASE1_DONE:
/* Non-positive status: the peer failed or stopped during phase1. */
5324 if (lr->lr_status <= 0) {
5325 ltd->ltd_layout_done = 1;
5326 list_del_init(<d->ltd_layout_list);
5327 CWARN("%s: %s %x failed/stopped at phase1: rc = %d.\n",
5328 lfsck_lfsck2name(lfsck),
5329 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5330 ltd->ltd_index, lr->lr_status);
5331 lo->ll_flags |= LF_INCOMPLETE;
/* Phase1 succeeded: move the target onto the phase2 lists. */
5336 if (lr->lr_flags & LEF_FROM_OST) {
5337 if (list_empty(<d->ltd_layout_list))
5338 list_add_tail(<d->ltd_layout_list,
5339 &llmd->llmd_ost_list);
5340 list_add_tail(<d->ltd_layout_phase_list,
5341 &llmd->llmd_ost_phase2_list);
5343 if (list_empty(<d->ltd_layout_list))
5344 list_add_tail(<d->ltd_layout_list,
5345 &llmd->llmd_mdt_list);
5346 list_add_tail(<d->ltd_layout_phase_list,
5347 &llmd->llmd_mdt_phase2_list);
5350 case LE_PHASE2_DONE:
5351 ltd->ltd_layout_done = 1;
5352 list_del_init(<d->ltd_layout_list);
/* NOTE(review): the LE_PEER_EXIT case label is missing from this view;
 * the lines below appear to belong to that arm. */
5356 ltd->ltd_layout_done = 1;
5357 list_del_init(<d->ltd_layout_list);
5358 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT)) {
5359 CWARN("%s: the peer %s %x exit layout LFSCK.\n",
5360 lfsck_lfsck2name(lfsck),
5361 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5363 lo->ll_flags |= LF_INCOMPLETE;
5369 spin_unlock(<ds->ltd_lock);
/* On failure with FAILOUT set, stop the local LFSCK (without
 * re-broadcasting); otherwise wake the assistant if it is time to
 * move on to orphan handling. */
5371 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5372 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5374 memset(stop, 0, sizeof(*stop));
5375 stop->ls_status = lr->lr_status;
5376 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5377 lfsck_stop(env, lfsck->li_bottom, stop);
5378 } else if (lfsck_layout_master_to_orphan(llmd)) {
5379 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Handle an incoming LFSCK notification on the slave (OST) side.
 * LE_FID_ACCESSED marks a FID in the rbtree bitmap; LE_CONDITIONAL_DESTROY
 * is delegated; LE_PAIRS_VERIFY runs the two-step PFID consistency check
 * and may repair the OST-object's PFID xattr; LE_PHASE2_DONE (and,
 * apparently, LE_PEER_EXIT -- its case label is not visible in this
 * extraction) drops the corresponding master target and may wake the
 * local thread or stop the LFSCK under LPF_FAILOUT.
 */
5385 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
5386 struct lfsck_component *com,
5387 struct lfsck_request *lr)
5389 struct lfsck_instance *lfsck = com->lc_lfsck;
5390 struct lfsck_layout_slave_data *llsd = com->lc_data;
5391 struct lfsck_layout_slave_target *llst;
5395 switch (lr->lr_event) {
5396 case LE_FID_ACCESSED:
5397 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
5399 case LE_CONDITIONAL_DESTROY:
5400 rc = lfsck_layout_slave_conditional_destroy(env, com, lr);
5402 case LE_PAIRS_VERIFY: {
5403 lr->lr_status = LPVS_INIT;
5404 /* Firstly, if the MDT-object which is claimed via OST-object
5405 * local stored PFID xattr recognizes the OST-object, then it
5406 * must be that the client given PFID is wrong. */
5407 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5412 lr->lr_status = LPVS_INCONSISTENT;
5413 /* The OST-object local stored PFID xattr is stale. We need to
5414 * check whether the MDT-object that is claimed via the client
5415 * given PFID information recognizes the OST-object or not. If
5416 * matches, then need to update the OST-object's PFID xattr. */
5417 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5420 * We are not sure whether the client given PFID information
5421 * is correct or not, do nothing to avoid improper fixing.
5424 * The client given PFID information is also invalid, we can
5425 * NOT fix the OST-object inconsistency.
/* Both checks passed: rewrite the OST-object's PFID xattr. */
5430 lr->lr_status = LPVS_INCONSISTENT_TOFIX;
5431 rc = lfsck_layout_slave_repair_pfid(env, com, lr);
5435 case LE_PHASE2_DONE:
/* Forget the master target that sent this event. */
5442 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
5446 lfsck_layout_llst_put(llst);
/* Last master gone: nothing more to serve, wake the local thread. */
5447 if (list_empty(&llsd->llsd_master_list))
5448 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/* A peer exit under FAILOUT policy stops the local LFSCK too. */
5450 if (lr->lr_event == LE_PEER_EXIT &&
5451 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5452 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5454 memset(stop, 0, sizeof(*stop));
5455 stop->ls_status = lr->lr_status;
5456 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5457 lfsck_stop(env, lfsck->li_bottom, stop);
/*
 * Report the current layout LFSCK status from the in-RAM trace file.
 */
5463 static int lfsck_layout_query(const struct lu_env *env,
5464 struct lfsck_component *com)
5466 struct lfsck_layout *lo = com->lc_file_ram;
5468 return lo->ll_status;
/*
 * Notify one target that this master is co-stopping: remove the target
 * from the layout lists under ltd_lock, build an LE_PEER_EXIT request
 * with status LS_CO_PAUSED, and send it asynchronously via @set.
 *
 * NOTE(review): "<d->" / "<ds->" below are extraction damage for
 * "&ltd->" / "&ltds->" (the "&lt" entity was decoded as "<").
 */
5471 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
5472 struct lfsck_component *com,
5473 struct lfsck_tgt_descs *ltds,
5474 struct lfsck_tgt_desc *ltd,
5475 struct ptlrpc_request_set *set)
5477 struct lfsck_thread_info *info = lfsck_env_info(env);
5478 struct lfsck_async_interpret_args *laia = &info->lti_laia;
5479 struct lfsck_request *lr = &info->lti_lr;
5480 struct lfsck_instance *lfsck = com->lc_lfsck;
/* Target no longer tracked: nothing to notify. */
5483 spin_lock(<ds->ltd_lock);
5484 if (list_empty(<d->ltd_layout_list)) {
5485 LASSERT(list_empty(<d->ltd_layout_phase_list));
5486 spin_unlock(<ds->ltd_lock);
5491 list_del_init(<d->ltd_layout_phase_list);
5492 list_del_init(<d->ltd_layout_list);
5493 spin_unlock(<ds->ltd_lock);
/* Build the co-stop (peer exit) request. */
5495 memset(lr, 0, sizeof(*lr));
5496 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
5497 lr->lr_event = LE_PEER_EXIT;
5498 lr->lr_active = LT_LAYOUT;
5499 lr->lr_status = LS_CO_PAUSED;
5500 if (ltds == &lfsck->li_ost_descs)
5501 lr->lr_flags = LEF_TO_OST;
5503 laia->laia_com = com;
5504 laia->laia_ltds = ltds;
/* Hold a target reference for the async interpret callback. */
5505 atomic_inc(<d->ltd_ref);
5506 laia->laia_ltd = ltd;
5508 laia->laia_shared = 0;
5510 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
5511 lfsck_layout_master_async_interpret,
5512 laia, LFSCK_NOTIFY);
5514 CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
5515 lfsck_lfsck2name(lfsck),
5516 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5517 ltd->ltd_index, rc);
/* with lfsck::li_lock held */
/*
 * Join an already-running all-target layout LFSCK on the slave side:
 * record the requesting master's index in the llst list (temporarily
 * dropping li_lock to do so), and roll the registration back if the
 * local LFSCK thread turns out not to be running.
 */
5525 static int lfsck_layout_slave_join(const struct lu_env *env,
5526 struct lfsck_component *com,
5527 struct lfsck_start_param *lsp)
5529 struct lfsck_instance *lfsck = com->lc_lfsck;
5530 struct lfsck_layout_slave_data *llsd = com->lc_data;
5531 struct lfsck_layout_slave_target *llst;
5532 struct lfsck_start *start = lsp->lsp_start;
/* Only join requests that target all devices qualify. */
5536 if (!lsp->lsp_index_valid || start == NULL ||
5537 !(start->ls_flags & LPF_ALL_TGT) ||
5538 !(lfsck->li_bookmark_ram.lb_param & LPF_ALL_TGT))
/* llst_add may sleep; drop li_lock across the call. */
5541 spin_unlock(&lfsck->li_lock);
5542 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
5543 spin_lock(&lfsck->li_lock);
/* LFSCK is not actually running: undo the registration. */
5544 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
5545 spin_unlock(&lfsck->li_lock);
5546 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index,
5549 lfsck_layout_llst_put(llst);
5550 spin_lock(&lfsck->li_lock);
/* Operation table for the layout LFSCK component on the master (MDT). */
5557 static struct lfsck_operations lfsck_layout_master_ops = {
5558 .lfsck_reset = lfsck_layout_reset,
5559 .lfsck_fail = lfsck_layout_fail,
5560 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
5561 .lfsck_prep = lfsck_layout_master_prep,
5562 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
5563 .lfsck_exec_dir = lfsck_layout_exec_dir,
5564 .lfsck_post = lfsck_layout_master_post,
5565 .lfsck_interpret = lfsck_layout_master_async_interpret,
5566 .lfsck_dump = lfsck_layout_dump,
5567 .lfsck_double_scan = lfsck_layout_master_double_scan,
5568 .lfsck_data_release = lfsck_layout_master_data_release,
5569 .lfsck_quit = lfsck_layout_master_quit,
5570 .lfsck_in_notify = lfsck_layout_master_in_notify,
5571 .lfsck_query = lfsck_layout_query,
5572 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/* Operation table for the layout LFSCK component on a slave (OST);
 * no interpret/stop_notify hooks, but a join hook for late starters. */
5575 static struct lfsck_operations lfsck_layout_slave_ops = {
5576 .lfsck_reset = lfsck_layout_reset,
5577 .lfsck_fail = lfsck_layout_fail,
5578 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
5579 .lfsck_prep = lfsck_layout_slave_prep,
5580 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
5581 .lfsck_exec_dir = lfsck_layout_exec_dir,
5582 .lfsck_post = lfsck_layout_slave_post,
5583 .lfsck_dump = lfsck_layout_dump,
5584 .lfsck_double_scan = lfsck_layout_slave_double_scan,
5585 .lfsck_data_release = lfsck_layout_slave_data_release,
5586 .lfsck_quit = lfsck_layout_slave_quit,
5587 .lfsck_in_notify = lfsck_layout_slave_in_notify,
5588 .lfsck_query = lfsck_layout_query,
5589 .lfsck_join = lfsck_layout_slave_join,
/*
 * Create and register the layout LFSCK component for @lfsck.
 * Allocates the component, picks master or slave flavor (ops + private
 * data), allocates the RAM and disk copies of the trace file, locates
 * (or creates) the on-disk "lfsck_layout" file under the local root,
 * loads previous state, and queues the component on the idle or scan
 * list according to the saved status. On any failure, the component
 * is cleaned up via lfsck_component_cleanup().
 *
 * NOTE(review): allocation of @com, error labels, and several returns
 * are not visible in this extraction.
 */
5592 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
5594 struct lfsck_component *com;
5595 struct lfsck_layout *lo;
5596 struct dt_object *root = NULL;
5597 struct dt_object *obj;
5605 INIT_LIST_HEAD(&com->lc_link);
5606 INIT_LIST_HEAD(&com->lc_link_dir);
5607 init_rwsem(&com->lc_sem);
5608 atomic_set(&com->lc_ref, 1);
5609 com->lc_lfsck = lfsck;
5610 com->lc_type = LT_LAYOUT;
/* Master (MDT) flavor: assistant thread plus OST/MDT phase lists. */
5611 if (lfsck->li_master) {
5612 struct lfsck_layout_master_data *llmd;
5614 com->lc_ops = &lfsck_layout_master_ops;
5615 OBD_ALLOC_PTR(llmd);
5617 GOTO(out, rc = -ENOMEM);
5619 INIT_LIST_HEAD(&llmd->llmd_req_list);
5620 spin_lock_init(&llmd->llmd_lock);
5621 INIT_LIST_HEAD(&llmd->llmd_ost_list);
5622 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
5623 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
5624 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
5625 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
5626 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
5627 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
5628 com->lc_data = llmd;
/* Slave (OST) flavor: sequence/master lists plus the FID rbtree. */
5630 struct lfsck_layout_slave_data *llsd;
5632 com->lc_ops = &lfsck_layout_slave_ops;
5633 OBD_ALLOC_PTR(llsd);
5635 GOTO(out, rc = -ENOMEM);
5637 INIT_LIST_HEAD(&llsd->llsd_seq_list);
5638 INIT_LIST_HEAD(&llsd->llsd_master_list);
5639 spin_lock_init(&llsd->llsd_lock);
5640 llsd->llsd_rb_root = RB_ROOT;
5641 rwlock_init(&llsd->llsd_rb_lock);
5642 com->lc_data = llsd;
/* RAM and disk images of the lfsck_layout trace file. */
5644 com->lc_file_size = sizeof(*lo);
5645 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
5646 if (com->lc_file_ram == NULL)
5647 GOTO(out, rc = -ENOMEM);
5649 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
5650 if (com->lc_file_disk == NULL)
5651 GOTO(out, rc = -ENOMEM);
/* Find or create the on-disk trace file under the local root. */
5653 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
5655 GOTO(out, rc = PTR_ERR(root));
5657 if (unlikely(!dt_try_as_dir(env, root)))
5658 GOTO(out, rc = -ENOTDIR);
5660 obj = local_file_find_or_create(env, lfsck->li_los, root,
5662 S_IFREG | S_IRUGO | S_IWUSR);
5664 GOTO(out, rc = PTR_ERR(obj));
/* Load prior state; reset on corruption, initialize when absent. */
5667 rc = lfsck_layout_load(env, com);
5669 rc = lfsck_layout_reset(env, com, true);
5670 else if (rc == -ENOENT)
5671 rc = lfsck_layout_init(env, com);
/* Queue the component according to its recorded status. */
5676 lo = com->lc_file_ram;
5677 switch (lo->ll_status) {
5683 spin_lock(&lfsck->li_lock);
5684 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5685 spin_unlock(&lfsck->li_lock);
5688 CERROR("%s: unknown lfsck_layout status: rc = %u\n",
5689 lfsck_lfsck2name(lfsck), lo->ll_status);
5691 case LS_SCANNING_PHASE1:
5692 case LS_SCANNING_PHASE2:
5693 /* No need to store the status to disk right now.
5694 * If the system crashed before the status stored,
5695 * it will be loaded back when next time. */
5696 lo->ll_status = LS_CRASHED;
5697 lo->ll_flags |= LF_INCOMPLETE;
5704 spin_lock(&lfsck->li_lock);
5705 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
5706 spin_unlock(&lfsck->li_lock);
/* A crashed LAST_ID file requires notifying the layer above so the
 * rebuild can start. */
5710 if (lo->ll_flags & LF_CRASHED_LASTID) {
5711 LASSERT(lfsck->li_out_notify != NULL);
5713 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5714 LE_LASTID_REBUILDING);
/* Common exit: drop the root reference; clean up on error. */
5720 if (root != NULL && !IS_ERR(root))
5721 lu_object_put(env, &root->do_lu);
5724 lfsck_component_cleanup(env, com);
/*
 * Iterator state for scanning orphan OST-objects through the slave's
 * "FID accessed" rbtree, driven by the orphan index iteration ops.
 */
5729 struct lfsck_orphan_it {
/* Layout LFSCK component this iteration belongs to. */
5730 struct lfsck_component *loi_com;
/* Current rbtree node being scanned. */
5731 struct lfsck_rbtree_node *loi_lrn;
/* Slave target (master) on whose behalf we iterate. */
5732 struct lfsck_layout_slave_target *loi_llst;
/* Current iteration key (FID). */
5733 struct lu_fid loi_key;
/* Record returned for the current orphan. */
5734 struct lu_orphan_rec loi_rec;
/* Set once the iteration has run past the last candidate. */
5736 unsigned int loi_over:1;
/*
 * Check via the FLD whether @fid's sequence resides on the MDT with
 * index @idx. Abnormal (non-normal) FIDs are attributed to MDT0; a
 * non-MDT range or a lookup failure is handled by the (not visible
 * here) early-return paths.
 */
5739 static int lfsck_fid_match_idx(const struct lu_env *env,
5740 struct lfsck_instance *lfsck,
5741 const struct lu_fid *fid, int idx)
5743 struct seq_server_site *ss;
5744 struct lu_server_fld *sf;
5745 struct lu_seq_range range = { 0 };
5748 /* All abnormal cases will be returned to MDT0. */
5749 if (!fid_is_norm(fid)) {
5756 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
5757 if (unlikely(ss == NULL))
5760 sf = ss->ss_server_fld;
5761 LASSERT(sf != NULL);
/* Resolve the sequence's location through the FLD server. */
5763 fld_range_set_any(&range);
5764 rc = fld_server_lookup(env, sf, fid_seq(fid), &range);
5768 if (!fld_range_is_mdt(&range))
5771 if (range.lsr_index == idx)
/*
 * Destroy an orphan OST-object in a single local transaction:
 * declare ref-del and destroy, start the transaction, then drop the
 * reference and destroy the object under its write lock.
 */
5777 static void lfsck_layout_destroy_orphan(const struct lu_env *env,
5778 struct dt_device *dev,
5779 struct dt_object *obj)
5781 struct thandle *handle;
5785 handle = dt_trans_create(env, dev);
5789 rc = dt_declare_ref_del(env, obj, handle);
5793 rc = dt_declare_destroy(env, obj, handle);
5797 rc = dt_trans_start_local(env, dev, handle);
5801 dt_write_lock(env, obj, 0);
5802 rc = dt_ref_del(env, obj, handle);
5804 rc = dt_destroy(env, obj, handle);
5805 dt_write_unlock(env, obj);
5810 dt_trans_stop(env, dev, handle);
/*
 * The orphan pseudo-index supports iteration only; lookup is a stub.
 * Body not visible in this extraction -- presumably returns an error
 * code; confirm against the full source.
 */
5815 static int lfsck_orphan_index_lookup(const struct lu_env *env,
5816 struct dt_object *dt,
5818 const struct dt_key *key,
5819 struct lustre_capa *capa)
/* Stub: insertion is not supported on the orphan pseudo-index
 * (body not visible in this extraction). */
5824 static int lfsck_orphan_index_declare_insert(const struct lu_env *env,
5825 struct dt_object *dt,
5826 const struct dt_rec *rec,
5827 const struct dt_key *key,
5828 struct thandle *handle)
/* Stub: insertion is not supported on the orphan pseudo-index
 * (body not visible in this extraction). */
5833 static int lfsck_orphan_index_insert(const struct lu_env *env,
5834 struct dt_object *dt,
5835 const struct dt_rec *rec,
5836 const struct dt_key *key,
5837 struct thandle *handle,
5838 struct lustre_capa *capa,
/* Stub: deletion is not supported on the orphan pseudo-index
 * (body not visible in this extraction). */
5844 static int lfsck_orphan_index_declare_delete(const struct lu_env *env,
5845 struct dt_object *dt,
5846 const struct dt_key *key,
5847 struct thandle *handle)
/* Stub: deletion is not supported on the orphan pseudo-index
 * (body not visible in this extraction). */
5852 static int lfsck_orphan_index_delete(const struct lu_env *env,
5853 struct dt_object *dt,
5854 const struct dt_key *key,
5855 struct thandle *handle,
5856 struct lustre_capa *capa)
/*
 * Start an orphan iteration for the requesting MDT (identified via
 * @attr as a target index): find the LFSCK instance and the layout
 * component, validate the rbtree, allocate the iterator, and claim
 * the slave target. On the first ever iteration, prune rbtree nodes
 * whose objects were all accessed (nothing orphaned there) and stop
 * recording FID accesses. The rbtree stays read-locked from init
 * until fini. On error, returns the error wrapped in ERR_PTR.
 */
5861 static struct dt_it *lfsck_orphan_it_init(const struct lu_env *env,
5862 struct dt_object *dt,
5864 struct lustre_capa *capa)
5866 struct dt_device *dev = lu2dt_dev(dt->do_lu.lo_dev);
5867 struct lfsck_instance *lfsck;
5868 struct lfsck_component *com = NULL;
5869 struct lfsck_layout_slave_data *llsd;
5870 struct lfsck_orphan_it *it = NULL;
5874 lfsck = lfsck_instance_find(dev, true, false);
5875 if (unlikely(lfsck == NULL))
5876 RETURN(ERR_PTR(-ENXIO));
5878 com = lfsck_component_find(lfsck, LT_LAYOUT);
5879 if (unlikely(com == NULL))
5880 GOTO(out, rc = -ENOENT);
5882 llsd = com->lc_data;
5883 if (!llsd->llsd_rbtree_valid)
5884 GOTO(out, rc = -ESRCH);
5888 GOTO(out, rc = -ENOMEM);
/* Claim the slave-target entry for the requesting MDT. */
5890 it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
5891 if (it->loi_llst == NULL)
5892 GOTO(out, rc = -ENXIO);
5894 if (dev->dd_record_fid_accessed) {
5895 /* The first iteration against the rbtree, scan the whole rbtree
5896 * to remove the nodes which do NOT need to be handled. */
5897 write_lock(&llsd->llsd_rb_lock);
/* Re-check under the write lock (double-checked against races). */
5898 if (dev->dd_record_fid_accessed) {
5899 struct rb_node *node;
5900 struct rb_node *next;
5901 struct lfsck_rbtree_node *lrn;
5903 /* No need to record the fid accessing anymore. */
5904 dev->dd_record_fid_accessed = 0;
5906 node = rb_first(&llsd->llsd_rb_root);
5907 while (node != NULL) {
5908 next = rb_next(node);
5909 lrn = rb_entry(node, struct lfsck_rbtree_node,
/* All known objects were accessed: no orphan candidates here. */
5911 if (atomic_read(&lrn->lrn_known_count) <=
5912 atomic_read(&lrn->lrn_accessed_count)) {
5913 rb_erase(node, &llsd->llsd_rb_root);
5914 lfsck_rbtree_free(lrn);
5919 write_unlock(&llsd->llsd_rb_lock);
5922 /* read lock the rbtree when init, and unlock when fini */
5923 read_lock(&llsd->llsd_rb_lock);
5931 lfsck_component_put(env, com);
5932 lfsck_instance_put(env, lfsck);
/* Error path: hand the error back through the iterator pointer. */
5937 it = (struct lfsck_orphan_it *)ERR_PTR(rc);
5940 return (struct dt_it *)it;
/*
 * Finish an orphan iteration: release the rbtree read lock taken at
 * init, stash the current key/hash back into the slave target so a
 * later load() can resume, then drop the target and component refs.
 */
5943 static void lfsck_orphan_it_fini(const struct lu_env *env,
5946 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5947 struct lfsck_component *com = it->loi_com;
5948 struct lfsck_layout_slave_data *llsd;
5949 struct lfsck_layout_slave_target *llst;
5952 llsd = com->lc_data;
5953 read_unlock(&llsd->llsd_rb_lock);
5954 llst = it->loi_llst;
5955 LASSERT(llst != NULL);
5957 /* Save the key and hash for iterate next. */
5958 llst->llst_fid = it->loi_key;
5959 llst->llst_hash = it->loi_hash;
5960 lfsck_layout_llst_put(llst);
5961 lfsck_component_put(env, com);
5967 * \retval +1: the iteration finished
5968 * \retval 0: on success, not finished
5969 * \retval -ve: on error
/*
 * Advance the orphan iterator to the next candidate OST-object:
 * walk the rbtree bitmaps for a known-but-not-accessed bit, locate
 * the object, read its attributes and PFID xattr, and fill
 * it->loi_rec with the claimed parent FID and ownership. Objects
 * without a PFID are either marked accessed (pre-created), destroyed
 * (dangling-repair race leftovers), or attributed to MDT0.
 *
 * NOTE(review): many control-flow lines (gotos, else branches,
 * braces) are missing from this extraction; comments below describe
 * only what the visible lines establish.
 */
5971 static int lfsck_orphan_it_next(const struct lu_env *env,
5974 struct lfsck_thread_info *info = lfsck_env_info(env);
5975 struct filter_fid_old *pfid = &info->lti_old_pfid;
5976 struct lu_attr *la = &info->lti_la;
5977 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5978 struct lu_fid *key = &it->loi_key;
5979 struct lu_orphan_rec *rec = &it->loi_rec;
5980 struct lfsck_component *com = it->loi_com;
5981 struct lfsck_instance *lfsck = com->lc_lfsck;
5982 struct lfsck_layout_slave_data *llsd = com->lc_data;
5983 struct dt_object *obj;
5984 struct lfsck_rbtree_node *lrn;
5988 __u32 idx = it->loi_llst->llst_index;
/* Position within the rbtree based on the current key. */
5998 lrn = lfsck_rbtree_search(llsd, key, &exact);
6006 key->f_seq = lrn->lrn_seq;
6007 key->f_oid = lrn->lrn_first_oid;
6012 if (unlikely(key->f_oid == 0)) {
6019 lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH) {
/* Current node fully accessed: skip forward to the first node that
 * still has unvisited (candidate) objects. */
6025 if (unlikely(atomic_read(&lrn->lrn_known_count) <=
6026 atomic_read(&lrn->lrn_accessed_count))) {
6027 struct rb_node *next = rb_next(&lrn->lrn_node);
6029 while (next != NULL) {
6030 lrn = rb_entry(next, struct lfsck_rbtree_node,
6032 if (atomic_read(&lrn->lrn_known_count) >
6033 atomic_read(&lrn->lrn_accessed_count))
6035 next = rb_next(next);
6044 key->f_seq = lrn->lrn_seq;
6045 key->f_oid = lrn->lrn_first_oid;
/* Scan this node's known-bitmap for the next set bit. */
6049 pos = key->f_oid - lrn->lrn_first_oid;
6052 pos = find_next_bit(lrn->lrn_known_bitmap,
6053 LFSCK_RBTREE_BITMAP_WIDTH, pos);
6054 if (pos >= LFSCK_RBTREE_BITMAP_WIDTH) {
6055 key->f_oid = lrn->lrn_first_oid + pos;
/* Guard against OID wrap-around when stepping past the node. */
6056 if (unlikely(key->f_oid < lrn->lrn_first_oid)) {
/* Already accessed during the scan: not an orphan candidate. */
6064 if (test_bit(pos, lrn->lrn_accessed_bitmap)) {
6069 key->f_oid = lrn->lrn_first_oid + pos;
6070 obj = lfsck_object_find(env, lfsck, key);
6073 if (rc == -ENOENT) {
6080 dt_read_lock(env, obj, 0);
6081 if (!dt_object_exists(obj)) {
6082 dt_read_unlock(env, obj);
6083 lfsck_object_put(env, obj);
6088 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Fetch the stored parent-FID (PFID) xattr, old or new layout. */
6092 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, pfid, sizeof(*pfid)),
6093 XATTR_NAME_FID, BYPASS_CAPA);
6094 if (rc == -ENODATA) {
6095 /* For the pre-created OST-object, update the bitmap to avoid
6096 * others LFSCK (second phase) iteration to touch it again. */
6097 if (la->la_ctime == 0) {
6098 if (!test_and_set_bit(pos, lrn->lrn_accessed_bitmap))
6099 atomic_inc(&lrn->lrn_accessed_count);
6101 /* For the race between repairing dangling referenced
6102 * MDT-object and unlink the file, it may left orphan
6103 * OST-object there. Destroy it now! */
6104 if (unlikely(!(la->la_mode & S_ISUID))) {
6105 dt_read_unlock(env, obj);
6106 lfsck_layout_destroy_orphan(env,
6109 lfsck_object_put(env, obj);
6113 } else if (idx == 0) {
6114 /* If the orphan OST-object has no parent information,
6115 * regard it as referenced by the MDT-object on MDT0. */
6116 fid_zero(&rec->lor_fid);
6117 rec->lor_uid = la->la_uid;
6118 rec->lor_gid = la->la_gid;
6122 dt_read_unlock(env, obj);
6123 lfsck_object_put(env, obj);
/* The xattr must be exactly one of the two known PFID layouts. */
6131 if (rc != sizeof(struct filter_fid) &&
6132 rc != sizeof(struct filter_fid_old))
6133 GOTO(out, rc = -EINVAL);
6135 fid_le_to_cpu(&rec->lor_fid, &pfid->ff_parent);
6136 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
6137 * MDT-object's FID::f_ver, instead it is the OST-object index in its
6138 * parent MDT-object's layout EA. */
6139 save = rec->lor_fid.f_stripe_idx;
6140 rec->lor_fid.f_ver = 0;
6141 rc = lfsck_fid_match_idx(env, lfsck, &rec->lor_fid, idx);
6142 /* If the orphan OST-object does not claim the MDT, then next.
6144 * If we do not know whether it matches or not, then return it
6145 * to the MDT for further check. */
6147 dt_read_unlock(env, obj);
6148 lfsck_object_put(env, obj);
/* Candidate accepted: restore the stripe index and fill ownership. */
6153 rec->lor_fid.f_stripe_idx = save;
6154 rec->lor_uid = la->la_uid;
6155 rec->lor_gid = la->la_gid;
6157 CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u\n",
6158 lfsck_lfsck2name(com->lc_lfsck), PFID(key), PFID(&rec->lor_fid),
6159 rec->lor_uid, rec->lor_gid);
6164 dt_read_unlock(env, obj);
6165 lfsck_object_put(env, obj);
6173 * \retval +1: locate to the exactly position
6174 * \retval 0: cannot locate to the exactly position,
6175 * call next() to move to a valid position.
6176 * \retval -ve: on error
/* Position the orphan iterator at @key by copying it into the
 * iterator and delegating the actual positioning to next(). */
6178 static int lfsck_orphan_it_get(const struct lu_env *env,
6180 const struct dt_key *key)
6182 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6185 it->loi_key = *(struct lu_fid *)key;
6186 rc = lfsck_orphan_it_next(env, di);
/* put() hook for the orphan iterator -- body not visible in this
 * extraction (appears to be a no-op; confirm against full source). */
6196 static void lfsck_orphan_it_put(const struct lu_env *env,
/* Return the iterator's current key (the candidate FID). */
6201 static struct dt_key *lfsck_orphan_it_key(const struct lu_env *env,
6202 const struct dt_it *di)
6204 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6206 return (struct dt_key *)&it->loi_key;
/* Orphan iteration keys are always full FIDs. */
6209 static int lfsck_orphan_it_key_size(const struct lu_env *env,
6210 const struct dt_it *di)
6212 return sizeof(struct lu_fid);
/* Copy out the current orphan record (parent FID + ownership). */
6215 static int lfsck_orphan_it_rec(const struct lu_env *env,
6216 const struct dt_it *di,
6220 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6222 *(struct lu_orphan_rec *)rec = it->loi_rec;
/* Return the iterator's resume cookie (hash) for later load(). */
6227 static __u64 lfsck_orphan_it_store(const struct lu_env *env,
6228 const struct dt_it *di)
6230 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6232 return it->loi_hash;
6236 * \retval +1: locate to the exactly position
6237 * \retval 0: cannot locate to the exactly position,
6238 * call next() to move to a valid position.
6239 * \retval -ve: on error
/* Resume the orphan iteration from the cookie saved at fini(): if the
 * caller's hash does not match the saved one, warn and restart from
 * the beginning; then reposition via next(). */
6241 static int lfsck_orphan_it_load(const struct lu_env *env,
6242 const struct dt_it *di,
6245 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6246 struct lfsck_layout_slave_target *llst = it->loi_llst;
6249 LASSERT(llst != NULL);
6251 if (hash != llst->llst_hash) {
6252 CWARN("%s: the given hash "LPU64" for orphan iteration does "
6253 "not match the one when fini "LPU64", to be reset.\n",
6254 lfsck_lfsck2name(it->loi_com->lc_lfsck), hash,
/* Reset to iterate from scratch. */
6256 fid_zero(&llst->llst_fid);
6257 llst->llst_hash = 0;
6260 it->loi_key = llst->llst_fid;
6261 it->loi_hash = llst->llst_hash;
6262 rc = lfsck_orphan_it_next(env, (struct dt_it *)di);
/* key_rec() hook for the orphan iterator -- body not visible in this
 * extraction (appears to be a stub; confirm against full source). */
6272 static int lfsck_orphan_it_key_rec(const struct lu_env *env,
6273 const struct dt_it *di,
6279 const struct dt_index_operations lfsck_orphan_index_ops = {
6280 .dio_lookup = lfsck_orphan_index_lookup,
6281 .dio_declare_insert = lfsck_orphan_index_declare_insert,
6282 .dio_insert = lfsck_orphan_index_insert,
6283 .dio_declare_delete = lfsck_orphan_index_declare_delete,
6284 .dio_delete = lfsck_orphan_index_delete,
6286 .init = lfsck_orphan_it_init,
6287 .fini = lfsck_orphan_it_fini,
6288 .get = lfsck_orphan_it_get,
6289 .put = lfsck_orphan_it_put,
6290 .next = lfsck_orphan_it_next,
6291 .key = lfsck_orphan_it_key,
6292 .key_size = lfsck_orphan_it_key_size,
6293 .rec = lfsck_orphan_it_rec,
6294 .store = lfsck_orphan_it_store,
6295 .load = lfsck_orphan_it_load,
6296 .key_rec = lfsck_orphan_it_key_rec,