4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_fid.h>
43 #include <lustre_lib.h>
44 #include <lustre_net.h>
45 #include <lustre/lustre_user.h>
46 #include <md_object.h>
47 #include <obd_class.h>
49 #include "lfsck_internal.h"
/* Magic number stamped into the on-disk layout LFSCK trace file;
 * lfsck_layout_load() resets the file when the magic does not match. */
51 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Name under which the layout LFSCK component is known. */
53 static const char lfsck_layout_name[] = "lfsck_layout";
/* Per-FID-sequence state tracked by the slave (OST-side) layout LFSCK.
 * NOTE(review): this extract elides some members (e.g. lls_seq and
 * lls_lastid, which later code references) and the closing brace. */
55 struct lfsck_layout_seq {
56 struct list_head lls_list;
	/* Largest object ID already known for this sequence; used by
	 * lfsck_layout_lastid_reload() to detect a crashed LAST_ID. */
59 __u64 lls_lastid_known;
	/* The LAST_ID file object backing this sequence. */
60 struct dt_object *lls_lastid_obj;
	/* Set when the in-RAM LAST_ID must be written back to disk. */
61 unsigned int lls_dirty:1;
/* One master (MDT) that this OST slave interacts with during layout
 * verification / orphan iteration. NOTE(review): members such as
 * llst_index and llst_ref are referenced by later code but elided here. */
64 struct lfsck_layout_slave_target {
65 /* link into lfsck_layout_slave_data::llsd_master_list. */
66 struct list_head llst_list;
67 /* The position for next record in the rbtree for iteration. */
68 struct lu_fid llst_fid;
69 /* Dummy hash for iteration against the rbtree. */
/* Private data of the slave (OST-side) layout LFSCK component. */
76 struct lfsck_layout_slave_data {
77 /* list for lfsck_layout_seq */
78 struct list_head llsd_seq_list;
80 /* list for the masters involve layout verification. */
81 struct list_head llsd_master_list;
	/* In-RAM object standing for the rbtree so that it can be scanned
	 * through the index iteration API; see lfsck_rbtree_setup(). */
84 struct dt_object *llsd_rb_obj;
	/* Root of the OST-object "known/accessed" bitmap rbtree. */
85 struct rb_root llsd_rb_root;
	/* Protects the rbtree and llsd_rbtree_valid. */
86 rwlock_t llsd_rb_lock;
	/* Cleared by lfsck_rbtree_cleanup() so no one uses the tree. */
87 unsigned int llsd_rbtree_valid:1;
/* Reference-counted wrapper around an MDT-side (parent) object whose
 * layout is being verified; caches its attributes at pre-fetch time. */
90 struct lfsck_layout_object {
91 struct dt_object *llo_obj;
	/* Attributes snapshot taken by lfsck_layout_object_init(). */
92 struct lu_attr llo_attr;
/* A single parent/child layout verification request; presumably queued
 * on lfsck_layout_master_data::llmd_req_list — TODO confirm from the
 * elided assistant-thread code. */
97 struct lfsck_layout_req {
98 struct list_head llr_list;
	/* Parent (MDT-side) object; a reference is held (see req_init). */
99 struct lfsck_layout_object *llr_parent;
	/* Child (OST-side) object to be checked against the parent. */
100 struct dt_object *llr_child;
102 __u32 llr_lov_idx; /* offset in LOV EA */
/* Private data of the master (MDT-side) layout LFSCK component. */
105 struct lfsck_layout_master_data {
	/* Protects llmd_req_list (see lfsck_layout_req_empty()). */
106 spinlock_t llmd_lock;
107 struct list_head llmd_req_list;
109 /* list for the ost targets involve layout verification. */
110 struct list_head llmd_ost_list;
112 /* list for the ost targets in phase1 scanning. */
113 struct list_head llmd_ost_phase1_list;
115 /* list for the ost targets in phase2 scanning. */
116 struct list_head llmd_ost_phase2_list;
118 /* list for the mdt targets involve layout verification. */
119 struct list_head llmd_mdt_list;
121 /* list for the mdt targets in phase1 scanning. */
122 struct list_head llmd_mdt_phase1_list;
124 /* list for the mdt targets in phase2 scanning. */
125 struct list_head llmd_mdt_phase2_list;
	/* Assistant thread handle. */
127 struct ptlrpc_thread llmd_thread;
	/* Generation bumped per query round to avoid re-touching a target
	 * twice in one pass (see lfsck_layout_master_query_others()). */
128 __u32 llmd_touch_gen;
130 int llmd_assistant_status;
131 int llmd_post_result;
132 unsigned int llmd_to_post:1,
133 llmd_to_double_scan:1,
134 llmd_in_double_scan:1,
/* Arguments carried through an async RPC issued by the slave side. */
138 struct lfsck_layout_slave_async_args {
139 struct obd_export *llsaa_exp;
140 struct lfsck_component *llsaa_com;
141 struct lfsck_layout_slave_target *llsaa_llst;
/* Allocate an lfsck_layout_object wrapper for @obj: snapshot its
 * attributes, take a reference on the underlying lu_object, and set the
 * wrapper refcount to 1. Returns the wrapper or ERR_PTR(-ENOMEM).
 * NOTE(review): allocation, error handling, gen assignment and the
 * return statement are elided in this extract. */
144 static struct lfsck_layout_object *
145 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
148 struct lfsck_layout_object *llo;
153 return ERR_PTR(-ENOMEM);
155 rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
162 lu_object_get(&obj->do_lu);
164 /* The gen can be used to check whether some others have changed the
165 * file layout after LFSCK pre-fetching but before real verification. */
167 atomic_set(&llo->llo_ref, 1);
/* Drop one reference on @llst; on the last put the target must already
 * be unlinked (asserted) — the actual free is elided in this extract. */
173 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
175 if (atomic_dec_and_test(&llst->llst_ref)) {
176 LASSERT(list_empty(&llst->llst_list));
/* Register master @index on llsd_master_list, unless a target with the
 * same index is already there (the duplicate-found branch is elided:
 * presumably frees the new allocation — TODO confirm). */
183 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
185 struct lfsck_layout_slave_target *llst;
186 struct lfsck_layout_slave_target *tmp;
193 INIT_LIST_HEAD(&llst->llst_list);
195 llst->llst_index = index;
196 atomic_set(&llst->llst_ref, 1);
198 spin_lock(&llsd->llsd_lock);
	/* Scan for an existing entry with the same index under the lock. */
199 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
200 if (tmp->llst_index == index) {
206 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
207 spin_unlock(&llsd->llsd_lock);
/* Unlink @llst from the master list under the lock, then drop the
 * reference that the list (implicitly) held on it. */
216 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
217 struct lfsck_layout_slave_target *llst)
221 spin_lock(&llsd->llsd_lock);
222 if (!list_empty(&llst->llst_list)) {
223 list_del_init(&llst->llst_list);
226 spin_unlock(&llsd->llsd_lock);
	/* NOTE(review): the put likely only happens when the entry was
	 * actually unlinked — control flow around it is elided here. */
229 lfsck_layout_llst_put(llst);
/* Find the slave target with @index; take a reference and return it,
 * unlinking it first when @unlink is set. The not-found return path is
 * elided in this extract (presumably returns NULL). */
232 static inline struct lfsck_layout_slave_target *
233 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
234 __u32 index, bool unlink)
236 struct lfsck_layout_slave_target *llst;
238 spin_lock(&llsd->llsd_lock);
239 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
240 if (llst->llst_index == index) {
242 list_del_init(&llst->llst_list);
244 atomic_inc(&llst->llst_ref);
245 spin_unlock(&llsd->llsd_lock);
250 spin_unlock(&llsd->llsd_lock);
/* Drop one reference on @llo; on the last put release the wrapped
 * object (the free of @llo itself is elided in this extract). */
255 static inline void lfsck_layout_object_put(const struct lu_env *env,
256 struct lfsck_layout_object *llo)
258 if (atomic_dec_and_test(&llo->llo_ref)) {
259 lfsck_object_put(env, llo->llo_obj);
/* Build a verification request binding @parent (refcount bumped here)
 * to OST object @child at LOV index @lov_idx on OST @ost_idx.
 * Returns the request or ERR_PTR(-ENOMEM); the allocation and return
 * statements are elided in this extract. */
264 static struct lfsck_layout_req *
265 lfsck_layout_req_init(struct lfsck_layout_object *parent,
266 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
268 struct lfsck_layout_req *llr;
272 return ERR_PTR(-ENOMEM);
274 INIT_LIST_HEAD(&llr->llr_list);
	/* The request owns a reference on the parent wrapper. */
275 atomic_inc(&parent->llo_ref);
276 llr->llr_parent = parent;
277 llr->llr_child = child;
278 llr->llr_ost_idx = ost_idx;
279 llr->llr_lov_idx = lov_idx;
/* Release a request's references: put the child lu_object and the
 * parent wrapper (freeing of @llr itself is elided in this extract). */
284 static inline void lfsck_layout_req_fini(const struct lu_env *env,
285 struct lfsck_layout_req *llr)
287 lu_object_put(env, &llr->llr_child->do_lu);
288 lfsck_layout_object_put(env, llr->llr_parent);
/* Test under llmd_lock whether the request queue is empty; the result
 * plumbing (local flag / return) is elided in this extract. */
292 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
296 spin_lock(&llmd->llmd_lock);
297 if (list_empty(&llmd->llmd_req_list))
299 spin_unlock(&llmd->llmd_lock);
/* Fetch the LOV EA of @obj into @buf, growing the buffer as needed:
 * on -ERANGE (buffer too small — inferred, branch conditions elided)
 * probe the real size with LU_BUF_NULL, reallocate, and retry.
 * Updates *buflen with the (new) buffer capacity. */
304 static int lfsck_layout_get_lovea(const struct lu_env *env,
305 struct dt_object *obj,
306 struct lu_buf *buf, ssize_t *buflen)
311 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
	/* Size probe: a NULL buffer returns the xattr's length. */
313 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
318 lu_buf_realloc(buf, rc);
320 *buflen = buf->lb_len;
322 if (buf->lb_buf == NULL)
334 if (unlikely(buf->lb_buf == NULL)) {
335 lu_buf_alloc(buf, rc);
337 *buflen = buf->lb_len;
339 if (buf->lb_buf == NULL)
/* Sanity-check a LOV EA header: only LOV_MAGIC_V1/V3 magics and the
 * RAID0 pattern are accepted; anything else is logged and (per the
 * elided return paths) presumably rejected with -EINVAL/-EOPNOTSUPP. */
348 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
353 magic = le32_to_cpu(lmm->lmm_magic);
354 /* If magic crashed, keep it there. Sometime later, during OST-object
355 * orphan handling, if some OST-object(s) back-point to it, it can be
356 * verified and repaired. */
357 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3) {
361 lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
	/* Distinguish "known family but unsupported" from "unknown". */
362 if ((magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
367 CDEBUG(D_LFSCK, "%s LOV EA magic %u on "DOSTID"\n",
368 rc == -EINVAL ? "Unknown" : "Unsupported",
374 pattern = le32_to_cpu(lmm->lmm_pattern);
375 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
376 if (lov_pattern(pattern) != LOV_PATTERN_RAID0) {
379 lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
380 CDEBUG(D_LFSCK, "Unsupported LOV EA pattern %u on "DOSTID"\n",
381 pattern, POSTID(&oi));
/* Each rbtree node carries one page-sized bitmap, so it covers
 * PAGE_CACHE_SIZE * 8 consecutive object IDs within one sequence. */
389 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
390 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
391 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1)
/* One node of the OST-object accessing rbtree: tracks which objects in
 * a window of LFSCK_RBTREE_BITMAP_WIDTH OIDs are known and/or accessed.
 * NOTE(review): lrn_seq and lrn_first_oid (used by lfsck_rbtree_cmp)
 * are elided from this extract. */
393 struct lfsck_rbtree_node {
394 struct rb_node lrn_node;
397 atomic_t lrn_known_count;
398 atomic_t lrn_accessed_count;
399 void *lrn_known_bitmap;
400 void *lrn_accessed_bitmap;
/* Three-way compare of (@seq, @oid) against the OID window covered by
 * @lrn; the literal return values are elided in this extract but the
 * callers use <0 / 0 / >0 ordering. */
403 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
404 __u64 seq, __u32 oid)
406 if (seq < lrn->lrn_seq)
409 if (seq > lrn->lrn_seq)
412 if (oid < lrn->lrn_first_oid)
	/* OID past the end of this node's bitmap window. */
415 if (oid - lrn->lrn_first_oid >= LFSCK_RBTREE_BITMAP_WIDTH)
/* Find the rbtree node covering @fid; when there is no exact match,
 * step forward to the next in-order node (for iteration). @exact, when
 * non-NULL, presumably reports whether the hit was exact — the
 * assignments are elided in this extract. Caller holds llsd_rb_lock. */
421 /* The caller should hold llsd->llsd_rb_lock. */
422 static struct lfsck_rbtree_node *
423 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
424 const struct lu_fid *fid, bool *exact)
426 struct rb_node *node = llsd->llsd_rb_root.rb_node;
427 struct rb_node *prev = NULL;
428 struct lfsck_rbtree_node *lrn = NULL;
434 while (node != NULL) {
436 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
437 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
439 node = node->rb_left;
441 node = node->rb_right;
449 /* If there is no exactly matched one, then to the next valid one. */
452 /* The rbtree is empty. */
459 node = rb_next(prev);
461 /* The end of the rbtree. */
465 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
/* Allocate a new rbtree node (plus its two page-sized bitmaps) covering
 * the OID window that contains @fid; counters start at zero.
 * Returns the node or ERR_PTR(-ENOMEM), freeing partial allocations on
 * failure (the node allocation/free lines are elided in this extract). */
470 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
471 const struct lu_fid *fid)
473 struct lfsck_rbtree_node *lrn;
477 return ERR_PTR(-ENOMEM);
479 OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
480 if (lrn->lrn_known_bitmap == NULL) {
483 return ERR_PTR(-ENOMEM);
486 OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
487 if (lrn->lrn_accessed_bitmap == NULL) {
488 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
491 return ERR_PTR(-ENOMEM);
494 RB_CLEAR_NODE(&lrn->lrn_node);
495 lrn->lrn_seq = fid_seq(fid);
	/* Align the window start down to a bitmap-width boundary. */
496 lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
497 atomic_set(&lrn->lrn_known_count, 0);
498 atomic_set(&lrn->lrn_accessed_count, 0);
/* Free a node's bitmaps (the free of @lrn itself is elided here). */
503 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
505 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
506 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
/* Insert @lrn into the rbtree; on an equal key (branch elided in this
 * extract) presumably returns the existing node instead, so the caller
 * can free the duplicate. Caller holds llsd_rb_lock for write. */
510 /* The caller should hold lock. */
511 static struct lfsck_rbtree_node *
512 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
513 struct lfsck_rbtree_node *lrn)
515 struct rb_node **pos = &llsd->llsd_rb_root.rb_node;
516 struct rb_node *parent = NULL;
517 struct lfsck_rbtree_node *tmp;
520 while (*pos != NULL) {
522 tmp = rb_entry(parent, struct lfsck_rbtree_node, lrn_node);
523 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
525 pos = &(*pos)->rb_left;
527 pos = &(*pos)->rb_right;
532 rb_link_node(&lrn->lrn_node, parent, pos);
533 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
538 extern const struct dt_index_operations lfsck_orphan_index_ops;
/* Prepare the slave's OST-object accessing rbtree: create an in-RAM
 * dt_object (FID_SEQ_LAYOUT_RBTREE / device index) whose index ops
 * iterate the rbtree, mark the tree valid, and turn on FID-accessed
 * recording on the bottom device. */
540 static int lfsck_rbtree_setup(const struct lu_env *env,
541 struct lfsck_component *com)
543 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
544 struct lfsck_instance *lfsck = com->lc_lfsck;
545 struct dt_device *dev = lfsck->li_bottom;
546 struct lfsck_layout_slave_data *llsd = com->lc_data;
547 struct dt_object *obj;
549 fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
550 fid->f_oid = lfsck_dev_idx(dev);
552 obj = dt_locate(env, dev, fid);
554 RETURN(PTR_ERR(obj));
556 /* Generate an in-RAM object to stand for the layout rbtree.
557 * Scanning the layout rbtree will be via the iteration over
558 * the object. In the future, the rbtree may be written onto
559 * disk with the object.
561 * Mark the object to be as exist. */
562 obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
563 obj->do_index_ops = &lfsck_orphan_index_ops;
564 llsd->llsd_rb_obj = obj;
565 llsd->llsd_rbtree_valid = 1;
566 dev->dd_record_fid_accessed = 1;
568 CDEBUG(D_LFSCK, "%s: layout LFSCK init OST-objects accessing bitmap\n",
569 lfsck_lfsck2name(lfsck));
/* Tear down the accessing rbtree: stop FID recording, invalidate the
 * tree under the write lock so no new users enter, free every node,
 * and drop the in-RAM iteration object. */
574 static void lfsck_rbtree_cleanup(const struct lu_env *env,
575 struct lfsck_component *com)
577 struct lfsck_instance *lfsck = com->lc_lfsck;
578 struct lfsck_layout_slave_data *llsd = com->lc_data;
579 struct rb_node *node = rb_first(&llsd->llsd_rb_root);
580 struct rb_node *next;
581 struct lfsck_rbtree_node *lrn;
583 lfsck->li_bottom->dd_record_fid_accessed = 0;
584 /* Invalid the rbtree, then no others will use it. */
585 write_lock(&llsd->llsd_rb_lock);
586 llsd->llsd_rbtree_valid = 0;
587 write_unlock(&llsd->llsd_rb_lock);
	/* Safe to walk without the lock now: nobody else may use it. */
589 while (node != NULL) {
590 next = rb_next(node);
591 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
592 rb_erase(node, &llsd->llsd_rb_root);
593 lfsck_rbtree_free(lrn);
597 if (llsd->llsd_rb_obj != NULL) {
598 lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
599 llsd->llsd_rb_obj = NULL;
602 CDEBUG(D_LFSCK, "%s: layout LFSCK fini OST-objects accessing bitmap\n",
603 lfsck_lfsck2name(lfsck));
/* Record @fid in the accessing rbtree: mark its bit in the "known"
 * bitmap (and in "accessed" when @accessed — the parameter declaration
 * is elided in this extract), creating the covering node on demand with
 * a drop-lock / allocate / retake-write-lock dance. If the update fails
 * while @accessed is set, orphan handling would be unreliable, so mark
 * the component LF_INCOMPLETE and dismantle the whole rbtree. */
606 static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
607 struct lfsck_component *com,
608 const struct lu_fid *fid,
611 struct lfsck_layout_slave_data *llsd = com->lc_data;
612 struct lfsck_rbtree_node *lrn;
	/* Ignore FIDs that cannot be OST objects. */
618 if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
621 if (!fid_is_idif(fid) && !fid_is_norm(fid))
624 read_lock(&llsd->llsd_rb_lock);
625 if (!llsd->llsd_rbtree_valid)
626 GOTO(unlock, rc = 0);
628 lrn = lfsck_rbtree_search(llsd, fid, NULL);
630 struct lfsck_rbtree_node *tmp;
	/* No covering node: allocate outside the lock, then revalidate
	 * under the write lock before inserting. */
634 read_unlock(&llsd->llsd_rb_lock);
635 tmp = lfsck_rbtree_new(env, fid);
637 GOTO(out, rc = PTR_ERR(tmp));
640 write_lock(&llsd->llsd_rb_lock);
641 if (!llsd->llsd_rbtree_valid) {
642 lfsck_rbtree_free(tmp);
643 GOTO(unlock, rc = 0);
	/* Insert may return an existing node; free ours if it lost. */
646 lrn = lfsck_rbtree_insert(llsd, tmp);
648 lfsck_rbtree_free(tmp);
651 idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
652 /* Any accessed object must be a known object. */
653 if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
654 atomic_inc(&lrn->lrn_known_count);
655 if (accessed && !test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
656 atomic_inc(&lrn->lrn_accessed_count);
658 GOTO(unlock, rc = 0);
662 write_unlock(&llsd->llsd_rb_lock);
664 read_unlock(&llsd->llsd_rb_lock);
666 if (rc != 0 && accessed) {
667 struct lfsck_layout *lo = com->lc_file_ram;
669 CDEBUG(D_LFSCK, "%s: fail to update OST-objects accessing "
670 "bitmap, and will cause incorrect LFSCK OST-object "
671 "handling, so disable it to cancel orphan handling "
672 "for related device. rc = %d\n",
673 lfsck_lfsck2name(com->lc_lfsck), rc);
675 lo->ll_flags |= LF_INCOMPLETE;
676 lfsck_rbtree_cleanup(env, com);
/* Convert an on-disk (little-endian) lfsck_layout record @src into the
 * CPU-endian in-RAM copy @des, field by field. Must be kept in sync
 * with lfsck_layout_cpu_to_le() below. */
680 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
681 const struct lfsck_layout *src)
685 des->ll_magic = le32_to_cpu(src->ll_magic);
686 des->ll_status = le32_to_cpu(src->ll_status);
687 des->ll_flags = le32_to_cpu(src->ll_flags);
688 des->ll_success_count = le32_to_cpu(src->ll_success_count);
689 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
690 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
691 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
692 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
693 des->ll_time_last_checkpoint =
694 le64_to_cpu(src->ll_time_last_checkpoint);
695 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
696 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
697 des->ll_pos_first_inconsistent =
698 le64_to_cpu(src->ll_pos_first_inconsistent);
699 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
700 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
701 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
702 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
703 for (i = 0; i < LLIT_MAX; i++)
704 des->ll_objs_repaired[i] =
705 le64_to_cpu(src->ll_objs_repaired[i]);
706 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/* Convert the CPU-endian in-RAM lfsck_layout record @src into the
 * on-disk (little-endian) copy @des; exact inverse of
 * lfsck_layout_le_to_cpu() above — keep the two in sync. */
709 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
710 const struct lfsck_layout *src)
714 des->ll_magic = cpu_to_le32(src->ll_magic);
715 des->ll_status = cpu_to_le32(src->ll_status);
716 des->ll_flags = cpu_to_le32(src->ll_flags);
717 des->ll_success_count = cpu_to_le32(src->ll_success_count);
718 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
719 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
720 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
721 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
722 des->ll_time_last_checkpoint =
723 cpu_to_le64(src->ll_time_last_checkpoint);
724 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
725 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
726 des->ll_pos_first_inconsistent =
727 cpu_to_le64(src->ll_pos_first_inconsistent);
728 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
729 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
730 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
731 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
732 for (i = 0; i < LLIT_MAX; i++)
733 des->ll_objs_repaired[i] =
734 cpu_to_le64(src->ll_objs_repaired[i]);
735 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/* Load the layout LFSCK trace file from disk into com->lc_file_ram,
 * converting endianness and validating size and magic. */
739 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
740 * \retval 0: succeed.
741 * \retval -ve: failed cases.
743 static int lfsck_layout_load(const struct lu_env *env,
744 struct lfsck_component *com)
746 struct lfsck_layout *lo = com->lc_file_ram;
747 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
748 ssize_t size = com->lc_file_size;
752 rc = dbo->dbo_read(env, com->lc_obj,
753 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
	/* A short read means a truncated/corrupted trace file. */
758 CDEBUG(D_LFSCK, "%s: failed to load lfsck_layout: rc = %d\n",
759 lfsck_lfsck2name(com->lc_lfsck), rc);
761 } else if (rc != size) {
762 CDEBUG(D_LFSCK, "%s: lfsck_layout size %u != %u; reset it\n",
763 lfsck_lfsck2name(com->lc_lfsck), rc, (unsigned int)size);
767 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
768 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
769 CDEBUG(D_LFSCK, "%s: invalid lfsck_layout magic %#x != %#x, "
770 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
771 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/* Persist the in-RAM trace file: convert to little-endian into
 * com->lc_file_disk and write it via a local transaction on the bottom
 * device (declare -> start -> write -> stop; error paths elided). */
778 static int lfsck_layout_store(const struct lu_env *env,
779 struct lfsck_component *com)
781 struct dt_object *obj = com->lc_obj;
782 struct lfsck_instance *lfsck = com->lc_lfsck;
783 struct lfsck_layout *lo = com->lc_file_disk;
784 struct thandle *handle;
785 ssize_t size = com->lc_file_size;
790 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
791 handle = dt_trans_create(env, lfsck->li_bottom);
793 GOTO(log, rc = PTR_ERR(handle));
795 rc = dt_declare_record_write(env, obj, lfsck_buf_get(env, lo, size),
800 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
804 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
810 dt_trans_stop(env, lfsck->li_bottom, handle);
814 CDEBUG(D_LFSCK, "%s: fail to store lfsck_layout: rc = %d\n",
815 lfsck_lfsck2name(lfsck), rc);
/* Reset the layout trace file to a pristine LS_INIT state and persist
 * it, serialized against other users via lc_sem. */
819 static int lfsck_layout_init(const struct lu_env *env,
820 struct lfsck_component *com)
822 struct lfsck_layout *lo = com->lc_file_ram;
825 memset(lo, 0, com->lc_file_size);
826 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
827 lo->ll_status = LS_INIT;
828 down_write(&com->lc_sem);
829 rc = lfsck_layout_store(env, com);
830 up_write(&com->lc_sem);
/* Decide whether @fid names an OST object: first ask the FLD for the
 * sequence's range type; if that is inconclusive (branches elided),
 * fall back to the object's LMA xattr (LMAC_FID_ON_OST flag), and
 * finally probe for the XATTR_NAME_FID attribute. */
835 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
836 struct dt_object *obj, const struct lu_fid *fid)
838 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
839 struct lu_seq_range range = { 0 };
840 struct lustre_mdt_attrs *lma;
843 fld_range_set_any(&range);
844 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
846 if (fld_range_is_ost(&range))
852 lma = &lfsck_env_info(env)->lti_lma;
853 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
854 XATTR_NAME_LMA, BYPASS_CAPA);
855 if (rc == sizeof(*lma)) {
856 lustre_lma_swab(lma);
858 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
861 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/* Look up the per-sequence state for @seq in the (sorted-by-seq) slave
 * list; the early-termination and return statements are elided, but the
 * "lls_seq > seq" check shows the list is kept in ascending order. */
866 static struct lfsck_layout_seq *
867 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
869 struct lfsck_layout_seq *lls;
871 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
872 if (lls->lls_seq == seq)
875 if (lls->lls_seq > seq)
/* Insert @lls into llsd_seq_list, preserving ascending lls_seq order:
 * find the first entry with a larger seq and add before it (or at the
 * tail when none is larger). */
883 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
884 struct lfsck_layout_seq *lls)
886 struct lfsck_layout_seq *tmp;
887 struct list_head *pos = &llsd->llsd_seq_list;
889 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
890 if (lls->lls_seq < tmp->lls_seq) {
891 pos = &tmp->lls_list;
895 list_add_tail(&lls->lls_list, pos);
/* (Re)create a missing LAST_ID file for the sequence that @obj names,
 * writing an initial last-id value (the lastid initialization itself is
 * elided in this extract). Skipped entirely under LPF_DRYRUN. Uses a
 * local transaction: declare create + write, start, then create and
 * write under the object write lock, re-checking existence. */
899 lfsck_layout_lastid_create(const struct lu_env *env,
900 struct lfsck_instance *lfsck,
901 struct dt_object *obj)
903 struct lfsck_thread_info *info = lfsck_env_info(env);
904 struct lu_attr *la = &info->lti_la;
905 struct dt_object_format *dof = &info->lti_dof;
906 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
907 struct dt_device *dt = lfsck->li_bottom;
	/* Dry-run mode must not modify anything on disk. */
914 if (bk->lb_param & LPF_DRYRUN)
917 memset(la, 0, sizeof(*la));
918 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
919 la->la_valid = LA_MODE | LA_UID | LA_GID;
920 dof->dof_type = dt_mode_to_dft(S_IFREG);
922 th = dt_trans_create(env, dt);
924 GOTO(log, rc = PTR_ERR(th));
926 rc = dt_declare_create(env, obj, la, NULL, dof, th);
930 rc = dt_declare_record_write(env, obj,
931 lfsck_buf_get(env, &lastid,
937 rc = dt_trans_start_local(env, dt, th);
	/* Somebody may have created it while we were preparing. */
941 dt_write_lock(env, obj, 0);
942 if (likely(!dt_object_exists(obj))) {
943 rc = dt_create(env, obj, la, NULL, dof, th);
945 rc = dt_record_write(env, obj,
946 lfsck_buf_get(env, &lastid, sizeof(lastid)),
949 dt_write_unlock(env, obj);
954 dt_trans_stop(env, dt, th);
957 CDEBUG(D_LFSCK, "%s: layout LFSCK will create LAST_ID for <seq> "
959 lfsck_lfsck2name(lfsck), fid_seq(lfsck_dto2fid(obj)), rc);
/* Re-read the on-disk LAST_ID for @lls. If it is lower than the
 * largest ID the scan has already seen, the LAST_ID file is stale:
 * adopt the known value, notify for rebuilding, and flag
 * LF_CRASHED_LASTID; otherwise just adopt the (>=) on-disk value. */
965 lfsck_layout_lastid_reload(const struct lu_env *env,
966 struct lfsck_component *com,
967 struct lfsck_layout_seq *lls)
973 dt_read_lock(env, lls->lls_lastid_obj, 0);
974 rc = dt_record_read(env, lls->lls_lastid_obj,
975 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
976 dt_read_unlock(env, lls->lls_lastid_obj);
977 if (unlikely(rc != 0))
980 lastid = le64_to_cpu(lastid);
981 if (lastid < lls->lls_lastid_known) {
982 struct lfsck_instance *lfsck = com->lc_lfsck;
983 struct lfsck_layout *lo = com->lc_file_ram;
985 lls->lls_lastid = lls->lls_lastid_known;
	/* Only notify once per crash detection. */
987 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
988 LASSERT(lfsck->li_out_notify != NULL);
990 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
991 LE_LASTID_REBUILDING);
992 lo->ll_flags |= LF_CRASHED_LASTID;
994 } else if (lastid >= lls->lls_lastid) {
995 lls->lls_lastid = lastid;
/* Walk every tracked sequence and write back its LAST_ID when dirty.
 * Clean entries are still reloaded to catch OFD pre-creation races
 * (see the in-line comment below). Dry-run mode skips the writes.
 * Per-sequence errors are logged; aggregation of rc is elided. */
1003 lfsck_layout_lastid_store(const struct lu_env *env,
1004 struct lfsck_component *com)
1006 struct lfsck_instance *lfsck = com->lc_lfsck;
1007 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1008 struct dt_device *dt = lfsck->li_bottom;
1009 struct lfsck_layout_slave_data *llsd = com->lc_data;
1010 struct lfsck_layout_seq *lls;
1016 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
1019 /* XXX: Add the code back if we really found related
1020 * inconsistent cases in the future. */
1022 if (!lls->lls_dirty) {
1023 /* In OFD, before the pre-creation, the LAST_ID
1024 * file will be updated firstly, which may hide
1025 * some potential crashed cases. For example:
1027 * The old obj1's ID is higher than old LAST_ID
1028 * but lower than the new LAST_ID, but the LFSCK
1029 * have not touch the obj1 until the OFD updated
1030 * the LAST_ID. So the LFSCK does not regard it
1031 * as crashed case. But when OFD does not create
1032 * successfully, it will set the LAST_ID as the
1033 * real created objects' ID, then LFSCK needs to
1034 * found related inconsistency. */
1035 rc = lfsck_layout_lastid_reload(env, com, lls);
1036 if (likely(!lls->lls_dirty))
1041 CDEBUG(D_LFSCK, "%s: layout LFSCK will sync the LAST_ID for "
1042 "<seq> "LPX64" as <oid> "LPU64"\n",
1043 lfsck_lfsck2name(lfsck), lls->lls_seq, lls->lls_lastid);
	/* Dry-run: report but do not modify the LAST_ID file. */
1045 if (bk->lb_param & LPF_DRYRUN) {
1050 th = dt_trans_create(env, dt);
1053 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to store "
1054 "the LAST_ID for <seq> "LPX64"(1): rc = %d\n",
1055 lfsck_lfsck2name(com->lc_lfsck),
1060 lastid = cpu_to_le64(lls->lls_lastid);
1061 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
1062 lfsck_buf_get(env, &lastid,
1068 rc = dt_trans_start_local(env, dt, th);
1072 dt_write_lock(env, lls->lls_lastid_obj, 0);
1073 rc = dt_record_write(env, lls->lls_lastid_obj,
1074 lfsck_buf_get(env, &lastid,
1075 sizeof(lastid)), &pos, th);
1076 dt_write_unlock(env, lls->lls_lastid_obj);
1081 dt_trans_stop(env, dt, th);
1084 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to store "
1085 "the LAST_ID for <seq> "LPX64"(2): rc = %d\n",
1086 lfsck_lfsck2name(com->lc_lfsck),
/* Locate and read the LAST_ID file for @lls->lls_seq. If the file does
 * not exist, or reads back empty, the LAST_ID is considered crashed:
 * notify for rebuilding, flag LF_CRASHED_LASTID, and (when missing)
 * recreate it via lfsck_layout_lastid_create(). An optional fail_loc
 * (OBD_FAIL_LFSCK_DELAY4) pauses here for testing. The located object
 * is kept in lls->lls_lastid_obj on success. */
1095 lfsck_layout_lastid_load(const struct lu_env *env,
1096 struct lfsck_component *com,
1097 struct lfsck_layout_seq *lls)
1099 struct lfsck_instance *lfsck = com->lc_lfsck;
1100 struct lfsck_layout *lo = com->lc_file_ram;
1101 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1102 struct dt_object *obj;
1107 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
1108 obj = dt_locate(env, lfsck->li_bottom, fid);
1110 RETURN(PTR_ERR(obj));
1112 /* LAST_ID crashed, to be rebuilt */
1113 if (!dt_object_exists(obj)) {
1114 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1115 LASSERT(lfsck->li_out_notify != NULL);
1117 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1118 LE_LASTID_REBUILDING);
1119 lo->ll_flags |= LF_CRASHED_LASTID;
	/* Test hook: optionally wait before recreating LAST_ID. */
1121 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
1123 struct l_wait_info lwi = LWI_TIMEOUT(
1124 cfs_time_seconds(cfs_fail_val),
	/* Drop lc_sem while sleeping so others can progress. */
1127 up_write(&com->lc_sem);
1128 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1129 !thread_is_running(&lfsck->li_thread),
1131 down_write(&com->lc_sem);
1135 rc = lfsck_layout_lastid_create(env, lfsck, obj);
1137 dt_read_lock(env, obj, 0);
1138 rc = dt_read(env, obj,
1139 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
1141 dt_read_unlock(env, obj);
	/* A partial read is corruption; rc==0 means an empty file. */
1142 if (rc != 0 && rc != sizeof(__u64))
1143 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
1145 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
1146 LASSERT(lfsck->li_out_notify != NULL);
1148 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1149 LE_LASTID_REBUILDING);
1150 lo->ll_flags |= LF_CRASHED_LASTID;
1153 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
	/* Error path: drop the object; success keeps it cached. */
1161 lfsck_object_put(env, obj);
1163 lls->lls_lastid_obj = obj;
/* Account one phase-1 verification failure and, if this is the first
 * non-repaired inconsistency, remember the iterator position so a
 * later run can resume checking from there. */
1168 static void lfsck_layout_record_failure(const struct lu_env *env,
1169 struct lfsck_instance *lfsck,
1170 struct lfsck_layout *lo)
1172 lo->ll_objs_failed_phase1++;
1173 if (unlikely(lo->ll_pos_first_inconsistent == 0)) {
1174 lo->ll_pos_first_inconsistent =
1175 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
1178 CDEBUG(D_LFSCK, "%s: layout LFSCK hit first non-repaired "
1179 "inconsistency at the pos ["LPU64"]\n",
1180 lfsck_lfsck2name(lfsck),
1181 lo->ll_pos_first_inconsistent);
/* Interpret callback for async LFSCK RPCs sent by the master to other
 * MDTs/OSTs. Dispatches on the original request's event:
 *  - start-type event (case label elided): on failure flag the scan
 *    LF_INCOMPLETE; on success (re)link the target onto the proper
 *    layout/phase1 lists under ltd_lock, unless it died or finished.
 *  - LE_PHASE1_DONE / LE_PHASE2_DONE: log unexpected failures.
 *  - query-type event (case label elided): on RPC error or a bad
 *    reply, drop the target from both lists; otherwise move it to the
 *    phase2 list while the peer is still scanning, or drop it when the
 *    peer has finished (LS_SCANNING_PHASE1 falls through — inferred,
 *    the intervening lines are elided).
 * Finally, for non-shared args, drop the references taken when the
 * request was issued (target put is elided). */
1185 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
1186 struct ptlrpc_request *req,
1189 struct lfsck_async_interpret_args *laia = args;
1190 struct lfsck_component *com = laia->laia_com;
1191 struct lfsck_layout_master_data *llmd = com->lc_data;
1192 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
1193 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
1194 struct lfsck_request *lr = laia->laia_lr;
1196 switch (lr->lr_event) {
1199 struct lfsck_layout *lo = com->lc_file_ram;
1201 CDEBUG(D_LFSCK, "%s: fail to notify %s %x for layout "
1203 lfsck_lfsck2name(com->lc_lfsck),
1204 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1205 ltd->ltd_index, rc);
1206 lo->ll_flags |= LF_INCOMPLETE;
1210 spin_lock(&ltds->ltd_lock);
1211 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1212 spin_unlock(&ltds->ltd_lock);
1216 if (lr->lr_flags & LEF_TO_OST) {
1217 if (list_empty(&ltd->ltd_layout_list))
1218 list_add_tail(&ltd->ltd_layout_list,
1219 &llmd->llmd_ost_list);
1220 if (list_empty(&ltd->ltd_layout_phase_list))
1221 list_add_tail(&ltd->ltd_layout_phase_list,
1222 &llmd->llmd_ost_phase1_list);
1224 if (list_empty(&ltd->ltd_layout_list))
1225 list_add_tail(&ltd->ltd_layout_list,
1226 &llmd->llmd_mdt_list);
1227 if (list_empty(&ltd->ltd_layout_phase_list))
1228 list_add_tail(&ltd->ltd_layout_phase_list,
1229 &llmd->llmd_mdt_phase1_list);
1231 spin_unlock(&ltds->ltd_lock);
1234 case LE_PHASE1_DONE:
1235 case LE_PHASE2_DONE:
1237 if (rc != 0 && rc != -EALREADY)
1238 CDEBUG(D_LFSCK, "%s: fail to notify %s %x for layout: "
1239 "event = %d, rc = %d\n",
1240 lfsck_lfsck2name(com->lc_lfsck),
1241 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1242 ltd->ltd_index, lr->lr_event, rc);
1245 struct lfsck_reply *reply;
	/* RPC-level failure: stop tracking this target. */
1248 spin_lock(&ltds->ltd_lock);
1249 list_del_init(&ltd->ltd_layout_phase_list);
1250 list_del_init(&ltd->ltd_layout_list);
1251 spin_unlock(&ltds->ltd_lock);
1255 reply = req_capsule_server_get(&req->rq_pill,
1257 if (reply == NULL) {
1259 CDEBUG(D_LFSCK, "%s: invalid query reply: rc = %d\n",
1260 lfsck_lfsck2name(com->lc_lfsck), rc);
1261 spin_lock(&ltds->ltd_lock);
1262 list_del_init(&ltd->ltd_layout_phase_list);
1263 list_del_init(&ltd->ltd_layout_list);
1264 spin_unlock(&ltds->ltd_lock);
1268 switch (reply->lr_status) {
1269 case LS_SCANNING_PHASE1:
1271 case LS_SCANNING_PHASE2:
	/* Peer entered phase2: migrate to the phase2 tracking list. */
1272 spin_lock(&ltds->ltd_lock);
1273 list_del_init(&ltd->ltd_layout_phase_list);
1274 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1275 spin_unlock(&ltds->ltd_lock);
1279 if (lr->lr_flags & LEF_TO_OST)
1280 list_add_tail(&ltd->ltd_layout_phase_list,
1281 &llmd->llmd_ost_phase2_list);
1283 list_add_tail(&ltd->ltd_layout_phase_list,
1284 &llmd->llmd_mdt_phase2_list);
1285 spin_unlock(&ltds->ltd_lock);
	/* Peer done (or in another terminal state): stop tracking. */
1288 spin_lock(&ltds->ltd_lock);
1289 list_del_init(&ltd->ltd_layout_phase_list);
1290 list_del_init(&ltd->ltd_layout_list);
1291 spin_unlock(&ltds->ltd_lock);
1297 CDEBUG(D_LFSCK, "%s: layout LFSCK unexpected event: rc = %d\n",
1298 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
1302 if (!laia->laia_shared) {
1304 lfsck_component_put(env, com);
/* Poll the peers still in phase 1: send an async LE_QUERY to every
 * target on the MDT phase-1 list first (if any), then — per the tail
 * check — repeat the same loop for the OST phase-1 list (the loop-back
 * control flow is elided in this extract). llmd_touch_gen prevents
 * visiting a target twice in one pass: touched entries are stamped and
 * rotated to the list tail. Replies are handled by
 * lfsck_layout_master_async_interpret(). */
1310 static int lfsck_layout_master_query_others(const struct lu_env *env,
1311 struct lfsck_component *com)
1313 struct lfsck_thread_info *info = lfsck_env_info(env);
1314 struct lfsck_request *lr = &info->lti_lr;
1315 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1316 struct lfsck_instance *lfsck = com->lc_lfsck;
1317 struct lfsck_layout_master_data *llmd = com->lc_data;
1318 struct ptlrpc_request_set *set;
1319 struct lfsck_tgt_descs *ltds;
1320 struct lfsck_tgt_desc *ltd;
1321 struct list_head *head;
1326 set = ptlrpc_prep_set();
1330 llmd->llmd_touch_gen++;
1331 memset(lr, 0, sizeof(*lr));
1332 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1333 lr->lr_event = LE_QUERY;
1334 lr->lr_active = LFSCK_TYPE_LAYOUT;
1335 laia->laia_com = com;
1337 laia->laia_shared = 0;
	/* Query MDTs still in phase 1 first; otherwise the OSTs. */
1339 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1340 ltds = &lfsck->li_mdt_descs;
1342 head = &llmd->llmd_mdt_phase1_list;
1346 ltds = &lfsck->li_ost_descs;
1347 lr->lr_flags = LEF_TO_OST;
1348 head = &llmd->llmd_ost_phase1_list;
1351 laia->laia_ltds = ltds;
1352 spin_lock(&ltds->ltd_lock);
1353 while (!list_empty(head)) {
1354 ltd = list_entry(head->next,
1355 struct lfsck_tgt_desc,
1356 ltd_layout_phase_list);
	/* Already visited in this generation: pass complete. */
1357 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1360 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1361 list_move_tail(&ltd->ltd_layout_phase_list, head);
1362 atomic_inc(&ltd->ltd_ref);
1363 laia->laia_ltd = ltd;
	/* Drop the spinlock around the (sleeping) RPC submission. */
1364 spin_unlock(&ltds->ltd_lock);
1365 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1366 lfsck_layout_master_async_interpret,
1369 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to query %s %x: "
1370 "rc = %d\n", lfsck_lfsck2name(lfsck),
1371 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1372 ltd->ltd_index, rc);
1376 spin_lock(&ltds->ltd_lock);
1378 spin_unlock(&ltds->ltd_lock);
1380 rc = ptlrpc_set_wait(set);
1382 ptlrpc_set_destroy(set);
	/* MDT pass finished: go back and run the OST pass too
	 * (the jump target is elided in this extract). */
1386 if (!(lr->lr_flags & LEF_TO_OST) &&
1387 list_empty(&llmd->llmd_mdt_phase1_list))
1390 ptlrpc_set_destroy(set);
1392 RETURN(rc1 != 0 ? rc1 : rc);
/* Whether the master may move on to orphan handling: all MDTs have left
 * phase-1, and either some OST already reached phase-2 or no OST remains
 * in phase-1. (Return type line is outside this excerpt.) */
1396 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1398 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1399 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1400 list_empty(&llmd->llmd_ost_phase1_list));
/* Broadcast an LFSCK event (start, stop, phase1/phase2 done, peer exit)
 * from the layout LFSCK master to the involved MDT/OST targets, using
 * async requests collected in a ptlrpc set.
 *
 * NOTE(review): elided excerpt -- case labels (e.g. LE_START, LE_STOP),
 * error paths and closing braces are missing from this view.
 * NOTE(review): "<ds->" / "<d->" look like HTML-entity corruption of
 * "&ltds->" / "&ltd->" -- confirm against upstream lfsck_layout.c. */
1403 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1404 struct lfsck_component *com,
1405 struct lfsck_request *lr)
1407 struct lfsck_thread_info *info = lfsck_env_info(env);
1408 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1409 struct lfsck_instance *lfsck = com->lc_lfsck;
1410 struct lfsck_layout_master_data *llmd = com->lc_data;
1411 struct lfsck_layout *lo = com->lc_file_ram;
1412 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1413 struct ptlrpc_request_set *set;
1414 struct lfsck_tgt_descs *ltds;
1415 struct lfsck_tgt_desc *ltd;
1416 struct lfsck_tgt_desc *next;
1417 struct list_head *head;
1422 set = ptlrpc_prep_set();
1426 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1427 lr->lr_active = LFSCK_TYPE_LAYOUT;
1428 laia->laia_com = com;
1430 laia->laia_shared = 0;
1431 switch (lr->lr_event) {
/* (start case, label elided) */
1433 /* Notify OSTs firstly, then handle other MDTs if needed. */
1434 ltds = &lfsck->li_ost_descs;
1435 laia->laia_ltds = ltds;
1436 down_read(<ds->ltd_rw_sem);
1437 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1438 ltd = lfsck_tgt_get(ltds, idx);
1439 LASSERT(ltd != NULL);
1441 laia->laia_ltd = ltd;
1442 ltd->ltd_layout_done = 0;
1443 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1444 lfsck_layout_master_async_interpret,
1445 laia, LFSCK_NOTIFY);
1447 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1448 "notify %s %x for start: rc = %d\n",
1449 lfsck_lfsck2name(lfsck),
1450 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* A target we could not notify leaves the scan incomplete. */
1453 lo->ll_flags |= LF_INCOMPLETE;
1456 up_read(<ds->ltd_rw_sem);
1459 rc = ptlrpc_set_wait(set);
1461 ptlrpc_set_destroy(set);
1465 if (!(bk->lb_param & LPF_ALL_TGT))
1468 /* link other MDT targets locally. */
1469 ltds = &lfsck->li_mdt_descs;
1470 spin_lock(<ds->ltd_lock);
1471 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1472 ltd = LTD_TGT(ltds, idx);
1473 LASSERT(ltd != NULL);
1475 if (!list_empty(<d->ltd_layout_list))
1478 list_add_tail(<d->ltd_layout_list,
1479 &llmd->llmd_mdt_list);
1480 list_add_tail(<d->ltd_layout_phase_list,
1481 &llmd->llmd_mdt_phase1_list);
1483 spin_unlock(<ds->ltd_lock);
1486 case LE_PHASE2_DONE:
1487 case LE_PEER_EXIT: {
1488 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1489 if (bk->lb_param & LPF_ALL_TGT) {
1490 head = &llmd->llmd_mdt_list;
1491 ltds = &lfsck->li_mdt_descs;
1492 if (lr->lr_event == LE_STOP) {
1493 /* unlink other MDT targets locally. */
1494 spin_lock(<ds->ltd_lock);
1495 list_for_each_entry_safe(ltd, next, head,
1497 list_del_init(<d->ltd_layout_phase_list);
1498 list_del_init(<d->ltd_layout_list);
1500 spin_unlock(<ds->ltd_lock);
1502 lr->lr_flags |= LEF_TO_OST;
1503 head = &llmd->llmd_ost_list;
1504 ltds = &lfsck->li_ost_descs;
1506 lr->lr_flags &= ~LEF_TO_OST;
1509 lr->lr_flags |= LEF_TO_OST;
1510 head = &llmd->llmd_ost_list;
1511 ltds = &lfsck->li_ost_descs;
1515 laia->laia_ltds = ltds;
1516 spin_lock(<ds->ltd_lock);
1517 while (!list_empty(head)) {
1518 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1520 if (!list_empty(<d->ltd_layout_phase_list))
1521 list_del_init(<d->ltd_layout_phase_list);
1522 list_del_init(<d->ltd_layout_list);
/* Pin the target across the async notification. */
1523 atomic_inc(<d->ltd_ref);
1524 laia->laia_ltd = ltd;
1525 spin_unlock(<ds->ltd_lock);
1526 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1527 lfsck_layout_master_async_interpret,
1528 laia, LFSCK_NOTIFY);
1530 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1531 "notify %s %x for stop/phase2_done/"
1532 "peer_exit: rc = %d\n",
1533 lfsck_lfsck2name(lfsck),
1534 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1535 "MDT", ltd->ltd_index, rc);
1538 spin_lock(<ds->ltd_lock);
1540 spin_unlock(<ds->ltd_lock);
1542 rc = ptlrpc_set_wait(set);
1544 ptlrpc_set_destroy(set);
/* After the MDT list, loop again (elided back-edge) for the OSTs. */
1548 if (!(lr->lr_flags & LEF_TO_OST)) {
1549 lr->lr_flags |= LEF_TO_OST;
1550 head = &llmd->llmd_ost_list;
1551 ltds = &lfsck->li_ost_descs;
1556 case LE_PHASE1_DONE:
/* New generation so each MDT is notified at most once in this pass. */
1557 llmd->llmd_touch_gen++;
1558 ltds = &lfsck->li_mdt_descs;
1559 laia->laia_ltds = ltds;
1560 spin_lock(<ds->ltd_lock);
1561 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1562 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1563 struct lfsck_tgt_desc,
1564 ltd_layout_phase_list);
1565 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1568 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1569 list_move_tail(<d->ltd_layout_phase_list,
1570 &llmd->llmd_mdt_phase1_list);
1571 atomic_inc(<d->ltd_ref);
1572 laia->laia_ltd = ltd;
1573 spin_unlock(<ds->ltd_lock);
1574 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1575 lfsck_layout_master_async_interpret,
1576 laia, LFSCK_NOTIFY);
1578 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1579 "notify MDT %x for phase1_done: "
1580 "rc = %d\n", lfsck_lfsck2name(lfsck),
1581 ltd->ltd_index, rc);
1584 spin_lock(<ds->ltd_lock);
1586 spin_unlock(<ds->ltd_lock);
/* default: unknown event. */
1589 CDEBUG(D_LFSCK, "%s: layout LFSCK unexpected event: rc = %d\n",
1590 lfsck_lfsck2name(lfsck), lr->lr_event);
1595 rc = ptlrpc_set_wait(set);
1596 ptlrpc_set_destroy(set);
/* Record the outcome of the layout LFSCK second-phase (double) scan into
 * the on-disk trace file: accumulate phase-2 run time and checked-object
 * count, set the final status from rc, then persist via
 * lfsck_layout_store() under com->lc_sem.
 * NOTE(review): elided excerpt -- the "rc" parameter line and some
 * branches/braces are missing from this view. */
1601 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1602 struct lfsck_component *com,
1605 struct lfsck_instance *lfsck = com->lc_lfsck;
1606 struct lfsck_layout *lo = com->lc_file_ram;
1607 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1609 down_write(&com->lc_sem);
/* HALF_SEC rounds the duration to the nearest second. */
1610 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1611 HALF_SEC - lfsck->li_time_last_checkpoint);
1612 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1613 lo->ll_objs_checked_phase2 += com->lc_new_checked;
/* (rc > 0 branch, condition line elided): scan finished. */
1616 com->lc_journal = 0;
1617 if (lo->ll_flags & LF_INCOMPLETE)
1618 lo->ll_status = LS_PARTIAL;
1620 lo->ll_status = LS_COMPLETED;
/* Only a real (non-dryrun) run may clear the inconsistency flags. */
1621 if (!(bk->lb_param & LPF_DRYRUN))
1622 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1623 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1624 lo->ll_success_count++;
1625 } else if (rc == 0) {
1626 lo->ll_status = lfsck->li_status;
1627 if (lo->ll_status == 0)
1628 lo->ll_status = LS_STOPPED;
/* (else branch, rc < 0) */
1630 lo->ll_status = LS_FAILED;
1633 rc = lfsck_layout_store(env, com);
1634 up_write(&com->lc_sem);
/* Take a local EX ibits lock (the requested @bits) on @obj via the LFSCK
 * namespace, storing the handle in @lh. On enqueue failure the handle is
 * zeroed so lfsck_layout_unlock() on it is a no-op.
 * NOTE(review): elided excerpt -- the success/failure branch bodies are
 * partially missing from this view. */
1639 static int lfsck_layout_lock(const struct lu_env *env,
1640 struct lfsck_component *com,
1641 struct dt_object *obj,
1642 struct lustre_handle *lh, __u64 bits)
1644 struct lfsck_thread_info *info = lfsck_env_info(env);
1645 ldlm_policy_data_t *policy = &info->lti_policy;
1646 struct ldlm_res_id *resid = &info->lti_resid;
1647 struct lfsck_instance *lfsck = com->lc_lfsck;
1648 __u64 flags = LDLM_FL_ATOMIC_CB;
1651 LASSERT(lfsck->li_namespace != NULL);
1653 memset(policy, 0, sizeof(*policy));
1654 policy->l_inodebits.bits = bits;
1655 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1656 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1657 policy, LCK_EX, &flags, ldlm_blocking_ast,
1658 ldlm_completion_ast, NULL, NULL, 0,
1659 LVB_T_NONE, NULL, lh);
1660 if (rc == ELDLM_OK) {
/* (failure path, elided context) invalidate the handle. */
1663 memset(lh, 0, sizeof(*lh));
/* Release the EX lock taken by lfsck_layout_lock() and invalidate the
 * handle; safe to call with an unused (zeroed) handle. */
1670 static void lfsck_layout_unlock(struct lustre_handle *lh)
1672 if (lustre_handle_is_used(lh)) {
1673 ldlm_lock_decref(lh, LCK_EX);
1674 memset(lh, 0, sizeof(*lh));
/* Stop @handle after recording @result in th_result so the commit path
 * can observe the caller's outcome; returns the dt_trans_stop() result
 * (tail of the function elided from this view). */
1678 static int lfsck_layout_trans_stop(const struct lu_env *env,
1679 struct dt_device *dev,
1680 struct thandle *handle, int result)
1684 handle->th_result = result;
1685 rc = dt_trans_stop(env, dev, handle);
1695 * Get the system default stripe size.
1697 * \param[in] env pointer to the thread context
1698 * \param[in] lfsck pointer to the lfsck instance
1699 * \param[out] size pointer to the default stripe size
1701 * \retval 0 for success
1702 * \retval negative error number on failure
/* Read the filesystem default stripe size from the LOV EA on the backend
 * root object into *size (kept little-endian, see comment below).
 * NOTE(review): elided excerpt -- the "__u32 *size" parameter line and
 * some error branches are missing from this view. */
1704 static int lfsck_layout_get_def_stripesize(const struct lu_env *env,
1705 struct lfsck_instance *lfsck,
1708 struct lov_user_md *lum = &lfsck_env_info(env)->lti_lum;
1709 struct dt_object *root;
1712 root = dt_locate(env, lfsck->li_next, &lfsck->li_local_root_fid);
1714 return PTR_ERR(root);
1716 /* Get the default stripe size via xattr_get on the backend root. */
1717 rc = dt_xattr_get(env, root, lfsck_buf_get(env, lum, sizeof(*lum)),
1718 XATTR_NAME_LOV, BYPASS_CAPA);
1720 /* The lum->lmm_stripe_size is LE mode. The *size also
1721 * should be LE mode. So it is unnecessary to convert. */
1722 *size = lum->lmm_stripe_size;
1724 } else if (unlikely(rc == 0)) {
1728 lfsck_object_put(env, root);
1734 * \retval +1: repaired
1735 * \retval 0: did nothing
1736 * \retval -ve: on error
/* Fill one layout-EA stripe slot (@slot) of @parent with the OST-object
 * @cfid at index @ost_idx, clear LOV_PATTERN_F_HOLE if no dummy slot
 * remains, and write the EA back with dt_xattr_set().
 * Returns +1 when repaired, 0 for nothing done, negative on error (per
 * the comment block above this function).
 * NOTE(review): elided excerpt -- the "struct lu_buf *buf" parameter
 * line and some statements/braces are missing from this view. */
1738 static int lfsck_layout_refill_lovea(const struct lu_env *env,
1739 struct thandle *handle,
1740 struct dt_object *parent,
1741 struct lu_fid *cfid,
1743 struct lov_ost_data_v1 *slot,
1744 int fl, __u32 ost_idx)
1746 struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
1747 struct lov_mds_md_v1 *lmm = buf->lb_buf;
/* Stamp the slot with the child's ostid/gen/index (LE on disk). */
1750 fid_to_ostid(cfid, oi);
1751 ostid_cpu_to_le(oi, &slot->l_ost_oi);
1752 slot->l_ost_gen = cpu_to_le32(0);
1753 slot->l_ost_idx = cpu_to_le32(ost_idx);
1755 if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE) {
1756 struct lov_ost_data_v1 *objs;
1760 count = le16_to_cpu(lmm->lmm_stripe_count);
1761 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
1762 objs = &lmm->lmm_objects[0];
1764 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Scan for any remaining dummy slot other than the one just filled. */
1765 for (i = 0; i < count; i++, objs++) {
1766 if (objs != slot && lovea_slot_is_dummy(objs))
1770 /* If the @slot is the last dummy slot to be refilled,
1771 * then drop LOV_PATTERN_F_HOLE from lmm::lmm_pattern. */
1773 lmm->lmm_pattern &= ~cpu_to_le32(LOV_PATTERN_F_HOLE);
1776 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
1785 * \retval +1: repaired
1786 * \retval 0: did nothing
1787 * \retval -ve: on error
/* Extend (or create/reset) @parent's LOV EA so that stripe slot @ea_off
 * exists, padding any gap with zeroed (dummy) slots and marking the EA
 * with LOV_PATTERN_F_HOLE when holes are introduced; finally delegates
 * to lfsck_layout_refill_lovea() to fill the target slot.
 * Returns +1 repaired / 0 nothing / negative error (see comment above).
 * NOTE(review): elided excerpt -- locals (count, gap, hole, rc) and
 * several branches/braces are missing from this view. */
1789 static int lfsck_layout_extend_lovea(const struct lu_env *env,
1790 struct lfsck_instance *lfsck,
1791 struct thandle *handle,
1792 struct dt_object *parent,
1793 struct lu_fid *cfid,
1794 struct lu_buf *buf, int fl,
1795 __u32 ost_idx, __u32 ea_off, bool reset)
1797 struct lov_mds_md_v1 *lmm = buf->lb_buf;
1798 struct lov_ost_data_v1 *objs;
/* Fresh EA (create) or rebuild-from-scratch (reset) path. */
1804 if (fl == LU_XATTR_CREATE || reset) {
1805 __u32 pattern = LOV_PATTERN_RAID0;
1808 LASSERT(buf->lb_len == lov_mds_md_size(count, LOV_MAGIC_V1));
/* Any leading gap before ea_off becomes a hole. */
1810 if (ea_off != 0 || reset) {
1811 pattern |= LOV_PATTERN_F_HOLE;
1815 memset(lmm, 0, buf->lb_len);
1816 lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
1817 lmm->lmm_pattern = cpu_to_le32(pattern);
1818 fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
1819 lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
1821 rc = lfsck_layout_get_def_stripesize(env, lfsck,
1822 &lmm->lmm_stripe_size);
1826 objs = &lmm->lmm_objects[ea_off];
/* Existing EA: append past the current stripe count. */
1828 __u32 magic = le32_to_cpu(lmm->lmm_magic);
1831 count = le16_to_cpu(lmm->lmm_stripe_count);
1832 if (magic == LOV_MAGIC_V1)
1833 objs = &lmm->lmm_objects[count];
1835 objs = &((struct lov_mds_md_v3 *)lmm)->
1838 gap = ea_off - count;
1841 LASSERT(buf->lb_len == lov_mds_md_size(count, magic));
/* Zero the gap slots and flag the hole. */
1844 memset(objs, 0, gap * sizeof(*objs));
1845 lmm->lmm_pattern |= cpu_to_le32(LOV_PATTERN_F_HOLE);
/* Layout changed: bump the layout generation. */
1849 lmm->lmm_layout_gen =
1850 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
1854 lmm->lmm_stripe_count = cpu_to_le16(count);
1855 rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
1858 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant extend layout EA for "
1859 DFID": parent "DFID", OST-index %u, stripe-index %u, fl %d, "
1860 "reset %s, %s LOV EA hole: rc = %d\n",
1861 lfsck_lfsck2name(lfsck), PFID(cfid), PFID(lfsck_dto2fid(parent)),
1862 ost_idx, ea_off, fl, reset ? "yes" : "no",
1863 hole ? "with" : "without", rc);
1869 * \retval +1: repaired
1870 * \retval 0: did nothing
1871 * \retval -ve: on error
/* Rewrite the filter_fid (XATTR_NAME_FID) on the OST-object @cfid so it
 * points back at @parent with stripe index @ea_off, inside a transaction
 * on the child's device @cdev.
 * Returns +1 repaired / 0 nothing / negative error (see comment above).
 * NOTE(review): elided excerpt -- "struct lu_buf *buf" and "int rc"
 * declarations plus error-check lines are missing from this view. */
1873 static int lfsck_layout_update_pfid(const struct lu_env *env,
1874 struct lfsck_component *com,
1875 struct dt_object *parent,
1876 struct lu_fid *cfid,
1877 struct dt_device *cdev, __u32 ea_off)
1879 struct filter_fid *pfid = &lfsck_env_info(env)->lti_new_pfid;
1880 struct dt_object *child;
1881 struct thandle *handle;
1882 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1887 child = lfsck_object_find_by_dev(env, cdev, cfid);
1889 RETURN(PTR_ERR(child));
1891 handle = dt_trans_create(env, cdev);
1893 GOTO(out, rc = PTR_ERR(handle));
1895 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1896 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1897 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
1898 * MDT-object's FID::f_ver, instead it is the OST-object index in its
1899 * parent MDT-object's layout EA. */
1900 pfid->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
1901 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1903 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1907 rc = dt_trans_start(env, cdev, handle);
1911 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
/* Map success to +1 ("repaired"). */
1914 GOTO(stop, rc = (rc == 0 ? 1 : rc));
1917 dt_trans_stop(env, cdev, handle);
1920 lu_object_put(env, &child->do_lu);
1926 * This function will create the MDT-object with the given (partial) LOV EA.
1928 * Under some data corruption cases, the MDT-object of the file may be lost,
1929 * but its OST-objects, or some of them are there. The layout LFSCK needs to
1930 * re-create the MDT-object with the orphan OST-object(s) information.
1932 * On the other hand, the LFSCK may has created some OST-object for repairing
1933 * dangling LOV EA reference, but as the LFSCK processing, it may find that
1934 * the old OST-object is there and should replace the former new created OST
1935 * object. Unfortunately, some others have modified such newly created object.
1936 * To keep the data (both new and old), the LFSCK will create MDT-object with
1937 * new FID to reference the original OST-object.
1939 * \param[in] env pointer to the thread context
1940 * \param[in] com pointer to the lfsck component
1941 * \param[in] ltd pointer to target device descriptor
1942 * \param[in] rec pointer to the record for the orphan OST-object
1943 * \param[in] cfid pointer to FID for the orphan OST-object
1944 * \param[in] infix additional information, such as the FID for original
1945 * MDT-object and the stripe offset in the LOV EA
1946 * \param[in] type the type for describing why the orphan MDT-object is
1947 * created. The rules are as following:
1949 * type "C": Multiple OST-objects claim the same MDT-object and the
1950 * same slot in the layout EA. Then the LFSCK will create
1951 * new MDT-object(s) to hold the conflict OST-object(s).
1953 * type "N": The orphan OST-object does not know which one was the
1954 * real parent MDT-object, so the LFSCK uses new FID for
1955 * its parent MDT-object.
1957 * type "R": The orphan OST-object knows its parent MDT-object FID,
1958 * but does not know the position (the file name) in the
1961 * The orphan name will be like:
1962 * ${FID}-${infix}-${type}-${conflict_version}
1964 * \param[in] ea_off the stripe offset in the LOV EA
1966 * \retval positive on repaired something
1967 * \retval 0 if needs to repair nothing
1968 * \retval negative error number on failure
/* Re-create a lost MDT-object under .lustre/lost+found/MDTxxxx/ for the
 * orphan OST-object @cfid, in one transaction: update the child's pfid
 * remotely, create the parent locally, attach a layout EA referencing the
 * child at @ea_off, insert the named directory entry, and add the linkEA.
 * (The full contract is documented in the comment block above.)
 * NOTE(review): elided excerpt -- the "infix"/"type"/"ea_off" parameter
 * lines, several error checks and closing braces are missing from view. */
1970 static int lfsck_layout_recreate_parent(const struct lu_env *env,
1971 struct lfsck_component *com,
1972 struct lfsck_tgt_desc *ltd,
1973 struct lu_orphan_rec *rec,
1974 struct lu_fid *cfid,
1979 struct lfsck_thread_info *info = lfsck_env_info(env);
1980 struct dt_insert_rec *dtrec = &info->lti_dt_rec;
1981 char *name = info->lti_key;
1982 struct lu_attr *la = &info->lti_la;
1983 struct dt_object_format *dof = &info->lti_dof;
1984 struct lfsck_instance *lfsck = com->lc_lfsck;
1985 struct lu_fid *pfid = &rec->lor_fid;
1986 struct lu_fid *tfid = &info->lti_fid3;
1987 struct dt_device *next = lfsck->li_next;
1988 struct dt_object *pobj = NULL;
1989 struct dt_object *cobj = NULL;
1990 struct thandle *th = NULL;
1991 struct lu_buf *pbuf = NULL;
1992 struct lu_buf *ea_buf = &info->lti_big_buf;
1993 struct lustre_handle lh = { 0 };
1994 struct linkea_data ldata = { 0 };
1995 struct lu_buf linkea_buf;
1996 const struct lu_name *pname;
/* Remember the shared big-buf length so it can be restored on exit. */
1997 int buflen = ea_buf->lb_len;
2002 /* Create .lustre/lost+found/MDTxxxx when needed. */
2003 if (unlikely(lfsck->li_lpf_obj == NULL)) {
2004 rc = lfsck_create_lpf(env, lfsck);
/* Orphan with unknown parent: allocate a brand new parent FID and
 * prepare the child's replacement filter_fid. */
2009 if (fid_is_zero(pfid)) {
2010 struct filter_fid *ff = &info->lti_new_pfid;
2012 rc = lfsck_fid_alloc(env, lfsck, pfid, false);
2016 ff->ff_parent.f_seq = cpu_to_le64(pfid->f_seq);
2017 ff->ff_parent.f_oid = cpu_to_le32(pfid->f_oid);
2018 /* Currently, the filter_fid::ff_parent::f_ver is not the
2019 * real parent MDT-object's FID::f_ver, instead it is the
2020 * OST-object index in its parent MDT-object's layout EA. */
2021 ff->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
2022 pbuf = lfsck_buf_get(env, ff, sizeof(struct filter_fid));
2023 cobj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, cfid);
2025 GOTO(log, rc = PTR_ERR(cobj));
2028 pobj = lfsck_object_find_by_dev(env, lfsck->li_bottom, pfid);
2030 GOTO(put, rc = PTR_ERR(pobj));
2032 LASSERT(infix != NULL);
2033 LASSERT(type != NULL);
/* Name: ${FID}-${infix}-${type}-${conflict_version} (see doc above). */
2036 snprintf(name, NAME_MAX, DFID"%s-%s-%d", PFID(pfid), infix,
2038 rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
2039 (const struct dt_key *)name, BYPASS_CAPA);
2040 if (rc != 0 && rc != -ENOENT)
2044 rc = linkea_data_new(&ldata,
2045 &lfsck_env_info(env)->lti_linkea_buf);
2049 pname = lfsck_name_get_const(env, name, strlen(name));
2050 rc = linkea_add_buf(&ldata, pname, lfsck_dto2fid(lfsck->li_lpf_obj));
/* New parent: regular file, owner from the orphan record. */
2054 memset(la, 0, sizeof(*la));
2055 la->la_uid = rec->lor_uid;
2056 la->la_gid = rec->lor_gid;
2057 la->la_mode = S_IFREG | S_IRUSR;
2058 la->la_valid = LA_MODE | LA_UID | LA_GID;
2060 memset(dof, 0, sizeof(*dof));
2061 dof->dof_type = dt_mode_to_dft(S_IFREG);
/* Size the EA buffer for ea_off + 1 stripes (V1 layout). */
2063 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2065 lu_buf_realloc(ea_buf, rc);
2066 buflen = ea_buf->lb_len;
2067 if (ea_buf->lb_buf == NULL)
2068 GOTO(put, rc = -ENOMEM);
2070 ea_buf->lb_len = rc;
2073 /* Hold update lock on the .lustre/lost+found/MDTxxxx/.
2075 * XXX: Currently, we do not grab the PDO lock as normal create cases,
2076 * because creating MDT-object for orphan OST-object is rare, we
2077 * do not much care about the performance. It can be improved in
2078 * the future when needed. */
2079 rc = lfsck_layout_lock(env, com, lfsck->li_lpf_obj, &lh,
2080 MDS_INODELOCK_UPDATE);
2084 th = dt_trans_create(env, next);
2086 GOTO(unlock, rc = PTR_ERR(th));
2088 /* 1a. Update OST-object's parent information remotely.
2090 * If other subsequent modifications failed, then next LFSCK scanning
2091 * will process the OST-object as orphan again with known parent FID. */
2093 rc = dt_declare_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th);
2098 /* 2a. Create the MDT-object locally. */
2099 rc = dt_declare_create(env, pobj, la, NULL, dof, th);
2103 /* 3a. Add layout EA for the MDT-object. */
2104 rc = dt_declare_xattr_set(env, pobj, ea_buf, XATTR_NAME_LOV,
2105 LU_XATTR_CREATE, th);
2109 /* 4a. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2110 dtrec->rec_fid = pfid;
2111 dtrec->rec_type = S_IFREG;
2112 rc = dt_declare_insert(env, lfsck->li_lpf_obj,
2113 (const struct dt_rec *)dtrec,
2114 (const struct dt_key *)name, th);
2118 /* 5a. insert linkEA for parent. */
2119 linkea_buf.lb_buf = ldata.ld_buf->lb_buf;
2120 linkea_buf.lb_len = ldata.ld_leh->leh_len;
2121 rc = dt_declare_xattr_set(env, pobj, &linkea_buf,
2122 XATTR_NAME_LINK, 0, th);
2126 rc = dt_trans_start(env, next, th);
2130 /* 1b. Update OST-object's parent information remotely. */
2132 rc = dt_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th,
2138 dt_write_lock(env, pobj, 0);
2139 /* 2b. Create the MDT-object locally. */
2140 rc = dt_create(env, pobj, la, NULL, dof, th);
2142 /* 3b. Add layout EA for the MDT-object. */
2143 rc = lfsck_layout_extend_lovea(env, lfsck, th, pobj, cfid,
2144 ea_buf, LU_XATTR_CREATE,
2145 ltd->ltd_index, ea_off, false);
2146 dt_write_unlock(env, pobj);
2150 /* 4b. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2151 rc = dt_insert(env, lfsck->li_lpf_obj, (const struct dt_rec *)dtrec,
2152 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
2156 /* 5b. insert linkEA for parent. */
2157 rc = dt_xattr_set(env, pobj, &linkea_buf,
2158 XATTR_NAME_LINK, 0, th, BYPASS_CAPA);
2163 dt_trans_stop(env, next, th);
2166 lfsck_layout_unlock(&lh);
/* Cleanup path: drop references and restore the shared buffer length. */
2169 if (cobj != NULL && !IS_ERR(cobj))
2170 lu_object_put(env, &cobj->do_lu);
2171 if (pobj != NULL && !IS_ERR(pobj))
2172 lu_object_put(env, &pobj->do_lu);
2173 ea_buf->lb_len = buflen;
2177 CDEBUG(D_LFSCK, "%s layout LFSCK assistant failed to "
2178 "recreate the lost MDT-object: parent "DFID
2179 ", child "DFID", OST-index %u, stripe-index %u, "
2180 "infix %s, type %s: rc = %d\n",
2181 lfsck_lfsck2name(lfsck), PFID(pfid), PFID(cfid),
2182 ltd->ltd_index, ea_off, infix, type, rc);
/* Any non-negative outcome is reported as "repaired" (+1). */
2184 return rc >= 0 ? 1 : rc;
/* Ask the OST holding @fid (OST @index, parameter line elided) to
 * conditionally destroy that OST-object, by sending a synchronous
 * LE_CONDITIONAL_DESTROY LFSCK_NOTIFY RPC. Peers lacking
 * OBD_CONNECT_LFSCK get -EOPNOTSUPP.
 * NOTE(review): elided excerpt -- the "__u32 index" parameter line,
 * request-population lines and error branches are missing from view. */
2187 static int lfsck_layout_master_conditional_destroy(const struct lu_env *env,
2188 struct lfsck_component *com,
2189 const struct lu_fid *fid,
2192 struct lfsck_thread_info *info = lfsck_env_info(env);
2193 struct lfsck_request *lr = &info->lti_lr;
2194 struct lfsck_instance *lfsck = com->lc_lfsck;
2195 struct lfsck_tgt_desc *ltd;
2196 struct ptlrpc_request *req;
2197 struct lfsck_request *tmp;
2198 struct obd_export *exp;
2202 ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
2203 if (unlikely(ltd == NULL))
2207 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
2208 GOTO(put, rc = -EOPNOTSUPP);
2210 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2212 GOTO(put, rc = -ENOMEM);
2214 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2216 ptlrpc_request_free(req);
2221 memset(lr, 0, sizeof(*lr));
2222 lr->lr_event = LE_CONDITIONAL_DESTROY;
2223 lr->lr_active = LFSCK_TYPE_LAYOUT;
/* Copy the request into the RPC body (assignment lines elided). */
2226 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2228 ptlrpc_request_set_replen(req);
2230 rc = ptlrpc_queue_wait(req);
2231 ptlrpc_req_finished(req);
/* OST-side handler for LE_CONDITIONAL_DESTROY: destroy the OST-object
 * named by lr->lr_fid only if it is provably an empty placeholder that a
 * former LFSCK run created for a dangling reference (la_ctime == 0 and
 * no S_ISUID) and it has not been modified since. Otherwise -ETXTBSY.
 * An EX extent lock on [0, EOF] is taken to drain in-flight writes, and
 * the attributes are re-checked under the write lock before destroying.
 * Fix: corrected "reparing" -> "repairing" in the CDEBUG message.
 * NOTE(review): elided excerpt -- flags/rc declarations, some error
 * checks and closing braces are missing from this view. */
2241 static int lfsck_layout_slave_conditional_destroy(const struct lu_env *env,
2242 struct lfsck_component *com,
2243 struct lfsck_request *lr)
2245 struct lfsck_thread_info *info = lfsck_env_info(env);
2246 struct lu_attr *la = &info->lti_la;
2247 ldlm_policy_data_t *policy = &info->lti_policy;
2248 struct ldlm_res_id *resid = &info->lti_resid;
2249 struct lfsck_instance *lfsck = com->lc_lfsck;
2250 struct dt_device *dev = lfsck->li_bottom;
2251 struct lu_fid *fid = &lr->lr_fid;
2252 struct dt_object *obj;
2253 struct thandle *th = NULL;
2254 struct lustre_handle lh = { 0 };
2259 obj = lfsck_object_find_by_dev(env, dev, fid);
2261 RETURN(PTR_ERR(obj));
2263 dt_read_lock(env, obj, 0);
2264 if (dt_object_exists(obj) == 0) {
2265 dt_read_unlock(env, obj);
2267 GOTO(put, rc = -ENOENT);
2270 /* Get obj's attr without lock firstly. */
2271 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2272 dt_read_unlock(env, obj);
/* Not an untouched LFSCK placeholder: refuse to destroy. */
2276 if (likely(la->la_ctime != 0 || la->la_mode & S_ISUID))
2277 GOTO(put, rc = -ETXTBSY);
2279 /* Acquire extent lock on [0, EOF] to sync with all possible written. */
2280 LASSERT(lfsck->li_namespace != NULL);
2282 memset(policy, 0, sizeof(*policy));
2283 policy->l_extent.end = OBD_OBJECT_EOF;
2284 ost_fid_build_resid(fid, resid);
2285 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_EXTENT,
2286 policy, LCK_EX, &flags, ldlm_blocking_ast,
2287 ldlm_completion_ast, NULL, NULL, 0,
2288 LVB_T_NONE, NULL, &lh);
2290 GOTO(put, rc = -EIO);
2292 dt_write_lock(env, obj, 0);
2293 /* Get obj's attr within lock again. */
2294 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Raced with a writer after the first check: bail out. */
2298 if (la->la_ctime != 0)
2299 GOTO(unlock, rc = -ETXTBSY);
2301 th = dt_trans_create(env, dev);
2303 GOTO(unlock, rc = PTR_ERR(th));
2305 rc = dt_declare_ref_del(env, obj, th);
2309 rc = dt_declare_destroy(env, obj, th);
2313 rc = dt_trans_start_local(env, dev, th);
2317 rc = dt_ref_del(env, obj, th);
2321 rc = dt_destroy(env, obj, th);
2323 CDEBUG(D_LFSCK, "%s: layout LFSCK destroyed the empty "
2324 "OST-object "DFID" that was created for repairing "
2325 "dangling referenced case. But the original missed "
2326 "OST-object is found now.\n",
2327 lfsck_lfsck2name(lfsck), PFID(fid));
2332 dt_trans_stop(env, dev, th);
2335 dt_write_unlock(env, obj);
2336 ldlm_lock_decref(&lh, LCK_EX);
2339 lu_object_put(env, &obj->do_lu);
2345 * Some OST-object has occupied the specified layout EA slot.
2346 * Such OST-object may be generated by the LFSCK when repair
2347 * dangling referenced MDT-object, which can be indicated by
2348 * attr::la_ctime == 0 but without S_ISUID in la_mode. If it
2349 * is true and such OST-object has not been modified yet, we
2350 * will replace it with the orphan OST-object; otherwise the
2351 * LFSCK will create new MDT-object to reference the orphan.
2353 * \retval +1: repaired
2354 * \retval 0: did nothing
2355 * \retval -ve: on error
/* An orphan OST-object claims a layout-EA slot already occupied by
 * another OST-object: try to conditionally destroy the occupant (if it
 * was an unmodified LFSCK placeholder) and put the orphan in its slot;
 * on -ETXTBSY, create a brand-new parent MDT-object ("C"-type orphan
 * name) for the orphan instead. See the comment block above.
 * NOTE(review): elided excerpt -- rc declaration, some error checks and
 * closing braces are missing from this view. */
2357 static int lfsck_layout_conflict_create(const struct lu_env *env,
2358 struct lfsck_component *com,
2359 struct lfsck_tgt_desc *ltd,
2360 struct lu_orphan_rec *rec,
2361 struct dt_object *parent,
2362 struct lu_fid *cfid,
2363 struct lu_buf *ea_buf,
2364 struct lov_ost_data_v1 *slot,
2365 __u32 ea_off, __u32 ori_len)
2367 struct lfsck_thread_info *info = lfsck_env_info(env);
2368 struct lu_fid *cfid2 = &info->lti_fid2;
2369 struct ost_id *oi = &info->lti_oi;
2370 char *infix = info->lti_tmpbuf;
2371 struct lov_mds_md_v1 *lmm = ea_buf->lb_buf;
2372 struct dt_device *dev = com->lc_lfsck->li_bottom;
2373 struct thandle *th = NULL;
2374 struct lustre_handle lh = { 0 };
2375 __u32 ost_idx2 = le32_to_cpu(slot->l_ost_idx);
/* Resolve the FID of the OST-object currently occupying the slot. */
2379 ostid_le_to_cpu(&slot->l_ost_oi, oi);
2380 ostid_to_fid(cfid2, oi, ost_idx2);
2382 /* Hold layout lock on the parent to prevent others from accessing. */
2383 rc = lfsck_layout_lock(env, com, parent, &lh,
2384 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2388 rc = lfsck_layout_master_conditional_destroy(env, com, cfid2, ost_idx2);
2390 /* If the conflict OST-object is not created for fixing dangling
2391 * referenced MDT-object in former LFSCK check/repair, or it has
2392 * been modified by others, then we cannot destroy it. Re-create
2393 * a new MDT-object for the orphan OST-object. */
2394 if (rc == -ETXTBSY) {
2395 /* No need the layout lock on the original parent. */
2396 lfsck_layout_unlock(&lh);
2397 ea_buf->lb_len = ori_len;
/* Zero parent FID forces lfsck_layout_recreate_parent() to allocate
 * a fresh FID for the new MDT-object. */
2399 fid_zero(&rec->lor_fid);
2400 snprintf(infix, LFSCK_TMPBUF_LEN, "-"DFID"-%x",
2401 PFID(lu_object_fid(&parent->do_lu)), ea_off);
2402 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2403 infix, "C", ea_off);
2408 if (rc != 0 && rc != -ENOENT)
2411 th = dt_trans_create(env, dev);
2413 GOTO(unlock, rc = PTR_ERR(th));
2415 rc = dt_declare_xattr_set(env, parent, ea_buf, XATTR_NAME_LOV,
2416 LU_XATTR_REPLACE, th);
2420 rc = dt_trans_start_local(env, dev, th);
2424 dt_write_lock(env, parent, 0);
/* Slot will be rewritten: bump the layout generation. */
2425 lmm->lmm_layout_gen = cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2426 rc = lfsck_layout_refill_lovea(env, th, parent, cfid, ea_buf, slot,
2427 LU_XATTR_REPLACE, ltd->ltd_index);
2428 dt_write_unlock(env, parent);
2433 dt_trans_stop(env, dev, th);
2436 lfsck_layout_unlock(&lh);
/* Restore the caller's shared buffer length. */
2439 ea_buf->lb_len = ori_len;
2441 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant replaced the conflict "
2442 "OST-object "DFID" on the OST %x with the orphan "DFID" on "
2443 "the OST %x: parent "DFID", stripe-index %u: rc = %d\n",
2444 lfsck_lfsck2name(com->lc_lfsck), PFID(cfid2), ost_idx2,
2445 PFID(cfid), ltd->ltd_index, PFID(lfsck_dto2fid(parent)),
2448 return rc >= 0 ? 1 : rc;
2452 * \retval +1: repaired
2453 * \retval 0: did nothing
2454 * \retval -ve: on error
2456 static int lfsck_layout_recreate_lovea(const struct lu_env *env,
2457 struct lfsck_component *com,
2458 struct lfsck_tgt_desc *ltd,
2459 struct lu_orphan_rec *rec,
2460 struct dt_object *parent,
2461 struct lu_fid *cfid,
2462 __u32 ost_idx, __u32 ea_off)
2464 struct lfsck_thread_info *info = lfsck_env_info(env);
2465 struct lu_buf *buf = &info->lti_big_buf;
2466 struct lu_fid *fid = &info->lti_fid2;
2467 struct ost_id *oi = &info->lti_oi;
2468 struct lfsck_instance *lfsck = com->lc_lfsck;
2469 struct dt_device *dt = lfsck->li_bottom;
2470 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2471 struct thandle *handle = NULL;
2472 size_t buflen = buf->lb_len;
2474 struct lov_mds_md_v1 *lmm;
2475 struct lov_ost_data_v1 *objs;
2476 struct lustre_handle lh = { 0 };
2483 bool locked = false;
2486 rc = lfsck_layout_lock(env, com, parent, &lh,
2487 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2489 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to recreate "
2490 "LOV EA for "DFID": parent "DFID", OST-index %u, "
2491 "stripe-index %u: rc = %d\n",
2492 lfsck_lfsck2name(lfsck), PFID(cfid),
2493 PFID(lfsck_dto2fid(parent)), ost_idx, ea_off, rc);
2500 dt_write_unlock(env, parent);
2504 if (handle != NULL) {
2505 dt_trans_stop(env, dt, handle);
2510 GOTO(unlock_layout, rc);
2513 if (buf->lb_len < lovea_size) {
2514 lu_buf_realloc(buf, lovea_size);
2515 buflen = buf->lb_len;
2516 if (buf->lb_buf == NULL)
2517 GOTO(unlock_layout, rc = -ENOMEM);
2520 if (!(bk->lb_param & LPF_DRYRUN)) {
2521 handle = dt_trans_create(env, dt);
2523 GOTO(unlock_layout, rc = PTR_ERR(handle));
2525 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2530 rc = dt_trans_start_local(env, dt, handle);
2535 dt_write_lock(env, parent, 0);
2537 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2538 if (rc == -ERANGE) {
2539 rc = dt_xattr_get(env, parent, &LU_BUF_NULL, XATTR_NAME_LOV,
2543 } else if (rc == -ENODATA || rc == 0) {
2544 lovea_size = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2545 /* If the declared is not big enough, re-try. */
2546 if (buf->lb_len < lovea_size) {
2550 fl = LU_XATTR_CREATE;
2551 } else if (rc < 0) {
2552 GOTO(unlock_parent, rc);
2553 } else if (unlikely(buf->lb_len == 0)) {
2556 fl = LU_XATTR_REPLACE;
2560 if (fl == LU_XATTR_CREATE) {
2561 if (bk->lb_param & LPF_DRYRUN)
2562 GOTO(unlock_parent, rc = 1);
2564 LASSERT(buf->lb_len >= lovea_size);
2566 buf->lb_len = lovea_size;
2567 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2568 buf, fl, ost_idx, ea_off, false);
2570 GOTO(unlock_parent, rc);
2574 rc1 = lfsck_layout_verify_header(lmm);
2576 /* If the LOV EA crashed, the rebuild it. */
2577 if (rc1 == -EINVAL) {
2578 if (bk->lb_param & LPF_DRYRUN)
2579 GOTO(unlock_parent, rc = 1);
2581 LASSERT(buf->lb_len >= lovea_size);
2583 buf->lb_len = lovea_size;
2584 memset(lmm, 0, buf->lb_len);
2585 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2586 buf, fl, ost_idx, ea_off, true);
2588 GOTO(unlock_parent, rc);
2591 /* For other unknown magic/pattern, keep the current LOV EA. */
2593 GOTO(unlock_parent, rc = rc1);
2595 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2596 * been verified in lfsck_layout_verify_header() already. If some
2597 * new magic introduced in the future, then layout LFSCK needs to
2598 * be updated also. */
2599 magic = le32_to_cpu(lmm->lmm_magic);
2600 if (magic == LOV_MAGIC_V1) {
2601 objs = &lmm->lmm_objects[0];
2603 LASSERT(magic == LOV_MAGIC_V3);
2604 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2607 count = le16_to_cpu(lmm->lmm_stripe_count);
2609 GOTO(unlock_parent, rc = -EINVAL);
2612 /* Exceed the current end of MDT-object layout EA. Then extend it. */
2613 if (count <= ea_off) {
2614 if (bk->lb_param & LPF_DRYRUN)
2615 GOTO(unlock_parent, rc = 1);
2617 lovea_size = lov_mds_md_size(ea_off + 1, magic);
2618 /* If the declared is not big enough, re-try. */
2619 if (buf->lb_len < lovea_size) {
2623 buf->lb_len = lovea_size;
2624 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2625 buf, fl, ost_idx, ea_off, false);
2627 GOTO(unlock_parent, rc);
2630 LASSERTF(rc > 0, "invalid rc = %d\n", rc);
2632 buf->lb_len = lovea_size;
2633 for (i = 0; i < count; i++, objs++) {
2634 /* The MDT-object was created via lfsck_layout_recover_create()
2635 * by others before, and we fill the dummy layout EA. */
2636 if (lovea_slot_is_dummy(objs)) {
2640 if (bk->lb_param & LPF_DRYRUN)
2641 GOTO(unlock_parent, rc = 1);
2643 lmm->lmm_layout_gen =
2644 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2645 rc = lfsck_layout_refill_lovea(env, handle, parent,
2646 cfid, buf, objs, fl,
2649 CDEBUG(D_LFSCK, "%s layout LFSCK assistant fill "
2650 "dummy layout slot for "DFID": parent "DFID
2651 ", OST-index %u, stripe-index %u: rc = %d\n",
2652 lfsck_lfsck2name(lfsck), PFID(cfid),
2653 PFID(lfsck_dto2fid(parent)), ost_idx, i, rc);
2655 GOTO(unlock_parent, rc);
2658 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2659 ostid_to_fid(fid, oi, le32_to_cpu(objs->l_ost_idx));
2660 /* It should be rare case, the slot is there, but the LFSCK
2661 * does not handle it during the first-phase cycle scanning. */
2662 if (unlikely(lu_fid_eq(fid, cfid))) {
2664 GOTO(unlock_parent, rc = 0);
2666 /* Rare case that the OST-object index
2667 * does not match the parent MDT-object
2668 * layout EA. We trust the later one. */
2669 if (bk->lb_param & LPF_DRYRUN)
2670 GOTO(unlock_parent, rc = 1);
2672 dt_write_unlock(env, parent);
2674 dt_trans_stop(env, dt, handle);
2675 lfsck_layout_unlock(&lh);
2676 buf->lb_len = buflen;
2677 rc = lfsck_layout_update_pfid(env, com, parent,
2678 cfid, ltd->ltd_tgt, i);
2680 CDEBUG(D_LFSCK, "%s layout LFSCK assistant "
2681 "updated OST-object's pfid for "DFID
2682 ": parent "DFID", OST-index %u, "
2683 "stripe-index %u: rc = %d\n",
2684 lfsck_lfsck2name(lfsck), PFID(cfid),
2685 PFID(lfsck_dto2fid(parent)),
2686 ltd->ltd_index, i, rc);
2693 /* The MDT-object exists, but related layout EA slot is occupied
2695 if (bk->lb_param & LPF_DRYRUN)
2696 GOTO(unlock_parent, rc = 1);
2698 dt_write_unlock(env, parent);
2700 dt_trans_stop(env, dt, handle);
2701 lfsck_layout_unlock(&lh);
2702 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
2703 objs = &lmm->lmm_objects[ea_off];
2705 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
2706 rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
2707 buf, objs, ea_off, buflen);
2713 dt_write_unlock(env, parent);
2717 dt_trans_stop(env, dt, handle);
2720 lfsck_layout_unlock(&lh);
2721 buf->lb_len = buflen;
2726 static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
2727 struct lfsck_component *com,
2728 struct lfsck_tgt_desc *ltd,
2729 struct lu_orphan_rec *rec,
2730 struct lu_fid *cfid)
2732 struct lfsck_layout *lo = com->lc_file_ram;
2733 struct lu_fid *pfid = &rec->lor_fid;
2734 struct dt_object *parent = NULL;
2735 __u32 ea_off = pfid->f_stripe_idx;
2739 if (!fid_is_sane(cfid))
2740 GOTO(out, rc = -EINVAL);
2742 if (fid_is_zero(pfid)) {
2743 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2749 if (!fid_is_sane(pfid))
2750 GOTO(out, rc = -EINVAL);
2752 parent = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
2754 GOTO(out, rc = PTR_ERR(parent));
2756 if (unlikely(dt_object_remote(parent) != 0))
2757 GOTO(put, rc = -EXDEV);
2759 if (dt_object_exists(parent) == 0) {
2760 lu_object_put(env, &parent->do_lu);
2761 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2766 if (!S_ISREG(lu_object_attr(&parent->do_lu)))
2767 GOTO(put, rc = -EISDIR);
2769 rc = lfsck_layout_recreate_lovea(env, com, ltd, rec, parent, cfid,
2770 ltd->ltd_index, ea_off);
2776 lu_object_put(env, &parent->do_lu);
2778 /* The layout EA is changed, need to be reloaded next time. */
2779 lu_object_put_nocache(env, &parent->do_lu);
2782 down_write(&com->lc_sem);
2783 com->lc_new_scanned++;
2784 com->lc_new_checked++;
2786 lo->ll_objs_repaired[LLIT_ORPHAN - 1]++;
2788 } else if (rc < 0) {
2789 lo->ll_objs_failed_phase2++;
2791 up_write(&com->lc_sem);
/*
 * Scan one OST's orphan-object index during layout LFSCK phase 2.
 *
 * Builds the per-OST "orphan index" FID (IDIF seq, id 0, OST index as
 * the f_ver part), iterates that index with the dt iterator ops and
 * feeds each returned orphan record to lfsck_layout_scan_orphan_one().
 * The latest scanned FID is checkpointed in
 * com->lc_fid_latest_scanned_phase2.
 *
 * \retval	0 on success (positive internal rc is collapsed to 0),
 *		negative errno on failure
 */
2796 static int lfsck_layout_scan_orphan(const struct lu_env *env,
2797 struct lfsck_component *com,
2798 struct lfsck_tgt_desc *ltd)
2800 struct lfsck_layout *lo = com->lc_file_ram;
2801 struct lfsck_instance *lfsck = com->lc_lfsck;
2802 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2803 struct lfsck_thread_info *info = lfsck_env_info(env);
2804 struct ost_id *oi = &info->lti_oi;
2805 struct lu_fid *fid = &info->lti_fid;
2806 struct dt_object *obj;
2807 const struct dt_it_ops *iops;
2812 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant starts the orphan "
2813 "scanning for OST%04x\n",
2814 lfsck_lfsck2name(lfsck), ltd->ltd_index);
/* Synthesize the special FID that names the OST's orphan index. */
2816 ostid_set_seq(oi, FID_SEQ_IDIF);
2817 ostid_set_id(oi, 0);
2818 ostid_to_fid(fid, oi, ltd->ltd_index);
2819 obj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, fid);
2820 if (unlikely(IS_ERR(obj)))
2821 GOTO(log, rc = PTR_ERR(obj));
2823 rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_orphan_features);
2827 iops = &obj->do_index_ops->dio_it;
2828 di = iops->init(env, obj, 0, BYPASS_CAPA);
2830 GOTO(put, rc = PTR_ERR(di));
2832 rc = iops->load(env, di, 0);
2834 /* -ESRCH means that the orphan OST-objects rbtree has been
2835 * cleaned up because of the OSS server restart or other errors. */
2836 lo->ll_flags |= LF_INCOMPLETE;
2841 rc = iops->next(env, di);
2853 struct lu_orphan_rec *rec = &info->lti_rec;
/* Fault-injection hook: optionally stall here until told to stop. */
2855 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY3) &&
2857 struct ptlrpc_thread *thread = &lfsck->li_thread;
2858 struct l_wait_info lwi;
2860 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
2862 l_wait_event(thread->t_ctl_waitq,
2863 !thread_is_running(thread),
/* Checkpoint progress, fetch the record, and process the orphan. */
2867 key = iops->key(env, di);
2868 com->lc_fid_latest_scanned_phase2 = *(struct lu_fid *)key;
2869 rc = iops->rec(env, di, (struct dt_rec *)rec, 0);
2871 rc = lfsck_layout_scan_orphan_one(env, com, ltd, rec,
2872 &com->lc_fid_latest_scanned_phase2);
2873 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
2876 lfsck_control_speed_by_self(com);
2878 rc = iops->next(env, di);
2879 } while (rc < 0 && !(bk->lb_param & LPF_FAILOUT));
2886 iops->fini(env, di);
2888 lu_object_put(env, &obj->do_lu);
2891 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant finished the orphan "
2892 "scanning for OST%04x: rc = %d\n",
2893 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
2895 return rc > 0 ? 0 : rc;
2898 /* For an MDT-object with a dangling reference, we need to repair the
2899 * inconsistency according to the LFSCK sponsor's requirement:
2901 * 1) Keep the inconsistency there and report the inconsistency case,
2902 * then give the chance to the application to find related issues,
2903 * and the users can make the decision about how to handle it with
2904 * more human knowledge. (by default)
2906 * 2) Re-create the missed OST-object with the FID/owner information. */
/*
 * Repair a dangling layout reference: the parent MDT-object's LOV EA
 * points at an OST-object (@llr->llr_child) that does not exist.
 * Depending on the LPF_CREATE_OSTOBJ bookmark parameter this either
 * only reports the case or re-creates the missing OST-object with the
 * parent's owner information and a back-pointing filter_fid xattr,
 * all inside a single transaction under the layout/xattr ibits lock.
 *
 * \param[in] pla	attributes of the parent MDT-object (owner source)
 *
 * \retval	positive if nothing was done (e.g. parent is dying),
 *		0 on success, negative errno on failure
 */
2907 static int lfsck_layout_repair_dangling(const struct lu_env *env,
2908 struct lfsck_component *com,
2909 struct lfsck_layout_req *llr,
2910 const struct lu_attr *pla)
2912 struct lfsck_thread_info *info = lfsck_env_info(env);
2913 struct filter_fid *pfid = &info->lti_new_pfid;
2914 struct dt_allocation_hint *hint = &info->lti_hint;
2915 struct lu_attr *cla = &info->lti_la2;
2916 struct dt_object *parent = llr->llr_parent->llo_obj;
2917 struct dt_object *child = llr->llr_child;
2918 struct dt_device *dev = lfsck_obj2dt_dev(child);
2919 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2920 struct thandle *handle;
2922 struct lustre_handle lh = { 0 };
/* Only re-create the OST-object when the sponsor asked for it. */
2927 if (com->lc_lfsck->li_bookmark_ram.lb_param & LPF_CREATE_OSTOBJ)
/* The new OST-object inherits the parent's owner; mode is a plain
 * regular-file default. */
2935 memset(cla, 0, sizeof(*cla));
2936 cla->la_uid = pla->la_uid;
2937 cla->la_gid = pla->la_gid;
2938 cla->la_mode = S_IFREG | 0666;
2939 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
2940 LA_ATIME | LA_MTIME | LA_CTIME;
2942 rc = lfsck_layout_lock(env, com, parent, &lh,
2943 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2947 handle = dt_trans_create(env, dev);
2949 GOTO(unlock1, rc = PTR_ERR(handle));
2951 hint->dah_parent = NULL;
2953 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2954 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2955 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2956 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2957 * parent MDT-object's layout EA. */
2958 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2959 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
2961 rc = dt_declare_create(env, child, cla, hint, NULL, handle);
2965 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
2966 LU_XATTR_CREATE, handle);
2970 rc = dt_trans_start(env, dev, handle);
/* Parent read lock serializes against concurrent destroy. */
2974 dt_read_lock(env, parent, 0);
2975 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2976 GOTO(unlock2, rc = 1);
2978 rc = dt_create(env, child, cla, hint, NULL, handle);
2982 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
2983 handle, BYPASS_CAPA);
2988 dt_read_unlock(env, parent);
2991 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2994 lfsck_layout_unlock(&lh);
2997 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant found dangling "
2998 "reference for: parent "DFID", child "DFID", OST-index %u, "
2999 "stripe-index %u, owner %u/%u. %s: rc = %d\n",
3000 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3001 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx,
3002 llr->llr_lov_idx, pla->la_uid, pla->la_gid,
3003 create ? "Create the lost OST-object as required" :
3004 "Keep the MDT-object there by default", rc);
3009 /* If the OST-object does not recognize the MDT-object as its parent, and
3010 * there is no other MDT-object that claims to be its parent, then just trust
3011 * the given MDT-object as its parent. So update the OST-object filter_fid. */
/*
 * Repair an unmatched MDT/OST pair: rewrite the OST-object's
 * XATTR_NAME_FID back-pointer (filter_fid) so it references the given
 * parent MDT-object and stripe index, and re-sync the child's owner
 * from the parent's latest attributes.  Runs as one transaction under
 * the layout/xattr ibits lock plus the parent's dt write lock.
 *
 * \param[in] pla	parent attributes (owner baseline)
 *
 * \retval	positive if the parent vanished meanwhile (nothing done),
 *		0 on success, negative errno on failure
 */
3012 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
3013 struct lfsck_component *com,
3014 struct lfsck_layout_req *llr,
3015 const struct lu_attr *pla)
3017 struct lfsck_thread_info *info = lfsck_env_info(env);
3018 struct filter_fid *pfid = &info->lti_new_pfid;
3019 struct lu_attr *tla = &info->lti_la3;
3020 struct dt_object *parent = llr->llr_parent->llo_obj;
3021 struct dt_object *child = llr->llr_child;
3022 struct dt_device *dev = lfsck_obj2dt_dev(child);
3023 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
3024 struct thandle *handle;
3026 struct lustre_handle lh = { 0 };
3030 rc = lfsck_layout_lock(env, com, parent, &lh,
3031 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
3035 handle = dt_trans_create(env, dev);
3037 GOTO(unlock1, rc = PTR_ERR(handle));
3039 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
3040 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
3041 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
3042 * MDT-object's FID::f_ver, instead it is the OST-object index in its
3043 * parent MDT-object's layout EA. */
3044 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
3045 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
3047 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
3051 tla->la_valid = LA_UID | LA_GID;
3052 tla->la_uid = pla->la_uid;
3053 tla->la_gid = pla->la_gid;
3054 rc = dt_declare_attr_set(env, child, tla, handle);
3058 rc = dt_trans_start(env, dev, handle);
3062 dt_write_lock(env, parent, 0);
3063 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3064 GOTO(unlock2, rc = 1);
3066 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
3071 /* Get the latest parent's owner. */
3072 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3076 tla->la_valid = LA_UID | LA_GID;
3077 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3082 dt_write_unlock(env, parent);
3085 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3088 lfsck_layout_unlock(&lh);
3091 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired unmatched "
3092 "MDT-OST pair for: parent "DFID", child "DFID", OST-index %u, "
3093 "stripe-index %u, owner %u/%u: rc = %d\n",
3094 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3095 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx, llr->llr_lov_idx,
3096 pla->la_uid, pla->la_gid, rc);
3101 /* If there are more than one MDT-objects claim as the OST-object's parent,
3102 * and the OST-object only recognizes one of them, then we need to generate
3103 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
/*
 * Repair a multiply-referenced OST-object: several MDT-objects claim
 * the same OST-object and this parent is not the one the OST-object
 * recognizes.  Allocate a brand-new anonymous OST-object, create it,
 * and splice its id into this parent's LOV EA at the conflicting
 * stripe slot (bumping lmm_layout_gen), leaving the original
 * OST-object to the parent it actually points back to.
 *
 * \retval	positive on repaired, 0 when nothing needed (layout
 *		changed underneath us, parent dying, EA gone),
 *		negative errno on failure
 */
3104 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
3105 struct lfsck_component *com,
3106 struct lfsck_layout_req *llr,
3110 struct lfsck_thread_info *info = lfsck_env_info(env);
3111 struct dt_allocation_hint *hint = &info->lti_hint;
3112 struct dt_object_format *dof = &info->lti_dof;
3113 struct dt_device *pdev = com->lc_lfsck->li_next;
3114 struct ost_id *oi = &info->lti_oi;
3115 struct dt_object *parent = llr->llr_parent->llo_obj;
3116 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
3117 struct dt_object *child = NULL;
3118 struct lu_device *d = &cdev->dd_lu_dev;
3119 struct lu_object *o = NULL;
3120 struct thandle *handle;
3121 struct lov_mds_md_v1 *lmm;
3122 struct lov_ost_data_v1 *objs;
3123 struct lustre_handle lh = { 0 };
3128 rc = lfsck_layout_lock(env, com, parent, &lh,
3129 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
3133 handle = dt_trans_create(env, pdev);
3135 GOTO(unlock1, rc = PTR_ERR(handle));
/* Allocate an anonymous (new-FID) object on the child's device. */
3137 o = lu_object_anon(env, d, NULL);
3139 GOTO(stop, rc = PTR_ERR(o));
3141 child = container_of(o, struct dt_object, do_lu);
3142 o = lu_object_locate(o->lo_header, d->ld_type);
3143 if (unlikely(o == NULL))
3144 GOTO(stop, rc = -EINVAL);
3146 child = container_of(o, struct dt_object, do_lu);
3147 la->la_valid = LA_UID | LA_GID;
3148 hint->dah_parent = NULL;
3150 dof->dof_type = DFT_REGULAR;
3151 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
3155 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3156 LU_XATTR_REPLACE, handle);
3160 rc = dt_trans_start(env, pdev, handle);
3164 dt_write_lock(env, parent, 0);
3165 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3166 GOTO(unlock2, rc = 0);
/* Re-read the LOV EA under lock; bail out if it disappeared. */
3168 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
3169 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
3170 GOTO(unlock2, rc = 0);
3173 /* Someone changed the layout during the LFSCK, no need to repair then. */
3174 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
3175 GOTO(unlock2, rc = 0);
3177 rc = dt_create(env, child, la, hint, dof, handle);
3181 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3182 * been verified in lfsck_layout_verify_header() already. If some
3183 * new magic introduced in the future, then layout LFSCK needs to
3184 * be updated also. */
3185 magic = le32_to_cpu(lmm->lmm_magic);
3186 if (magic == LOV_MAGIC_V1) {
3187 objs = &lmm->lmm_objects[0];
3189 LASSERT(magic == LOV_MAGIC_V3);
3190 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Point the conflicting stripe slot at the freshly created object
 * and bump the layout generation so readers notice the change. */
3193 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
3194 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
3195 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
3196 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
3197 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
3198 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3199 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
3201 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
3204 dt_write_unlock(env, parent);
3208 lu_object_put(env, &child->do_lu);
3210 dt_trans_stop(env, pdev, handle);
3213 lfsck_layout_unlock(&lh);
3216 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired multiple "
3217 "references for: parent "DFID", OST-index %u, stripe-index %u, "
3218 "owner %u/%u: rc = %d\n",
3219 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3220 llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid, rc);
3225 /* If the MDT-object and the OST-object have different owner information,
3226 * then trust the MDT-object, because the normal chown/chgrp handle order
3227 * is from MDT to OST, and it is possible that some chown/chgrp operation
3228 * is partly done. */
/*
 * Repair inconsistent owner between the MDT-object and its OST-object:
 * copy the parent's uid/gid onto the child.  The parent is re-checked
 * under its dt lock right before the set so that a concurrent
 * chown/chgrp (which flows MDT -> OST) is not overwritten.
 *
 * \param[in] pla	parent owner as sampled by the caller
 *
 * \retval	positive if nothing needed to be done (parent dying or
 *		owner changed meanwhile), 0 on success, negative errno
 */
3229 static int lfsck_layout_repair_owner(const struct lu_env *env,
3230 struct lfsck_component *com,
3231 struct lfsck_layout_req *llr,
3232 struct lu_attr *pla)
3234 struct lfsck_thread_info *info = lfsck_env_info(env);
3235 struct lu_attr *tla = &info->lti_la3;
3236 struct dt_object *parent = llr->llr_parent->llo_obj;
3237 struct dt_object *child = llr->llr_child;
3238 struct dt_device *dev = lfsck_obj2dt_dev(child);
3239 struct thandle *handle;
3243 handle = dt_trans_create(env, dev);
3245 GOTO(log, rc = PTR_ERR(handle));
3247 tla->la_uid = pla->la_uid;
3248 tla->la_gid = pla->la_gid;
3249 tla->la_valid = LA_UID | LA_GID;
3250 rc = dt_declare_attr_set(env, child, tla, handle);
3254 rc = dt_trans_start(env, dev, handle);
3258 /* Use the dt_object lock to serialize with destroy and attr_set. */
3259 dt_read_lock(env, parent, 0);
3260 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3261 GOTO(unlock, rc = 1);
3263 /* Get the latest parent's owner. */
3264 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3268 /* Someone else chown'd/chgrp'd it during the LFSCK; nothing to do. */
3269 if (unlikely(tla->la_uid != pla->la_uid ||
3270 tla->la_gid != pla->la_gid))
3271 GOTO(unlock, rc = 1);
3273 tla->la_valid = LA_UID | LA_GID;
3274 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3279 dt_read_unlock(env, parent);
3282 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3285 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired inconsistent "
3286 "file owner for: parent "DFID", child "DFID", OST-index %u, "
3287 "stripe-index %u, owner %u/%u: rc = %d\n",
3288 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3289 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx, llr->llr_lov_idx,
3290 pla->la_uid, pla->la_gid, rc);
3295 /* Check whether the OST-object correctly back points to the
3296 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
/*
 * Classify the relationship between an OST-object and the MDT-object
 * that claims it, using the OST-object's back pointer @pfid (from its
 * XATTR_NAME_FID filter_fid).
 *
 * Outcomes (returned as positive lfsck_layout_inconsistency_type
 * values, or negative errno):
 *  - zero @pfid and a never-written object: at most an owner mismatch;
 *  - @pfid matches @parent and the stripe index matches: consistent;
 *  - @pfid names another MDT-object whose LOV EA also contains @cfid:
 *    LLIT_MULTIPLE_REFERENCED;
 *  - otherwise: LLIT_UNMATCHED_PAIR.
 *
 * \param[in] pfid	parent FID recorded on the OST-object
 * \param[in] cfid	the OST-object's own FID
 * \param[in] idx	stripe index recorded on the OST-object
 */
3297 static int lfsck_layout_check_parent(const struct lu_env *env,
3298 struct lfsck_component *com,
3299 struct dt_object *parent,
3300 const struct lu_fid *pfid,
3301 const struct lu_fid *cfid,
3302 const struct lu_attr *pla,
3303 const struct lu_attr *cla,
3304 struct lfsck_layout_req *llr,
3305 struct lu_buf *lov_ea, __u32 idx)
3307 struct lfsck_thread_info *info = lfsck_env_info(env);
3308 struct lu_buf *buf = &info->lti_big_buf;
3309 struct dt_object *tobj;
3310 struct lov_mds_md_v1 *lmm;
3311 struct lov_ost_data_v1 *objs;
3318 if (fid_is_zero(pfid)) {
3319 /* client never wrote. */
3320 if (cla->la_size == 0 && cla->la_blocks == 0) {
3321 if (unlikely(cla->la_uid != pla->la_uid ||
3322 cla->la_gid != pla->la_gid))
3323 RETURN (LLIT_INCONSISTENT_OWNER);
3328 RETURN(LLIT_UNMATCHED_PAIR);
3331 if (unlikely(!fid_is_sane(pfid)))
3332 RETURN(LLIT_UNMATCHED_PAIR);
3334 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
3335 if (llr->llr_lov_idx == idx)
3338 RETURN(LLIT_UNMATCHED_PAIR);
/* The back pointer names some other MDT-object; look it up. */
3341 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
3343 RETURN(PTR_ERR(tobj));
3345 if (!dt_object_exists(tobj))
3346 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3348 /* Load the tobj's layout EA, no matter whether it is a local MDT-object
3349 * or a remote one on another MDT. Then check whether the given OST-object
3350 * is in such layout. If yes, it is multiple referenced, otherwise it
3351 * is unmatched referenced case. */
3352 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
3354 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3360 magic = le32_to_cpu(lmm->lmm_magic);
3361 if (magic == LOV_MAGIC_V1) {
3362 objs = &lmm->lmm_objects[0];
3364 LASSERT(magic == LOV_MAGIC_V3);
3365 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Scan tobj's stripes for @cfid to detect a double reference. */
3368 count = le16_to_cpu(lmm->lmm_stripe_count);
3369 for (i = 0; i < count; i++, objs++) {
3370 struct lu_fid *tfid = &info->lti_fid2;
3371 struct ost_id *oi = &info->lti_oi;
3373 if (lovea_slot_is_dummy(objs))
3376 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3377 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
3378 if (lu_fid_eq(cfid, tfid)) {
3381 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
3385 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3388 lfsck_object_put(env, tobj);
/*
 * Verify one prefetched parent/child (MDT-object / OST-object) pair and
 * dispatch the matching repair routine.
 *
 * Detection order: dangling reference (child attr_get -> -ENOENT),
 * malformed/unmatched filter_fid, parent-linkage classification via
 * lfsck_layout_check_parent(), then a plain owner comparison.  Under
 * LPF_DRYRUN the inconsistency is only counted; otherwise the repair
 * result (and any OST-communication failure, which marks the LFSCK
 * LF_INCOMPLETE) is recorded in the trace file under com->lc_sem.
 */
3393 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
3394 struct lfsck_component *com,
3395 struct lfsck_layout_req *llr)
3397 struct lfsck_layout *lo = com->lc_file_ram;
3398 struct lfsck_thread_info *info = lfsck_env_info(env);
3399 struct filter_fid_old *pea = &info->lti_old_pfid;
3400 struct lu_fid *pfid = &info->lti_fid;
3401 struct lu_buf *buf = NULL;
3402 struct dt_object *parent = llr->llr_parent->llo_obj;
3403 struct dt_object *child = llr->llr_child;
3404 struct lu_attr *pla = &info->lti_la;
3405 struct lu_attr *cla = &info->lti_la2;
3406 struct lfsck_instance *lfsck = com->lc_lfsck;
3407 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3408 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
3413 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
3415 if (lu_object_is_dying(parent->do_lu.lo_header))
3421 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
/* Child gone while parent still references it: dangling. */
3422 if (rc == -ENOENT) {
3423 if (lu_object_is_dying(parent->do_lu.lo_header))
3426 type = LLIT_DANGLING;
/* Read the child's back pointer; accept both the old (larger) and
 * the current filter_fid on-disk formats. */
3433 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
3434 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
3435 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
3436 rc != sizeof(struct filter_fid))) {
3437 type = LLIT_UNMATCHED_PAIR;
3441 if (rc < 0 && rc != -ENODATA)
3444 if (rc == -ENODATA) {
3447 fid_le_to_cpu(pfid, &pea->ff_parent);
3448 /* Currently, the filter_fid::ff_parent::f_ver is not the
3449 * real parent MDT-object's FID::f_ver, instead it is the
3450 * OST-object index in its parent MDT-object's layout EA. */
3451 idx = pfid->f_stripe_idx;
3455 rc = lfsck_layout_check_parent(env, com, parent, pfid,
3456 lu_object_fid(&child->do_lu),
3457 pla, cla, llr, buf, idx);
3466 if (unlikely(cla->la_uid != pla->la_uid ||
3467 cla->la_gid != pla->la_gid)) {
3468 type = LLIT_INCONSISTENT_OWNER;
3473 if (bk->lb_param & LPF_DRYRUN) {
3474 if (type != LLIT_NONE)
/* Dispatch the repair routine matching the detected type. */
3482 rc = lfsck_layout_repair_dangling(env, com, llr, pla);
3484 case LLIT_UNMATCHED_PAIR:
3485 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
3487 case LLIT_MULTIPLE_REFERENCED:
3488 rc = lfsck_layout_repair_multiple_references(env, com, llr,
3491 case LLIT_INCONSISTENT_OWNER:
3492 rc = lfsck_layout_repair_owner(env, com, llr, pla);
/* Record the result into the in-memory trace file. */
3502 down_write(&com->lc_sem);
3504 struct lfsck_layout_master_data *llmd = com->lc_data;
3506 if (unlikely(llmd->llmd_exit)) {
3508 } else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
3509 rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
3510 rc == -EHOSTUNREACH) {
3511 /* If cannot touch the target server,
3512 * mark the LFSCK as INCOMPLETE. */
3513 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant fail to "
3514 "talk with OST %x: rc = %d\n",
3515 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
3516 lo->ll_flags |= LF_INCOMPLETE;
3517 lo->ll_objs_skipped++;
3520 lfsck_layout_record_failure(env, lfsck, lo);
3522 } else if (rc > 0) {
3523 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
3524 "unknown type = %d\n", type);
3526 lo->ll_objs_repaired[type - 1]++;
3527 if (bk->lb_param & LPF_DRYRUN &&
3528 unlikely(lo->ll_pos_first_inconsistent == 0))
3529 lo->ll_pos_first_inconsistent =
3530 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
3533 up_write(&com->lc_sem);
/*
 * Main loop of the layout LFSCK assistant thread on the master (MDT).
 *
 * Phase 1: notify all targets of LFSCK start, then drain the request
 * list filled by the engine thread, handling each entry with
 * lfsck_layout_assistant_handle_one() and waking the engine as the
 * prefetch window empties.  Phase 2: after llmd_to_double_scan is set,
 * poll the OSTs (at least every 30s) and run the orphan scanning for
 * each OST that reaches its phase-2 list.  On exit, broadcast the
 * terminal event (PHASE2_DONE/STOP/PEER_EXIT) and release resources.
 *
 * NOTE(review): the tokens "<a->", "<ds->" and "<d->" below look like
 * an HTML-entity mangling of "&lta->", "&ltds->" and "&ltd->" ("&lt"
 * swallowed the address-of + first letters) — restore from upstream
 * lustre/lfsck/lfsck_layout.c before compiling.
 */
3538 static int lfsck_layout_assistant(void *args)
3540 struct lfsck_thread_args *lta = args;
3541 struct lu_env *env = <a->lta_env;
3542 struct lfsck_component *com = lta->lta_com;
3543 struct lfsck_instance *lfsck = lta->lta_lfsck;
3544 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3545 struct lfsck_position *pos = &com->lc_pos_start;
3546 struct lfsck_thread_info *info = lfsck_env_info(env);
3547 struct lfsck_request *lr = &info->lti_lr;
3548 struct lfsck_layout_master_data *llmd = com->lc_data;
3549 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3550 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3551 struct lfsck_layout_req *llr;
3552 struct l_wait_info lwi = { 0 };
/* Broadcast the LE_START event with the bookmark parameters. */
3557 memset(lr, 0, sizeof(*lr));
3558 lr->lr_event = LE_START;
3559 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
3560 LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
3561 lr->lr_speed = bk->lb_speed_limit;
3562 lr->lr_version = bk->lb_version;
3563 lr->lr_param = bk->lb_param;
3564 lr->lr_async_windows = bk->lb_async_windows;
3565 lr->lr_flags = LEF_TO_OST;
3566 if (pos->lp_oit_cookie <= 1)
3567 lr->lr_param |= LPF_RESET;
3569 rc = lfsck_layout_master_notify_others(env, com, lr);
3571 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to notify "
3572 "others for LFSCK start: rc = %d\n",
3573 lfsck_lfsck2name(lfsck), rc);
3577 spin_lock(&llmd->llmd_lock);
3578 thread_set_flags(athread, SVC_RUNNING);
3579 spin_unlock(&llmd->llmd_lock);
3580 wake_up_all(&mthread->t_ctl_waitq);
/* Phase-1 loop: consume requests queued by the engine thread. */
3583 while (!list_empty(&llmd->llmd_req_list)) {
3584 bool wakeup = false;
3586 if (unlikely(llmd->llmd_exit ||
3587 !thread_is_running(mthread)))
3588 GOTO(cleanup1, rc = llmd->llmd_post_result);
3590 llr = list_entry(llmd->llmd_req_list.next,
3591 struct lfsck_layout_req,
3593 /* Only the lfsck_layout_assistant thread itself can
3594 * remove the "llr" from the head of the list, LFSCK
3595 * engine thread only inserts other new "lld" at the
3596 * end of the list. So it is safe to handle current
3597 * "llr" without the spin_lock. */
3598 rc = lfsck_layout_assistant_handle_one(env, com, llr);
3599 spin_lock(&llmd->llmd_lock);
3600 list_del_init(&llr->llr_list);
3601 llmd->llmd_prefetched--;
3602 /* Wake up the main engine thread only when the list
3603 * is empty or half of the prefetched items have been
3604 * handled to avoid too frequent thread schedule. */
3605 if (llmd->llmd_prefetched == 0 ||
3606 (bk->lb_async_windows != 0 &&
3607 bk->lb_async_windows / 2 ==
3608 llmd->llmd_prefetched))
3610 spin_unlock(&llmd->llmd_lock);
3612 wake_up_all(&mthread->t_ctl_waitq);
3614 lfsck_layout_req_fini(env, llr);
3615 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* Sleep until there is work, a post request, or phase-2 start. */
3619 l_wait_event(athread->t_ctl_waitq,
3620 !lfsck_layout_req_empty(llmd) ||
3622 llmd->llmd_to_post ||
3623 llmd->llmd_to_double_scan,
3626 if (unlikely(llmd->llmd_exit))
3627 GOTO(cleanup1, rc = llmd->llmd_post_result);
3629 if (!list_empty(&llmd->llmd_req_list))
/* Engine finished phase 1: tell the other targets. */
3632 if (llmd->llmd_to_post) {
3633 llmd->llmd_to_post = 0;
3634 LASSERT(llmd->llmd_post_result > 0);
3636 memset(lr, 0, sizeof(*lr));
3637 lr->lr_event = LE_PHASE1_DONE;
3638 lr->lr_status = llmd->llmd_post_result;
3639 rc = lfsck_layout_master_notify_others(env, com, lr);
3641 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant "
3642 "failed to notify others for LFSCK "
3644 lfsck_lfsck2name(lfsck), rc);
3646 /* Wakeup the master engine to go ahead. */
3647 wake_up_all(&mthread->t_ctl_waitq);
/* Phase-2 (double scan): drive orphan handling per OST. */
3650 if (llmd->llmd_to_double_scan) {
3651 llmd->llmd_to_double_scan = 0;
3652 atomic_inc(&lfsck->li_double_scan_count);
3653 llmd->llmd_in_double_scan = 1;
3654 wake_up_all(&mthread->t_ctl_waitq);
3656 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant phase2 "
3657 "scan start\n", lfsck_lfsck2name(lfsck));
3659 com->lc_new_checked = 0;
3660 com->lc_new_scanned = 0;
3661 com->lc_time_last_checkpoint = cfs_time_current();
3662 com->lc_time_next_checkpoint =
3663 com->lc_time_last_checkpoint +
3664 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
3666 /* flush all async updating before handling orphan. */
3667 dt_sync(env, lfsck->li_next);
3669 while (llmd->llmd_in_double_scan) {
3670 struct lfsck_tgt_descs *ltds =
3671 &lfsck->li_ost_descs;
3672 struct lfsck_tgt_desc *ltd;
3674 rc = lfsck_layout_master_query_others(env, com);
3675 if (lfsck_layout_master_to_orphan(llmd))
3681 /* Pull LFSCK status on related targets once
3682 * per 30 seconds if we are not notified. */
3683 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
3684 cfs_time_seconds(1),
3686 rc = l_wait_event(athread->t_ctl_waitq,
3687 lfsck_layout_master_to_orphan(llmd) ||
3689 !thread_is_running(mthread),
3692 if (unlikely(llmd->llmd_exit ||
3693 !thread_is_running(mthread)))
3694 GOTO(cleanup2, rc = 0);
3696 if (rc == -ETIMEDOUT)
/* Pop OSTs from the phase-2 list and scan their orphans. */
3703 spin_lock(<ds->ltd_lock);
3705 &llmd->llmd_ost_phase2_list)) {
3707 llmd->llmd_ost_phase2_list.next,
3708 struct lfsck_tgt_desc,
3709 ltd_layout_phase_list);
3711 <d->ltd_layout_phase_list);
3712 spin_unlock(<ds->ltd_lock);
3714 if (bk->lb_param & LPF_ALL_TGT) {
3715 rc = lfsck_layout_scan_orphan(
3718 bk->lb_param & LPF_FAILOUT)
3722 if (unlikely(llmd->llmd_exit ||
3723 !thread_is_running(mthread)))
3724 GOTO(cleanup2, rc = 0);
3726 spin_lock(<ds->ltd_lock);
/* Phase 2 is complete once no OST remains in phase 1. */
3729 if (list_empty(&llmd->llmd_ost_phase1_list)) {
3730 spin_unlock(<ds->ltd_lock);
3731 GOTO(cleanup2, rc = 1);
3733 spin_unlock(<ds->ltd_lock);
3739 /* Cleanup the unfinished requests. */
3740 spin_lock(&llmd->llmd_lock);
3742 llmd->llmd_assistant_status = rc;
3744 while (!list_empty(&llmd->llmd_req_list)) {
3745 llr = list_entry(llmd->llmd_req_list.next,
3746 struct lfsck_layout_req,
3748 list_del_init(&llr->llr_list);
3749 llmd->llmd_prefetched--;
/* Drop the lock around req_fini: it may block/free. */
3750 spin_unlock(&llmd->llmd_lock);
3751 lfsck_layout_req_fini(env, llr);
3752 spin_lock(&llmd->llmd_lock);
3754 spin_unlock(&llmd->llmd_lock);
3756 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
3757 llmd->llmd_prefetched);
/* Choose and broadcast the terminal event for the other targets. */
3760 memset(lr, 0, sizeof(*lr));
3762 lr->lr_event = LE_PHASE2_DONE;
3764 } else if (rc == 0) {
3765 if (lfsck->li_flags & LPF_ALL_TGT) {
3766 lr->lr_event = LE_STOP;
3767 lr->lr_status = LS_STOPPED;
3769 lr->lr_event = LE_PEER_EXIT;
3770 switch (lfsck->li_status) {
3773 lr->lr_status = LS_CO_PAUSED;
3777 lr->lr_status = LS_CO_STOPPED;
3780 CDEBUG(D_LFSCK, "%s: unknown status: rc = %d\n",
3781 lfsck_lfsck2name(lfsck),
3783 lr->lr_status = LS_CO_FAILED;
3788 if (lfsck->li_flags & LPF_ALL_TGT) {
3789 lr->lr_event = LE_STOP;
3790 lr->lr_status = LS_FAILED;
3792 lr->lr_event = LE_PEER_EXIT;
3793 lr->lr_status = LS_CO_FAILED;
3797 rc1 = lfsck_layout_master_notify_others(env, com, lr);
3799 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to "
3800 "notify others for LFSCK quit: rc = %d\n",
3801 lfsck_lfsck2name(lfsck), rc1);
3805 /* flush all async updating before exit. */
3806 dt_sync(env, lfsck->li_next);
3808 /* Under force exit case, some requests may be just freed without
3809 * verification, those objects should be re-handled when next run.
3810 * So not update the on-disk tracing file under such case. */
3811 if (llmd->llmd_in_double_scan) {
3812 struct lfsck_layout *lo = com->lc_file_ram;
3814 if (!llmd->llmd_exit)
3815 rc1 = lfsck_layout_double_scan_result(env, com, rc);
3817 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant phase2 scan "
3818 "finished, status %d: rc = %d\n",
3819 lfsck_lfsck2name(lfsck), lo->ll_status, rc1);
3823 if (llmd->llmd_in_double_scan)
3824 atomic_dec(&lfsck->li_double_scan_count);
/* Publish final status and signal the master engine we stopped. */
3826 spin_lock(&llmd->llmd_lock);
3827 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
3828 thread_set_flags(athread, SVC_STOPPED);
3829 wake_up_all(&mthread->t_ctl_waitq);
3830 spin_unlock(&llmd->llmd_lock);
3831 lfsck_thread_args_fini(lta);
/*
 * Interpret callback for the async LFSCK_QUERY RPC sent by an OST
 * (slave) to an MDT (master): read the master's LFSCK status from the
 * reply and, unless the master is still scanning (phase 1 or 2), drop
 * the corresponding target from llsd_master_list.  Releases the
 * references taken when the request was packed.
 */
3837 lfsck_layout_slave_async_interpret(const struct lu_env *env,
3838 struct ptlrpc_request *req,
3841 struct lfsck_layout_slave_async_args *llsaa = args;
3842 struct obd_export *exp = llsaa->llsaa_exp;
3843 struct lfsck_component *com = llsaa->llsaa_com;
3844 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
3845 struct lfsck_layout_slave_data *llsd = com->lc_data;
3846 struct lfsck_reply *lr = NULL;
3850 /* The failure is most likely caused by a target crash;
3851 * to let the LFSCK go ahead, assume that the target
3852 * finished its LFSCK processing. */
3855 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
3856 if (lr->lr_status != LS_SCANNING_PHASE1 &&
3857 lr->lr_status != LS_SCANNING_PHASE2)
3862 CDEBUG(D_LFSCK, "%s: layout LFSCK slave gets the MDT %x "
3863 "status %d\n", lfsck_lfsck2name(com->lc_lfsck),
3864 llst->llst_index, lr != NULL ? lr->lr_status : rc);
3866 lfsck_layout_llst_del(llsd, llst);
/* Drop references pinned by lfsck_layout_async_query(). */
3869 lfsck_layout_llst_put(llst);
3870 lfsck_component_put(env, com);
3871 class_export_put(exp);
/*
 * Pack and queue (on @set) an asynchronous LFSCK_QUERY RPC to the
 * export @exp.  The component reference and @llst are pinned in the
 * request's async args and released by
 * lfsck_layout_slave_async_interpret() when the reply arrives.
 *
 * \retval	0 on success, negative errno on alloc/pack failure
 */
3876 static int lfsck_layout_async_query(const struct lu_env *env,
3877 struct lfsck_component *com,
3878 struct obd_export *exp,
3879 struct lfsck_layout_slave_target *llst,
3880 struct lfsck_request *lr,
3881 struct ptlrpc_request_set *set)
3883 struct lfsck_layout_slave_async_args *llsaa;
3884 struct ptlrpc_request *req;
3885 struct lfsck_request *tmp;
3889 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
3893 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
3895 ptlrpc_request_free(req);
3899 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3901 ptlrpc_request_set_replen(req);
/* Stash context for the interpret callback; com is ref-counted. */
3903 llsaa = ptlrpc_req_async_args(req);
3904 llsaa->llsaa_exp = exp;
3905 llsaa->llsaa_com = lfsck_component_get(com);
3906 llsaa->llsaa_llst = llst;
3907 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
3908 ptlrpc_set_add_req(set, req);
/*
 * Build and enqueue an asynchronous LFSCK_NOTIFY RPC to the target behind
 * \a exp, adding it to the request set \a set.
 *
 * Unlike lfsck_layout_async_query(), no interpret callback or async args
 * are installed — the notification is fire-and-forget within the set.
 *
 * \param lr   pre-filled lfsck_request to copy into the RPC body
 * \retval 0 on success; frees the request and returns negative rc on
 *         packing failure (per the visible error path).
 */
3913 static int lfsck_layout_async_notify(const struct lu_env *env,
3914 struct obd_export *exp,
3915 struct lfsck_request *lr,
3916 struct ptlrpc_request_set *set)
3918 struct ptlrpc_request *req;
3919 struct lfsck_request *tmp;
3923 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
3927 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
3929 ptlrpc_request_free(req);
3933 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3935 ptlrpc_request_set_replen(req);
3936 ptlrpc_set_add_req(set, req);
/*
 * Slave-side: send an LE_QUERY request to every master currently on the
 * llsd_master_list, then wait for all the replies.
 *
 * The per-call generation counter (llsd_touch_gen) marks each target as
 * visited: entries already stamped with the current generation have been
 * processed this round, so the walk terminates even though processed
 * entries are moved to the list tail. The list lock is dropped around the
 * RPC issue and re-taken for the next iteration.
 *
 * Targets whose LWP export cannot be found are removed from the list
 * (lfsck_layout_llst_del). Replies are consumed asynchronously by
 * lfsck_layout_slave_async_interpret() via lfsck_layout_async_query().
 */
3942 lfsck_layout_slave_query_master(const struct lu_env *env,
3943 struct lfsck_component *com)
3945 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3946 struct lfsck_instance *lfsck = com->lc_lfsck;
3947 struct lfsck_layout_slave_data *llsd = com->lc_data;
3948 struct lfsck_layout_slave_target *llst;
3949 struct obd_export *exp;
3950 struct ptlrpc_request_set *set;
3955 set = ptlrpc_prep_set();
3957 GOTO(log, rc = -ENOMEM);
3959 memset(lr, 0, sizeof(*lr));
3960 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3961 lr->lr_event = LE_QUERY;
3962 lr->lr_active = LFSCK_TYPE_LAYOUT;
/* Bump the generation so this round's visits are distinguishable from
 * earlier rounds. */
3964 llsd->llsd_touch_gen++;
3965 spin_lock(&llsd->llsd_lock);
3966 while (!list_empty(&llsd->llsd_master_list)) {
3967 llst = list_entry(llsd->llsd_master_list.next,
3968 struct lfsck_layout_slave_target,
3970 if (llst->llst_gen == llsd->llsd_touch_gen)
3973 llst->llst_gen = llsd->llsd_touch_gen;
3974 list_move_tail(&llst->llst_list,
3975 &llsd->llsd_master_list);
3976 atomic_inc(&llst->llst_ref);
3977 spin_unlock(&llsd->llsd_lock);
3979 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP connection for this master: drop it from the list. */
3982 lfsck_layout_llst_del(llsd, llst);
3983 lfsck_layout_llst_put(llst);
3984 spin_lock(&llsd->llsd_lock);
3988 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
3990 CDEBUG(D_LFSCK, "%s: layout LFSCK slave fail to "
3991 "query %s for layout: rc = %d\n",
3992 lfsck_lfsck2name(lfsck),
3993 exp->exp_obd->obd_name, rc);
3996 lfsck_layout_llst_put(llst);
3997 class_export_put(exp);
3999 spin_lock(&llsd->llsd_lock);
4001 spin_unlock(&llsd->llsd_lock);
/* Wait for all queued query RPCs to complete, then tear down the set. */
4003 rc = ptlrpc_set_wait(set);
4004 ptlrpc_set_destroy(set);
4006 GOTO(log, rc = (rc1 != 0 ? rc1 : rc));
4009 CDEBUG(D_LFSCK, "%s: layout LFSCK slave queries master: rc = %d\n",
4010 lfsck_lfsck2name(com->lc_lfsck), rc);
/*
 * Slave-side: broadcast an event (e.g. LE_PHASE1_DONE) with the given
 * \a result to every master on the llsd_master_list.
 *
 * Mirrors lfsck_layout_slave_query_master(): the llsd_touch_gen generation
 * stamp bounds the walk, the list lock is dropped around RPC issue, and
 * masters without a reachable LWP export are removed from the list. The
 * notifications are sent via lfsck_layout_async_notify() (no reply
 * interpretation) and the function waits for the whole set before
 * returning.
 */
4016 lfsck_layout_slave_notify_master(const struct lu_env *env,
4017 struct lfsck_component *com,
4018 enum lfsck_events event, int result)
4020 struct lfsck_instance *lfsck = com->lc_lfsck;
4021 struct lfsck_layout_slave_data *llsd = com->lc_data;
4022 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
4023 struct lfsck_layout_slave_target *llst;
4024 struct obd_export *exp;
4025 struct ptlrpc_request_set *set;
4029 CDEBUG(D_LFSCK, "%s: layout LFSCK slave notifies master\n",
4030 lfsck_lfsck2name(com->lc_lfsck));
4032 set = ptlrpc_prep_set();
/* Fill the request template once; it is reused for every master. */
4036 memset(lr, 0, sizeof(*lr));
4037 lr->lr_event = event;
4038 lr->lr_flags = LEF_FROM_OST;
4039 lr->lr_status = result;
4040 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
4041 lr->lr_active = LFSCK_TYPE_LAYOUT;
4042 llsd->llsd_touch_gen++;
4043 spin_lock(&llsd->llsd_lock);
4044 while (!list_empty(&llsd->llsd_master_list)) {
4045 llst = list_entry(llsd->llsd_master_list.next,
4046 struct lfsck_layout_slave_target,
4048 if (llst->llst_gen == llsd->llsd_touch_gen)
4051 llst->llst_gen = llsd->llsd_touch_gen;
4052 list_move_tail(&llst->llst_list,
4053 &llsd->llsd_master_list);
4054 atomic_inc(&llst->llst_ref);
4055 spin_unlock(&llsd->llsd_lock);
4057 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP connection for this master: drop it from the list. */
4060 lfsck_layout_llst_del(llsd, llst);
4061 lfsck_layout_llst_put(llst);
4062 spin_lock(&llsd->llsd_lock);
4066 rc = lfsck_layout_async_notify(env, exp, lr, set);
4068 CDEBUG(D_LFSCK, "%s: layout LFSCK slave fail to "
4069 "notify %s for layout: rc = %d\n",
4070 lfsck_lfsck2name(lfsck),
4071 exp->exp_obd->obd_name, rc);
4073 lfsck_layout_llst_put(llst);
4074 class_export_put(exp);
4075 spin_lock(&llsd->llsd_lock);
4077 spin_unlock(&llsd->llsd_lock);
/* Best-effort: wait for all notifications, ignoring the overall rc. */
4079 ptlrpc_set_wait(set);
4080 ptlrpc_set_destroy(set);
/*
 * Master-side (MDT): verify that the OST-object \a cfid really is the
 * stripe at index pfid->f_stripe_idx of the MDT-object \a pfid by reading
 * the parent's LOV EA locally and scanning its stripe table.
 *
 * A stripe is "recognized" when the ost_id derived from \a cfid matches
 * the l_ost_oi entry at exactly the claimed index.
 */
4086 * \ret -ENODATA: unrecognized stripe
4087 * \ret = 0 : recognized stripe
4088 * \ret < 0 : other failures
4090 static int lfsck_layout_master_check_pairs(const struct lu_env *env,
4091 struct lfsck_component *com,
4092 struct lu_fid *cfid,
4093 struct lu_fid *pfid)
4095 struct lfsck_thread_info *info = lfsck_env_info(env);
4096 struct lu_buf *buf = &info->lti_big_buf;
4097 struct ost_id *oi = &info->lti_oi;
4098 struct dt_object *obj;
4099 struct lov_mds_md_v1 *lmm;
4100 struct lov_ost_data_v1 *objs;
4101 __u32 idx = pfid->f_stripe_idx;
4109 obj = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
4111 RETURN(PTR_ERR(obj));
4113 dt_read_lock(env, obj, 0);
4114 if (unlikely(!dt_object_exists(obj)))
4115 GOTO(unlock, rc = -ENOENT);
4117 rc = lfsck_layout_get_lovea(env, obj, buf, NULL);
4122 GOTO(unlock, rc = -ENODATA);
4125 rc = lfsck_layout_verify_header(lmm);
4129 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3, which has
4130 * been verified in lfsck_layout_verify_header() already. If some
4131 * new magic is introduced in the future, then layout LFSCK needs to
4132 * be updated also. */
4133 magic = le32_to_cpu(lmm->lmm_magic);
4134 if (magic == LOV_MAGIC_V1) {
4135 objs = &lmm->lmm_objects[0];
4137 LASSERT(magic == LOV_MAGIC_V3);
4138 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Compare the child's ost_id against each on-disk stripe entry; a match
 * at the wrong index still counts as unrecognized (-ENODATA). */
4141 fid_to_ostid(cfid, oi);
4142 count = le16_to_cpu(lmm->lmm_stripe_count);
4143 for (i = 0; i < count; i++, objs++) {
4146 ostid_le_to_cpu(&objs->l_ost_oi, &oi2);
4147 if (memcmp(oi, &oi2, sizeof(*oi)) == 0)
4148 GOTO(unlock, rc = (i != idx ? -ENODATA : 0));
4151 GOTO(unlock, rc = -ENODATA);
4154 dt_read_unlock(env, obj);
4155 lu_object_put(env, &obj->do_lu);
/*
 * Slave-side (OST): ask the owning MDT to verify an OST-object/MDT-object
 * pair via a synchronous LE_PAIRS_VERIFY notification RPC.
 */
4161 * The LFSCK-on-OST will ask the LFSCK-on-MDT to check whether the given
4162 * MDT-object/OST-object pairs match or not, to avoid transferring the
4163 * MDT-object layout EA from MDT to OST. On one hand, the OST does not need
4164 * to understand the layout EA structure; on the other hand, it may cause
4165 * trouble to transfer a large layout EA from MDT to OST via normal OUT RPC.
4167 * \ret > 0: unrecognized stripe
4168 * \ret = 0: recognized stripe
4169 * \ret < 0: other failures
4171 static int lfsck_layout_slave_check_pairs(const struct lu_env *env,
4172 struct lfsck_component *com,
4173 struct lu_fid *cfid,
4174 struct lu_fid *pfid)
4176 struct lfsck_instance *lfsck = com->lc_lfsck;
4177 struct obd_device *obd = lfsck->li_obd;
4178 struct seq_server_site *ss =
4179 lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
4180 struct obd_export *exp = NULL;
4181 struct ptlrpc_request *req = NULL;
4182 struct lfsck_request *lr;
4183 struct lu_seq_range range = { 0 };
4187 if (unlikely(fid_is_idif(pfid)))
/* Resolve which MDT owns the parent FID's sequence via the FLD.
 * An unknown sequence (-ENOENT) maps to "unrecognized" (return 1). */
4190 fld_range_set_any(&range);
4191 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(pfid), &range);
4193 RETURN(rc == -ENOENT ? 1 : rc);
4195 if (unlikely(!fld_range_is_mdt(&range)))
4198 exp = lustre_find_lwp_by_index(obd->obd_name, range.lsr_index);
4199 if (unlikely(exp == NULL))
/* The peer must advertise LFSCK support before we send the RPC. */
4202 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
4203 GOTO(out, rc = -EOPNOTSUPP);
4205 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
4207 GOTO(out, rc = -ENOMEM);
4209 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
4211 ptlrpc_request_free(req);
4216 lr = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
4217 memset(lr, 0, sizeof(*lr));
4218 lr->lr_event = LE_PAIRS_VERIFY;
4219 lr->lr_active = LFSCK_TYPE_LAYOUT;
4220 lr->lr_fid = *cfid; /* OST-object itself FID. */
4221 lr->lr_fid2 = *pfid; /* The claimed parent FID. */
4223 ptlrpc_request_set_replen(req);
4224 rc = ptlrpc_queue_wait(req);
4225 ptlrpc_req_finished(req);
4227 if (rc == -ENOENT || rc == -ENODATA)
4234 class_export_put(exp);
/*
 * Slave-side (OST): rewrite the parent-FID ("pfid") xattr of the local
 * OST-object lr->lr_fid so that it points at the parent lr->lr_fid2, as
 * instructed by the master.
 *
 * The update is done under the object write lock inside a local
 * transaction: declare xattr set, start, set XATTR_NAME_FID. A vanished
 * object (no longer exists under the lock) is treated as success (rc = 0).
 */
4239 static int lfsck_layout_slave_repair_pfid(const struct lu_env *env,
4240 struct lfsck_component *com,
4241 struct lfsck_request *lr)
4243 struct lfsck_thread_info *info = lfsck_env_info(env);
4244 struct filter_fid *ff = &info->lti_new_pfid;
4246 struct dt_device *dev = com->lc_lfsck->li_bottom;
4247 struct dt_object *obj;
4248 struct thandle *th = NULL;
4252 obj = lfsck_object_find_by_dev(env, dev, &lr->lr_fid);
4254 GOTO(log, rc = PTR_ERR(obj));
/* Store the claimed parent FID in little-endian on-disk form. */
4256 fid_cpu_to_le(&ff->ff_parent, &lr->lr_fid2);
4257 buf = lfsck_buf_get(env, ff, sizeof(*ff));
4258 dt_write_lock(env, obj, 0);
4259 if (unlikely(!dt_object_exists(obj)))
4260 GOTO(unlock, rc = 0);
4262 th = dt_trans_create(env, dev);
4264 GOTO(unlock, rc = PTR_ERR(th));
4266 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th);
4270 rc = dt_trans_start_local(env, dev, th);
4274 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th, BYPASS_CAPA);
4279 dt_trans_stop(env, dev, th);
4282 dt_write_unlock(env, obj);
4283 lu_object_put(env, &obj->do_lu);
4286 CDEBUG(D_LFSCK, "%s: layout LFSCK slave repaired pfid for "DFID
4287 ", parent "DFID": rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
4288 PFID(&lr->lr_fid), PFID(&lr->lr_fid2), rc);
/*
 * Reset the in-memory layout LFSCK trace file to a pristine state and
 * persist it.
 *
 * \param init  when set (per the visible branch structure), the whole
 *              record is zeroed; otherwise the cumulative success count
 *              and last-complete timestamp are preserved across the reset.
 */
4295 static int lfsck_layout_reset(const struct lu_env *env,
4296 struct lfsck_component *com, bool init)
4298 struct lfsck_layout *lo = com->lc_file_ram;
4301 down_write(&com->lc_sem);
4303 memset(lo, 0, com->lc_file_size);
/* Non-init reset: carry the historical counters over the memset. */
4305 __u32 count = lo->ll_success_count;
4306 __u64 last_time = lo->ll_time_last_complete;
4308 memset(lo, 0, com->lc_file_size);
4309 lo->ll_success_count = count;
4310 lo->ll_time_last_complete = last_time;
4313 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
4314 lo->ll_status = LS_INIT;
4316 rc = lfsck_layout_store(env, com);
4317 up_write(&com->lc_sem);
4319 CDEBUG(D_LFSCK, "%s: layout LFSCK reset: rc = %d\n",
4320 lfsck_lfsck2name(com->lc_lfsck), rc);
/*
 * Record one failure against the layout component: optionally count a new
 * checked object (per \a new_checked, judging by the visible increment)
 * and record the failure position/counters via
 * lfsck_layout_record_failure(), all under the component semaphore.
 */
4325 static void lfsck_layout_fail(const struct lu_env *env,
4326 struct lfsck_component *com, bool new_checked)
4328 struct lfsck_layout *lo = com->lc_file_ram;
4330 down_write(&com->lc_sem);
4332 com->lc_new_checked++;
4333 lfsck_layout_record_failure(env, com->lc_lfsck, lo);
4334 up_write(&com->lc_sem);
/*
 * Master-side checkpoint of phase1 progress.
 *
 * Waits for the assistant thread to drain its request list (or for either
 * thread to stop) before taking the checkpoint, so the recorded position
 * only covers fully-processed objects. Then, under the component
 * semaphore, updates either the latest-start position (init case) or the
 * last-checkpoint position plus the phase1 runtime/counters, and stores
 * the trace file.
 *
 * Returns early when there is nothing new to record (lc_new_checked == 0
 * and not \a init).
 */
4337 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
4338 struct lfsck_component *com, bool init)
4340 struct lfsck_instance *lfsck = com->lc_lfsck;
4341 struct lfsck_layout *lo = com->lc_file_ram;
4342 struct lfsck_layout_master_data *llmd = com->lc_data;
4343 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4344 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4345 struct l_wait_info lwi = { 0 };
4348 if (com->lc_new_checked == 0 && !init)
4351 l_wait_event(mthread->t_ctl_waitq,
4352 list_empty(&llmd->llmd_req_list) ||
4353 !thread_is_running(mthread) ||
4354 thread_is_stopped(athread),
4357 if (!thread_is_running(mthread) || thread_is_stopped(athread))
4360 down_write(&com->lc_sem);
4362 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4364 lo->ll_pos_last_checkpoint =
4365 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the elapsed time to the nearest second. */
4366 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4367 HALF_SEC - lfsck->li_time_last_checkpoint);
4368 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4369 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4370 com->lc_new_checked = 0;
4373 rc = lfsck_layout_store(env, com);
4374 up_write(&com->lc_sem);
4376 CDEBUG(D_LFSCK, "%s: layout LFSCK master checkpoint at the pos ["
4377 LPU64"]: rc = %d\n", lfsck_lfsck2name(lfsck),
4378 lfsck->li_pos_current.lp_oit_cookie, rc);
/*
 * Slave-side checkpoint of phase1 progress. Same bookkeeping as
 * lfsck_layout_master_checkpoint() but without the assistant-thread
 * synchronization (the slave has no assistant): update position,
 * runtime and counters under the component semaphore, then store the
 * trace file. Returns early when nothing new was checked and !init.
 */
4383 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
4384 struct lfsck_component *com, bool init)
4386 struct lfsck_instance *lfsck = com->lc_lfsck;
4387 struct lfsck_layout *lo = com->lc_file_ram;
4390 if (com->lc_new_checked == 0 && !init)
4393 down_write(&com->lc_sem);
4395 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4397 lo->ll_pos_last_checkpoint =
4398 lfsck->li_pos_current.lp_oit_cookie;
/* HALF_SEC rounds the elapsed time to the nearest second. */
4399 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4400 HALF_SEC - lfsck->li_time_last_checkpoint);
4401 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4402 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4403 com->lc_new_checked = 0;
4406 rc = lfsck_layout_store(env, com);
4407 up_write(&com->lc_sem);
4409 CDEBUG(D_LFSCK, "%s: layout LFSCK slave checkpoint at the pos ["
4410 LPU64"]: rc = %d\n", lfsck_lfsck2name(lfsck),
4411 lfsck->li_pos_current.lp_oit_cookie, rc);
/*
 * Common (master and slave) preparation for a layout LFSCK run: decide the
 * starting status and OI-table start cookie (com->lc_pos_start).
 *
 * - A completed/partial previous run, or an orphan-handling request,
 *   resets the trace file first and re-applies the start parameters.
 * - If the component was already scanned once (LF_SCANNED_ONCE): either
 *   go straight to phase2 from the beginning, or — when re-running after
 *   a dryrun with a known first inconsistency — redo phase1 from that
 *   inconsistent position with all phase counters cleared.
 * - Otherwise start (or resume) phase1 from the last checkpoint + 1, or
 *   from the first inconsistent position after a dropped dryrun.
 */
4416 static int lfsck_layout_prep(const struct lu_env *env,
4417 struct lfsck_component *com,
4418 struct lfsck_start *start)
4420 struct lfsck_instance *lfsck = com->lc_lfsck;
4421 struct lfsck_layout *lo = com->lc_file_ram;
4422 struct lfsck_position *pos = &com->lc_pos_start;
4424 fid_zero(&pos->lp_dir_parent);
4425 pos->lp_dir_cookie = 0;
4426 if (lo->ll_status == LS_COMPLETED ||
4427 lo->ll_status == LS_PARTIAL ||
4428 /* To handle orphans, we must scan from the beginning. */
4429 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
4432 rc = lfsck_layout_reset(env, com, false);
4434 rc = lfsck_set_param(env, lfsck, start, true);
4437 CDEBUG(D_LFSCK, "%s: layout LFSCK prep failed: "
4438 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
4444 down_write(&com->lc_sem);
4445 lo->ll_time_latest_start = cfs_time_current_sec();
4446 spin_lock(&lfsck->li_lock);
4447 if (lo->ll_flags & LF_SCANNED_ONCE) {
4448 if (!lfsck->li_drop_dryrun ||
4449 lo->ll_pos_first_inconsistent == 0) {
/* Phase1 already done and nothing to re-scan: jump to phase2. */
4450 lo->ll_status = LS_SCANNING_PHASE2;
4451 list_move_tail(&com->lc_link,
4452 &lfsck->li_list_double_scan);
4453 pos->lp_oit_cookie = 0;
/* Dryrun is being converted into a real repair run: restart
 * phase1 from the first recorded inconsistency with counters
 * cleared. */
4457 lo->ll_status = LS_SCANNING_PHASE1;
4458 lo->ll_run_time_phase1 = 0;
4459 lo->ll_run_time_phase2 = 0;
4460 lo->ll_objs_checked_phase1 = 0;
4461 lo->ll_objs_checked_phase2 = 0;
4462 lo->ll_objs_failed_phase1 = 0;
4463 lo->ll_objs_failed_phase2 = 0;
4464 for (i = 0; i < LLIT_MAX; i++)
4465 lo->ll_objs_repaired[i] = 0;
4467 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4468 fid_zero(&com->lc_fid_latest_scanned_phase2);
4471 lo->ll_status = LS_SCANNING_PHASE1;
4472 if (!lfsck->li_drop_dryrun ||
4473 lo->ll_pos_first_inconsistent == 0)
4474 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
4476 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4478 spin_unlock(&lfsck->li_lock);
4479 up_write(&com->lc_sem);
/*
 * Slave-side (OST) preparation: run the common lfsck_layout_prep(), then
 *
 * - if the LAST_ID files were found crashed and no master is tracked yet,
 *   notify the upper layer (li_out_notify) with LE_LASTID_REBUILDING;
 * - register the requesting master (lsp->lsp_index) in the master list
 *   when the index is valid;
 * - for an orphan-handling run, build the in-memory rbtree used to track
 *   known OST-objects (lfsck_rbtree_setup) under the rbtree write lock.
 */
4484 static int lfsck_layout_slave_prep(const struct lu_env *env,
4485 struct lfsck_component *com,
4486 struct lfsck_start_param *lsp)
4488 struct lfsck_layout_slave_data *llsd = com->lc_data;
4489 struct lfsck_instance *lfsck = com->lc_lfsck;
4490 struct lfsck_layout *lo = com->lc_file_ram;
4491 struct lfsck_start *start = lsp->lsp_start;
4494 rc = lfsck_layout_prep(env, com, start);
4498 if (lo->ll_flags & LF_CRASHED_LASTID &&
4499 list_empty(&llsd->llsd_master_list)) {
4500 LASSERT(lfsck->li_out_notify != NULL);
4502 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4503 LE_LASTID_REBUILDING);
4506 if (!lsp->lsp_index_valid)
4509 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
4510 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
4511 LASSERT(!llsd->llsd_rbtree_valid);
4513 write_lock(&llsd->llsd_rb_lock);
4514 rc = lfsck_rbtree_setup(env, com);
4515 write_unlock(&llsd->llsd_rb_lock);
4518 CDEBUG(D_LFSCK, "%s: layout LFSCK slave prep done, start pos ["
4519 LPU64"]\n", lfsck_lfsck2name(lfsck),
4520 com->lc_pos_start.lp_oit_cookie);
/*
 * Master-side (MDT) preparation: run the common lfsck_layout_prep(),
 * reset all assistant-thread state flags, then start the
 * "lfsck_layout" assistant kthread (lfsck_layout_assistant) and wait
 * until it is either running or already stopped. If the assistant
 * stopped before reaching the running state, its status becomes this
 * function's return code.
 */
4525 static int lfsck_layout_master_prep(const struct lu_env *env,
4526 struct lfsck_component *com,
4527 struct lfsck_start_param *lsp)
4529 struct lfsck_instance *lfsck = com->lc_lfsck;
4530 struct lfsck_layout_master_data *llmd = com->lc_data;
4531 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4532 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4533 struct lfsck_thread_args *lta;
4534 struct task_struct *task;
4538 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
/* Fresh run: clear every piece of assistant-thread shared state. */
4542 llmd->llmd_assistant_status = 0;
4543 llmd->llmd_post_result = 0;
4544 llmd->llmd_to_post = 0;
4545 llmd->llmd_to_double_scan = 0;
4546 llmd->llmd_in_double_scan = 0;
4547 llmd->llmd_exit = 0;
4548 thread_set_flags(athread, 0);
4550 lta = lfsck_thread_args_init(lfsck, com, lsp);
4552 RETURN(PTR_ERR(lta));
4554 task = kthread_run(lfsck_layout_assistant, lta, "lfsck_layout");
4557 CERROR("%s: cannot start LFSCK layout assistant thread: "
4558 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
4559 lfsck_thread_args_fini(lta);
4561 struct l_wait_info lwi = { 0 };
/* Wait until the assistant reports running (or has stopped). */
4563 l_wait_event(mthread->t_ctl_waitq,
4564 thread_is_running(athread) ||
4565 thread_is_stopped(athread),
4567 if (unlikely(!thread_is_running(athread)))
4568 rc = llmd->llmd_assistant_status;
4573 CDEBUG(D_LFSCK, "%s: layout LFSCK master prep done, start pos ["
4574 LPU64"\n", lfsck_lfsck2name(lfsck),
4575 com->lc_pos_start.lp_oit_cookie);
/* Pre-fetch the attributes for each stripe in the given layout EA.
 *
 * For every non-dummy stripe slot of \a parent's LOV EA:
 *  - throttle against the async window (lb_async_windows) so no more than
 *    that many prefetched requests are pending at once;
 *  - resolve the OST target; a missing target marks the whole scan
 *    LF_INCOMPLETE and that stripe is skipped;
 *  - look up the child OST-object and declare attr/pfid-xattr fetches;
 *  - queue an lfsck_layout_req on llmd_req_list for the assistant thread,
 *    waking it when the list transitions from empty.
 * Per-stripe failures are counted (and recorded on lc_sem) instead of
 * aborting, unless LPF_FAILOUT is set. */
4580 /* Pre-fetch the attribute for each stripe in the given layout EA. */
4581 static int lfsck_layout_scan_stripes(const struct lu_env *env,
4582 struct lfsck_component *com,
4583 struct dt_object *parent,
4584 struct lov_mds_md_v1 *lmm)
4586 struct lfsck_thread_info *info = lfsck_env_info(env);
4587 struct lfsck_instance *lfsck = com->lc_lfsck;
4588 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4589 struct lfsck_layout *lo = com->lc_file_ram;
4590 struct lfsck_layout_master_data *llmd = com->lc_data;
4591 struct lfsck_layout_object *llo = NULL;
4592 struct lov_ost_data_v1 *objs;
4593 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
4594 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4595 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4596 struct l_wait_info lwi = { 0 };
4605 buf = lfsck_buf_get(env, &info->lti_old_pfid,
4606 sizeof(struct filter_fid_old));
4607 count = le16_to_cpu(lmm->lmm_stripe_count);
4608 gen = le16_to_cpu(lmm->lmm_layout_gen);
4609 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3, which has
4610 * been verified in lfsck_layout_verify_header() already. If some
4611 * new magic is introduced in the future, then layout LFSCK needs to
4612 * be updated also. */
4613 magic = le32_to_cpu(lmm->lmm_magic);
4614 if (magic == LOV_MAGIC_V1) {
4615 objs = &lmm->lmm_objects[0];
4617 LASSERT(magic == LOV_MAGIC_V3);
4618 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
4621 for (i = 0; i < count; i++, objs++) {
4622 struct lu_fid *fid = &info->lti_fid;
4623 struct ost_id *oi = &info->lti_oi;
4624 struct lfsck_layout_req *llr;
4625 struct lfsck_tgt_desc *tgt = NULL;
4626 struct dt_object *cobj = NULL;
4628 le32_to_cpu(objs->l_ost_idx);
4629 bool wakeup = false;
4631 if (unlikely(lovea_slot_is_dummy(objs)))
/* Throttle: wait until the prefetch pipeline has room (or the
 * async window is unlimited, or either thread stops). */
4634 l_wait_event(mthread->t_ctl_waitq,
4635 bk->lb_async_windows == 0 ||
4636 llmd->llmd_prefetched < bk->lb_async_windows ||
4637 !thread_is_running(mthread) ||
4638 thread_is_stopped(athread),
4641 if (unlikely(!thread_is_running(mthread)) ||
4642 thread_is_stopped(athread))
4645 ostid_le_to_cpu(&objs->l_ost_oi, oi);
4646 ostid_to_fid(fid, oi, index);
4647 tgt = lfsck_tgt_get(ltds, index);
4648 if (unlikely(tgt == NULL)) {
4649 CDEBUG(D_LFSCK, "%s: cannot talk with OST %x which "
4650 "did not join the layout LFSCK\n",
4651 lfsck_lfsck2name(lfsck), index);
4652 lo->ll_flags |= LF_INCOMPLETE;
4656 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
4662 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
4666 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
4672 llo = lfsck_layout_object_init(env, parent, gen);
4679 llr = lfsck_layout_req_init(llo, cobj, index, i);
4686 spin_lock(&llmd->llmd_lock);
4687 if (llmd->llmd_assistant_status < 0) {
/* Assistant already failed: drop this request and bail out. */
4688 spin_unlock(&llmd->llmd_lock);
4689 lfsck_layout_req_fini(env, llr);
4691 RETURN(llmd->llmd_assistant_status);
4694 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
4695 if (llmd->llmd_prefetched == 0)
4698 llmd->llmd_prefetched++;
4699 spin_unlock(&llmd->llmd_lock);
4701 wake_up_all(&athread->t_ctl_waitq);
/* Per-stripe error accounting (reached via the elided error label). */
4704 down_write(&com->lc_sem);
4705 com->lc_new_checked++;
4707 lfsck_layout_record_failure(env, lfsck, lo);
4708 up_write(&com->lc_sem);
4710 if (cobj != NULL && !IS_ERR(cobj))
4711 lu_object_put(env, &cobj->do_lu);
4713 if (likely(tgt != NULL))
4716 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
4723 if (llo != NULL && !IS_ERR(llo))
4724 lfsck_layout_object_put(env, llo);
4729 /* For the given object, read its layout EA locally. For each stripe, pre-fetch
4730 * the OST-object's attributes and generate a struct lfsck_layout_req on the
4731 * list ::llmd_req_list.
4733 * For each request on the above list, the lfsck_layout_assistant thread
4734 * compares the OST-side attributes with the local ones and repairs any
4735 * inconsistency it finds.
4736 * All of the above processing is async, pipelined. */
4737 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
4738 struct lfsck_component *com,
4739 struct dt_object *obj)
4741 struct lfsck_thread_info *info = lfsck_env_info(env);
4742 struct ost_id *oi = &info->lti_oi;
4743 struct lfsck_layout *lo = com->lc_file_ram;
4744 struct lfsck_layout_master_data *llmd = com->lc_data;
4745 struct lfsck_instance *lfsck = com->lc_lfsck;
4746 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4747 struct thandle *handle = NULL;
4748 struct lu_buf *buf = &info->lti_big_buf;
4749 struct lov_mds_md_v1 *lmm = NULL;
4750 struct dt_device *dev = lfsck->li_bottom;
4751 struct lustre_handle lh = { 0 };
4752 ssize_t buflen = buf->lb_len;
4754 bool locked = false;
4755 bool stripe = false;
4756 bool bad_oi = false;
/* Only regular files carry a LOV layout worth checking. */
4759 if (!S_ISREG(lfsck_object_type(obj)))
4762 if (llmd->llmd_assistant_status < 0)
4763 GOTO(out, rc = -ESRCH);
/* Compute the expected on-disk (LE) lmm_oi from the object's FID so it
 * can be compared against what is stored in the LOV EA. */
4765 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
4766 lmm_oi_cpu_to_le(oi, oi);
4767 dt_read_lock(env, obj, 0);
4771 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
4777 rc = lfsck_layout_verify_header(lmm);
4778 /* If the LOV EA crashed, then it is possible to be rebuilt later
4779 * when handling orphan OST-objects. */
4783 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
4784 GOTO(out, stripe = true);
4786 /* Inconsistent lmm_oi, should be repaired. */
4789 if (bk->lb_param & LPF_DRYRUN) {
/* Dryrun: only count the would-be repair, don't touch disk. */
4790 down_write(&com->lc_sem);
4791 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4792 up_write(&com->lc_sem);
4794 GOTO(out, stripe = true);
4797 if (!lustre_handle_is_used(&lh)) {
/* Upgrade to an exclusive layout/xattr DLM lock before rewriting
 * the LOV EA; the read lock is dropped first. */
4798 dt_read_unlock(env, obj);
4800 buf->lb_len = buflen;
4801 rc = lfsck_layout_lock(env, com, obj, &lh,
4802 MDS_INODELOCK_LAYOUT |
4803 MDS_INODELOCK_XATTR);
4807 handle = dt_trans_create(env, dev);
4809 GOTO(out, rc = PTR_ERR(handle));
4811 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4812 LU_XATTR_REPLACE, handle);
4816 rc = dt_trans_start_local(env, dev, handle);
4820 dt_write_lock(env, obj, 0);
4827 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4828 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
4832 down_write(&com->lc_sem);
4833 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4834 up_write(&com->lc_sem);
4836 GOTO(out, stripe = true);
/* Cleanup path: release whichever lock mode is currently held. */
4840 if (lustre_handle_is_used(&lh))
4841 dt_write_unlock(env, obj);
4843 dt_read_unlock(env, obj);
4846 if (handle != NULL && !IS_ERR(handle))
4847 dt_trans_stop(env, dev, handle);
4849 lfsck_layout_unlock(&lh);
4852 CDEBUG(D_LFSCK, "%s: layout LFSCK master %s bad lmm_oi for "
4853 DFID": rc = %d\n", lfsck_lfsck2name(lfsck),
4854 bk->lb_param & LPF_DRYRUN ? "found" : "repaired",
4855 PFID(lfsck_dto2fid(obj)), rc);
/* With a valid (possibly repaired) layout in hand, queue the per-stripe
 * prefetch work for the assistant thread. */
4858 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
4860 down_write(&com->lc_sem);
4861 com->lc_new_checked++;
4863 lfsck_layout_record_failure(env, lfsck, lo);
4864 up_write(&com->lc_sem);
4866 buf->lb_len = buflen;
/*
 * Slave-side (OST) per-object handler for the OI-table iteration.
 *
 * Records the object in the orphan-tracking rbtree bitmap, then maintains
 * the per-sequence LAST_ID bookkeeping: look up (or create and load) the
 * lfsck_layout_seq for the object's sequence, track the largest object id
 * seen (lls_lastid_known), and if an id exceeds the on-disk LAST_ID even
 * after a reload, flag LF_CRASHED_LASTID and notify the upper layer that
 * LAST_ID files must be rebuilt.
 *
 * Non-OST-object FIDs and the LAST_ID objects themselves are skipped.
 * An OBD_FAIL_LFSCK_DELAY5 fail-check can inject a delay for testing.
 */
4871 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
4872 struct lfsck_component *com,
4873 struct dt_object *obj)
4875 struct lfsck_instance *lfsck = com->lc_lfsck;
4876 struct lfsck_layout *lo = com->lc_file_ram;
4877 const struct lu_fid *fid = lfsck_dto2fid(obj);
4878 struct lfsck_layout_slave_data *llsd = com->lc_data;
4879 struct lfsck_layout_seq *lls;
4885 LASSERT(llsd != NULL);
4887 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
4888 cfs_fail_val == lfsck_dev_idx(lfsck->li_bottom)) {
4889 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
4891 struct ptlrpc_thread *thread = &lfsck->li_thread;
4893 l_wait_event(thread->t_ctl_waitq,
4894 !thread_is_running(thread),
/* Mark this FID as locally known for later orphan detection. */
4898 lfsck_rbtree_update_bitmap(env, com, fid, false);
4900 down_write(&com->lc_sem);
4901 if (fid_is_idif(fid))
4903 else if (!fid_is_norm(fid) ||
4904 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
4905 GOTO(unlock, rc = 0);
4908 com->lc_new_checked++;
4910 lls = lfsck_layout_seq_lookup(llsd, seq);
/* First object seen for this sequence: allocate the tracking entry
 * and load its LAST_ID file. */
4913 if (unlikely(lls == NULL))
4914 GOTO(unlock, rc = -ENOMEM);
4916 INIT_LIST_HEAD(&lls->lls_list);
4918 rc = lfsck_layout_lastid_load(env, com, lls);
4920 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to "
4921 "load LAST_ID for "LPX64": rc = %d\n",
4922 lfsck_lfsck2name(com->lc_lfsck), seq, rc);
4923 lo->ll_objs_failed_phase1++;
4928 lfsck_layout_seq_insert(llsd, lls);
4931 if (unlikely(fid_is_last_id(fid)))
4932 GOTO(unlock, rc = 0);
4935 if (oid > lls->lls_lastid_known)
4936 lls->lls_lastid_known = oid;
4938 if (oid > lls->lls_lastid) {
4939 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
4940 /* OFD may create new objects during LFSCK scanning. */
4941 rc = lfsck_layout_lastid_reload(env, com, lls);
4942 if (unlikely(rc != 0))
4943 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to "
4944 "reload LAST_ID for "LPX64": rc = %d\n",
4945 lfsck_lfsck2name(com->lc_lfsck),
4947 if (oid <= lls->lls_lastid)
4948 GOTO(unlock, rc = 0);
/* Object id beyond LAST_ID even after reload: the LAST_ID
 * file is stale/crashed — flag it and notify the upper layer. */
4950 LASSERT(lfsck->li_out_notify != NULL);
4952 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4953 LE_LASTID_REBUILDING);
4954 lo->ll_flags |= LF_CRASHED_LASTID;
4957 lls->lls_lastid = oid;
4961 GOTO(unlock, rc = 0);
4964 up_write(&com->lc_sem);
/*
 * Directory-entry callback of the layout component. The visible body is
 * empty — layout LFSCK works from the OI-table scan, so per-dirent
 * processing is presumably a no-op here (TODO confirm: body elided).
 */
4969 static int lfsck_layout_exec_dir(const struct lu_env *env,
4970 struct lfsck_component *com,
4971 struct dt_object *obj,
4972 struct lu_dirent *ent)
/*
 * Master-side post-processing after the phase1 OI-table scan ends.
 *
 * Hands the scan result to the assistant thread (llmd_post_result /
 * llmd_to_post; a non-positive result also requests assistant exit),
 * wakes it, and waits for it to drain its request list or stop. Then,
 * under lc_sem + li_lock, translates \a result into the trace-file
 * status:
 *   result > 0  -> LS_SCANNING_PHASE2, LF_SCANNED_ONCE set, component
 *                  moved to the double-scan list;
 *   result == 0 -> adopt lfsck->li_status (defaulting to LS_STOPPED),
 *                  idle the component unless paused;
 *   otherwise   -> LS_FAILED, component idled.
 * Finally accumulates phase1 runtime/counters and stores the trace file.
 */
4977 static int lfsck_layout_master_post(const struct lu_env *env,
4978 struct lfsck_component *com,
4979 int result, bool init)
4981 struct lfsck_instance *lfsck = com->lc_lfsck;
4982 struct lfsck_layout *lo = com->lc_file_ram;
4983 struct lfsck_layout_master_data *llmd = com->lc_data;
4984 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4985 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4986 struct l_wait_info lwi = { 0 };
4991 llmd->llmd_post_result = result;
4992 llmd->llmd_to_post = 1;
4993 if (llmd->llmd_post_result <= 0)
4994 llmd->llmd_exit = 1;
4996 wake_up_all(&athread->t_ctl_waitq);
4997 l_wait_event(mthread->t_ctl_waitq,
4998 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
4999 thread_is_stopped(athread),
5002 if (llmd->llmd_assistant_status < 0)
5003 result = llmd->llmd_assistant_status;
5005 down_write(&com->lc_sem);
5006 spin_lock(&lfsck->li_lock);
5007 /* When the LFSCK failed, there may be some prefetched objects that
5008 * have not been processed yet; we do not know the exact position, so
5009 * just restart from the last checkpoint next time. */
5010 if (!init && !llmd->llmd_exit)
5011 lo->ll_pos_last_checkpoint =
5012 lfsck->li_pos_current.lp_oit_cookie;
5015 lo->ll_status = LS_SCANNING_PHASE2;
5016 lo->ll_flags |= LF_SCANNED_ONCE;
5017 lo->ll_flags &= ~LF_UPGRADE;
5018 list_move_tail(&com->lc_link, &lfsck->li_list_double_scan);
5019 } else if (result == 0) {
5020 lo->ll_status = lfsck->li_status;
5021 if (lo->ll_status == 0)
5022 lo->ll_status = LS_STOPPED;
5023 if (lo->ll_status != LS_PAUSED) {
5024 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
5027 lo->ll_status = LS_FAILED;
5028 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
5030 spin_unlock(&lfsck->li_lock);
/* HALF_SEC rounds the elapsed time to the nearest second. */
5033 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
5034 HALF_SEC - lfsck->li_time_last_checkpoint);
5035 lo->ll_time_last_checkpoint = cfs_time_current_sec();
5036 lo->ll_objs_checked_phase1 += com->lc_new_checked;
5037 com->lc_new_checked = 0;
5040 rc = lfsck_layout_store(env, com);
5041 up_write(&com->lc_sem);
5043 CDEBUG(D_LFSCK, "%s: layout LFSCK master post done: rc = %d\n",
5044 lfsck_lfsck2name(lfsck), rc);
/*
 * Slave-side post-processing after the phase1 scan ends.
 *
 * Persists the per-sequence LAST_ID files, then (under lc_sem + li_lock)
 * translates \a result into the trace-file status exactly as the master
 * variant does: success -> LS_SCANNING_PHASE2 on the double-scan list
 * (also clearing LF_CRASHED_LASTID, with an upper-layer notification via
 * li_out_notify on the elided path); result == 0 -> adopt li_status; else
 * LS_FAILED. Accumulates phase1 runtime/counters, stores the trace file,
 * notifies the masters with LE_PHASE1_DONE, and tears down the orphan
 * rbtree.
 */
5049 static int lfsck_layout_slave_post(const struct lu_env *env,
5050 struct lfsck_component *com,
5051 int result, bool init)
5053 struct lfsck_instance *lfsck = com->lc_lfsck;
5054 struct lfsck_layout *lo = com->lc_file_ram;
5058 rc = lfsck_layout_lastid_store(env, com);
5062 LASSERT(lfsck->li_out_notify != NULL);
5064 down_write(&com->lc_sem);
5065 spin_lock(&lfsck->li_lock);
5067 lo->ll_pos_last_checkpoint =
5068 lfsck->li_pos_current.lp_oit_cookie;
5070 lo->ll_status = LS_SCANNING_PHASE2;
5071 lo->ll_flags |= LF_SCANNED_ONCE;
5072 if (lo->ll_flags & LF_CRASHED_LASTID) {
/* LAST_ID files have been rebuilt by now; clear the flag. */
5074 lo->ll_flags &= ~LF_CRASHED_LASTID;
5076 lo->ll_flags &= ~LF_UPGRADE;
5077 list_move_tail(&com->lc_link, &lfsck->li_list_double_scan);
5078 } else if (result == 0) {
5079 lo->ll_status = lfsck->li_status;
5080 if (lo->ll_status == 0)
5081 lo->ll_status = LS_STOPPED;
5082 if (lo->ll_status != LS_PAUSED)
5083 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
5085 lo->ll_status = LS_FAILED;
5086 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
5088 spin_unlock(&lfsck->li_lock);
5091 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
/* HALF_SEC rounds the elapsed time to the nearest second. */
5095 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
5096 HALF_SEC - lfsck->li_time_last_checkpoint);
5097 lo->ll_time_last_checkpoint = cfs_time_current_sec();
5098 lo->ll_objs_checked_phase1 += com->lc_new_checked;
5099 com->lc_new_checked = 0;
5102 rc = lfsck_layout_store(env, com);
5103 up_write(&com->lc_sem);
/* Tell every tracked master that this OST finished phase1. */
5105 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
5108 lfsck_rbtree_cleanup(env, com);
5110 CDEBUG(D_LFSCK, "%s: layout LFSCK slave post done: rc = %d\n",
5111 lfsck_lfsck2name(lfsck), rc);
/*
 * ->lfsck_dump() method for the layout component: print the in-RAM
 * trace file (status, flags, params, time stamps, scan positions and
 * per-inconsistency-type repair counters) into the given seq_file,
 * under com->lc_sem read lock.
 *
 * NOTE(review): this chunk was extracted with lines elided, so some
 * seq_printf() format/argument pairs appear shorter than the format
 * string requires — confirm against the complete source.
 */
5116 static int lfsck_layout_dump(const struct lu_env *env,
5117 struct lfsck_component *com, struct seq_file *m)
5119 struct lfsck_instance *lfsck = com->lc_lfsck;
5120 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
5121 struct lfsck_layout *lo = com->lc_file_ram;
/* Serialize against concurrent trace-file updates while dumping. */
5124 down_read(&com->lc_sem);
5125 seq_printf(m, "name: lfsck_layout\n"
5131 lfsck_status2names(lo->ll_status));
5133 rc = lfsck_bits_dump(m, lo->ll_flags, lfsck_flags_names, "flags");
5137 rc = lfsck_bits_dump(m, bk->lb_param, lfsck_param_names, "param");
5141 rc = lfsck_time_dump(m, lo->ll_time_last_complete,
5142 "time_since_last_completed");
5146 rc = lfsck_time_dump(m, lo->ll_time_latest_start,
5147 "time_since_latest_start");
5151 rc = lfsck_time_dump(m, lo->ll_time_last_checkpoint,
5152 "time_since_last_checkpoint");
5156 seq_printf(m, "latest_start_position: "LPU64"\n"
5157 "last_checkpoint_position: "LPU64"\n"
5158 "first_failure_position: "LPU64"\n",
5159 lo->ll_pos_latest_start,
5160 lo->ll_pos_last_checkpoint,
5161 lo->ll_pos_first_inconsistent);
/* One counter per LLIT_* inconsistency type (arrays are 0-based,
 * the LLIT_* enum starts at 1, hence the "- 1"). */
5163 seq_printf(m, "success_count: %u\n"
5164 "repaired_dangling: "LPU64"\n"
5165 "repaired_unmatched_pair: "LPU64"\n"
5166 "repaired_multiple_referenced: "LPU64"\n"
5167 "repaired_orphan: "LPU64"\n"
5168 "repaired_inconsistent_owner: "LPU64"\n"
5169 "repaired_others: "LPU64"\n"
5170 "skipped: "LPU64"\n"
5171 "failed_phase1: "LPU64"\n"
5172 "failed_phase2: "LPU64"\n",
5173 lo->ll_success_count,
5174 lo->ll_objs_repaired[LLIT_DANGLING - 1],
5175 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
5176 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
5177 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
5178 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
5179 lo->ll_objs_repaired[LLIT_OTHERS - 1],
5180 lo->ll_objs_skipped,
5181 lo->ll_objs_failed_phase1,
5182 lo->ll_objs_failed_phase2);
/* Phase-1 in progress: derive average and real-time speeds from the
 * counters accumulated since the last checkpoint. */
5184 if (lo->ll_status == LS_SCANNING_PHASE1) {
5186 const struct dt_it_ops *iops;
5187 cfs_duration_t duration = cfs_time_current() -
5188 lfsck->li_time_last_checkpoint;
5189 __u64 checked = lo->ll_objs_checked_phase1 +
5190 com->lc_new_checked;
5191 __u64 speed = checked;
5192 __u64 new_checked = com->lc_new_checked * HZ;
5193 __u32 rtime = lo->ll_run_time_phase1 +
5194 cfs_duration_sec(duration + HALF_SEC);
5197 do_div(new_checked, duration);
5199 do_div(speed, rtime);
5200 seq_printf(m, "checked_phase1: "LPU64"\n"
5201 "checked_phase2: "LPU64"\n"
5202 "run_time_phase1: %u seconds\n"
5203 "run_time_phase2: %u seconds\n"
5204 "average_speed_phase1: "LPU64" items/sec\n"
5205 "average_speed_phase2: N/A\n"
5206 "real-time_speed_phase1: "LPU64" items/sec\n"
5207 "real-time_speed_phase2: N/A\n",
5209 lo->ll_objs_checked_phase2,
5211 lo->ll_run_time_phase2,
5215 LASSERT(lfsck->li_di_oit != NULL);
5217 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
5219 /* The low layer otable-based iteration position may NOT
5220 * exactly match the layout-based directory traversal
5221 * cookie. Generally, it is not a serious issue. But the
5222 * caller should NOT make assumption on that. */
5223 pos = iops->store(env, lfsck->li_di_oit);
5224 if (!lfsck->li_current_oit_processed)
5226 seq_printf(m, "current_position: "LPU64"\n", pos);
/* Phase-2 in progress: phase-1 speed is final, phase-2 speed is
 * computed from the running counters. */
5228 } else if (lo->ll_status == LS_SCANNING_PHASE2) {
5229 cfs_duration_t duration = cfs_time_current() -
5230 lfsck->li_time_last_checkpoint;
5231 __u64 checked = lo->ll_objs_checked_phase2 +
5232 com->lc_new_checked;
5233 __u64 speed1 = lo->ll_objs_checked_phase1;
5234 __u64 speed2 = checked;
5235 __u64 new_checked = com->lc_new_checked * HZ;
5236 __u32 rtime = lo->ll_run_time_phase2 +
5237 cfs_duration_sec(duration + HALF_SEC);
5240 do_div(new_checked, duration);
5241 if (lo->ll_run_time_phase1 != 0)
5242 do_div(speed1, lo->ll_run_time_phase1);
5244 do_div(speed2, rtime);
5245 rc = seq_printf(m, "checked_phase1: "LPU64"\n"
5246 "checked_phase2: "LPU64"\n"
5247 "run_time_phase1: %u seconds\n"
5248 "run_time_phase2: %u seconds\n"
5249 "average_speed_phase1: "LPU64" items/sec\n"
5250 "average_speed_phase2: "LPU64" items/sec\n"
5251 "real-time_speed_phase1: N/A\n"
5252 "real-time_speed_phase2: "LPU64" items/sec\n"
5253 "current_position: "DFID"\n",
5254 lo->ll_objs_checked_phase1,
5256 lo->ll_run_time_phase1,
5261 PFID(&com->lc_fid_latest_scanned_phase2));
/* Not scanning: both phases are complete (or never ran); report
 * final averages only. */
5266 __u64 speed1 = lo->ll_objs_checked_phase1;
5267 __u64 speed2 = lo->ll_objs_checked_phase2;
5269 if (lo->ll_run_time_phase1 != 0)
5270 do_div(speed1, lo->ll_run_time_phase1);
5271 if (lo->ll_run_time_phase2 != 0)
5272 do_div(speed2, lo->ll_run_time_phase2);
5273 seq_printf(m, "checked_phase1: "LPU64"\n"
5274 "checked_phase2: "LPU64"\n"
5275 "run_time_phase1: %u seconds\n"
5276 "run_time_phase2: %u seconds\n"
5277 "average_speed_phase1: "LPU64" items/sec\n"
5278 "average_speed_phase2: "LPU64" objs/sec\n"
5279 "real-time_speed_phase1: N/A\n"
5280 "real-time_speed_phase2: N/A\n"
5281 "current_position: N/A\n",
5282 lo->ll_objs_checked_phase1,
5283 lo->ll_objs_checked_phase2,
5284 lo->ll_run_time_phase1,
5285 lo->ll_run_time_phase2,
5290 up_read(&com->lc_sem);
/*
 * ->lfsck_double_scan() method on the master (MDT) side: hand phase-2
 * over to the assistant thread and wait until it either enters double
 * scan or stops, then propagate any assistant failure status.
 */
5295 static int lfsck_layout_master_double_scan(const struct lu_env *env,
5296 struct lfsck_component *com)
5298 struct lfsck_layout_master_data *llmd = com->lc_data;
5299 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5300 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5301 struct lfsck_layout *lo = com->lc_file_ram;
5302 struct l_wait_info lwi = { 0 };
/* Nothing to do unless phase-1 completed and we are entering phase-2. */
5304 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
/* Signal the assistant and block until it reacts. */
5307 llmd->llmd_to_double_scan = 1;
5308 wake_up_all(&athread->t_ctl_waitq);
5309 l_wait_event(mthread->t_ctl_waitq,
5310 llmd->llmd_in_double_scan ||
5311 thread_is_stopped(athread),
5313 if (llmd->llmd_assistant_status < 0)
5314 return llmd->llmd_assistant_status;
/*
 * ->lfsck_double_scan() method on the slave (OST) side: wait until all
 * registered masters have finished consuming this slave's orphan data
 * (llsd_master_list drains), polling the masters every 30 seconds, then
 * release the rbtree and notify LE_PHASE2_DONE.
 */
5319 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
5320 struct lfsck_component *com)
5322 struct lfsck_instance *lfsck = com->lc_lfsck;
5323 struct lfsck_layout_slave_data *llsd = com->lc_data;
5324 struct lfsck_layout *lo = com->lc_file_ram;
5325 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Not entering phase-2: clean up and tell the master we are done. */
5329 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
5330 lfsck_rbtree_cleanup(env, com);
5331 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
5335 CDEBUG(D_LFSCK, "%s: layout LFSCK slave phase2 scan start\n",
5336 lfsck_lfsck2name(lfsck));
5338 atomic_inc(&lfsck->li_double_scan_count);
/* Reset per-phase counters and checkpoint schedule. */
5340 com->lc_new_checked = 0;
5341 com->lc_new_scanned = 0;
5342 com->lc_time_last_checkpoint = cfs_time_current();
5343 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
5344 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Poll loop: re-query masters every 30s until none remain interested,
 * the local thread stops, or an error occurs. */
5347 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
5350 rc = lfsck_layout_slave_query_master(env, com);
5351 if (list_empty(&llsd->llsd_master_list)) {
5352 if (unlikely(!thread_is_running(thread)))
5363 rc = l_wait_event(thread->t_ctl_waitq,
5364 !thread_is_running(thread) ||
5365 list_empty(&llsd->llsd_master_list),
5367 if (unlikely(!thread_is_running(thread)))
5370 if (rc == -ETIMEDOUT)
5373 GOTO(done, rc = (rc < 0 ? rc : 1));
5377 rc = lfsck_layout_double_scan_result(env, com, rc);
5379 lfsck_rbtree_cleanup(env, com);
5380 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
/* Wake anyone waiting for all components' double scans to finish. */
5381 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
5382 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
5384 CDEBUG(D_LFSCK, "%s: layout LFSCK slave phase2 scan finished, "
5385 "status %d: rc = %d\n",
5386 lfsck_lfsck2name(lfsck), lo->ll_status, rc);
/*
 * ->lfsck_data_release() method on the master side: detach com->lc_data
 * and drain every OST/MDT target off the master's phase1/phase2/main
 * lists under the corresponding ltd_lock. The assistant thread must be
 * idle (init or stopped) and the request list empty — asserted below.
 */
5391 static void lfsck_layout_master_data_release(const struct lu_env *env,
5392 struct lfsck_component *com)
5394 struct lfsck_layout_master_data *llmd = com->lc_data;
5395 struct lfsck_instance *lfsck = com->lc_lfsck;
5396 struct lfsck_tgt_descs *ltds;
5397 struct lfsck_tgt_desc *ltd;
5398 struct lfsck_tgt_desc *next;
5400 LASSERT(llmd != NULL);
5401 LASSERT(thread_is_init(&llmd->llmd_thread) ||
5402 thread_is_stopped(&llmd->llmd_thread));
5403 LASSERT(list_empty(&llmd->llmd_req_list));
5405 com->lc_data = NULL;
/* Unhook all OST targets from the three OST-side lists. */
5407 ltds = &lfsck->li_ost_descs;
5408 spin_lock(&ltds->ltd_lock);
5409 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
5410 ltd_layout_phase_list) {
5411 list_del_init(&ltd->ltd_layout_phase_list);
5413 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
5414 ltd_layout_phase_list) {
5415 list_del_init(&ltd->ltd_layout_phase_list);
5417 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
5419 list_del_init(&ltd->ltd_layout_list);
5421 spin_unlock(&ltds->ltd_lock);
/* Same for the MDT-side lists. */
5423 ltds = &lfsck->li_mdt_descs;
5424 spin_lock(&ltds->ltd_lock);
5425 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
5426 ltd_layout_phase_list) {
5427 list_del_init(&ltd->ltd_layout_phase_list);
5429 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
5430 ltd_layout_phase_list) {
5431 list_del_init(&ltd->ltd_layout_phase_list);
5433 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
5435 list_del_init(&ltd->ltd_layout_list);
5437 spin_unlock(&ltds->ltd_lock);
/*
 * ->lfsck_data_release() method on the slave side: free every cached
 * sequence entry (dropping its LAST_ID object reference), drop every
 * registered master target, tear down the FID-access rbtree and detach
 * com->lc_data.
 */
5442 static void lfsck_layout_slave_data_release(const struct lu_env *env,
5443 struct lfsck_component *com)
5445 struct lfsck_layout_slave_data *llsd = com->lc_data;
5446 struct lfsck_layout_seq *lls;
5447 struct lfsck_layout_seq *next;
5448 struct lfsck_layout_slave_target *llst;
5449 struct lfsck_layout_slave_target *tmp;
5451 LASSERT(llsd != NULL);
5453 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
5455 list_del_init(&lls->lls_list);
5456 lfsck_object_put(env, lls->lls_lastid_obj);
5460 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
5462 list_del_init(&llst->llst_list);
5466 lfsck_rbtree_cleanup(env, com);
5467 com->lc_data = NULL;
/*
 * ->lfsck_quit() method on the master side: ask the assistant thread to
 * exit and block until it has returned to init state or fully stopped.
 */
5471 static void lfsck_layout_master_quit(const struct lu_env *env,
5472 struct lfsck_component *com)
5474 struct lfsck_layout_master_data *llmd = com->lc_data;
5475 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5476 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5477 struct l_wait_info lwi = { 0 };
5479 llmd->llmd_exit = 1;
5480 wake_up_all(&athread->t_ctl_waitq);
5481 l_wait_event(mthread->t_ctl_waitq,
5482 thread_is_init(athread) ||
5483 thread_is_stopped(athread),
/*
 * ->lfsck_quit() method on the slave side: only the FID-access rbtree
 * needs releasing.
 */
5487 static void lfsck_layout_slave_quit(const struct lu_env *env,
5488 struct lfsck_component *com)
5490 lfsck_rbtree_cleanup(env, com);
/*
 * ->lfsck_in_notify() method on the master side: handle an event sent by
 * a remote OST/MDT. LE_PAIRS_VERIFY is delegated to the pair-check
 * helper; LE_PHASE1_DONE / LE_PHASE2_DONE / LE_PEER_EXIT move the target
 * between the phase lists (or mark it done), under ltd_lock. On peer
 * failure with LPF_FAILOUT set, the whole LFSCK run is stopped.
 */
5493 static int lfsck_layout_master_in_notify(const struct lu_env *env,
5494 struct lfsck_component *com,
5495 struct lfsck_request *lr)
5497 struct lfsck_instance *lfsck = com->lc_lfsck;
5498 struct lfsck_layout *lo = com->lc_file_ram;
5499 struct lfsck_layout_master_data *llmd = com->lc_data;
5500 struct lfsck_tgt_descs *ltds;
5501 struct lfsck_tgt_desc *ltd;
5505 if (lr->lr_event == LE_PAIRS_VERIFY) {
5508 rc = lfsck_layout_master_check_pairs(env, com, &lr->lr_fid,
5514 CDEBUG(D_LFSCK, "%s: layout LFSCK master handle notify %u "
5515 "from %s %x, status %d\n", lfsck_lfsck2name(lfsck),
5516 lr->lr_event, (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5517 lr->lr_index, lr->lr_status);
5519 if (lr->lr_event != LE_PHASE1_DONE &&
5520 lr->lr_event != LE_PHASE2_DONE &&
5521 lr->lr_event != LE_PEER_EXIT)
/* Locate the reporting target in the right descriptor table. */
5524 if (lr->lr_flags & LEF_FROM_OST)
5525 ltds = &lfsck->li_ost_descs;
5527 ltds = &lfsck->li_mdt_descs;
5528 spin_lock(&ltds->ltd_lock);
5529 ltd = LTD_TGT(ltds, lr->lr_index);
5531 spin_unlock(&ltds->ltd_lock);
5536 list_del_init(&ltd->ltd_layout_phase_list);
5537 switch (lr->lr_event) {
5538 case LE_PHASE1_DONE:
/* Non-positive status means the peer failed phase-1: drop it and
 * mark this run incomplete. */
5539 if (lr->lr_status <= 0) {
5540 ltd->ltd_layout_done = 1;
5541 list_del_init(&ltd->ltd_layout_list);
5542 lo->ll_flags |= LF_INCOMPLETE;
/* Peer finished phase-1 successfully: queue it for phase-2. */
5547 if (lr->lr_flags & LEF_FROM_OST) {
5548 if (list_empty(&ltd->ltd_layout_list))
5549 list_add_tail(&ltd->ltd_layout_list,
5550 &llmd->llmd_ost_list);
5551 list_add_tail(&ltd->ltd_layout_phase_list,
5552 &llmd->llmd_ost_phase2_list);
5554 if (list_empty(&ltd->ltd_layout_list))
5555 list_add_tail(&ltd->ltd_layout_list,
5556 &llmd->llmd_mdt_list);
5557 list_add_tail(&ltd->ltd_layout_phase_list,
5558 &llmd->llmd_mdt_phase2_list);
5561 case LE_PHASE2_DONE:
5562 ltd->ltd_layout_done = 1;
5563 list_del_init(&ltd->ltd_layout_list);
/* LE_PEER_EXIT: the peer aborted; remember the incompleteness
 * unless we are going to fail out anyway. */
5567 ltd->ltd_layout_done = 1;
5568 list_del_init(&ltd->ltd_layout_list);
5569 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT))
5570 lo->ll_flags |= LF_INCOMPLETE;
5575 spin_unlock(&ltds->ltd_lock);
5577 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5578 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5580 memset(stop, 0, sizeof(*stop));
5581 stop->ls_status = lr->lr_status;
5582 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5583 lfsck_stop(env, lfsck->li_bottom, stop);
5584 } else if (lfsck_layout_master_to_orphan(llmd)) {
5585 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * ->lfsck_in_notify() method on the slave side. Handles:
 *  - LE_FID_ACCESSED: mark the FID as accessed in the rbtree bitmap;
 *  - LE_CONDITIONAL_DESTROY: conditionally destroy an orphan object;
 *  - LE_PAIRS_VERIFY: two-step MDT/OST PFID consistency check, possibly
 *    repairing the OST-object's PFID xattr;
 *  - LE_PHASE2_DONE / peer events: drop the reporting master from
 *    llsd_master_list and, with LPF_FAILOUT on LE_PEER_EXIT, stop.
 */
5591 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
5592 struct lfsck_component *com,
5593 struct lfsck_request *lr)
5595 struct lfsck_instance *lfsck = com->lc_lfsck;
5596 struct lfsck_layout_slave_data *llsd = com->lc_data;
5597 struct lfsck_layout_slave_target *llst;
5601 switch (lr->lr_event) {
5602 case LE_FID_ACCESSED:
5603 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
5605 case LE_CONDITIONAL_DESTROY:
5606 rc = lfsck_layout_slave_conditional_destroy(env, com, lr);
5608 case LE_PAIRS_VERIFY: {
5609 lr->lr_status = LPVS_INIT;
5610 /* Firstly, if the MDT-object which is claimed via OST-object
5611 * local stored PFID xattr recognizes the OST-object, then it
5612 * must be that the client given PFID is wrong. */
5613 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5618 lr->lr_status = LPVS_INCONSISTENT;
5619 /* The OST-object local stored PFID xattr is stale. We need to
5620 * check whether the MDT-object that is claimed via the client
5621 * given PFID information recognizes the OST-object or not. If
5622 * matches, then need to update the OST-object's PFID xattr. */
5623 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5626 * We are not sure whether the client given PFID information
5627 * is correct or not, do nothing to avoid improper fixing.
5630 * The client given PFID information is also invalid, we can
5631 * NOT fix the OST-object inconsistency.
5636 lr->lr_status = LPVS_INCONSISTENT_TOFIX;
5637 rc = lfsck_layout_slave_repair_pfid(env, com, lr);
5641 case LE_PHASE2_DONE:
5643 CDEBUG(D_LFSCK, "%s: layout LFSCK slave handle notify %u "
5644 "from MDT %x, status %d\n", lfsck_lfsck2name(lfsck),
5645 lr->lr_event, lr->lr_index, lr->lr_status);
/* Forget this master; wake the local thread if it was the last one. */
5651 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
5655 lfsck_layout_llst_put(llst);
5656 if (list_empty(&llsd->llsd_master_list))
5657 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
5659 if (lr->lr_event == LE_PEER_EXIT &&
5660 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5661 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5663 memset(stop, 0, sizeof(*stop));
5664 stop->ls_status = lr->lr_status;
5665 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5666 lfsck_stop(env, lfsck->li_bottom, stop);
/*
 * ->lfsck_query() method: report the component's current status
 * (an LS_* value) straight from the in-RAM trace file.
 */
5672 static int lfsck_layout_query(const struct lu_env *env,
5673 struct lfsck_component *com)
5675 struct lfsck_layout *lo = com->lc_file_ram;
5677 return lo->ll_status;
/*
 * ->lfsck_stop_notify() method on the master side: remove the target
 * from the layout lists and asynchronously send it an LE_PEER_EXIT /
 * LS_CO_PAUSED notification so the co-running peer stops too. A target
 * already off the list needs no notification.
 */
5680 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
5681 struct lfsck_component *com,
5682 struct lfsck_tgt_descs *ltds,
5683 struct lfsck_tgt_desc *ltd,
5684 struct ptlrpc_request_set *set)
5686 struct lfsck_thread_info *info = lfsck_env_info(env);
5687 struct lfsck_async_interpret_args *laia = &info->lti_laia;
5688 struct lfsck_request *lr = &info->lti_lr;
5689 struct lfsck_instance *lfsck = com->lc_lfsck;
5692 spin_lock(&ltds->ltd_lock);
5693 if (list_empty(&ltd->ltd_layout_list)) {
/* Not participating (any more) — nothing to notify. */
5694 LASSERT(list_empty(&ltd->ltd_layout_phase_list));
5695 spin_unlock(&ltds->ltd_lock);
5700 list_del_init(&ltd->ltd_layout_phase_list);
5701 list_del_init(&ltd->ltd_layout_list);
5702 spin_unlock(&ltds->ltd_lock);
/* Build the co-stop request. */
5704 memset(lr, 0, sizeof(*lr));
5705 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
5706 lr->lr_event = LE_PEER_EXIT;
5707 lr->lr_active = LFSCK_TYPE_LAYOUT;
5708 lr->lr_status = LS_CO_PAUSED;
5709 if (ltds == &lfsck->li_ost_descs)
5710 lr->lr_flags = LEF_TO_OST;
5712 laia->laia_com = com;
5713 laia->laia_ltds = ltds;
/* The async interpret callback releases this reference. */
5714 atomic_inc(&ltd->ltd_ref);
5715 laia->laia_ltd = ltd;
5717 laia->laia_shared = 0;
5719 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
5720 lfsck_layout_master_async_interpret,
5721 laia, LFSCK_NOTIFY);
5723 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to notify %s %x "
5724 "for co-stop: rc = %d\n",
5725 lfsck_lfsck2name(lfsck),
5726 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5727 ltd->ltd_index, rc);
5734 /* with lfsck::li_lock held */
/*
 * ->lfsck_join() method on the slave side: let a late-arriving master
 * join the orphan-handling run by registering it in llsd_master_list.
 * Joining requires LPF_ORPHAN, a valid peer index, and a live rbtree
 * (no rbtree means the run was started without orphan handling).
 * li_lock is dropped around the registration and the registration is
 * undone if the local thread stopped meanwhile.
 */
5735 static int lfsck_layout_slave_join(const struct lu_env *env,
5736 struct lfsck_component *com,
5737 struct lfsck_start_param *lsp)
5739 struct lfsck_instance *lfsck = com->lc_lfsck;
5740 struct lfsck_layout_slave_data *llsd = com->lc_data;
5741 struct lfsck_layout_slave_target *llst;
5742 struct lfsck_start *start = lsp->lsp_start;
5746 if (start == NULL || !(start->ls_flags & LPF_ORPHAN))
5749 if (!lsp->lsp_index_valid)
5752 /* If someone is running the LFSCK without orphan handling,
5753 * it will not maintain the object accessing rbtree. So we
5754 * cannot join it for orphan handling. */
5755 if (!llsd->llsd_rbtree_valid)
5758 spin_unlock(&lfsck->li_lock);
5759 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
5760 spin_lock(&lfsck->li_lock);
5761 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
/* Raced with thread stop: roll back the registration. */
5762 spin_unlock(&lfsck->li_lock);
5763 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index,
5766 lfsck_layout_llst_put(llst);
5767 spin_lock(&lfsck->li_lock);
/* Operation vector used by the layout component on the master (MDT). */
5774 static struct lfsck_operations lfsck_layout_master_ops = {
5775 .lfsck_reset = lfsck_layout_reset,
5776 .lfsck_fail = lfsck_layout_fail,
5777 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
5778 .lfsck_prep = lfsck_layout_master_prep,
5779 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
5780 .lfsck_exec_dir = lfsck_layout_exec_dir,
5781 .lfsck_post = lfsck_layout_master_post,
5782 .lfsck_interpret = lfsck_layout_master_async_interpret,
5783 .lfsck_dump = lfsck_layout_dump,
5784 .lfsck_double_scan = lfsck_layout_master_double_scan,
5785 .lfsck_data_release = lfsck_layout_master_data_release,
5786 .lfsck_quit = lfsck_layout_master_quit,
5787 .lfsck_in_notify = lfsck_layout_master_in_notify,
5788 .lfsck_query = lfsck_layout_query,
5789 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/* Operation vector used by the layout component on the slave (OST). */
5792 static struct lfsck_operations lfsck_layout_slave_ops = {
5793 .lfsck_reset = lfsck_layout_reset,
5794 .lfsck_fail = lfsck_layout_fail,
5795 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
5796 .lfsck_prep = lfsck_layout_slave_prep,
5797 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
5798 .lfsck_exec_dir = lfsck_layout_exec_dir,
5799 .lfsck_post = lfsck_layout_slave_post,
5800 .lfsck_dump = lfsck_layout_dump,
5801 .lfsck_double_scan = lfsck_layout_slave_double_scan,
5802 .lfsck_data_release = lfsck_layout_slave_data_release,
5803 .lfsck_quit = lfsck_layout_slave_quit,
5804 .lfsck_in_notify = lfsck_layout_slave_in_notify,
5805 .lfsck_query = lfsck_layout_query,
5806 .lfsck_join = lfsck_layout_slave_join,
/*
 * Set up the layout LFSCK component for this lfsck instance: allocate
 * the component and its master/slave private data, allocate RAM/disk
 * copies of the trace file, locate-or-create the on-disk trace object
 * under the local root, load (or reset/init) the trace file, and link
 * the component onto the idle or scan list according to its status.
 * On any failure the partially built component is cleaned up.
 */
5809 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
5811 struct lfsck_component *com;
5812 struct lfsck_layout *lo;
5813 struct dt_object *root = NULL;
5814 struct dt_object *obj;
5822 INIT_LIST_HEAD(&com->lc_link);
5823 INIT_LIST_HEAD(&com->lc_link_dir);
5824 init_rwsem(&com->lc_sem);
5825 atomic_set(&com->lc_ref, 1);
5826 com->lc_lfsck = lfsck;
5827 com->lc_type = LFSCK_TYPE_LAYOUT;
/* Master (MDT) instance: assistant-thread based private data. */
5828 if (lfsck->li_master) {
5829 struct lfsck_layout_master_data *llmd;
5831 com->lc_ops = &lfsck_layout_master_ops;
5832 OBD_ALLOC_PTR(llmd);
5834 GOTO(out, rc = -ENOMEM);
5836 INIT_LIST_HEAD(&llmd->llmd_req_list);
5837 spin_lock_init(&llmd->llmd_lock);
5838 INIT_LIST_HEAD(&llmd->llmd_ost_list);
5839 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
5840 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
5841 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
5842 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
5843 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
5844 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
5845 com->lc_data = llmd;
/* Slave (OST) instance: sequence list + FID-access rbtree. */
5847 struct lfsck_layout_slave_data *llsd;
5849 com->lc_ops = &lfsck_layout_slave_ops;
5850 OBD_ALLOC_PTR(llsd);
5852 GOTO(out, rc = -ENOMEM);
5854 INIT_LIST_HEAD(&llsd->llsd_seq_list);
5855 INIT_LIST_HEAD(&llsd->llsd_master_list);
5856 spin_lock_init(&llsd->llsd_lock);
5857 llsd->llsd_rb_root = RB_ROOT;
5858 rwlock_init(&llsd->llsd_rb_lock);
5859 com->lc_data = llsd;
5861 com->lc_file_size = sizeof(*lo);
5862 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
5863 if (com->lc_file_ram == NULL)
5864 GOTO(out, rc = -ENOMEM);
5866 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
5867 if (com->lc_file_disk == NULL)
5868 GOTO(out, rc = -ENOMEM);
5870 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
5872 GOTO(out, rc = PTR_ERR(root));
5874 if (unlikely(!dt_try_as_dir(env, root)))
5875 GOTO(out, rc = -ENOTDIR);
5877 obj = local_file_find_or_create(env, lfsck->li_los, root,
5879 S_IFREG | S_IRUGO | S_IWUSR);
5881 GOTO(out, rc = PTR_ERR(obj));
/* Load previous trace data; reset on corruption, init when absent. */
5884 rc = lfsck_layout_load(env, com);
5886 rc = lfsck_layout_reset(env, com, true);
5887 else if (rc == -ENOENT)
5888 rc = lfsck_layout_init(env, com);
5893 lo = com->lc_file_ram;
5894 switch (lo->ll_status) {
5900 spin_lock(&lfsck->li_lock);
5901 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5902 spin_unlock(&lfsck->li_lock);
5905 CERROR("%s: unknown lfsck_layout status %d\n",
5906 lfsck_lfsck2name(lfsck), lo->ll_status);
5908 case LS_SCANNING_PHASE1:
5909 case LS_SCANNING_PHASE2:
5910 /* No need to store the status to disk right now.
5911 * If the system crashed before the status stored,
5912 * it will be loaded back when next time. */
5913 lo->ll_status = LS_CRASHED;
5914 lo->ll_flags |= LF_INCOMPLETE;
5921 spin_lock(&lfsck->li_lock);
5922 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
5923 spin_unlock(&lfsck->li_lock);
/* A crashed LAST_ID needs the OSD layer to rebuild it. */
5927 if (lo->ll_flags & LF_CRASHED_LASTID) {
5928 LASSERT(lfsck->li_out_notify != NULL);
5930 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5931 LE_LASTID_REBUILDING);
5937 if (root != NULL && !IS_ERR(root))
5938 lu_object_put(env, &root->do_lu);
5941 lfsck_component_cleanup(env, com);
5942 CERROR("%s: fail to init layout LFSCK component: rc = %d\n",
5943 lfsck_lfsck2name(lfsck), rc);
/*
 * Per-iteration state for the orphan OST-object index iterator
 * (see lfsck_orphan_index_ops below).
 */
5949 struct lfsck_orphan_it {
5950 struct lfsck_component *loi_com;	/* layout component (referenced) */
5951 struct lfsck_rbtree_node *loi_lrn;	/* current rbtree node */
5952 struct lfsck_layout_slave_target *loi_llst; /* requesting master */
5953 struct lu_fid loi_key;			/* current iteration key */
5954 struct lu_orphan_rec loi_rec;		/* record for current key */
5956 unsigned int loi_over:1;		/* iteration exhausted */
/*
 * Check whether the given FID's sequence belongs to the MDT with index
 * @idx, via an FLD lookup. Abnormal (non-normal) FIDs are attributed to
 * MDT0. Return values are positive/zero/negative per the caller's
 * matched / not-matched / error convention (exact values elided here).
 */
5959 static int lfsck_fid_match_idx(const struct lu_env *env,
5960 struct lfsck_instance *lfsck,
5961 const struct lu_fid *fid, int idx)
5963 struct seq_server_site *ss;
5964 struct lu_server_fld *sf;
5965 struct lu_seq_range range = { 0 };
5968 /* All abnormal cases will be returned to MDT0. */
5969 if (!fid_is_norm(fid)) {
5976 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
5977 if (unlikely(ss == NULL))
5980 sf = ss->ss_server_fld;
5981 LASSERT(sf != NULL);
5983 fld_range_set_any(&range);
5984 rc = fld_server_lookup(env, sf, fid_seq(fid), &range);
5988 if (!fld_range_is_mdt(&range))
5991 if (range.lsr_index == idx)
/*
 * Destroy an orphan OST-object in a single local transaction:
 * declare ref_del + destroy, then execute both under the object's
 * write lock. Errors are only reported via CDEBUG; the caller does
 * not depend on the result.
 */
5997 static void lfsck_layout_destroy_orphan(const struct lu_env *env,
5998 struct dt_device *dev,
5999 struct dt_object *obj)
6001 struct thandle *handle;
6005 handle = dt_trans_create(env, dev);
6009 rc = dt_declare_ref_del(env, obj, handle);
6013 rc = dt_declare_destroy(env, obj, handle);
6017 rc = dt_trans_start_local(env, dev, handle);
6021 dt_write_lock(env, obj, 0);
6022 rc = dt_ref_del(env, obj, handle);
6024 rc = dt_destroy(env, obj, handle);
6025 dt_write_unlock(env, obj);
6030 dt_trans_stop(env, dev, handle);
6032 CDEBUG(D_LFSCK, "destroy orphan OST-object "DFID": rc = %d\n",
6033 PFID(lfsck_dto2fid(obj)), rc);
/* Stub: the orphan "index" is iterate-only; direct lookup is unsupported. */
6038 static int lfsck_orphan_index_lookup(const struct lu_env *env,
6039 struct dt_object *dt,
6041 const struct dt_key *key,
6042 struct lustre_capa *capa)
/* Stub: insertion into the orphan pseudo-index is unsupported. */
6047 static int lfsck_orphan_index_declare_insert(const struct lu_env *env,
6048 struct dt_object *dt,
6049 const struct dt_rec *rec,
6050 const struct dt_key *key,
6051 struct thandle *handle)
/* Stub: insertion into the orphan pseudo-index is unsupported. */
6056 static int lfsck_orphan_index_insert(const struct lu_env *env,
6057 struct dt_object *dt,
6058 const struct dt_rec *rec,
6059 const struct dt_key *key,
6060 struct thandle *handle,
6061 struct lustre_capa *capa,
/* Stub: deletion from the orphan pseudo-index is unsupported. */
6067 static int lfsck_orphan_index_declare_delete(const struct lu_env *env,
6068 struct dt_object *dt,
6069 const struct dt_key *key,
6070 struct thandle *handle)
/* Stub: deletion from the orphan pseudo-index is unsupported. */
6075 static int lfsck_orphan_index_delete(const struct lu_env *env,
6076 struct dt_object *dt,
6077 const struct dt_key *key,
6078 struct thandle *handle,
6079 struct lustre_capa *capa)
/*
 * ->init() for the orphan iterator: find the running lfsck instance and
 * its layout component, claim the slave-target slot for the requesting
 * MDT (encoded in @attr), and on the first pass prune rbtree nodes
 * whose objects were all accessed. Holds llsd_rb_lock for read from
 * here until ->fini(). Returns the iterator or ERR_PTR on failure.
 */
6084 static struct dt_it *lfsck_orphan_it_init(const struct lu_env *env,
6085 struct dt_object *dt,
6087 struct lustre_capa *capa)
6089 struct dt_device *dev = lu2dt_dev(dt->do_lu.lo_dev);
6090 struct lfsck_instance *lfsck;
6091 struct lfsck_component *com = NULL;
6092 struct lfsck_layout_slave_data *llsd;
6093 struct lfsck_orphan_it *it = NULL;
6097 lfsck = lfsck_instance_find(dev, true, false);
6098 if (unlikely(lfsck == NULL))
6099 RETURN(ERR_PTR(-ENXIO));
6101 com = lfsck_component_find(lfsck, LFSCK_TYPE_LAYOUT);
6102 if (unlikely(com == NULL))
6103 GOTO(out, rc = -ENOENT);
6105 llsd = com->lc_data;
6106 if (!llsd->llsd_rbtree_valid)
6107 GOTO(out, rc = -ESRCH);
6111 GOTO(out, rc = -ENOMEM);
6113 it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
6114 if (it->loi_llst == NULL)
6115 GOTO(out, rc = -ENXIO);
6117 if (dev->dd_record_fid_accessed) {
6118 /* The first iteration against the rbtree, scan the whole rbtree
6119 * to remove the nodes which do NOT need to be handled. */
6120 write_lock(&llsd->llsd_rb_lock);
/* Flag rechecked under the write lock to serialize the one-time prune. */
6121 if (dev->dd_record_fid_accessed) {
6122 struct rb_node *node;
6123 struct rb_node *next;
6124 struct lfsck_rbtree_node *lrn;
6126 /* No need to record the fid accessing anymore. */
6127 dev->dd_record_fid_accessed = 0;
6129 node = rb_first(&llsd->llsd_rb_root);
6130 while (node != NULL) {
6131 next = rb_next(node);
6132 lrn = rb_entry(node, struct lfsck_rbtree_node,
/* Every known object was accessed: no orphans here, drop the node. */
6134 if (atomic_read(&lrn->lrn_known_count) <=
6135 atomic_read(&lrn->lrn_accessed_count)) {
6136 rb_erase(node, &llsd->llsd_rb_root);
6137 lfsck_rbtree_free(lrn);
6142 write_unlock(&llsd->llsd_rb_lock);
6145 /* read lock the rbtree when init, and unlock when fini */
6146 read_lock(&llsd->llsd_rb_lock);
6154 lfsck_component_put(env, com);
6156 CDEBUG(D_LFSCK, "%s: init the orphan iteration: rc = %d\n",
6157 lfsck_lfsck2name(lfsck), rc);
6159 lfsck_instance_put(env, lfsck);
6164 it = (struct lfsck_orphan_it *)ERR_PTR(rc);
6167 return (struct dt_it *)it;
/*
 * ->fini() for the orphan iterator: drop the rbtree read lock taken in
 * ->init(), save the current key/hash into the slave-target so a later
 * ->load() can resume, and release the target/component references.
 */
6170 static void lfsck_orphan_it_fini(const struct lu_env *env,
6173 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6174 struct lfsck_component *com = it->loi_com;
6175 struct lfsck_layout_slave_data *llsd;
6176 struct lfsck_layout_slave_target *llst;
6179 CDEBUG(D_LFSCK, "%s: fini the orphan iteration\n",
6180 lfsck_lfsck2name(com->lc_lfsck));
6182 llsd = com->lc_data;
6183 read_unlock(&llsd->llsd_rb_lock);
6184 llst = it->loi_llst;
6185 LASSERT(llst != NULL);
6187 /* Save the key and hash for iterate next. */
6188 llst->llst_fid = it->loi_key;
6189 llst->llst_hash = it->loi_hash;
6190 lfsck_layout_llst_put(llst);
6191 lfsck_component_put(env, com);
/*
 * ->next() for the orphan iterator: advance loi_key to the next orphan
 * OST-object candidate. Walks the rbtree bitmap of known-but-not-
 * accessed objects, skips fully-accessed nodes, and for each candidate
 * loads the object, reads its PFID xattr and fills loi_rec with the
 * claimed parent FID and ownership. Pre-created objects and stale
 * orphans left by the dangling-repair/unlink race are handled inline.
 *
6197 * \retval +1: the iteration finished
6198 * \retval 0: on success, not finished
6199 * \retval -ve: on error
 */
6201 static int lfsck_orphan_it_next(const struct lu_env *env,
6204 struct lfsck_thread_info *info = lfsck_env_info(env);
6205 struct filter_fid_old *pfid = &info->lti_old_pfid;
6206 struct lu_attr *la = &info->lti_la;
6207 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6208 struct lu_fid *key = &it->loi_key;
6209 struct lu_orphan_rec *rec = &it->loi_rec;
6210 struct lfsck_component *com = it->loi_com;
6211 struct lfsck_instance *lfsck = com->lc_lfsck;
6212 struct lfsck_layout_slave_data *llsd = com->lc_data;
6213 struct dt_object *obj;
6214 struct lfsck_rbtree_node *lrn;
6218 __u32 idx = it->loi_llst->llst_index;
/* Locate the rbtree node covering the current key. */
6228 lrn = lfsck_rbtree_search(llsd, key, &exact);
6236 key->f_seq = lrn->lrn_seq;
6237 key->f_oid = lrn->lrn_first_oid;
6242 if (unlikely(key->f_oid == 0)) {
6249 lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH) {
/* Skip nodes in which every known object was already accessed. */
6255 if (unlikely(atomic_read(&lrn->lrn_known_count) <=
6256 atomic_read(&lrn->lrn_accessed_count))) {
6257 struct rb_node *next = rb_next(&lrn->lrn_node);
6259 while (next != NULL) {
6260 lrn = rb_entry(next, struct lfsck_rbtree_node,
6262 if (atomic_read(&lrn->lrn_known_count) >
6263 atomic_read(&lrn->lrn_accessed_count))
6265 next = rb_next(next);
6274 key->f_seq = lrn->lrn_seq;
6275 key->f_oid = lrn->lrn_first_oid;
6279 pos = key->f_oid - lrn->lrn_first_oid;
/* Find the next known bit within the node's bitmap window. */
6282 pos = find_next_bit(lrn->lrn_known_bitmap,
6283 LFSCK_RBTREE_BITMAP_WIDTH, pos);
6284 if (pos >= LFSCK_RBTREE_BITMAP_WIDTH) {
6285 key->f_oid = lrn->lrn_first_oid + pos;
6286 if (unlikely(key->f_oid < lrn->lrn_first_oid)) {
6294 if (test_bit(pos, lrn->lrn_accessed_bitmap)) {
/* Candidate orphan: load the object and examine it. */
6299 key->f_oid = lrn->lrn_first_oid + pos;
6300 obj = lfsck_object_find(env, lfsck, key);
6303 if (rc == -ENOENT) {
6310 dt_read_lock(env, obj, 0);
6311 if (!dt_object_exists(obj)) {
6312 dt_read_unlock(env, obj);
6313 lfsck_object_put(env, obj);
6318 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
6322 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, pfid, sizeof(*pfid)),
6323 XATTR_NAME_FID, BYPASS_CAPA);
6324 if (rc == -ENODATA) {
6325 /* For the pre-created OST-object, update the bitmap to avoid
6326 * others LFSCK (second phase) iteration to touch it again. */
6327 if (la->la_ctime == 0) {
6328 if (!test_and_set_bit(pos, lrn->lrn_accessed_bitmap))
6329 atomic_inc(&lrn->lrn_accessed_count);
6331 /* For the race between repairing dangling referenced
6332 * MDT-object and unlink the file, it may left orphan
6333 * OST-object there. Destroy it now! */
6334 if (unlikely(!(la->la_mode & S_ISUID))) {
6335 dt_read_unlock(env, obj);
6336 lfsck_layout_destroy_orphan(env,
6339 lfsck_object_put(env, obj);
6343 } else if (idx == 0) {
6344 /* If the orphan OST-object has no parent information,
6345 * regard it as referenced by the MDT-object on MDT0. */
6346 fid_zero(&rec->lor_fid);
6347 rec->lor_uid = la->la_uid;
6348 rec->lor_gid = la->la_gid;
6352 dt_read_unlock(env, obj);
6353 lfsck_object_put(env, obj);
/* A PFID xattr must be one of the two known filter_fid sizes. */
6361 if (rc != sizeof(struct filter_fid) &&
6362 rc != sizeof(struct filter_fid_old))
6363 GOTO(out, rc = -EINVAL);
6365 fid_le_to_cpu(&rec->lor_fid, &pfid->ff_parent);
6366 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
6367 * MDT-object's FID::f_ver, instead it is the OST-object index in its
6368 * parent MDT-object's layout EA. */
6369 save = rec->lor_fid.f_stripe_idx;
6370 rec->lor_fid.f_ver = 0;
6371 rc = lfsck_fid_match_idx(env, lfsck, &rec->lor_fid, idx);
6372 /* If the orphan OST-object does not claim the MDT, then next.
6374 * If we do not know whether it matches or not, then return it
6375 * to the MDT for further check. */
6377 dt_read_unlock(env, obj);
6378 lfsck_object_put(env, obj);
6383 rec->lor_fid.f_stripe_idx = save;
6384 rec->lor_uid = la->la_uid;
6385 rec->lor_gid = la->la_gid;
6387 CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u\n",
6388 lfsck_lfsck2name(com->lc_lfsck), PFID(key), PFID(&rec->lor_fid),
6389 rec->lor_uid, rec->lor_gid);
6394 dt_read_unlock(env, obj);
6395 lfsck_object_put(env, obj);
/*
 * ->get() for the orphan iterator: position at @key and advance to the
 * first valid entry via ->next().
 *
6403 * \retval +1: locate to the exactly position
6404 * \retval 0: cannot locate to the exactly position,
6405 * call next() to move to a valid position.
6406 * \retval -ve: on error
 */
6408 static int lfsck_orphan_it_get(const struct lu_env *env,
6410 const struct dt_key *key)
6412 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6415 it->loi_key = *(struct lu_fid *)key;
6416 rc = lfsck_orphan_it_next(env, di);
/* ->put() for the orphan iterator: nothing to release per position. */
6426 static void lfsck_orphan_it_put(const struct lu_env *env,
/* ->key(): the iteration key is the current orphan FID. */
6431 static struct dt_key *lfsck_orphan_it_key(const struct lu_env *env,
6432 const struct dt_it *di)
6434 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6436 return (struct dt_key *)&it->loi_key;
/* ->key_size(): keys are full lu_fid structures. */
6439 static int lfsck_orphan_it_key_size(const struct lu_env *env,
6440 const struct dt_it *di)
6442 return sizeof(struct lu_fid);
/* ->rec(): copy out the lu_orphan_rec prepared by ->next(). */
6445 static int lfsck_orphan_it_rec(const struct lu_env *env,
6446 const struct dt_it *di,
6450 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6452 *(struct lu_orphan_rec *)rec = it->loi_rec;
/* ->store(): the resumable cookie is the iterator's dummy hash. */
6457 static __u64 lfsck_orphan_it_store(const struct lu_env *env,
6458 const struct dt_it *di)
6460 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6462 return it->loi_hash;
/*
 * ->load(): resume iteration at the given cookie. If @hash does not
 * match the hash saved at ->fini() time the saved position is stale,
 * so restart from scratch; otherwise continue from the saved FID.
 *
6466 * \retval +1: locate to the exactly position
6467 * \retval 0: cannot locate to the exactly position,
6468 * call next() to move to a valid position.
6469 * \retval -ve: on error
 */
6471 static int lfsck_orphan_it_load(const struct lu_env *env,
6472 const struct dt_it *di,
6475 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6476 struct lfsck_layout_slave_target *llst = it->loi_llst;
6479 LASSERT(llst != NULL);
6481 if (hash != llst->llst_hash) {
6482 CDEBUG(D_LFSCK, "%s: the given hash "LPU64" for orphan "
6483 "iteration does not match the one when fini "
6484 LPU64", to be reset.\n",
6485 lfsck_lfsck2name(it->loi_com->lc_lfsck), hash,
6487 fid_zero(&llst->llst_fid);
6488 llst->llst_hash = 0;
6491 it->loi_key = llst->llst_fid;
6492 it->loi_hash = llst->llst_hash;
6493 rc = lfsck_orphan_it_next(env, (struct dt_it *)di);
/* ->key_rec(): no combined key/record encoding for this iterator. */
6503 static int lfsck_orphan_it_key_rec(const struct lu_env *env,
6504 const struct dt_it *di,
6510 const struct dt_index_operations lfsck_orphan_index_ops = {
6511 .dio_lookup = lfsck_orphan_index_lookup,
6512 .dio_declare_insert = lfsck_orphan_index_declare_insert,
6513 .dio_insert = lfsck_orphan_index_insert,
6514 .dio_declare_delete = lfsck_orphan_index_declare_delete,
6515 .dio_delete = lfsck_orphan_index_delete,
6517 .init = lfsck_orphan_it_init,
6518 .fini = lfsck_orphan_it_fini,
6519 .get = lfsck_orphan_it_get,
6520 .put = lfsck_orphan_it_put,
6521 .next = lfsck_orphan_it_next,
6522 .key = lfsck_orphan_it_key,
6523 .key_size = lfsck_orphan_it_key_size,
6524 .rec = lfsck_orphan_it_rec,
6525 .store = lfsck_orphan_it_store,
6526 .load = lfsck_orphan_it_load,
6527 .key_rec = lfsck_orphan_it_key_rec,