4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_fid.h>
43 #include <lustre_lib.h>
44 #include <lustre_net.h>
45 #include <lustre/lustre_user.h>
46 #include <md_object.h>
47 #include <obd_class.h>
49 #include "lfsck_internal.h"
51 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Component name under which the layout LFSCK registers and logs itself. */
53 static const char lfsck_layout_name[] = "lfsck_layout";
/* Per-FID-sequence state tracked by the layout LFSCK slave (one entry per
 * object sequence on this OST).  NOTE(review): some fields of this struct
 * (e.g. the current lls_seq/lls_lastid values) are elided in this extract. */
55 struct lfsck_layout_seq {
/* Link into lfsck_layout_slave_data::llsd_seq_list, kept sorted by seq. */
56 struct list_head lls_list;
/* Highest object ID actually observed for this sequence during scanning. */
59 __u64 lls_lastid_known;
/* The LAST_ID file object for this sequence (NULL until loaded). */
60 struct dt_object *lls_lastid_obj;
/* Set when the in-RAM last-id differs from disk and must be synced. */
61 unsigned int lls_dirty:1;
/* Slave-side descriptor for one master (MDT) that is driving layout
 * verification against this target. */
64 struct lfsck_layout_slave_target {
65 /* link into lfsck_layout_slave_data::llsd_master_list. */
66 struct list_head llst_list;
67 /* The position for next record in the rbtree for iteration. */
68 struct lu_fid llst_fid;
69 /* Dummy hash for iteration against the rbtree. */
/* Slave-side (OST) state for the layout LFSCK component. */
76 struct lfsck_layout_slave_data {
77 /* list for lfsck_layout_seq */
78 struct list_head llsd_seq_list;
80 /* list of the masters involved in layout verification. */
81 struct list_head llsd_master_list;
/* In-RAM object that stands for the rbtree so it can be iterated via the
 * normal dt index iteration API (see lfsck_rbtree_setup()). */
84 struct dt_object *llsd_rb_obj;
/* Root of the known/accessed OST-object bitmap rbtree. */
85 struct rb_root llsd_rb_root;
/* Protects the rbtree; readers update bitmaps, writers insert/teardown. */
86 rwlock_t llsd_rb_lock;
/* Cleared under llsd_rb_lock to invalidate the rbtree before teardown. */
87 unsigned int llsd_rbtree_valid:1;
/* Reference-counted wrapper pairing an MDT-object with the attributes that
 * were pre-fetched for layout verification. */
90 struct lfsck_layout_object {
/* The dt object; holds a lu_object reference (see lfsck_layout_object_init). */
91 struct dt_object *llo_obj;
/* Attributes snapshot taken at pre-fetch time. */
92 struct lu_attr llo_attr;
/* One queued parent/child consistency-check request handled by the layout
 * LFSCK assistant thread. */
97 struct lfsck_layout_req {
/* Link into lfsck_layout_master_data::llmd_req_list. */
98 struct list_head llr_list;
/* Parent (MDT-object) holding the LOV EA; refcounted via llo_ref. */
99 struct lfsck_layout_object *llr_parent;
/* The OST-object referenced by the parent's LOV EA stripe. */
100 struct dt_object *llr_child;
102 __u32 llr_lov_idx; /* offset in LOV EA */
/* Master-side (MDT) state for the layout LFSCK: request queue for the
 * assistant thread plus per-phase target tracking lists. */
105 struct lfsck_layout_master_data {
/* Protects llmd_req_list and related state. */
106 spinlock_t llmd_lock;
107 struct list_head llmd_req_list;
109 /* list for the ost targets involved in layout verification. */
110 struct list_head llmd_ost_list;
112 /* list for the ost targets in phase1 scanning. */
113 struct list_head llmd_ost_phase1_list;
115 /* list for the ost targets in phase2 scanning. */
116 struct list_head llmd_ost_phase2_list;
118 /* list for the mdt targets involved in layout verification. */
119 struct list_head llmd_mdt_list;
121 /* list for the mdt targets in phase1 scanning. */
122 struct list_head llmd_mdt_phase1_list;
124 /* list for the mdt targets in phase2 scanning. */
125 struct list_head llmd_mdt_phase2_list;
/* The layout LFSCK assistant thread. */
127 struct ptlrpc_thread llmd_thread;
/* Generation counter used to detect already-visited targets while walking
 * the phase lists (see lfsck_layout_master_query_others). */
128 __u32 llmd_touch_gen;
130 int llmd_assistant_status;
131 int llmd_post_result;
132 unsigned int llmd_to_post:1,
133 llmd_to_double_scan:1,
134 llmd_in_double_scan:1,
/* Argument bundle for slave-side async RPC interpret callbacks. */
138 struct lfsck_layout_slave_async_args {
139 struct obd_export *llsaa_exp;
140 struct lfsck_component *llsaa_com;
141 struct lfsck_layout_slave_target *llsaa_llst;
/* Allocate and initialize an lfsck_layout_object wrapper for @obj: snapshot
 * the object's attributes, take a lu_object reference, and set refcount to 1.
 * Returns the new object or ERR_PTR(-ENOMEM) / attr-get error.
 * NOTE(review): allocation and error-path lines are elided in this extract. */
144 static struct lfsck_layout_object *
145 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
148 struct lfsck_layout_object *llo;
153 return ERR_PTR(-ENOMEM);
/* Pre-fetch the attributes so later verification can compare against them. */
155 rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
/* Pin the underlying lu_object; dropped in lfsck_layout_object_put(). */
162 lu_object_get(&obj->do_lu);
164 /* The gen can be used to check whether some others have changed the
165 * file layout after LFSCK pre-fetching but before real verification. */
167 atomic_set(&llo->llo_ref, 1);
/* Drop one reference on @llst; on the last put the target must already be
 * unlinked from the master list, then it is freed (free line elided here). */
173 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
175 if (atomic_dec_and_test(&llst->llst_ref)) {
176 LASSERT(list_empty(&llst->llst_list));
/* Register master @index in llsd->llsd_master_list.  Allocates a new slave
 * target with refcount 1; if an entry with the same index already exists the
 * duplicate path (elided here) presumably frees the new one — TODO confirm. */
183 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
185 struct lfsck_layout_slave_target *llst;
186 struct lfsck_layout_slave_target *tmp;
193 INIT_LIST_HEAD(&llst->llst_list);
195 llst->llst_index = index;
196 atomic_set(&llst->llst_ref, 1);
198 spin_lock(&llsd->llsd_lock);
/* Scan for an existing entry with the same master index. */
199 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
200 if (tmp->llst_index == index) {
/* Not found: append the new entry. */
206 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
207 spin_unlock(&llsd->llsd_lock);
/* Unlink @llst from the master list (if still linked) and drop the list's
 * reference via lfsck_layout_llst_put(). */
216 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
217 struct lfsck_layout_slave_target *llst)
221 spin_lock(&llsd->llsd_lock);
222 if (!list_empty(&llst->llst_list)) {
223 list_del_init(&llst->llst_list);
226 spin_unlock(&llsd->llsd_lock);
/* NOTE(review): the put here appears conditional on having unlinked the
 * entry above — intervening lines are elided; verify against full source. */
229 lfsck_layout_llst_put(llst);
/* Find the slave target registered for master @index.  On hit, take an extra
 * reference and return it; if @unlink is set, also remove it from the master
 * list.  Returns NULL (elided) when no entry matches. */
232 static inline struct lfsck_layout_slave_target *
233 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
234 __u32 index, bool unlink)
236 struct lfsck_layout_slave_target *llst;
238 spin_lock(&llsd->llsd_lock);
239 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
240 if (llst->llst_index == index) {
/* Presumably guarded by "if (unlink)" — the guard line is elided here. */
242 list_del_init(&llst->llst_list);
/* Caller receives its own reference. */
244 atomic_inc(&llst->llst_ref);
245 spin_unlock(&llsd->llsd_lock);
250 spin_unlock(&llsd->llsd_lock);
/* Drop one reference on @llo; on the last put release the wrapped object
 * and free the wrapper (free line elided in this extract). */
255 static inline void lfsck_layout_object_put(const struct lu_env *env,
256 struct lfsck_layout_object *llo)
258 if (atomic_dec_and_test(&llo->llo_ref)) {
259 lfsck_object_put(env, llo->llo_obj);
/* Build a verification request linking @parent (MDT-object, gains a ref) to
 * @child (OST-object) at stripe position @lov_idx on OST @ost_idx.
 * Returns the request or ERR_PTR(-ENOMEM). */
264 static struct lfsck_layout_req *
265 lfsck_layout_req_init(struct lfsck_layout_object *parent,
266 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
268 struct lfsck_layout_req *llr;
272 return ERR_PTR(-ENOMEM);
274 INIT_LIST_HEAD(&llr->llr_list);
/* The request holds its own reference on the parent, released in
 * lfsck_layout_req_fini(). */
275 atomic_inc(&parent->llo_ref);
276 llr->llr_parent = parent;
277 llr->llr_child = child;
278 llr->llr_ost_idx = ost_idx;
279 llr->llr_lov_idx = lov_idx;
/* Release the child and parent references taken by lfsck_layout_req_init()
 * (the request free itself is elided in this extract). */
284 static inline void lfsck_layout_req_fini(const struct lu_env *env,
285 struct lfsck_layout_req *llr)
287 lu_object_put(env, &llr->llr_child->do_lu);
288 lfsck_layout_object_put(env, llr->llr_parent);
/* Test under llmd_lock whether the assistant request queue is empty. */
292 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
296 spin_lock(&llmd->llmd_lock);
297 if (list_empty(&llmd->llmd_req_list))
299 spin_unlock(&llmd->llmd_lock);
/* Fetch @obj's LOV EA into @buf, growing the buffer as needed: on -ERANGE
 * probe the required size with LU_BUF_NULL and reallocate, then retry
 * (retry/goto lines are elided in this extract).  Updates *buflen to the
 * final buffer capacity. */
304 static int lfsck_layout_get_lovea(const struct lu_env *env,
305 struct dt_object *obj,
306 struct lu_buf *buf, ssize_t *buflen)
311 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
/* Buffer too small (presumably rc == -ERANGE): query the EA size. */
313 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
/* rc now holds the required size; grow the buffer to match. */
318 lu_buf_realloc(buf, rc);
320 *buflen = buf->lb_len;
322 if (buf->lb_buf == NULL)
/* First-use path: the buffer was never allocated. */
334 if (unlikely(buf->lb_buf == NULL)) {
335 lu_buf_alloc(buf, rc);
337 *buflen = buf->lb_len;
339 if (buf->lb_buf == NULL)
/* Sanity-check an on-disk LOV EA header (little-endian): accept only
 * LOV_MAGIC_V1/V3 and LOV_PATTERN_RAID0; other magics/patterns are logged
 * and rejected (return statements elided in this extract). */
348 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
353 magic = le32_to_cpu(lmm->lmm_magic);
354 /* If magic crashed, keep it there. Sometime later, during OST-object
355 * orphan handling, if some OST-object(s) back-point to it, it can be
356 * verified and repaired. */
357 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3) {
361 lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
/* A recognizable-but-unsupported LOV magic family vs. complete garbage. */
362 if ((magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
367 CDEBUG(D_LFSCK, "%s LOV EA magic %u on "DOSTID"\n",
368 rc == -EINVAL ? "Unknown" : "Unsupported",
374 pattern = le32_to_cpu(lmm->lmm_pattern);
375 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
376 if (lov_pattern(pattern) != LOV_PATTERN_RAID0) {
379 lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
380 CDEBUG(D_LFSCK, "Unsupported LOV EA pattern %u on "DOSTID"\n",
381 pattern, POSTID(&oi));
389 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
390 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
391 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1)
/* One rbtree node covering LFSCK_RBTREE_BITMAP_WIDTH consecutive OIDs of a
 * single FID sequence; two page-sized bitmaps record which objects are known
 * to exist and which were actually accessed. */
393 struct lfsck_rbtree_node {
394 struct rb_node lrn_node;
/* Counts of bits set in the respective bitmaps below. */
397 atomic_t lrn_known_count;
398 atomic_t lrn_accessed_count;
399 void *lrn_known_bitmap;
400 void *lrn_accessed_bitmap;
/* Three-way compare of (@seq, @oid) against the OID range covered by @lrn:
 * negative/positive when the key sorts before/after the node, zero when the
 * oid falls inside the node's bitmap window (returns elided in extract). */
403 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
404 __u64 seq, __u32 oid)
406 if (seq < lrn->lrn_seq)
409 if (seq > lrn->lrn_seq)
412 if (oid < lrn->lrn_first_oid)
/* Past the end of this node's LFSCK_RBTREE_BITMAP_WIDTH-wide window. */
415 if (oid - lrn->lrn_first_oid >= LFSCK_RBTREE_BITMAP_WIDTH)
421 /* The caller should hold llsd->llsd_rb_lock. */
/* Search for the node covering @fid.  With @exact == NULL (or on miss with
 * exact set false — elided), falls through to the next node in key order so
 * iteration can resume from an arbitrary FID. */
422 static struct lfsck_rbtree_node *
423 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
424 const struct lu_fid *fid, bool *exact)
426 struct rb_node *node = llsd->llsd_rb_root.rb_node;
427 struct rb_node *prev = NULL;
428 struct lfsck_rbtree_node *lrn = NULL;
/* Standard rbtree descent keyed by (seq, oid). */
434 while (node != NULL) {
436 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
437 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
439 node = node->rb_left;
441 node = node->rb_right;
449 /* If there is no exactly matched one, then to the next valid one. */
452 /* The rbtree is empty. */
459 node = rb_next(prev);
461 /* The end of the rbtree. */
465 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
/* Allocate a new rbtree node whose bitmap window contains @fid: the window
 * start is fid_oid rounded down to a LFSCK_RBTREE_BITMAP_WIDTH boundary.
 * Returns the node or ERR_PTR(-ENOMEM); partial allocations are rolled back. */
470 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
471 const struct lu_fid *fid)
473 struct lfsck_rbtree_node *lrn;
477 return ERR_PTR(-ENOMEM);
479 OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
480 if (lrn->lrn_known_bitmap == NULL) {
483 return ERR_PTR(-ENOMEM);
486 OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
487 if (lrn->lrn_accessed_bitmap == NULL) {
/* Roll back the first bitmap allocation before failing. */
488 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
491 return ERR_PTR(-ENOMEM);
494 RB_CLEAR_NODE(&lrn->lrn_node);
495 lrn->lrn_seq = fid_seq(fid);
/* Align the window start so any oid maps to bit (oid & MASK). */
496 lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
497 atomic_set(&lrn->lrn_known_count, 0);
498 atomic_set(&lrn->lrn_accessed_count, 0);
/* Free both bitmaps of @lrn (the node free itself is elided in extract). */
503 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
505 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
506 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
510 /* The caller should hold lock. */
/* Insert @lrn keyed by (seq, first_oid).  If an overlapping node already
 * exists (cmp == 0, handling elided) the existing node is presumably
 * returned instead — callers free their candidate when the return differs. */
511 static struct lfsck_rbtree_node *
512 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
513 struct lfsck_rbtree_node *lrn)
515 struct rb_node **pos = &llsd->llsd_rb_root.rb_node;
516 struct rb_node *parent = NULL;
517 struct lfsck_rbtree_node *tmp;
/* Standard rbtree insertion walk. */
520 while (*pos != NULL) {
522 tmp = rb_entry(parent, struct lfsck_rbtree_node, lrn_node);
523 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
525 pos = &(*pos)->rb_left;
527 pos = &(*pos)->rb_right;
532 rb_link_node(&lrn->lrn_node, parent, pos);
533 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
538 extern const struct dt_index_operations lfsck_orphan_index_ops;
/* Prepare the in-RAM rbtree of OST-object bitmaps: create a pseudo dt object
 * (FID_SEQ_LAYOUT_RBTREE + device index) that stands for the tree so it can
 * be iterated via the dt index API, mark the tree valid, and turn on
 * FID-access recording on the bottom device. */
540 static int lfsck_rbtree_setup(const struct lu_env *env,
541 struct lfsck_component *com)
543 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
544 struct lfsck_instance *lfsck = com->lc_lfsck;
545 struct dt_device *dev = lfsck->li_bottom;
546 struct lfsck_layout_slave_data *llsd = com->lc_data;
547 struct dt_object *obj;
/* Synthesize a per-device FID reserved for the layout rbtree. */
549 fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
550 fid->f_oid = lfsck_dev_idx(dev);
552 obj = dt_locate(env, dev, fid);
554 RETURN(PTR_ERR(obj));
556 /* Generate an in-RAM object to stand for the layout rbtree.
557 * Scanning the layout rbtree will be via the iteration over
558 * the object. In the future, the rbtree may be written onto
559 * disk with the object.
561 * Mark the object to be as exist. */
562 obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
563 obj->do_index_ops = &lfsck_orphan_index_ops;
564 llsd->llsd_rb_obj = obj;
565 llsd->llsd_rbtree_valid = 1;
/* Ask the OSD to report every FID it touches, feeding the accessed bitmap. */
566 dev->dd_record_fid_accessed = 1;
568 CDEBUG(D_LFSCK, "%s: layout LFSCK init OST-objects accessing bitmap\n",
569 lfsck_lfsck2name(lfsck));
/* Tear down the OST-object bitmap rbtree: stop FID-access recording,
 * invalidate the tree under the write lock so concurrent updaters bail out,
 * free every node, and release the pseudo iteration object. */
574 static void lfsck_rbtree_cleanup(const struct lu_env *env,
575 struct lfsck_component *com)
577 struct lfsck_instance *lfsck = com->lc_lfsck;
578 struct lfsck_layout_slave_data *llsd = com->lc_data;
579 struct rb_node *node = rb_first(&llsd->llsd_rb_root);
580 struct rb_node *next;
581 struct lfsck_rbtree_node *lrn;
583 lfsck->li_bottom->dd_record_fid_accessed = 0;
584 /* Invalid the rbtree, then no others will use it. */
585 write_lock(&llsd->llsd_rb_lock);
586 llsd->llsd_rbtree_valid = 0;
587 write_unlock(&llsd->llsd_rb_lock);
/* Safe to walk without the lock now: the valid flag keeps others out. */
589 while (node != NULL) {
590 next = rb_next(node);
591 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
592 rb_erase(node, &llsd->llsd_rb_root);
593 lfsck_rbtree_free(lrn);
597 if (llsd->llsd_rb_obj != NULL) {
598 lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
599 llsd->llsd_rb_obj = NULL;
602 CDEBUG(D_LFSCK, "%s: layout LFSCK fini OST-objects accessing bitmap\n",
603 lfsck_lfsck2name(lfsck));
/* Record @fid in the OST-object bitmaps: set its "known" bit, and its
 * "accessed" bit as well when @accessed (parameter line elided).  Creates
 * the covering rbtree node on demand, upgrading from the read lock to the
 * write lock for insertion.  On failure while recording an access, orphan
 * handling would become unreliable, so the whole rbtree is torn down and the
 * component flagged LF_INCOMPLETE. */
606 static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
607 struct lfsck_component *com,
608 const struct lu_fid *fid,
611 struct lfsck_layout_slave_data *llsd = com->lc_data;
612 struct lfsck_rbtree_node *lrn;
/* Ignore invalid FIDs and LAST_ID objects. */
618 if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
/* Only IDIF and normal sequences describe trackable OST-objects. */
621 if (!fid_is_idif(fid) && !fid_is_norm(fid))
624 read_lock(&llsd->llsd_rb_lock);
625 if (!llsd->llsd_rbtree_valid)
626 GOTO(unlock, rc = 0);
628 lrn = lfsck_rbtree_search(llsd, fid, NULL);
630 struct lfsck_rbtree_node *tmp;
/* Miss: allocate a covering node outside any lock, then retake the
 * write lock, re-check validity, and insert (which may return an
 * existing node inserted by a racer — then free ours). */
634 read_unlock(&llsd->llsd_rb_lock);
635 tmp = lfsck_rbtree_new(env, fid);
637 GOTO(out, rc = PTR_ERR(tmp));
640 write_lock(&llsd->llsd_rb_lock);
641 if (!llsd->llsd_rbtree_valid) {
642 lfsck_rbtree_free(tmp);
643 GOTO(unlock, rc = 0);
646 lrn = lfsck_rbtree_insert(llsd, tmp);
648 lfsck_rbtree_free(tmp);
651 idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
652 /* Any accessed object must be a known object. */
653 if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
654 atomic_inc(&lrn->lrn_known_count);
655 if (accessed && !test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
656 atomic_inc(&lrn->lrn_accessed_count);
658 GOTO(unlock, rc = 0);
/* Unlock paths for the write- and read-locked branches respectively. */
662 write_unlock(&llsd->llsd_rb_lock);
664 read_unlock(&llsd->llsd_rb_lock);
666 if (rc != 0 && accessed) {
667 struct lfsck_layout *lo = com->lc_file_ram;
669 CDEBUG(D_LFSCK, "%s: fail to update OST-objects accessing "
670 "bitmap, and will cause incorrect LFSCK OST-object "
671 "handling, so disable it to cancel orphan handling "
672 "for related device. rc = %d\n",
673 lfsck_lfsck2name(com->lc_lfsck), rc);
675 lo->ll_flags |= LF_INCOMPLETE;
676 lfsck_rbtree_cleanup(env, com);
/* Convert an on-disk (little-endian) lfsck_layout trace file image @src into
 * CPU byte order in @des, field by field. */
680 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
681 const struct lfsck_layout *src)
685 des->ll_magic = le32_to_cpu(src->ll_magic);
686 des->ll_status = le32_to_cpu(src->ll_status);
687 des->ll_flags = le32_to_cpu(src->ll_flags);
688 des->ll_success_count = le32_to_cpu(src->ll_success_count);
689 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
690 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
691 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
692 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
693 des->ll_time_last_checkpoint =
694 le64_to_cpu(src->ll_time_last_checkpoint);
695 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
696 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
697 des->ll_pos_first_inconsistent =
698 le64_to_cpu(src->ll_pos_first_inconsistent);
699 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
700 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
701 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
702 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
/* Per-inconsistency-type repair counters. */
703 for (i = 0; i < LLIT_MAX; i++)
704 des->ll_objs_repaired[i] =
705 le64_to_cpu(src->ll_objs_repaired[i]);
706 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/* Inverse of lfsck_layout_le_to_cpu(): convert the CPU-order image @src into
 * little-endian on-disk format in @des. */
709 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
710 const struct lfsck_layout *src)
714 des->ll_magic = cpu_to_le32(src->ll_magic);
715 des->ll_status = cpu_to_le32(src->ll_status);
716 des->ll_flags = cpu_to_le32(src->ll_flags);
717 des->ll_success_count = cpu_to_le32(src->ll_success_count);
718 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
719 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
720 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
721 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
722 des->ll_time_last_checkpoint =
723 cpu_to_le64(src->ll_time_last_checkpoint);
724 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
725 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
726 des->ll_pos_first_inconsistent =
727 cpu_to_le64(src->ll_pos_first_inconsistent);
728 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
729 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
730 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
731 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
/* Per-inconsistency-type repair counters. */
732 for (i = 0; i < LLIT_MAX; i++)
733 des->ll_objs_repaired[i] =
734 cpu_to_le64(src->ll_objs_repaired[i]);
735 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/**
739 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
740 * \retval 0: succeed.
741 * \retval -ve: failed cases.
 */
/* Load the layout trace file from disk into com->lc_file_disk, convert it to
 * CPU order in com->lc_file_ram, and validate the magic. */
743 static int lfsck_layout_load(const struct lu_env *env,
744 struct lfsck_component *com)
746 struct lfsck_layout *lo = com->lc_file_ram;
747 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
748 ssize_t size = com->lc_file_size;
752 rc = dbo->dbo_read(env, com->lc_obj,
753 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
758 CDEBUG(D_LFSCK, "%s: failed to load lfsck_layout: rc = %d\n",
759 lfsck_lfsck2name(com->lc_lfsck), rc);
/* Short read: the trace file is truncated/stale — caller resets it. */
761 } else if (rc != size) {
762 CDEBUG(D_LFSCK, "%s: lfsck_layout size %u != %u; reset it\n",
763 lfsck_lfsck2name(com->lc_lfsck), rc, (unsigned int)size);
767 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
768 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
769 CDEBUG(D_LFSCK, "%s: invalid lfsck_layout magic %#x != %#x, "
770 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
771 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/* Persist the in-RAM layout trace file: convert com->lc_file_ram to
 * little-endian in com->lc_file_disk (lo), then write it through a local
 * transaction on the bottom device. */
778 static int lfsck_layout_store(const struct lu_env *env,
779 struct lfsck_component *com)
781 struct dt_object *obj = com->lc_obj;
782 struct lfsck_instance *lfsck = com->lc_lfsck;
783 struct lfsck_layout *lo = com->lc_file_disk;
784 struct thandle *handle;
785 ssize_t size = com->lc_file_size;
790 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
791 handle = dt_trans_create(env, lfsck->li_bottom);
793 GOTO(log, rc = PTR_ERR(handle));
795 rc = dt_declare_record_write(env, obj, lfsck_buf_get(env, lo, size),
800 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
804 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
/* Transaction stop runs on both success and error paths. */
810 dt_trans_stop(env, lfsck->li_bottom, handle);
814 CDEBUG(D_LFSCK, "%s: fail to store lfsck_layout: rc = %d\n",
815 lfsck_lfsck2name(lfsck), rc);
/* Reset the layout trace file to a pristine LS_INIT state and store it,
 * serialized against other trace-file users by com->lc_sem. */
819 static int lfsck_layout_init(const struct lu_env *env,
820 struct lfsck_component *com)
822 struct lfsck_layout *lo = com->lc_file_ram;
825 memset(lo, 0, com->lc_file_size);
826 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
827 lo->ll_status = LS_INIT;
828 down_write(&com->lc_sem);
829 rc = lfsck_layout_store(env, com);
830 up_write(&com->lc_sem);
/* Decide whether @fid names an OST-object: first consult the FLD for the
 * sequence's range type; if that is inconclusive, inspect the object's LMA
 * xattr for LMAC_FID_ON_OST; finally probe for the XATTR_NAME_FID filter
 * fid xattr (interpretation of that probe's result is elided here). */
835 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
836 struct dt_object *obj, const struct lu_fid *fid)
838 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
839 struct lu_seq_range range = { 0 };
840 struct lustre_mdt_attrs *lma;
843 fld_range_set_any(&range);
844 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
846 if (fld_range_is_ost(&range))
852 lma = &lfsck_env_info(env)->lti_lma;
853 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
854 XATTR_NAME_LMA, BYPASS_CAPA);
855 if (rc == sizeof(*lma)) {
/* LMA is stored little-endian; swab before checking flags. */
856 lustre_lma_swab(lma);
858 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
/* Size-only probe: existence of the filter-fid xattr marks an OST-object. */
861 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/* Find the lfsck_layout_seq entry for @seq in the slave's sorted sequence
 * list; the early-break on lls_seq > seq relies on ascending order.
 * Return statements are elided in this extract (presumably the match or
 * NULL on miss). */
866 static struct lfsck_layout_seq *
867 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
869 struct lfsck_layout_seq *lls;
871 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
872 if (lls->lls_seq == seq)
875 if (lls->lls_seq > seq)
/* Insert @lls into the slave's sequence list, keeping it sorted by lls_seq
 * ascending (insert before the first entry with a larger seq, else append). */
883 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
884 struct lfsck_layout_seq *lls)
886 struct lfsck_layout_seq *tmp;
887 struct list_head *pos = &llsd->llsd_seq_list;
889 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
890 if (lls->lls_seq < tmp->lls_seq) {
891 pos = &tmp->lls_list;
895 list_add_tail(&lls->lls_list, pos);
/* (Re)create a missing LAST_ID file @obj for its sequence and write an
 * initial last-id record, all inside one local transaction.  Skipped in
 * dry-run mode.  The initial lastid value setup is elided in this extract. */
899 lfsck_layout_lastid_create(const struct lu_env *env,
900 struct lfsck_instance *lfsck,
901 struct dt_object *obj)
903 struct lfsck_thread_info *info = lfsck_env_info(env);
904 struct lu_attr *la = &info->lti_la;
905 struct dt_object_format *dof = &info->lti_dof;
906 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
907 struct dt_device *dt = lfsck->li_bottom;
/* Never modify disk state in dry-run mode. */
914 if (bk->lb_param & LPF_DRYRUN)
917 memset(la, 0, sizeof(*la));
918 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
919 la->la_valid = LA_MODE | LA_UID | LA_GID;
920 dof->dof_type = dt_mode_to_dft(S_IFREG);
922 th = dt_trans_create(env, dt);
924 GOTO(log, rc = PTR_ERR(th));
/* Declare both the create and the record write before starting. */
926 rc = dt_declare_create(env, obj, la, NULL, dof, th);
930 rc = dt_declare_record_write(env, obj,
931 lfsck_buf_get(env, &lastid,
937 rc = dt_trans_start_local(env, dt, th);
/* Re-check existence under the write lock: someone may have raced us. */
941 dt_write_lock(env, obj, 0);
942 if (likely(!dt_object_exists(obj))) {
943 rc = dt_create(env, obj, la, NULL, dof, th);
945 rc = dt_record_write(env, obj,
946 lfsck_buf_get(env, &lastid, sizeof(lastid)),
949 dt_write_unlock(env, obj);
954 dt_trans_stop(env, dt, th);
957 CDEBUG(D_LFSCK, "%s: layout LFSCK will create LAST_ID for <seq> "
959 lfsck_lfsck2name(lfsck), fid_seq(lfsck_dto2fid(obj)), rc);
/* Re-read the on-disk LAST_ID for @lls and reconcile it with what the scan
 * has observed: if disk is behind the known maximum, adopt the known value
 * and flag LF_CRASHED_LASTID (notifying the layer above once); otherwise
 * accept the on-disk value. */
965 lfsck_layout_lastid_reload(const struct lu_env *env,
966 struct lfsck_component *com,
967 struct lfsck_layout_seq *lls)
973 dt_read_lock(env, lls->lls_lastid_obj, 0);
974 rc = dt_record_read(env, lls->lls_lastid_obj,
975 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
976 dt_read_unlock(env, lls->lls_lastid_obj);
977 if (unlikely(rc != 0))
980 lastid = le64_to_cpu(lastid);
/* On-disk LAST_ID lags behind objects we have already seen: crashed. */
981 if (lastid < lls->lls_lastid_known) {
982 struct lfsck_instance *lfsck = com->lc_lfsck;
983 struct lfsck_layout *lo = com->lc_file_ram;
985 lls->lls_lastid = lls->lls_lastid_known;
/* Notify only on the first detection for this run. */
987 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
988 LASSERT(lfsck->li_out_notify != NULL);
990 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
991 LE_LASTID_REBUILDING);
992 lo->ll_flags |= LF_CRASHED_LASTID;
994 } else if (lastid >= lls->lls_lastid) {
995 lls->lls_lastid = lastid;
/* Flush every dirty per-sequence LAST_ID back to disk.  Clean entries are
 * first re-loaded (see the in-body comment) to catch OFD pre-creation races;
 * dry-run mode skips the actual writes.  Each sequence gets its own local
 * transaction; failures are logged and (per the elided error paths,
 * presumably) accumulated rather than aborting the loop — TODO confirm. */
1003 lfsck_layout_lastid_store(const struct lu_env *env,
1004 struct lfsck_component *com)
1006 struct lfsck_instance *lfsck = com->lc_lfsck;
1007 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1008 struct dt_device *dt = lfsck->li_bottom;
1009 struct lfsck_layout_slave_data *llsd = com->lc_data;
1010 struct lfsck_layout_seq *lls;
1016 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
1019 /* XXX: Add the code back if we really found related
1020 * inconsistent cases in the future. */
1022 if (!lls->lls_dirty) {
1023 /* In OFD, before the pre-creation, the LAST_ID
1024 * file will be updated firstly, which may hide
1025 * some potential crashed cases. For example:
1027 * The old obj1's ID is higher than old LAST_ID
1028 * but lower than the new LAST_ID, but the LFSCK
1029 * have not touch the obj1 until the OFD updated
1030 * the LAST_ID. So the LFSCK does not regard it
1031 * as crashed case. But when OFD does not create
1032 * successfully, it will set the LAST_ID as the
1033 * real created objects' ID, then LFSCK needs to
1034 * found related inconsistency. */
1035 rc = lfsck_layout_lastid_reload(env, com, lls);
1036 if (likely(!lls->lls_dirty))
1041 CDEBUG(D_LFSCK, "%s: layout LFSCK will sync the LAST_ID for "
1042 "<seq> "LPX64" as <oid> "LPU64"\n",
1043 lfsck_lfsck2name(lfsck), lls->lls_seq, lls->lls_lastid);
/* Dry-run: report but do not write (clean-up of the dirty flag elided). */
1045 if (bk->lb_param & LPF_DRYRUN) {
1050 th = dt_trans_create(env, dt);
1053 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to store "
1054 "the LAST_ID for <seq> "LPX64"(1): rc = %d\n",
1055 lfsck_lfsck2name(com->lc_lfsck),
/* LAST_ID records are stored little-endian on disk. */
1060 lastid = cpu_to_le64(lls->lls_lastid);
1061 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
1062 lfsck_buf_get(env, &lastid,
1068 rc = dt_trans_start_local(env, dt, th);
1072 dt_write_lock(env, lls->lls_lastid_obj, 0);
1073 rc = dt_record_write(env, lls->lls_lastid_obj,
1074 lfsck_buf_get(env, &lastid,
1075 sizeof(lastid)), &pos, th);
1076 dt_write_unlock(env, lls->lls_lastid_obj);
1081 dt_trans_stop(env, dt, th);
1084 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to store "
1085 "the LAST_ID for <seq> "LPX64"(2): rc = %d\n",
1086 lfsck_lfsck2name(com->lc_lfsck),
/* Locate and load the LAST_ID file for @lls->lls_seq.  A missing or empty
 * file means the LAST_ID crashed: flag LF_CRASHED_LASTID, notify the layer
 * above, optionally pause under the OBD_FAIL_LFSCK_DELAY4 fault-injection
 * point, and (re)create the file.  On success caches the dt object in
 * lls->lls_lastid_obj and the CPU-order last-id in lls->lls_lastid. */
1095 lfsck_layout_lastid_load(const struct lu_env *env,
1096 struct lfsck_component *com,
1097 struct lfsck_layout_seq *lls)
1099 struct lfsck_instance *lfsck = com->lc_lfsck;
1100 struct lfsck_layout *lo = com->lc_file_ram;
1101 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1102 struct dt_object *obj;
/* Build the well-known LAST_ID FID for this sequence on this device. */
1107 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
1108 obj = dt_locate(env, lfsck->li_bottom, fid);
1110 RETURN(PTR_ERR(obj));
1112 /* LAST_ID crashed, to be rebuilt */
1113 if (!dt_object_exists(obj)) {
1114 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1115 LASSERT(lfsck->li_out_notify != NULL);
1117 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1118 LE_LASTID_REBUILDING);
1119 lo->ll_flags |= LF_CRASHED_LASTID;
/* Fault-injection delay point; drops lc_sem so other trace-file
 * users are not blocked while we wait. */
1121 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
1123 struct l_wait_info lwi = LWI_TIMEOUT(
1124 cfs_time_seconds(cfs_fail_val),
1127 up_write(&com->lc_sem);
1128 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1129 !thread_is_running(&lfsck->li_thread),
1131 down_write(&com->lc_sem);
1135 rc = lfsck_layout_lastid_create(env, lfsck, obj);
/* File exists: read the stored last-id. */
1137 dt_read_lock(env, obj, 0);
1138 rc = dt_read(env, obj,
1139 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
1141 dt_read_unlock(env, obj);
/* Anything but a full __u64 (or an empty file, rc == 0) is corrupt. */
1142 if (rc != 0 && rc != sizeof(__u64))
1143 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
/* Empty LAST_ID file counts as crashed too. */
1145 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
1146 LASSERT(lfsck->li_out_notify != NULL);
1148 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1149 LE_LASTID_REBUILDING);
1150 lo->ll_flags |= LF_CRASHED_LASTID;
1153 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
/* Error path releases the object; success path caches it. */
1161 lfsck_object_put(env, obj);
1163 lls->lls_lastid_obj = obj;
/* Account one phase-1 failure, and remember the iterator position of the
 * first non-repaired inconsistency so a later run can resume from there. */
1168 static void lfsck_layout_record_failure(const struct lu_env *env,
1169 struct lfsck_instance *lfsck,
1170 struct lfsck_layout *lo)
1172 lo->ll_objs_failed_phase1++;
1173 if (unlikely(lo->ll_pos_first_inconsistent == 0)) {
/* Capture the OIT iterator's current store cookie as the position. */
1174 lo->ll_pos_first_inconsistent =
1175 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
1178 CDEBUG(D_LFSCK, "%s: layout LFSCK hit first non-repaired "
1179 "inconsistency at the pos ["LPU64"]\n",
1180 lfsck_lfsck2name(lfsck),
1181 lo->ll_pos_first_inconsistent);
/* ptlrpc interpret callback for async layout-LFSCK RPCs sent by the master.
 * Dispatches on the original request event:
 *  - start (LE_START, label elided): on failure mark LF_INCOMPLETE; on
 *    success link the target onto the proper list/phase1 list (OST or MDT);
 *  - LE_PHASE1_DONE / LE_PHASE2_DONE / stop: just log unexpected errors;
 *  - query (LE_QUERY, label elided): parse the reply status and move the
 *    target to the phase-2 list when it finished phase 1, or drop it from
 *    all lists when it is done/failed.
 * Finally drops the per-RPC target and component references when the args
 * were not shared.  NOTE(review): several case labels and the rc-check
 * lines are elided in this extract; list handling below is annotated from
 * the visible code only. */
1185 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
1186 struct ptlrpc_request *req,
1189 struct lfsck_async_interpret_args *laia = args;
1190 struct lfsck_component *com = laia->laia_com;
1191 struct lfsck_layout_master_data *llmd = com->lc_data;
1192 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
1193 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
1194 struct lfsck_request *lr = laia->laia_lr;
1196 switch (lr->lr_event) {
1199 struct lfsck_layout *lo = com->lc_file_ram;
/* Start-notification failed: the run can no longer be complete. */
1201 CDEBUG(D_LFSCK, "%s: fail to notify %s %x for layout "
1203 lfsck_lfsck2name(com->lc_lfsck),
1204 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1205 ltd->ltd_index, rc);
1206 lo->ll_flags |= LF_INCOMPLETE;
/* Target already dead or done: leave it off the lists. */
1210 spin_lock(&ltds->ltd_lock);
1211 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1212 spin_unlock(&ltds->ltd_lock);
/* Enrol the target in the tracking and phase-1 lists for its type. */
1216 if (lr->lr_flags & LEF_TO_OST) {
1217 if (list_empty(&ltd->ltd_layout_list))
1218 list_add_tail(&ltd->ltd_layout_list,
1219 &llmd->llmd_ost_list);
1220 if (list_empty(&ltd->ltd_layout_phase_list))
1221 list_add_tail(&ltd->ltd_layout_phase_list,
1222 &llmd->llmd_ost_phase1_list);
1224 if (list_empty(&ltd->ltd_layout_list))
1225 list_add_tail(&ltd->ltd_layout_list,
1226 &llmd->llmd_mdt_list);
1227 if (list_empty(&ltd->ltd_layout_phase_list))
1228 list_add_tail(&ltd->ltd_layout_phase_list,
1229 &llmd->llmd_mdt_phase1_list);
1231 spin_unlock(&ltds->ltd_lock);
1234 case LE_PHASE1_DONE:
1235 case LE_PHASE2_DONE:
/* -EALREADY from the peer is benign here. */
1237 if (rc != 0 && rc != -EALREADY)
1238 CDEBUG(D_LFSCK, "%s: fail to notify %s %x for layout: "
1239 "event = %d, rc = %d\n",
1240 lfsck_lfsck2name(com->lc_lfsck),
1241 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1242 ltd->ltd_index, lr->lr_event, rc);
1245 struct lfsck_reply *reply;
/* Query RPC failed entirely: stop tracking this target. */
1248 spin_lock(&ltds->ltd_lock);
1249 list_del_init(&ltd->ltd_layout_phase_list);
1250 list_del_init(&ltd->ltd_layout_list);
1251 spin_unlock(&ltds->ltd_lock);
1255 reply = req_capsule_server_get(&req->rq_pill,
1257 if (reply == NULL) {
1259 CDEBUG(D_LFSCK, "%s: invalid query reply: rc = %d\n",
1260 lfsck_lfsck2name(com->lc_lfsck), rc);
1261 spin_lock(&ltds->ltd_lock);
1262 list_del_init(&ltd->ltd_layout_phase_list);
1263 list_del_init(&ltd->ltd_layout_list);
1264 spin_unlock(&ltds->ltd_lock);
1268 switch (reply->lr_status) {
1269 case LS_SCANNING_PHASE1:
/* Peer reached phase 2: migrate it to the phase-2 list unless it
 * died or finished in the meantime. */
1271 case LS_SCANNING_PHASE2:
1272 spin_lock(&ltds->ltd_lock);
1273 list_del_init(&ltd->ltd_layout_phase_list);
1274 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1275 spin_unlock(&ltds->ltd_lock);
1279 if (lr->lr_flags & LEF_TO_OST)
1280 list_add_tail(&ltd->ltd_layout_phase_list,
1281 &llmd->llmd_ost_phase2_list);
1283 list_add_tail(&ltd->ltd_layout_phase_list,
1284 &llmd->llmd_mdt_phase2_list);
1285 spin_unlock(&ltds->ltd_lock);
/* Any other status: target is finished — drop it from all lists. */
1288 spin_lock(&ltds->ltd_lock);
1289 list_del_init(&ltd->ltd_layout_phase_list);
1290 list_del_init(&ltd->ltd_layout_list);
1291 spin_unlock(&ltds->ltd_lock);
1297 CDEBUG(D_LFSCK, "%s: layout LFSCK unexpected event: rc = %d\n",
1298 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
/* Shared args belong to the caller; otherwise release our refs. */
1302 if (!laia->laia_shared) {
1304 lfsck_component_put(env, com);
/* Send LE_QUERY RPCs to all targets still in phase 1.  MDTs are queried
 * first; when the MDT phase-1 list drains, the set is (per the elided goto,
 * presumably) reused for the OST phase-1 list.  Each target is touch-marked
 * with llmd_touch_gen and rotated to the list tail so the while-loop visits
 * everyone exactly once even as the interpret callback mutates the lists. */
1310 static int lfsck_layout_master_query_others(const struct lu_env *env,
1311 struct lfsck_component *com)
1313 struct lfsck_thread_info *info = lfsck_env_info(env);
1314 struct lfsck_request *lr = &info->lti_lr;
1315 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1316 struct lfsck_instance *lfsck = com->lc_lfsck;
1317 struct lfsck_layout_master_data *llmd = com->lc_data;
1318 struct ptlrpc_request_set *set;
1319 struct lfsck_tgt_descs *ltds;
1320 struct lfsck_tgt_desc *ltd;
1321 struct list_head *head;
1326 set = ptlrpc_prep_set();
/* New generation: lets us tell visited from unvisited entries below. */
1330 llmd->llmd_touch_gen++;
1331 memset(lr, 0, sizeof(*lr));
1332 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1333 lr->lr_event = LE_QUERY;
1334 lr->lr_active = LFSCK_TYPE_LAYOUT;
1335 laia->laia_com = com;
1337 laia->laia_shared = 0;
/* Prefer MDT targets while any remain in phase 1; else query OSTs. */
1339 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1340 ltds = &lfsck->li_mdt_descs;
1342 head = &llmd->llmd_mdt_phase1_list;
1346 ltds = &lfsck->li_ost_descs;
1347 lr->lr_flags = LEF_TO_OST;
1348 head = &llmd->llmd_ost_phase1_list;
1351 laia->laia_ltds = ltds;
1352 spin_lock(&ltds->ltd_lock);
1353 while (!list_empty(head)) {
1354 ltd = list_entry(head->next,
1355 struct lfsck_tgt_desc,
1356 ltd_layout_phase_list);
/* Already visited in this generation: the whole list is done. */
1357 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1360 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1361 list_move_tail(&ltd->ltd_layout_phase_list, head);
/* Hold a target ref across the async RPC; the interpret drops it. */
1362 atomic_inc(&ltd->ltd_ref);
1363 laia->laia_ltd = ltd;
1364 spin_unlock(&ltds->ltd_lock);
1365 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1366 lfsck_layout_master_async_interpret,
1369 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to query %s %x: "
1370 "rc = %d\n", lfsck_lfsck2name(lfsck),
1371 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1372 ltd->ltd_index, rc);
1376 spin_lock(&ltds->ltd_lock);
1378 spin_unlock(&ltds->ltd_lock);
/* Wait for all queued query RPCs to be interpreted. */
1380 rc = ptlrpc_set_wait(set);
1382 ptlrpc_set_destroy(set);
/* MDT pass finished and its phase-1 list drained: repeat for OSTs
 * (the loop-back goto is elided in this extract). */
1386 if (!(lr->lr_flags & LEF_TO_OST) &&
1387 list_empty(&llmd->llmd_mdt_phase1_list))
1390 ptlrpc_set_destroy(set);
1392 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * Whether the layout LFSCK master may move on to orphan OST-object
 * handling: every MDT has left phase1, and either some OST has already
 * advanced to phase2 or no OST is still in phase1.
 * (The return-type line of this definition is elided from the listing.)
 */
1396 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1398 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1399 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1400 list_empty(&llmd->llmd_ost_phase1_list));
/*
 * Broadcast an LFSCK event (in \a lr) from the layout LFSCK master to the
 * involved targets, via async LFSCK_NOTIFY RPCs collected in one ptlrpc
 * request set.  The dispatch order depends on lr_event:
 *
 *   - start (first visible case):  notify all OSTs first; any notify
 *     failure marks the run LF_INCOMPLETE.  Then, when LPF_ALL_TGT is
 *     set, link the other MDTs onto llmd_mdt_list/llmd_mdt_phase1_list.
 *   - LE_STOP / LE_PHASE2_DONE / LE_PEER_EXIT:  drain the MDT list first
 *     (for LE_STOP the targets are also unlinked), then repeat the same
 *     loop for the OSTs with LEF_TO_OST set.
 *   - LE_PHASE1_DONE:  walk llmd_mdt_phase1_list with the generation-
 *     stamping scheme (llmd_touch_gen) so each MDT is notified once.
 *
 * NOTE(review): listing is elided; "<ds->"/"<d->" tokens are presumably
 * "&ltds->"/"&ltd->" extraction artifacts -- confirm against the tree.
 */
1403 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1404 struct lfsck_component *com,
1405 struct lfsck_request *lr)
1407 struct lfsck_thread_info *info = lfsck_env_info(env);
1408 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1409 struct lfsck_instance *lfsck = com->lc_lfsck;
1410 struct lfsck_layout_master_data *llmd = com->lc_data;
1411 struct lfsck_layout *lo = com->lc_file_ram;
1412 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1413 struct ptlrpc_request_set *set;
1414 struct lfsck_tgt_descs *ltds;
1415 struct lfsck_tgt_desc *ltd;
1416 struct lfsck_tgt_desc *next;
1417 struct list_head *head;
1422 set = ptlrpc_prep_set();
1426 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1427 lr->lr_active = LFSCK_TYPE_LAYOUT;
1428 laia->laia_com = com;
1430 laia->laia_shared = 0;
1431 switch (lr->lr_event) {
1433 /* Notify OSTs firstly, then handle other MDTs if needed. */
1434 ltds = &lfsck->li_ost_descs;
1435 laia->laia_ltds = ltds;
1436 down_read(<ds->ltd_rw_sem);
1437 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1438 ltd = lfsck_tgt_get(ltds, idx);
1439 LASSERT(ltd != NULL);
1441 laia->laia_ltd = ltd;
1442 ltd->ltd_layout_done = 0;
1443 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1444 lfsck_layout_master_async_interpret,
1445 laia, LFSCK_NOTIFY);
1447 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1448 "notify %s %x for start: rc = %d\n",
1449 lfsck_lfsck2name(lfsck),
1450 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* An unreachable target makes the scan result only partial. */
1453 lo->ll_flags |= LF_INCOMPLETE;
1456 up_read(<ds->ltd_rw_sem);
1459 rc = ptlrpc_set_wait(set);
1461 ptlrpc_set_destroy(set);
/* Without LPF_ALL_TGT there are no peer MDTs to involve. */
1465 if (!(bk->lb_param & LPF_ALL_TGT))
1468 /* link other MDT targets locally. */
1469 ltds = &lfsck->li_mdt_descs;
1470 spin_lock(<ds->ltd_lock);
1471 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1472 ltd = LTD_TGT(ltds, idx);
1473 LASSERT(ltd != NULL);
/* Skip targets already linked onto a layout list. */
1475 if (!list_empty(<d->ltd_layout_list))
1478 list_add_tail(<d->ltd_layout_list,
1479 &llmd->llmd_mdt_list);
1480 list_add_tail(<d->ltd_layout_phase_list,
1481 &llmd->llmd_mdt_phase1_list);
1483 spin_unlock(<ds->ltd_lock);
1486 case LE_PHASE2_DONE:
1487 case LE_PEER_EXIT: {
1488 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1489 if (bk->lb_param & LPF_ALL_TGT) {
1490 head = &llmd->llmd_mdt_list;
1491 ltds = &lfsck->li_mdt_descs;
1492 if (lr->lr_event == LE_STOP) {
1493 /* unlink other MDT targets locally. */
1494 spin_lock(<ds->ltd_lock);
1495 list_for_each_entry_safe(ltd, next, head,
1497 list_del_init(<d->ltd_layout_phase_list);
1498 list_del_init(<d->ltd_layout_list);
1500 spin_unlock(<ds->ltd_lock);
1502 lr->lr_flags |= LEF_TO_OST;
1503 head = &llmd->llmd_ost_list;
1504 ltds = &lfsck->li_ost_descs;
1506 lr->lr_flags &= ~LEF_TO_OST;
1509 lr->lr_flags |= LEF_TO_OST;
1510 head = &llmd->llmd_ost_list;
1511 ltds = &lfsck->li_ost_descs;
1515 laia->laia_ltds = ltds;
1516 spin_lock(<ds->ltd_lock);
1517 while (!list_empty(head)) {
1518 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1520 if (!list_empty(<d->ltd_layout_phase_list))
1521 list_del_init(<d->ltd_layout_phase_list);
1522 list_del_init(<d->ltd_layout_list);
1523 atomic_inc(<d->ltd_ref);
1524 laia->laia_ltd = ltd;
/* Drop the spinlock around the RPC submission. */
1525 spin_unlock(<ds->ltd_lock);
1526 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1527 lfsck_layout_master_async_interpret,
1528 laia, LFSCK_NOTIFY);
1530 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1531 "notify %s %x for stop/phase2_done/"
1532 "peer_exit: rc = %d\n",
1533 lfsck_lfsck2name(lfsck),
1534 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1535 "MDT", ltd->ltd_index, rc);
1538 spin_lock(<ds->ltd_lock);
1540 spin_unlock(<ds->ltd_lock);
1542 rc = ptlrpc_set_wait(set);
1544 ptlrpc_set_destroy(set);
/* MDT pass finished: switch over and run the same loop for OSTs. */
1548 if (!(lr->lr_flags & LEF_TO_OST)) {
1549 lr->lr_flags |= LEF_TO_OST;
1550 head = &llmd->llmd_ost_list;
1551 ltds = &lfsck->li_ost_descs;
1556 case LE_PHASE1_DONE:
/* Generation stamp guarantees each MDT is notified exactly once. */
1557 llmd->llmd_touch_gen++;
1558 ltds = &lfsck->li_mdt_descs;
1559 laia->laia_ltds = ltds;
1560 spin_lock(<ds->ltd_lock);
1561 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1562 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1563 struct lfsck_tgt_desc,
1564 ltd_layout_phase_list);
1565 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1568 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1569 list_move_tail(<d->ltd_layout_phase_list,
1570 &llmd->llmd_mdt_phase1_list);
1571 atomic_inc(<d->ltd_ref);
1572 laia->laia_ltd = ltd;
1573 spin_unlock(<ds->ltd_lock);
1574 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1575 lfsck_layout_master_async_interpret,
1576 laia, LFSCK_NOTIFY);
1578 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1579 "notify MDT %x for phase1_done: "
1580 "rc = %d\n", lfsck_lfsck2name(lfsck),
1581 ltd->ltd_index, rc);
1584 spin_lock(<ds->ltd_lock);
1586 spin_unlock(<ds->ltd_lock);
1589 CDEBUG(D_LFSCK, "%s: layout LFSCK unexpected event: rc = %d\n",
1590 lfsck_lfsck2name(lfsck), lr->lr_event);
1595 rc = ptlrpc_set_wait(set);
1596 ptlrpc_set_destroy(set);
/*
 * Record the result of the layout LFSCK second-phase (double) scan.
 *
 * Under com->lc_sem (write): accumulate phase2 run time and checked-object
 * count, then set ll_status from the scan result -- the elided condition
 * before line 1617 is presumably "rc > 0" (scan finished): LS_PARTIAL when
 * LF_INCOMPLETE was raised, else LS_COMPLETED (clearing LF_SCANNED_ONCE |
 * LF_INCONSISTENT unless dry-run); rc == 0 maps to the instance status or
 * LS_STOPPED; otherwise LS_FAILED.  Finally persist via
 * lfsck_layout_store().  (The third parameter, the rc being reported, is
 * on an elided line of this listing.)
 */
1601 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1602 struct lfsck_component *com,
1605 struct lfsck_instance *lfsck = com->lc_lfsck;
1606 struct lfsck_layout *lo = com->lc_file_ram;
1607 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1609 down_write(&com->lc_sem);
1610 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1611 HALF_SEC - lfsck->li_time_last_checkpoint);
1612 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1613 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1616 com->lc_journal = 0;
1617 if (lo->ll_flags & LF_INCOMPLETE)
1618 lo->ll_status = LS_PARTIAL;
1620 lo->ll_status = LS_COMPLETED;
1621 if (!(bk->lb_param & LPF_DRYRUN))
1622 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1623 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1624 lo->ll_success_count++;
1625 } else if (rc == 0) {
1626 lo->ll_status = lfsck->li_status;
1627 if (lo->ll_status == 0)
1628 lo->ll_status = LS_STOPPED;
1630 lo->ll_status = LS_FAILED;
1633 rc = lfsck_layout_store(env, com);
1634 up_write(&com->lc_sem);
/*
 * Take a local (server-side) LDLM EX inodebits lock on \a obj with the
 * given \a bits, storing the lock handle in \a lh.
 *
 * The lock is enqueued with LDLM_FL_ATOMIC_CB via ldlm_cli_enqueue_local()
 * against the LFSCK instance's namespace.  On ELDLM_OK an (elided)
 * follow-up check runs; the visible memset() clears the handle on the
 * failure side of that check so callers can test it with
 * lustre_handle_is_used() -- confirm the exact condition in the full tree.
 */
1639 static int lfsck_layout_lock(const struct lu_env *env,
1640 struct lfsck_component *com,
1641 struct dt_object *obj,
1642 struct lustre_handle *lh, __u64 bits)
1644 struct lfsck_thread_info *info = lfsck_env_info(env);
1645 ldlm_policy_data_t *policy = &info->lti_policy;
1646 struct ldlm_res_id *resid = &info->lti_resid;
1647 struct lfsck_instance *lfsck = com->lc_lfsck;
1648 __u64 flags = LDLM_FL_ATOMIC_CB;
1651 LASSERT(lfsck->li_namespace != NULL);
1653 memset(policy, 0, sizeof(*policy));
1654 policy->l_inodebits.bits = bits;
1655 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1656 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1657 policy, LCK_EX, &flags, ldlm_blocking_ast,
1658 ldlm_completion_ast, NULL, NULL, 0,
1659 LVB_T_NONE, NULL, lh);
1660 if (rc == ELDLM_OK) {
1663 memset(lh, 0, sizeof(*lh));
/*
 * Release a lock taken by lfsck_layout_lock() and invalidate the handle.
 * Safe to call with an unused handle (lustre_handle_is_used() guard).
 */
1670 static void lfsck_layout_unlock(struct lustre_handle *lh)
1672 if (lustre_handle_is_used(lh)) {
1673 ldlm_lock_decref(lh, LCK_EX);
1674 memset(lh, 0, sizeof(*lh));
/*
 * Stop transaction \a handle, first recording \a result in th_result so
 * the commit path sees the caller's outcome.  The (elided) tail
 * presumably merges the dt_trans_stop() rc with \a result for the return.
 */
1678 static int lfsck_layout_trans_stop(const struct lu_env *env,
1679 struct dt_device *dev,
1680 struct thandle *handle, int result)
1684 handle->th_result = result;
1685 rc = dt_trans_stop(env, dev, handle);
1695 * Get the system default stripe size.
1697 * \param[in] env	pointer to the thread context
1698 * \param[in] lfsck	pointer to the lfsck instance
1699 * \param[out] size	pointer to the default stripe size, returned in
 *			little-endian byte order (see comment below)
1701 * \retval 0 for success
1702 * \retval negative error number on failure
1704 static int lfsck_layout_get_def_stripesize(const struct lu_env *env,
1705 struct lfsck_instance *lfsck,
1708 struct lov_user_md *lum = &lfsck_env_info(env)->lti_lum;
1709 struct dt_object *root;
1712 root = dt_locate(env, lfsck->li_next, &lfsck->li_local_root_fid);
1714 return PTR_ERR(root);
1716 /* Get the default stripe size via xattr_get on the backend root. */
1717 rc = dt_xattr_get(env, root, lfsck_buf_get(env, lum, sizeof(*lum)),
1718 XATTR_NAME_LOV, BYPASS_CAPA);
1720 /* The lum->lmm_stripe_size is LE mode. The *size also
1721 * should be LE mode. So it is unnecessary to convert. */
1722 *size = lum->lmm_stripe_size;
/* rc == 0 (no data) is treated as an unexpected case; handling elided. */
1724 } else if (unlikely(rc == 0)) {
1728 lfsck_object_put(env, root);
/*
 * Refill one (dummy) slot of the parent's LOV EA with the orphan
 * OST-object \a cfid / \a ost_idx, then store the EA via dt_xattr_set()
 * inside the caller's transaction.  If the layout carried
 * LOV_PATTERN_F_HOLE and \a slot was the last remaining dummy slot,
 * the hole flag is cleared.  All on-disk fields are written in
 * little-endian order.
 *
1734 * \retval +1: repaired
1735 * \retval 0: did nothing
1736 * \retval -ve: on error
 */
1738 static int lfsck_layout_refill_lovea(const struct lu_env *env,
1739 struct thandle *handle,
1740 struct dt_object *parent,
1741 struct lu_fid *cfid,
1743 struct lov_ost_data_v1 *slot,
1744 int fl, __u32 ost_idx)
1746 struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
1747 struct lov_mds_md_v1 *lmm = buf->lb_buf;
/* Point the slot at the orphan OST-object (LE on-disk format). */
1750 fid_to_ostid(cfid, oi);
1751 ostid_cpu_to_le(oi, &slot->l_ost_oi);
1752 slot->l_ost_gen = cpu_to_le32(0);
1753 slot->l_ost_idx = cpu_to_le32(ost_idx);
1755 if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE) {
1756 struct lov_ost_data_v1 *objs;
1760 count = le16_to_cpu(lmm->lmm_stripe_count);
1761 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
1762 objs = &lmm->lmm_objects[0];
1764 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Scan for any other dummy slot; the loop body/break is elided. */
1765 for (i = 0; i < count; i++, objs++) {
1766 if (objs != slot && lovea_slot_is_dummy(objs))
1770 /* If the @slot is the last dummy slot to be refilled,
1771 * then drop LOV_PATTERN_F_HOLE from lmm::lmm_pattern. */
1773 lmm->lmm_pattern &= ~cpu_to_le32(LOV_PATTERN_F_HOLE);
1776 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
/*
 * Create or extend the parent MDT-object's LOV EA so it has a slot at
 * \a ea_off, then delegate to lfsck_layout_refill_lovea() to fill that
 * slot with \a cfid / \a ost_idx.
 *
 * With LU_XATTR_CREATE or \a reset a brand-new V1 EA is initialized
 * (RAID0 pattern, default stripe size from lfsck_layout_get_def_stripesize;
 * LOV_PATTERN_F_HOLE is set when ea_off != 0 or resetting, since the
 * earlier slots are dummies).  Otherwise the existing EA (V1 or V3) is
 * grown: the gap between the current stripe count and ea_off is zero-filled
 * and marked as a hole, and lmm_layout_gen is bumped.
 *
1785 * \retval +1: repaired
1786 * \retval 0: did nothing
1787 * \retval -ve: on error
 */
1789 static int lfsck_layout_extend_lovea(const struct lu_env *env,
1790 struct lfsck_instance *lfsck,
1791 struct thandle *handle,
1792 struct dt_object *parent,
1793 struct lu_fid *cfid,
1794 struct lu_buf *buf, int fl,
1795 __u32 ost_idx, __u32 ea_off, bool reset)
1797 struct lov_mds_md_v1 *lmm = buf->lb_buf;
1798 struct lov_ost_data_v1 *objs;
1804 if (fl == LU_XATTR_CREATE || reset) {
1805 __u32 pattern = LOV_PATTERN_RAID0;
1808 LASSERT(buf->lb_len == lov_mds_md_size(count, LOV_MAGIC_V1));
/* Slots before ea_off do not exist yet: mark the layout as holed. */
1810 if (ea_off != 0 || reset) {
1811 pattern |= LOV_PATTERN_F_HOLE;
1815 memset(lmm, 0, buf->lb_len);
1816 lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
1817 lmm->lmm_pattern = cpu_to_le32(pattern);
1818 fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
1819 lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
1821 rc = lfsck_layout_get_def_stripesize(env, lfsck,
1822 &lmm->lmm_stripe_size);
1826 objs = &lmm->lmm_objects[ea_off];
1828 __u32 magic = le32_to_cpu(lmm->lmm_magic);
1831 count = le16_to_cpu(lmm->lmm_stripe_count);
1832 if (magic == LOV_MAGIC_V1)
1833 objs = &lmm->lmm_objects[count];
1835 objs = &((struct lov_mds_md_v3 *)lmm)->
/* Zero-fill the dummy slots between the old end and ea_off. */
1838 gap = ea_off - count;
1841 LASSERT(buf->lb_len == lov_mds_md_size(count, magic));
1844 memset(objs, 0, gap * sizeof(*objs));
1845 lmm->lmm_pattern |= cpu_to_le32(LOV_PATTERN_F_HOLE);
/* Layout changed: bump the generation so clients revalidate. */
1849 lmm->lmm_layout_gen =
1850 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
1854 lmm->lmm_stripe_count = cpu_to_le16(count);
1855 rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
1858 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant extend layout EA for "
1859 DFID": parent "DFID", OST-index %u, stripe-index %u, fl %d, "
1860 "reset %s, %s LOV EA hole: rc = %d\n",
1861 lfsck_lfsck2name(lfsck), PFID(cfid), PFID(lfsck_dto2fid(parent)),
1862 ost_idx, ea_off, fl, reset ? "yes" : "no",
1863 hole ? "with" : "without", rc);
/*
 * Rewrite the OST-object's parent-FID xattr (XATTR_NAME_FID) so that it
 * points back at \a parent with stripe index \a ea_off, in a single
 * transaction on the child's device \a cdev.
 *
1869 * \retval +1: repaired
1870 * \retval 0: did nothing
1871 * \retval -ve: on error
 */
1873 static int lfsck_layout_update_pfid(const struct lu_env *env,
1874 struct lfsck_component *com,
1875 struct dt_object *parent,
1876 struct lu_fid *cfid,
1877 struct dt_device *cdev, __u32 ea_off)
1879 struct filter_fid *pfid = &lfsck_env_info(env)->lti_new_pfid;
1880 struct dt_object *child;
1881 struct thandle *handle;
1882 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1887 child = lfsck_object_find_by_dev(env, cdev, cfid);
1889 RETURN(PTR_ERR(child));
1891 handle = dt_trans_create(env, cdev);
1893 GOTO(out, rc = PTR_ERR(handle));
1895 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1896 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1897 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
1898 * MDT-object's FID::f_ver, instead it is the OST-object index in its
1899 * parent MDT-object's layout EA. */
1900 pfid->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
1901 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1903 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1907 rc = dt_trans_start(env, cdev, handle);
1911 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
/* Success is reported as +1 ("repaired") per the contract above. */
1914 GOTO(stop, rc = (rc == 0 ? 1 : rc));
1917 dt_trans_stop(env, cdev, handle);
1920 lu_object_put(env, &child->do_lu);
1926 * This function will create the MDT-object with the given (partial) LOV EA.
1928 * Under some data corruption cases, the MDT-object of the file may be lost,
1929 * but its OST-objects, or some of them are there. The layout LFSCK needs to
1930 * re-create the MDT-object with the orphan OST-object(s) information.
1932 * On the other hand, the LFSCK may have created some OST-object for repairing
1933 * dangling LOV EA reference, but as the LFSCK proceeds, it may find that
1934 * the old OST-object is there and should replace the former new created OST
1935 * object. Unfortunately, some others have modified such newly created object.
1936 * To keep the data (both new and old), the LFSCK will create MDT-object with
1937 * new FID to reference the original OST-object.
1939 * \param[in] env	pointer to the thread context
1940 * \param[in] com	pointer to the lfsck component
1941 * \param[in] ltd	pointer to target device descriptor
1942 * \param[in] rec	pointer to the record for the orphan OST-object
1943 * \param[in] cfid	pointer to FID for the orphan OST-object
1944 * \param[in] infix	additional information, such as the FID for original
1945 *			MDT-object and the stripe offset in the LOV EA
1946 * \param[in] type	the type for describing why the orphan MDT-object is
1947 *			created. The rules are as following:
1949 *  type "C":	Multiple OST-objects claim the same MDT-object and the
1950 *		same slot in the layout EA. Then the LFSCK will create
1951 *		new MDT-object(s) to hold the conflict OST-object(s).
1953 *  type "N":	The orphan OST-object does not know which one was the
1954 *		real parent MDT-object, so the LFSCK uses new FID for
1955 *		its parent MDT-object.
1957 *  type "R":	The orphan OST-object knows its parent MDT-object FID,
1958 *		but does not know the position (the file name) in the
1961 * The orphan name will be like:
1962 * ${FID}-${infix}-${type}-${conflict_version}
1964 * \param[in] ea_off	the stripe offset in the LOV EA
1966 * \retval		positive on repaired something
1967 * \retval		0 if needs to repair nothing
1968 * \retval		negative error number on failure
1970 static int lfsck_layout_recreate_parent(const struct lu_env *env,
1971 struct lfsck_component *com,
1972 struct lfsck_tgt_desc *ltd,
1973 struct lu_orphan_rec *rec,
1974 struct lu_fid *cfid,
1979 struct lfsck_thread_info *info = lfsck_env_info(env);
1980 struct dt_insert_rec *dtrec = &info->lti_dt_rec;
1981 char *name = info->lti_key;
1982 struct lu_attr *la = &info->lti_la;
1983 struct dt_object_format *dof = &info->lti_dof;
1984 struct lfsck_instance *lfsck = com->lc_lfsck;
1985 struct lu_fid *pfid = &rec->lor_fid;
1986 struct lu_fid *tfid = &info->lti_fid3;
1987 struct dt_device *next = lfsck->li_next;
1988 struct dt_object *pobj = NULL;
1989 struct dt_object *cobj = NULL;
1990 struct thandle *th = NULL;
1991 struct lu_buf *pbuf = NULL;
1992 struct lu_buf *ea_buf = &info->lti_big_buf;
1993 struct lustre_handle lh = { 0 };
1994 struct linkea_data ldata = { 0 };
1995 struct lu_buf linkea_buf;
1996 const struct lu_name *pname;
/* Remember the shared big-buf length; restored before returning. */
1997 int buflen = ea_buf->lb_len;
2002 /* Create .lustre/lost+found/MDTxxxx when needed. */
2003 if (unlikely(lfsck->li_lpf_obj == NULL)) {
2004 rc = lfsck_create_lpf(env, lfsck);
/* The orphan does not know its parent FID ("N" type): allocate a new
 * FID and prepare a filter_fid so the OST-object can be re-stamped. */
2009 if (fid_is_zero(pfid)) {
2010 struct filter_fid *ff = &info->lti_new_pfid;
2012 rc = lfsck_fid_alloc(env, lfsck, pfid, false);
2016 ff->ff_parent.f_seq = cpu_to_le64(pfid->f_seq);
2017 ff->ff_parent.f_oid = cpu_to_le32(pfid->f_oid);
2018 /* Currently, the filter_fid::ff_parent::f_ver is not the
2019 * real parent MDT-object's FID::f_ver, instead it is the
2020 * OST-object index in its parent MDT-object's layout EA. */
2021 ff->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
2022 pbuf = lfsck_buf_get(env, ff, sizeof(struct filter_fid));
2023 cobj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, cfid);
2025 GOTO(log, rc = PTR_ERR(cobj));
2028 pobj = lfsck_object_find_by_dev(env, lfsck->li_bottom, pfid);
2030 GOTO(put, rc = PTR_ERR(pobj));
2032 LASSERT(infix != NULL);
2033 LASSERT(type != NULL);
/* Build a unique lost+found name; the conflict-version loop that retries
 * on an existing entry is elided from this listing. */
2036 snprintf(name, NAME_MAX, DFID"%s-%s-%d", PFID(pfid), infix,
2038 rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
2039 (const struct dt_key *)name, BYPASS_CAPA);
2040 if (rc != 0 && rc != -ENOENT)
2044 rc = linkea_data_new(&ldata,
2045 &lfsck_env_info(env)->lti_linkea_buf);
2049 pname = lfsck_name_get_const(env, name, strlen(name));
2050 rc = linkea_add_buf(&ldata, pname, lfsck_dto2fid(lfsck->li_lpf_obj));
/* Minimal attributes for the re-created file: owner from the orphan
 * record, mode 0400 regular file. */
2054 memset(la, 0, sizeof(*la));
2055 la->la_uid = rec->lor_uid;
2056 la->la_gid = rec->lor_gid;
2057 la->la_mode = S_IFREG | S_IRUSR;
2058 la->la_valid = LA_MODE | LA_UID | LA_GID;
2060 memset(dof, 0, sizeof(*dof));
2061 dof->dof_type = dt_mode_to_dft(S_IFREG);
2063 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2065 lu_buf_realloc(ea_buf, rc);
2066 buflen = ea_buf->lb_len;
2067 if (ea_buf->lb_buf == NULL)
2068 GOTO(put, rc = -ENOMEM);
2070 ea_buf->lb_len = rc;
2073 /* Hold update lock on the .lustre/lost+found/MDTxxxx/.
2075 * XXX: Currently, we do not grab the PDO lock as normal create cases,
2076 *	because creating MDT-object for orphan OST-object is rare, we
2077 *	do not much care about the performance. It can be improved in
2078 *	the future when needed. */
2079 rc = lfsck_layout_lock(env, com, lfsck->li_lpf_obj, &lh,
2080 MDS_INODELOCK_UPDATE);
2084 th = dt_trans_create(env, next);
2086 GOTO(unlock, rc = PTR_ERR(th));
2088 /* 1a. Update OST-object's parent information remotely.
2090 * If other subsequent modifications failed, then next LFSCK scanning
2091 * will process the OST-object as orphan again with known parent FID. */
2093 rc = dt_declare_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th);
2098 /* 2a. Create the MDT-object locally. */
2099 rc = dt_declare_create(env, pobj, la, NULL, dof, th);
2103 /* 3a. Add layout EA for the MDT-object. */
2104 rc = dt_declare_xattr_set(env, pobj, ea_buf, XATTR_NAME_LOV,
2105 LU_XATTR_CREATE, th);
2109 /* 4a. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2110 dtrec->rec_fid = pfid;
2111 dtrec->rec_type = S_IFREG;
2112 rc = dt_declare_insert(env, lfsck->li_lpf_obj,
2113 (const struct dt_rec *)dtrec,
2114 (const struct dt_key *)name, th);
2118 /* 5a. insert linkEA for parent. */
2119 linkea_buf.lb_buf = ldata.ld_buf->lb_buf;
2120 linkea_buf.lb_len = ldata.ld_leh->leh_len;
2121 rc = dt_declare_xattr_set(env, pobj, &linkea_buf,
2122 XATTR_NAME_LINK, 0, th);
2126 rc = dt_trans_start(env, next, th);
2130 /* 1b. Update OST-object's parent information remotely. */
2132 rc = dt_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th,
2138 dt_write_lock(env, pobj, 0);
2139 /* 2b. Create the MDT-object locally. */
2140 rc = dt_create(env, pobj, la, NULL, dof, th);
2142 /* 3b. Add layout EA for the MDT-object. */
2143 rc = lfsck_layout_extend_lovea(env, lfsck, th, pobj, cfid,
2144 ea_buf, LU_XATTR_CREATE,
2145 ltd->ltd_index, ea_off, false);
2146 dt_write_unlock(env, pobj);
2150 /* 4b. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2151 rc = dt_insert(env, lfsck->li_lpf_obj, (const struct dt_rec *)dtrec,
2152 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
2156 /* 5b. insert linkEA for parent. */
2157 rc = dt_xattr_set(env, pobj, &linkea_buf,
2158 XATTR_NAME_LINK, 0, th, BYPASS_CAPA);
2163 dt_trans_stop(env, next, th);
2166 lfsck_layout_unlock(&lh);
2169 if (cobj != NULL && !IS_ERR(cobj))
2170 lu_object_put(env, &cobj->do_lu);
2171 if (pobj != NULL && !IS_ERR(pobj))
2172 lu_object_put(env, &pobj->do_lu);
/* Restore the shared big-buf length for subsequent users. */
2173 ea_buf->lb_len = buflen;
2177 CDEBUG(D_LFSCK, "%s layout LFSCK assistant failed to "
2178 "recreate the lost MDT-object: parent "DFID
2179 ", child "DFID", OST-index %u, stripe-index %u, "
2180 "infix %s, type %s: rc = %d\n",
2181 lfsck_lfsck2name(lfsck), PFID(pfid), PFID(cfid),
2182 ltd->ltd_index, ea_off, infix, type, rc);
2184 return rc >= 0 ? 1 : rc;
/*
 * Ask the OST that holds \a fid to conditionally destroy that OST-object:
 * pack an LE_CONDITIONAL_DESTROY lfsck_request into an LFSCK_NOTIFY RPC
 * and wait for the reply synchronously.  Fails with -EOPNOTSUPP if the
 * OST export does not advertise OBD_CONNECT_LFSCK.  The slave-side policy
 * (only destroy never-modified repair objects) is implemented by
 * lfsck_layout_slave_conditional_destroy() below.
 * (The tail of this function -- reply handling and target put -- is
 * elided from this listing.)
 */
2187 static int lfsck_layout_master_conditional_destroy(const struct lu_env *env,
2188 struct lfsck_component *com,
2189 const struct lu_fid *fid,
2192 struct lfsck_thread_info *info = lfsck_env_info(env);
2193 struct lfsck_request *lr = &info->lti_lr;
2194 struct lfsck_instance *lfsck = com->lc_lfsck;
2195 struct lfsck_tgt_desc *ltd;
2196 struct ptlrpc_request *req;
2197 struct lfsck_request *tmp;
2198 struct obd_export *exp;
2202 ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
2203 if (unlikely(ltd == NULL))
2207 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
2208 GOTO(put, rc = -EOPNOTSUPP);
2210 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2212 GOTO(put, rc = -ENOMEM);
2214 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2216 ptlrpc_request_free(req);
2221 memset(lr, 0, sizeof(*lr));
2222 lr->lr_event = LE_CONDITIONAL_DESTROY;
2223 lr->lr_active = LFSCK_TYPE_LAYOUT;
2226 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2228 ptlrpc_request_set_replen(req);
2230 rc = ptlrpc_queue_wait(req);
2231 ptlrpc_req_finished(req);
/*
 * Slave-side handler for LE_CONDITIONAL_DESTROY: destroy the local
 * OST-object named by lr->lr_fid only if it is provably an untouched
 * object that the LFSCK itself created while repairing a dangling
 * reference (recognizable by la_ctime == 0 without S_ISUID).
 *
 * Ordering: a lock-free attr check first (fast reject with -ETXTBSY),
 * then an EX extent lock over [0, EOF] to drain any in-flight writes,
 * then the attr check is repeated under dt_write_lock before the
 * ref_del + destroy transaction.
 */
2241 static int lfsck_layout_slave_conditional_destroy(const struct lu_env *env,
2242 struct lfsck_component *com,
2243 struct lfsck_request *lr)
2245 struct lfsck_thread_info *info = lfsck_env_info(env);
2246 struct lu_attr *la = &info->lti_la;
2247 ldlm_policy_data_t *policy = &info->lti_policy;
2248 struct ldlm_res_id *resid = &info->lti_resid;
2249 struct lfsck_instance *lfsck = com->lc_lfsck;
2250 struct dt_device *dev = lfsck->li_bottom;
2251 struct lu_fid *fid = &lr->lr_fid;
2252 struct dt_object *obj;
2253 struct thandle *th = NULL;
2254 struct lustre_handle lh = { 0 };
2259 obj = lfsck_object_find_by_dev(env, dev, fid);
2261 RETURN(PTR_ERR(obj));
2263 dt_read_lock(env, obj, 0);
2264 if (dt_object_exists(obj) == 0) {
2265 dt_read_unlock(env, obj);
2267 GOTO(put, rc = -ENOENT);
2270 /* Get obj's attr without lock firstly. */
2271 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2272 dt_read_unlock(env, obj);
/* Non-zero ctime (or S_ISUID set) means the object was modified or is
 * not an LFSCK-created repair object: refuse to destroy it. */
2276 if (likely(la->la_ctime != 0 || la->la_mode & S_ISUID))
2277 GOTO(put, rc = -ETXTBSY);
2279 /* Acquire extent lock on [0, EOF] to sync with all possible written. */
2280 LASSERT(lfsck->li_namespace != NULL);
2282 memset(policy, 0, sizeof(*policy));
2283 policy->l_extent.end = OBD_OBJECT_EOF;
2284 ost_fid_build_resid(fid, resid);
2285 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_EXTENT,
2286 policy, LCK_EX, &flags, ldlm_blocking_ast,
2287 ldlm_completion_ast, NULL, NULL, 0,
2288 LVB_T_NONE, NULL, &lh);
2290 GOTO(put, rc = -EIO);
2292 dt_write_lock(env, obj, 0);
2293 /* Get obj's attr within lock again. */
2294 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Re-check under the lock: a racing writer may have touched it. */
2298 if (la->la_ctime != 0)
2299 GOTO(unlock, rc = -ETXTBSY);
2301 th = dt_trans_create(env, dev);
2303 GOTO(unlock, rc = PTR_ERR(th));
2305 rc = dt_declare_ref_del(env, obj, th);
2309 rc = dt_declare_destroy(env, obj, th);
2313 rc = dt_trans_start_local(env, dev, th);
2317 rc = dt_ref_del(env, obj, th);
2321 rc = dt_destroy(env, obj, th);
2323 CDEBUG(D_LFSCK, "%s: layout LFSCK destroyed the empty "
2324 "OST-object "DFID" that was created for reparing "
2325 "dangling referenced case. But the original missed "
2326 "OST-object is found now.\n",
2327 lfsck_lfsck2name(lfsck), PFID(fid));
2332 dt_trans_stop(env, dev, th);
2335 dt_write_unlock(env, obj);
2336 ldlm_lock_decref(&lh, LCK_EX);
2339 lu_object_put(env, &obj->do_lu);
/*
2345 * Some OST-object has occupied the specified layout EA slot.
2346 * Such OST-object may be generated by the LFSCK when repair
2347 * dangling referenced MDT-object, which can be indicated by
2348 * attr::la_ctime == 0 but without S_ISUID in la_mode. If it
2349 * is true and such OST-object has not been modified yet, we
2350 * will replace it with the orphan OST-object; otherwise the
2351 * LFSCK will create new MDT-object to reference the orphan.
 *
2353 * \retval +1: repaired
2354 * \retval 0: did nothing
2355 * \retval -ve: on error
 */
2357 static int lfsck_layout_conflict_create(const struct lu_env *env,
2358 struct lfsck_component *com,
2359 struct lfsck_tgt_desc *ltd,
2360 struct lu_orphan_rec *rec,
2361 struct dt_object *parent,
2362 struct lu_fid *cfid,
2363 struct lu_buf *ea_buf,
2364 struct lov_ost_data_v1 *slot,
2365 __u32 ea_off, __u32 ori_len)
2367 struct lfsck_thread_info *info = lfsck_env_info(env);
2368 struct lu_fid *cfid2 = &info->lti_fid2;
2369 struct ost_id *oi = &info->lti_oi;
2370 char *infix = info->lti_tmpbuf;
2371 struct lov_mds_md_v1 *lmm = ea_buf->lb_buf;
2372 struct dt_device *dev = com->lc_lfsck->li_bottom;
2373 struct thandle *th = NULL;
2374 struct lustre_handle lh = { 0 };
2375 __u32 ost_idx2 = le32_to_cpu(slot->l_ost_idx);
/* Resolve the FID of the OST-object currently occupying the slot. */
2379 ostid_le_to_cpu(&slot->l_ost_oi, oi);
2380 ostid_to_fid(cfid2, oi, ost_idx2);
2382 /* Hold layout lock on the parent to prevent others from accessing. */
2383 rc = lfsck_layout_lock(env, com, parent, &lh,
2384 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2388 rc = lfsck_layout_master_conditional_destroy(env, com, cfid2, ost_idx2);
2390 /* If the conflict OST-object is not created for fixing dangling
2391 * referenced MDT-object in former LFSCK check/repair, or it has
2392 * been modified by others, then we cannot destroy it. Re-create
2393 * a new MDT-object for the orphan OST-object. */
2394 if (rc == -ETXTBSY) {
2395 /* No need the layout lock on the original parent. */
2396 lfsck_layout_unlock(&lh);
2397 ea_buf->lb_len = ori_len;
/* Zero lor_fid so recreate_parent allocates a brand-new parent FID;
 * the infix records the original parent FID and slot for the name. */
2399 fid_zero(&rec->lor_fid);
2400 snprintf(infix, LFSCK_TMPBUF_LEN, "-"DFID"-%x",
2401 PFID(lu_object_fid(&parent->do_lu)), ea_off);
2402 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2403 infix, "C", ea_off);
2408 if (rc != 0 && rc != -ENOENT)
/* Conflict object destroyed (or already gone): take over the slot. */
2411 th = dt_trans_create(env, dev);
2413 GOTO(unlock, rc = PTR_ERR(th));
2415 rc = dt_declare_xattr_set(env, parent, ea_buf, XATTR_NAME_LOV,
2416 LU_XATTR_REPLACE, th);
2420 rc = dt_trans_start_local(env, dev, th);
2424 dt_write_lock(env, parent, 0);
/* Bump the layout generation before rewriting the slot. */
2425 lmm->lmm_layout_gen = cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2426 rc = lfsck_layout_refill_lovea(env, th, parent, cfid, ea_buf, slot,
2427 LU_XATTR_REPLACE, ltd->ltd_index);
2428 dt_write_unlock(env, parent);
2433 dt_trans_stop(env, dev, th);
2436 lfsck_layout_unlock(&lh);
/* Restore the caller's buffer length (shared thread-info buffer). */
2439 ea_buf->lb_len = ori_len;
2441 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant replaced the conflict "
2442 "OST-object "DFID" on the OST %x with the orphan "DFID" on "
2443 "the OST %x: parent "DFID", stripe-index %u: rc = %d\n",
2444 lfsck_lfsck2name(com->lc_lfsck), PFID(cfid2), ost_idx2,
2445 PFID(cfid), ltd->ltd_index, PFID(lfsck_dto2fid(parent)),
2448 return rc >= 0 ? 1 : rc;
2452 * \retval +1: repaired
2453 * \retval 0: did nothing
2454 * \retval -ve: on error
2456 static int lfsck_layout_recreate_lovea(const struct lu_env *env,
2457 struct lfsck_component *com,
2458 struct lfsck_tgt_desc *ltd,
2459 struct lu_orphan_rec *rec,
2460 struct dt_object *parent,
2461 struct lu_fid *cfid,
2462 __u32 ost_idx, __u32 ea_off)
2464 struct lfsck_thread_info *info = lfsck_env_info(env);
2465 struct lu_buf *buf = &info->lti_big_buf;
2466 struct lu_fid *fid = &info->lti_fid2;
2467 struct ost_id *oi = &info->lti_oi;
2468 struct lfsck_instance *lfsck = com->lc_lfsck;
2469 struct dt_device *dt = lfsck->li_bottom;
2470 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2471 struct thandle *handle = NULL;
2472 size_t buflen = buf->lb_len;
2474 struct lov_mds_md_v1 *lmm;
2475 struct lov_ost_data_v1 *objs;
2476 struct lustre_handle lh = { 0 };
2483 bool locked = false;
2486 rc = lfsck_layout_lock(env, com, parent, &lh,
2487 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2489 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to recreate "
2490 "LOV EA for "DFID": parent "DFID", OST-index %u, "
2491 "stripe-index %u: rc = %d\n",
2492 lfsck_lfsck2name(lfsck), PFID(cfid),
2493 PFID(lfsck_dto2fid(parent)), ost_idx, ea_off, rc);
2500 dt_write_unlock(env, parent);
2504 if (handle != NULL) {
2505 dt_trans_stop(env, dt, handle);
2510 GOTO(unlock_layout, rc);
2513 if (buf->lb_len < lovea_size) {
2514 lu_buf_realloc(buf, lovea_size);
2515 buflen = buf->lb_len;
2516 if (buf->lb_buf == NULL)
2517 GOTO(unlock_layout, rc = -ENOMEM);
2520 if (!(bk->lb_param & LPF_DRYRUN)) {
2521 handle = dt_trans_create(env, dt);
2523 GOTO(unlock_layout, rc = PTR_ERR(handle));
2525 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2530 rc = dt_trans_start_local(env, dt, handle);
2535 dt_write_lock(env, parent, 0);
2537 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2538 if (rc == -ERANGE) {
2539 rc = dt_xattr_get(env, parent, &LU_BUF_NULL, XATTR_NAME_LOV,
2543 } else if (rc == -ENODATA || rc == 0) {
2544 lovea_size = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2545 /* If the declared is not big enough, re-try. */
2546 if (buf->lb_len < lovea_size) {
2550 fl = LU_XATTR_CREATE;
2551 } else if (rc < 0) {
2552 GOTO(unlock_parent, rc);
2553 } else if (unlikely(buf->lb_len == 0)) {
2556 fl = LU_XATTR_REPLACE;
2560 if (fl == LU_XATTR_CREATE) {
2561 if (bk->lb_param & LPF_DRYRUN)
2562 GOTO(unlock_parent, rc = 1);
2564 LASSERT(buf->lb_len >= lovea_size);
2566 buf->lb_len = lovea_size;
2567 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2568 buf, fl, ost_idx, ea_off, false);
2570 GOTO(unlock_parent, rc);
2574 rc1 = lfsck_layout_verify_header(lmm);
2576 /* If the LOV EA crashed, the rebuild it. */
2577 if (rc1 == -EINVAL) {
2578 if (bk->lb_param & LPF_DRYRUN)
2579 GOTO(unlock_parent, rc = 1);
2581 LASSERT(buf->lb_len >= lovea_size);
2583 buf->lb_len = lovea_size;
2584 memset(lmm, 0, buf->lb_len);
2585 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2586 buf, fl, ost_idx, ea_off, true);
2588 GOTO(unlock_parent, rc);
2591 /* For other unknown magic/pattern, keep the current LOV EA. */
2593 GOTO(unlock_parent, rc = rc1);
2595 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2596 * been verified in lfsck_layout_verify_header() already. If some
2597 * new magic introduced in the future, then layout LFSCK needs to
2598 * be updated also. */
2599 magic = le32_to_cpu(lmm->lmm_magic);
2600 if (magic == LOV_MAGIC_V1) {
2601 objs = &lmm->lmm_objects[0];
2603 LASSERT(magic == LOV_MAGIC_V3);
2604 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2607 count = le16_to_cpu(lmm->lmm_stripe_count);
2609 GOTO(unlock_parent, rc = -EINVAL);
2612 /* Exceed the current end of MDT-object layout EA. Then extend it. */
2613 if (count <= ea_off) {
2614 if (bk->lb_param & LPF_DRYRUN)
2615 GOTO(unlock_parent, rc = 1);
2617 lovea_size = lov_mds_md_size(ea_off + 1, magic);
2618 /* If the declared is not big enough, re-try. */
2619 if (buf->lb_len < lovea_size) {
2623 buf->lb_len = lovea_size;
2624 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2625 buf, fl, ost_idx, ea_off, false);
2627 GOTO(unlock_parent, rc);
2630 LASSERTF(rc > 0, "invalid rc = %d\n", rc);
2632 buf->lb_len = lovea_size;
2633 for (i = 0; i < count; i++, objs++) {
2634 /* The MDT-object was created via lfsck_layout_recover_create()
2635 * by others before, and we fill the dummy layout EA. */
2636 if (lovea_slot_is_dummy(objs)) {
2640 if (bk->lb_param & LPF_DRYRUN)
2641 GOTO(unlock_parent, rc = 1);
2643 lmm->lmm_layout_gen =
2644 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2645 rc = lfsck_layout_refill_lovea(env, handle, parent,
2646 cfid, buf, objs, fl,
2649 CDEBUG(D_LFSCK, "%s layout LFSCK assistant fill "
2650 "dummy layout slot for "DFID": parent "DFID
2651 ", OST-index %u, stripe-index %u: rc = %d\n",
2652 lfsck_lfsck2name(lfsck), PFID(cfid),
2653 PFID(lfsck_dto2fid(parent)), ost_idx, i, rc);
2655 GOTO(unlock_parent, rc);
2658 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2659 ostid_to_fid(fid, oi, le32_to_cpu(objs->l_ost_idx));
2660 /* It should be rare case, the slot is there, but the LFSCK
2661 * does not handle it during the first-phase cycle scanning. */
2662 if (unlikely(lu_fid_eq(fid, cfid))) {
2664 GOTO(unlock_parent, rc = 0);
2666 /* Rare case that the OST-object index
2667 * does not match the parent MDT-object
2668 * layout EA. We trust the later one. */
2669 if (bk->lb_param & LPF_DRYRUN)
2670 GOTO(unlock_parent, rc = 1);
2672 dt_write_unlock(env, parent);
2674 dt_trans_stop(env, dt, handle);
2675 lfsck_layout_unlock(&lh);
2676 buf->lb_len = buflen;
2677 rc = lfsck_layout_update_pfid(env, com, parent,
2678 cfid, ltd->ltd_tgt, i);
2680 CDEBUG(D_LFSCK, "%s layout LFSCK assistant "
2681 "updated OST-object's pfid for "DFID
2682 ": parent "DFID", OST-index %u, "
2683 "stripe-index %u: rc = %d\n",
2684 lfsck_lfsck2name(lfsck), PFID(cfid),
2685 PFID(lfsck_dto2fid(parent)),
2686 ltd->ltd_index, i, rc);
2693 /* The MDT-object exists, but related layout EA slot is occupied
2695 if (bk->lb_param & LPF_DRYRUN)
2696 GOTO(unlock_parent, rc = 1);
2698 dt_write_unlock(env, parent);
2700 dt_trans_stop(env, dt, handle);
2701 lfsck_layout_unlock(&lh);
2702 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
2703 objs = &lmm->lmm_objects[ea_off];
2705 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
2706 rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
2707 buf, objs, ea_off, buflen);
2713 dt_write_unlock(env, parent);
2717 dt_trans_stop(env, dt, handle);
2720 lfsck_layout_unlock(&lh);
2721 buf->lb_len = buflen;
/*
 * Handle one orphan OST-object record during the phase-2 orphan scan.
 *
 * Validates the child (OST-object) FID and the parent FID recorded in
 * @rec, then either recreates a missing parent MDT-object
 * (lfsck_layout_recreate_parent) or refills the parent's layout EA slot
 * (lfsck_layout_recreate_lovea). Repair/failure counters in the RAM copy
 * of the trace file are updated under com->lc_sem.
 *
 * NOTE(review): some lines of this function are not visible in this
 * chunk (error-check branches and the final RETURN); comments below only
 * describe what the visible code demonstrates.
 */
2726 static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
2727 struct lfsck_component *com,
2728 struct lfsck_tgt_desc *ltd,
2729 struct lu_orphan_rec *rec,
2730 struct lu_fid *cfid)
2732 struct lfsck_layout *lo = com->lc_file_ram;
2733 struct lu_fid *pfid = &rec->lor_fid;
2734 struct dt_object *parent = NULL;
/* The parent FID's f_ver field is reused as the stripe index in the
 * parent MDT-object's layout EA (see filter_fid comments elsewhere). */
2735 __u32 ea_off = pfid->f_stripe_idx;
2739 if (!fid_is_sane(cfid))
2740 GOTO(out, rc = -EINVAL);
/* Zero parent FID: the OST-object carries no parent back-pointer, so a
 * brand-new parent MDT-object must be created for it. */
2742 if (fid_is_zero(pfid)) {
2743 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2749 if (!fid_is_sane(pfid))
2750 GOTO(out, rc = -EINVAL);
2752 parent = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
2754 GOTO(out, rc = PTR_ERR(parent));
/* Remote (cross-MDT) parents cannot be repaired from here. */
2756 if (unlikely(dt_object_remote(parent) != 0))
2757 GOTO(put, rc = -EXDEV);
/* Parent FID known but object gone: recreate the parent MDT-object. */
2759 if (dt_object_exists(parent) == 0) {
2760 lu_object_put(env, &parent->do_lu);
2761 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
/* Only regular files can own OST-object stripes. */
2766 if (!S_ISREG(lu_object_attr(&parent->do_lu)))
2767 GOTO(put, rc = -EISDIR);
2769 rc = lfsck_layout_recreate_lovea(env, com, ltd, rec, parent, cfid,
2770 ltd->ltd_index, ea_off);
2776 lu_object_put(env, &parent->do_lu);
2778 /* The layout EA is changed, need to be reloaded next time. */
2779 lu_object_put_nocache(env, &parent->do_lu);
/* Account this record in the trace-file statistics. */
2782 down_write(&com->lc_sem);
2783 com->lc_new_scanned++;
2784 com->lc_new_checked++;
2786 lo->ll_objs_repaired[LLIT_ORPHAN - 1]++;
2788 } else if (rc < 0) {
2789 lo->ll_objs_failed_phase2++;
2791 up_write(&com->lc_sem);
/*
 * Scan the orphan OST-object index exported by one OST (@ltd) and repair
 * each orphan via lfsck_layout_scan_orphan_one().
 *
 * The per-OST orphan "index" is addressed by a pseudo FID built from an
 * IDIF ost_id with id 0 and the OST index; iteration uses the dt index
 * iterator ops with dt_lfsck_orphan_features. Returns 0 on success or a
 * negative errno (rc > 0 from the iterator means "done" and is mapped
 * to 0 at the end).
 *
 * NOTE(review): several guard lines are missing from this extracted
 * view; only visible behavior is documented.
 */
2796 static int lfsck_layout_scan_orphan(const struct lu_env *env,
2797 struct lfsck_component *com,
2798 struct lfsck_tgt_desc *ltd)
2800 struct lfsck_layout *lo = com->lc_file_ram;
2801 struct lfsck_instance *lfsck = com->lc_lfsck;
2802 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2803 struct lfsck_thread_info *info = lfsck_env_info(env);
2804 struct ost_id *oi = &info->lti_oi;
2805 struct lu_fid *fid = &info->lti_fid;
2806 struct dt_object *obj;
2807 const struct dt_it_ops *iops;
2812 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant starts the orphan "
2813 "scanning for OST%04x\n",
2814 lfsck_lfsck2name(lfsck), ltd->ltd_index);
/* Build the pseudo FID that names the OST's orphan index object. */
2816 ostid_set_seq(oi, FID_SEQ_IDIF);
2817 ostid_set_id(oi, 0);
2818 ostid_to_fid(fid, oi, ltd->ltd_index);
2819 obj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, fid);
2820 if (unlikely(IS_ERR(obj)))
2821 GOTO(log, rc = PTR_ERR(obj));
2823 rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_orphan_features);
2827 iops = &obj->do_index_ops->dio_it;
2828 di = iops->init(env, obj, 0, BYPASS_CAPA);
2830 GOTO(put, rc = PTR_ERR(di));
2832 rc = iops->load(env, di, 0);
2834 /* -ESRCH means that the orphan OST-objects rbtree has been
2835 * cleanup because of the OSS server restart or other errors. */
2836 lo->ll_flags |= LF_INCOMPLETE;
2841 rc = iops->next(env, di);
2853 struct lu_orphan_rec *rec = &info->lti_rec;
/* Fault-injection hook: optionally stall the scan until the main
 * LFSCK thread stops running (used by tests). */
2855 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY3) &&
2857 struct ptlrpc_thread *thread = &lfsck->li_thread;
2858 struct l_wait_info lwi;
2860 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
2862 l_wait_event(thread->t_ctl_waitq,
2863 !thread_is_running(thread),
/* Remember the latest scanned FID so a restarted phase-2 scan can
 * resume from this position. */
2867 key = iops->key(env, di);
2868 com->lc_fid_latest_scanned_phase2 = *(struct lu_fid *)key;
2869 rc = iops->rec(env, di, (struct dt_rec *)rec, 0);
2871 rc = lfsck_layout_scan_orphan_one(env, com, ltd, rec,
2872 &com->lc_fid_latest_scanned_phase2);
2873 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
/* Honor the configured speed limit between records. */
2876 lfsck_control_speed_by_self(com);
2878 rc = iops->next(env, di);
2879 } while (rc < 0 && !(bk->lb_param & LPF_FAILOUT));
2886 iops->fini(env, di);
2888 lu_object_put(env, &obj->do_lu);
2891 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant finished the orphan "
2892 "scanning for OST%04x: rc = %d\n",
2893 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
/* Positive rc from the iterator means end-of-index: report success. */
2895 return rc > 0 ? 0 : rc;
2898 /* For the MDT-object with dangling reference, we need to repair the
2899 * inconsistency according to the LFSCK sponsor's requirement:
2901 * 1) Keep the inconsistency there and report the inconsistency case,
2902 * then give the chance to the application to find related issues,
2903 * and the users can make the decision about how to handle it with
2904 * more human knowledge. (by default)
2906 * 2) Re-create the missed OST-object with the FID/owner information. */
2907 static int lfsck_layout_repair_dangling(const struct lu_env *env,
2908 struct lfsck_component *com,
2909 struct lfsck_layout_req *llr,
2910 const struct lu_attr *pla)
2912 struct lfsck_thread_info *info = lfsck_env_info(env);
2913 struct filter_fid *pfid = &info->lti_new_pfid;
2914 struct dt_allocation_hint *hint = &info->lti_hint;
2915 struct lu_attr *cla = &info->lti_la2;
2916 struct dt_object *parent = llr->llr_parent->llo_obj;
2917 struct dt_object *child = llr->llr_child;
2918 struct dt_device *dev = lfsck_obj2dt_dev(child);
2919 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2920 struct thandle *handle;
2922 struct lustre_handle lh = { 0 };
/* Only re-create the lost OST-object when the sponsor explicitly
 * asked for it via the LPF_CREATE_OSTOBJ parameter. */
2927 if (com->lc_lfsck->li_bookmark_ram.lb_param & LPF_CREATE_OSTOBJ)
/* Inherit owner from the parent; mode is a plain 0666 regular file. */
2935 memset(cla, 0, sizeof(*cla));
2936 cla->la_uid = pla->la_uid;
2937 cla->la_gid = pla->la_gid;
2938 cla->la_mode = S_IFREG | 0666;
2939 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
2940 LA_ATIME | LA_MTIME | LA_CTIME;
/* Take the LFSCK layout lock on the parent to serialize with other
 * layout/xattr modifications during the repair. */
2942 rc = lfsck_layout_lock(env, com, parent, &lh,
2943 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2947 handle = dt_trans_create(env, dev);
2949 GOTO(unlock1, rc = PTR_ERR(handle));
2951 hint->dah_parent = NULL;
2953 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2954 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2955 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2956 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2957 * parent MDT-object's layout EA. */
2958 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2959 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
/* Declare: create the child plus set its parent back-pointer xattr,
 * then start the transaction. */
2961 rc = dt_declare_create(env, child, cla, hint, NULL, handle);
2965 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
2966 LU_XATTR_CREATE, handle);
2970 rc = dt_trans_start(env, dev, handle);
/* Hold the parent read-locked so it cannot be destroyed while the
 * child is being created; a dying parent aborts the repair (rc = 1,
 * i.e. "nothing to do"). */
2974 dt_read_lock(env, parent, 0);
2975 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2976 GOTO(unlock2, rc = 1);
2978 rc = dt_create(env, child, cla, hint, NULL, handle);
2982 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
2983 handle, BYPASS_CAPA);
2988 dt_read_unlock(env, parent);
2991 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2994 lfsck_layout_unlock(&lh);
2997 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant found dangling "
2998 "reference for: parent "DFID", child "DFID", OST-index %u, "
2999 "stripe-index %u, owner %u/%u. %s: rc = %d\n",
3000 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3001 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx,
3002 llr->llr_lov_idx, pla->la_uid, pla->la_gid,
3003 create ? "Create the lost OST-object as required" :
3004 "Keep the MDT-object there by default", rc);
3009 /* If the OST-object does not recognize the MDT-object as its parent, and
3010 * there is no other MDT-object claims as its parent, then just trust the
3011 * given MDT-object as its parent. So update the OST-object filter_fid. */
3012 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
3013 struct lfsck_component *com,
3014 struct lfsck_layout_req *llr,
3015 const struct lu_attr *pla)
3017 struct lfsck_thread_info *info = lfsck_env_info(env);
3018 struct filter_fid *pfid = &info->lti_new_pfid;
3019 struct lu_attr *tla = &info->lti_la3;
3020 struct dt_object *parent = llr->llr_parent->llo_obj;
3021 struct dt_object *child = llr->llr_child;
3022 struct dt_device *dev = lfsck_obj2dt_dev(child);
3023 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
3024 struct thandle *handle;
3026 struct lustre_handle lh = { 0 };
/* Serialize with concurrent layout/xattr changes on the parent. */
3030 rc = lfsck_layout_lock(env, com, parent, &lh,
3031 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
3035 handle = dt_trans_create(env, dev);
3037 GOTO(unlock1, rc = PTR_ERR(handle));
/* Build the corrected parent back-pointer (filter_fid) for the child. */
3039 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
3040 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
3041 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
3042 * MDT-object's FID::f_ver, instead it is the OST-object index in its
3043 * parent MDT-object's layout EA. */
3044 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
3045 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
/* Declare both the xattr rewrite and the owner sync on the child. */
3047 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
3051 tla->la_valid = LA_UID | LA_GID;
3052 tla->la_uid = pla->la_uid;
3053 tla->la_gid = pla->la_gid;
3054 rc = dt_declare_attr_set(env, child, tla, handle);
3058 rc = dt_trans_start(env, dev, handle);
/* A dying parent means the repair is moot: return 1 ("skipped"). */
3062 dt_write_lock(env, parent, 0);
3063 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3064 GOTO(unlock2, rc = 1);
3066 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
3071 /* Get the latest parent's owner. */
3072 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
/* Re-fetch uid/gid under lock so a racing chown is not undone. */
3076 tla->la_valid = LA_UID | LA_GID;
3077 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3082 dt_write_unlock(env, parent);
3085 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3088 lfsck_layout_unlock(&lh);
3091 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired unmatched "
3092 "MDT-OST pair for: parent "DFID", child "DFID", OST-index %u, "
3093 "stripe-index %u, owner %u/%u: rc = %d\n",
3094 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3095 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx, llr->llr_lov_idx,
3096 pla->la_uid, pla->la_gid, rc);
3101 /* If there are more than one MDT-objects claim as the OST-object's parent,
3102 * and the OST-object only recognizes one of them, then we need to generate
3103 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
3104 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
3105 struct lfsck_component *com,
3106 struct lfsck_layout_req *llr,
3110 struct lfsck_thread_info *info = lfsck_env_info(env);
3111 struct dt_allocation_hint *hint = &info->lti_hint;
3112 struct dt_object_format *dof = &info->lti_dof;
3113 struct dt_device *pdev = com->lc_lfsck->li_next;
3114 struct ost_id *oi = &info->lti_oi;
3115 struct dt_object *parent = llr->llr_parent->llo_obj;
3116 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
3117 struct dt_object *child = NULL;
3118 struct lu_device *d = &cdev->dd_lu_dev;
3119 struct lu_object *o = NULL;
3120 struct thandle *handle;
3121 struct lov_mds_md_v1 *lmm;
3122 struct lov_ost_data_v1 *objs;
3123 struct lustre_handle lh = { 0 };
/* Lock out concurrent layout changes on the parent while we swap in
 * a brand-new OST-object for the disputed stripe slot. */
3128 rc = lfsck_layout_lock(env, com, parent, &lh,
3129 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
3133 handle = dt_trans_create(env, pdev);
3135 GOTO(unlock1, rc = PTR_ERR(handle));
/* Allocate an anonymous (new-FID) object on the child's device to be
 * the replacement OST-object for this stripe. */
3137 o = lu_object_anon(env, d, NULL);
3139 GOTO(stop, rc = PTR_ERR(o));
3141 child = container_of(o, struct dt_object, do_lu);
3142 o = lu_object_locate(o->lo_header, d->ld_type);
3143 if (unlikely(o == NULL))
3144 GOTO(stop, rc = -EINVAL);
3146 child = container_of(o, struct dt_object, do_lu);
3147 la->la_valid = LA_UID | LA_GID;
3148 hint->dah_parent = NULL;
3150 dof->dof_type = DFT_REGULAR;
/* Declare: create the new child and replace the parent's LOV EA. */
3151 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
3155 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3156 LU_XATTR_REPLACE, handle);
3160 rc = dt_trans_start(env, pdev, handle);
3164 dt_write_lock(env, parent, 0);
3165 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3166 GOTO(unlock2, rc = 0);
/* Re-read the LOV EA under lock; if it vanished or shrank, someone
 * else already changed the layout — nothing to repair. */
3168 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
3169 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
3170 GOTO(unlock2, rc = 0);
3173 /* Someone change layout during the LFSCK, no need to repair then. */
3174 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
3175 GOTO(unlock2, rc = 0);
3177 rc = dt_create(env, child, la, hint, dof, handle);
3181 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3182 * been verified in lfsck_layout_verify_header() already. If some
3183 * new magic introduced in the future, then layout LFSCK needs to
3184 * be updated also. */
3185 magic = le32_to_cpu(lmm->lmm_magic);
3186 if (magic == LOV_MAGIC_V1) {
3187 objs = &lmm->lmm_objects[0];
3189 LASSERT(magic == LOV_MAGIC_V3);
3190 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Bump the layout generation and point the disputed slot at the
 * freshly created OST-object, then write the EA back. */
3193 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
3194 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
3195 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
3196 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
3197 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
3198 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3199 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
/* Map success to 1 so the caller counts this as a repair. */
3201 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
3204 dt_write_unlock(env, parent);
3208 lu_object_put(env, &child->do_lu);
3210 dt_trans_stop(env, pdev, handle);
3213 lfsck_layout_unlock(&lh);
3216 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired multiple "
3217 "references for: parent "DFID", OST-index %u, stripe-index %u, "
3218 "owner %u/%u: rc = %d\n",
3219 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3220 llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid, rc);
3225 /* If the MDT-object and the OST-object have different owner information,
3226 * then trust the MDT-object, because the normal chown/chgrp handle order
3227 * is from MDT to OST, and it is possible that some chown/chgrp operation
3228 * is partly done. */
3229 static int lfsck_layout_repair_owner(const struct lu_env *env,
3230 struct lfsck_component *com,
3231 struct lfsck_layout_req *llr,
3232 struct lu_attr *pla)
3234 struct lfsck_thread_info *info = lfsck_env_info(env);
3235 struct lu_attr *tla = &info->lti_la3;
3236 struct dt_object *parent = llr->llr_parent->llo_obj;
3237 struct dt_object *child = llr->llr_child;
3238 struct dt_device *dev = lfsck_obj2dt_dev(child);
3239 struct thandle *handle;
3243 handle = dt_trans_create(env, dev);
3245 GOTO(log, rc = PTR_ERR(handle));
/* Declare the uid/gid copy from parent to child before starting. */
3247 tla->la_uid = pla->la_uid;
3248 tla->la_gid = pla->la_gid;
3249 tla->la_valid = LA_UID | LA_GID;
3250 rc = dt_declare_attr_set(env, child, tla, handle);
3254 rc = dt_trans_start(env, dev, handle);
3258 /* Use the dt_object lock to serialize with destroy and attr_set. */
3259 dt_read_lock(env, parent, 0);
3260 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3261 GOTO(unlock, rc = 1);
3263 /* Get the latest parent's owner. */
3264 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3268 /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
3269 if (unlikely(tla->la_uid != pla->la_uid ||
3270 tla->la_gid != pla->la_gid))
3271 GOTO(unlock, rc = 1);
3273 tla->la_valid = LA_UID | LA_GID;
3274 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3279 dt_read_unlock(env, parent);
3282 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3285 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired inconsistent "
3286 "file owner for: parent "DFID", child "DFID", OST-index %u, "
3287 "stripe-index %u, owner %u/%u: rc = %d\n",
3288 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3289 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx, llr->llr_lov_idx,
3290 pla->la_uid, pla->la_gid, rc);
3295 /* Check whether the OST-object correctly back points to the
3296 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid).
 *
 * Returns one of the LLIT_* inconsistency codes (LLIT_UNMATCHED_PAIR,
 * LLIT_MULTIPLE_REFERENCED, LLIT_INCONSISTENT_OWNER), 0 when consistent,
 * or a negative errno on lookup failure. */
3297 static int lfsck_layout_check_parent(const struct lu_env *env,
3298 struct lfsck_component *com,
3299 struct dt_object *parent,
3300 const struct lu_fid *pfid,
3301 const struct lu_fid *cfid,
3302 const struct lu_attr *pla,
3303 const struct lu_attr *cla,
3304 struct lfsck_layout_req *llr,
3305 struct lu_buf *lov_ea, __u32 idx)
3307 struct lfsck_thread_info *info = lfsck_env_info(env);
3308 struct lu_buf *buf = &info->lti_big_buf;
3309 struct dt_object *tobj;
3310 struct lov_mds_md_v1 *lmm;
3311 struct lov_ost_data_v1 *objs;
3318 if (fid_is_zero(pfid)) {
3319 /* client never wrote. */
3320 if (cla->la_size == 0 && cla->la_blocks == 0) {
/* Empty, never-written object: only the owner can disagree. */
3321 if (unlikely(cla->la_uid != pla->la_uid ||
3322 cla->la_gid != pla->la_gid))
3323 RETURN (LLIT_INCONSISTENT_OWNER);
3328 RETURN(LLIT_UNMATCHED_PAIR);
3331 if (unlikely(!fid_is_sane(pfid)))
3332 RETURN(LLIT_UNMATCHED_PAIR);
/* Back-pointer names @parent: consistent only if the stripe index in
 * the back-pointer matches the slot we found the child in. */
3334 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
3335 if (llr->llr_lov_idx == idx)
3338 RETURN(LLIT_UNMATCHED_PAIR);
/* Back-pointer names a different MDT-object: look it up and see
 * whether it really claims this OST-object in its own layout EA. */
3341 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
3343 RETURN(LLIT_UNMATCHED_PAIR);
3346 RETURN(PTR_ERR(tobj));
3348 if (!dt_object_exists(tobj))
3349 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3351 /* Load the tobj's layout EA, in spite of it is a local MDT-object or
3352 * remote one on another MDT. Then check whether the given OST-object
3353 * is in such layout. If yes, it is multiple referenced, otherwise it
3354 * is unmatched referenced case. */
3355 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
3357 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3363 magic = le32_to_cpu(lmm->lmm_magic);
3364 if (magic == LOV_MAGIC_V1) {
3365 objs = &lmm->lmm_objects[0];
3367 LASSERT(magic == LOV_MAGIC_V3);
3368 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Walk the other parent's stripes looking for our child's FID. */
3371 count = le16_to_cpu(lmm->lmm_stripe_count);
3372 for (i = 0; i < count; i++, objs++) {
3373 struct lu_fid *tfid = &info->lti_fid2;
3374 struct ost_id *oi = &info->lti_oi;
3376 if (lovea_slot_is_dummy(objs))
3379 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3380 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
3381 if (lu_fid_eq(cfid, tfid)) {
3384 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
3388 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3391 lfsck_object_put(env, tobj);
/*
 * Verify one MDT-object/OST-object pair queued for the layout assistant
 * and repair any inconsistency found.
 *
 * Classifies the pair into an LLIT_* inconsistency type (dangling,
 * unmatched pair, multiple references, inconsistent owner) and, unless
 * running in dry-run mode, dispatches to the matching repair helper.
 * Statistics in the RAM trace file are updated under com->lc_sem.
 */
3396 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
3397 struct lfsck_component *com,
3398 struct lfsck_layout_req *llr)
3400 struct lfsck_layout *lo = com->lc_file_ram;
3401 struct lfsck_thread_info *info = lfsck_env_info(env);
3402 struct filter_fid_old *pea = &info->lti_old_pfid;
3403 struct lu_fid *pfid = &info->lti_fid;
3404 struct lu_buf *buf = NULL;
3405 struct dt_object *parent = llr->llr_parent->llo_obj;
3406 struct dt_object *child = llr->llr_child;
3407 struct lu_attr *pla = &info->lti_la;
3408 struct lu_attr *cla = &info->lti_la2;
3409 struct lfsck_instance *lfsck = com->lc_lfsck;
3410 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3411 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
3416 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
3418 if (lu_object_is_dying(parent->do_lu.lo_header))
/* Child attr fetch failing with -ENOENT means the OST-object is
 * missing: a dangling reference, unless the parent died meanwhile. */
3424 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
3425 if (rc == -ENOENT) {
3426 if (lu_object_is_dying(parent->do_lu.lo_header))
3429 type = LLIT_DANGLING;
/* Read the child's parent back-pointer (filter_fid). Both the old
 * (larger) and the current filter_fid layouts are acceptable sizes;
 * any other size means the xattr is corrupt. */
3436 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
3437 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
3438 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
3439 rc != sizeof(struct filter_fid))) {
3440 type = LLIT_UNMATCHED_PAIR;
3444 if (rc < 0 && rc != -ENODATA)
3447 if (rc == -ENODATA) {
3450 fid_le_to_cpu(pfid, &pea->ff_parent);
3451 /* Currently, the filter_fid::ff_parent::f_ver is not the
3452 * real parent MDT-object's FID::f_ver, instead it is the
3453 * OST-object index in its parent MDT-object's layout EA. */
3454 idx = pfid->f_stripe_idx;
3458 rc = lfsck_layout_check_parent(env, com, parent, pfid,
3459 lu_object_fid(&child->do_lu),
3460 pla, cla, llr, buf, idx);
/* Back-pointer is consistent; the only remaining check is owner. */
3469 if (unlikely(cla->la_uid != pla->la_uid ||
3470 cla->la_gid != pla->la_gid)) {
3471 type = LLIT_INCONSISTENT_OWNER;
/* In dry-run mode report (rc = 1 path) but never modify anything. */
3476 if (bk->lb_param & LPF_DRYRUN) {
3477 if (type != LLIT_NONE)
/* Dispatch to the repair routine for the detected inconsistency. */
3485 rc = lfsck_layout_repair_dangling(env, com, llr, pla);
3487 case LLIT_UNMATCHED_PAIR:
3488 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
3490 case LLIT_MULTIPLE_REFERENCED:
3491 rc = lfsck_layout_repair_multiple_references(env, com, llr,
3494 case LLIT_INCONSISTENT_OWNER:
3495 rc = lfsck_layout_repair_owner(env, com, llr, pla);
/* Fold the result into the trace-file statistics. */
3505 down_write(&com->lc_sem);
3507 struct lfsck_layout_master_data *llmd = com->lc_data;
3509 if (unlikely(llmd->llmd_exit)) {
3511 } else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
3512 rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
3513 rc == -EHOSTUNREACH) {
3514 /* If cannot touch the target server,
3515 * mark the LFSCK as INCOMPLETE. */
3516 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant fail to "
3517 "talk with OST %x: rc = %d\n",
3518 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
3519 lo->ll_flags |= LF_INCOMPLETE;
3520 lo->ll_objs_skipped++;
3523 lfsck_layout_record_failure(env, lfsck, lo);
3525 } else if (rc > 0) {
3526 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
3527 "unknown type = %d\n", type);
3529 lo->ll_objs_repaired[type - 1]++;
/* Remember where the first inconsistency was seen so a later real
 * (non-dry-run) pass can resume from that position. */
3530 if (bk->lb_param & LPF_DRYRUN &&
3531 unlikely(lo->ll_pos_first_inconsistent == 0))
3532 lo->ll_pos_first_inconsistent =
3533 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
3536 up_write(&com->lc_sem);
/*
 * Main body of the layout LFSCK assistant thread.
 *
 * Life cycle:
 *   1. Notify all involved targets (LE_START) and mark itself RUNNING.
 *   2. Phase 1: drain llmd_req_list, verifying/repairing each queued
 *      request via lfsck_layout_assistant_handle_one(); wake the master
 *      engine when the queue empties or half the async window is done.
 *   3. On llmd_to_post: broadcast LE_PHASE1_DONE.
 *   4. Phase 2 (llmd_to_double_scan): poll the other targets (at most
 *      every 30s) and scan each OST's orphan index as it becomes ready.
 *   5. Cleanup: free unhandled requests, broadcast the final event
 *      (LE_PHASE2_DONE / LE_STOP / LE_PEER_EXIT), sync async updates,
 *      record the double-scan result, and mark itself STOPPED.
 */
3541 static int lfsck_layout_assistant(void *args)
3543 struct lfsck_thread_args *lta = args;
3544 struct lu_env *env = &lta->lta_env;
3545 struct lfsck_component *com = lta->lta_com;
3546 struct lfsck_instance *lfsck = lta->lta_lfsck;
3547 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3548 struct lfsck_position *pos = &com->lc_pos_start;
3549 struct lfsck_thread_info *info = lfsck_env_info(env);
3550 struct lfsck_request *lr = &info->lti_lr;
3551 struct lfsck_layout_master_data *llmd = com->lc_data;
3552 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3553 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3554 struct lfsck_layout_req *llr;
3555 struct l_wait_info lwi = { 0 };
/* Build the LE_START request broadcast to all involved targets. */
3560 memset(lr, 0, sizeof(*lr));
3561 lr->lr_event = LE_START;
3562 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
3563 LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
3564 lr->lr_speed = bk->lb_speed_limit;
3565 lr->lr_version = bk->lb_version;
3566 lr->lr_param = bk->lb_param;
3567 lr->lr_async_windows = bk->lb_async_windows;
3568 lr->lr_flags = LEF_TO_OST;
/* Scanning from the very beginning implies a reset on the targets. */
3569 if (pos->lp_oit_cookie <= 1)
3570 lr->lr_param |= LPF_RESET;
3572 rc = lfsck_layout_master_notify_others(env, com, lr);
3574 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to notify "
3575 "others for LFSCK start: rc = %d\n",
3576 lfsck_lfsck2name(lfsck), rc);
/* Announce that the assistant is running; the master engine waits
 * for this before feeding requests. */
3580 spin_lock(&llmd->llmd_lock);
3581 thread_set_flags(athread, SVC_RUNNING);
3582 spin_unlock(&llmd->llmd_lock);
3583 wake_up_all(&mthread->t_ctl_waitq);
/* ---- Phase 1: consume the request queue ---- */
3586 while (!list_empty(&llmd->llmd_req_list)) {
3587 bool wakeup = false;
3589 if (unlikely(llmd->llmd_exit ||
3590 !thread_is_running(mthread)))
3591 GOTO(cleanup1, rc = llmd->llmd_post_result);
3593 llr = list_entry(llmd->llmd_req_list.next,
3594 struct lfsck_layout_req,
3596 /* Only the lfsck_layout_assistant thread itself can
3597 * remove the "llr" from the head of the list, LFSCK
3598 * engine thread only inserts other new "lld" at the
3599 * end of the list. So it is safe to handle current
3600 * "llr" without the spin_lock. */
3601 rc = lfsck_layout_assistant_handle_one(env, com, llr);
3602 spin_lock(&llmd->llmd_lock);
3603 list_del_init(&llr->llr_list);
3604 llmd->llmd_prefetched--;
3605 /* Wake up the main engine thread only when the list
3606 * is empty or half of the prefetched items have been
3607 * handled to avoid too frequent thread schedule. */
3608 if (llmd->llmd_prefetched == 0 ||
3609 (bk->lb_async_windows != 0 &&
3610 bk->lb_async_windows / 2 ==
3611 llmd->llmd_prefetched))
3613 spin_unlock(&llmd->llmd_lock);
3615 wake_up_all(&mthread->t_ctl_waitq);
3617 lfsck_layout_req_fini(env, llr);
3618 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* Queue empty: sleep until more work arrives, or a post/double-scan
 * transition is requested. */
3622 l_wait_event(athread->t_ctl_waitq,
3623 !lfsck_layout_req_empty(llmd) ||
3625 llmd->llmd_to_post ||
3626 llmd->llmd_to_double_scan,
3629 if (unlikely(llmd->llmd_exit))
3630 GOTO(cleanup1, rc = llmd->llmd_post_result);
3632 if (!list_empty(&llmd->llmd_req_list))
/* ---- Phase 1 done: broadcast LE_PHASE1_DONE ---- */
3635 if (llmd->llmd_to_post) {
3636 llmd->llmd_to_post = 0;
3637 LASSERT(llmd->llmd_post_result > 0);
3639 memset(lr, 0, sizeof(*lr));
3640 lr->lr_event = LE_PHASE1_DONE;
3641 lr->lr_status = llmd->llmd_post_result;
3642 rc = lfsck_layout_master_notify_others(env, com, lr);
3644 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant "
3645 "failed to notify others for LFSCK "
3647 lfsck_lfsck2name(lfsck), rc);
3649 /* Wakeup the master engine to go ahead. */
3650 wake_up_all(&mthread->t_ctl_waitq);
/* ---- Phase 2: double scan / orphan handling ---- */
3653 if (llmd->llmd_to_double_scan) {
3654 llmd->llmd_to_double_scan = 0;
3655 atomic_inc(&lfsck->li_double_scan_count);
3656 llmd->llmd_in_double_scan = 1;
3657 wake_up_all(&mthread->t_ctl_waitq);
3659 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant phase2 "
3660 "scan start\n", lfsck_lfsck2name(lfsck));
3662 com->lc_new_checked = 0;
3663 com->lc_new_scanned = 0;
3664 com->lc_time_last_checkpoint = cfs_time_current();
3665 com->lc_time_next_checkpoint =
3666 com->lc_time_last_checkpoint +
3667 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
3669 /* flush all async updating before handling orphan. */
3670 dt_sync(env, lfsck->li_next);
3672 while (llmd->llmd_in_double_scan) {
3673 struct lfsck_tgt_descs *ltds =
3674 &lfsck->li_ost_descs;
3675 struct lfsck_tgt_desc *ltd;
3677 rc = lfsck_layout_master_query_others(env, com);
3678 if (lfsck_layout_master_to_orphan(llmd))
3684 /* Pull LFSCK status on related targets once
3685 * per 30 seconds if we are not notified. */
3686 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
3687 cfs_time_seconds(1),
3689 rc = l_wait_event(athread->t_ctl_waitq,
3690 lfsck_layout_master_to_orphan(llmd) ||
3692 !thread_is_running(mthread),
3695 if (unlikely(llmd->llmd_exit ||
3696 !thread_is_running(mthread)))
3697 GOTO(cleanup2, rc = 0);
3699 if (rc == -ETIMEDOUT)
/* Pop OSTs that have reached phase 2 and scan their orphans. */
3706 spin_lock(&ltds->ltd_lock);
3708 &llmd->llmd_ost_phase2_list)) {
3710 llmd->llmd_ost_phase2_list.next,
3711 struct lfsck_tgt_desc,
3712 ltd_layout_phase_list);
3714 &ltd->ltd_layout_phase_list);
3715 spin_unlock(&ltds->ltd_lock);
3717 if (bk->lb_param & LPF_ALL_TGT) {
3718 rc = lfsck_layout_scan_orphan(
3721 bk->lb_param & LPF_FAILOUT)
3725 if (unlikely(llmd->llmd_exit ||
3726 !thread_is_running(mthread)))
3727 GOTO(cleanup2, rc = 0);
3729 spin_lock(&ltds->ltd_lock);
/* All targets past phase 1: double scan completed successfully. */
3732 if (list_empty(&llmd->llmd_ost_phase1_list)) {
3733 spin_unlock(&ltds->ltd_lock);
3734 GOTO(cleanup2, rc = 1);
3736 spin_unlock(&ltds->ltd_lock);
3742 /* Cleanup the unfinished requests. */
3743 spin_lock(&llmd->llmd_lock);
3745 llmd->llmd_assistant_status = rc;
3747 while (!list_empty(&llmd->llmd_req_list)) {
3748 llr = list_entry(llmd->llmd_req_list.next,
3749 struct lfsck_layout_req,
3751 list_del_init(&llr->llr_list);
3752 llmd->llmd_prefetched--;
3753 spin_unlock(&llmd->llmd_lock);
3754 lfsck_layout_req_fini(env, llr);
3755 spin_lock(&llmd->llmd_lock);
3757 spin_unlock(&llmd->llmd_lock);
3759 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
3760 llmd->llmd_prefetched);
/* Choose the final event/status to broadcast based on how we exited. */
3763 memset(lr, 0, sizeof(*lr));
3765 lr->lr_event = LE_PHASE2_DONE;
3767 } else if (rc == 0) {
3768 if (lfsck->li_flags & LPF_ALL_TGT) {
3769 lr->lr_event = LE_STOP;
3770 lr->lr_status = LS_STOPPED;
3772 lr->lr_event = LE_PEER_EXIT;
3773 switch (lfsck->li_status) {
3776 lr->lr_status = LS_CO_PAUSED;
3780 lr->lr_status = LS_CO_STOPPED;
3783 CDEBUG(D_LFSCK, "%s: unknown status: rc = %d\n",
3784 lfsck_lfsck2name(lfsck),
3786 lr->lr_status = LS_CO_FAILED;
3791 if (lfsck->li_flags & LPF_ALL_TGT) {
3792 lr->lr_event = LE_STOP;
3793 lr->lr_status = LS_FAILED;
3795 lr->lr_event = LE_PEER_EXIT;
3796 lr->lr_status = LS_CO_FAILED;
3800 rc1 = lfsck_layout_master_notify_others(env, com, lr);
3802 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to "
3803 "notify others for LFSCK quit: rc = %d\n",
3804 lfsck_lfsck2name(lfsck), rc1);
3808 /* flush all async updating before exit. */
3809 dt_sync(env, lfsck->li_next);
3811 /* Under force exit case, some requests may be just freed without
3812 * verification, those objects should be re-handled when next run.
3813 * So not update the on-disk tracing file under such case. */
3814 if (llmd->llmd_in_double_scan) {
3815 struct lfsck_layout *lo = com->lc_file_ram;
3817 if (!llmd->llmd_exit)
3818 rc1 = lfsck_layout_double_scan_result(env, com, rc);
3820 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant phase2 scan "
3821 "finished, status %d: rc = %d\n",
3822 lfsck_lfsck2name(lfsck), lo->ll_status, rc1);
3826 if (llmd->llmd_in_double_scan)
3827 atomic_dec(&lfsck->li_double_scan_count);
/* Publish the final status and wake the master before exiting. */
3829 spin_lock(&llmd->llmd_lock);
3830 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
3831 thread_set_flags(athread, SVC_STOPPED);
3832 wake_up_all(&mthread->t_ctl_waitq);
3833 spin_unlock(&llmd->llmd_lock);
3834 lfsck_thread_args_fini(lta);
/*
 * ptlrpc interpret callback for an async LFSCK_QUERY sent to the master.
 *
 * If the master replied that it is no longer scanning (or the RPC failed,
 * which is treated as "master finished" so the slave can make progress),
 * the per-master tracking entry is removed from llsd_master_list. Drops
 * the references taken when the request was packed (llst, com, exp).
 */
3840 lfsck_layout_slave_async_interpret(const struct lu_env *env,
3841 struct ptlrpc_request *req,
3844 struct lfsck_layout_slave_async_args *llsaa = args;
3845 struct obd_export *exp = llsaa->llsaa_exp;
3846 struct lfsck_component *com = llsaa->llsaa_com;
3847 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
3848 struct lfsck_layout_slave_data *llsd = com->lc_data;
3849 struct lfsck_reply *lr = NULL;
3853 /* It is quite probably caused by target crash,
3854 * to make the LFSCK can go ahead, assume that
3855 * the target finished the LFSCK prcoessing. */
3858 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
3859 if (lr->lr_status != LS_SCANNING_PHASE1 &&
3860 lr->lr_status != LS_SCANNING_PHASE2)
3865 CDEBUG(D_LFSCK, "%s: layout LFSCK slave gets the MDT %x "
3866 "status %d\n", lfsck_lfsck2name(com->lc_lfsck),
3867 llst->llst_index, lr != NULL ? lr->lr_status : rc);
3869 lfsck_layout_llst_del(llsd, llst);
/* Balance the references held for the duration of the RPC. */
3872 lfsck_layout_llst_put(llst);
3873 lfsck_component_put(env, com);
3874 class_export_put(exp);
/*
 * Pack and enqueue an asynchronous LFSCK_QUERY RPC to one master (@exp).
 *
 * The reply is handled by lfsck_layout_slave_async_interpret(); a
 * reference on @com is taken here and released there. The request is
 * added to @set; the caller is responsible for sending the set.
 */
3879 static int lfsck_layout_async_query(const struct lu_env *env,
3880 struct lfsck_component *com,
3881 struct obd_export *exp,
3882 struct lfsck_layout_slave_target *llst,
3883 struct lfsck_request *lr,
3884 struct ptlrpc_request_set *set)
3886 struct lfsck_layout_slave_async_args *llsaa;
3887 struct ptlrpc_request *req;
3888 struct lfsck_request *tmp;
3892 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
3896 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
3898 ptlrpc_request_free(req);
/* Copy the caller's request body into the RPC buffer. */
3902 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3904 ptlrpc_request_set_replen(req);
/* Stash the context the interpret callback will need. */
3906 llsaa = ptlrpc_req_async_args(req);
3907 llsaa->llsaa_exp = exp;
3908 llsaa->llsaa_com = lfsck_component_get(com);
3909 llsaa->llsaa_llst = llst;
3910 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
3911 ptlrpc_set_add_req(set, req);
/* Pack and enqueue one async LFSCK_NOTIFY RPC (fire-and-forget: no interpret
 * callback is installed) carrying the caller-prepared request body \a lr to
 * the given export, adding it to \a set.
 * \retval 0 on success, negative errno on packing failure.
 * NOTE(review): error-branch lines are elided in this excerpt. */
3916 static int lfsck_layout_async_notify(const struct lu_env *env,
3917 struct obd_export *exp,
3918 struct lfsck_request *lr,
3919 struct ptlrpc_request_set *set)
3921 struct ptlrpc_request *req;
3922 struct lfsck_request *tmp;
3926 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
3930 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
3932 ptlrpc_request_free(req);
3936 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3938 ptlrpc_request_set_replen(req);
3939 ptlrpc_set_add_req(set, req);
/* Query every MDT on the slave's master list for its layout LFSCK status.
 *
 * Walks llsd_master_list under llsd_lock, using llsd_touch_gen as a
 * generation stamp so each target is visited exactly once even though
 * entries are moved to the list tail during the walk.  For each target an
 * async LFSCK_QUERY is sent (replies handled by the interpret callback);
 * unreachable targets are dropped from the list.  Finally waits for the
 * whole request set.
 * NOTE(review): some branch/label lines are elided in this excerpt. */
3945 lfsck_layout_slave_query_master(const struct lu_env *env,
3946 struct lfsck_component *com)
3948 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3949 struct lfsck_instance *lfsck = com->lc_lfsck;
3950 struct lfsck_layout_slave_data *llsd = com->lc_data;
3951 struct lfsck_layout_slave_target *llst;
3952 struct obd_export *exp;
3953 struct ptlrpc_request_set *set;
3958 set = ptlrpc_prep_set();
3960 GOTO(log, rc = -ENOMEM);
3962 memset(lr, 0, sizeof(*lr));
3963 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3964 lr->lr_event = LE_QUERY;
3965 lr->lr_active = LFSCK_TYPE_LAYOUT;
/* New generation: any entry already stamped with it has been visited. */
3967 llsd->llsd_touch_gen++;
3968 spin_lock(&llsd->llsd_lock);
3969 while (!list_empty(&llsd->llsd_master_list)) {
3970 llst = list_entry(llsd->llsd_master_list.next,
3971 struct lfsck_layout_slave_target,
3973 if (llst->llst_gen == llsd->llsd_touch_gen)
3976 llst->llst_gen = llsd->llsd_touch_gen;
3977 list_move_tail(&llst->llst_list,
3978 &llsd->llsd_master_list);
/* Pin the target while the lock is dropped for the RPC. */
3979 atomic_inc(&llst->llst_ref);
3980 spin_unlock(&llsd->llsd_lock);
3982 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP export for this MDT: drop it from the master list. */
3985 lfsck_layout_llst_del(llsd, llst);
3986 lfsck_layout_llst_put(llst);
3987 spin_lock(&llsd->llsd_lock);
3991 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
3993 CDEBUG(D_LFSCK, "%s: layout LFSCK slave fail to "
3994 "query %s for layout: rc = %d\n",
3995 lfsck_lfsck2name(lfsck),
3996 exp->exp_obd->obd_name, rc);
3999 lfsck_layout_llst_put(llst);
4000 class_export_put(exp);
4002 spin_lock(&llsd->llsd_lock);
4004 spin_unlock(&llsd->llsd_lock);
4006 rc = ptlrpc_set_wait(set);
4007 ptlrpc_set_destroy(set);
4009 GOTO(log, rc = (rc1 != 0 ? rc1 : rc));
4012 CDEBUG(D_LFSCK, "%s: layout LFSCK slave queries master: rc = %d\n",
4013 lfsck_lfsck2name(com->lc_lfsck), rc);
/* Notify every MDT on the slave's master list about a local LFSCK event
 * (e.g. LE_PHASE1_DONE) together with its \a result code.
 *
 * Mirrors lfsck_layout_slave_query_master(): walks llsd_master_list under
 * llsd_lock using the llsd_touch_gen generation stamp, sends an async
 * LFSCK_NOTIFY (LEF_FROM_OST) per reachable target, drops unreachable ones,
 * then waits for the request set.  Best-effort: the set-wait result is not
 * propagated to the caller.
 * NOTE(review): some branch/brace lines are elided in this excerpt. */
4019 lfsck_layout_slave_notify_master(const struct lu_env *env,
4020 struct lfsck_component *com,
4021 enum lfsck_events event, int result)
4023 struct lfsck_instance *lfsck = com->lc_lfsck;
4024 struct lfsck_layout_slave_data *llsd = com->lc_data;
4025 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
4026 struct lfsck_layout_slave_target *llst;
4027 struct obd_export *exp;
4028 struct ptlrpc_request_set *set;
4032 CDEBUG(D_LFSCK, "%s: layout LFSCK slave notifies master\n",
4033 lfsck_lfsck2name(com->lc_lfsck));
4035 set = ptlrpc_prep_set();
4039 memset(lr, 0, sizeof(*lr));
4040 lr->lr_event = event;
4041 lr->lr_flags = LEF_FROM_OST;
4042 lr->lr_status = result;
4043 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
4044 lr->lr_active = LFSCK_TYPE_LAYOUT;
/* New generation stamp: visit each target exactly once. */
4045 llsd->llsd_touch_gen++;
4046 spin_lock(&llsd->llsd_lock);
4047 while (!list_empty(&llsd->llsd_master_list)) {
4048 llst = list_entry(llsd->llsd_master_list.next,
4049 struct lfsck_layout_slave_target,
4051 if (llst->llst_gen == llsd->llsd_touch_gen)
4054 llst->llst_gen = llsd->llsd_touch_gen;
4055 list_move_tail(&llst->llst_list,
4056 &llsd->llsd_master_list);
/* Pin the target while the lock is dropped for the RPC. */
4057 atomic_inc(&llst->llst_ref);
4058 spin_unlock(&llsd->llsd_lock);
4060 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP export for this MDT: drop it from the master list. */
4063 lfsck_layout_llst_del(llsd, llst);
4064 lfsck_layout_llst_put(llst);
4065 spin_lock(&llsd->llsd_lock);
4069 rc = lfsck_layout_async_notify(env, exp, lr, set);
4071 CDEBUG(D_LFSCK, "%s: layout LFSCK slave fail to "
4072 "notify %s for layout: rc = %d\n",
4073 lfsck_lfsck2name(lfsck),
4074 exp->exp_obd->obd_name, rc);
4076 lfsck_layout_llst_put(llst);
4077 class_export_put(exp);
4078 spin_lock(&llsd->llsd_lock);
4080 spin_unlock(&llsd->llsd_lock);
4082 ptlrpc_set_wait(set);
4083 ptlrpc_set_destroy(set);
/* MDT-side check that the OST-object \a cfid really is a stripe of the
 * MDT-object \a pfid at the stripe index claimed in pfid->f_stripe_idx:
 * read the parent's LOV EA locally and search its stripe table for cfid.
 *
 * \ret -ENODATA: unrecognized stripe
 * \ret = 0 : recognized stripe
 * \ret < 0 : other failures
 * NOTE(review): some branch/return lines are elided in this excerpt. */
4093 static int lfsck_layout_master_check_pairs(const struct lu_env *env,
4094 struct lfsck_component *com,
4095 struct lu_fid *cfid,
4096 struct lu_fid *pfid)
4098 struct lfsck_thread_info *info = lfsck_env_info(env);
4099 struct lu_buf *buf = &info->lti_big_buf;
4100 struct ost_id *oi = &info->lti_oi;
4101 struct dt_object *obj;
4102 struct lov_mds_md_v1 *lmm;
4103 struct lov_ost_data_v1 *objs;
/* Stripe index the OST-object claims inside the parent's layout. */
4104 __u32 idx = pfid->f_stripe_idx;
4112 obj = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
4114 RETURN(PTR_ERR(obj));
4116 dt_read_lock(env, obj, 0);
4117 if (unlikely(!dt_object_exists(obj)))
4118 GOTO(unlock, rc = -ENOENT);
4120 rc = lfsck_layout_get_lovea(env, obj, buf, NULL);
4125 GOTO(unlock, rc = -ENODATA);
4128 rc = lfsck_layout_verify_header(lmm);
4132 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4133 * been verified in lfsck_layout_verify_header() already. If some
4134 * new magic introduced in the future, then layout LFSCK needs to
4135 * be updated also. */
4136 magic = le32_to_cpu(lmm->lmm_magic);
4137 if (magic == LOV_MAGIC_V1) {
4138 objs = &lmm->lmm_objects[0];
4140 LASSERT(magic == LOV_MAGIC_V3);
4141 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Compare stripes by ost_id; a match at the wrong slot is -ENODATA. */
4144 fid_to_ostid(cfid, oi);
4145 count = le16_to_cpu(lmm->lmm_stripe_count);
4146 for (i = 0; i < count; i++, objs++) {
4149 ostid_le_to_cpu(&objs->l_ost_oi, &oi2);
4150 if (memcmp(oi, &oi2, sizeof(*oi)) == 0)
4151 GOTO(unlock, rc = (i != idx ? -ENODATA : 0));
4154 GOTO(unlock, rc = -ENODATA);
4157 dt_read_unlock(env, obj);
4158 lu_object_put(env, &obj->do_lu);
/*
 * The LFSCK-on-OST will ask the LFSCK-on-MDT to check whether the given
 * MDT-object/OST-object pairs match or not, to avoid transferring the
 * MDT-object layout EA from MDT to OST. On one hand, the OST need not
 * understand the layout EA structure; on the other hand, it may cause
 * trouble to transfer a large layout EA from MDT to OST via normal OUT RPC.
 *
 * \ret > 0: unrecognized stripe
 * \ret = 0: recognized stripe
 * \ret < 0: other failures
 * NOTE(review): some branch/return lines are elided in this excerpt. */
4174 static int lfsck_layout_slave_check_pairs(const struct lu_env *env,
4175 struct lfsck_component *com,
4176 struct lu_fid *cfid,
4177 struct lu_fid *pfid)
4179 struct lfsck_instance *lfsck = com->lc_lfsck;
4180 struct obd_device *obd = lfsck->li_obd;
4181 struct seq_server_site *ss =
4182 lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
4183 struct obd_export *exp = NULL;
4184 struct ptlrpc_request *req = NULL;
4185 struct lfsck_request *lr;
4186 struct lu_seq_range range = { 0 };
/* IDIF FIDs carry no MDT sequence to look up. */
4190 if (unlikely(fid_is_idif(pfid)))
/* Resolve which MDT owns the claimed parent's FID sequence. */
4193 fld_range_set_any(&range);
4194 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(pfid), &range);
4196 RETURN(rc == -ENOENT ? 1 : rc);
4198 if (unlikely(!fld_range_is_mdt(&range)))
4201 exp = lustre_find_lwp_by_index(obd->obd_name, range.lsr_index);
4202 if (unlikely(exp == NULL))
4205 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
4206 GOTO(out, rc = -EOPNOTSUPP);
4208 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
4210 GOTO(out, rc = -ENOMEM);
4212 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
4214 ptlrpc_request_free(req);
4219 lr = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
4220 memset(lr, 0, sizeof(*lr));
4221 lr->lr_event = LE_PAIRS_VERIFY;
4222 lr->lr_active = LFSCK_TYPE_LAYOUT;
4223 lr->lr_fid = *cfid; /* OST-object itself FID. */
4224 lr->lr_fid2 = *pfid; /* The claimed parent FID. */
4226 ptlrpc_request_set_replen(req);
4227 rc = ptlrpc_queue_wait(req);
4228 ptlrpc_req_finished(req);
/* Parent missing or stripe not found on the MDT: "unrecognized". */
4230 if (rc == -ENOENT || rc == -ENODATA)
4237 class_export_put(exp);
/* Repair the parent-FID ("pfid") xattr of a local OST-object: store the
 * parent FID given in lr->lr_fid2 into the XATTR_NAME_FID EA of the object
 * lr->lr_fid, inside a single local transaction.  A vanished object is not
 * an error (rc = 0).
 * NOTE(review): some branch/label lines are elided in this excerpt. */
4242 static int lfsck_layout_slave_repair_pfid(const struct lu_env *env,
4243 struct lfsck_component *com,
4244 struct lfsck_request *lr)
4246 struct lfsck_thread_info *info = lfsck_env_info(env);
4247 struct filter_fid *ff = &info->lti_new_pfid;
4249 struct dt_device *dev = com->lc_lfsck->li_bottom;
4250 struct dt_object *obj;
4251 struct thandle *th = NULL;
4255 obj = lfsck_object_find_by_dev(env, dev, &lr->lr_fid);
4257 GOTO(log, rc = PTR_ERR(obj));
4259 fid_cpu_to_le(&ff->ff_parent, &lr->lr_fid2);
4260 buf = lfsck_buf_get(env, ff, sizeof(*ff));
4261 dt_write_lock(env, obj, 0);
/* Object already gone: nothing to repair. */
4262 if (unlikely(!dt_object_exists(obj)))
4263 GOTO(unlock, rc = 0);
4265 th = dt_trans_create(env, dev);
4267 GOTO(unlock, rc = PTR_ERR(th));
4269 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th);
4273 rc = dt_trans_start_local(env, dev, th);
4277 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th, BYPASS_CAPA);
4282 dt_trans_stop(env, dev, th);
4285 dt_write_unlock(env, obj);
4286 lu_object_put(env, &obj->do_lu);
4289 CDEBUG(D_LFSCK, "%s: layout LFSCK slave repaired pfid for "DFID
4290 ", parent "DFID": rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
4291 PFID(&lr->lr_fid), PFID(&lr->lr_fid2), rc);
/* Reset the in-memory layout LFSCK trace file to a pristine LS_INIT state
 * and persist it.  When \a init is false the cumulative success count and
 * last-complete time survive the reset; when true everything is zeroed.
 * NOTE(review): the if/else lines around the two memset paths are elided in
 * this excerpt. */
4298 static int lfsck_layout_reset(const struct lu_env *env,
4299 struct lfsck_component *com, bool init)
4301 struct lfsck_layout *lo = com->lc_file_ram;
4304 down_write(&com->lc_sem);
4306 memset(lo, 0, com->lc_file_size);
/* Non-init reset: preserve the historical statistics. */
4308 __u32 count = lo->ll_success_count;
4309 __u64 last_time = lo->ll_time_last_complete;
4311 memset(lo, 0, com->lc_file_size);
4312 lo->ll_success_count = count;
4313 lo->ll_time_last_complete = last_time;
4316 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
4317 lo->ll_status = LS_INIT;
4319 rc = lfsck_layout_store(env, com);
4320 up_write(&com->lc_sem);
4322 CDEBUG(D_LFSCK, "%s: layout LFSCK reset: rc = %d\n",
4323 lfsck_lfsck2name(com->lc_lfsck), rc);
/* Record one layout LFSCK failure in the trace file statistics; when
 * \a new_checked is set, also count the object as newly checked. */
4328 static void lfsck_layout_fail(const struct lu_env *env,
4329 struct lfsck_component *com, bool new_checked)
4331 struct lfsck_layout *lo = com->lc_file_ram;
4333 down_write(&com->lc_sem);
4335 com->lc_new_checked++;
4336 lfsck_layout_record_failure(env, com->lc_lfsck, lo);
4337 up_write(&com->lc_sem);
/* Persist a phase-1 checkpoint on the LFSCK master (MDT).  First drains the
 * assistant's pipeline (waits until llmd_req_list is empty or a thread
 * stops), then updates position/time/statistics in the trace file and
 * stores it.  With \a init set, only the latest-start position is recorded.
 * NOTE(review): some branch lines are elided in this excerpt. */
4340 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
4341 struct lfsck_component *com, bool init)
4343 struct lfsck_instance *lfsck = com->lc_lfsck;
4344 struct lfsck_layout *lo = com->lc_file_ram;
4345 struct lfsck_layout_master_data *llmd = com->lc_data;
4346 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4347 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4348 struct l_wait_info lwi = { 0 };
/* Nothing new since the last checkpoint: skip the store. */
4351 if (com->lc_new_checked == 0 && !init)
/* Wait for the assistant to drain the prefetched request list. */
4354 l_wait_event(mthread->t_ctl_waitq,
4355 list_empty(&llmd->llmd_req_list) ||
4356 !thread_is_running(mthread) ||
4357 thread_is_stopped(athread),
4360 if (!thread_is_running(mthread) || thread_is_stopped(athread))
4363 down_write(&com->lc_sem);
4365 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4367 lo->ll_pos_last_checkpoint =
4368 lfsck->li_pos_current.lp_oit_cookie;
4369 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4370 HALF_SEC - lfsck->li_time_last_checkpoint);
4371 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4372 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4373 com->lc_new_checked = 0;
4376 rc = lfsck_layout_store(env, com);
4377 up_write(&com->lc_sem);
4379 CDEBUG(D_LFSCK, "%s: layout LFSCK master checkpoint at the pos ["
4380 LPU64"]: rc = %d\n", lfsck_lfsck2name(lfsck),
4381 lfsck->li_pos_current.lp_oit_cookie, rc);
/* Persist a phase-1 checkpoint on the LFSCK slave (OST).  Same bookkeeping
 * as the master variant but without an assistant pipeline to drain.
 * NOTE(review): some branch lines are elided in this excerpt. */
4386 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
4387 struct lfsck_component *com, bool init)
4389 struct lfsck_instance *lfsck = com->lc_lfsck;
4390 struct lfsck_layout *lo = com->lc_file_ram;
/* Nothing new since the last checkpoint: skip the store. */
4393 if (com->lc_new_checked == 0 && !init)
4396 down_write(&com->lc_sem);
4398 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4400 lo->ll_pos_last_checkpoint =
4401 lfsck->li_pos_current.lp_oit_cookie;
4402 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4403 HALF_SEC - lfsck->li_time_last_checkpoint);
4404 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4405 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4406 com->lc_new_checked = 0;
4409 rc = lfsck_layout_store(env, com);
4410 up_write(&com->lc_sem);
4412 CDEBUG(D_LFSCK, "%s: layout LFSCK slave checkpoint at the pos ["
4413 LPU64"]: rc = %d\n", lfsck_lfsck2name(lfsck),
4414 lfsck->li_pos_current.lp_oit_cookie, rc);
/* Common preparation for a layout LFSCK run (used by both master and slave
 * prep): reset the trace file when the previous run completed (or an orphan
 * scan is requested, which must start from the beginning), then pick the
 * start position and initial status (phase-1 vs resume of phase-2) under
 * lfsck->li_lock.
 * NOTE(review): some branch/brace lines are elided in this excerpt. */
4419 static int lfsck_layout_prep(const struct lu_env *env,
4420 struct lfsck_component *com,
4421 struct lfsck_start *start)
4423 struct lfsck_instance *lfsck = com->lc_lfsck;
4424 struct lfsck_layout *lo = com->lc_file_ram;
4425 struct lfsck_position *pos = &com->lc_pos_start;
4427 fid_zero(&pos->lp_dir_parent);
4428 pos->lp_dir_cookie = 0;
4429 if (lo->ll_status == LS_COMPLETED ||
4430 lo->ll_status == LS_PARTIAL ||
4431 /* To handle orphan, must scan from the beginning. */
4432 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
4435 rc = lfsck_layout_reset(env, com, false);
4437 rc = lfsck_set_param(env, lfsck, start, true);
4440 CDEBUG(D_LFSCK, "%s: layout LFSCK prep failed: "
4441 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
4447 down_write(&com->lc_sem);
4448 lo->ll_time_latest_start = cfs_time_current_sec();
4449 spin_lock(&lfsck->li_lock);
4450 if (lo->ll_flags & LF_SCANNED_ONCE) {
/* Phase 1 already done once: resume directly in phase 2 unless a
 * dryrun result is being dropped and a rescan position exists. */
4451 if (!lfsck->li_drop_dryrun ||
4452 lo->ll_pos_first_inconsistent == 0) {
4453 lo->ll_status = LS_SCANNING_PHASE2;
4454 list_move_tail(&com->lc_link,
4455 &lfsck->li_list_double_scan);
4456 pos->lp_oit_cookie = 0;
/* Re-run phase 1 from the first recorded inconsistency. */
4460 lo->ll_status = LS_SCANNING_PHASE1;
4461 lo->ll_run_time_phase1 = 0;
4462 lo->ll_run_time_phase2 = 0;
4463 lo->ll_objs_checked_phase1 = 0;
4464 lo->ll_objs_checked_phase2 = 0;
4465 lo->ll_objs_failed_phase1 = 0;
4466 lo->ll_objs_failed_phase2 = 0;
4467 for (i = 0; i < LLIT_MAX; i++)
4468 lo->ll_objs_repaired[i] = 0;
4470 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4471 fid_zero(&com->lc_fid_latest_scanned_phase2);
/* Fresh or resumed phase-1 scan from the last checkpoint. */
4474 lo->ll_status = LS_SCANNING_PHASE1;
4475 if (!lfsck->li_drop_dryrun ||
4476 lo->ll_pos_first_inconsistent == 0)
4477 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
4479 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4481 spin_unlock(&lfsck->li_lock);
4482 up_write(&com->lc_sem);
/* Slave (OST) preparation: run the common prep, notify the upper layer when
 * a crashed LAST_ID must be rebuilt, register the requesting MDT on the
 * master list, and set up the orphan-scan rbtree when LPF_ORPHAN is set.
 * NOTE(review): some branch lines are elided in this excerpt. */
4487 static int lfsck_layout_slave_prep(const struct lu_env *env,
4488 struct lfsck_component *com,
4489 struct lfsck_start_param *lsp)
4491 struct lfsck_layout_slave_data *llsd = com->lc_data;
4492 struct lfsck_instance *lfsck = com->lc_lfsck;
4493 struct lfsck_layout *lo = com->lc_file_ram;
4494 struct lfsck_start *start = lsp->lsp_start;
4497 rc = lfsck_layout_prep(env, com, start);
/* LAST_ID files crashed and no master registered yet: tell OFD to
 * start rebuilding. */
4501 if (lo->ll_flags & LF_CRASHED_LASTID &&
4502 list_empty(&llsd->llsd_master_list)) {
4503 LASSERT(lfsck->li_out_notify != NULL);
4505 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4506 LE_LASTID_REBUILDING);
4509 if (!lsp->lsp_index_valid)
/* Remember which MDT triggered this run. */
4512 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
4513 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
4514 LASSERT(!llsd->llsd_rbtree_valid);
4516 write_lock(&llsd->llsd_rb_lock);
4517 rc = lfsck_rbtree_setup(env, com);
4518 write_unlock(&llsd->llsd_rb_lock);
4521 CDEBUG(D_LFSCK, "%s: layout LFSCK slave prep done, start pos ["
4522 LPU64"]\n", lfsck_lfsck2name(lfsck),
4523 com->lc_pos_start.lp_oit_cookie);
/* Master (MDT) preparation: run the common prep, reset the assistant state
 * flags, then start the "lfsck_layout" assistant kthread and wait until it
 * is running (or has stopped, in which case its status is returned).
 * NOTE(review): some branch lines are elided in this excerpt. */
4528 static int lfsck_layout_master_prep(const struct lu_env *env,
4529 struct lfsck_component *com,
4530 struct lfsck_start_param *lsp)
4532 struct lfsck_instance *lfsck = com->lc_lfsck;
4533 struct lfsck_layout_master_data *llmd = com->lc_data;
4534 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4535 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4536 struct lfsck_thread_args *lta;
4537 struct task_struct *task;
4541 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
/* Reset assistant bookkeeping before spawning the thread. */
4545 llmd->llmd_assistant_status = 0;
4546 llmd->llmd_post_result = 0;
4547 llmd->llmd_to_post = 0;
4548 llmd->llmd_to_double_scan = 0;
4549 llmd->llmd_in_double_scan = 0;
4550 llmd->llmd_exit = 0;
4551 thread_set_flags(athread, 0);
4553 lta = lfsck_thread_args_init(lfsck, com, lsp);
4555 RETURN(PTR_ERR(lta));
4557 task = kthread_run(lfsck_layout_assistant, lta, "lfsck_layout");
4560 CERROR("%s: cannot start LFSCK layout assistant thread: "
4561 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
4562 lfsck_thread_args_fini(lta);
4564 struct l_wait_info lwi = { 0 };
/* Block until the assistant reports running or stopped. */
4566 l_wait_event(mthread->t_ctl_waitq,
4567 thread_is_running(athread) ||
4568 thread_is_stopped(athread),
4570 if (unlikely(!thread_is_running(athread)))
4571 rc = llmd->llmd_assistant_status;
4576 CDEBUG(D_LFSCK, "%s: layout LFSCK master prep done, start pos ["
4577 LPU64"\n", lfsck_lfsck2name(lfsck),
4578 com->lc_pos_start.lp_oit_cookie);
/* Pre-fetch the attribute for each stripe in the given layout EA.
 *
 * For every (non-dummy) stripe in \a parent's LOV EA, find the owning OST,
 * build an lfsck_layout_req and queue it on llmd_req_list for the assistant
 * thread, throttled by the bookmark's async window.  Per-stripe failures
 * are recorded; with LPF_FAILOUT they abort the scan.
 * NOTE(review): some branch/brace lines are elided in this excerpt. */
4584 static int lfsck_layout_scan_stripes(const struct lu_env *env,
4585 struct lfsck_component *com,
4586 struct dt_object *parent,
4587 struct lov_mds_md_v1 *lmm)
4589 struct lfsck_thread_info *info = lfsck_env_info(env);
4590 struct lfsck_instance *lfsck = com->lc_lfsck;
4591 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4592 struct lfsck_layout *lo = com->lc_file_ram;
4593 struct lfsck_layout_master_data *llmd = com->lc_data;
4594 struct lfsck_layout_object *llo = NULL;
4595 struct lov_ost_data_v1 *objs;
4596 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
4597 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4598 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4599 struct l_wait_info lwi = { 0 };
4608 buf = lfsck_buf_get(env, &info->lti_old_pfid,
4609 sizeof(struct filter_fid_old));
4610 count = le16_to_cpu(lmm->lmm_stripe_count);
4611 gen = le16_to_cpu(lmm->lmm_layout_gen);
4612 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4613 * been verified in lfsck_layout_verify_header() already. If some
4614 * new magic introduced in the future, then layout LFSCK needs to
4615 * be updated also. */
4616 magic = le32_to_cpu(lmm->lmm_magic);
4617 if (magic == LOV_MAGIC_V1) {
4618 objs = &lmm->lmm_objects[0];
4620 LASSERT(magic == LOV_MAGIC_V3);
4621 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
4624 for (i = 0; i < count; i++, objs++) {
4625 struct lu_fid *fid = &info->lti_fid;
4626 struct ost_id *oi = &info->lti_oi;
4627 struct lfsck_layout_req *llr;
4628 struct lfsck_tgt_desc *tgt = NULL;
4629 struct dt_object *cobj = NULL;
4631 le32_to_cpu(objs->l_ost_idx);
4632 bool wakeup = false;
4634 if (unlikely(lovea_slot_is_dummy(objs)))
/* Throttle: wait while the prefetch pipeline is full. */
4637 l_wait_event(mthread->t_ctl_waitq,
4638 bk->lb_async_windows == 0 ||
4639 llmd->llmd_prefetched < bk->lb_async_windows ||
4640 !thread_is_running(mthread) ||
4641 thread_is_stopped(athread),
4644 if (unlikely(!thread_is_running(mthread)) ||
4645 thread_is_stopped(athread))
4648 ostid_le_to_cpu(&objs->l_ost_oi, oi);
4649 ostid_to_fid(fid, oi, index);
4650 tgt = lfsck_tgt_get(ltds, index);
4651 if (unlikely(tgt == NULL)) {
4652 CDEBUG(D_LFSCK, "%s: cannot talk with OST %x which "
4653 "did not join the layout LFSCK\n",
4654 lfsck_lfsck2name(lfsck), index);
4655 lo->ll_flags |= LF_INCOMPLETE;
4659 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
/* Declare-only prefetch of the OST-object's attr and pfid EA. */
4665 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
4669 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
4675 llo = lfsck_layout_object_init(env, parent, gen);
4682 llr = lfsck_layout_req_init(llo, cobj, index, i);
4689 spin_lock(&llmd->llmd_lock);
4690 if (llmd->llmd_assistant_status < 0) {
4691 spin_unlock(&llmd->llmd_lock);
4692 lfsck_layout_req_fini(env, llr);
4694 RETURN(llmd->llmd_assistant_status);
4697 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
4698 if (llmd->llmd_prefetched == 0)
4701 llmd->llmd_prefetched++;
4702 spin_unlock(&llmd->llmd_lock);
/* First request after an empty list: wake the assistant. */
4704 wake_up_all(&athread->t_ctl_waitq);
4707 down_write(&com->lc_sem);
4708 com->lc_new_checked++;
4710 lfsck_layout_record_failure(env, lfsck, lo);
4711 up_write(&com->lc_sem);
4713 if (cobj != NULL && !IS_ERR(cobj))
4714 lu_object_put(env, &cobj->do_lu);
4716 if (likely(tgt != NULL))
4719 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
4726 if (llo != NULL && !IS_ERR(llo))
4727 lfsck_layout_object_put(env, llo);
/* For the given object, read its layout EA locally. For each stripe, pre-fetch
 * the OST-object's attribute and generate a structure lfsck_layout_req on the
 * list ::llmd_req_list.
 *
 * For each request on above list, the lfsck_layout_assistant thread compares
 * the OST side attribute with local attribute, if inconsistent, then repair it.
 *
 * All above processing is async mode with pipeline.
 * Additionally, a crashed lmm_oi in the LOV EA is repaired in place here
 * (or only counted under LPF_DRYRUN) before the stripes are scanned.
 * NOTE(review): some branch/label lines are elided in this excerpt. */
4740 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
4741 struct lfsck_component *com,
4742 struct dt_object *obj)
4744 struct lfsck_thread_info *info = lfsck_env_info(env);
4745 struct ost_id *oi = &info->lti_oi;
4746 struct lfsck_layout *lo = com->lc_file_ram;
4747 struct lfsck_layout_master_data *llmd = com->lc_data;
4748 struct lfsck_instance *lfsck = com->lc_lfsck;
4749 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4750 struct thandle *handle = NULL;
4751 struct lu_buf *buf = &info->lti_big_buf;
4752 struct lov_mds_md_v1 *lmm = NULL;
4753 struct dt_device *dev = lfsck->li_bottom;
4754 struct lustre_handle lh = { 0 };
4755 ssize_t buflen = buf->lb_len;
4757 bool locked = false;
4758 bool stripe = false;
4759 bool bad_oi = false;
/* Only regular files carry a LOV EA worth checking. */
4762 if (!S_ISREG(lfsck_object_type(obj)))
4765 if (llmd->llmd_assistant_status < 0)
4766 GOTO(out, rc = -ESRCH);
/* Expected lmm_oi derived from the object's own FID (LE on disk). */
4768 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
4769 lmm_oi_cpu_to_le(oi, oi);
4770 dt_read_lock(env, obj, 0);
4774 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
4780 rc = lfsck_layout_verify_header(lmm);
4781 /* If the LOV EA crashed, then it is possible to be rebuilt later
4782 * when handle orphan OST-objects. */
4786 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
4787 GOTO(out, stripe = true);
4789 /* Inconsistent lmm_oi, should be repaired. */
4792 if (bk->lb_param & LPF_DRYRUN) {
4793 down_write(&com->lc_sem);
4794 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4795 up_write(&com->lc_sem);
4797 GOTO(out, stripe = true);
/* Take the layout/xattr ldlm lock before rewriting the EA. */
4800 if (!lustre_handle_is_used(&lh)) {
4801 dt_read_unlock(env, obj);
4803 buf->lb_len = buflen;
4804 rc = lfsck_layout_lock(env, com, obj, &lh,
4805 MDS_INODELOCK_LAYOUT |
4806 MDS_INODELOCK_XATTR);
4810 handle = dt_trans_create(env, dev);
4812 GOTO(out, rc = PTR_ERR(handle));
4814 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4815 LU_XATTR_REPLACE, handle);
4819 rc = dt_trans_start_local(env, dev, handle);
4823 dt_write_lock(env, obj, 0);
4830 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4831 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
4835 down_write(&com->lc_sem);
4836 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4837 up_write(&com->lc_sem);
4839 GOTO(out, stripe = true);
4843 if (lustre_handle_is_used(&lh))
4844 dt_write_unlock(env, obj);
4846 dt_read_unlock(env, obj);
4849 if (handle != NULL && !IS_ERR(handle))
4850 dt_trans_stop(env, dev, handle);
4852 lfsck_layout_unlock(&lh);
4855 CDEBUG(D_LFSCK, "%s: layout LFSCK master %s bad lmm_oi for "
4856 DFID": rc = %d\n", lfsck_lfsck2name(lfsck),
4857 bk->lb_param & LPF_DRYRUN ? "found" : "repaired",
4858 PFID(lfsck_dto2fid(obj)), rc);
/* With a (now) sane LOV EA, queue the per-stripe prefetches. */
4861 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
4863 down_write(&com->lc_sem);
4864 com->lc_new_checked++;
4866 lfsck_layout_record_failure(env, lfsck, lo);
4867 up_write(&com->lc_sem);
4869 buf->lb_len = buflen;
/* Slave (OST) per-object scan callback: record the object in the orphan
 * rbtree bitmap, then track the maximum object id seen per sequence so a
 * crashed LAST_ID file can be detected/rebuilt.  Objects whose FID is not a
 * recognizable OST-object FID are skipped.
 * NOTE(review): some branch/label lines are elided in this excerpt. */
4874 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
4875 struct lfsck_component *com,
4876 struct dt_object *obj)
4878 struct lfsck_instance *lfsck = com->lc_lfsck;
4879 struct lfsck_layout *lo = com->lc_file_ram;
4880 const struct lu_fid *fid = lfsck_dto2fid(obj);
4881 struct lfsck_layout_slave_data *llsd = com->lc_data;
4882 struct lfsck_layout_seq *lls;
4888 LASSERT(llsd != NULL);
/* Fault-injection hook: optionally stall this device's scan. */
4890 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
4891 cfs_fail_val == lfsck_dev_idx(lfsck->li_bottom)) {
4892 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
4894 struct ptlrpc_thread *thread = &lfsck->li_thread;
4896 l_wait_event(thread->t_ctl_waitq,
4897 !thread_is_running(thread),
/* Mark the object as locally present for phase-2 orphan checks. */
4901 lfsck_rbtree_update_bitmap(env, com, fid, false);
4903 down_write(&com->lc_sem);
4904 if (fid_is_idif(fid))
4906 else if (!fid_is_norm(fid) ||
4907 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
4908 GOTO(unlock, rc = 0);
4911 com->lc_new_checked++;
4913 lls = lfsck_layout_seq_lookup(llsd, seq);
/* First object in this sequence: allocate and load its LAST_ID. */
4916 if (unlikely(lls == NULL))
4917 GOTO(unlock, rc = -ENOMEM);
4919 INIT_LIST_HEAD(&lls->lls_list);
4921 rc = lfsck_layout_lastid_load(env, com, lls);
4923 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to "
4924 "load LAST_ID for "LPX64": rc = %d\n",
4925 lfsck_lfsck2name(com->lc_lfsck), seq, rc);
4926 lo->ll_objs_failed_phase1++;
4931 lfsck_layout_seq_insert(llsd, lls);
4934 if (unlikely(fid_is_last_id(fid)))
4935 GOTO(unlock, rc = 0);
4938 if (oid > lls->lls_lastid_known)
4939 lls->lls_lastid_known = oid;
4941 if (oid > lls->lls_lastid) {
4942 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
4943 /* OFD may create new objects during LFSCK scanning. */
4944 rc = lfsck_layout_lastid_reload(env, com, lls);
4945 if (unlikely(rc != 0))
4946 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to "
4947 "reload LAST_ID for "LPX64": rc = %d\n",
4948 lfsck_lfsck2name(com->lc_lfsck),
4950 if (oid <= lls->lls_lastid)
4951 GOTO(unlock, rc = 0);
/* An object id beyond LAST_ID means LAST_ID crashed: notify
 * the upper layer to start rebuilding. */
4953 LASSERT(lfsck->li_out_notify != NULL);
4955 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4956 LE_LASTID_REBUILDING);
4957 lo->ll_flags |= LF_CRASHED_LASTID;
4960 lls->lls_lastid = oid;
4964 GOTO(unlock, rc = 0);
4967 up_write(&com->lc_sem);
/* Directory-entry callback for the layout component: the layout LFSCK has
 * nothing to do for namespace entries (body elided in this excerpt —
 * presumably just returns 0; verify against the full source). */
4972 static int lfsck_layout_exec_dir(const struct lu_env *env,
4973 struct lfsck_component *com,
4974 struct dt_object *obj,
4975 struct lu_dirent *ent)
/* Master (MDT) phase-1 post-processing: hand the result to the assistant
 * thread, wait for its pipeline to drain (or the thread to stop), then
 * update the trace-file status — advance to phase 2 on success, LS_STOPPED/
 * LS_PAUSED on a clean stop, LS_FAILED otherwise — and persist it.
 * NOTE(review): some branch lines are elided in this excerpt. */
4980 static int lfsck_layout_master_post(const struct lu_env *env,
4981 struct lfsck_component *com,
4982 int result, bool init)
4984 struct lfsck_instance *lfsck = com->lc_lfsck;
4985 struct lfsck_layout *lo = com->lc_file_ram;
4986 struct lfsck_layout_master_data *llmd = com->lc_data;
4987 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4988 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4989 struct l_wait_info lwi = { 0 };
/* Tell the assistant about the phase-1 result; non-positive result
 * means it should exit rather than start phase 2. */
4994 llmd->llmd_post_result = result;
4995 llmd->llmd_to_post = 1;
4996 if (llmd->llmd_post_result <= 0)
4997 llmd->llmd_exit = 1;
4999 wake_up_all(&athread->t_ctl_waitq);
5000 l_wait_event(mthread->t_ctl_waitq,
5001 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
5002 thread_is_stopped(athread),
5005 if (llmd->llmd_assistant_status < 0)
5006 result = llmd->llmd_assistant_status;
5008 down_write(&com->lc_sem);
5009 spin_lock(&lfsck->li_lock);
5010 /* When the LFSCK fails, there may be some prefetched objects that
5011 * have not been processed yet; we do not know the exact position,
5012 * so just restart from the last check-point next time. */
5013 if (!init && !llmd->llmd_exit)
5014 lo->ll_pos_last_checkpoint =
5015 lfsck->li_pos_current.lp_oit_cookie;
5018 lo->ll_status = LS_SCANNING_PHASE2;
5019 lo->ll_flags |= LF_SCANNED_ONCE;
5020 lo->ll_flags &= ~LF_UPGRADE;
5021 list_move_tail(&com->lc_link, &lfsck->li_list_double_scan);
5022 } else if (result == 0) {
5023 lo->ll_status = lfsck->li_status;
5024 if (lo->ll_status == 0)
5025 lo->ll_status = LS_STOPPED;
5026 if (lo->ll_status != LS_PAUSED) {
5027 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
5030 lo->ll_status = LS_FAILED;
5031 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
5033 spin_unlock(&lfsck->li_lock);
5036 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
5037 HALF_SEC - lfsck->li_time_last_checkpoint);
5038 lo->ll_time_last_checkpoint = cfs_time_current_sec();
5039 lo->ll_objs_checked_phase1 += com->lc_new_checked;
5040 com->lc_new_checked = 0;
5043 rc = lfsck_layout_store(env, com);
5044 up_write(&com->lc_sem);
5046 CDEBUG(D_LFSCK, "%s: layout LFSCK master post done: rc = %d\n",
5047 lfsck_lfsck2name(lfsck), rc);
/* Slave (OST) phase-1 post-processing: flush the in-memory LAST_ID files,
 * update the trace-file status (phase 2 / stopped / failed) under li_lock,
 * notify the upper layer (e.g. LAST_ID rebuild done), persist the trace
 * file, tell the master(s) phase 1 finished, and tear down the orphan
 * rbtree.
 * NOTE(review): some branch lines are elided in this excerpt. */
5052 static int lfsck_layout_slave_post(const struct lu_env *env,
5053 struct lfsck_component *com,
5054 int result, bool init)
5056 struct lfsck_instance *lfsck = com->lc_lfsck;
5057 struct lfsck_layout *lo = com->lc_file_ram;
5061 rc = lfsck_layout_lastid_store(env, com);
5065 LASSERT(lfsck->li_out_notify != NULL);
5067 down_write(&com->lc_sem);
5068 spin_lock(&lfsck->li_lock);
5070 lo->ll_pos_last_checkpoint =
5071 lfsck->li_pos_current.lp_oit_cookie;
5073 lo->ll_status = LS_SCANNING_PHASE2;
5074 lo->ll_flags |= LF_SCANNED_ONCE;
/* LAST_ID files were regenerated during phase 1. */
5075 if (lo->ll_flags & LF_CRASHED_LASTID) {
5077 lo->ll_flags &= ~LF_CRASHED_LASTID;
5079 lo->ll_flags &= ~LF_UPGRADE;
5080 list_move_tail(&com->lc_link, &lfsck->li_list_double_scan);
5081 } else if (result == 0) {
5082 lo->ll_status = lfsck->li_status;
5083 if (lo->ll_status == 0)
5084 lo->ll_status = LS_STOPPED;
5085 if (lo->ll_status != LS_PAUSED)
5086 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
5088 lo->ll_status = LS_FAILED;
5089 list_move_tail(&com->lc_link, &lfsck->li_list_idle);
5091 spin_unlock(&lfsck->li_lock);
5094 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5098 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
5099 HALF_SEC - lfsck->li_time_last_checkpoint);
5100 lo->ll_time_last_checkpoint = cfs_time_current_sec();
5101 lo->ll_objs_checked_phase1 += com->lc_new_checked;
5102 com->lc_new_checked = 0;
5105 rc = lfsck_layout_store(env, com);
5106 up_write(&com->lc_sem);
/* Inform all registered MDTs that this OST finished phase 1. */
5108 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
5111 lfsck_rbtree_cleanup(env, com);
5113 CDEBUG(D_LFSCK, "%s: layout LFSCK slave post done: rc = %d\n",
5114 lfsck_lfsck2name(lfsck), rc);
/*
 * Dump the layout LFSCK state into the given seq_file (procfs reader).
 *
 * Under com->lc_sem read lock, prints the persistent trace-file fields
 * (status, flags, params, timestamps, positions, per-type repair counters)
 * and, when a scan is in progress, derives average and real-time speeds
 * from the checked counters and accumulated run times.
 *
 * NOTE(review): this chunk was extracted with gaps (embedded source line
 * numbers are non-contiguous), so some statements, braces and printf
 * arguments are not visible here.
 */
5119 static int lfsck_layout_dump(const struct lu_env *env,
5120 struct lfsck_component *com, struct seq_file *m)
5122 struct lfsck_instance *lfsck = com->lc_lfsck;
5123 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
5124 struct lfsck_layout *lo = com->lc_file_ram;
/* Serialize against concurrent updates of the in-RAM trace file. */
5127 down_read(&com->lc_sem);
5128 seq_printf(m, "name: lfsck_layout\n"
5134 lfsck_status2names(lo->ll_status));
5136 rc = lfsck_bits_dump(m, lo->ll_flags, lfsck_flags_names, "flags");
5140 rc = lfsck_bits_dump(m, bk->lb_param, lfsck_param_names, "param");
5144 rc = lfsck_time_dump(m, lo->ll_time_last_complete,
5145 "time_since_last_completed");
5149 rc = lfsck_time_dump(m, lo->ll_time_latest_start,
5150 "time_since_latest_start");
5154 rc = lfsck_time_dump(m, lo->ll_time_last_checkpoint,
5155 "time_since_last_checkpoint");
5159 seq_printf(m, "latest_start_position: "LPU64"\n"
5160 "last_checkpoint_position: "LPU64"\n"
5161 "first_failure_position: "LPU64"\n",
5162 lo->ll_pos_latest_start,
5163 lo->ll_pos_last_checkpoint,
5164 lo->ll_pos_first_inconsistent);
/* Per-inconsistency-type repair counters; LLIT_* enums are 1-based,
 * hence the "- 1" when indexing ll_objs_repaired[]. */
5166 seq_printf(m, "success_count: %u\n"
5167 "repaired_dangling: "LPU64"\n"
5168 "repaired_unmatched_pair: "LPU64"\n"
5169 "repaired_multiple_referenced: "LPU64"\n"
5170 "repaired_orphan: "LPU64"\n"
5171 "repaired_inconsistent_owner: "LPU64"\n"
5172 "repaired_others: "LPU64"\n"
5173 "skipped: "LPU64"\n"
5174 "failed_phase1: "LPU64"\n"
5175 "failed_phase2: "LPU64"\n",
5176 lo->ll_success_count,
5177 lo->ll_objs_repaired[LLIT_DANGLING - 1],
5178 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
5179 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
5180 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
5181 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
5182 lo->ll_objs_repaired[LLIT_OTHERS - 1],
5183 lo->ll_objs_skipped,
5184 lo->ll_objs_failed_phase1,
5185 lo->ll_objs_failed_phase2);
5187 if (lo->ll_status == LS_SCANNING_PHASE1) {
5189 const struct dt_it_ops *iops;
/* Time since last checkpoint, in jiffies, for real-time speed. */
5190 cfs_duration_t duration = cfs_time_current() -
5191 lfsck->li_time_last_checkpoint;
5192 __u64 checked = lo->ll_objs_checked_phase1 +
5193 com->lc_new_checked;
5194 __u64 speed = checked;
5195 __u64 new_checked = com->lc_new_checked * HZ;
/* HALF_SEC rounds the duration to the nearest second. */
5196 __u32 rtime = lo->ll_run_time_phase1 +
5197 cfs_duration_sec(duration + HALF_SEC);
5200 do_div(new_checked, duration);
5202 do_div(speed, rtime);
5203 seq_printf(m, "checked_phase1: "LPU64"\n"
5204 "checked_phase2: "LPU64"\n"
5205 "run_time_phase1: %u seconds\n"
5206 "run_time_phase2: %u seconds\n"
5207 "average_speed_phase1: "LPU64" items/sec\n"
5208 "average_speed_phase2: N/A\n"
5209 "real-time_speed_phase1: "LPU64" items/sec\n"
5210 "real-time_speed_phase2: N/A\n",
5212 lo->ll_objs_checked_phase2,
5214 lo->ll_run_time_phase2,
5218 LASSERT(lfsck->li_di_oit != NULL);
5220 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
5222 /* The low layer otable-based iteration position may NOT
5223 * exactly match the layout-based directory traversal
5224 * cookie. Generally, it is not a serious issue. But the
5225 * caller should NOT make assumption on that. */
5226 pos = iops->store(env, lfsck->li_di_oit);
5227 if (!lfsck->li_current_oit_processed)
5229 seq_printf(m, "current_position: "LPU64"\n", pos);
5231 } else if (lo->ll_status == LS_SCANNING_PHASE2) {
5232 cfs_duration_t duration = cfs_time_current() -
5233 lfsck->li_time_last_checkpoint;
5234 __u64 checked = lo->ll_objs_checked_phase2 +
5235 com->lc_new_checked;
/* Phase 1 is finished here, so its average speed is final. */
5236 __u64 speed1 = lo->ll_objs_checked_phase1;
5237 __u64 speed2 = checked;
5238 __u64 new_checked = com->lc_new_checked * HZ;
5239 __u32 rtime = lo->ll_run_time_phase2 +
5240 cfs_duration_sec(duration + HALF_SEC);
5243 do_div(new_checked, duration);
5244 if (lo->ll_run_time_phase1 != 0)
5245 do_div(speed1, lo->ll_run_time_phase1);
5247 do_div(speed2, rtime);
5248 rc = seq_printf(m, "checked_phase1: "LPU64"\n"
5249 "checked_phase2: "LPU64"\n"
5250 "run_time_phase1: %u seconds\n"
5251 "run_time_phase2: %u seconds\n"
5252 "average_speed_phase1: "LPU64" items/sec\n"
5253 "average_speed_phase2: "LPU64" items/sec\n"
5254 "real-time_speed_phase1: N/A\n"
5255 "real-time_speed_phase2: "LPU64" items/sec\n"
5256 "current_position: "DFID"\n",
5257 lo->ll_objs_checked_phase1,
5259 lo->ll_run_time_phase1,
5264 PFID(&com->lc_fid_latest_scanned_phase2));
/* Neither phase running: report final averages only. */
5269 __u64 speed1 = lo->ll_objs_checked_phase1;
5270 __u64 speed2 = lo->ll_objs_checked_phase2;
5272 if (lo->ll_run_time_phase1 != 0)
5273 do_div(speed1, lo->ll_run_time_phase1);
5274 if (lo->ll_run_time_phase2 != 0)
5275 do_div(speed2, lo->ll_run_time_phase2);
5276 seq_printf(m, "checked_phase1: "LPU64"\n"
5277 "checked_phase2: "LPU64"\n"
5278 "run_time_phase1: %u seconds\n"
5279 "run_time_phase2: %u seconds\n"
5280 "average_speed_phase1: "LPU64" items/sec\n"
5281 "average_speed_phase2: "LPU64" objs/sec\n"
5282 "real-time_speed_phase1: N/A\n"
5283 "real-time_speed_phase2: N/A\n"
5284 "current_position: N/A\n",
5285 lo->ll_objs_checked_phase1,
5286 lo->ll_objs_checked_phase2,
5287 lo->ll_run_time_phase1,
5288 lo->ll_run_time_phase2,
5293 up_read(&com->lc_sem);
/*
 * Master-side phase-2 ("double scan") kickoff.
 *
 * If the component is not in LS_SCANNING_PHASE2, there is nothing to do
 * (early return path not visible in this extract).  Otherwise flag the
 * assistant thread to enter double scan, wake it, and wait until it
 * either reports it is in double scan or has stopped; propagate any
 * negative assistant status to the caller.
 */
5298 static int lfsck_layout_master_double_scan(const struct lu_env *env,
5299 struct lfsck_component *com)
5301 struct lfsck_layout_master_data *llmd = com->lc_data;
5302 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5303 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5304 struct lfsck_layout *lo = com->lc_file_ram;
5305 struct l_wait_info lwi = { 0 };
5307 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
/* Hand phase 2 over to the assistant thread and wait for the switch. */
5310 llmd->llmd_to_double_scan = 1;
5311 wake_up_all(&athread->t_ctl_waitq);
5312 l_wait_event(mthread->t_ctl_waitq,
5313 llmd->llmd_in_double_scan ||
5314 thread_is_stopped(athread),
5316 if (llmd->llmd_assistant_status < 0)
5317 return llmd->llmd_assistant_status;
/*
 * Slave-side phase-2 scan: wait until every master on llsd_master_list
 * has finished with this slave (or the thread is told to stop).
 *
 * Polls the master(s) every 30 seconds via
 * lfsck_layout_slave_query_master(), then sleeps until the master list
 * drains or the local thread stops.  On completion, records the phase-2
 * result, releases the rbtree used for FID-access tracking, and notifies
 * the master with LE_PHASE2_DONE.
 */
5322 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
5323 struct lfsck_component *com)
5325 struct lfsck_instance *lfsck = com->lc_lfsck;
5326 struct lfsck_layout_slave_data *llsd = com->lc_data;
5327 struct lfsck_layout *lo = com->lc_file_ram;
5328 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Not in phase 2: clean up and tell the master we are done. */
5332 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
5333 lfsck_rbtree_cleanup(env, com);
5334 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
5338 CDEBUG(D_LFSCK, "%s: layout LFSCK slave phase2 scan start\n",
5339 lfsck_lfsck2name(lfsck));
5341 atomic_inc(&lfsck->li_double_scan_count);
/* Reset checkpoint accounting for the new phase. */
5343 com->lc_new_checked = 0;
5344 com->lc_new_scanned = 0;
5345 com->lc_time_last_checkpoint = cfs_time_current();
5346 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
5347 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Loop head not visible in this extract: re-query the master every
 * 30 seconds until the master list empties or we are stopped. */
5350 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
5353 rc = lfsck_layout_slave_query_master(env, com);
5354 if (list_empty(&llsd->llsd_master_list)) {
5355 if (unlikely(!thread_is_running(thread)))
5366 rc = l_wait_event(thread->t_ctl_waitq,
5367 !thread_is_running(thread) ||
5368 list_empty(&llsd->llsd_master_list),
5370 if (unlikely(!thread_is_running(thread)))
/* -ETIMEDOUT just means "poll the master again". */
5373 if (rc == -ETIMEDOUT)
5376 GOTO(done, rc = (rc < 0 ? rc : 1));
5380 rc = lfsck_layout_double_scan_result(env, com, rc);
5382 lfsck_rbtree_cleanup(env, com);
5383 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
/* Wake anyone waiting for all components' double scans to finish. */
5384 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
5385 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
5387 CDEBUG(D_LFSCK, "%s: layout LFSCK slave phase2 scan finished, "
5388 "status %d: rc = %d\n",
5389 lfsck_lfsck2name(lfsck), lo->ll_status, rc);
/*
 * Release the master-side private data (lfsck_layout_master_data).
 *
 * The assistant thread must already be init or stopped and the request
 * list empty (asserted).  Under each tgt-descs lock, unlink every OST
 * and MDT target from the phase1/phase2/main layout lists before the
 * llmd structure itself is freed (the OBD_FREE is outside this extract).
 */
5394 static void lfsck_layout_master_data_release(const struct lu_env *env,
5395 struct lfsck_component *com)
5397 struct lfsck_layout_master_data *llmd = com->lc_data;
5398 struct lfsck_instance *lfsck = com->lc_lfsck;
5399 struct lfsck_tgt_descs *ltds;
5400 struct lfsck_tgt_desc *ltd;
5401 struct lfsck_tgt_desc *next;
5403 LASSERT(llmd != NULL);
5404 LASSERT(thread_is_init(&llmd->llmd_thread) ||
5405 thread_is_stopped(&llmd->llmd_thread));
5406 LASSERT(list_empty(&llmd->llmd_req_list));
5408 com->lc_data = NULL;
/* Detach all OST targets from the master's tracking lists. */
5410 ltds = &lfsck->li_ost_descs;
5411 spin_lock(&ltds->ltd_lock);
5412 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
5413 ltd_layout_phase_list) {
5414 list_del_init(&ltd->ltd_layout_phase_list);
5416 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
5417 ltd_layout_phase_list) {
5418 list_del_init(&ltd->ltd_layout_phase_list);
5420 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
5422 list_del_init(&ltd->ltd_layout_list);
5424 spin_unlock(&ltds->ltd_lock);
/* Same for MDT targets. */
5426 ltds = &lfsck->li_mdt_descs;
5427 spin_lock(&ltds->ltd_lock);
5428 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
5429 ltd_layout_phase_list) {
5430 list_del_init(&ltd->ltd_layout_phase_list);
5432 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
5433 ltd_layout_phase_list) {
5434 list_del_init(&ltd->ltd_layout_phase_list);
5436 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
5438 list_del_init(&ltd->ltd_layout_list);
5440 spin_unlock(&ltds->ltd_lock);
/*
 * Release the slave-side private data (lfsck_layout_slave_data):
 * drop every per-sequence LAST_ID object reference, empty the master
 * tracking list, free the FID-access rbtree, and detach from com.
 * (The frees of the list entries / llsd itself are not visible in this
 * extract.)
 */
5445 static void lfsck_layout_slave_data_release(const struct lu_env *env,
5446 struct lfsck_component *com)
5448 struct lfsck_layout_slave_data *llsd = com->lc_data;
5449 struct lfsck_layout_seq *lls;
5450 struct lfsck_layout_seq *next;
5451 struct lfsck_layout_slave_target *llst;
5452 struct lfsck_layout_slave_target *tmp;
5454 LASSERT(llsd != NULL);
5456 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
5458 list_del_init(&lls->lls_list);
/* Each sequence holds a reference on its LAST_ID object. */
5459 lfsck_object_put(env, lls->lls_lastid_obj);
5463 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
5465 list_del_init(&llst->llst_list);
5469 lfsck_rbtree_cleanup(env, com);
5470 com->lc_data = NULL;
/*
 * Ask the master assistant thread to exit and wait (uninterruptibly,
 * lwi = {0}) until it is back to init or fully stopped.
 */
5474 static void lfsck_layout_master_quit(const struct lu_env *env,
5475 struct lfsck_component *com)
5477 struct lfsck_layout_master_data *llmd = com->lc_data;
5478 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5479 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5480 struct l_wait_info lwi = { 0 };
5482 llmd->llmd_exit = 1;
5483 wake_up_all(&athread->t_ctl_waitq);
5484 l_wait_event(mthread->t_ctl_waitq,
5485 thread_is_init(athread) ||
5486 thread_is_stopped(athread),
/* Slave-side quit: only the FID-access rbtree needs tearing down. */
5490 static void lfsck_layout_slave_quit(const struct lu_env *env,
5491 struct lfsck_component *com)
5493 lfsck_rbtree_cleanup(env, com);
/*
 * Handle a notification RPC arriving at the layout LFSCK master.
 *
 * LE_PAIRS_VERIFY is serviced directly via
 * lfsck_layout_master_check_pairs().  For LE_PHASE1_DONE /
 * LE_PHASE2_DONE / LE_PEER_EXIT from a slave target, update that
 * target's position on the phase lists under the tgt-descs lock and,
 * depending on status and the LPF_FAILOUT bookmark param, either stop
 * the whole LFSCK or wake the assistant for orphan handling.
 *
 * NOTE(review): several lines (braces, returns, the "fail" flag
 * assignments) fall in extraction gaps here.
 */
5496 static int lfsck_layout_master_in_notify(const struct lu_env *env,
5497 struct lfsck_component *com,
5498 struct lfsck_request *lr)
5500 struct lfsck_instance *lfsck = com->lc_lfsck;
5501 struct lfsck_layout *lo = com->lc_file_ram;
5502 struct lfsck_layout_master_data *llmd = com->lc_data;
5503 struct lfsck_tgt_descs *ltds;
5504 struct lfsck_tgt_desc *ltd;
5508 if (lr->lr_event == LE_PAIRS_VERIFY) {
5511 rc = lfsck_layout_master_check_pairs(env, com, &lr->lr_fid,
5517 CDEBUG(D_LFSCK, "%s: layout LFSCK master handle notify %u "
5518 "from %s %x, status %d\n", lfsck_lfsck2name(lfsck),
5519 lr->lr_event, (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5520 lr->lr_index, lr->lr_status);
/* Only the three peer life-cycle events are handled below. */
5522 if (lr->lr_event != LE_PHASE1_DONE &&
5523 lr->lr_event != LE_PHASE2_DONE &&
5524 lr->lr_event != LE_PEER_EXIT)
5527 if (lr->lr_flags & LEF_FROM_OST)
5528 ltds = &lfsck->li_ost_descs;
5530 ltds = &lfsck->li_mdt_descs;
5531 spin_lock(&ltds->ltd_lock);
5532 ltd = LTD_TGT(ltds, lr->lr_index);
5534 spin_unlock(&ltds->ltd_lock);
5539 list_del_init(&ltd->ltd_layout_phase_list);
5540 switch (lr->lr_event) {
5541 case LE_PHASE1_DONE:
/* Peer failed phase 1: drop it and mark the scan incomplete. */
5542 if (lr->lr_status <= 0) {
5543 ltd->ltd_layout_done = 1;
5544 list_del_init(&ltd->ltd_layout_list);
5545 lo->ll_flags |= LF_INCOMPLETE;
/* Peer finished phase 1 successfully: move it to phase-2 list. */
5550 if (lr->lr_flags & LEF_FROM_OST) {
5551 if (list_empty(&ltd->ltd_layout_list))
5552 list_add_tail(&ltd->ltd_layout_list,
5553 &llmd->llmd_ost_list);
5554 list_add_tail(&ltd->ltd_layout_phase_list,
5555 &llmd->llmd_ost_phase2_list);
5557 if (list_empty(&ltd->ltd_layout_list))
5558 list_add_tail(&ltd->ltd_layout_list,
5559 &llmd->llmd_mdt_list);
5560 list_add_tail(&ltd->ltd_layout_phase_list,
5561 &llmd->llmd_mdt_phase2_list);
5564 case LE_PHASE2_DONE:
5565 ltd->ltd_layout_done = 1;
5566 list_del_init(&ltd->ltd_layout_list);
/* LE_PEER_EXIT case (label in extraction gap): peer going away. */
5570 ltd->ltd_layout_done = 1;
5571 list_del_init(&ltd->ltd_layout_list);
5572 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT))
5573 lo->ll_flags |= LF_INCOMPLETE;
5578 spin_unlock(&ltds->ltd_lock);
5580 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5581 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5583 memset(stop, 0, sizeof(*stop));
5584 stop->ls_status = lr->lr_status;
/* Don't re-broadcast the stop back to the peers. */
5585 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5586 lfsck_stop(env, lfsck->li_bottom, stop);
5587 } else if (lfsck_layout_master_to_orphan(llmd)) {
5588 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Handle a notification RPC arriving at a layout LFSCK slave (OST side).
 *
 * Events:
 *  - LE_FID_ACCESSED: record the FID in the access rbtree bitmap.
 *  - LE_CONDITIONAL_DESTROY: conditionally destroy an orphan OST-object.
 *  - LE_PAIRS_VERIFY: two-step PFID consistency check; may end with a
 *    local PFID xattr repair (LPVS_INCONSISTENT_TOFIX).
 *  - LE_PHASE2_DONE / LE_PEER_EXIT: remove the sending MDT from the
 *    master list; on LE_PEER_EXIT with LPF_FAILOUT, stop the local scan.
 */
5594 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
5595 struct lfsck_component *com,
5596 struct lfsck_request *lr)
5598 struct lfsck_instance *lfsck = com->lc_lfsck;
5599 struct lfsck_layout_slave_data *llsd = com->lc_data;
5600 struct lfsck_layout_slave_target *llst;
5604 switch (lr->lr_event) {
5605 case LE_FID_ACCESSED:
5606 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
5608 case LE_CONDITIONAL_DESTROY:
5609 rc = lfsck_layout_slave_conditional_destroy(env, com, lr);
5611 case LE_PAIRS_VERIFY: {
5612 lr->lr_status = LPVS_INIT;
5613 /* Firstly, if the MDT-object which is claimed via OST-object
5614 * local stored PFID xattr recognizes the OST-object, then it
5615 * must be that the client given PFID is wrong. */
5616 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5621 lr->lr_status = LPVS_INCONSISTENT;
5622 /* The OST-object local stored PFID xattr is stale. We need to
5623 * check whether the MDT-object that is claimed via the client
5624 * given PFID information recognizes the OST-object or not. If
5625 * matches, then need to update the OST-object's PFID xattr. */
5626 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5629 * We are not sure whether the client given PFID information
5630 * is correct or not, do nothing to avoid improper fixing.
5633 * The client given PFID information is also invalid, we can
5634 * NOT fix the OST-object inconsistency.
5639 lr->lr_status = LPVS_INCONSISTENT_TOFIX;
5640 rc = lfsck_layout_slave_repair_pfid(env, com, lr);
5644 case LE_PHASE2_DONE:
5646 CDEBUG(D_LFSCK, "%s: layout LFSCK slave handle notify %u "
5647 "from MDT %x, status %d\n", lfsck_lfsck2name(lfsck),
5648 lr->lr_event, lr->lr_index, lr->lr_status);
/* Drop the finished/exiting MDT from the tracking list; wake the
 * scan thread if no master remains. */
5654 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
5658 lfsck_layout_llst_put(llst);
5659 if (list_empty(&llsd->llsd_master_list))
5660 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
5662 if (lr->lr_event == LE_PEER_EXIT &&
5663 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5664 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5666 memset(stop, 0, sizeof(*stop));
5667 stop->ls_status = lr->lr_status;
5668 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5669 lfsck_stop(env, lfsck->li_bottom, stop);
/* Report the current layout LFSCK status (LS_* value from the trace
 * file); used by both master and slave operation tables. */
5675 static int lfsck_layout_query(const struct lu_env *env,
5676 struct lfsck_component *com)
5678 struct lfsck_layout *lo = com->lc_file_ram;
5680 return lo->ll_status;
/*
 * Notify one still-tracked target that the master is stopping
 * (co-paused): unlink it from the layout lists under the tgt-descs
 * lock, then send an async LE_PEER_EXIT / LS_CO_PAUSED LFSCK_NOTIFY
 * request.  The ltd reference taken here is expected to be released by
 * the async interpret callback (release path not visible here).
 */
5683 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
5684 struct lfsck_component *com,
5685 struct lfsck_tgt_descs *ltds,
5686 struct lfsck_tgt_desc *ltd,
5687 struct ptlrpc_request_set *set)
5689 struct lfsck_thread_info *info = lfsck_env_info(env);
5690 struct lfsck_async_interpret_args *laia = &info->lti_laia;
5691 struct lfsck_request *lr = &info->lti_lr;
5692 struct lfsck_instance *lfsck = com->lc_lfsck;
5695 spin_lock(&ltds->ltd_lock);
/* Not on the layout list => nothing to notify. */
5696 if (list_empty(&ltd->ltd_layout_list)) {
5697 LASSERT(list_empty(&ltd->ltd_layout_phase_list));
5698 spin_unlock(&ltds->ltd_lock);
5703 list_del_init(&ltd->ltd_layout_phase_list);
5704 list_del_init(&ltd->ltd_layout_list);
5705 spin_unlock(&ltds->ltd_lock);
5707 memset(lr, 0, sizeof(*lr));
5708 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
5709 lr->lr_event = LE_PEER_EXIT;
5710 lr->lr_active = LFSCK_TYPE_LAYOUT;
5711 lr->lr_status = LS_CO_PAUSED;
5712 if (ltds == &lfsck->li_ost_descs)
5713 lr->lr_flags = LEF_TO_OST;
5715 laia->laia_com = com;
5716 laia->laia_ltds = ltds;
/* Pin the target for the duration of the async request. */
5717 atomic_inc(&ltd->ltd_ref);
5718 laia->laia_ltd = ltd;
5720 laia->laia_shared = 0;
5722 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
5723 lfsck_layout_master_async_interpret,
5724 laia, LFSCK_NOTIFY);
5726 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to notify %s %x "
5727 "for co-stop: rc = %d\n",
5728 lfsck_lfsck2name(lfsck),
5729 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5730 ltd->ltd_index, rc);
5737 /* with lfsck::li_lock held */
/*
 * Let an MDT join an already-running slave scan for orphan handling.
 * Only meaningful when LPF_ORPHAN was requested, the caller supplied a
 * valid index, and the access rbtree is being maintained.  Registers
 * the MDT on llsd_master_list; if the local thread is found not running
 * afterwards, the registration is rolled back.  Note the function
 * temporarily drops and re-takes lfsck->li_lock around the list update.
 */
5738 static int lfsck_layout_slave_join(const struct lu_env *env,
5739 struct lfsck_component *com,
5740 struct lfsck_start_param *lsp)
5742 struct lfsck_instance *lfsck = com->lc_lfsck;
5743 struct lfsck_layout_slave_data *llsd = com->lc_data;
5744 struct lfsck_layout_slave_target *llst;
5745 struct lfsck_start *start = lsp->lsp_start;
5749 if (start == NULL || !(start->ls_flags & LPF_ORPHAN))
5752 if (!lsp->lsp_index_valid)
5755 /* If someone is running the LFSCK without orphan handling,
5756 * it will not maintain the object accessing rbtree. So we
5757 * cannot join it for orphan handling. */
5758 if (!llsd->llsd_rbtree_valid)
5761 spin_unlock(&lfsck->li_lock);
5762 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
5763 spin_lock(&lfsck->li_lock);
/* The scan stopped while we were unlocked: undo the registration. */
5764 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
5765 spin_unlock(&lfsck->li_lock);
5766 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index,
5769 lfsck_layout_llst_put(llst);
5770 spin_lock(&lfsck->li_lock);
/* Operation table used when this node is the layout LFSCK master (MDT). */
5777 static struct lfsck_operations lfsck_layout_master_ops = {
5778 .lfsck_reset = lfsck_layout_reset,
5779 .lfsck_fail = lfsck_layout_fail,
5780 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
5781 .lfsck_prep = lfsck_layout_master_prep,
5782 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
5783 .lfsck_exec_dir = lfsck_layout_exec_dir,
5784 .lfsck_post = lfsck_layout_master_post,
5785 .lfsck_interpret = lfsck_layout_master_async_interpret,
5786 .lfsck_dump = lfsck_layout_dump,
5787 .lfsck_double_scan = lfsck_layout_master_double_scan,
5788 .lfsck_data_release = lfsck_layout_master_data_release,
5789 .lfsck_quit = lfsck_layout_master_quit,
5790 .lfsck_in_notify = lfsck_layout_master_in_notify,
5791 .lfsck_query = lfsck_layout_query,
5792 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/* Operation table used when this node is a layout LFSCK slave (OST);
 * note the extra .lfsck_join and the absence of stop_notify/interpret. */
5795 static struct lfsck_operations lfsck_layout_slave_ops = {
5796 .lfsck_reset = lfsck_layout_reset,
5797 .lfsck_fail = lfsck_layout_fail,
5798 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
5799 .lfsck_prep = lfsck_layout_slave_prep,
5800 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
5801 .lfsck_exec_dir = lfsck_layout_exec_dir,
5802 .lfsck_post = lfsck_layout_slave_post,
5803 .lfsck_dump = lfsck_layout_dump,
5804 .lfsck_double_scan = lfsck_layout_slave_double_scan,
5805 .lfsck_data_release = lfsck_layout_slave_data_release,
5806 .lfsck_quit = lfsck_layout_slave_quit,
5807 .lfsck_in_notify = lfsck_layout_slave_in_notify,
5808 .lfsck_query = lfsck_layout_query,
5809 .lfsck_join = lfsck_layout_slave_join,
/*
 * Create and register the layout LFSCK component on an lfsck_instance.
 *
 * Allocates the component, picks master or slave ops/private-data based
 * on lfsck->li_master, allocates the RAM and disk trace-file buffers,
 * locates/creates the on-disk trace file under the local root, loads or
 * (re)initializes the trace file, and links the component onto the
 * instance's idle or scan list according to the loaded status.  On any
 * failure the component is torn down via lfsck_component_cleanup().
 */
5812 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
5814 struct lfsck_component *com;
5815 struct lfsck_layout *lo;
5816 struct dt_object *root = NULL;
5817 struct dt_object *obj;
5825 INIT_LIST_HEAD(&com->lc_link);
5826 INIT_LIST_HEAD(&com->lc_link_dir);
5827 init_rwsem(&com->lc_sem);
5828 atomic_set(&com->lc_ref, 1);
5829 com->lc_lfsck = lfsck;
5830 com->lc_type = LFSCK_TYPE_LAYOUT;
5831 if (lfsck->li_master) {
5832 struct lfsck_layout_master_data *llmd;
5834 com->lc_ops = &lfsck_layout_master_ops;
5835 OBD_ALLOC_PTR(llmd);
5837 GOTO(out, rc = -ENOMEM);
5839 INIT_LIST_HEAD(&llmd->llmd_req_list);
5840 spin_lock_init(&llmd->llmd_lock);
5841 INIT_LIST_HEAD(&llmd->llmd_ost_list);
5842 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
5843 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
5844 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
5845 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
5846 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
5847 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
5848 com->lc_data = llmd;
5850 struct lfsck_layout_slave_data *llsd;
5852 com->lc_ops = &lfsck_layout_slave_ops;
5853 OBD_ALLOC_PTR(llsd);
5855 GOTO(out, rc = -ENOMEM);
5857 INIT_LIST_HEAD(&llsd->llsd_seq_list);
5858 INIT_LIST_HEAD(&llsd->llsd_master_list);
5859 spin_lock_init(&llsd->llsd_lock);
5860 llsd->llsd_rb_root = RB_ROOT;
5861 rwlock_init(&llsd->llsd_rb_lock);
5862 com->lc_data = llsd;
/* RAM + disk copies of the persistent trace file. */
5864 com->lc_file_size = sizeof(*lo);
5865 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
5866 if (com->lc_file_ram == NULL)
5867 GOTO(out, rc = -ENOMEM);
5869 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
5870 if (com->lc_file_disk == NULL)
5871 GOTO(out, rc = -ENOMEM);
5873 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
5875 GOTO(out, rc = PTR_ERR(root));
5877 if (unlikely(!dt_try_as_dir(env, root)))
5878 GOTO(out, rc = -ENOTDIR);
5880 obj = local_file_find_or_create(env, lfsck->li_los, root,
5882 S_IFREG | S_IRUGO | S_IWUSR);
5884 GOTO(out, rc = PTR_ERR(obj));
/* Load existing state; reset on corruption (rc > 0 path, partially in
 * an extraction gap), initialize fresh on -ENOENT. */
5887 rc = lfsck_layout_load(env, com);
5889 rc = lfsck_layout_reset(env, com, true);
5890 else if (rc == -ENOENT)
5891 rc = lfsck_layout_init(env, com);
5896 lo = com->lc_file_ram;
5897 switch (lo->ll_status) {
/* Terminal statuses (case labels in extraction gap): idle list. */
5903 spin_lock(&lfsck->li_lock);
5904 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5905 spin_unlock(&lfsck->li_lock);
5908 CERROR("%s: unknown lfsck_layout status %d\n",
5909 lfsck_lfsck2name(lfsck), lo->ll_status);
5911 case LS_SCANNING_PHASE1:
5912 case LS_SCANNING_PHASE2:
5913 /* No need to store the status to disk right now.
5914 * If the system crashed before the status stored,
5915 * it will be loaded back when next time. */
5916 lo->ll_status = LS_CRASHED;
5917 lo->ll_flags |= LF_INCOMPLETE;
5924 spin_lock(&lfsck->li_lock);
5925 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
5926 spin_unlock(&lfsck->li_lock);
/* A crashed LAST_ID needs the OSD to rebuild it; tell it now. */
5930 if (lo->ll_flags & LF_CRASHED_LASTID) {
5931 LASSERT(lfsck->li_out_notify != NULL);
5933 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5934 LE_LASTID_REBUILDING);
5940 if (root != NULL && !IS_ERR(root))
5941 lu_object_put(env, &root->do_lu);
5944 lfsck_component_cleanup(env, com);
5945 CERROR("%s: fail to init layout LFSCK component: rc = %d\n",
5946 lfsck_lfsck2name(lfsck), rc);
/* Iterator state for walking orphan OST-objects via the access rbtree;
 * returned (as a dt_it) from lfsck_orphan_it_init(). */
5952 struct lfsck_orphan_it {
5953 struct lfsck_component *loi_com;
/* Current rbtree node being scanned. */
5954 struct lfsck_rbtree_node *loi_lrn;
/* The requesting MDT's slot; holds resume key/hash across sessions. */
5955 struct lfsck_layout_slave_target *loi_llst;
5956 struct lu_fid loi_key;
5957 struct lu_orphan_rec loi_rec;
/* Set once the rbtree has been exhausted. */
5959 unsigned int loi_over:1;
/*
 * Decide whether the MDT with index @idx owns the sequence of @fid.
 *
 * Abnormal (non-normal) FIDs are attributed to MDT0.  Otherwise the
 * server-side FLD is consulted for the sequence's MDT index.  Return
 * values at the comparison sites are not visible in this extract;
 * presumably >0 on match, 0/negative otherwise — confirm against the
 * caller in lfsck_orphan_it_next().
 */
5962 static int lfsck_fid_match_idx(const struct lu_env *env,
5963 struct lfsck_instance *lfsck,
5964 const struct lu_fid *fid, int idx)
5966 struct seq_server_site *ss;
5967 struct lu_server_fld *sf;
5968 struct lu_seq_range range = { 0 };
5971 /* All abnormal cases will be returned to MDT0. */
5972 if (!fid_is_norm(fid)) {
5979 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
5980 if (unlikely(ss == NULL))
5983 sf = ss->ss_server_fld;
5984 LASSERT(sf != NULL);
5986 fld_range_set_any(&range);
5987 rc = fld_server_lookup(env, sf, fid_seq(fid), &range);
5991 if (!fld_range_is_mdt(&range))
5994 if (range.lsr_index == idx)
/*
 * Destroy an orphan OST-object in a single local transaction:
 * declare ref_del + destroy, start the transaction, then drop the
 * nlink and destroy the object under its write lock.
 */
6000 static void lfsck_layout_destroy_orphan(const struct lu_env *env,
6001 struct dt_device *dev,
6002 struct dt_object *obj)
6004 struct thandle *handle;
6008 handle = dt_trans_create(env, dev);
6012 rc = dt_declare_ref_del(env, obj, handle);
6016 rc = dt_declare_destroy(env, obj, handle);
6020 rc = dt_trans_start_local(env, dev, handle);
6024 dt_write_lock(env, obj, 0);
6025 rc = dt_ref_del(env, obj, handle);
6027 rc = dt_destroy(env, obj, handle);
6028 dt_write_unlock(env, obj);
6033 dt_trans_stop(env, dev, handle);
6035 CDEBUG(D_LFSCK, "destroy orphan OST-object "DFID": rc = %d\n",
6036 PFID(lfsck_dto2fid(obj)), rc);
/* The orphan "index" only supports iteration, not point lookup.
 * NOTE(review): body not visible; upstream returns -EOPNOTSUPP — confirm. */
6041 static int lfsck_orphan_index_lookup(const struct lu_env *env,
6042 struct dt_object *dt,
6044 const struct dt_key *key,
6045 struct lustre_capa *capa)
/* Insertion is not supported on the orphan pseudo-index.
 * NOTE(review): body not visible; upstream returns -EOPNOTSUPP — confirm. */
6050 static int lfsck_orphan_index_declare_insert(const struct lu_env *env,
6051 struct dt_object *dt,
6052 const struct dt_rec *rec,
6053 const struct dt_key *key,
6054 struct thandle *handle)
/* Insertion is not supported on the orphan pseudo-index.
 * NOTE(review): body not visible; upstream returns -EOPNOTSUPP — confirm. */
6059 static int lfsck_orphan_index_insert(const struct lu_env *env,
6060 struct dt_object *dt,
6061 const struct dt_rec *rec,
6062 const struct dt_key *key,
6063 struct thandle *handle,
6064 struct lustre_capa *capa,
/* Deletion is not supported on the orphan pseudo-index.
 * NOTE(review): body not visible; upstream returns -EOPNOTSUPP — confirm. */
6070 static int lfsck_orphan_index_declare_delete(const struct lu_env *env,
6071 struct dt_object *dt,
6072 const struct dt_key *key,
6073 struct thandle *handle)
/* Deletion is not supported on the orphan pseudo-index.
 * NOTE(review): body not visible; upstream returns -EOPNOTSUPP — confirm. */
6078 static int lfsck_orphan_index_delete(const struct lu_env *env,
6079 struct dt_object *dt,
6080 const struct dt_key *key,
6081 struct thandle *handle,
6082 struct lustre_capa *capa)
/*
 * Start an orphan iteration session for the MDT identified by @attr.
 *
 * Finds the running LFSCK instance and its layout component, verifies
 * the access rbtree is valid, allocates the iterator, and claims this
 * MDT's slave-target slot (which carries the resume position).  On the
 * very first session (dd_record_fid_accessed still set, double-checked
 * under the write lock) it prunes rbtree nodes whose every known
 * object was already accessed.  The rbtree is then read-locked for the
 * whole session; lfsck_orphan_it_fini() releases it.
 * Returns the iterator as a dt_it, or an ERR_PTR on failure.
 */
6087 static struct dt_it *lfsck_orphan_it_init(const struct lu_env *env,
6088 struct dt_object *dt,
6090 struct lustre_capa *capa)
6092 struct dt_device *dev = lu2dt_dev(dt->do_lu.lo_dev);
6093 struct lfsck_instance *lfsck;
6094 struct lfsck_component *com = NULL;
6095 struct lfsck_layout_slave_data *llsd;
6096 struct lfsck_orphan_it *it = NULL;
6100 lfsck = lfsck_instance_find(dev, true, false);
6101 if (unlikely(lfsck == NULL))
6102 RETURN(ERR_PTR(-ENXIO));
6104 com = lfsck_component_find(lfsck, LFSCK_TYPE_LAYOUT);
6105 if (unlikely(com == NULL))
6106 GOTO(out, rc = -ENOENT);
6108 llsd = com->lc_data;
6109 if (!llsd->llsd_rbtree_valid)
6110 GOTO(out, rc = -ESRCH);
6114 GOTO(out, rc = -ENOMEM);
/* @attr carries the requesting MDT index (see llst_index usage). */
6116 it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
6117 if (it->loi_llst == NULL)
6118 GOTO(out, rc = -ENXIO);
6120 if (dev->dd_record_fid_accessed) {
6121 /* The first iteration against the rbtree, scan the whole rbtree
6122 * to remove the nodes which do NOT need to be handled. */
6123 write_lock(&llsd->llsd_rb_lock);
/* Re-check under the lock: another thread may have pruned already. */
6124 if (dev->dd_record_fid_accessed) {
6125 struct rb_node *node;
6126 struct rb_node *next;
6127 struct lfsck_rbtree_node *lrn;
6129 /* No need to record the fid accessing anymore. */
6130 dev->dd_record_fid_accessed = 0;
6132 node = rb_first(&llsd->llsd_rb_root);
6133 while (node != NULL) {
6134 next = rb_next(node);
6135 lrn = rb_entry(node, struct lfsck_rbtree_node,
/* Fully-accessed nodes hold no orphans: drop them. */
6137 if (atomic_read(&lrn->lrn_known_count) <=
6138 atomic_read(&lrn->lrn_accessed_count)) {
6139 rb_erase(node, &llsd->llsd_rb_root);
6140 lfsck_rbtree_free(lrn);
6145 write_unlock(&llsd->llsd_rb_lock);
6148 /* read lock the rbtree when init, and unlock when fini */
6149 read_lock(&llsd->llsd_rb_lock);
6157 lfsck_component_put(env, com);
6159 CDEBUG(D_LFSCK, "%s: init the orphan iteration: rc = %d\n",
6160 lfsck_lfsck2name(lfsck), rc);
6162 lfsck_instance_put(env, lfsck);
/* Error path: hand the errno back as an ERR_PTR iterator. */
6167 it = (struct lfsck_orphan_it *)ERR_PTR(rc);
6170 return (struct dt_it *)it;
/*
 * Finish an orphan iteration session: release the rbtree read lock
 * taken in lfsck_orphan_it_init(), save the current key/hash into the
 * MDT's slave-target slot so the next session can resume, and drop the
 * references on the slot and the component.
 */
6173 static void lfsck_orphan_it_fini(const struct lu_env *env,
6176 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6177 struct lfsck_component *com = it->loi_com;
6178 struct lfsck_layout_slave_data *llsd;
6179 struct lfsck_layout_slave_target *llst;
6182 CDEBUG(D_LFSCK, "%s: fini the orphan iteration\n",
6183 lfsck_lfsck2name(com->lc_lfsck));
6185 llsd = com->lc_data;
6186 read_unlock(&llsd->llsd_rb_lock);
6187 llst = it->loi_llst;
6188 LASSERT(llst != NULL);
6190 /* Save the key and hash for iterate next. */
6191 llst->llst_fid = it->loi_key;
6192 llst->llst_hash = it->loi_hash;
6193 lfsck_layout_llst_put(llst);
6194 lfsck_component_put(env, com);
/*
 * Advance the orphan iterator to the next orphan OST-object that the
 * requesting MDT (loi_llst->llst_index) should reclaim.
 *
 * Walks the access rbtree from the current key: skips fully-accessed
 * nodes, scans each node's known-bitmap for objects never accessed,
 * and for each candidate loads the object and its PFID xattr.
 * Pre-created objects (ctime 0, -ENODATA) are just marked accessed;
 * stale orphans without S_ISUID are destroyed on the spot; objects
 * without parent info are attributed to MDT0.  Otherwise the parent
 * FID from the xattr is matched against @idx via lfsck_fid_match_idx()
 * and the orphan record (parent FID + owner) is filled for the caller.
 *
 * NOTE(review): many control-flow lines (labels, gotos, RETURNs) are in
 * extraction gaps here, so the loop structure is only partially visible.
 *
 * \retval +1: the iteration finished
 * \retval 0: on success, not finished
 * \retval -ve: on error
 */
6204 static int lfsck_orphan_it_next(const struct lu_env *env,
6207 struct lfsck_thread_info *info = lfsck_env_info(env);
6208 struct filter_fid_old *pfid = &info->lti_old_pfid;
6209 struct lu_attr *la = &info->lti_la;
6210 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6211 struct lu_fid *key = &it->loi_key;
6212 struct lu_orphan_rec *rec = &it->loi_rec;
6213 struct lfsck_component *com = it->loi_com;
6214 struct lfsck_instance *lfsck = com->lc_lfsck;
6215 struct lfsck_layout_slave_data *llsd = com->lc_data;
6216 struct dt_object *obj;
6217 struct lfsck_rbtree_node *lrn;
6221 __u32 idx = it->loi_llst->llst_index;
6231 lrn = lfsck_rbtree_search(llsd, key, &exact);
/* Inexact hit: restart from the node's first OID. */
6239 key->f_seq = lrn->lrn_seq;
6240 key->f_oid = lrn->lrn_first_oid;
6245 if (unlikely(key->f_oid == 0)) {
/* Bounds check against the node's OID window (condition head in gap). */
6252 lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH) {
/* Every known object in this node was accessed: move to the next
 * node that still has unaccessed objects. */
6258 if (unlikely(atomic_read(&lrn->lrn_known_count) <=
6259 atomic_read(&lrn->lrn_accessed_count))) {
6260 struct rb_node *next = rb_next(&lrn->lrn_node);
6262 while (next != NULL) {
6263 lrn = rb_entry(next, struct lfsck_rbtree_node,
6265 if (atomic_read(&lrn->lrn_known_count) >
6266 atomic_read(&lrn->lrn_accessed_count))
6268 next = rb_next(next);
6277 key->f_seq = lrn->lrn_seq;
6278 key->f_oid = lrn->lrn_first_oid;
6282 pos = key->f_oid - lrn->lrn_first_oid;
/* Find the next known object at or after pos. */
6285 pos = find_next_bit(lrn->lrn_known_bitmap,
6286 LFSCK_RBTREE_BITMAP_WIDTH, pos);
6287 if (pos >= LFSCK_RBTREE_BITMAP_WIDTH) {
6288 key->f_oid = lrn->lrn_first_oid + pos;
/* Guard against OID wrap-around. */
6289 if (unlikely(key->f_oid < lrn->lrn_first_oid)) {
/* Already accessed => not an orphan; keep scanning. */
6297 if (test_bit(pos, lrn->lrn_accessed_bitmap)) {
6302 key->f_oid = lrn->lrn_first_oid + pos;
6303 obj = lfsck_object_find(env, lfsck, key);
6306 if (rc == -ENOENT) {
6313 dt_read_lock(env, obj, 0);
6314 if (!dt_object_exists(obj)) {
6315 dt_read_unlock(env, obj);
6316 lfsck_object_put(env, obj);
6321 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
6325 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, pfid, sizeof(*pfid)),
6326 XATTR_NAME_FID, BYPASS_CAPA);
6327 if (rc == -ENODATA) {
6328 /* For the pre-created OST-object, update the bitmap to avoid
6329 * others LFSCK (second phase) iteration to touch it again. */
6330 if (la->la_ctime == 0) {
6331 if (!test_and_set_bit(pos, lrn->lrn_accessed_bitmap))
6332 atomic_inc(&lrn->lrn_accessed_count);
6334 /* For the race between repairing dangling referenced
6335 * MDT-object and unlink the file, it may left orphan
6336 * OST-object there. Destroy it now! */
6337 if (unlikely(!(la->la_mode & S_ISUID))) {
6338 dt_read_unlock(env, obj);
6339 lfsck_layout_destroy_orphan(env,
6342 lfsck_object_put(env, obj);
6346 } else if (idx == 0) {
6347 /* If the orphan OST-object has no parent information,
6348 * regard it as referenced by the MDT-object on MDT0. */
6349 fid_zero(&rec->lor_fid);
6350 rec->lor_uid = la->la_uid;
6351 rec->lor_gid = la->la_gid;
6355 dt_read_unlock(env, obj);
6356 lfsck_object_put(env, obj);
/* Accept both the old and the new on-disk filter_fid layouts. */
6364 if (rc != sizeof(struct filter_fid) &&
6365 rc != sizeof(struct filter_fid_old))
6366 GOTO(out, rc = -EINVAL);
6368 fid_le_to_cpu(&rec->lor_fid, &pfid->ff_parent);
6369 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
6370 * MDT-object's FID::f_ver, instead it is the OST-object index in its
6371 * parent MDT-object's layout EA. */
6372 save = rec->lor_fid.f_stripe_idx;
6373 rec->lor_fid.f_ver = 0;
6374 rc = lfsck_fid_match_idx(env, lfsck, &rec->lor_fid, idx);
6375 /* If the orphan OST-object does not claim the MDT, then next.
6377 * If we do not know whether it matches or not, then return it
6378 * to the MDT for further check. */
6380 dt_read_unlock(env, obj);
6381 lfsck_object_put(env, obj);
/* Restore the stripe index for the record handed to the MDT. */
6386 rec->lor_fid.f_stripe_idx = save;
6387 rec->lor_uid = la->la_uid;
6388 rec->lor_gid = la->la_gid;
6390 CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u\n",
6391 lfsck_lfsck2name(com->lc_lfsck), PFID(key), PFID(&rec->lor_fid),
6392 rec->lor_uid, rec->lor_gid);
6397 dt_read_unlock(env, obj);
6398 lfsck_object_put(env, obj);
/*
 * Position the iterator at (or after) the given key by copying the key
 * in and delegating the actual walk to lfsck_orphan_it_next().
 *
 * \retval +1: locate to the exactly position
 * \retval 0: cannot locate to the exactly position,
 * call next() to move to a valid position.
 * \retval -ve: on error
 */
6411 static int lfsck_orphan_it_get(const struct lu_env *env,
6413 const struct dt_key *key)
6415 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6418 it->loi_key = *(struct lu_fid *)key;
6419 rc = lfsck_orphan_it_next(env, di);
/* put() is a no-op for the orphan iterator (body in extraction gap;
 * NOTE(review): upstream has an empty body — confirm). */
6429 static void lfsck_orphan_it_put(const struct lu_env *env,
/* Return the current iteration key (the FID last produced by next()). */
6434 static struct dt_key *lfsck_orphan_it_key(const struct lu_env *env,
6435 const struct dt_it *di)
6437 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6439 return (struct dt_key *)&it->loi_key;
/* Orphan iteration keys are always full lu_fid structures. */
6442 static int lfsck_orphan_it_key_size(const struct lu_env *env,
6443 const struct dt_it *di)
6445 return sizeof(struct lu_fid);
/* Copy out the current orphan record (parent FID + owner) filled in by
 * lfsck_orphan_it_next(). */
6448 static int lfsck_orphan_it_rec(const struct lu_env *env,
6449 const struct dt_it *di,
6453 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6455 *(struct lu_orphan_rec *)rec = it->loi_rec;
/* Return the iterator's cookie (loi_hash) used to resume a session. */
6460 static __u64 lfsck_orphan_it_store(const struct lu_env *env,
6461 const struct dt_it *di)
6463 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6465 return it->loi_hash;
/*
 * Resume the iteration at the position saved in the slave-target slot.
 * If the caller's cookie does not match the saved one, the saved
 * position is distrusted and reset to the beginning.
 *
 * \retval +1: locate to the exactly position
 * \retval 0: cannot locate to the exactly position,
 * call next() to move to a valid position.
 * \retval -ve: on error
 */
6474 static int lfsck_orphan_it_load(const struct lu_env *env,
6475 const struct dt_it *di,
6478 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6479 struct lfsck_layout_slave_target *llst = it->loi_llst;
6482 LASSERT(llst != NULL);
6484 if (hash != llst->llst_hash) {
6485 CDEBUG(D_LFSCK, "%s: the given hash "LPU64" for orphan "
6486 "iteration does not match the one when fini "
6487 LPU64", to be reset.\n",
6488 lfsck_lfsck2name(it->loi_com->lc_lfsck), hash,
6490 fid_zero(&llst->llst_fid);
6491 llst->llst_hash = 0;
6494 it->loi_key = llst->llst_fid;
6495 it->loi_hash = llst->llst_hash;
6496 rc = lfsck_orphan_it_next(env, (struct dt_it *)di);
/* key_rec is not meaningful for the orphan iterator (body in
 * extraction gap; NOTE(review): upstream returns 0 — confirm). */
6506 static int lfsck_orphan_it_key_rec(const struct lu_env *env,
6507 const struct dt_it *di,
6513 const struct dt_index_operations lfsck_orphan_index_ops = {
6514 .dio_lookup = lfsck_orphan_index_lookup,
6515 .dio_declare_insert = lfsck_orphan_index_declare_insert,
6516 .dio_insert = lfsck_orphan_index_insert,
6517 .dio_declare_delete = lfsck_orphan_index_declare_delete,
6518 .dio_delete = lfsck_orphan_index_delete,
6520 .init = lfsck_orphan_it_init,
6521 .fini = lfsck_orphan_it_fini,
6522 .get = lfsck_orphan_it_get,
6523 .put = lfsck_orphan_it_put,
6524 .next = lfsck_orphan_it_next,
6525 .key = lfsck_orphan_it_key,
6526 .key_size = lfsck_orphan_it_key_size,
6527 .rec = lfsck_orphan_it_rec,
6528 .store = lfsck_orphan_it_store,
6529 .load = lfsck_orphan_it_load,
6530 .key_rec = lfsck_orphan_it_key_rec,