4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_fid.h>
43 #include <lustre_lib.h>
44 #include <lustre_net.h>
45 #include <lustre/lustre_user.h>
46 #include <md_object.h>
47 #include <obd_class.h>
49 #include "lfsck_internal.h"
51 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
53 static const char lfsck_layout_name[] = "lfsck_layout";
/* Per-FID-sequence state tracked by the layout LFSCK slave: the last object ID
 * seen/known for the sequence and the LAST_ID file object that backs it.
 * NOTE(review): this capture is truncated (embedded line numbers, members
 * missing between 56 and 59, and no closing brace) — verify against the
 * pristine lfsck_layout.c before editing. */
55 struct lfsck_layout_seq {
56 struct list_head lls_list;
59 __u64 lls_lastid_known;
60 struct dt_object *lls_lastid_obj;
61 unsigned int lls_dirty:1;
/* One master (MDT) target as seen by a layout LFSCK slave; linked on
 * lfsck_layout_slave_data::llsd_master_list and iterated via the rbtree.
 * NOTE(review): truncated capture — trailing members and closing brace are
 * missing from this view. */
64 struct lfsck_layout_slave_target {
65 /* link into lfsck_layout_slave_data::llsd_master_list. */
66 struct list_head llst_list;
67 /* The position for next record in the rbtree for iteration. */
68 struct lu_fid llst_fid;
69 /* Dummy hash for iteration against the rbtree. */
/* Slave-side (OST) state for the layout LFSCK component: the list of known
 * sequences, the list of interested masters, and the OST-object accessing
 * rbtree guarded by llsd_rb_lock.
 * NOTE(review): truncated capture — some members and the closing brace are
 * missing from this view. */
76 struct lfsck_layout_slave_data {
77 /* list of lfsck_layout_seq entries. */
78 struct list_head llsd_seq_list;
80 /* list for the masters involved in layout verification. */
81 struct list_head llsd_master_list;
84 struct dt_object *llsd_rb_obj;
85 struct rb_root llsd_rb_root;
86 rwlock_t llsd_rb_lock;
87 unsigned int llsd_rbtree_valid:1;
/* Reference-counted wrapper pairing a parent dt_object with a snapshot of its
 * attributes for layout verification.
 * NOTE(review): truncated capture — refcount member (llo_ref, used by
 * lfsck_layout_object_init/put below) and closing brace are not visible. */
90 struct lfsck_layout_object {
91 struct dt_object *llo_obj;
92 struct lu_attr llo_attr;
/* One parent/child verification request queued on
 * lfsck_layout_master_data::llmd_req_list for the assistant thread.
 * NOTE(review): truncated capture — llr_ost_idx (set in
 * lfsck_layout_req_init) and the closing brace are not visible here. */
97 struct lfsck_layout_req {
98 struct list_head llr_list;
99 struct lfsck_layout_object *llr_parent;
100 struct dt_object *llr_child;
102 __u32 llr_lov_idx; /* offset in LOV EA */
/* Master-side (MDT) state for the layout LFSCK: the request queue consumed by
 * the assistant thread, plus per-target bookkeeping lists for OSTs and MDTs
 * in phase1/phase2 scanning.
 * NOTE(review): truncated capture — some members, bitfield tail and closing
 * brace are missing from this view. */
105 struct lfsck_layout_master_data {
106 spinlock_t llmd_lock;
107 struct list_head llmd_req_list;
109 /* list for the ost targets involved in layout verification. */
110 struct list_head llmd_ost_list;
112 /* list for the ost targets in phase1 scanning. */
113 struct list_head llmd_ost_phase1_list;
115 /* list for the ost targets in phase2 scanning. */
116 struct list_head llmd_ost_phase2_list;
118 /* list for the mdt targets involved in layout verification. */
119 struct list_head llmd_mdt_list;
121 /* list for the mdt targets in phase1 scanning. */
122 struct list_head llmd_mdt_phase1_list;
124 /* list for the mdt targets in phase2 scanning. */
125 struct list_head llmd_mdt_phase2_list;
127 struct ptlrpc_thread llmd_thread;
128 __u32 llmd_touch_gen;
130 int llmd_assistant_status;
131 int llmd_post_result;
132 unsigned int llmd_to_post:1,
133 llmd_to_double_scan:1,
134 llmd_in_double_scan:1,
/* Argument bundle for slave-side async RPC interpret callbacks: the export
 * the request was sent on, the owning component, and the target entry.
 * NOTE(review): truncated capture — closing brace not visible. */
138 struct lfsck_layout_slave_async_args {
139 struct obd_export *llsaa_exp;
140 struct lfsck_component *llsaa_com;
141 struct lfsck_layout_slave_target *llsaa_llst;
/* Allocate and initialize an lfsck_layout_object wrapper for @obj: snapshot
 * its attributes, take an extra reference on the lu_object, and start the
 * wrapper refcount at 1. Returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): truncated capture — the allocation, error paths, the "gen"
 * assignment the comment below refers to, and the return are missing here. */
144 static struct lfsck_layout_object *
145 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
148 struct lfsck_layout_object *llo;
153 return ERR_PTR(-ENOMEM);
155 rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
162 lu_object_get(&obj->do_lu);
164 /* The gen can be used to check whether some others have changed the
165 * file layout after LFSCK pre-fetching but before real verification. */
167 atomic_set(&llo->llo_ref, 1);
/* Drop a reference on a slave target; when the last reference goes the entry
 * must already be unlinked (asserted) and is then freed.
 * NOTE(review): truncated capture — return type line, the free, and the
 * closing braces are missing from this view. */
173 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
175 if (atomic_dec_and_test(&llst->llst_ref)) {
176 LASSERT(list_empty(&llst->llst_list));
/* Register master @index with the slave: allocate a target entry with one
 * reference and append it to llsd_master_list under llsd_lock, unless an
 * entry with the same index already exists (duplicate path truncated here).
 * NOTE(review): truncated capture — allocation, duplicate handling, and
 * return are missing from this view. */
183 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
185 struct lfsck_layout_slave_target *llst;
186 struct lfsck_layout_slave_target *tmp;
193 INIT_LIST_HEAD(&llst->llst_list);
195 llst->llst_index = index;
196 atomic_set(&llst->llst_ref, 1);
198 spin_lock(&llsd->llsd_lock);
199 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
200 if (tmp->llst_index == index) {
206 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
207 spin_unlock(&llsd->llsd_lock);
/* Unlink @llst from the master list under llsd_lock (if still linked) and
 * drop the list's reference via lfsck_layout_llst_put().
 * NOTE(review): truncated capture — return type line and the conditional
 * around the final put are not fully visible. */
216 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
217 struct lfsck_layout_slave_target *llst)
221 spin_lock(&llsd->llsd_lock);
222 if (!list_empty(&llst->llst_list)) {
223 list_del_init(&llst->llst_list);
226 spin_unlock(&llsd->llsd_lock);
229 lfsck_layout_llst_put(llst);
/* Find the slave target with @index under llsd_lock, optionally unlink it
 * (@unlink), take a reference, and return it; presumably returns NULL when
 * no match is found — TODO confirm against the pristine file.
 * NOTE(review): truncated capture — the unlink condition and both return
 * statements are missing from this view. */
232 static inline struct lfsck_layout_slave_target *
233 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
234 __u32 index, bool unlink)
236 struct lfsck_layout_slave_target *llst;
238 spin_lock(&llsd->llsd_lock);
239 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
240 if (llst->llst_index == index) {
242 list_del_init(&llst->llst_list);
244 atomic_inc(&llst->llst_ref);
245 spin_unlock(&llsd->llsd_lock);
250 spin_unlock(&llsd->llsd_lock);
/* Drop a reference on the layout-object wrapper; on the last reference,
 * release the wrapped dt_object and (presumably, truncated here) free the
 * wrapper itself. */
255 static inline void lfsck_layout_object_put(const struct lu_env *env,
256 struct lfsck_layout_object *llo)
258 if (atomic_dec_and_test(&llo->llo_ref)) {
259 lfsck_object_put(env, llo->llo_obj);
/* Build a verification request for (@parent, @child) at LOV slot @lov_idx on
 * OST @ost_idx. Takes a reference on @parent (released by
 * lfsck_layout_req_fini). Returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): truncated capture — the allocation and the final return are
 * missing from this view. */
264 static struct lfsck_layout_req *
265 lfsck_layout_req_init(struct lfsck_layout_object *parent,
266 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
268 struct lfsck_layout_req *llr;
272 return ERR_PTR(-ENOMEM);
274 INIT_LIST_HEAD(&llr->llr_list);
275 atomic_inc(&parent->llo_ref);
276 llr->llr_parent = parent;
277 llr->llr_child = child;
278 llr->llr_ost_idx = ost_idx;
279 llr->llr_lov_idx = lov_idx;
/* Tear down a request: release the child lu_object reference and the parent
 * wrapper reference taken in lfsck_layout_req_init (the free of @llr itself
 * is in the truncated tail). */
284 static inline void lfsck_layout_req_fini(const struct lu_env *env,
285 struct lfsck_layout_req *llr)
287 lu_object_put(env, &llr->llr_child->do_lu);
288 lfsck_layout_object_put(env, llr->llr_parent);
/* Test, under llmd_lock, whether the assistant request queue is empty.
 * NOTE(review): truncated capture — the result variable and return are
 * missing from this view. */
292 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
296 spin_lock(&llmd->llmd_lock);
297 if (list_empty(&llmd->llmd_req_list))
299 spin_unlock(&llmd->llmd_lock);
/* Fetch the LOV EA of @obj into @buf. If @buf is too small (or not yet
 * allocated), probe the required size with LU_BUF_NULL, (re)allocate via
 * lu_buf_realloc/lu_buf_alloc, record the new capacity in *buflen, and retry.
 * NOTE(review): truncated capture — the retry loop structure, error checks,
 * and return value handling are missing; comments here assume the standard
 * dt_xattr_get size-probe idiom — confirm against the pristine file. */
304 static int lfsck_layout_get_lovea(const struct lu_env *env,
305 struct dt_object *obj,
306 struct lu_buf *buf, ssize_t *buflen)
311 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
313 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
318 lu_buf_realloc(buf, rc);
320 *buflen = buf->lb_len;
322 if (buf->lb_buf == NULL)
334 if (unlikely(buf->lb_buf == NULL)) {
335 lu_buf_alloc(buf, rc);
337 *buflen = buf->lb_len;
339 if (buf->lb_buf == NULL)
/* Sanity-check an on-disk LOV EA header: accept only LOV_MAGIC_V1/V3 magics
 * and (per the XXX below) only the RAID0 stripe pattern; log and reject
 * anything else. Note lmm fields are little-endian on disk, hence the
 * le32_to_cpu conversions before comparing.
 * NOTE(review): truncated capture — rc setup, CDEBUG argument tails, and
 * return paths are missing from this view. */
348 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
353 magic = le32_to_cpu(lmm->lmm_magic);
354 /* If magic crashed, keep it there. Sometime later, during OST-object
355 * orphan handling, if some OST-object(s) back-point to it, it can be
356 * verified and repaired. */
357 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3) {
361 lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
362 if ((magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
367 CDEBUG(D_LFSCK, "%s LOV EA magic %u on "DOSTID"\n",
368 rc == -EINVAL ? "Unknown" : "Unsupported",
374 pattern = le32_to_cpu(lmm->lmm_pattern);
375 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
376 if (lov_pattern(pattern) != LOV_PATTERN_RAID0) {
379 lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
380 CDEBUG(D_LFSCK, "Unsupported LOV EA pattern %u on "DOSTID"\n",
381 pattern, POSTID(&oi));
389 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
390 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
391 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1)
/* One rbtree node covering a window of LFSCK_RBTREE_BITMAP_WIDTH object IDs
 * within a single FID sequence; two bitmaps record which OST objects in the
 * window are known vs. actually accessed, with matching counters.
 * NOTE(review): truncated capture — lrn_seq/lrn_first_oid key members (used
 * by lfsck_rbtree_cmp) and the closing brace are not visible here. */
393 struct lfsck_rbtree_node {
394 struct rb_node lrn_node;
397 atomic_t lrn_known_count;
398 atomic_t lrn_accessed_count;
399 void *lrn_known_bitmap;
400 void *lrn_accessed_bitmap;
/* Three-way comparison of (@seq, @oid) against the ID window covered by
 * @lrn: ordered first by sequence, then by whether @oid falls below, inside,
 * or above [lrn_first_oid, lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH).
 * NOTE(review): truncated capture — the return statements for each branch
 * are missing from this view. */
403 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
404 __u64 seq, __u32 oid)
406 if (seq < lrn->lrn_seq)
409 if (seq > lrn->lrn_seq)
412 if (oid < lrn->lrn_first_oid)
415 if (oid - lrn->lrn_first_oid >= LFSCK_RBTREE_BITMAP_WIDTH)
/* The caller should hold llsd->llsd_rb_lock. */
/* Standard rbtree descent keyed by lfsck_rbtree_cmp(); on an exact hit the
 * matching node is returned, otherwise the search falls through to the next
 * valid node via rb_next() (for iteration), with *exact presumably reporting
 * which case occurred — TODO confirm against the pristine file.
 * NOTE(review): truncated capture — the cmp-result branching, exact-match
 * handling, and returns are missing from this view. */
422 static struct lfsck_rbtree_node *
423 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
424 const struct lu_fid *fid, bool *exact)
426 struct rb_node *node = llsd->llsd_rb_root.rb_node;
427 struct rb_node *prev = NULL;
428 struct lfsck_rbtree_node *lrn = NULL;
434 while (node != NULL) {
436 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
437 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
439 node = node->rb_left;
441 node = node->rb_right;
449 /* If there is no exactly matched one, then to the next valid one. */
452 /* The rbtree is empty. */
459 node = rb_next(prev);
461 /* The end of the rbtree. */
465 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
/* Allocate a node for the ID window containing @fid: both bitmaps are
 * LFSCK_RBTREE_BITMAP_SIZE bytes, lrn_first_oid is the fid's oid rounded
 * down to the window boundary, counters start at zero. On any allocation
 * failure everything already allocated is freed and ERR_PTR(-ENOMEM) is
 * returned.
 * NOTE(review): truncated capture — the node allocation itself and the final
 * return are missing from this view. */
470 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
471 const struct lu_fid *fid)
473 struct lfsck_rbtree_node *lrn;
477 return ERR_PTR(-ENOMEM);
479 OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
480 if (lrn->lrn_known_bitmap == NULL) {
483 return ERR_PTR(-ENOMEM);
486 OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
487 if (lrn->lrn_accessed_bitmap == NULL) {
488 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
491 return ERR_PTR(-ENOMEM);
494 RB_CLEAR_NODE(&lrn->lrn_node);
495 lrn->lrn_seq = fid_seq(fid);
496 lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
497 atomic_set(&lrn->lrn_known_count, 0);
498 atomic_set(&lrn->lrn_accessed_count, 0);
/* Release both window bitmaps of @lrn (the node struct itself is freed in
 * the truncated tail of this function). */
503 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
505 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
506 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
/* The caller should hold lock (llsd->llsd_rb_lock, write side). */
/* Standard rb_link_node/rb_insert_color insertion keyed by
 * lfsck_rbtree_cmp(); presumably returns the existing node if an equal key
 * is already present, else @lrn — TODO confirm against the pristine file.
 * NOTE(review): truncated capture — the parent update inside the loop, the
 * equal-key branch, and the return are missing from this view. */
511 static struct lfsck_rbtree_node *
512 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
513 struct lfsck_rbtree_node *lrn)
515 struct rb_node **pos = &llsd->llsd_rb_root.rb_node;
516 struct rb_node *parent = NULL;
517 struct lfsck_rbtree_node *tmp;
520 while (*pos != NULL) {
522 tmp = rb_entry(parent, struct lfsck_rbtree_node, lrn_node);
523 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
525 pos = &(*pos)->rb_left;
527 pos = &(*pos)->rb_right;
532 rb_link_node(&lrn->lrn_node, parent, pos);
533 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
538 extern const struct dt_index_operations lfsck_orphan_index_ops;
/* Create the in-RAM proxy object that stands for the layout rbtree (FID
 * sequence FID_SEQ_LAYOUT_RBTREE, oid = device index), hook up the orphan
 * index operations for iteration, mark the rbtree valid, and enable FID
 * access recording on the bottom device.
 * NOTE(review): truncated capture — the IS_ERR(obj) check guarding the
 * RETURN(PTR_ERR(obj)) and the final RETURN are missing from this view. */
540 static int lfsck_rbtree_setup(const struct lu_env *env,
541 struct lfsck_component *com)
543 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
544 struct lfsck_instance *lfsck = com->lc_lfsck;
545 struct dt_device *dev = lfsck->li_bottom;
546 struct lfsck_layout_slave_data *llsd = com->lc_data;
547 struct dt_object *obj;
549 fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
550 fid->f_oid = lfsck_dev_idx(dev);
552 obj = dt_locate(env, dev, fid);
554 RETURN(PTR_ERR(obj));
556 /* Generate an in-RAM object to stand for the layout rbtree.
557 * Scanning the layout rbtree will be via the iteration over
558 * the object. In the future, the rbtree may be written onto
559 * disk with the object.
561 * Mark the object to be as exist. */
562 obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
563 obj->do_index_ops = &lfsck_orphan_index_ops;
564 llsd->llsd_rb_obj = obj;
565 llsd->llsd_rbtree_valid = 1;
566 dev->dd_record_fid_accessed = 1;
568 CDEBUG(D_LFSCK, "%s: layout LFSCK init OST-objects accessing bitmap\n",
569 lfsck_lfsck2name(lfsck));
/* Tear down the accessing-bitmap rbtree: stop FID access recording, mark the
 * tree invalid under the write lock so no new users enter, then erase and
 * free every node (safe without the lock once invalidated), and finally drop
 * the proxy object created by lfsck_rbtree_setup(). */
574 static void lfsck_rbtree_cleanup(const struct lu_env *env,
575 struct lfsck_component *com)
577 struct lfsck_instance *lfsck = com->lc_lfsck;
578 struct lfsck_layout_slave_data *llsd = com->lc_data;
579 struct rb_node *node = rb_first(&llsd->llsd_rb_root);
580 struct rb_node *next;
581 struct lfsck_rbtree_node *lrn;
583 lfsck->li_bottom->dd_record_fid_accessed = 0;
584 /* Invalidate the rbtree, then no others will use it. */
585 write_lock(&llsd->llsd_rb_lock);
586 llsd->llsd_rbtree_valid = 0;
587 write_unlock(&llsd->llsd_rb_lock);
589 while (node != NULL) {
590 next = rb_next(node);
591 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
592 rb_erase(node, &llsd->llsd_rb_root);
593 lfsck_rbtree_free(lrn);
597 if (llsd->llsd_rb_obj != NULL) {
598 lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
599 llsd->llsd_rb_obj = NULL;
602 CDEBUG(D_LFSCK, "%s: layout LFSCK fini OST-objects accessing bitmap\n",
603 lfsck_lfsck2name(lfsck));
/* Record that OST-object @fid is known (and, if @accessed, actually used):
 * locate its window node under the read lock; if absent, drop to a new node
 * allocation, retake the write lock, re-check validity, and insert (freeing
 * the new node if insert found a concurrent duplicate). Then set the bits
 * and bump the counters. Insane/LAST_ID FIDs and non-IDIF/non-normal
 * sequences are ignored. On failure for an accessed object, mark the
 * component LF_INCOMPLETE and tear down the rbtree so orphan handling is
 * cancelled for this device rather than done wrongly.
 * NOTE(review): truncated capture — the @accessed parameter line, several
 * branch/return lines, and the lock-path structure between the unlock labels
 * are missing from this view. */
606 static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
607 struct lfsck_component *com,
608 const struct lu_fid *fid,
611 struct lfsck_layout_slave_data *llsd = com->lc_data;
612 struct lfsck_rbtree_node *lrn;
618 if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
621 if (!fid_is_idif(fid) && !fid_is_norm(fid))
624 read_lock(&llsd->llsd_rb_lock);
625 if (!llsd->llsd_rbtree_valid)
626 GOTO(unlock, rc = 0);
628 lrn = lfsck_rbtree_search(llsd, fid, NULL);
630 struct lfsck_rbtree_node *tmp;
634 read_unlock(&llsd->llsd_rb_lock);
635 tmp = lfsck_rbtree_new(env, fid);
637 GOTO(out, rc = PTR_ERR(tmp));
640 write_lock(&llsd->llsd_rb_lock);
641 if (!llsd->llsd_rbtree_valid) {
642 lfsck_rbtree_free(tmp);
643 GOTO(unlock, rc = 0);
646 lrn = lfsck_rbtree_insert(llsd, tmp);
648 lfsck_rbtree_free(tmp);
651 idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
652 /* Any accessed object must be a known object. */
653 if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
654 atomic_inc(&lrn->lrn_known_count);
655 if (accessed && !test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
656 atomic_inc(&lrn->lrn_accessed_count);
658 GOTO(unlock, rc = 0);
662 write_unlock(&llsd->llsd_rb_lock);
664 read_unlock(&llsd->llsd_rb_lock);
666 if (rc != 0 && accessed) {
667 struct lfsck_layout *lo = com->lc_file_ram;
669 CDEBUG(D_LFSCK, "%s: fail to update OST-objects accessing "
670 "bitmap, and will cause incorrect LFSCK OST-object "
671 "handling, so disable it to cancel orphan handling "
672 "for related device. rc = %d\n",
673 lfsck_lfsck2name(com->lc_lfsck), rc);
675 lo->ll_flags |= LF_INCOMPLETE;
676 lfsck_rbtree_cleanup(env, com);
/* Convert an on-disk (little-endian) lfsck_layout record into CPU byte order,
 * field by field; mirror image of lfsck_layout_cpu_to_le() below — keep the
 * two field lists in sync when the struct changes. */
680 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
681 const struct lfsck_layout *src)
685 des->ll_magic = le32_to_cpu(src->ll_magic);
686 des->ll_status = le32_to_cpu(src->ll_status);
687 des->ll_flags = le32_to_cpu(src->ll_flags);
688 des->ll_success_count = le32_to_cpu(src->ll_success_count);
689 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
690 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
691 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
692 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
693 des->ll_time_last_checkpoint =
694 le64_to_cpu(src->ll_time_last_checkpoint);
695 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
696 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
697 des->ll_pos_first_inconsistent =
698 le64_to_cpu(src->ll_pos_first_inconsistent);
699 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
700 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
701 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
702 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
703 for (i = 0; i < LLIT_MAX; i++)
704 des->ll_objs_repaired[i] =
705 le64_to_cpu(src->ll_objs_repaired[i]);
706 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/* Convert a CPU byte-order lfsck_layout record into on-disk (little-endian)
 * form; exact inverse of lfsck_layout_le_to_cpu() above — keep the two field
 * lists in sync when the struct changes. */
709 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
710 const struct lfsck_layout *src)
714 des->ll_magic = cpu_to_le32(src->ll_magic);
715 des->ll_status = cpu_to_le32(src->ll_status);
716 des->ll_flags = cpu_to_le32(src->ll_flags);
717 des->ll_success_count = cpu_to_le32(src->ll_success_count);
718 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
719 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
720 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
721 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
722 des->ll_time_last_checkpoint =
723 cpu_to_le64(src->ll_time_last_checkpoint);
724 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
725 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
726 des->ll_pos_first_inconsistent =
727 cpu_to_le64(src->ll_pos_first_inconsistent);
728 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
729 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
730 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
731 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
732 for (i = 0; i < LLIT_MAX; i++)
733 des->ll_objs_repaired[i] =
734 cpu_to_le64(src->ll_objs_repaired[i]);
735 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/**
 * Load the persistent lfsck_layout trace file into com->lc_file_ram.
 *
 * Reads lc_file_size bytes into lc_file_disk, converts to CPU byte order,
 * and validates the magic.
 *
 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
 * \retval 0: succeed.
 * \retval -ve: failed cases.
 *
 * NOTE(review): truncated capture — pos initialization, the short-read and
 * bad-magic return values, and the final return are missing from this view.
 */
743 static int lfsck_layout_load(const struct lu_env *env,
744 struct lfsck_component *com)
746 struct lfsck_layout *lo = com->lc_file_ram;
747 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
748 ssize_t size = com->lc_file_size;
752 rc = dbo->dbo_read(env, com->lc_obj,
753 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
758 CDEBUG(D_LFSCK, "%s: failed to load lfsck_layout: rc = %d\n",
759 lfsck_lfsck2name(com->lc_lfsck), rc);
761 } else if (rc != size) {
762 CDEBUG(D_LFSCK, "%s: lfsck_layout size %u != %u; reset it\n",
763 lfsck_lfsck2name(com->lc_lfsck), rc, (unsigned int)size);
767 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
768 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
769 CDEBUG(D_LFSCK, "%s: invalid lfsck_layout magic %#x != %#x, "
770 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
771 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/* Persist com->lc_file_ram to the trace file: convert to little-endian into
 * lc_file_disk, then declare + start a local transaction on li_bottom, write
 * the record, and stop the transaction; failures are logged at the end.
 * NOTE(review): truncated capture — pos initialization, IS_ERR(handle)
 * check, the GOTO error hops between steps, and the final return are missing
 * from this view. */
778 static int lfsck_layout_store(const struct lu_env *env,
779 struct lfsck_component *com)
781 struct dt_object *obj = com->lc_obj;
782 struct lfsck_instance *lfsck = com->lc_lfsck;
783 struct lfsck_layout *lo = com->lc_file_disk;
784 struct thandle *handle;
785 ssize_t size = com->lc_file_size;
790 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
791 handle = dt_trans_create(env, lfsck->li_bottom);
793 GOTO(log, rc = PTR_ERR(handle));
795 rc = dt_declare_record_write(env, obj, lfsck_buf_get(env, lo, size),
800 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
804 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
810 dt_trans_stop(env, lfsck->li_bottom, handle);
814 CDEBUG(D_LFSCK, "%s: fail to store lfsck_layout: rc = %d\n",
815 lfsck_lfsck2name(lfsck), rc);
/* Reset the in-RAM trace file to a fresh LS_INIT state (magic set, all
 * counters zeroed) and persist it via lfsck_layout_store() under lc_sem. */
819 static int lfsck_layout_init(const struct lu_env *env,
820 struct lfsck_component *com)
822 struct lfsck_layout *lo = com->lc_file_ram;
825 memset(lo, 0, com->lc_file_size);
826 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
827 lo->ll_status = LS_INIT;
828 down_write(&com->lc_sem);
829 rc = lfsck_layout_store(env, com);
830 up_write(&com->lc_sem);
/* Decide whether @fid names an OST object: first ask the FLD server whether
 * the sequence maps to an OST range; if that is inconclusive, fall back to
 * the object's LMA xattr (LMAC_FID_ON_OST compat flag), and finally probe
 * for the XATTR_NAME_FID filter fid xattr. Returns 1 for OST object,
 * 0 otherwise — based on the visible returns; confirm remaining paths
 * against the pristine file.
 * NOTE(review): truncated capture — the rc checks between the three probes
 * and the final return are missing from this view. */
835 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
836 struct dt_object *obj, const struct lu_fid *fid)
838 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
839 struct lu_seq_range range = { 0 };
840 struct lustre_mdt_attrs *lma;
843 fld_range_set_any(&range);
844 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
846 if (fld_range_is_ost(&range))
852 lma = &lfsck_env_info(env)->lti_lma;
853 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
854 XATTR_NAME_LMA, BYPASS_CAPA);
855 if (rc == sizeof(*lma)) {
856 lustre_lma_swab(lma);
858 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
861 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/* Linear scan of the (sorted-by-seq) llsd_seq_list for the entry matching
 * @seq; the "> seq" test ends the scan early since the list is kept ordered
 * by lfsck_layout_seq_insert(). Presumably returns NULL when not found —
 * TODO confirm; the return statements are missing from this truncated view. */
866 static struct lfsck_layout_seq *
867 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
869 struct lfsck_layout_seq *lls;
871 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
872 if (lls->lls_seq == seq)
875 if (lls->lls_seq > seq)
/* Insert @lls into llsd_seq_list keeping it sorted by lls_seq ascending:
 * find the first entry with a larger seq and add before it (or at the tail).
 * NOTE(review): truncated capture — return type line and the loop break are
 * not visible here. */
883 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
884 struct lfsck_layout_seq *lls)
886 struct lfsck_layout_seq *tmp;
887 struct list_head *pos = &llsd->llsd_seq_list;
889 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
890 if (lls->lls_seq < tmp->lls_seq) {
891 pos = &tmp->lls_list;
895 list_add_tail(&lls->lls_list, pos);
/* (Re)create a missing LAST_ID file for @obj's sequence: skipped entirely in
 * dry-run mode; otherwise declare create + record write, start a local
 * transaction, and under the object write lock create the regular file
 * (mode 0644) and write the initial lastid — only if the object still does
 * not exist by the time the lock is taken.
 * NOTE(review): truncated capture — return type line, lastid/pos
 * initialization, IS_ERR(th) check, the GOTO hops, and the final return are
 * missing from this view. */
899 lfsck_layout_lastid_create(const struct lu_env *env,
900 struct lfsck_instance *lfsck,
901 struct dt_object *obj)
903 struct lfsck_thread_info *info = lfsck_env_info(env);
904 struct lu_attr *la = &info->lti_la;
905 struct dt_object_format *dof = &info->lti_dof;
906 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
907 struct dt_device *dt = lfsck->li_bottom;
914 if (bk->lb_param & LPF_DRYRUN)
917 memset(la, 0, sizeof(*la));
918 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
919 la->la_valid = LA_MODE | LA_UID | LA_GID;
920 dof->dof_type = dt_mode_to_dft(S_IFREG);
922 th = dt_trans_create(env, dt);
924 GOTO(log, rc = PTR_ERR(th));
926 rc = dt_declare_create(env, obj, la, NULL, dof, th);
930 rc = dt_declare_record_write(env, obj,
931 lfsck_buf_get(env, &lastid,
937 rc = dt_trans_start_local(env, dt, th);
941 dt_write_lock(env, obj, 0);
942 if (likely(!dt_object_exists(obj))) {
943 rc = dt_create(env, obj, la, NULL, dof, th);
945 rc = dt_record_write(env, obj,
946 lfsck_buf_get(env, &lastid, sizeof(lastid)),
949 dt_write_unlock(env, obj);
954 dt_trans_stop(env, dt, th);
957 CDEBUG(D_LFSCK, "%s: layout LFSCK will create LAST_ID for <seq> "
959 lfsck_lfsck2name(lfsck), fid_seq(lfsck_dto2fid(obj)), rc);
/* Re-read the LAST_ID file for @lls and reconcile with what LFSCK already
 * knows: if the on-disk value is lower than lls_lastid_known, the LAST_ID
 * has regressed (crashed), so restore the known value, flag
 * LF_CRASHED_LASTID, and notify the out-of-band listener; otherwise adopt
 * the (equal or larger) on-disk value.
 * NOTE(review): truncated capture — return type line, pos/lastid
 * declarations, and the return are missing from this view. */
965 lfsck_layout_lastid_reload(const struct lu_env *env,
966 struct lfsck_component *com,
967 struct lfsck_layout_seq *lls)
973 dt_read_lock(env, lls->lls_lastid_obj, 0);
974 rc = dt_record_read(env, lls->lls_lastid_obj,
975 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
976 dt_read_unlock(env, lls->lls_lastid_obj);
977 if (unlikely(rc != 0))
980 lastid = le64_to_cpu(lastid);
981 if (lastid < lls->lls_lastid_known) {
982 struct lfsck_instance *lfsck = com->lc_lfsck;
983 struct lfsck_layout *lo = com->lc_file_ram;
985 lls->lls_lastid = lls->lls_lastid_known;
987 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
988 LASSERT(lfsck->li_out_notify != NULL);
990 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
991 LE_LASTID_REBUILDING);
992 lo->ll_flags |= LF_CRASHED_LASTID;
994 } else if (lastid >= lls->lls_lastid) {
995 lls->lls_lastid = lastid;
/* Walk every known sequence and sync dirty LAST_ID values back to disk.
 * Clean entries are first reloaded (see the long comment below on the OFD
 * pre-creation window) and skipped if still clean; dirty ones are written
 * in a per-sequence local transaction, skipped in dry-run mode. Both the
 * transaction-create and record-write failure paths log distinct messages
 * ("(1)" / "(2)") so they can be told apart.
 * NOTE(review): truncated capture — return type line, pos/lastid and rc
 * declarations, IS_ERR(th) check, GOTO hops, dirty-flag clearing and final
 * return are missing from this view. */
1003 lfsck_layout_lastid_store(const struct lu_env *env,
1004 struct lfsck_component *com)
1006 struct lfsck_instance *lfsck = com->lc_lfsck;
1007 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1008 struct dt_device *dt = lfsck->li_bottom;
1009 struct lfsck_layout_slave_data *llsd = com->lc_data;
1010 struct lfsck_layout_seq *lls;
1016 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
1019 /* XXX: Add the code back if we really found related
1020 * inconsistent cases in the future. */
1022 if (!lls->lls_dirty) {
1023 /* In OFD, before the pre-creation, the LAST_ID
1024 * file will be updated firstly, which may hide
1025 * some potential crashed cases. For example:
1027 * The old obj1's ID is higher than old LAST_ID
1028 * but lower than the new LAST_ID, but the LFSCK
1029 * have not touch the obj1 until the OFD updated
1030 * the LAST_ID. So the LFSCK does not regard it
1031 * as crashed case. But when OFD does not create
1032 * successfully, it will set the LAST_ID as the
1033 * real created objects' ID, then LFSCK needs to
1034 * found related inconsistency. */
1035 rc = lfsck_layout_lastid_reload(env, com, lls);
1036 if (likely(!lls->lls_dirty))
1041 CDEBUG(D_LFSCK, "%s: layout LFSCK will sync the LAST_ID for "
1042 "<seq> "LPX64" as <oid> "LPU64"\n",
1043 lfsck_lfsck2name(lfsck), lls->lls_seq, lls->lls_lastid);
1045 if (bk->lb_param & LPF_DRYRUN) {
1050 th = dt_trans_create(env, dt);
1053 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to store "
1054 "the LAST_ID for <seq> "LPX64"(1): rc = %d\n",
1055 lfsck_lfsck2name(com->lc_lfsck),
1060 lastid = cpu_to_le64(lls->lls_lastid);
1061 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
1062 lfsck_buf_get(env, &lastid,
1068 rc = dt_trans_start_local(env, dt, th);
1072 dt_write_lock(env, lls->lls_lastid_obj, 0);
1073 rc = dt_record_write(env, lls->lls_lastid_obj,
1074 lfsck_buf_get(env, &lastid,
1075 sizeof(lastid)), &pos, th);
1076 dt_write_unlock(env, lls->lls_lastid_obj);
1081 dt_trans_stop(env, dt, th);
1084 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to store "
1085 "the LAST_ID for <seq> "LPX64"(2): rc = %d\n",
1086 lfsck_lfsck2name(com->lc_lfsck),
/* Locate the LAST_ID object for lls->lls_seq on the bottom device and load
 * its value. If the object does not exist, flag LF_CRASHED_LASTID (with
 * out-of-band notification), optionally honor the OBD_FAIL_LFSCK_DELAY4
 * fault-injection delay, and create a fresh LAST_ID file. If it exists,
 * read the __u64; a short/failed read maps to -EFAULT, and a zero-byte read
 * (empty file) also counts as a crashed LAST_ID. The loaded value is
 * converted from little-endian. On success the object reference is kept in
 * lls->lls_lastid_obj; on error it is dropped.
 * NOTE(review): truncated capture — return type line, pos declaration,
 * IS_ERR(obj) check, GOTO/out structure, and final return are missing from
 * this view. */
1095 lfsck_layout_lastid_load(const struct lu_env *env,
1096 struct lfsck_component *com,
1097 struct lfsck_layout_seq *lls)
1099 struct lfsck_instance *lfsck = com->lc_lfsck;
1100 struct lfsck_layout *lo = com->lc_file_ram;
1101 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1102 struct dt_object *obj;
1107 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
1108 obj = dt_locate(env, lfsck->li_bottom, fid);
1110 RETURN(PTR_ERR(obj));
1112 /* LAST_ID crashed, to be rebuilt */
1113 if (!dt_object_exists(obj)) {
1114 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1115 LASSERT(lfsck->li_out_notify != NULL);
1117 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1118 LE_LASTID_REBUILDING);
1119 lo->ll_flags |= LF_CRASHED_LASTID;
1121 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
1123 struct l_wait_info lwi = LWI_TIMEOUT(
1124 cfs_time_seconds(cfs_fail_val),
1127 up_write(&com->lc_sem);
1128 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1129 !thread_is_running(&lfsck->li_thread),
1131 down_write(&com->lc_sem);
1135 rc = lfsck_layout_lastid_create(env, lfsck, obj);
1137 dt_read_lock(env, obj, 0);
1138 rc = dt_read(env, obj,
1139 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
1141 dt_read_unlock(env, obj);
1142 if (rc != 0 && rc != sizeof(__u64))
1143 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
1145 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
1146 LASSERT(lfsck->li_out_notify != NULL);
1148 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1149 LE_LASTID_REBUILDING);
1150 lo->ll_flags |= LF_CRASHED_LASTID;
1153 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
1161 lfsck_object_put(env, obj);
1163 lls->lls_lastid_obj = obj;
/* Account one phase-1 failure; the first time an inconsistency is hit,
 * remember the OI iterator position so a later run can resume scanning from
 * the first non-repaired object, and log that position. */
1168 static void lfsck_layout_record_failure(const struct lu_env *env,
1169 struct lfsck_instance *lfsck,
1170 struct lfsck_layout *lo)
1172 lo->ll_objs_failed_phase1++;
1173 if (unlikely(lo->ll_pos_first_inconsistent == 0)) {
1174 lo->ll_pos_first_inconsistent =
1175 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
1178 CDEBUG(D_LFSCK, "%s: layout LFSCK hit first non-repaired "
1179 "inconsistency at the pos ["LPU64"]\n",
1180 lfsck_lfsck2name(lfsck),
1181 lo->ll_pos_first_inconsistent);
/* ptlrpc interpret callback for async LFSCK requests the layout master sends
 * to OST/MDT targets. Behavior depends on the originating event:
 * - LE_START (first case, header truncated): on RPC failure mark the trace
 *   file LF_INCOMPLETE; on success link the target onto the master's
 *   layout/phase1 lists (OST or MDT side per LEF_TO_OST) unless the target
 *   is dead or already done.
 * - LE_PHASE1_DONE / LE_PHASE2_DONE (and the truncated case before them):
 *   just log unexpected failures (-EALREADY is benign).
 * - LE_QUERY (case label truncated): on failure or a missing/invalid reply
 *   drop the target from both lists; otherwise move targets still in
 *   LS_SCANNING_PHASE2 onto the phase2 list, and drop targets in any other
 *   state from tracking.
 * Always releases the per-request references at the end when not shared.
 * NOTE(review): truncated capture — case labels, rc checks, break/GOTO
 * statements, lfsck_tgt_put and the final return are missing from this
 * view; the per-branch summaries above are inferred from the visible list
 * operations — confirm against the pristine file. */
1185 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
1186 struct ptlrpc_request *req,
1189 struct lfsck_async_interpret_args *laia = args;
1190 struct lfsck_component *com = laia->laia_com;
1191 struct lfsck_layout_master_data *llmd = com->lc_data;
1192 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
1193 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
1194 struct lfsck_request *lr = laia->laia_lr;
1196 switch (lr->lr_event) {
1199 struct lfsck_layout *lo = com->lc_file_ram;
1201 CDEBUG(D_LFSCK, "%s: fail to notify %s %x for layout "
1203 lfsck_lfsck2name(com->lc_lfsck),
1204 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1205 ltd->ltd_index, rc);
1206 lo->ll_flags |= LF_INCOMPLETE;
1210 spin_lock(&ltds->ltd_lock);
1211 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1212 spin_unlock(&ltds->ltd_lock);
1216 if (lr->lr_flags & LEF_TO_OST) {
1217 if (list_empty(&ltd->ltd_layout_list))
1218 list_add_tail(&ltd->ltd_layout_list,
1219 &llmd->llmd_ost_list);
1220 if (list_empty(&ltd->ltd_layout_phase_list))
1221 list_add_tail(&ltd->ltd_layout_phase_list,
1222 &llmd->llmd_ost_phase1_list);
1224 if (list_empty(&ltd->ltd_layout_list))
1225 list_add_tail(&ltd->ltd_layout_list,
1226 &llmd->llmd_mdt_list);
1227 if (list_empty(&ltd->ltd_layout_phase_list))
1228 list_add_tail(&ltd->ltd_layout_phase_list,
1229 &llmd->llmd_mdt_phase1_list);
1231 spin_unlock(&ltds->ltd_lock);
1234 case LE_PHASE1_DONE:
1235 case LE_PHASE2_DONE:
1237 if (rc != 0 && rc != -EALREADY)
1238 CDEBUG(D_LFSCK, "%s: fail to notify %s %x for layout: "
1239 "event = %d, rc = %d\n",
1240 lfsck_lfsck2name(com->lc_lfsck),
1241 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1242 ltd->ltd_index, lr->lr_event, rc);
1245 struct lfsck_reply *reply;
1248 spin_lock(&ltds->ltd_lock);
1249 list_del_init(&ltd->ltd_layout_phase_list);
1250 list_del_init(&ltd->ltd_layout_list);
1251 spin_unlock(&ltds->ltd_lock);
1255 reply = req_capsule_server_get(&req->rq_pill,
1257 if (reply == NULL) {
1259 CDEBUG(D_LFSCK, "%s: invalid query reply: rc = %d\n",
1260 lfsck_lfsck2name(com->lc_lfsck), rc);
1261 spin_lock(&ltds->ltd_lock);
1262 list_del_init(&ltd->ltd_layout_phase_list);
1263 list_del_init(&ltd->ltd_layout_list);
1264 spin_unlock(&ltds->ltd_lock);
1268 switch (reply->lr_status) {
1269 case LS_SCANNING_PHASE1:
1271 case LS_SCANNING_PHASE2:
1272 spin_lock(&ltds->ltd_lock);
1273 list_del_init(&ltd->ltd_layout_phase_list);
1274 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1275 spin_unlock(&ltds->ltd_lock);
1279 if (lr->lr_flags & LEF_TO_OST)
1280 list_add_tail(&ltd->ltd_layout_phase_list,
1281 &llmd->llmd_ost_phase2_list);
1283 list_add_tail(&ltd->ltd_layout_phase_list,
1284 &llmd->llmd_mdt_phase2_list);
1285 spin_unlock(&ltds->ltd_lock);
1288 spin_lock(&ltds->ltd_lock);
1289 list_del_init(&ltd->ltd_layout_phase_list);
1290 list_del_init(&ltd->ltd_layout_list);
1291 spin_unlock(&ltds->ltd_lock);
1297 CDEBUG(D_LFSCK, "%s: layout LFSCK unexpected event: rc = %d\n",
1298 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
1302 if (!laia->laia_shared) {
1304 lfsck_component_put(env, com);
/* Send an LE_QUERY to every target still in phase1: MDTs first, then OSTs
 * (LEF_TO_OST) on a second pass. Targets are walked under ltd_lock using the
 * touch-generation trick — each visited target gets the new llmd_touch_gen
 * and is rotated to the list tail, so the walk terminates when it meets an
 * already-touched entry even though the lock is dropped around each async
 * send. Replies are handled by lfsck_layout_master_async_interpret(); the
 * function waits for the whole set before moving on.
 * NOTE(review): truncated capture — rc/rc1 declarations, the IS_ERR(set)
 * check, the per-target error handling (lfsck_tgt_put), the pass-switch
 * GOTO, and surrounding braces are missing from this view. */
1310 static int lfsck_layout_master_query_others(const struct lu_env *env,
1311 struct lfsck_component *com)
1313 struct lfsck_thread_info *info = lfsck_env_info(env);
1314 struct lfsck_request *lr = &info->lti_lr;
1315 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1316 struct lfsck_instance *lfsck = com->lc_lfsck;
1317 struct lfsck_layout_master_data *llmd = com->lc_data;
1318 struct ptlrpc_request_set *set;
1319 struct lfsck_tgt_descs *ltds;
1320 struct lfsck_tgt_desc *ltd;
1321 struct list_head *head;
1326 set = ptlrpc_prep_set();
1330 llmd->llmd_touch_gen++;
1331 memset(lr, 0, sizeof(*lr));
1332 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1333 lr->lr_event = LE_QUERY;
1334 lr->lr_active = LFSCK_TYPE_LAYOUT;
1335 laia->laia_com = com;
1337 laia->laia_shared = 0;
1339 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1340 ltds = &lfsck->li_mdt_descs;
1342 head = &llmd->llmd_mdt_phase1_list;
1346 ltds = &lfsck->li_ost_descs;
1347 lr->lr_flags = LEF_TO_OST;
1348 head = &llmd->llmd_ost_phase1_list;
1351 laia->laia_ltds = ltds;
1352 spin_lock(&ltds->ltd_lock);
1353 while (!list_empty(head)) {
1354 ltd = list_entry(head->next,
1355 struct lfsck_tgt_desc,
1356 ltd_layout_phase_list);
1357 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1360 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1361 list_del(&ltd->ltd_layout_phase_list);
1362 list_add_tail(&ltd->ltd_layout_phase_list, head);
1363 atomic_inc(&ltd->ltd_ref);
1364 laia->laia_ltd = ltd;
1365 spin_unlock(&ltds->ltd_lock);
1366 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1367 lfsck_layout_master_async_interpret,
1370 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to query %s %x: "
1371 "rc = %d\n", lfsck_lfsck2name(lfsck),
1372 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1373 ltd->ltd_index, rc);
1377 spin_lock(&ltds->ltd_lock);
1379 spin_unlock(&ltds->ltd_lock);
1381 rc = ptlrpc_set_wait(set);
1383 ptlrpc_set_destroy(set);
1387 if (!(lr->lr_flags & LEF_TO_OST) &&
1388 list_empty(&llmd->llmd_mdt_phase1_list))
1391 ptlrpc_set_destroy(set);
1393 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * Whether the layout LFSCK master is ready to start orphan OST-object
 * handling: all MDTs have left phase 1, and either some OST has entered
 * phase 2 or no OST remains in phase 1.
 * (Return type line is not visible in this extraction — presumably
 * static bool/int; confirm against the full source.)
 */
1397 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1399 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1400 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1401 list_empty(&llmd->llmd_ost_phase1_list));
/*
 * Notify the other involved targets (OSTs and, when LPF_ALL_TGT is set,
 * other MDTs) about a layout LFSCK event via async LFSCK_NOTIFY RPCs.
 * The dispatch order depends on lr->lr_event:
 *   - start (case label not visible here): OSTs first, then link the
 *     other MDTs onto the local phase-1 lists;
 *   - LE_STOP / LE_PHASE2_DONE / LE_PEER_EXIT: other MDTs first (when
 *     applicable), then the OSTs with LEF_TO_OST set;
 *   - LE_PHASE1_DONE: the MDTs still on llmd_mdt_phase1_list.
 *
 * NOTE(review): partial extraction — case labels, error paths and
 * several braces are missing; "<ds->"/"<d->" look like HTML-mangled
 * "&ltds->"/"&ltd->".  Verify control flow against the pristine file.
 */
1404 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1405 struct lfsck_component *com,
1406 struct lfsck_request *lr)
1408 struct lfsck_thread_info *info = lfsck_env_info(env);
1409 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1410 struct lfsck_instance *lfsck = com->lc_lfsck;
1411 struct lfsck_layout_master_data *llmd = com->lc_data;
1412 struct lfsck_layout *lo = com->lc_file_ram;
1413 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1414 struct ptlrpc_request_set *set;
1415 struct lfsck_tgt_descs *ltds;
1416 struct lfsck_tgt_desc *ltd;
1417 struct lfsck_tgt_desc *next;
1418 struct list_head *head;
1423 set = ptlrpc_prep_set();
1427 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1428 lr->lr_active = LFSCK_TYPE_LAYOUT;
1429 laia->laia_com = com;
1431 laia->laia_shared = 0;
1432 switch (lr->lr_event) {
/* (start event case — label line missing from this view) */
1434 /* Notify OSTs firstly, then handle other MDTs if needed. */
1435 ltds = &lfsck->li_ost_descs;
1436 laia->laia_ltds = ltds;
1437 down_read(<ds->ltd_rw_sem);
1438 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1439 ltd = lfsck_tgt_get(ltds, idx);
1440 LASSERT(ltd != NULL);
1442 laia->laia_ltd = ltd;
1443 ltd->ltd_layout_done = 0;
1444 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1445 lfsck_layout_master_async_interpret,
1446 laia, LFSCK_NOTIFY);
1448 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1449 "notify %s %x for start: rc = %d\n",
1450 lfsck_lfsck2name(lfsck),
1451 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* A target that cannot be notified leaves the scan incomplete. */
1454 lo->ll_flags |= LF_INCOMPLETE;
1457 up_read(<ds->ltd_rw_sem);
1460 rc = ptlrpc_set_wait(set);
1462 ptlrpc_set_destroy(set);
/* Without LPF_ALL_TGT, only the OSTs are involved — done. */
1466 if (!(bk->lb_param & LPF_ALL_TGT))
1469 /* link other MDT targets locallly. */
1470 ltds = &lfsck->li_mdt_descs;
1471 spin_lock(<ds->ltd_lock);
1472 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1473 ltd = LTD_TGT(ltds, idx);
1474 LASSERT(ltd != NULL);
1476 if (!list_empty(<d->ltd_layout_list))
1479 list_add_tail(<d->ltd_layout_list,
1480 &llmd->llmd_mdt_list);
1481 list_add_tail(<d->ltd_layout_phase_list,
1482 &llmd->llmd_mdt_phase1_list);
1484 spin_unlock(<ds->ltd_lock);
/* (LE_STOP case label presumably precedes these — missing line) */
1487 case LE_PHASE2_DONE:
1488 case LE_PEER_EXIT: {
1489 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1490 if (bk->lb_param & LPF_ALL_TGT) {
1491 head = &llmd->llmd_mdt_list;
1492 ltds = &lfsck->li_mdt_descs;
1493 if (lr->lr_event == LE_STOP) {
1494 /* unlink other MDT targets locallly. */
1495 spin_lock(<ds->ltd_lock);
1496 list_for_each_entry_safe(ltd, next, head,
1498 list_del_init(<d->ltd_layout_phase_list);
1499 list_del_init(<d->ltd_layout_list);
1501 spin_unlock(<ds->ltd_lock);
1503 lr->lr_flags |= LEF_TO_OST;
1504 head = &llmd->llmd_ost_list;
1505 ltds = &lfsck->li_ost_descs;
1507 lr->lr_flags &= ~LEF_TO_OST;
1510 lr->lr_flags |= LEF_TO_OST;
1511 head = &llmd->llmd_ost_list;
1512 ltds = &lfsck->li_ost_descs;
1516 laia->laia_ltds = ltds;
1517 spin_lock(<ds->ltd_lock);
/* Drain the list, taking a reference on each target before the
 * lock is dropped for the async send. */
1518 while (!list_empty(head)) {
1519 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1521 if (!list_empty(<d->ltd_layout_phase_list))
1522 list_del_init(<d->ltd_layout_phase_list);
1523 list_del_init(<d->ltd_layout_list);
1524 atomic_inc(<d->ltd_ref);
1525 laia->laia_ltd = ltd;
1526 spin_unlock(<ds->ltd_lock);
1527 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1528 lfsck_layout_master_async_interpret,
1529 laia, LFSCK_NOTIFY);
1531 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1532 "notify %s %x for stop/phase2_done/"
1533 "peer_exit: rc = %d\n",
1534 lfsck_lfsck2name(lfsck),
1535 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1536 "MDT", ltd->ltd_index, rc);
1539 spin_lock(<ds->ltd_lock);
1541 spin_unlock(<ds->ltd_lock);
1543 rc = ptlrpc_set_wait(set);
1545 ptlrpc_set_destroy(set);
/* After the MDTs, loop back once more for the OST list. */
1549 if (!(lr->lr_flags & LEF_TO_OST)) {
1550 lr->lr_flags |= LEF_TO_OST;
1551 head = &llmd->llmd_ost_list;
1552 ltds = &lfsck->li_ost_descs;
1557 case LE_PHASE1_DONE:
1558 llmd->llmd_touch_gen++;
1559 ltds = &lfsck->li_mdt_descs;
1560 laia->laia_ltds = ltds;
1561 spin_lock(<ds->ltd_lock);
1562 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1563 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1564 struct lfsck_tgt_desc,
1565 ltd_layout_phase_list);
/* Generation tag marks targets already notified in this pass. */
1566 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1569 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1570 list_del_init(<d->ltd_layout_phase_list);
1571 list_add_tail(<d->ltd_layout_phase_list,
1572 &llmd->llmd_mdt_phase1_list);
1573 atomic_inc(<d->ltd_ref);
1574 laia->laia_ltd = ltd;
1575 spin_unlock(<ds->ltd_lock);
1576 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1577 lfsck_layout_master_async_interpret,
1578 laia, LFSCK_NOTIFY);
1580 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to "
1581 "notify MDT %x for phase1_done: "
1582 "rc = %d\n", lfsck_lfsck2name(lfsck),
1583 ltd->ltd_index, rc);
1586 spin_lock(<ds->ltd_lock);
1588 spin_unlock(<ds->ltd_lock);
/* default: unexpected event — log and fall out. */
1591 CDEBUG(D_LFSCK, "%s: layout LFSCK unexpected event: rc = %d\n",
1592 lfsck_lfsck2name(lfsck), lr->lr_event);
1597 rc = ptlrpc_set_wait(set);
1598 ptlrpc_set_destroy(set);
/*
 * Record the result of the layout LFSCK second-phase (double) scan in
 * the in-RAM trace file and persist it.
 *
 * The final status is chosen from rc (parameter line not visible in
 * this extraction — TODO confirm):
 *   rc > 0 (presumably): LS_PARTIAL if any target was incomplete,
 *            otherwise LS_COMPLETED (clearing the SCANNED_ONCE/
 *            INCONSISTENT flags unless dry-run);
 *   rc == 0: the instance status, or LS_STOPPED when that is 0;
 *   otherwise: LS_FAILED.
 */
1603 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1604 struct lfsck_component *com,
1607 struct lfsck_instance *lfsck = com->lc_lfsck;
1608 struct lfsck_layout *lo = com->lc_file_ram;
1609 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
/* Serialize trace-file updates against other users of the component. */
1611 down_write(&com->lc_sem);
1612 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1613 HALF_SEC - lfsck->li_time_last_checkpoint);
1614 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1615 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1618 com->lc_journal = 0;
1619 if (lo->ll_flags & LF_INCOMPLETE)
1620 lo->ll_status = LS_PARTIAL;
1622 lo->ll_status = LS_COMPLETED;
1623 if (!(bk->lb_param & LPF_DRYRUN))
1624 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1625 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1626 lo->ll_success_count++;
1627 } else if (rc == 0) {
1628 lo->ll_status = lfsck->li_status;
1629 if (lo->ll_status == 0)
1630 lo->ll_status = LS_STOPPED;
1632 lo->ll_status = LS_FAILED;
/* Persist the updated trace file to disk. */
1635 rc = lfsck_layout_store(env, com);
1636 up_write(&com->lc_sem);
/*
 * Take a local server-side EX inodebits DLM lock on @obj with the given
 * @bits, storing the handle in @lh.  LDLM_FL_ATOMIC_CB is used so the
 * blocking callback runs atomically.  On enqueue failure the handle is
 * zeroed so lfsck_layout_unlock() treats it as unused.
 */
1641 static int lfsck_layout_lock(const struct lu_env *env,
1642 struct lfsck_component *com,
1643 struct dt_object *obj,
1644 struct lustre_handle *lh, __u64 bits)
1646 struct lfsck_thread_info *info = lfsck_env_info(env);
1647 ldlm_policy_data_t *policy = &info->lti_policy;
1648 struct ldlm_res_id *resid = &info->lti_resid;
1649 struct lfsck_instance *lfsck = com->lc_lfsck;
1650 __u64 flags = LDLM_FL_ATOMIC_CB;
1653 LASSERT(lfsck->li_namespace != NULL);
1655 memset(policy, 0, sizeof(*policy));
1656 policy->l_inodebits.bits = bits;
/* Build the resource ID from the object's FID. */
1657 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1658 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1659 policy, LCK_EX, &flags, ldlm_blocking_ast,
1660 ldlm_completion_ast, NULL, NULL, 0,
1661 LVB_T_NONE, NULL, lh);
1662 if (rc == ELDLM_OK) {
/* Failure path: invalidate the handle (success path lines are
 * missing from this extraction). */
1665 memset(lh, 0, sizeof(*lh));
/*
 * Release the EX lock taken by lfsck_layout_lock() and zero the handle.
 * Safe to call with an unused (zeroed) handle — it is a no-op then.
 */
1672 static void lfsck_layout_unlock(struct lustre_handle *lh)
1674 if (lustre_handle_is_used(lh)) {
1675 ldlm_lock_decref(lh, LCK_EX);
1676 memset(lh, 0, sizeof(*lh));
/*
 * Stop the transaction @handle, first recording @result in th_result so
 * the storage layer knows whether the transaction body succeeded.
 * (Return statement not visible in this extraction.)
 */
1680 static int lfsck_layout_trans_stop(const struct lu_env *env,
1681 struct dt_device *dev,
1682 struct thandle *handle, int result)
1686 handle->th_result = result;
1687 rc = dt_trans_stop(env, dev, handle);
1697 * Get the system default stripe size.
1699 * \param[in] env pointer to the thread context
1700 * \param[in] lfsck pointer to the lfsck instance
1701 * \param[out] size pointer to the default stripe size
1703 * \retval 0 for success
1704 * \retval negative error number on failure
/*
 * Fetch the filesystem default stripe size by reading the LOV xattr on
 * the backend local root object.  The value stays in little-endian
 * on-disk byte order (see comment below), so the *size output is LE too.
 */
1706 static int lfsck_layout_get_def_stripesize(const struct lu_env *env,
1707 struct lfsck_instance *lfsck,
1710 struct lov_user_md *lum = &lfsck_env_info(env)->lti_lum;
1711 struct dt_object *root;
1714 root = dt_locate(env, lfsck->li_next, &lfsck->li_local_root_fid);
1716 return PTR_ERR(root);
1718 /* Get the default stripe size via xattr_get on the backend root. */
1719 rc = dt_xattr_get(env, root, lfsck_buf_get(env, lum, sizeof(*lum)),
1720 XATTR_NAME_LOV, BYPASS_CAPA);
1722 /* The lum->lmm_stripe_size is LE mode. The *size also
1723 * should be LE mode. So it is unnecessary to convert. */
1724 *size = lum->lmm_stripe_size;
1726 } else if (unlikely(rc == 0)) {
/* rc == 0 means an empty xattr — handling lines missing here. */
1730 lfsck_object_put(env, root);
1736 * \retval +1: repaired
1737 * \retval 0: did nothing
1738 * \retval -ve: on error
/*
 * Fill the given layout EA @slot of @parent with the OST-object @cfid /
 * @ost_idx, then write the LOV xattr back under the caller's transaction.
 * If no other dummy (hole) slot remains, LOV_PATTERN_F_HOLE is cleared
 * from lmm_pattern.
 */
1740 static int lfsck_layout_refill_lovea(const struct lu_env *env,
1741 struct thandle *handle,
1742 struct dt_object *parent,
1743 struct lu_fid *cfid,
1745 struct lov_ost_data_v1 *slot,
1746 int fl, __u32 ost_idx)
1748 struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
1749 struct lov_mds_md_v1 *lmm = buf->lb_buf;
/* Store the child FID into the slot in on-disk (LE) byte order. */
1752 fid_to_ostid(cfid, oi);
1753 ostid_cpu_to_le(oi, &slot->l_ost_oi);
1754 slot->l_ost_gen = cpu_to_le32(0);
1755 slot->l_ost_idx = cpu_to_le32(ost_idx);
1757 if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE) {
1758 struct lov_ost_data_v1 *objs;
1762 count = le16_to_cpu(lmm->lmm_stripe_count);
/* V1 and V3 layouts keep lmm_objects at different offsets. */
1763 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
1764 objs = &lmm->lmm_objects[0];
1766 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Scan for any remaining dummy slot other than the one just filled. */
1767 for (i = 0; i < count; i++, objs++) {
1768 if (objs != slot && lovea_slot_is_dummy(objs))
1772 /* If the @slot is the last dummy slot to be refilled,
1773 * then drop LOV_PATTERN_F_HOLE from lmm::lmm_pattern. */
1775 lmm->lmm_pattern &= ~cpu_to_le32(LOV_PATTERN_F_HOLE);
1778 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
1787 * \retval +1: repaired
1788 * \retval 0: did nothing
1789 * \retval -ve: on error
/*
 * Create or extend the LOV EA of @parent so that stripe slot @ea_off can
 * reference the OST-object @cfid on @ost_idx.
 *
 * On LU_XATTR_CREATE (or @reset) a fresh V1 layout is built, marked with
 * LOV_PATTERN_F_HOLE when slot 0 is not the one being filled; otherwise
 * the existing layout is extended, the intermediate gap slots zeroed as
 * dummies, and the layout generation bumped.  The actual slot fill and
 * xattr write are delegated to lfsck_layout_refill_lovea().
 */
1791 static int lfsck_layout_extend_lovea(const struct lu_env *env,
1792 struct lfsck_instance *lfsck,
1793 struct thandle *handle,
1794 struct dt_object *parent,
1795 struct lu_fid *cfid,
1796 struct lu_buf *buf, int fl,
1797 __u32 ost_idx, __u32 ea_off, bool reset)
1799 struct lov_mds_md_v1 *lmm = buf->lb_buf;
1800 struct lov_ost_data_v1 *objs;
1806 if (fl == LU_XATTR_CREATE || reset) {
1807 __u32 pattern = LOV_PATTERN_RAID0;
1810 LASSERT(buf->lb_len == lov_mds_md_size(count, LOV_MAGIC_V1));
/* Filling a slot past 0 on a fresh layout leaves holes before it. */
1812 if (ea_off != 0 || reset) {
1813 pattern |= LOV_PATTERN_F_HOLE;
1817 memset(lmm, 0, buf->lb_len);
1818 lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
1819 lmm->lmm_pattern = cpu_to_le32(pattern);
1820 fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
1821 lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
1823 rc = lfsck_layout_get_def_stripesize(env, lfsck,
1824 &lmm->lmm_stripe_size);
1828 objs = &lmm->lmm_objects[ea_off];
1830 __u32 magic = le32_to_cpu(lmm->lmm_magic);
1833 count = le16_to_cpu(lmm->lmm_stripe_count);
1834 if (magic == LOV_MAGIC_V1)
1835 objs = &lmm->lmm_objects[count];
1837 objs = &((struct lov_mds_md_v3 *)lmm)->
/* Slots between the current end and @ea_off become dummies. */
1840 gap = ea_off - count;
1843 LASSERT(buf->lb_len == lov_mds_md_size(count, magic));
1846 memset(objs, 0, gap * sizeof(*objs));
1847 lmm->lmm_pattern |= cpu_to_le32(LOV_PATTERN_F_HOLE);
/* Bump the layout generation since the striping changed. */
1851 lmm->lmm_layout_gen =
1852 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
1856 lmm->lmm_stripe_count = cpu_to_le16(count);
1857 rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
1860 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant extend layout EA for "
1861 DFID": parent "DFID", OST-index %u, stripe-index %u, fl %d, "
1862 "reset %s, %s LOV EA hole: rc = %d\n",
1863 lfsck_lfsck2name(lfsck), PFID(cfid), PFID(lfsck_dto2fid(parent)),
1864 ost_idx, ea_off, fl, reset ? "yes" : "no",
1865 hole ? "with" : "without", rc);
1871 * \retval +1: repaired
1872 * \retval 0: did nothing
1873 * \retval -ve: on error
/*
 * Rewrite the parent-FID (filter_fid) xattr of the OST-object @cfid on
 * device @cdev so it points back at @parent with stripe index @ea_off.
 * Runs its own transaction on @cdev; returns +1 on successful repair
 * (see the GOTO at the xattr_set below).
 */
1875 static int lfsck_layout_update_pfid(const struct lu_env *env,
1876 struct lfsck_component *com,
1877 struct dt_object *parent,
1878 struct lu_fid *cfid,
1879 struct dt_device *cdev, __u32 ea_off)
1881 struct filter_fid *pfid = &lfsck_env_info(env)->lti_new_pfid;
1882 struct dt_object *child;
1883 struct thandle *handle;
1884 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1889 child = lfsck_object_find_by_dev(env, cdev, cfid);
1891 RETURN(PTR_ERR(child));
1893 handle = dt_trans_create(env, cdev);
1895 GOTO(out, rc = PTR_ERR(handle));
/* filter_fid is stored little-endian on disk. */
1897 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1898 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1899 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
1900 * MDT-object's FID::f_ver, instead it is the OST-object index in its
1901 * parent MDT-object's layout EA. */
1902 pfid->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
1903 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1905 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1909 rc = dt_trans_start(env, cdev, handle);
1913 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
/* Map success (0) to +1: "repaired". */
1916 GOTO(stop, rc = (rc == 0 ? 1 : rc));
1919 dt_trans_stop(env, cdev, handle);
1922 lu_object_put(env, &child->do_lu);
1928 * This function will create the MDT-object with the given (partial) LOV EA.
1930 * Under some data corruption cases, the MDT-object of the file may be lost,
1931 * but its OST-objects, or some of them are there. The layout LFSCK needs to
1932 * re-create the MDT-object with the orphan OST-object(s) information.
1934 * On the other hand, the LFSCK may has created some OST-object for repairing
1935 * dangling LOV EA reference, but as the LFSCK processing, it may find that
1936 * the old OST-object is there and should replace the former new created OST
1937 * object. Unfortunately, some others have modified such newly created object.
1938 * To keep the data (both new and old), the LFSCK will create MDT-object with
1939 * new FID to reference the original OST-object.
1941 * \param[in] env pointer to the thread context
1942 * \param[in] com pointer to the lfsck component
1943 * \param[in] ltd pointer to target device descriptor
1944 * \param[in] rec pointer to the record for the orphan OST-object
1945 * \param[in] cfid pointer to FID for the orphan OST-object
1946 * \param[in] infix additional information, such as the FID for original
1947 * MDT-object and the stripe offset in the LOV EA
1948 * \param[in] type the type for describing why the orphan MDT-object is
1949 * created. The rules are as following:
1951 * type "C": Multiple OST-objects claim the same MDT-object and the
1952 * same slot in the layout EA. Then the LFSCK will create
1953 * new MDT-object(s) to hold the conflict OST-object(s).
1955 * type "N": The orphan OST-object does not know which one was the
1956 * real parent MDT-object, so the LFSCK uses new FID for
1957 * its parent MDT-object.
1959 * type "R": The orphan OST-object knows its parent MDT-object FID,
1960 * but does not know the position (the file name) in the
1963 * The orphan name will be like:
1964 * ${FID}-${infix}-${type}-${conflict_version}
1966 * \param[in] ea_off the stripe offset in the LOV EA
1968 * \retval positive on repaired something
1969 * \retval 0 if needs to repair nothing
1970 * \retval negative error number on failure
/*
 * Re-create a lost MDT-object under .lustre/lost+found/MDTxxxx/ for an
 * orphan OST-object (see the block comment above this function in the
 * full source for the naming rules and the C/N/R type semantics).
 *
 * Two-phase transaction: all dt_declare_* calls first (1a-5a), then the
 * executing steps (1b-5b): update the child's parent FID remotely,
 * create the MDT-object, add its layout EA, insert it into lost+found,
 * and add its linkEA.  Returns 1 when anything was repaired (rc >= 0),
 * otherwise the negative error.
 *
 * NOTE(review): partial extraction — GOTO targets/labels and several
 * error-path lines are missing from this view.
 */
1972 static int lfsck_layout_recreate_parent(const struct lu_env *env,
1973 struct lfsck_component *com,
1974 struct lfsck_tgt_desc *ltd,
1975 struct lu_orphan_rec *rec,
1976 struct lu_fid *cfid,
1981 struct lfsck_thread_info *info = lfsck_env_info(env);
1982 struct dt_insert_rec *dtrec = &info->lti_dt_rec;
1983 char *name = info->lti_key;
1984 struct lu_attr *la = &info->lti_la;
1985 struct dt_object_format *dof = &info->lti_dof;
1986 struct lfsck_instance *lfsck = com->lc_lfsck;
1987 struct lu_fid *pfid = &rec->lor_fid;
1988 struct lu_fid *tfid = &info->lti_fid3;
1989 struct dt_device *next = lfsck->li_next;
1990 struct dt_object *pobj = NULL;
1991 struct dt_object *cobj = NULL;
1992 struct thandle *th = NULL;
1993 struct lu_buf *pbuf = NULL;
1994 struct lu_buf *ea_buf = &info->lti_big_buf;
1995 struct lustre_handle lh = { 0 };
1996 struct linkea_data ldata = { 0 };
1997 struct lu_buf linkea_buf;
1998 const struct lu_name *pname;
/* Remember the shared big buffer's length so it can be restored. */
1999 int buflen = ea_buf->lb_len;
2004 /* Create .lustre/lost+found/MDTxxxx when needed. */
2005 if (unlikely(lfsck->li_lpf_obj == NULL)) {
2006 rc = lfsck_create_lpf(env, lfsck);
/* No known parent FID: allocate a new one and prepare the child's
 * filter_fid so the OST-object will reference it. */
2011 if (fid_is_zero(pfid)) {
2012 struct filter_fid *ff = &info->lti_new_pfid;
2014 rc = lfsck_fid_alloc(env, lfsck, pfid, false);
2018 ff->ff_parent.f_seq = cpu_to_le64(pfid->f_seq);
2019 ff->ff_parent.f_oid = cpu_to_le32(pfid->f_oid);
2020 /* Currently, the filter_fid::ff_parent::f_ver is not the
2021 * real parent MDT-object's FID::f_ver, instead it is the
2022 * OST-object index in its parent MDT-object's layout EA. */
2023 ff->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
2024 pbuf = lfsck_buf_get(env, ff, sizeof(struct filter_fid));
2025 cobj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, cfid);
2027 GOTO(log, rc = PTR_ERR(cobj));
2030 pobj = lfsck_object_find_by_dev(env, lfsck->li_bottom, pfid);
2032 GOTO(put, rc = PTR_ERR(pobj));
2034 LASSERT(infix != NULL);
2035 LASSERT(type != NULL);
/* Build the orphan name ${FID}-${infix}-${type}-${version} and make
 * sure it does not already exist in lost+found. */
2038 snprintf(name, NAME_MAX, DFID"%s-%s-%d", PFID(pfid), infix,
2040 rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
2041 (const struct dt_key *)name, BYPASS_CAPA);
2042 if (rc != 0 && rc != -ENOENT)
2046 rc = linkea_data_new(&ldata,
2047 &lfsck_env_info(env)->lti_linkea_buf);
2051 pname = lfsck_name_get_const(env, name, strlen(name));
2052 rc = linkea_add_buf(&ldata, pname, lfsck_dto2fid(lfsck->li_lpf_obj));
/* The recreated file gets the orphan's uid/gid and minimal mode. */
2056 memset(la, 0, sizeof(*la));
2057 la->la_uid = rec->lor_uid;
2058 la->la_gid = rec->lor_gid;
2059 la->la_mode = S_IFREG | S_IRUSR;
2060 la->la_valid = LA_MODE | LA_UID | LA_GID;
2062 memset(dof, 0, sizeof(*dof));
2063 dof->dof_type = dt_mode_to_dft(S_IFREG);
/* Size the layout EA buffer for ea_off + 1 stripes. */
2065 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2067 lu_buf_realloc(ea_buf, rc);
2068 buflen = ea_buf->lb_len;
2069 if (ea_buf->lb_buf == NULL)
2070 GOTO(put, rc = -ENOMEM);
2072 ea_buf->lb_len = rc;
2075 /* Hold update lock on the .lustre/lost+found/MDTxxxx/.
2077 * XXX: Currently, we do not grab the PDO lock as normal create cases,
2078 * because creating MDT-object for orphan OST-object is rare, we
2079 * do not much care about the performance. It can be improved in
2080 * the future when needed. */
2081 rc = lfsck_layout_lock(env, com, lfsck->li_lpf_obj, &lh,
2082 MDS_INODELOCK_UPDATE);
2086 th = dt_trans_create(env, next);
2088 GOTO(unlock, rc = PTR_ERR(th));
2090 /* 1a. Update OST-object's parent information remotely.
2092 * If other subsequent modifications failed, then next LFSCK scanning
2093 * will process the OST-object as orphan again with known parent FID. */
2095 rc = dt_declare_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th);
2100 /* 2a. Create the MDT-object locally. */
2101 rc = dt_declare_create(env, pobj, la, NULL, dof, th);
2105 /* 3a. Add layout EA for the MDT-object. */
2106 rc = dt_declare_xattr_set(env, pobj, ea_buf, XATTR_NAME_LOV,
2107 LU_XATTR_CREATE, th);
2111 /* 4a. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2112 dtrec->rec_fid = pfid;
2113 dtrec->rec_type = S_IFREG;
2114 rc = dt_declare_insert(env, lfsck->li_lpf_obj,
2115 (const struct dt_rec *)dtrec,
2116 (const struct dt_key *)name, th);
2120 /* 5a. insert linkEA for parent. */
2121 linkea_buf.lb_buf = ldata.ld_buf->lb_buf;
2122 linkea_buf.lb_len = ldata.ld_leh->leh_len;
2123 rc = dt_declare_xattr_set(env, pobj, &linkea_buf,
2124 XATTR_NAME_LINK, 0, th);
2128 rc = dt_trans_start(env, next, th);
2132 /* 1b. Update OST-object's parent information remotely. */
2134 rc = dt_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th,
2140 dt_write_lock(env, pobj, 0);
2141 /* 2b. Create the MDT-object locally. */
2142 rc = dt_create(env, pobj, la, NULL, dof, th);
2144 /* 3b. Add layout EA for the MDT-object. */
2145 rc = lfsck_layout_extend_lovea(env, lfsck, th, pobj, cfid,
2146 ea_buf, LU_XATTR_CREATE,
2147 ltd->ltd_index, ea_off, false);
2148 dt_write_unlock(env, pobj);
2152 /* 4b. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2153 rc = dt_insert(env, lfsck->li_lpf_obj, (const struct dt_rec *)dtrec,
2154 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
2158 /* 5b. insert linkEA for parent. */
2159 rc = dt_xattr_set(env, pobj, &linkea_buf,
2160 XATTR_NAME_LINK, 0, th, BYPASS_CAPA);
2165 dt_trans_stop(env, next, th);
2168 lfsck_layout_unlock(&lh);
2171 if (cobj != NULL && !IS_ERR(cobj))
2172 lu_object_put(env, &cobj->do_lu);
2173 if (pobj != NULL && !IS_ERR(pobj))
2174 lu_object_put(env, &pobj->do_lu);
/* Restore the shared buffer length for subsequent users. */
2175 ea_buf->lb_len = buflen;
2179 CDEBUG(D_LFSCK, "%s layout LFSCK assistant failed to "
2180 "recreate the lost MDT-object: parent "DFID
2181 ", child "DFID", OST-index %u, stripe-index %u, "
2182 "infix %s, type %s: rc = %d\n",
2183 lfsck_lfsck2name(lfsck), PFID(pfid), PFID(cfid),
2184 ltd->ltd_index, ea_off, infix, type, rc);
2186 return rc >= 0 ? 1 : rc;
/*
 * Ask the OST @index to conditionally destroy the OST-object @fid by
 * sending an LE_CONDITIONAL_DESTROY LFSCK_NOTIFY RPC.  Requires the
 * OST export to support OBD_CONNECT_LFSCK; returns -EOPNOTSUPP
 * otherwise.  The actual destroy decision is made on the OST side
 * (see lfsck_layout_slave_conditional_destroy()).
 */
2189 static int lfsck_layout_master_conditional_destroy(const struct lu_env *env,
2190 struct lfsck_component *com,
2191 const struct lu_fid *fid,
2194 struct lfsck_thread_info *info = lfsck_env_info(env);
2195 struct lfsck_request *lr = &info->lti_lr;
2196 struct lfsck_instance *lfsck = com->lc_lfsck;
2197 struct lfsck_tgt_desc *ltd;
2198 struct ptlrpc_request *req;
2199 struct lfsck_request *tmp;
2200 struct obd_export *exp;
2204 ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
2205 if (unlikely(ltd == NULL))
2209 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
2210 GOTO(put, rc = -EOPNOTSUPP);
2212 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2214 GOTO(put, rc = -ENOMEM);
2216 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2218 ptlrpc_request_free(req);
2223 memset(lr, 0, sizeof(*lr));
2224 lr->lr_event = LE_CONDITIONAL_DESTROY;
2225 lr->lr_active = LFSCK_TYPE_LAYOUT;
/* Copy the request body into the RPC capsule and send synchronously. */
2228 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2230 ptlrpc_request_set_replen(req);
2232 rc = ptlrpc_queue_wait(req);
2233 ptlrpc_req_finished(req);
/*
 * OST-side handler for LE_CONDITIONAL_DESTROY: destroy the local
 * OST-object lr->lr_fid only if it still looks like an untouched object
 * created by a former dangling-reference repair (la_ctime == 0; see the
 * first attr check below — NOTE(review): the S_ISUID part of that check
 * reads oddly vs. the block comment further down, confirm against the
 * pristine source).  Returns -ETXTBSY when the object must be kept.
 *
 * An EX extent lock on [0, EOF] is taken first to flush and exclude any
 * in-flight writes, and the ctime is re-checked under the write lock
 * before the destroy transaction runs.
 */
2243 static int lfsck_layout_slave_conditional_destroy(const struct lu_env *env,
2244 struct lfsck_component *com,
2245 struct lfsck_request *lr)
2247 struct lfsck_thread_info *info = lfsck_env_info(env);
2248 struct lu_attr *la = &info->lti_la;
2249 ldlm_policy_data_t *policy = &info->lti_policy;
2250 struct ldlm_res_id *resid = &info->lti_resid;
2251 struct lfsck_instance *lfsck = com->lc_lfsck;
2252 struct dt_device *dev = lfsck->li_bottom;
2253 struct lu_fid *fid = &lr->lr_fid;
2254 struct dt_object *obj;
2255 struct thandle *th = NULL;
2256 struct lustre_handle lh = { 0 };
2261 obj = lfsck_object_find_by_dev(env, dev, fid);
2263 RETURN(PTR_ERR(obj));
2265 dt_read_lock(env, obj, 0);
2266 if (dt_object_exists(obj) == 0) {
2267 dt_read_unlock(env, obj);
2269 GOTO(put, rc = -ENOENT);
2272 /* Get obj's attr without lock firstly. */
2273 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2274 dt_read_unlock(env, obj);
2278 if (likely(la->la_ctime != 0 || la->la_mode & S_ISUID))
2279 GOTO(put, rc = -ETXTBSY);
2281 /* Acquire extent lock on [0, EOF] to sync with all possible written. */
2282 LASSERT(lfsck->li_namespace != NULL);
2284 memset(policy, 0, sizeof(*policy));
2285 policy->l_extent.end = OBD_OBJECT_EOF;
2286 ost_fid_build_resid(fid, resid);
2287 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_EXTENT,
2288 policy, LCK_EX, &flags, ldlm_blocking_ast,
2289 ldlm_completion_ast, NULL, NULL, 0,
2290 LVB_T_NONE, NULL, &lh);
2292 GOTO(put, rc = -EIO);
2294 dt_write_lock(env, obj, 0);
2295 /* Get obj's attr within lock again. */
2296 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Re-check under lock: any modification since the lockless check
 * (ctime now set) means the object is in use — keep it. */
2300 if (la->la_ctime != 0)
2301 GOTO(unlock, rc = -ETXTBSY);
2303 th = dt_trans_create(env, dev);
2305 GOTO(unlock, rc = PTR_ERR(th));
2307 rc = dt_declare_ref_del(env, obj, th);
2311 rc = dt_declare_destroy(env, obj, th);
2315 rc = dt_trans_start_local(env, dev, th);
2319 rc = dt_ref_del(env, obj, th);
2323 rc = dt_destroy(env, obj, th);
2325 CDEBUG(D_LFSCK, "%s: layout LFSCK destroyed the empty "
2326 "OST-object "DFID" that was created for reparing "
2327 "dangling referenced case. But the original missed "
2328 "OST-object is found now.\n",
2329 lfsck_lfsck2name(lfsck), PFID(fid));
2334 dt_trans_stop(env, dev, th);
2337 dt_write_unlock(env, obj);
2338 ldlm_lock_decref(&lh, LCK_EX);
2341 lu_object_put(env, &obj->do_lu);
2347 * Some OST-object has occupied the specified layout EA slot.
2348 * Such OST-object may be generated by the LFSCK when repair
2349 * dangling referenced MDT-object, which can be indicated by
2350 * attr::la_ctime == 0 but without S_ISUID in la_mode. If it
2351 * is true and such OST-object has not been modified yet, we
2352 * will replace it with the orphan OST-object; otherwise the
2353 * LFSCK will create new MDT-object to reference the orphan.
2355 * \retval +1: repaired
2356 * \retval 0: did nothing
2357 * \retval -ve: on error
/*
 * Resolve a conflict where another OST-object already occupies the
 * layout EA slot @ea_off of @parent that the orphan @cfid claims (see
 * the block comment above this function).
 *
 * Under the parent's layout lock, try to conditionally destroy the
 * current occupant on its OST.  If that fails with -ETXTBSY (occupant
 * was modified / not an LFSCK-created placeholder), create a brand-new
 * MDT-object for the orphan instead (type "C"); otherwise replace the
 * slot with @cfid and bump the layout generation.  Returns 1 when
 * something was repaired (rc >= 0), negative error otherwise.
 */
2359 static int lfsck_layout_conflict_create(const struct lu_env *env,
2360 struct lfsck_component *com,
2361 struct lfsck_tgt_desc *ltd,
2362 struct lu_orphan_rec *rec,
2363 struct dt_object *parent,
2364 struct lu_fid *cfid,
2365 struct lu_buf *ea_buf,
2366 struct lov_ost_data_v1 *slot,
2367 __u32 ea_off, __u32 ori_len)
2369 struct lfsck_thread_info *info = lfsck_env_info(env);
2370 struct lu_fid *cfid2 = &info->lti_fid2;
2371 struct ost_id *oi = &info->lti_oi;
2372 char *infix = info->lti_tmpbuf;
2373 struct lov_mds_md_v1 *lmm = ea_buf->lb_buf;
2374 struct dt_device *dev = com->lc_lfsck->li_bottom;
2375 struct thandle *th = NULL;
2376 struct lustre_handle lh = { 0 };
2377 __u32 ost_idx2 = le32_to_cpu(slot->l_ost_idx);
/* Decode the FID of the OST-object currently occupying the slot. */
2381 ostid_le_to_cpu(&slot->l_ost_oi, oi);
2382 ostid_to_fid(cfid2, oi, ost_idx2);
2384 /* Hold layout lock on the parent to prevent others to access. */
2385 rc = lfsck_layout_lock(env, com, parent, &lh,
2386 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2390 rc = lfsck_layout_master_conditional_destroy(env, com, cfid2, ost_idx2);
2392 /* If the conflict OST-obejct is not created for fixing dangling
2393 * referenced MDT-object in former LFSCK check/repair, or it has
2394 * been modified by others, then we cannot destroy it. Re-create
2395 * a new MDT-object for the orphan OST-object. */
2396 if (rc == -ETXTBSY) {
2397 /* No need the layout lock on the original parent. */
2398 lfsck_layout_unlock(&lh);
2399 ea_buf->lb_len = ori_len;
/* Force a freshly allocated FID for the new parent (type "C"). */
2401 fid_zero(&rec->lor_fid);
2402 snprintf(infix, LFSCK_TMPBUF_LEN, "-"DFID"-%x",
2403 PFID(lu_object_fid(&parent->do_lu)), ea_off);
2404 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2405 infix, "C", ea_off);
2410 if (rc != 0 && rc != -ENOENT)
2413 th = dt_trans_create(env, dev);
2415 GOTO(unlock, rc = PTR_ERR(th));
2417 rc = dt_declare_xattr_set(env, parent, ea_buf, XATTR_NAME_LOV,
2418 LU_XATTR_REPLACE, th);
2422 rc = dt_trans_start_local(env, dev, th);
2426 dt_write_lock(env, parent, 0);
/* Replacing the stripe changes the layout: bump the generation. */
2427 lmm->lmm_layout_gen = cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2428 rc = lfsck_layout_refill_lovea(env, th, parent, cfid, ea_buf, slot,
2429 LU_XATTR_REPLACE, ltd->ltd_index);
2430 dt_write_unlock(env, parent);
2435 dt_trans_stop(env, dev, th);
2438 lfsck_layout_unlock(&lh);
/* Restore the caller's buffer length. */
2441 ea_buf->lb_len = ori_len;
2443 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant replaced the conflict "
2444 "OST-object "DFID" on the OST %x with the orphan "DFID" on "
2445 "the OST %x: parent "DFID", stripe-index %u: rc = %d\n",
2446 lfsck_lfsck2name(com->lc_lfsck), PFID(cfid2), ost_idx2,
2447 PFID(cfid), ltd->ltd_index, PFID(lfsck_dto2fid(parent)),
2450 return rc >= 0 ? 1 : rc;
2454 * \retval +1: repaired
2455 * \retval 0: did nothing
2456 * \retval -ve: on error
2458 static int lfsck_layout_recreate_lovea(const struct lu_env *env,
2459 struct lfsck_component *com,
2460 struct lfsck_tgt_desc *ltd,
2461 struct lu_orphan_rec *rec,
2462 struct dt_object *parent,
2463 struct lu_fid *cfid,
2464 __u32 ost_idx, __u32 ea_off)
2466 struct lfsck_thread_info *info = lfsck_env_info(env);
2467 struct lu_buf *buf = &info->lti_big_buf;
2468 struct lu_fid *fid = &info->lti_fid2;
2469 struct ost_id *oi = &info->lti_oi;
2470 struct lfsck_instance *lfsck = com->lc_lfsck;
2471 struct dt_device *dt = lfsck->li_bottom;
2472 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2473 struct thandle *handle = NULL;
2474 size_t buflen = buf->lb_len;
2476 struct lov_mds_md_v1 *lmm;
2477 struct lov_ost_data_v1 *objs;
2478 struct lustre_handle lh = { 0 };
2485 bool locked = false;
2488 rc = lfsck_layout_lock(env, com, parent, &lh,
2489 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2491 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to recreate "
2492 "LOV EA for "DFID": parent "DFID", OST-index %u, "
2493 "stripe-index %u: rc = %d\n",
2494 lfsck_lfsck2name(lfsck), PFID(cfid),
2495 PFID(lfsck_dto2fid(parent)), ost_idx, ea_off, rc);
2502 dt_write_unlock(env, parent);
2506 if (handle != NULL) {
2507 dt_trans_stop(env, dt, handle);
2512 GOTO(unlock_layout, rc);
2515 if (buf->lb_len < lovea_size) {
2516 lu_buf_realloc(buf, lovea_size);
2517 buflen = buf->lb_len;
2518 if (buf->lb_buf == NULL)
2519 GOTO(unlock_layout, rc = -ENOMEM);
2522 if (!(bk->lb_param & LPF_DRYRUN)) {
2523 handle = dt_trans_create(env, dt);
2525 GOTO(unlock_layout, rc = PTR_ERR(handle));
2527 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2532 rc = dt_trans_start_local(env, dt, handle);
2537 dt_write_lock(env, parent, 0);
2539 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2540 if (rc == -ERANGE) {
2541 rc = dt_xattr_get(env, parent, &LU_BUF_NULL, XATTR_NAME_LOV,
2545 } else if (rc == -ENODATA || rc == 0) {
2546 lovea_size = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2547 /* If the declared is not big enough, re-try. */
2548 if (buf->lb_len < lovea_size) {
2552 fl = LU_XATTR_CREATE;
2553 } else if (rc < 0) {
2554 GOTO(unlock_parent, rc);
2555 } else if (unlikely(buf->lb_len == 0)) {
2558 fl = LU_XATTR_REPLACE;
2562 if (fl == LU_XATTR_CREATE) {
2563 if (bk->lb_param & LPF_DRYRUN)
2564 GOTO(unlock_parent, rc = 1);
2566 LASSERT(buf->lb_len >= lovea_size);
2568 buf->lb_len = lovea_size;
2569 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2570 buf, fl, ost_idx, ea_off, false);
2572 GOTO(unlock_parent, rc);
2576 rc1 = lfsck_layout_verify_header(lmm);
2578 /* If the LOV EA crashed, the rebuild it. */
2579 if (rc1 == -EINVAL) {
2580 if (bk->lb_param & LPF_DRYRUN)
2581 GOTO(unlock_parent, rc = 1);
2583 LASSERT(buf->lb_len >= lovea_size);
2585 buf->lb_len = lovea_size;
2586 memset(lmm, 0, buf->lb_len);
2587 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2588 buf, fl, ost_idx, ea_off, true);
2590 GOTO(unlock_parent, rc);
2593 /* For other unknown magic/pattern, keep the current LOV EA. */
2595 GOTO(unlock_parent, rc = rc1);
2597 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2598 * been verified in lfsck_layout_verify_header() already. If some
2599 * new magic introduced in the future, then layout LFSCK needs to
2600 * be updated also. */
2601 magic = le32_to_cpu(lmm->lmm_magic);
2602 if (magic == LOV_MAGIC_V1) {
2603 objs = &lmm->lmm_objects[0];
2605 LASSERT(magic == LOV_MAGIC_V3);
2606 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2609 count = le16_to_cpu(lmm->lmm_stripe_count);
2611 GOTO(unlock_parent, rc = -EINVAL);
2614 /* Exceed the current end of MDT-object layout EA. Then extend it. */
2615 if (count <= ea_off) {
2616 if (bk->lb_param & LPF_DRYRUN)
2617 GOTO(unlock_parent, rc = 1);
2619 lovea_size = lov_mds_md_size(ea_off + 1, magic);
2620 /* If the declared is not big enough, re-try. */
2621 if (buf->lb_len < lovea_size) {
2625 buf->lb_len = lovea_size;
2626 rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
2627 buf, fl, ost_idx, ea_off, false);
2629 GOTO(unlock_parent, rc);
2632 LASSERTF(rc > 0, "invalid rc = %d\n", rc);
2634 buf->lb_len = lovea_size;
2635 for (i = 0; i < count; i++, objs++) {
2636 /* The MDT-object was created via lfsck_layout_recover_create()
2637 * by others before, and we fill the dummy layout EA. */
2638 if (lovea_slot_is_dummy(objs)) {
2642 if (bk->lb_param & LPF_DRYRUN)
2643 GOTO(unlock_parent, rc = 1);
2645 lmm->lmm_layout_gen =
2646 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2647 rc = lfsck_layout_refill_lovea(env, handle, parent,
2648 cfid, buf, objs, fl,
2651 CDEBUG(D_LFSCK, "%s layout LFSCK assistant fill "
2652 "dummy layout slot for "DFID": parent "DFID
2653 ", OST-index %u, stripe-index %u: rc = %d\n",
2654 lfsck_lfsck2name(lfsck), PFID(cfid),
2655 PFID(lfsck_dto2fid(parent)), ost_idx, i, rc);
2657 GOTO(unlock_parent, rc);
2660 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2661 ostid_to_fid(fid, oi, le32_to_cpu(objs->l_ost_idx));
2662 /* It should be rare case, the slot is there, but the LFSCK
2663 * does not handle it during the first-phase cycle scanning. */
2664 if (unlikely(lu_fid_eq(fid, cfid))) {
2666 GOTO(unlock_parent, rc = 0);
2668 /* Rare case that the OST-object index
2669 * does not match the parent MDT-object
2670 * layout EA. We trust the later one. */
2671 if (bk->lb_param & LPF_DRYRUN)
2672 GOTO(unlock_parent, rc = 1);
2674 dt_write_unlock(env, parent);
2676 dt_trans_stop(env, dt, handle);
2677 lfsck_layout_unlock(&lh);
2678 buf->lb_len = buflen;
2679 rc = lfsck_layout_update_pfid(env, com, parent,
2680 cfid, ltd->ltd_tgt, i);
2682 CDEBUG(D_LFSCK, "%s layout LFSCK assistant "
2683 "updated OST-object's pfid for "DFID
2684 ": parent "DFID", OST-index %u, "
2685 "stripe-index %u: rc = %d\n",
2686 lfsck_lfsck2name(lfsck), PFID(cfid),
2687 PFID(lfsck_dto2fid(parent)),
2688 ltd->ltd_index, i, rc);
2695 /* The MDT-object exists, but related layout EA slot is occupied
2697 if (bk->lb_param & LPF_DRYRUN)
2698 GOTO(unlock_parent, rc = 1);
2700 dt_write_unlock(env, parent);
2702 dt_trans_stop(env, dt, handle);
2703 lfsck_layout_unlock(&lh);
2704 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
2705 objs = &lmm->lmm_objects[ea_off];
2707 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
2708 rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
2709 buf, objs, ea_off, buflen);
2715 dt_write_unlock(env, parent);
2719 dt_trans_stop(env, dt, handle);
2722 lfsck_layout_unlock(&lh);
2723 buf->lb_len = buflen;
2728 static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
2729 struct lfsck_component *com,
2730 struct lfsck_tgt_desc *ltd,
2731 struct lu_orphan_rec *rec,
2732 struct lu_fid *cfid)
2734 struct lfsck_layout *lo = com->lc_file_ram;
2735 struct lu_fid *pfid = &rec->lor_fid;
2736 struct dt_object *parent = NULL;
2737 __u32 ea_off = pfid->f_stripe_idx;
2741 if (!fid_is_sane(cfid))
2742 GOTO(out, rc = -EINVAL);
2744 if (fid_is_zero(pfid)) {
2745 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2751 if (!fid_is_sane(pfid))
2752 GOTO(out, rc = -EINVAL);
2754 parent = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
2756 GOTO(out, rc = PTR_ERR(parent));
2758 if (unlikely(dt_object_remote(parent) != 0))
2759 GOTO(put, rc = -EXDEV);
2761 if (dt_object_exists(parent) == 0) {
2762 lu_object_put(env, &parent->do_lu);
2763 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2768 if (!S_ISREG(lu_object_attr(&parent->do_lu)))
2769 GOTO(put, rc = -EISDIR);
2771 rc = lfsck_layout_recreate_lovea(env, com, ltd, rec, parent, cfid,
2772 ltd->ltd_index, ea_off);
2778 lu_object_put(env, &parent->do_lu);
2780 /* The layout EA is changed, need to be reloaded next time. */
2781 lu_object_put_nocache(env, &parent->do_lu);
2784 down_write(&com->lc_sem);
2785 com->lc_new_scanned++;
2786 com->lc_new_checked++;
2788 lo->ll_objs_repaired[LLIT_ORPHAN - 1]++;
2790 } else if (rc < 0) {
2791 lo->ll_objs_failed_phase2++;
2793 up_write(&com->lc_sem);
2798 static int lfsck_layout_scan_orphan(const struct lu_env *env,
2799 struct lfsck_component *com,
2800 struct lfsck_tgt_desc *ltd)
2802 struct lfsck_layout *lo = com->lc_file_ram;
2803 struct lfsck_instance *lfsck = com->lc_lfsck;
2804 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2805 struct lfsck_thread_info *info = lfsck_env_info(env);
2806 struct ost_id *oi = &info->lti_oi;
2807 struct lu_fid *fid = &info->lti_fid;
2808 struct dt_object *obj;
2809 const struct dt_it_ops *iops;
2814 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant starts the orphan "
2815 "scanning for OST%04x\n",
2816 lfsck_lfsck2name(lfsck), ltd->ltd_index);
2818 ostid_set_seq(oi, FID_SEQ_IDIF);
2819 ostid_set_id(oi, 0);
2820 ostid_to_fid(fid, oi, ltd->ltd_index);
2821 obj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, fid);
2822 if (unlikely(IS_ERR(obj)))
2823 GOTO(log, rc = PTR_ERR(obj));
2825 rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_orphan_features);
2829 iops = &obj->do_index_ops->dio_it;
2830 di = iops->init(env, obj, 0, BYPASS_CAPA);
2832 GOTO(put, rc = PTR_ERR(di));
2834 rc = iops->load(env, di, 0);
2836 /* -ESRCH means that the orphan OST-objects rbtree has been
2837 * cleanup because of the OSS server restart or other errors. */
2838 lo->ll_flags |= LF_INCOMPLETE;
2843 rc = iops->next(env, di);
2855 struct lu_orphan_rec *rec = &info->lti_rec;
2857 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY3) &&
2859 struct ptlrpc_thread *thread = &lfsck->li_thread;
2860 struct l_wait_info lwi;
2862 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
2864 l_wait_event(thread->t_ctl_waitq,
2865 !thread_is_running(thread),
2869 key = iops->key(env, di);
2870 com->lc_fid_latest_scanned_phase2 = *(struct lu_fid *)key;
2871 rc = iops->rec(env, di, (struct dt_rec *)rec, 0);
2873 rc = lfsck_layout_scan_orphan_one(env, com, ltd, rec,
2874 &com->lc_fid_latest_scanned_phase2);
2875 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
2878 lfsck_control_speed_by_self(com);
2880 rc = iops->next(env, di);
2881 } while (rc < 0 && !(bk->lb_param & LPF_FAILOUT));
2888 iops->fini(env, di);
2890 lu_object_put(env, &obj->do_lu);
2893 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant finished the orphan "
2894 "scanning for OST%04x: rc = %d\n",
2895 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
2897 return rc > 0 ? 0 : rc;
2900 /* For the MDT-object with dangling reference, we need to repare the
2901 * inconsistency according to the LFSCK sponsor's requirement:
2903 * 1) Keep the inconsistency there and report the inconsistency case,
2904 * then give the chance to the application to find related issues,
2905 * and the users can make the decision about how to handle it with
2906 * more human knownledge. (by default)
2908 * 2) Re-create the missed OST-object with the FID/owner information. */
2909 static int lfsck_layout_repair_dangling(const struct lu_env *env,
2910 struct lfsck_component *com,
2911 struct lfsck_layout_req *llr,
2912 const struct lu_attr *pla)
2914 struct lfsck_thread_info *info = lfsck_env_info(env);
2915 struct filter_fid *pfid = &info->lti_new_pfid;
2916 struct dt_allocation_hint *hint = &info->lti_hint;
2917 struct lu_attr *cla = &info->lti_la2;
2918 struct dt_object *parent = llr->llr_parent->llo_obj;
2919 struct dt_object *child = llr->llr_child;
2920 struct dt_device *dev = lfsck_obj2dt_dev(child);
2921 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2922 struct thandle *handle;
2924 struct lustre_handle lh = { 0 };
2929 if (com->lc_lfsck->li_bookmark_ram.lb_param & LPF_CREATE_OSTOBJ)
2937 memset(cla, 0, sizeof(*cla));
2938 cla->la_uid = pla->la_uid;
2939 cla->la_gid = pla->la_gid;
2940 cla->la_mode = S_IFREG | 0666;
2941 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
2942 LA_ATIME | LA_MTIME | LA_CTIME;
2944 rc = lfsck_layout_lock(env, com, parent, &lh,
2945 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2949 handle = dt_trans_create(env, dev);
2951 GOTO(unlock1, rc = PTR_ERR(handle));
2953 hint->dah_parent = NULL;
2955 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2956 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2957 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2958 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2959 * parent MDT-object's layout EA. */
2960 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2961 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
2963 rc = dt_declare_create(env, child, cla, hint, NULL, handle);
2967 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
2968 LU_XATTR_CREATE, handle);
2972 rc = dt_trans_start(env, dev, handle);
2976 dt_read_lock(env, parent, 0);
2977 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2978 GOTO(unlock2, rc = 1);
2980 rc = dt_create(env, child, cla, hint, NULL, handle);
2984 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
2985 handle, BYPASS_CAPA);
2990 dt_read_unlock(env, parent);
2993 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2996 lfsck_layout_unlock(&lh);
2999 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant found dangling "
3000 "reference for: parent "DFID", child "DFID", OST-index %u, "
3001 "stripe-index %u, owner %u/%u. %s: rc = %d\n",
3002 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3003 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx,
3004 llr->llr_lov_idx, pla->la_uid, pla->la_gid,
3005 create ? "Create the lost OST-object as required" :
3006 "Keep the MDT-object there by default", rc);
3011 /* If the OST-object does not recognize the MDT-object as its parent, and
3012 * there is no other MDT-object claims as its parent, then just trust the
3013 * given MDT-object as its parent. So update the OST-object filter_fid. */
3014 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
3015 struct lfsck_component *com,
3016 struct lfsck_layout_req *llr,
3017 const struct lu_attr *pla)
3019 struct lfsck_thread_info *info = lfsck_env_info(env);
3020 struct filter_fid *pfid = &info->lti_new_pfid;
3021 struct lu_attr *tla = &info->lti_la3;
3022 struct dt_object *parent = llr->llr_parent->llo_obj;
3023 struct dt_object *child = llr->llr_child;
3024 struct dt_device *dev = lfsck_obj2dt_dev(child);
3025 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
3026 struct thandle *handle;
3028 struct lustre_handle lh = { 0 };
3032 rc = lfsck_layout_lock(env, com, parent, &lh,
3033 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
3037 handle = dt_trans_create(env, dev);
3039 GOTO(unlock1, rc = PTR_ERR(handle));
3041 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
3042 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
3043 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
3044 * MDT-object's FID::f_ver, instead it is the OST-object index in its
3045 * parent MDT-object's layout EA. */
3046 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
3047 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
3049 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
3053 tla->la_valid = LA_UID | LA_GID;
3054 tla->la_uid = pla->la_uid;
3055 tla->la_gid = pla->la_gid;
3056 rc = dt_declare_attr_set(env, child, tla, handle);
3060 rc = dt_trans_start(env, dev, handle);
3064 dt_write_lock(env, parent, 0);
3065 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3066 GOTO(unlock2, rc = 1);
3068 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
3073 /* Get the latest parent's owner. */
3074 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3078 tla->la_valid = LA_UID | LA_GID;
3079 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3084 dt_write_unlock(env, parent);
3087 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3090 lfsck_layout_unlock(&lh);
3093 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired unmatched "
3094 "MDT-OST pair for: parent "DFID", child "DFID", OST-index %u, "
3095 "stripe-index %u, owner %u/%u: rc = %d\n",
3096 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3097 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx, llr->llr_lov_idx,
3098 pla->la_uid, pla->la_gid, rc);
3103 /* If there are more than one MDT-objects claim as the OST-object's parent,
3104 * and the OST-object only recognizes one of them, then we need to generate
3105 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
3106 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
3107 struct lfsck_component *com,
3108 struct lfsck_layout_req *llr,
3112 struct lfsck_thread_info *info = lfsck_env_info(env);
3113 struct dt_allocation_hint *hint = &info->lti_hint;
3114 struct dt_object_format *dof = &info->lti_dof;
3115 struct dt_device *pdev = com->lc_lfsck->li_next;
3116 struct ost_id *oi = &info->lti_oi;
3117 struct dt_object *parent = llr->llr_parent->llo_obj;
3118 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
3119 struct dt_object *child = NULL;
3120 struct lu_device *d = &cdev->dd_lu_dev;
3121 struct lu_object *o = NULL;
3122 struct thandle *handle;
3123 struct lov_mds_md_v1 *lmm;
3124 struct lov_ost_data_v1 *objs;
3125 struct lustre_handle lh = { 0 };
3130 rc = lfsck_layout_lock(env, com, parent, &lh,
3131 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
3135 handle = dt_trans_create(env, pdev);
3137 GOTO(unlock1, rc = PTR_ERR(handle));
3139 o = lu_object_anon(env, d, NULL);
3141 GOTO(stop, rc = PTR_ERR(o));
3143 child = container_of(o, struct dt_object, do_lu);
3144 o = lu_object_locate(o->lo_header, d->ld_type);
3145 if (unlikely(o == NULL))
3146 GOTO(stop, rc = -EINVAL);
3148 child = container_of(o, struct dt_object, do_lu);
3149 la->la_valid = LA_UID | LA_GID;
3150 hint->dah_parent = NULL;
3152 dof->dof_type = DFT_REGULAR;
3153 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
3157 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3158 LU_XATTR_REPLACE, handle);
3162 rc = dt_trans_start(env, pdev, handle);
3166 dt_write_lock(env, parent, 0);
3167 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3168 GOTO(unlock2, rc = 0);
3170 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
3171 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
3172 GOTO(unlock2, rc = 0);
3175 /* Someone change layout during the LFSCK, no need to repair then. */
3176 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
3177 GOTO(unlock2, rc = 0);
3179 rc = dt_create(env, child, la, hint, dof, handle);
3183 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3184 * been verified in lfsck_layout_verify_header() already. If some
3185 * new magic introduced in the future, then layout LFSCK needs to
3186 * be updated also. */
3187 magic = le32_to_cpu(lmm->lmm_magic);
3188 if (magic == LOV_MAGIC_V1) {
3189 objs = &lmm->lmm_objects[0];
3191 LASSERT(magic == LOV_MAGIC_V3);
3192 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
3195 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
3196 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
3197 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
3198 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
3199 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
3200 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
3201 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
3203 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
3206 dt_write_unlock(env, parent);
3210 lu_object_put(env, &child->do_lu);
3212 dt_trans_stop(env, pdev, handle);
3215 lfsck_layout_unlock(&lh);
3218 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired multiple "
3219 "references for: parent "DFID", OST-index %u, stripe-index %u, "
3220 "owner %u/%u: rc = %d\n",
3221 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3222 llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid, rc);
3227 /* If the MDT-object and the OST-object have different owner information,
3228 * then trust the MDT-object, because the normal chown/chgrp handle order
3229 * is from MDT to OST, and it is possible that some chown/chgrp operation
3230 * is partly done. */
3231 static int lfsck_layout_repair_owner(const struct lu_env *env,
3232 struct lfsck_component *com,
3233 struct lfsck_layout_req *llr,
3234 struct lu_attr *pla)
3236 struct lfsck_thread_info *info = lfsck_env_info(env);
3237 struct lu_attr *tla = &info->lti_la3;
3238 struct dt_object *parent = llr->llr_parent->llo_obj;
3239 struct dt_object *child = llr->llr_child;
3240 struct dt_device *dev = lfsck_obj2dt_dev(child);
3241 struct thandle *handle;
3245 handle = dt_trans_create(env, dev);
3247 GOTO(log, rc = PTR_ERR(handle));
3249 tla->la_uid = pla->la_uid;
3250 tla->la_gid = pla->la_gid;
3251 tla->la_valid = LA_UID | LA_GID;
3252 rc = dt_declare_attr_set(env, child, tla, handle);
3256 rc = dt_trans_start(env, dev, handle);
3260 /* Use the dt_object lock to serialize with destroy and attr_set. */
3261 dt_read_lock(env, parent, 0);
3262 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3263 GOTO(unlock, rc = 1);
3265 /* Get the latest parent's owner. */
3266 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3270 /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
3271 if (unlikely(tla->la_uid != pla->la_uid ||
3272 tla->la_gid != pla->la_gid))
3273 GOTO(unlock, rc = 1);
3275 tla->la_valid = LA_UID | LA_GID;
3276 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3281 dt_read_unlock(env, parent);
3284 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3287 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant repaired inconsistent "
3288 "file owner for: parent "DFID", child "DFID", OST-index %u, "
3289 "stripe-index %u, owner %u/%u: rc = %d\n",
3290 lfsck_lfsck2name(com->lc_lfsck), PFID(lfsck_dto2fid(parent)),
3291 PFID(lfsck_dto2fid(child)), llr->llr_ost_idx, llr->llr_lov_idx,
3292 pla->la_uid, pla->la_gid, rc);
3297 /* Check whether the OST-object correctly back points to the
3298 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
3299 static int lfsck_layout_check_parent(const struct lu_env *env,
3300 struct lfsck_component *com,
3301 struct dt_object *parent,
3302 const struct lu_fid *pfid,
3303 const struct lu_fid *cfid,
3304 const struct lu_attr *pla,
3305 const struct lu_attr *cla,
3306 struct lfsck_layout_req *llr,
3307 struct lu_buf *lov_ea, __u32 idx)
3309 struct lfsck_thread_info *info = lfsck_env_info(env);
3310 struct lu_buf *buf = &info->lti_big_buf;
3311 struct dt_object *tobj;
3312 struct lov_mds_md_v1 *lmm;
3313 struct lov_ost_data_v1 *objs;
3320 if (fid_is_zero(pfid)) {
3321 /* client never wrote. */
3322 if (cla->la_size == 0 && cla->la_blocks == 0) {
3323 if (unlikely(cla->la_uid != pla->la_uid ||
3324 cla->la_gid != pla->la_gid))
3325 RETURN (LLIT_INCONSISTENT_OWNER);
3330 RETURN(LLIT_UNMATCHED_PAIR);
3333 if (unlikely(!fid_is_sane(pfid)))
3334 RETURN(LLIT_UNMATCHED_PAIR);
3336 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
3337 if (llr->llr_lov_idx == idx)
3340 RETURN(LLIT_UNMATCHED_PAIR);
3343 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
3345 RETURN(LLIT_UNMATCHED_PAIR);
3348 RETURN(PTR_ERR(tobj));
3350 if (!dt_object_exists(tobj))
3351 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3353 /* Load the tobj's layout EA, in spite of it is a local MDT-object or
3354 * remote one on another MDT. Then check whether the given OST-object
3355 * is in such layout. If yes, it is multiple referenced, otherwise it
3356 * is unmatched referenced case. */
3357 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
3359 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3365 magic = le32_to_cpu(lmm->lmm_magic);
3366 if (magic == LOV_MAGIC_V1) {
3367 objs = &lmm->lmm_objects[0];
3369 LASSERT(magic == LOV_MAGIC_V3);
3370 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
3373 count = le16_to_cpu(lmm->lmm_stripe_count);
3374 for (i = 0; i < count; i++, objs++) {
3375 struct lu_fid *tfid = &info->lti_fid2;
3376 struct ost_id *oi = &info->lti_oi;
3378 if (lovea_slot_is_dummy(objs))
3381 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3382 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
3383 if (lu_fid_eq(cfid, tfid)) {
3386 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
3390 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3393 lfsck_object_put(env, tobj);
3398 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
3399 struct lfsck_component *com,
3400 struct lfsck_layout_req *llr)
3402 struct lfsck_layout *lo = com->lc_file_ram;
3403 struct lfsck_thread_info *info = lfsck_env_info(env);
3404 struct filter_fid_old *pea = &info->lti_old_pfid;
3405 struct lu_fid *pfid = &info->lti_fid;
3406 struct lu_buf *buf = NULL;
3407 struct dt_object *parent = llr->llr_parent->llo_obj;
3408 struct dt_object *child = llr->llr_child;
3409 struct lu_attr *pla = &info->lti_la;
3410 struct lu_attr *cla = &info->lti_la2;
3411 struct lfsck_instance *lfsck = com->lc_lfsck;
3412 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3413 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
3418 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
3420 if (lu_object_is_dying(parent->do_lu.lo_header))
3426 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
3427 if (rc == -ENOENT) {
3428 if (lu_object_is_dying(parent->do_lu.lo_header))
3431 type = LLIT_DANGLING;
3438 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
3439 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
3440 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
3441 rc != sizeof(struct filter_fid))) {
3442 type = LLIT_UNMATCHED_PAIR;
3446 if (rc < 0 && rc != -ENODATA)
3449 if (rc == -ENODATA) {
3452 fid_le_to_cpu(pfid, &pea->ff_parent);
3453 /* Currently, the filter_fid::ff_parent::f_ver is not the
3454 * real parent MDT-object's FID::f_ver, instead it is the
3455 * OST-object index in its parent MDT-object's layout EA. */
3456 idx = pfid->f_stripe_idx;
3460 rc = lfsck_layout_check_parent(env, com, parent, pfid,
3461 lu_object_fid(&child->do_lu),
3462 pla, cla, llr, buf, idx);
3471 if (unlikely(cla->la_uid != pla->la_uid ||
3472 cla->la_gid != pla->la_gid)) {
3473 type = LLIT_INCONSISTENT_OWNER;
3478 if (bk->lb_param & LPF_DRYRUN) {
3479 if (type != LLIT_NONE)
3487 rc = lfsck_layout_repair_dangling(env, com, llr, pla);
3489 case LLIT_UNMATCHED_PAIR:
3490 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
3492 case LLIT_MULTIPLE_REFERENCED:
3493 rc = lfsck_layout_repair_multiple_references(env, com, llr,
3496 case LLIT_INCONSISTENT_OWNER:
3497 rc = lfsck_layout_repair_owner(env, com, llr, pla);
3507 down_write(&com->lc_sem);
3509 struct lfsck_layout_master_data *llmd = com->lc_data;
3511 if (unlikely(llmd->llmd_exit)) {
3513 } else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
3514 rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
3515 rc == -EHOSTUNREACH) {
3516 /* If cannot touch the target server,
3517 * mark the LFSCK as INCOMPLETE. */
3518 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant fail to "
3519 "talk with OST %x: rc = %d\n",
3520 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
3521 lo->ll_flags |= LF_INCOMPLETE;
3522 lo->ll_objs_skipped++;
3525 lfsck_layout_record_failure(env, lfsck, lo);
3527 } else if (rc > 0) {
3528 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
3529 "unknown type = %d\n", type);
3531 lo->ll_objs_repaired[type - 1]++;
3532 if (bk->lb_param & LPF_DRYRUN &&
3533 unlikely(lo->ll_pos_first_inconsistent == 0))
3534 lo->ll_pos_first_inconsistent =
3535 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
3538 up_write(&com->lc_sem);
3543 static int lfsck_layout_assistant(void *args)
3545 struct lfsck_thread_args *lta = args;
3546 struct lu_env *env = <a->lta_env;
3547 struct lfsck_component *com = lta->lta_com;
3548 struct lfsck_instance *lfsck = lta->lta_lfsck;
3549 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3550 struct lfsck_position *pos = &com->lc_pos_start;
3551 struct lfsck_thread_info *info = lfsck_env_info(env);
3552 struct lfsck_request *lr = &info->lti_lr;
3553 struct lfsck_layout_master_data *llmd = com->lc_data;
3554 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3555 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3556 struct lfsck_layout_req *llr;
3557 struct l_wait_info lwi = { 0 };
3562 memset(lr, 0, sizeof(*lr));
3563 lr->lr_event = LE_START;
3564 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
3565 LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
3566 lr->lr_speed = bk->lb_speed_limit;
3567 lr->lr_version = bk->lb_version;
3568 lr->lr_param = bk->lb_param;
3569 lr->lr_async_windows = bk->lb_async_windows;
3570 lr->lr_flags = LEF_TO_OST;
3571 if (pos->lp_oit_cookie <= 1)
3572 lr->lr_param |= LPF_RESET;
3574 rc = lfsck_layout_master_notify_others(env, com, lr);
3576 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to notify "
3577 "others for LFSCK start: rc = %d\n",
3578 lfsck_lfsck2name(lfsck), rc);
3582 spin_lock(&llmd->llmd_lock);
3583 thread_set_flags(athread, SVC_RUNNING);
3584 spin_unlock(&llmd->llmd_lock);
3585 wake_up_all(&mthread->t_ctl_waitq);
3588 while (!list_empty(&llmd->llmd_req_list)) {
3589 bool wakeup = false;
3591 if (unlikely(llmd->llmd_exit ||
3592 !thread_is_running(mthread)))
3593 GOTO(cleanup1, rc = llmd->llmd_post_result);
3595 llr = list_entry(llmd->llmd_req_list.next,
3596 struct lfsck_layout_req,
3598 /* Only the lfsck_layout_assistant thread itself can
3599 * remove the "llr" from the head of the list, LFSCK
3600 * engine thread only inserts other new "lld" at the
3601 * end of the list. So it is safe to handle current
3602 * "llr" without the spin_lock. */
3603 rc = lfsck_layout_assistant_handle_one(env, com, llr);
3604 spin_lock(&llmd->llmd_lock);
3605 list_del_init(&llr->llr_list);
3606 llmd->llmd_prefetched--;
3607 /* Wake up the main engine thread only when the list
3608 * is empty or half of the prefetched items have been
3609 * handled to avoid too frequent thread schedule. */
3610 if (llmd->llmd_prefetched == 0 ||
3611 (bk->lb_async_windows != 0 &&
3612 bk->lb_async_windows / 2 ==
3613 llmd->llmd_prefetched))
3615 spin_unlock(&llmd->llmd_lock);
3617 wake_up_all(&mthread->t_ctl_waitq);
3619 lfsck_layout_req_fini(env, llr);
3620 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
3624 l_wait_event(athread->t_ctl_waitq,
3625 !lfsck_layout_req_empty(llmd) ||
3627 llmd->llmd_to_post ||
3628 llmd->llmd_to_double_scan,
3631 if (unlikely(llmd->llmd_exit))
3632 GOTO(cleanup1, rc = llmd->llmd_post_result);
3634 if (!list_empty(&llmd->llmd_req_list))
3637 if (llmd->llmd_to_post) {
3638 llmd->llmd_to_post = 0;
3639 LASSERT(llmd->llmd_post_result > 0);
3641 memset(lr, 0, sizeof(*lr));
3642 lr->lr_event = LE_PHASE1_DONE;
3643 lr->lr_status = llmd->llmd_post_result;
3644 rc = lfsck_layout_master_notify_others(env, com, lr);
3646 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant "
3647 "failed to notify others for LFSCK "
3649 lfsck_lfsck2name(lfsck), rc);
3651 /* Wakeup the master engine to go ahead. */
3652 wake_up_all(&mthread->t_ctl_waitq);
3655 if (llmd->llmd_to_double_scan) {
3656 llmd->llmd_to_double_scan = 0;
3657 atomic_inc(&lfsck->li_double_scan_count);
3658 llmd->llmd_in_double_scan = 1;
3659 wake_up_all(&mthread->t_ctl_waitq);
3661 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant phase2 "
3662 "scan start\n", lfsck_lfsck2name(lfsck));
3664 com->lc_new_checked = 0;
3665 com->lc_new_scanned = 0;
3666 com->lc_time_last_checkpoint = cfs_time_current();
3667 com->lc_time_next_checkpoint =
3668 com->lc_time_last_checkpoint +
3669 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
3671 /* flush all async updating before handling orphan. */
3672 dt_sync(env, lfsck->li_next);
3674 while (llmd->llmd_in_double_scan) {
3675 struct lfsck_tgt_descs *ltds =
3676 &lfsck->li_ost_descs;
3677 struct lfsck_tgt_desc *ltd;
3679 rc = lfsck_layout_master_query_others(env, com);
3680 if (lfsck_layout_master_to_orphan(llmd))
3686 /* Pull LFSCK status on related targets once
3687 * per 30 seconds if we are not notified. */
3688 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
3689 cfs_time_seconds(1),
3691 rc = l_wait_event(athread->t_ctl_waitq,
3692 lfsck_layout_master_to_orphan(llmd) ||
3694 !thread_is_running(mthread),
3697 if (unlikely(llmd->llmd_exit ||
3698 !thread_is_running(mthread)))
3699 GOTO(cleanup2, rc = 0);
3701 if (rc == -ETIMEDOUT)
3708 spin_lock(<ds->ltd_lock);
3710 &llmd->llmd_ost_phase2_list)) {
3712 llmd->llmd_ost_phase2_list.next,
3713 struct lfsck_tgt_desc,
3714 ltd_layout_phase_list);
3716 <d->ltd_layout_phase_list);
3717 spin_unlock(<ds->ltd_lock);
3719 if (bk->lb_param & LPF_ALL_TGT) {
3720 rc = lfsck_layout_scan_orphan(
3723 bk->lb_param & LPF_FAILOUT)
3727 if (unlikely(llmd->llmd_exit ||
3728 !thread_is_running(mthread)))
3729 GOTO(cleanup2, rc = 0);
3731 spin_lock(<ds->ltd_lock);
3734 if (list_empty(&llmd->llmd_ost_phase1_list)) {
3735 spin_unlock(<ds->ltd_lock);
3736 GOTO(cleanup2, rc = 1);
3738 spin_unlock(<ds->ltd_lock);
3744 /* Cleanup the unfinished requests. */
3745 spin_lock(&llmd->llmd_lock);
3747 llmd->llmd_assistant_status = rc;
3749 while (!list_empty(&llmd->llmd_req_list)) {
3750 llr = list_entry(llmd->llmd_req_list.next,
3751 struct lfsck_layout_req,
3753 list_del_init(&llr->llr_list);
3754 llmd->llmd_prefetched--;
3755 spin_unlock(&llmd->llmd_lock);
3756 lfsck_layout_req_fini(env, llr);
3757 spin_lock(&llmd->llmd_lock);
3759 spin_unlock(&llmd->llmd_lock);
3761 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
3762 llmd->llmd_prefetched);
3765 memset(lr, 0, sizeof(*lr));
3767 lr->lr_event = LE_PHASE2_DONE;
3769 } else if (rc == 0) {
3770 if (lfsck->li_flags & LPF_ALL_TGT) {
3771 lr->lr_event = LE_STOP;
3772 lr->lr_status = LS_STOPPED;
3774 lr->lr_event = LE_PEER_EXIT;
3775 switch (lfsck->li_status) {
3778 lr->lr_status = LS_CO_PAUSED;
3782 lr->lr_status = LS_CO_STOPPED;
3785 CDEBUG(D_LFSCK, "%s: unknown status: rc = %d\n",
3786 lfsck_lfsck2name(lfsck),
3788 lr->lr_status = LS_CO_FAILED;
3793 if (lfsck->li_flags & LPF_ALL_TGT) {
3794 lr->lr_event = LE_STOP;
3795 lr->lr_status = LS_FAILED;
3797 lr->lr_event = LE_PEER_EXIT;
3798 lr->lr_status = LS_CO_FAILED;
3802 rc1 = lfsck_layout_master_notify_others(env, com, lr);
3804 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to "
3805 "notify others for LFSCK quit: rc = %d\n",
3806 lfsck_lfsck2name(lfsck), rc1);
3810 /* flush all async updating before exit. */
3811 dt_sync(env, lfsck->li_next);
3813 /* Under force exit case, some requests may be just freed without
3814 * verification, those objects should be re-handled when next run.
3815 * So not update the on-disk tracing file under such case. */
3816 if (llmd->llmd_in_double_scan) {
3817 struct lfsck_layout *lo = com->lc_file_ram;
3819 if (!llmd->llmd_exit)
3820 rc1 = lfsck_layout_double_scan_result(env, com, rc);
3822 CDEBUG(D_LFSCK, "%s: layout LFSCK assistant phase2 scan "
3823 "finished, status %d: rc = %d\n",
3824 lfsck_lfsck2name(lfsck), lo->ll_status, rc1);
3828 if (llmd->llmd_in_double_scan)
3829 atomic_dec(&lfsck->li_double_scan_count);
3831 spin_lock(&llmd->llmd_lock);
3832 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
3833 thread_set_flags(athread, SVC_STOPPED);
3834 wake_up_all(&mthread->t_ctl_waitq);
3835 spin_unlock(&llmd->llmd_lock);
3836 lfsck_thread_args_fini(lta);
3842 lfsck_layout_slave_async_interpret(const struct lu_env *env,
3843 struct ptlrpc_request *req,
3846 struct lfsck_layout_slave_async_args *llsaa = args;
3847 struct obd_export *exp = llsaa->llsaa_exp;
3848 struct lfsck_component *com = llsaa->llsaa_com;
3849 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
3850 struct lfsck_layout_slave_data *llsd = com->lc_data;
3851 struct lfsck_reply *lr = NULL;
3855 /* It is quite probably caused by target crash,
3856 * to make the LFSCK can go ahead, assume that
3857 * the target finished the LFSCK prcoessing. */
3860 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
3861 if (lr->lr_status != LS_SCANNING_PHASE1 &&
3862 lr->lr_status != LS_SCANNING_PHASE2)
3867 CDEBUG(D_LFSCK, "%s: layout LFSCK slave gets the MDT %x "
3868 "status %d\n", lfsck_lfsck2name(com->lc_lfsck),
3869 llst->llst_index, lr != NULL ? lr->lr_status : rc);
3871 lfsck_layout_llst_del(llsd, llst);
3874 lfsck_layout_llst_put(llst);
3875 lfsck_component_put(env, com);
3876 class_export_put(exp);
/*
 * Allocate and pack an asynchronous LFSCK_QUERY RPC toward one master
 * (MDT) export, stash the per-request context (export, a new reference
 * on the component, the slave-target entry) into the request's async
 * args, install lfsck_layout_slave_async_interpret() as the reply
 * callback, and queue the request on \a set.  The caller later fires
 * the whole set via ptlrpc_set_wait().
 */
3881 static int lfsck_layout_async_query(const struct lu_env *env,
3882 struct lfsck_component *com,
3883 struct obd_export *exp,
3884 struct lfsck_layout_slave_target *llst,
3885 struct lfsck_request *lr,
3886 struct ptlrpc_request_set *set)
3888 struct lfsck_layout_slave_async_args *llsaa;
3889 struct ptlrpc_request *req;
3890 struct lfsck_request *tmp;
3894 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
3898 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
3900 ptlrpc_request_free(req);
3904 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3906 ptlrpc_request_set_replen(req);
/* Context for the interpret callback; com gains one reference here,
 * released by lfsck_layout_slave_async_interpret(). */
3908 llsaa = ptlrpc_req_async_args(req);
3909 llsaa->llsaa_exp = exp;
3910 llsaa->llsaa_com = lfsck_component_get(com);
3911 llsaa->llsaa_llst = llst;
3912 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
3913 ptlrpc_set_add_req(set, req);
/*
 * Allocate and pack an asynchronous LFSCK_NOTIFY RPC toward the given
 * export and queue it on \a set.  Unlike lfsck_layout_async_query(),
 * no interpret callback or async args are attached - the notification
 * is fire-and-forget from this function's point of view.
 */
3918 static int lfsck_layout_async_notify(const struct lu_env *env,
3919 struct obd_export *exp,
3920 struct lfsck_request *lr,
3921 struct ptlrpc_request_set *set)
3923 struct ptlrpc_request *req;
3924 struct lfsck_request *tmp;
3928 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
3932 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
3934 ptlrpc_request_free(req);
3938 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3940 ptlrpc_request_set_replen(req);
3941 ptlrpc_set_add_req(set, req);
/*
 * Slave (OST) side: send an LE_QUERY request to every master (MDT)
 * currently tracked on llsd_master_list, then wait for all replies.
 *
 * The list walk uses llsd_touch_gen as a marker: each visited entry is
 * stamped with the new generation and rotated to the list tail, so the
 * loop terminates when it meets an entry already carrying the current
 * generation.  llsd_lock is dropped around the RPC preparation (which
 * may block) and re-taken for the next iteration; the extra llst_ref
 * keeps the entry alive across that window.
 *
 * NOTE(review): some original lines (rc/rc1 declarations, error
 * branches, labels) are missing from this fragment.
 */
3947 lfsck_layout_slave_query_master(const struct lu_env *env,
3948 struct lfsck_component *com)
3950 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3951 struct lfsck_instance *lfsck = com->lc_lfsck;
3952 struct lfsck_layout_slave_data *llsd = com->lc_data;
3953 struct lfsck_layout_slave_target *llst;
3954 struct obd_export *exp;
3955 struct ptlrpc_request_set *set;
3960 set = ptlrpc_prep_set();
3962 GOTO(log, rc = -ENOMEM);
3964 memset(lr, 0, sizeof(*lr));
3965 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3966 lr->lr_event = LE_QUERY;
3967 lr->lr_active = LFSCK_TYPE_LAYOUT;
3969 llsd->llsd_touch_gen++;
3970 spin_lock(&llsd->llsd_lock);
3971 while (!list_empty(&llsd->llsd_master_list)) {
3972 llst = list_entry(llsd->llsd_master_list.next,
3973 struct lfsck_layout_slave_target,
/* Already visited in this pass: the whole list has been walked. */
3975 if (llst->llst_gen == llsd->llsd_touch_gen)
3978 llst->llst_gen = llsd->llsd_touch_gen;
3979 list_del(&llst->llst_list);
3980 list_add_tail(&llst->llst_list,
3981 &llsd->llsd_master_list);
3982 atomic_inc(&llst->llst_ref);
3983 spin_unlock(&llsd->llsd_lock);
3985 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP connection for this MDT: drop the tracking entry. */
3988 lfsck_layout_llst_del(llsd, llst);
3989 lfsck_layout_llst_put(llst);
3990 spin_lock(&llsd->llsd_lock);
3994 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
3996 CDEBUG(D_LFSCK, "%s: layout LFSCK slave fail to "
3997 "query %s for layout: rc = %d\n",
3998 lfsck_lfsck2name(lfsck),
3999 exp->exp_obd->obd_name, rc);
4002 lfsck_layout_llst_put(llst);
4003 class_export_put(exp);
4005 spin_lock(&llsd->llsd_lock);
4007 spin_unlock(&llsd->llsd_lock);
4009 rc = ptlrpc_set_wait(set);
4010 ptlrpc_set_destroy(set);
4012 GOTO(log, rc = (rc1 != 0 ? rc1 : rc));
4015 CDEBUG(D_LFSCK, "%s: layout LFSCK slave queries master: rc = %d\n",
4016 lfsck_lfsck2name(com->lc_lfsck), rc);
/*
 * Slave (OST) side: broadcast an event (e.g. LE_PHASE1_DONE) with the
 * given result code to every master (MDT) on llsd_master_list.
 *
 * Uses the same llsd_touch_gen generation-stamp / rotate-to-tail walk
 * as lfsck_layout_slave_query_master() so each entry is visited once
 * per pass while llsd_lock can be dropped around the blocking RPC
 * preparation.  All notifications are queued on one request set and
 * waited for at the end; the set-wait result is not propagated
 * (best-effort notification).
 *
 * NOTE(review): some original lines (rc declaration, error branches,
 * closing brace) are missing from this fragment.
 */
4022 lfsck_layout_slave_notify_master(const struct lu_env *env,
4023 struct lfsck_component *com,
4024 enum lfsck_events event, int result)
4026 struct lfsck_instance *lfsck = com->lc_lfsck;
4027 struct lfsck_layout_slave_data *llsd = com->lc_data;
4028 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
4029 struct lfsck_layout_slave_target *llst;
4030 struct obd_export *exp;
4031 struct ptlrpc_request_set *set;
4035 CDEBUG(D_LFSCK, "%s: layout LFSCK slave notifies master\n",
4036 lfsck_lfsck2name(com->lc_lfsck));
4038 set = ptlrpc_prep_set();
4042 memset(lr, 0, sizeof(*lr));
4043 lr->lr_event = event;
4044 lr->lr_flags = LEF_FROM_OST;
4045 lr->lr_status = result;
4046 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
4047 lr->lr_active = LFSCK_TYPE_LAYOUT;
4048 llsd->llsd_touch_gen++;
4049 spin_lock(&llsd->llsd_lock);
4050 while (!list_empty(&llsd->llsd_master_list)) {
4051 llst = list_entry(llsd->llsd_master_list.next,
4052 struct lfsck_layout_slave_target,
/* Already visited in this pass: the whole list has been walked. */
4054 if (llst->llst_gen == llsd->llsd_touch_gen)
4057 llst->llst_gen = llsd->llsd_touch_gen;
4058 list_del(&llst->llst_list);
4059 list_add_tail(&llst->llst_list,
4060 &llsd->llsd_master_list);
4061 atomic_inc(&llst->llst_ref);
4062 spin_unlock(&llsd->llsd_lock);
4064 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP connection for this MDT: drop the tracking entry. */
4067 lfsck_layout_llst_del(llsd, llst);
4068 lfsck_layout_llst_put(llst);
4069 spin_lock(&llsd->llsd_lock);
4073 rc = lfsck_layout_async_notify(env, exp, lr, set);
4075 CDEBUG(D_LFSCK, "%s: layout LFSCK slave fail to "
4076 "notify %s for layout: rc = %d\n",
4077 lfsck_lfsck2name(lfsck),
4078 exp->exp_obd->obd_name, rc);
4080 lfsck_layout_llst_put(llst);
4081 class_export_put(exp);
4082 spin_lock(&llsd->llsd_lock);
4084 spin_unlock(&llsd->llsd_lock);
4086 ptlrpc_set_wait(set);
4087 ptlrpc_set_destroy(set);
/*
 * Master (MDT) side: verify that the OST-object \a cfid really is the
 * stripe at index pfid->f_stripe_idx of the MDT-object \a pfid, by
 * loading the parent's LOV EA and comparing each stripe's ost_id
 * against the one derived from \a cfid.
 *
 * \ret -ENODATA: unrecognized stripe
 * \ret = 0 : recognized stripe
 * \ret < 0 : other failures
 */
4097 static int lfsck_layout_master_check_pairs(const struct lu_env *env,
4098 struct lfsck_component *com,
4099 struct lu_fid *cfid,
4100 struct lu_fid *pfid)
4102 struct lfsck_thread_info *info = lfsck_env_info(env);
4103 struct lu_buf *buf = &info->lti_big_buf;
4104 struct ost_id *oi = &info->lti_oi;
4105 struct dt_object *obj;
4106 struct lov_mds_md_v1 *lmm;
4107 struct lov_ost_data_v1 *objs;
4108 __u32 idx = pfid->f_stripe_idx;
4116 obj = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
4118 RETURN(PTR_ERR(obj));
4120 dt_read_lock(env, obj, 0);
4121 if (unlikely(!dt_object_exists(obj)))
4122 GOTO(unlock, rc = -ENOENT);
4124 rc = lfsck_layout_get_lovea(env, obj, buf, NULL);
4129 GOTO(unlock, rc = -ENODATA);
4132 rc = lfsck_layout_verify_header(lmm);
4136 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4137 * been verified in lfsck_layout_verify_header() already. If some
4138 * new magic introduced in the future, then layout LFSCK needs to
4139 * be updated also. */
4140 magic = le32_to_cpu(lmm->lmm_magic);
4141 if (magic == LOV_MAGIC_V1) {
4142 objs = &lmm->lmm_objects[0];
4144 LASSERT(magic == LOV_MAGIC_V3);
4145 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Compare every stripe's on-disk ost_id with the one for cfid;
 * a match at the wrong index still counts as unrecognized. */
4148 fid_to_ostid(cfid, oi);
4149 count = le16_to_cpu(lmm->lmm_stripe_count);
4150 for (i = 0; i < count; i++, objs++) {
4153 ostid_le_to_cpu(&objs->l_ost_oi, &oi2);
4154 if (memcmp(oi, &oi2, sizeof(*oi)) == 0)
4155 GOTO(unlock, rc = (i != idx ? -ENODATA : 0));
4158 GOTO(unlock, rc = -ENODATA);
4161 dt_read_unlock(env, obj);
4162 lu_object_put(env, &obj->do_lu);
/*
 * The LFSCK-on-OST will ask the LFSCK-on-MDT to check whether the given
 * MDT-object/OST-object pairs match or not to avoid transferring the
 * MDT-object layout EA from MDT to OST. On one hand, the OST has no
 * need to understand the layout EA structure; on the other hand, it may
 * cause trouble to transfer a large layout EA from MDT to OST via a
 * normal OUT RPC.
 *
 * Looks up which MDT owns the sequence of the claimed parent \a pfid
 * via the FLD, finds the LWP export for that MDT, and sends a
 * synchronous LE_PAIRS_VERIFY LFSCK_NOTIFY RPC carrying both FIDs.
 *
 * \ret > 0: unrecognized stripe
 * \ret = 0: recognized stripe
 * \ret < 0: other failures
 */
4178 static int lfsck_layout_slave_check_pairs(const struct lu_env *env,
4179 struct lfsck_component *com,
4180 struct lu_fid *cfid,
4181 struct lu_fid *pfid)
4183 struct lfsck_instance *lfsck = com->lc_lfsck;
4184 struct obd_device *obd = lfsck->li_obd;
4185 struct seq_server_site *ss =
4186 lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
4187 struct obd_export *exp = NULL;
4188 struct ptlrpc_request *req = NULL;
4189 struct lfsck_request *lr;
4190 struct lu_seq_range range = { 0 };
/* IDIF FIDs encode OST objects directly, not MDT parents. */
4194 if (unlikely(fid_is_idif(pfid)))
4197 fld_range_set_any(&range);
4198 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(pfid), &range);
4200 RETURN(rc == -ENOENT ? 1 : rc);
4202 if (unlikely(!fld_range_is_mdt(&range)))
4205 exp = lustre_find_lwp_by_index(obd->obd_name, range.lsr_index);
4206 if (unlikely(exp == NULL))
4209 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
4210 GOTO(out, rc = -EOPNOTSUPP);
4212 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
4214 GOTO(out, rc = -ENOMEM);
4216 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
4218 ptlrpc_request_free(req);
4223 lr = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
4224 memset(lr, 0, sizeof(*lr));
4225 lr->lr_event = LE_PAIRS_VERIFY;
4226 lr->lr_active = LFSCK_TYPE_LAYOUT;
4227 lr->lr_fid = *cfid; /* OST-object itself FID. */
4228 lr->lr_fid2 = *pfid; /* The claimed parent FID. */
4230 ptlrpc_request_set_replen(req);
4231 rc = ptlrpc_queue_wait(req);
4232 ptlrpc_req_finished(req);
4234 if (rc == -ENOENT || rc == -ENODATA)
4241 class_export_put(exp);
/*
 * Slave (OST) side: repair the parent-FID (filter_fid) xattr of the
 * OST-object lr->lr_fid so it points at the parent lr->lr_fid2, as
 * instructed by the master.  Does the update under the object's write
 * lock inside a local transaction; a vanished object is treated as
 * success (rc = 0).
 */
4246 static int lfsck_layout_slave_repair_pfid(const struct lu_env *env,
4247 struct lfsck_component *com,
4248 struct lfsck_request *lr)
4250 struct lfsck_thread_info *info = lfsck_env_info(env);
4251 struct filter_fid *ff = &info->lti_new_pfid;
4253 struct dt_device *dev = com->lc_lfsck->li_bottom;
4254 struct dt_object *obj;
4255 struct thandle *th = NULL;
4259 obj = lfsck_object_find_by_dev(env, dev, &lr->lr_fid);
4261 GOTO(log, rc = PTR_ERR(obj));
4263 fid_cpu_to_le(&ff->ff_parent, &lr->lr_fid2);
4264 buf = lfsck_buf_get(env, ff, sizeof(*ff));
4265 dt_write_lock(env, obj, 0);
4266 if (unlikely(!dt_object_exists(obj)))
4267 GOTO(unlock, rc = 0);
4269 th = dt_trans_create(env, dev);
4271 GOTO(unlock, rc = PTR_ERR(th));
4273 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th);
4277 rc = dt_trans_start_local(env, dev, th);
4281 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th, BYPASS_CAPA);
4286 dt_trans_stop(env, dev, th);
4289 dt_write_unlock(env, obj);
4290 lu_object_put(env, &obj->do_lu);
4293 CDEBUG(D_LFSCK, "%s: layout LFSCK slave repaired pfid for "DFID
4294 ", parent "DFID": rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
4295 PFID(&lr->lr_fid), PFID(&lr->lr_fid2), rc);
/*
 * Reset the in-RAM layout LFSCK trace file under lc_sem: zero it, then
 * restore the magic and set status to LS_INIT, and persist via
 * lfsck_layout_store().  When \a init is false, ll_success_count and
 * ll_time_last_complete are preserved across the reset.
 *
 * NOTE(review): the if/else around the two memset() calls is missing
 * from this fragment - presumably init selects the plain memset path.
 */
4302 static int lfsck_layout_reset(const struct lu_env *env,
4303 struct lfsck_component *com, bool init)
4305 struct lfsck_layout *lo = com->lc_file_ram;
4308 down_write(&com->lc_sem);
4310 memset(lo, 0, com->lc_file_size);
4312 __u32 count = lo->ll_success_count;
4313 __u64 last_time = lo->ll_time_last_complete;
4315 memset(lo, 0, com->lc_file_size);
4316 lo->ll_success_count = count;
4317 lo->ll_time_last_complete = last_time;
4320 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
4321 lo->ll_status = LS_INIT;
4323 rc = lfsck_layout_store(env, com);
4324 up_write(&com->lc_sem);
4326 CDEBUG(D_LFSCK, "%s: layout LFSCK reset: rc = %d\n",
4327 lfsck_lfsck2name(com->lc_lfsck), rc);
/*
 * Record a scanning failure in the layout LFSCK trace file under
 * lc_sem.  The lc_new_checked increment is presumably conditional on
 * \a new_checked - the guarding line is missing from this fragment.
 */
4332 static void lfsck_layout_fail(const struct lu_env *env,
4333 struct lfsck_component *com, bool new_checked)
4335 struct lfsck_layout *lo = com->lc_file_ram;
4337 down_write(&com->lc_sem);
4339 com->lc_new_checked++;
4340 lfsck_layout_record_failure(env, com->lc_lfsck, lo);
4341 up_write(&com->lc_sem);
/*
 * Master (MDT) side checkpoint: wait until the assistant thread has
 * drained llmd_req_list (so every prefetched object up to the current
 * OIT position has been processed), then record the current position,
 * accumulated runtime and checked-object counters into the trace file
 * and persist it.  \a init records the latest-start position instead
 * of a last-checkpoint position.
 */
4344 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
4345 struct lfsck_component *com, bool init)
4347 struct lfsck_instance *lfsck = com->lc_lfsck;
4348 struct lfsck_layout *lo = com->lc_file_ram;
4349 struct lfsck_layout_master_data *llmd = com->lc_data;
4350 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4351 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4352 struct l_wait_info lwi = { 0 };
/* Nothing new since the last checkpoint: skip the store. */
4355 if (com->lc_new_checked == 0 && !init)
4358 l_wait_event(mthread->t_ctl_waitq,
4359 list_empty(&llmd->llmd_req_list) ||
4360 !thread_is_running(mthread) ||
4361 thread_is_stopped(athread),
4364 if (!thread_is_running(mthread) || thread_is_stopped(athread))
4367 down_write(&com->lc_sem);
4369 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4371 lo->ll_pos_last_checkpoint =
4372 lfsck->li_pos_current.lp_oit_cookie;
4373 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4374 HALF_SEC - lfsck->li_time_last_checkpoint);
4375 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4376 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4377 com->lc_new_checked = 0;
4380 rc = lfsck_layout_store(env, com);
4381 up_write(&com->lc_sem);
4383 CDEBUG(D_LFSCK, "%s: layout LFSCK master checkpoint at the pos ["
4384 LPU64"]: rc = %d\n", lfsck_lfsck2name(lfsck),
4385 lfsck->li_pos_current.lp_oit_cookie, rc);
/*
 * Slave (OST) side checkpoint: same bookkeeping as the master variant
 * (record OIT position, phase1 runtime and checked-object counters,
 * persist the trace file) but without an assistant thread to drain.
 * \a init records the latest-start position instead of a
 * last-checkpoint position.
 */
4390 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
4391 struct lfsck_component *com, bool init)
4393 struct lfsck_instance *lfsck = com->lc_lfsck;
4394 struct lfsck_layout *lo = com->lc_file_ram;
/* Nothing new since the last checkpoint: skip the store. */
4397 if (com->lc_new_checked == 0 && !init)
4400 down_write(&com->lc_sem);
4402 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4404 lo->ll_pos_last_checkpoint =
4405 lfsck->li_pos_current.lp_oit_cookie;
4406 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4407 HALF_SEC - lfsck->li_time_last_checkpoint);
4408 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4409 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4410 com->lc_new_checked = 0;
4413 rc = lfsck_layout_store(env, com);
4414 up_write(&com->lc_sem);
4416 CDEBUG(D_LFSCK, "%s: layout LFSCK slave checkpoint at the pos ["
4417 LPU64"]: rc = %d\n", lfsck_lfsck2name(lfsck),
4418 lfsck->li_pos_current.lp_oit_cookie, rc);
/*
 * Common (master and slave) preparation before a layout LFSCK run:
 * decide the start status and the OIT start position.
 *
 * - A previously completed/partial run, or an orphan-handling request,
 *   forces a reset and a scan from the beginning.
 * - If the trace file says the device was already scanned once
 *   (LF_SCANNED_ONCE), resume directly into phase2 unless a dryrun's
 *   recorded first-inconsistency position should be revisited, in
 *   which case phase1 counters are cleared and scanning restarts from
 *   that position.
 * - Otherwise start phase1 from the last checkpoint (+1) or from the
 *   first recorded inconsistency when re-running after a dryrun.
 */
4423 static int lfsck_layout_prep(const struct lu_env *env,
4424 struct lfsck_component *com,
4425 struct lfsck_start *start)
4427 struct lfsck_instance *lfsck = com->lc_lfsck;
4428 struct lfsck_layout *lo = com->lc_file_ram;
4429 struct lfsck_position *pos = &com->lc_pos_start;
4431 fid_zero(&pos->lp_dir_parent);
4432 pos->lp_dir_cookie = 0;
4433 if (lo->ll_status == LS_COMPLETED ||
4434 lo->ll_status == LS_PARTIAL ||
4435 /* To handle orphan, must scan from the beginning. */
4436 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
4439 rc = lfsck_layout_reset(env, com, false);
4441 rc = lfsck_set_param(env, lfsck, start, true);
4444 CDEBUG(D_LFSCK, "%s: layout LFSCK prep failed: "
4445 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
4451 down_write(&com->lc_sem);
4452 lo->ll_time_latest_start = cfs_time_current_sec();
4453 spin_lock(&lfsck->li_lock);
4454 if (lo->ll_flags & LF_SCANNED_ONCE) {
4455 if (!lfsck->li_drop_dryrun ||
4456 lo->ll_pos_first_inconsistent == 0) {
/* Phase1 already done before: go straight to phase2. */
4457 lo->ll_status = LS_SCANNING_PHASE2;
4458 list_del_init(&com->lc_link);
4459 list_add_tail(&com->lc_link,
4460 &lfsck->li_list_double_scan);
4461 pos->lp_oit_cookie = 0;
/* Re-run phase1 from the first inconsistency: clear counters. */
4465 lo->ll_status = LS_SCANNING_PHASE1;
4466 lo->ll_run_time_phase1 = 0;
4467 lo->ll_run_time_phase2 = 0;
4468 lo->ll_objs_checked_phase1 = 0;
4469 lo->ll_objs_checked_phase2 = 0;
4470 lo->ll_objs_failed_phase1 = 0;
4471 lo->ll_objs_failed_phase2 = 0;
4472 for (i = 0; i < LLIT_MAX; i++)
4473 lo->ll_objs_repaired[i] = 0;
4475 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4476 fid_zero(&com->lc_fid_latest_scanned_phase2);
4479 lo->ll_status = LS_SCANNING_PHASE1;
4480 if (!lfsck->li_drop_dryrun ||
4481 lo->ll_pos_first_inconsistent == 0)
4482 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
4484 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4486 spin_unlock(&lfsck->li_lock);
4487 up_write(&com->lc_sem);
/*
 * Slave (OST) side preparation: run the common lfsck_layout_prep(),
 * tell the OFD (via li_out_notify) to rebuild LAST_ID files if the
 * trace file records a crashed LAST_ID and no master is attached,
 * register the requesting master in llsd_master_list, and for orphan
 * handling set up the rbtree used to track locally-seen OST objects.
 */
4492 static int lfsck_layout_slave_prep(const struct lu_env *env,
4493 struct lfsck_component *com,
4494 struct lfsck_start_param *lsp)
4496 struct lfsck_layout_slave_data *llsd = com->lc_data;
4497 struct lfsck_instance *lfsck = com->lc_lfsck;
4498 struct lfsck_layout *lo = com->lc_file_ram;
4499 struct lfsck_start *start = lsp->lsp_start;
4502 rc = lfsck_layout_prep(env, com, start);
4506 if (lo->ll_flags & LF_CRASHED_LASTID &&
4507 list_empty(&llsd->llsd_master_list)) {
4508 LASSERT(lfsck->li_out_notify != NULL);
4510 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4511 LE_LASTID_REBUILDING);
4514 if (!lsp->lsp_index_valid)
/* Track the master (MDT) that triggered this run. */
4517 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
4518 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
4519 LASSERT(!llsd->llsd_rbtree_valid);
4521 write_lock(&llsd->llsd_rb_lock);
4522 rc = lfsck_rbtree_setup(env, com);
4523 write_unlock(&llsd->llsd_rb_lock);
4526 CDEBUG(D_LFSCK, "%s: layout LFSCK slave prep done, start pos ["
4527 LPU64"]\n", lfsck_lfsck2name(lfsck),
4528 com->lc_pos_start.lp_oit_cookie);
/*
 * Master (MDT) side preparation: run the common lfsck_layout_prep(),
 * reset the assistant-thread bookkeeping flags, then spawn the
 * "lfsck_layout" assistant kthread and wait for it to reach running
 * (or stopped) state before returning.  On kthread_run() failure the
 * thread args are freed here; otherwise the assistant owns them.
 */
4533 static int lfsck_layout_master_prep(const struct lu_env *env,
4534 struct lfsck_component *com,
4535 struct lfsck_start_param *lsp)
4537 struct lfsck_instance *lfsck = com->lc_lfsck;
4538 struct lfsck_layout_master_data *llmd = com->lc_data;
4539 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4540 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4541 struct lfsck_thread_args *lta;
4542 struct task_struct *task;
4546 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
4550 llmd->llmd_assistant_status = 0;
4551 llmd->llmd_post_result = 0;
4552 llmd->llmd_to_post = 0;
4553 llmd->llmd_to_double_scan = 0;
4554 llmd->llmd_in_double_scan = 0;
4555 llmd->llmd_exit = 0;
4556 thread_set_flags(athread, 0);
4558 lta = lfsck_thread_args_init(lfsck, com, lsp);
4560 RETURN(PTR_ERR(lta));
4562 task = kthread_run(lfsck_layout_assistant, lta, "lfsck_layout");
4565 CERROR("%s: cannot start LFSCK layout assistant thread: "
4566 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
4567 lfsck_thread_args_fini(lta);
4569 struct l_wait_info lwi = { 0 };
4571 l_wait_event(mthread->t_ctl_waitq,
4572 thread_is_running(athread) ||
4573 thread_is_stopped(athread),
/* Assistant died before running: report its exit status. */
4575 if (unlikely(!thread_is_running(athread)))
4576 rc = llmd->llmd_assistant_status;
4581 CDEBUG(D_LFSCK, "%s: layout LFSCK master prep done, start pos ["
4582 LPU64"\n", lfsck_lfsck2name(lfsck),
4583 com->lc_pos_start.lp_oit_cookie);
/* Pre-fetch the attribute for each stripe in the given layout EA.
 *
 * For every non-dummy stripe slot of \a parent's LOV EA: map the
 * on-disk ost_id to a FID, look up the owning OST target, find the
 * OST-object, declare the attr/pfid-xattr gets that the assistant will
 * need, and queue an lfsck_layout_req on llmd_req_list for the
 * assistant thread.  Throttled by lb_async_windows against
 * llmd_prefetched so the queue stays bounded.  An OST that has not
 * joined the LFSCK marks the run LF_INCOMPLETE. */
4589 static int lfsck_layout_scan_stripes(const struct lu_env *env,
4590 struct lfsck_component *com,
4591 struct dt_object *parent,
4592 struct lov_mds_md_v1 *lmm)
4594 struct lfsck_thread_info *info = lfsck_env_info(env);
4595 struct lfsck_instance *lfsck = com->lc_lfsck;
4596 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4597 struct lfsck_layout *lo = com->lc_file_ram;
4598 struct lfsck_layout_master_data *llmd = com->lc_data;
4599 struct lfsck_layout_object *llo = NULL;
4600 struct lov_ost_data_v1 *objs;
4601 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
4602 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4603 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4604 struct l_wait_info lwi = { 0 };
4613 buf = lfsck_buf_get(env, &info->lti_old_pfid,
4614 sizeof(struct filter_fid_old));
4615 count = le16_to_cpu(lmm->lmm_stripe_count);
4616 gen = le16_to_cpu(lmm->lmm_layout_gen);
4617 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4618 * been verified in lfsck_layout_verify_header() already. If some
4619 * new magic introduced in the future, then layout LFSCK needs to
4620 * be updated also. */
4621 magic = le32_to_cpu(lmm->lmm_magic);
4622 if (magic == LOV_MAGIC_V1) {
4623 objs = &lmm->lmm_objects[0];
4625 LASSERT(magic == LOV_MAGIC_V3);
4626 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
4629 for (i = 0; i < count; i++, objs++) {
4630 struct lu_fid *fid = &info->lti_fid;
4631 struct ost_id *oi = &info->lti_oi;
4632 struct lfsck_layout_req *llr;
4633 struct lfsck_tgt_desc *tgt = NULL;
4634 struct dt_object *cobj = NULL;
4636 le32_to_cpu(objs->l_ost_idx);
4637 bool wakeup = false;
4639 if (unlikely(lovea_slot_is_dummy(objs)))
/* Throttle: keep at most lb_async_windows prefetched requests. */
4642 l_wait_event(mthread->t_ctl_waitq,
4643 bk->lb_async_windows == 0 ||
4644 llmd->llmd_prefetched < bk->lb_async_windows ||
4645 !thread_is_running(mthread) ||
4646 thread_is_stopped(athread),
4649 if (unlikely(!thread_is_running(mthread)) ||
4650 thread_is_stopped(athread))
4653 ostid_le_to_cpu(&objs->l_ost_oi, oi);
4654 ostid_to_fid(fid, oi, index);
4655 tgt = lfsck_tgt_get(ltds, index);
4656 if (unlikely(tgt == NULL)) {
4657 CDEBUG(D_LFSCK, "%s: cannot talk with OST %x which "
4658 "did not join the layout LFSCK\n",
4659 lfsck_lfsck2name(lfsck), index);
4660 lo->ll_flags |= LF_INCOMPLETE;
4664 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
4670 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
4674 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
4680 llo = lfsck_layout_object_init(env, parent, gen);
4687 llr = lfsck_layout_req_init(llo, cobj, index, i);
4694 spin_lock(&llmd->llmd_lock);
/* Assistant already failed: abort the scan with its status. */
4695 if (llmd->llmd_assistant_status < 0) {
4696 spin_unlock(&llmd->llmd_lock);
4697 lfsck_layout_req_fini(env, llr);
4699 RETURN(llmd->llmd_assistant_status);
4702 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
4703 if (llmd->llmd_prefetched == 0)
4706 llmd->llmd_prefetched++;
4707 spin_unlock(&llmd->llmd_lock);
4709 wake_up_all(&athread->t_ctl_waitq);
4712 down_write(&com->lc_sem);
4713 com->lc_new_checked++;
4715 lfsck_layout_record_failure(env, lfsck, lo);
4716 up_write(&com->lc_sem);
4718 if (cobj != NULL && !IS_ERR(cobj))
4719 lu_object_put(env, &cobj->do_lu);
4721 if (likely(tgt != NULL))
4724 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
4731 if (llo != NULL && !IS_ERR(llo))
4732 lfsck_layout_object_put(env, llo);
/* For the given object, read its layout EA locally. For each stripe,
 * pre-fetch the OST-object's attribute and generate a structure
 * lfsck_layout_req on the list ::llmd_req_list.
 *
 * For each request on the above list, the lfsck_layout_assistant
 * thread compares the OST side attribute with the local attribute and,
 * if inconsistent, repairs it.
 *
 * All the above processing is async mode with pipeline.
 *
 * Additionally, this function itself validates and, unless in dryrun
 * mode, repairs an inconsistent lmm_oi in the LOV EA (taking the
 * layout/xattr ibits lock and re-reading the EA under write lock
 * before replacing it in a local transaction). */
4745 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
4746 struct lfsck_component *com,
4747 struct dt_object *obj)
4749 struct lfsck_thread_info *info = lfsck_env_info(env);
4750 struct ost_id *oi = &info->lti_oi;
4751 struct lfsck_layout *lo = com->lc_file_ram;
4752 struct lfsck_layout_master_data *llmd = com->lc_data;
4753 struct lfsck_instance *lfsck = com->lc_lfsck;
4754 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4755 struct thandle *handle = NULL;
4756 struct lu_buf *buf = &info->lti_big_buf;
4757 struct lov_mds_md_v1 *lmm = NULL;
4758 struct dt_device *dev = lfsck->li_bottom;
4759 struct lustre_handle lh = { 0 };
4760 ssize_t buflen = buf->lb_len;
4762 bool locked = false;
4763 bool stripe = false;
4764 bool bad_oi = false;
/* Only regular files carry a LOV EA worth checking. */
4767 if (!S_ISREG(lfsck_object_type(obj)))
4770 if (llmd->llmd_assistant_status < 0)
4771 GOTO(out, rc = -ESRCH);
4773 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
4774 lmm_oi_cpu_to_le(oi, oi);
4775 dt_read_lock(env, obj, 0);
4779 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
4785 rc = lfsck_layout_verify_header(lmm);
4786 /* If the LOV EA crashed, then it is possible to be rebuilt later
4787 * when handle orphan OST-objects. */
4791 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
4792 GOTO(out, stripe = true);
4794 /* Inconsistent lmm_oi, should be repaired. */
4797 if (bk->lb_param & LPF_DRYRUN) {
4798 down_write(&com->lc_sem);
4799 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4800 up_write(&com->lc_sem);
4802 GOTO(out, stripe = true);
/* First pass: take the ibits lock, then retry under write lock. */
4805 if (!lustre_handle_is_used(&lh)) {
4806 dt_read_unlock(env, obj);
4808 buf->lb_len = buflen;
4809 rc = lfsck_layout_lock(env, com, obj, &lh,
4810 MDS_INODELOCK_LAYOUT |
4811 MDS_INODELOCK_XATTR);
4815 handle = dt_trans_create(env, dev);
4817 GOTO(out, rc = PTR_ERR(handle));
4819 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4820 LU_XATTR_REPLACE, handle);
4824 rc = dt_trans_start_local(env, dev, handle);
4828 dt_write_lock(env, obj, 0);
4835 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4836 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
4840 down_write(&com->lc_sem);
4841 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4842 up_write(&com->lc_sem);
4844 GOTO(out, stripe = true);
4848 if (lustre_handle_is_used(&lh))
4849 dt_write_unlock(env, obj);
4851 dt_read_unlock(env, obj);
4854 if (handle != NULL && !IS_ERR(handle))
4855 dt_trans_stop(env, dev, handle);
4857 lfsck_layout_unlock(&lh);
4860 CDEBUG(D_LFSCK, "%s: layout LFSCK master %s bad lmm_oi for "
4861 DFID": rc = %d\n", lfsck_lfsck2name(lfsck),
4862 bk->lb_param & LPF_DRYRUN ? "found" : "repaired",
4863 PFID(lfsck_dto2fid(obj)), rc);
/* Hand the per-stripe prefetch work to lfsck_layout_scan_stripes(). */
4866 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
4868 down_write(&com->lc_sem);
4869 com->lc_new_checked++;
4871 lfsck_layout_record_failure(env, lfsck, lo);
4872 up_write(&com->lc_sem);
4874 buf->lb_len = buflen;
/*
 * Slave (OST) side per-object OIT callback: mark the object's FID in
 * the orphan-tracking rbtree, then maintain the per-sequence LAST_ID
 * bookkeeping - look up (or load) the lfsck_layout_seq for the FID's
 * sequence, track the largest object id seen, and if an id exceeds the
 * recorded LAST_ID (after a reload to tolerate concurrent OFD object
 * creation), flag LF_CRASHED_LASTID and notify the OFD so LAST_ID can
 * be rebuilt.
 *
 * NOTE(review): several original lines (rc/seq/oid declarations,
 * branch keywords) are missing from this fragment.
 */
4879 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
4880 struct lfsck_component *com,
4881 struct dt_object *obj)
4883 struct lfsck_instance *lfsck = com->lc_lfsck;
4884 struct lfsck_layout *lo = com->lc_file_ram;
4885 const struct lu_fid *fid = lfsck_dto2fid(obj);
4886 struct lfsck_layout_slave_data *llsd = com->lc_data;
4887 struct lfsck_layout_seq *lls;
4893 LASSERT(llsd != NULL);
/* Fault-injection hook: stall this OST's scan for testing. */
4895 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
4896 cfs_fail_val == lfsck_dev_idx(lfsck->li_bottom)) {
4897 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
4899 struct ptlrpc_thread *thread = &lfsck->li_thread;
4901 l_wait_event(thread->t_ctl_waitq,
4902 !thread_is_running(thread),
4906 lfsck_rbtree_update_bitmap(env, com, fid, false);
4908 down_write(&com->lc_sem);
4909 if (fid_is_idif(fid))
4911 else if (!fid_is_norm(fid) ||
4912 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
4913 GOTO(unlock, rc = 0);
4916 com->lc_new_checked++;
4918 lls = lfsck_layout_seq_lookup(llsd, seq);
4921 if (unlikely(lls == NULL))
4922 GOTO(unlock, rc = -ENOMEM);
4924 INIT_LIST_HEAD(&lls->lls_list);
4926 rc = lfsck_layout_lastid_load(env, com, lls);
4928 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to "
4929 "load LAST_ID for "LPX64": rc = %d\n",
4930 lfsck_lfsck2name(com->lc_lfsck), seq, rc);
4931 lo->ll_objs_failed_phase1++;
4936 lfsck_layout_seq_insert(llsd, lls);
/* The LAST_ID object itself is not a regular OST object. */
4939 if (unlikely(fid_is_last_id(fid)))
4940 GOTO(unlock, rc = 0);
4943 if (oid > lls->lls_lastid_known)
4944 lls->lls_lastid_known = oid;
4946 if (oid > lls->lls_lastid) {
4947 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
4948 /* OFD may create new objects during LFSCK scanning. */
4949 rc = lfsck_layout_lastid_reload(env, com, lls);
4950 if (unlikely(rc != 0))
4951 CDEBUG(D_LFSCK, "%s: layout LFSCK failed to "
4952 "reload LAST_ID for "LPX64": rc = %d\n",
4953 lfsck_lfsck2name(com->lc_lfsck),
4955 if (oid <= lls->lls_lastid)
4956 GOTO(unlock, rc = 0);
4958 LASSERT(lfsck->li_out_notify != NULL);
4960 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4961 LE_LASTID_REBUILDING);
4962 lo->ll_flags |= LF_CRASHED_LASTID;
4965 lls->lls_lastid = oid;
4969 GOTO(unlock, rc = 0);
4972 up_write(&com->lc_sem);
/*
 * Directory-entry callback of the lfsck_operations vtable.  Layout
 * LFSCK is driven by the OIT (object-table) scan, not by namespace
 * traversal, so this is effectively a no-op.  (The body is missing
 * from this fragment - presumably it just returns 0.)
 */
4977 static int lfsck_layout_exec_dir(const struct lu_env *env,
4978 struct lfsck_component *com,
4979 struct dt_object *obj,
4980 struct lu_dirent *ent)
/*
 * Master (MDT) side post-processing after phase1 finishes (or fails).
 * Hands the result to the assistant thread (llmd_to_post / llmd_exit),
 * waits for it to drain llmd_req_list or stop, then under lc_sem +
 * li_lock updates the trace file: on success move to phase2 on
 * li_list_double_scan with LF_SCANNED_ONCE set; on result == 0 take
 * the instance status (stopped/paused); otherwise mark LS_FAILED.
 * Finally accumulates phase1 runtime/counters and persists the file.
 */
4985 static int lfsck_layout_master_post(const struct lu_env *env,
4986 struct lfsck_component *com,
4987 int result, bool init)
4989 struct lfsck_instance *lfsck = com->lc_lfsck;
4990 struct lfsck_layout *lo = com->lc_file_ram;
4991 struct lfsck_layout_master_data *llmd = com->lc_data;
4992 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4993 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4994 struct l_wait_info lwi = { 0 };
4999 llmd->llmd_post_result = result;
5000 llmd->llmd_to_post = 1;
5001 if (llmd->llmd_post_result <= 0)
5002 llmd->llmd_exit = 1;
5004 wake_up_all(&athread->t_ctl_waitq);
5005 l_wait_event(mthread->t_ctl_waitq,
5006 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
5007 thread_is_stopped(athread),
5010 if (llmd->llmd_assistant_status < 0)
5011 result = llmd->llmd_assistant_status;
5013 down_write(&com->lc_sem);
5014 spin_lock(&lfsck->li_lock);
5015 /* When LFSCK failed, there may be some prefetched objects those are
5016 * not been processed yet, we do not know the exactly position, then
5017 * just restart from last check-point next time. */
5018 if (!init && !llmd->llmd_exit)
5019 lo->ll_pos_last_checkpoint =
5020 lfsck->li_pos_current.lp_oit_cookie;
5023 lo->ll_status = LS_SCANNING_PHASE2;
5024 lo->ll_flags |= LF_SCANNED_ONCE;
5025 lo->ll_flags &= ~LF_UPGRADE;
5026 list_del_init(&com->lc_link);
5027 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
5028 } else if (result == 0) {
5029 lo->ll_status = lfsck->li_status;
5030 if (lo->ll_status == 0)
5031 lo->ll_status = LS_STOPPED;
5032 if (lo->ll_status != LS_PAUSED) {
5033 list_del_init(&com->lc_link);
5034 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5037 lo->ll_status = LS_FAILED;
5038 list_del_init(&com->lc_link);
5039 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5041 spin_unlock(&lfsck->li_lock);
5044 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
5045 HALF_SEC - lfsck->li_time_last_checkpoint);
5046 lo->ll_time_last_checkpoint = cfs_time_current_sec();
5047 lo->ll_objs_checked_phase1 += com->lc_new_checked;
5048 com->lc_new_checked = 0;
5051 rc = lfsck_layout_store(env, com);
5052 up_write(&com->lc_sem);
5054 CDEBUG(D_LFSCK, "%s: layout LFSCK master post done: rc = %d\n",
5055 lfsck_lfsck2name(lfsck), rc);
/*
 * Slave (OST) side post-processing after phase1 finishes (or fails).
 * Persists the per-sequence LAST_ID files, then under lc_sem + li_lock
 * updates the trace file with the same success / stopped-paused /
 * failed state transitions as the master variant (also clearing
 * LF_CRASHED_LASTID on success).  Notifies the OFD through
 * li_out_notify, accumulates phase1 runtime/counters, stores the
 * file, tells the master(s) LE_PHASE1_DONE, and tears down the orphan
 * rbtree.
 */
5060 static int lfsck_layout_slave_post(const struct lu_env *env,
5061 struct lfsck_component *com,
5062 int result, bool init)
5064 struct lfsck_instance *lfsck = com->lc_lfsck;
5065 struct lfsck_layout *lo = com->lc_file_ram;
5069 rc = lfsck_layout_lastid_store(env, com);
5073 LASSERT(lfsck->li_out_notify != NULL);
5075 down_write(&com->lc_sem);
5076 spin_lock(&lfsck->li_lock);
5078 lo->ll_pos_last_checkpoint =
5079 lfsck->li_pos_current.lp_oit_cookie;
5081 lo->ll_status = LS_SCANNING_PHASE2;
5082 lo->ll_flags |= LF_SCANNED_ONCE;
5083 if (lo->ll_flags & LF_CRASHED_LASTID) {
5085 lo->ll_flags &= ~LF_CRASHED_LASTID;
5087 lo->ll_flags &= ~LF_UPGRADE;
5088 list_del_init(&com->lc_link);
5089 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
5090 } else if (result == 0) {
5091 lo->ll_status = lfsck->li_status;
5092 if (lo->ll_status == 0)
5093 lo->ll_status = LS_STOPPED;
5094 if (lo->ll_status != LS_PAUSED) {
5095 list_del_init(&com->lc_link);
5096 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5099 lo->ll_status = LS_FAILED;
5100 list_del_init(&com->lc_link);
5101 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5103 spin_unlock(&lfsck->li_lock);
5106 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5110 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
5111 HALF_SEC - lfsck->li_time_last_checkpoint);
5112 lo->ll_time_last_checkpoint = cfs_time_current_sec();
5113 lo->ll_objs_checked_phase1 += com->lc_new_checked;
5114 com->lc_new_checked = 0;
5117 rc = lfsck_layout_store(env, com);
5118 up_write(&com->lc_sem);
/* Tell every attached master that this OST finished phase1. */
5120 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
5123 lfsck_rbtree_cleanup(env, com);
5125 CDEBUG(D_LFSCK, "%s: layout LFSCK slave post done: rc = %d\n",
5126 lfsck_lfsck2name(lfsck), rc);
/*
 * Dump the layout LFSCK status and statistics into the seq_file @m
 * (procfs reader path).
 *
 * NOTE(review): this listing is garbled -- every line carries a stray
 * leading number and the embedded numbering is non-contiguous, so some
 * statements (braces, declarations, error checks) are elided between the
 * visible lines.  Comments describe only the visible logic; do not treat
 * this block as compilable as-is.
 */
5131 static int lfsck_layout_dump(const struct lu_env *env,
5132 struct lfsck_component *com, struct seq_file *m)
5134 struct lfsck_instance *lfsck = com->lc_lfsck;
5135 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
5136 struct lfsck_layout *lo = com->lc_file_ram;
/* Hold lc_sem for read so the in-RAM trace file is stable while dumped. */
5139 down_read(&com->lc_sem);
5140 seq_printf(m, "name: lfsck_layout\n"
5146 lfsck_status2names(lo->ll_status));
5148 rc = lfsck_bits_dump(m, lo->ll_flags, lfsck_flags_names, "flags");
5152 rc = lfsck_bits_dump(m, bk->lb_param, lfsck_param_names, "param");
5156 rc = lfsck_time_dump(m, lo->ll_time_last_complete,
5157 "time_since_last_completed");
5161 rc = lfsck_time_dump(m, lo->ll_time_latest_start,
5162 "time_since_latest_start");
5166 rc = lfsck_time_dump(m, lo->ll_time_last_checkpoint,
5167 "time_since_last_checkpoint");
5171 seq_printf(m, "latest_start_position: "LPU64"\n"
5172 "last_checkpoint_position: "LPU64"\n"
5173 "first_failure_position: "LPU64"\n",
5174 lo->ll_pos_latest_start,
5175 lo->ll_pos_last_checkpoint,
5176 lo->ll_pos_first_inconsistent);
/* Per-inconsistency repair counters, indexed by LLIT_* - 1. */
5178 seq_printf(m, "success_count: %u\n"
5179 "repaired_dangling: "LPU64"\n"
5180 "repaired_unmatched_pair: "LPU64"\n"
5181 "repaired_multiple_referenced: "LPU64"\n"
5182 "repaired_orphan: "LPU64"\n"
5183 "repaired_inconsistent_owner: "LPU64"\n"
5184 "repaired_others: "LPU64"\n"
5185 "skipped: "LPU64"\n"
5186 "failed_phase1: "LPU64"\n"
5187 "failed_phase2: "LPU64"\n",
5188 lo->ll_success_count,
5189 lo->ll_objs_repaired[LLIT_DANGLING - 1],
5190 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
5191 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
5192 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
5193 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
5194 lo->ll_objs_repaired[LLIT_OTHERS - 1],
5195 lo->ll_objs_skipped,
5196 lo->ll_objs_failed_phase1,
5197 lo->ll_objs_failed_phase2);
/*
 * Phase1 in progress: compute average speed (checked / run time) and
 * real-time speed (new_checked * HZ / duration since last checkpoint).
 */
5199 if (lo->ll_status == LS_SCANNING_PHASE1) {
5201 const struct dt_it_ops *iops;
5202 cfs_duration_t duration = cfs_time_current() -
5203 lfsck->li_time_last_checkpoint;
5204 __u64 checked = lo->ll_objs_checked_phase1 +
5205 com->lc_new_checked;
5206 __u64 speed = checked;
5207 __u64 new_checked = com->lc_new_checked * HZ;
/* HALF_SEC added so cfs_duration_sec() rounds to nearest second. */
5208 __u32 rtime = lo->ll_run_time_phase1 +
5209 cfs_duration_sec(duration + HALF_SEC);
5212 do_div(new_checked, duration);
5214 do_div(speed, rtime);
5215 seq_printf(m, "checked_phase1: "LPU64"\n"
5216 "checked_phase2: "LPU64"\n"
5217 "run_time_phase1: %u seconds\n"
5218 "run_time_phase2: %u seconds\n"
5219 "average_speed_phase1: "LPU64" items/sec\n"
5220 "average_speed_phase2: N/A\n"
5221 "real-time_speed_phase1: "LPU64" items/sec\n"
5222 "real-time_speed_phase2: N/A\n",
5224 lo->ll_objs_checked_phase2,
5226 lo->ll_run_time_phase2,
5230 LASSERT(lfsck->li_di_oit != NULL);
5232 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
5234 /* The low layer otable-based iteration position may NOT
5235 * exactly match the layout-based directory traversal
5236 * cookie. Generally, it is not a serious issue. But the
5237 * caller should NOT make assumption on that. */
5238 pos = iops->store(env, lfsck->li_di_oit);
5239 if (!lfsck->li_current_oit_processed)
5241 seq_printf(m, "current_position: "LPU64"\n", pos);
/* Phase2 in progress: both phases' average speeds are reportable. */
5243 } else if (lo->ll_status == LS_SCANNING_PHASE2) {
5244 cfs_duration_t duration = cfs_time_current() -
5245 lfsck->li_time_last_checkpoint;
5246 __u64 checked = lo->ll_objs_checked_phase2 +
5247 com->lc_new_checked;
5248 __u64 speed1 = lo->ll_objs_checked_phase1;
5249 __u64 speed2 = checked;
5250 __u64 new_checked = com->lc_new_checked * HZ;
5251 __u32 rtime = lo->ll_run_time_phase2 +
5252 cfs_duration_sec(duration + HALF_SEC);
5255 do_div(new_checked, duration);
/* Guard against divide-by-zero when phase1 finished instantly. */
5256 if (lo->ll_run_time_phase1 != 0)
5257 do_div(speed1, lo->ll_run_time_phase1);
5259 do_div(speed2, rtime);
5260 rc = seq_printf(m, "checked_phase1: "LPU64"\n"
5261 "checked_phase2: "LPU64"\n"
5262 "run_time_phase1: %u seconds\n"
5263 "run_time_phase2: %u seconds\n"
5264 "average_speed_phase1: "LPU64" items/sec\n"
5265 "average_speed_phase2: "LPU64" items/sec\n"
5266 "real-time_speed_phase1: N/A\n"
5267 "real-time_speed_phase2: "LPU64" items/sec\n"
5268 "current_position: "DFID"\n",
5269 lo->ll_objs_checked_phase1,
5271 lo->ll_run_time_phase1,
5276 PFID(&com->lc_fid_latest_scanned_phase2));
/* Not scanning: report historical averages only; no current position. */
5281 __u64 speed1 = lo->ll_objs_checked_phase1;
5282 __u64 speed2 = lo->ll_objs_checked_phase2;
5284 if (lo->ll_run_time_phase1 != 0)
5285 do_div(speed1, lo->ll_run_time_phase1);
5286 if (lo->ll_run_time_phase2 != 0)
5287 do_div(speed2, lo->ll_run_time_phase2);
5288 seq_printf(m, "checked_phase1: "LPU64"\n"
5289 "checked_phase2: "LPU64"\n"
5290 "run_time_phase1: %u seconds\n"
5291 "run_time_phase2: %u seconds\n"
5292 "average_speed_phase1: "LPU64" items/sec\n"
5293 "average_speed_phase2: "LPU64" objs/sec\n"
5294 "real-time_speed_phase1: N/A\n"
5295 "real-time_speed_phase2: N/A\n"
5296 "current_position: N/A\n",
5297 lo->ll_objs_checked_phase1,
5298 lo->ll_objs_checked_phase2,
5299 lo->ll_run_time_phase1,
5300 lo->ll_run_time_phase2,
5305 up_read(&com->lc_sem);
/*
 * Master side: hand over to the assistant thread for the phase2
 * (double) scan, and wait until it has entered double-scan (or stopped).
 * NOTE(review): interior lines are elided in this listing (embedded
 * numbering is non-contiguous); the early-return body and tail return
 * are not visible.
 */
5310 static int lfsck_layout_master_double_scan(const struct lu_env *env,
5311 struct lfsck_component *com)
5313 struct lfsck_layout_master_data *llmd = com->lc_data;
5314 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5315 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5316 struct lfsck_layout *lo = com->lc_file_ram;
5317 struct l_wait_info lwi = { 0 };
/* Nothing to do unless we are actually in phase2 scanning. */
5319 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
/* Ask the assistant thread to start the double scan and wake it. */
5322 llmd->llmd_to_double_scan = 1;
5323 wake_up_all(&athread->t_ctl_waitq);
5324 l_wait_event(mthread->t_ctl_waitq,
5325 llmd->llmd_in_double_scan ||
5326 thread_is_stopped(athread),
/* Propagate an assistant failure to the caller. */
5328 if (llmd->llmd_assistant_status < 0)
5329 return llmd->llmd_assistant_status;
/*
 * Slave (OST) side phase2: poll the master(s) periodically until all
 * masters have finished with this slave, then clean up the rbtree and
 * notify LE_PHASE2_DONE.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); loop structure and some returns are not fully visible.
 */
5334 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
5335 struct lfsck_component *com)
5337 struct lfsck_instance *lfsck = com->lc_lfsck;
5338 struct lfsck_layout_slave_data *llsd = com->lc_data;
5339 struct lfsck_layout *lo = com->lc_file_ram;
5340 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Not in phase2: release the rbtree and tell the master we are done. */
5344 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
5345 lfsck_rbtree_cleanup(env, com);
5346 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
5350 CDEBUG(D_LFSCK, "%s: layout LFSCK slave phase2 scan start\n",
5351 lfsck_lfsck2name(lfsck));
5353 atomic_inc(&lfsck->li_double_scan_count);
5355 com->lc_new_checked = 0;
5356 com->lc_new_scanned = 0;
5357 com->lc_time_last_checkpoint = cfs_time_current();
5358 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
5359 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Re-query the master every 30 seconds while any master remains. */
5362 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
5365 rc = lfsck_layout_slave_query_master(env, com);
5366 if (list_empty(&llsd->llsd_master_list)) {
5367 if (unlikely(!thread_is_running(thread)))
/* Wait until stopped, the master list drains, or the timeout fires. */
5378 rc = l_wait_event(thread->t_ctl_waitq,
5379 !thread_is_running(thread) ||
5380 list_empty(&llsd->llsd_master_list),
5382 if (unlikely(!thread_is_running(thread)))
5385 if (rc == -ETIMEDOUT)
5388 GOTO(done, rc = (rc < 0 ? rc : 1));
5392 rc = lfsck_layout_double_scan_result(env, com, rc);
5394 lfsck_rbtree_cleanup(env, com);
5395 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
/* Last double-scanner wakes up anyone waiting for all scans to end. */
5396 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
5397 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
5399 CDEBUG(D_LFSCK, "%s: layout LFSCK slave phase2 scan finished, "
5400 "status %d: rc = %d\n",
5401 lfsck_lfsck2name(lfsck), lo->ll_status, rc);
/*
 * Release the master-side private data: detach every OST/MDT target
 * descriptor from the phase1/phase2/main lists under the descs lock,
 * then (in elided lines) free the llmd structure itself.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); closing braces and the final OBD_FREE are not visible.
 */
5406 static void lfsck_layout_master_data_release(const struct lu_env *env,
5407 struct lfsck_component *com)
5409 struct lfsck_layout_master_data *llmd = com->lc_data;
5410 struct lfsck_instance *lfsck = com->lc_lfsck;
5411 struct lfsck_tgt_descs *ltds;
5412 struct lfsck_tgt_desc *ltd;
5413 struct lfsck_tgt_desc *next;
/* The assistant thread must be idle and no requests may be pending. */
5415 LASSERT(llmd != NULL);
5416 LASSERT(thread_is_init(&llmd->llmd_thread) ||
5417 thread_is_stopped(&llmd->llmd_thread));
5418 LASSERT(list_empty(&llmd->llmd_req_list));
5420 com->lc_data = NULL;
/* Drain the OST target lists. */
5422 ltds = &lfsck->li_ost_descs;
5423 spin_lock(&ltds->ltd_lock);
5424 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
5425 ltd_layout_phase_list) {
5426 list_del_init(&ltd->ltd_layout_phase_list);
5428 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
5429 ltd_layout_phase_list) {
5430 list_del_init(&ltd->ltd_layout_phase_list);
5432 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
5434 list_del_init(&ltd->ltd_layout_list);
5436 spin_unlock(&ltds->ltd_lock);
/* Drain the MDT target lists the same way. */
5438 ltds = &lfsck->li_mdt_descs;
5439 spin_lock(&ltds->ltd_lock);
5440 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
5441 ltd_layout_phase_list) {
5442 list_del_init(&ltd->ltd_layout_phase_list);
5444 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
5445 ltd_layout_phase_list) {
5446 list_del_init(&ltd->ltd_layout_phase_list);
5448 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
5450 list_del_init(&ltd->ltd_layout_list);
5452 spin_unlock(&ltds->ltd_lock);
/*
 * Release the slave-side private data: drop every cached sequence's
 * LAST_ID object, detach pending master targets, tear down the
 * fid-accessed rbtree, and clear com->lc_data.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); per-entry frees and the final OBD_FREE are not visible.
 */
5457 static void lfsck_layout_slave_data_release(const struct lu_env *env,
5458 struct lfsck_component *com)
5460 struct lfsck_layout_slave_data *llsd = com->lc_data;
5461 struct lfsck_layout_seq *lls;
5462 struct lfsck_layout_seq *next;
5463 struct lfsck_layout_slave_target *llst;
5464 struct lfsck_layout_slave_target *tmp;
5466 LASSERT(llsd != NULL);
/* Put the LAST_ID object reference held for each tracked sequence. */
5468 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
5470 list_del_init(&lls->lls_list);
5471 lfsck_object_put(env, lls->lls_lastid_obj);
5475 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
5477 list_del_init(&llst->llst_list);
5481 lfsck_rbtree_cleanup(env, com);
5482 com->lc_data = NULL;
/*
 * Ask the master assistant thread to exit and wait until it has either
 * never started (init) or fully stopped.
 */
5486 static void lfsck_layout_master_quit(const struct lu_env *env,
5487 struct lfsck_component *com)
5489 struct lfsck_layout_master_data *llmd = com->lc_data;
5490 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5491 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5492 struct l_wait_info lwi = { 0 };
/* Signal exit and wake the assistant so it notices the flag. */
5494 llmd->llmd_exit = 1;
5495 wake_up_all(&athread->t_ctl_waitq);
5496 l_wait_event(mthread->t_ctl_waitq,
5497 thread_is_init(athread) ||
5498 thread_is_stopped(athread),
/* Slave quit handler: just discard the fid-accessed rbtree. */
5502 static void lfsck_layout_slave_quit(const struct lu_env *env,
5503 struct lfsck_component *com)
5505 lfsck_rbtree_cleanup(env, com);
/*
 * Handle an event notification arriving at the layout LFSCK master from
 * a slave (OST) or a peer MDT: move the reporting target between the
 * phase1/phase2 lists, and possibly stop the whole LFSCK on failure.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); some returns, braces and the `fail` flag setup are not
 * visible.
 */
5508 static int lfsck_layout_master_in_notify(const struct lu_env *env,
5509 struct lfsck_component *com,
5510 struct lfsck_request *lr)
5512 struct lfsck_instance *lfsck = com->lc_lfsck;
5513 struct lfsck_layout *lo = com->lc_file_ram;
5514 struct lfsck_layout_master_data *llmd = com->lc_data;
5515 struct lfsck_tgt_descs *ltds;
5516 struct lfsck_tgt_desc *ltd;
/* LE_PAIRS_VERIFY is handled separately: verify the MDT/OST pairing. */
5520 if (lr->lr_event == LE_PAIRS_VERIFY) {
5523 rc = lfsck_layout_master_check_pairs(env, com, &lr->lr_fid,
5529 CDEBUG(D_LFSCK, "%s: layout LFSCK master handle notify %u "
5530 "from %s %x, status %d\n", lfsck_lfsck2name(lfsck),
5531 lr->lr_event, (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5532 lr->lr_index, lr->lr_status);
/* Only phase-done / peer-exit events are processed below. */
5534 if (lr->lr_event != LE_PHASE1_DONE &&
5535 lr->lr_event != LE_PHASE2_DONE &&
5536 lr->lr_event != LE_PEER_EXIT)
5539 if (lr->lr_flags & LEF_FROM_OST)
5540 ltds = &lfsck->li_ost_descs;
5542 ltds = &lfsck->li_mdt_descs;
5543 spin_lock(&ltds->ltd_lock);
5544 ltd = LTD_TGT(ltds, lr->lr_index);
5546 spin_unlock(&ltds->ltd_lock);
5551 list_del_init(&ltd->ltd_layout_phase_list);
5552 switch (lr->lr_event) {
5553 case LE_PHASE1_DONE:
/* Target failed/skipped phase1: the scan result is incomplete. */
5554 if (lr->lr_status <= 0) {
5555 ltd->ltd_layout_done = 1;
5556 list_del_init(&ltd->ltd_layout_list);
5557 lo->ll_flags |= LF_INCOMPLETE;
/* Phase1 succeeded: queue the target on the proper phase2 list. */
5562 if (lr->lr_flags & LEF_FROM_OST) {
5563 if (list_empty(&ltd->ltd_layout_list))
5564 list_add_tail(&ltd->ltd_layout_list,
5565 &llmd->llmd_ost_list);
5566 list_add_tail(&ltd->ltd_layout_phase_list,
5567 &llmd->llmd_ost_phase2_list);
5569 if (list_empty(&ltd->ltd_layout_list))
5570 list_add_tail(&ltd->ltd_layout_list,
5571 &llmd->llmd_mdt_list);
5572 list_add_tail(&ltd->ltd_layout_phase_list,
5573 &llmd->llmd_mdt_phase2_list);
5576 case LE_PHASE2_DONE:
5577 ltd->ltd_layout_done = 1;
5578 list_del_init(&ltd->ltd_layout_list);
/* Presumably the LE_PEER_EXIT case -- TODO confirm (label elided). */
5582 ltd->ltd_layout_done = 1;
5583 list_del_init(&ltd->ltd_layout_list);
5584 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT))
5585 lo->ll_flags |= LF_INCOMPLETE;
5590 spin_unlock(&ltds->ltd_lock);
/* On failure with FAILOUT set, stop the local LFSCK instance. */
5592 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5593 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5595 memset(stop, 0, sizeof(*stop));
5596 stop->ls_status = lr->lr_status;
5597 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5598 lfsck_stop(env, lfsck->li_bottom, stop);
5599 } else if (lfsck_layout_master_to_orphan(llmd)) {
5600 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Handle an event notification arriving at the layout LFSCK slave from
 * an MDT: record fid accesses, verify PFID pairings, destroy orphan
 * candidates conditionally, or finish when the master is done.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); breaks, returns and some case labels are not visible.
 */
5606 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
5607 struct lfsck_component *com,
5608 struct lfsck_request *lr)
5610 struct lfsck_instance *lfsck = com->lc_lfsck;
5611 struct lfsck_layout_slave_data *llsd = com->lc_data;
5612 struct lfsck_layout_slave_target *llst;
5616 switch (lr->lr_event) {
5617 case LE_FID_ACCESSED:
/* Mark the FID as accessed in the rbtree bitmap. */
5618 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
5620 case LE_CONDITIONAL_DESTROY:
5621 rc = lfsck_layout_slave_conditional_destroy(env, com, lr);
5623 case LE_PAIRS_VERIFY: {
5624 lr->lr_status = LPVS_INIT;
5625 /* Firstly, if the MDT-object which is claimed via OST-object
5626 * local stored PFID xattr recognizes the OST-object, then it
5627 * must be that the client given PFID is wrong. */
5628 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5633 lr->lr_status = LPVS_INCONSISTENT;
5634 /* The OST-object local stored PFID xattr is stale. We need to
5635 * check whether the MDT-object that is claimed via the client
5636 * given PFID information recognizes the OST-object or not. If
5637 * matches, then need to update the OST-object's PFID xattr. */
5638 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5641 * We are not sure whether the client given PFID information
5642 * is correct or not, do nothing to avoid improper fixing.
5645 * The client given PFID information is also invalid, we can
5646 * NOT fix the OST-object inconsistency.
/* Both checks agree: repair the stale PFID xattr on the OST-object. */
5651 lr->lr_status = LPVS_INCONSISTENT_TOFIX;
5652 rc = lfsck_layout_slave_repair_pfid(env, com, lr);
5656 case LE_PHASE2_DONE:
5658 CDEBUG(D_LFSCK, "%s: layout LFSCK slave handle notify %u "
5659 "from MDT %x, status %d\n", lfsck_lfsck2name(lfsck),
5660 lr->lr_event, lr->lr_index, lr->lr_status);
/* The reporting master is finished with us: drop its tracking entry. */
5666 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
5670 lfsck_layout_llst_put(llst);
/* Last master gone: wake the slave thread waiting in double scan. */
5671 if (list_empty(&llsd->llsd_master_list))
5672 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
5674 if (lr->lr_event == LE_PEER_EXIT &&
5675 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5676 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5678 memset(stop, 0, sizeof(*stop));
5679 stop->ls_status = lr->lr_status;
5680 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5681 lfsck_stop(env, lfsck->li_bottom, stop);
/* Query handler: report the current layout LFSCK status (LS_* value). */
5687 static int lfsck_layout_query(const struct lu_env *env,
5688 struct lfsck_component *com)
5690 struct lfsck_layout *lo = com->lc_file_ram;
5692 return lo->ll_status;
/*
 * On master stop, asynchronously notify one target (OST or MDT) that
 * the LFSCK is co-paused (LE_PEER_EXIT / LS_CO_PAUSED), detaching it
 * from the layout lists first.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); the success path return and laia setup lines are partly
 * missing.
 */
5695 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
5696 struct lfsck_component *com,
5697 struct lfsck_tgt_descs *ltds,
5698 struct lfsck_tgt_desc *ltd,
5699 struct ptlrpc_request_set *set)
5701 struct lfsck_thread_info *info = lfsck_env_info(env);
5702 struct lfsck_async_interpret_args *laia = &info->lti_laia;
5703 struct lfsck_request *lr = &info->lti_lr;
5704 struct lfsck_instance *lfsck = com->lc_lfsck;
/* Target already off the layout lists: nothing to notify. */
5707 spin_lock(&ltds->ltd_lock);
5708 if (list_empty(&ltd->ltd_layout_list)) {
5709 LASSERT(list_empty(&ltd->ltd_layout_phase_list));
5710 spin_unlock(&ltds->ltd_lock);
5715 list_del_init(&ltd->ltd_layout_phase_list);
5716 list_del_init(&ltd->ltd_layout_list);
5717 spin_unlock(&ltds->ltd_lock);
/* Build the LE_PEER_EXIT request for this target. */
5719 memset(lr, 0, sizeof(*lr));
5720 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
5721 lr->lr_event = LE_PEER_EXIT;
5722 lr->lr_active = LFSCK_TYPE_LAYOUT;
5723 lr->lr_status = LS_CO_PAUSED;
5724 if (ltds == &lfsck->li_ost_descs)
5725 lr->lr_flags = LEF_TO_OST;
5727 laia->laia_com = com;
5728 laia->laia_ltds = ltds;
/* Take a reference on the target for the async RPC lifetime. */
5729 atomic_inc(&ltd->ltd_ref);
5730 laia->laia_ltd = ltd;
5732 laia->laia_shared = 0;
5734 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
5735 lfsck_layout_master_async_interpret,
5736 laia, LFSCK_NOTIFY);
5738 CDEBUG(D_LFSCK, "%s: layout LFSCK fail to notify %s %x "
5739 "for co-stop: rc = %d\n",
5740 lfsck_lfsck2name(lfsck),
5741 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5742 ltd->ltd_index, rc);
5749 /* with lfsck::li_lock held */
/*
 * Let a late-arriving MDT join a running slave LFSCK for orphan
 * handling.  Fails unless orphan handling (the rbtree) is active and
 * the MDT index is valid.  Drops/retakes li_lock around llst_add.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); early returns and the final RETURN are not visible.
 */
5750 static int lfsck_layout_slave_join(const struct lu_env *env,
5751 struct lfsck_component *com,
5752 struct lfsck_start_param *lsp)
5754 struct lfsck_instance *lfsck = com->lc_lfsck;
5755 struct lfsck_layout_slave_data *llsd = com->lc_data;
5756 struct lfsck_layout_slave_target *llst;
5757 struct lfsck_start *start = lsp->lsp_start;
/* Joining is only meaningful for orphan-handling runs. */
5761 if (start == NULL || !(start->ls_flags & LPF_ORPHAN))
5764 if (!lsp->lsp_index_valid)
5767 /* If someone is running the LFSCK without orphan handling,
5768 * it will not maintain the object accessing rbtree. So we
5769 * cannot join it for orphan handling. */
5770 if (!llsd->llsd_rbtree_valid)
5773 spin_unlock(&lfsck->li_lock);
5774 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
5775 spin_lock(&lfsck->li_lock);
/* The LFSCK stopped while li_lock was dropped: undo the registration. */
5776 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
5777 spin_unlock(&lfsck->li_lock);
5778 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index,
5781 lfsck_layout_llst_put(llst);
5782 spin_lock(&lfsck->li_lock);
/* Operations vector for the layout LFSCK master (MDT side). */
5789 static struct lfsck_operations lfsck_layout_master_ops = {
5790 .lfsck_reset = lfsck_layout_reset,
5791 .lfsck_fail = lfsck_layout_fail,
5792 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
5793 .lfsck_prep = lfsck_layout_master_prep,
5794 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
5795 .lfsck_exec_dir = lfsck_layout_exec_dir,
5796 .lfsck_post = lfsck_layout_master_post,
5797 .lfsck_interpret = lfsck_layout_master_async_interpret,
5798 .lfsck_dump = lfsck_layout_dump,
5799 .lfsck_double_scan = lfsck_layout_master_double_scan,
5800 .lfsck_data_release = lfsck_layout_master_data_release,
5801 .lfsck_quit = lfsck_layout_master_quit,
5802 .lfsck_in_notify = lfsck_layout_master_in_notify,
5803 .lfsck_query = lfsck_layout_query,
5804 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/* Operations vector for the layout LFSCK slave (OST side). */
5807 static struct lfsck_operations lfsck_layout_slave_ops = {
5808 .lfsck_reset = lfsck_layout_reset,
5809 .lfsck_fail = lfsck_layout_fail,
5810 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
5811 .lfsck_prep = lfsck_layout_slave_prep,
5812 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
5813 .lfsck_exec_dir = lfsck_layout_exec_dir,
5814 .lfsck_post = lfsck_layout_slave_post,
5815 .lfsck_dump = lfsck_layout_dump,
5816 .lfsck_double_scan = lfsck_layout_slave_double_scan,
5817 .lfsck_data_release = lfsck_layout_slave_data_release,
5818 .lfsck_quit = lfsck_layout_slave_quit,
5819 .lfsck_in_notify = lfsck_layout_slave_in_notify,
5820 .lfsck_query = lfsck_layout_query,
5821 .lfsck_join = lfsck_layout_slave_join,
/*
 * Set up the layout LFSCK component for @lfsck: allocate the component
 * and its master/slave private data, locate/create the on-disk trace
 * file, load or (re)initialize it, and link the component onto the
 * instance's idle or scan list according to the stored status.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); allocation checks, several case labels and the tail
 * returns are not fully visible.
 */
5824 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
5826 struct lfsck_component *com;
5827 struct lfsck_layout *lo;
5828 struct dt_object *root = NULL;
5829 struct dt_object *obj;
5837 INIT_LIST_HEAD(&com->lc_link);
5838 INIT_LIST_HEAD(&com->lc_link_dir);
5839 init_rwsem(&com->lc_sem);
5840 atomic_set(&com->lc_ref, 1);
5841 com->lc_lfsck = lfsck;
5842 com->lc_type = LFSCK_TYPE_LAYOUT;
/* MDT acts as LFSCK master; OST as slave. */
5843 if (lfsck->li_master) {
5844 struct lfsck_layout_master_data *llmd;
5846 com->lc_ops = &lfsck_layout_master_ops;
5847 OBD_ALLOC_PTR(llmd);
5849 GOTO(out, rc = -ENOMEM);
5851 INIT_LIST_HEAD(&llmd->llmd_req_list);
5852 spin_lock_init(&llmd->llmd_lock);
5853 INIT_LIST_HEAD(&llmd->llmd_ost_list);
5854 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
5855 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
5856 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
5857 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
5858 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
5859 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
5860 com->lc_data = llmd;
5862 struct lfsck_layout_slave_data *llsd;
5864 com->lc_ops = &lfsck_layout_slave_ops;
5865 OBD_ALLOC_PTR(llsd);
5867 GOTO(out, rc = -ENOMEM);
5869 INIT_LIST_HEAD(&llsd->llsd_seq_list);
5870 INIT_LIST_HEAD(&llsd->llsd_master_list);
5871 spin_lock_init(&llsd->llsd_lock);
5872 llsd->llsd_rb_root = RB_ROOT;
5873 rwlock_init(&llsd->llsd_rb_lock);
5874 com->lc_data = llsd;
/* RAM and disk copies of the lfsck_layout trace file. */
5876 com->lc_file_size = sizeof(*lo);
5877 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
5878 if (com->lc_file_ram == NULL)
5879 GOTO(out, rc = -ENOMEM);
5881 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
5882 if (com->lc_file_disk == NULL)
5883 GOTO(out, rc = -ENOMEM);
5885 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
5887 GOTO(out, rc = PTR_ERR(root));
5889 if (unlikely(!dt_try_as_dir(env, root)))
5890 GOTO(out, rc = -ENOTDIR);
5892 obj = local_file_find_or_create(env, lfsck->li_los, root,
5894 S_IFREG | S_IRUGO | S_IWUSR);
5896 GOTO(out, rc = PTR_ERR(obj));
/* Load the stored state; reset if corrupt, initialize if absent. */
5899 rc = lfsck_layout_load(env, com);
5901 rc = lfsck_layout_reset(env, com, true);
5902 else if (rc == -ENOENT)
5903 rc = lfsck_layout_init(env, com);
5908 lo = com->lc_file_ram;
5909 switch (lo->ll_status) {
5915 spin_lock(&lfsck->li_lock);
5916 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5917 spin_unlock(&lfsck->li_lock);
5920 CERROR("%s: unknown lfsck_layout status %d\n",
5921 lfsck_lfsck2name(lfsck), lo->ll_status);
5923 case LS_SCANNING_PHASE1:
5924 case LS_SCANNING_PHASE2:
5925 /* No need to store the status to disk right now.
5926 * If the system crashed before the status stored,
5927 * it will be loaded back when next time. */
5928 lo->ll_status = LS_CRASHED;
5929 lo->ll_flags |= LF_INCOMPLETE;
5936 spin_lock(&lfsck->li_lock);
5937 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
5938 spin_unlock(&lfsck->li_lock);
/* LAST_ID files crashed: ask the out-of-band notifier to rebuild. */
5942 if (lo->ll_flags & LF_CRASHED_LASTID) {
5943 LASSERT(lfsck->li_out_notify != NULL);
5945 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5946 LE_LASTID_REBUILDING);
5952 if (root != NULL && !IS_ERR(root))
5953 lu_object_put(env, &root->do_lu);
5956 lfsck_component_cleanup(env, com);
5957 CERROR("%s: fail to init layout LFSCK component: rc = %d\n",
5958 lfsck_lfsck2name(lfsck), rc);
/*
 * State of one orphan OST-object iteration (dt_it implementation over
 * the slave's fid-accessed rbtree).
 */
5964 struct lfsck_orphan_it {
5965 struct lfsck_component *loi_com;
5966 struct lfsck_rbtree_node *loi_lrn;
5967 struct lfsck_layout_slave_target *loi_llst;
5968 struct lu_fid loi_key;
5969 struct lu_orphan_rec loi_rec;
/* Set once the iteration has run past the last rbtree node. */
5971 unsigned int loi_over:1;
/*
 * Check via the FLD whether @fid's sequence belongs to the MDT with
 * index @idx.  Abnormal FIDs are attributed to MDT0.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); the return statements for each branch are not visible.
 */
5974 static int lfsck_fid_match_idx(const struct lu_env *env,
5975 struct lfsck_instance *lfsck,
5976 const struct lu_fid *fid, int idx)
5978 struct seq_server_site *ss;
5979 struct lu_server_fld *sf;
5980 struct lu_seq_range range = { 0 };
5983 /* All abnormal cases will be returned to MDT0. */
5984 if (!fid_is_norm(fid)) {
5991 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
5992 if (unlikely(ss == NULL))
5995 sf = ss->ss_server_fld;
5996 LASSERT(sf != NULL);
/* Look up which MDT owns the FID's sequence. */
5998 fld_range_set_any(&range);
5999 rc = fld_server_lookup(env, sf, fid_seq(fid), &range);
6003 if (!fld_range_is_mdt(&range))
6006 if (range.lsr_index == idx)
/*
 * Destroy an orphan OST-object via a local transaction: declare ref_del
 * and destroy, then execute them under the object write lock.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); error-check branches between the declare calls are not
 * visible.
 */
6012 static void lfsck_layout_destroy_orphan(const struct lu_env *env,
6013 struct dt_device *dev,
6014 struct dt_object *obj)
6016 struct thandle *handle;
6020 handle = dt_trans_create(env, dev);
6024 rc = dt_declare_ref_del(env, obj, handle);
6028 rc = dt_declare_destroy(env, obj, handle);
6032 rc = dt_trans_start_local(env, dev, handle);
6036 dt_write_lock(env, obj, 0);
6037 rc = dt_ref_del(env, obj, handle);
6039 rc = dt_destroy(env, obj, handle);
6040 dt_write_unlock(env, obj);
6045 dt_trans_stop(env, dev, handle);
6047 CDEBUG(D_LFSCK, "destroy orphan OST-object "DFID": rc = %d\n",
6048 PFID(lfsck_dto2fid(obj)), rc);
/* Stub: the orphan "index" supports iteration only, not lookup. */
6053 static int lfsck_orphan_index_lookup(const struct lu_env *env,
6054 struct dt_object *dt,
6056 const struct dt_key *key,
6057 struct lustre_capa *capa)
/* Stub: insertion is not supported on the orphan pseudo-index. */
6062 static int lfsck_orphan_index_declare_insert(const struct lu_env *env,
6063 struct dt_object *dt,
6064 const struct dt_rec *rec,
6065 const struct dt_key *key,
6066 struct thandle *handle)
/* Stub: insertion is not supported on the orphan pseudo-index. */
6071 static int lfsck_orphan_index_insert(const struct lu_env *env,
6072 struct dt_object *dt,
6073 const struct dt_rec *rec,
6074 const struct dt_key *key,
6075 struct thandle *handle,
6076 struct lustre_capa *capa,
/* Stub: deletion is not supported on the orphan pseudo-index. */
6082 static int lfsck_orphan_index_declare_delete(const struct lu_env *env,
6083 struct dt_object *dt,
6084 const struct dt_key *key,
6085 struct thandle *handle)
/* Stub: deletion is not supported on the orphan pseudo-index. */
6090 static int lfsck_orphan_index_delete(const struct lu_env *env,
6091 struct dt_object *dt,
6092 const struct dt_key *key,
6093 struct thandle *handle,
6094 struct lustre_capa *capa)
/*
 * Initialize the orphan iterator for the MDT identified by @attr: take
 * references on the lfsck instance/component, claim the per-MDT slave
 * target entry, prune fully-accessed rbtree nodes on the first pass,
 * and hold the rbtree read lock for the whole iteration (released in
 * fini).  Returns the iterator or ERR_PTR on failure.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); the it allocation, cleanup labels and some braces are not
 * visible.
 */
6099 static struct dt_it *lfsck_orphan_it_init(const struct lu_env *env,
6100 struct dt_object *dt,
6102 struct lustre_capa *capa)
6104 struct dt_device *dev = lu2dt_dev(dt->do_lu.lo_dev);
6105 struct lfsck_instance *lfsck;
6106 struct lfsck_component *com = NULL;
6107 struct lfsck_layout_slave_data *llsd;
6108 struct lfsck_orphan_it *it = NULL;
6112 lfsck = lfsck_instance_find(dev, true, false);
6113 if (unlikely(lfsck == NULL))
6114 RETURN(ERR_PTR(-ENXIO));
6116 com = lfsck_component_find(lfsck, LFSCK_TYPE_LAYOUT);
6117 if (unlikely(com == NULL))
6118 GOTO(out, rc = -ENOENT);
6120 llsd = com->lc_data;
/* Without a valid rbtree there is nothing to iterate. */
6121 if (!llsd->llsd_rbtree_valid)
6122 GOTO(out, rc = -ESRCH);
6126 GOTO(out, rc = -ENOMEM);
/* @attr carries the requesting MDT index -- TODO confirm. */
6128 it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
6129 if (it->loi_llst == NULL)
6130 GOTO(out, rc = -ENXIO);
6132 if (dev->dd_record_fid_accessed) {
6133 /* The first iteration against the rbtree, scan the whole rbtree
6134 * to remove the nodes which do NOT need to be handled. */
6135 write_lock(&llsd->llsd_rb_lock);
/* Re-check under the write lock: another iterator may have raced. */
6136 if (dev->dd_record_fid_accessed) {
6137 struct rb_node *node;
6138 struct rb_node *next;
6139 struct lfsck_rbtree_node *lrn;
6141 /* No need to record the fid accessing anymore. */
6142 dev->dd_record_fid_accessed = 0;
6144 node = rb_first(&llsd->llsd_rb_root);
6145 while (node != NULL) {
6146 next = rb_next(node);
6147 lrn = rb_entry(node, struct lfsck_rbtree_node,
/* Every known object was accessed: this node holds no orphans. */
6149 if (atomic_read(&lrn->lrn_known_count) <=
6150 atomic_read(&lrn->lrn_accessed_count)) {
6151 rb_erase(node, &llsd->llsd_rb_root);
6152 lfsck_rbtree_free(lrn);
6157 write_unlock(&llsd->llsd_rb_lock);
6160 /* read lock the rbtree when init, and unlock when fini */
6161 read_lock(&llsd->llsd_rb_lock);
6169 lfsck_component_put(env, com);
6171 CDEBUG(D_LFSCK, "%s: init the orphan iteration: rc = %d\n",
6172 lfsck_lfsck2name(lfsck), rc);
6174 lfsck_instance_put(env, lfsck);
6179 it = (struct lfsck_orphan_it *)ERR_PTR(rc);
6182 return (struct dt_it *)it;
/*
 * Finish the orphan iteration: drop the rbtree read lock taken in init,
 * save the current key/hash into the slave target entry so a later
 * iteration can resume, and release the held references.
 * NOTE(review): interior lines are elided (non-contiguous embedded
 * numbering); the OBD_FREE of @it and lfsck_instance_put are not
 * visible.
 */
6185 static void lfsck_orphan_it_fini(const struct lu_env *env,
6188 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6189 struct lfsck_component *com = it->loi_com;
6190 struct lfsck_layout_slave_data *llsd;
6191 struct lfsck_layout_slave_target *llst;
6194 CDEBUG(D_LFSCK, "%s: fini the orphan iteration\n",
6195 lfsck_lfsck2name(com->lc_lfsck));
6197 llsd = com->lc_data;
/* Paired with the read_lock taken in lfsck_orphan_it_init(). */
6198 read_unlock(&llsd->llsd_rb_lock);
6199 llst = it->loi_llst;
6200 LASSERT(llst != NULL);
6202 /* Save the key and hash for iterate next. */
6203 llst->llst_fid = it->loi_key;
6204 llst->llst_hash = it->loi_hash;
6205 lfsck_layout_llst_put(llst);
6206 lfsck_component_put(env, com);
/*
 * Advance the orphan iterator to the next orphan OST-object candidate:
 * walk the rbtree bitmap of known-but-not-accessed objects, load each
 * candidate, read its PFID xattr, and either destroy it, report it, or
 * skip to the next bit.
 * NOTE(review): this listing is garbled -- interior lines are elided
 * (non-contiguous embedded numbering), so several gotos, labels and
 * returns are not visible; comments cover only the visible logic.
 */
6212 * \retval +1: the iteration finished
6213 * \retval 0: on success, not finished
6214 * \retval -ve: on error
6216 static int lfsck_orphan_it_next(const struct lu_env *env,
6219 struct lfsck_thread_info *info = lfsck_env_info(env);
6220 struct filter_fid_old *pfid = &info->lti_old_pfid;
6221 struct lu_attr *la = &info->lti_la;
6222 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6223 struct lu_fid *key = &it->loi_key;
6224 struct lu_orphan_rec *rec = &it->loi_rec;
6225 struct lfsck_component *com = it->loi_com;
6226 struct lfsck_instance *lfsck = com->lc_lfsck;
6227 struct lfsck_layout_slave_data *llsd = com->lc_data;
6228 struct dt_object *obj;
6229 struct lfsck_rbtree_node *lrn;
6233 __u32 idx = it->loi_llst->llst_index;
/* Locate the rbtree node covering the current key. */
6243 lrn = lfsck_rbtree_search(llsd, key, &exact);
6251 key->f_seq = lrn->lrn_seq;
6252 key->f_oid = lrn->lrn_first_oid;
6257 if (unlikely(key->f_oid == 0)) {
6264 lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH) {
/* Node fully accessed: skip forward to the next node with orphans. */
6270 if (unlikely(atomic_read(&lrn->lrn_known_count) <=
6271 atomic_read(&lrn->lrn_accessed_count))) {
6272 struct rb_node *next = rb_next(&lrn->lrn_node);
6274 while (next != NULL) {
6275 lrn = rb_entry(next, struct lfsck_rbtree_node,
6277 if (atomic_read(&lrn->lrn_known_count) >
6278 atomic_read(&lrn->lrn_accessed_count))
6280 next = rb_next(next);
6289 key->f_seq = lrn->lrn_seq;
6290 key->f_oid = lrn->lrn_first_oid;
6294 pos = key->f_oid - lrn->lrn_first_oid;
/* Find the next known-object bit at or after pos in this node. */
6297 pos = find_next_bit(lrn->lrn_known_bitmap,
6298 LFSCK_RBTREE_BITMAP_WIDTH, pos);
6299 if (pos >= LFSCK_RBTREE_BITMAP_WIDTH) {
6300 key->f_oid = lrn->lrn_first_oid + pos;
/* Guard against f_oid wrap-around past the 32-bit limit. */
6301 if (unlikely(key->f_oid < lrn->lrn_first_oid)) {
/* Already accessed: not an orphan candidate; try the next bit. */
6309 if (test_bit(pos, lrn->lrn_accessed_bitmap)) {
6314 key->f_oid = lrn->lrn_first_oid + pos;
6315 obj = lfsck_object_find(env, lfsck, key);
6318 if (rc == -ENOENT) {
6325 dt_read_lock(env, obj, 0);
6326 if (!dt_object_exists(obj)) {
6327 dt_read_unlock(env, obj);
6328 lfsck_object_put(env, obj);
6333 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Read the parent-FID xattr to learn the claimed MDT-object. */
6337 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, pfid, sizeof(*pfid)),
6338 XATTR_NAME_FID, BYPASS_CAPA);
6339 if (rc == -ENODATA) {
6340 /* For the pre-created OST-object, update the bitmap to avoid
6341 * others LFSCK (second phase) iteration to touch it again. */
6342 if (la->la_ctime == 0) {
6343 if (!test_and_set_bit(pos, lrn->lrn_accessed_bitmap))
6344 atomic_inc(&lrn->lrn_accessed_count);
6346 /* For the race between repairing dangling referenced
6347 * MDT-object and unlink the file, it may left orphan
6348 * OST-object there. Destroy it now! */
6349 if (unlikely(!(la->la_mode & S_ISUID))) {
6350 dt_read_unlock(env, obj);
6351 lfsck_layout_destroy_orphan(env,
6354 lfsck_object_put(env, obj);
6358 } else if (idx == 0) {
6359 /* If the orphan OST-object has no parent information,
6360 * regard it as referenced by the MDT-object on MDT0. */
6361 fid_zero(&rec->lor_fid);
6362 rec->lor_uid = la->la_uid;
6363 rec->lor_gid = la->la_gid;
6367 dt_read_unlock(env, obj);
6368 lfsck_object_put(env, obj);
/* The PFID xattr must be one of the two known on-disk layouts. */
6376 if (rc != sizeof(struct filter_fid) &&
6377 rc != sizeof(struct filter_fid_old))
6378 GOTO(out, rc = -EINVAL);
6380 fid_le_to_cpu(&rec->lor_fid, &pfid->ff_parent);
6381 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
6382 * MDT-object's FID::f_ver, instead it is the OST-object index in its
6383 * parent MDT-object's layout EA. */
6384 save = rec->lor_fid.f_stripe_idx;
6385 rec->lor_fid.f_ver = 0;
6386 rc = lfsck_fid_match_idx(env, lfsck, &rec->lor_fid, idx);
6387 /* If the orphan OST-object does not claim the MDT, then next.
6389 * If we do not know whether it matches or not, then return it
6390 * to the MDT for further check. */
6392 dt_read_unlock(env, obj);
6393 lfsck_object_put(env, obj);
/* Restore the stripe index before handing the record back. */
6398 rec->lor_fid.f_stripe_idx = save;
6399 rec->lor_uid = la->la_uid;
6400 rec->lor_gid = la->la_gid;
6402 CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u\n",
6403 lfsck_lfsck2name(com->lc_lfsck), PFID(key), PFID(&rec->lor_fid),
6404 rec->lor_uid, rec->lor_gid);
6409 dt_read_unlock(env, obj);
6410 lfsck_object_put(env, obj);
/*
 * Position the iterator at @key (or the next valid position) by
 * delegating to lfsck_orphan_it_next().
 */
6418 * \retval +1: locate to the exactly position
6419 * \retval 0: cannot locate to the exactly position,
6420 * call next() to move to a valid position.
6421 * \retval -ve: on error
6423 static int lfsck_orphan_it_get(const struct lu_env *env,
6425 const struct dt_key *key)
6427 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6430 it->loi_key = *(struct lu_fid *)key;
6431 rc = lfsck_orphan_it_next(env, di);
/* No-op put: the iterator keeps no per-get state to release. */
6441 static void lfsck_orphan_it_put(const struct lu_env *env,
/* Return the current iteration key (the candidate OST-object FID). */
6446 static struct dt_key *lfsck_orphan_it_key(const struct lu_env *env,
6447 const struct dt_it *di)
6449 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6451 return (struct dt_key *)&it->loi_key;
/* Key size is fixed: the key is always a struct lu_fid. */
6454 static int lfsck_orphan_it_key_size(const struct lu_env *env,
6455 const struct dt_it *di)
6457 return sizeof(struct lu_fid);
/* Copy the current orphan record (parent FID + owner) to @rec. */
6460 static int lfsck_orphan_it_rec(const struct lu_env *env,
6461 const struct dt_it *di,
6465 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6467 *(struct lu_orphan_rec *)rec = it->loi_rec;
/* Return the iteration cookie (dummy hash) used for resume via load. */
6472 static __u64 lfsck_orphan_it_store(const struct lu_env *env,
6473 const struct dt_it *di)
6475 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6477 return it->loi_hash;
/*
 * Resume the iteration from the cookie @hash: if it does not match the
 * hash saved at fini time, restart from the beginning; otherwise resume
 * from the saved FID, then advance via next().
 */
6481 * \retval +1: locate to the exactly position
6482 * \retval 0: cannot locate to the exactly position,
6483 * call next() to move to a valid position.
6484 * \retval -ve: on error
6486 static int lfsck_orphan_it_load(const struct lu_env *env,
6487 const struct dt_it *di,
6490 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6491 struct lfsck_layout_slave_target *llst = it->loi_llst;
6494 LASSERT(llst != NULL);
/* Stale cookie: reset the saved position and iterate from scratch. */
6496 if (hash != llst->llst_hash) {
6497 CDEBUG(D_LFSCK, "%s: the given hash "LPU64" for orphan "
6498 "iteration does not match the one when fini "
6499 LPU64", to be reset.\n",
6500 lfsck_lfsck2name(it->loi_com->lc_lfsck), hash,
6502 fid_zero(&llst->llst_fid);
6503 llst->llst_hash = 0;
6506 it->loi_key = llst->llst_fid;
6507 it->loi_hash = llst->llst_hash;
6508 rc = lfsck_orphan_it_next(env, (struct dt_it *)di);
/* Stub key_rec handler for the orphan pseudo-index iterator. */
6518 static int lfsck_orphan_it_key_rec(const struct lu_env *env,
6519 const struct dt_it *di,
6525 const struct dt_index_operations lfsck_orphan_index_ops = {
6526 .dio_lookup = lfsck_orphan_index_lookup,
6527 .dio_declare_insert = lfsck_orphan_index_declare_insert,
6528 .dio_insert = lfsck_orphan_index_insert,
6529 .dio_declare_delete = lfsck_orphan_index_declare_delete,
6530 .dio_delete = lfsck_orphan_index_delete,
6532 .init = lfsck_orphan_it_init,
6533 .fini = lfsck_orphan_it_fini,
6534 .get = lfsck_orphan_it_get,
6535 .put = lfsck_orphan_it_put,
6536 .next = lfsck_orphan_it_next,
6537 .key = lfsck_orphan_it_key,
6538 .key_size = lfsck_orphan_it_key_size,
6539 .rec = lfsck_orphan_it_rec,
6540 .store = lfsck_orphan_it_store,
6541 .load = lfsck_orphan_it_load,
6542 .key_rec = lfsck_orphan_it_key_rec,