4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_linkea.h>
43 #include <lustre_fid.h>
44 #include <lustre_lib.h>
45 #include <lustre_net.h>
46 #include <lustre/lustre_user.h>
47 #include <md_object.h>
48 #include <obd_class.h>
50 #include "lfsck_internal.h"
52 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Name under which the layout LFSCK component is registered/identified. */
54 static const char lfsck_layout_name[] = "lfsck_layout";
/*
 * Per-sequence state kept by the layout LFSCK slave: one entry per object
 * sequence, linked on lfsck_layout_slave_data::llsd_seq_list.  Tracks the
 * LAST_ID object/value for the sequence so crashed LAST_ID files can be
 * detected and rebuilt.  NOTE(review): this listing is partial — some
 * fields between the ones shown are missing from the extract.
 */
56 struct lfsck_layout_seq {
57 struct list_head lls_list;
60 __u64 lls_lastid_known;
61 struct dt_object *lls_lastid_obj;
62 unsigned int lls_dirty:1;
/*
 * One entry per LFSCK master (MDT) that this slave is doing layout
 * verification for; refcounted (llst_ref, see lfsck_layout_llst_put).
 * NOTE(review): partial listing — trailing fields are not visible here.
 */
65 struct lfsck_layout_slave_target {
66 /* link into lfsck_layout_slave_data::llsd_master_list. */
67 struct list_head llst_list;
68 /* The position for next record in the rbtree for iteration. */
69 struct lu_fid llst_fid;
70 /* Dummy hash for iteration against the rbtree. */
/*
 * Per-component private data for the layout LFSCK slave side: the known
 * sequences, the masters being served, and the in-RAM rbtree (plus the
 * dummy dt_object standing for it) used for orphan OST-object tracking.
 * llsd_rb_lock protects the rbtree; llsd_rbtree_valid gates all access.
 */
77 struct lfsck_layout_slave_data {
78 /* list for lfsck_layout_seq */
79 struct list_head llsd_seq_list;
81 /* list for the masters involve layout verification. */
82 struct list_head llsd_master_list;
85 struct dt_object *llsd_rb_obj;
86 struct rb_root llsd_rb_root;
87 rwlock_t llsd_rb_lock;
88 unsigned int llsd_rbtree_valid:1;
/*
 * Refcounted wrapper around a (parent) MDT object whose layout is being
 * verified, with a snapshot of its attributes taken at pre-fetch time.
 * NOTE(review): partial listing — the refcount/gen fields are not shown.
 */
91 struct lfsck_layout_object {
92 struct dt_object *llo_obj;
93 struct lu_attr llo_attr;
/*
 * A single parent/child layout-verification request, queued on
 * lfsck_layout_master_data::llmd_req_list for the assistant thread.
 */
98 struct lfsck_layout_req {
99 struct list_head llr_list;
100 struct lfsck_layout_object *llr_parent;
101 struct dt_object *llr_child;
103 __u32 llr_lov_idx; /* offset in LOV EA */
/*
 * Per-component private data for the layout LFSCK master: the request
 * queue consumed by the assistant thread (llmd_thread), plus per-target
 * bookkeeping lists for OSTs and other MDTs in phase1/phase2 scanning.
 * llmd_lock protects llmd_req_list; llmd_touch_gen is bumped per sweep
 * so already-visited targets can be recognized (see query_others).
 */
106 struct lfsck_layout_master_data {
107 spinlock_t llmd_lock;
108 struct list_head llmd_req_list;
110 /* list for the ost targets involve layout verification. */
111 struct list_head llmd_ost_list;
113 /* list for the ost targets in phase1 scanning. */
114 struct list_head llmd_ost_phase1_list;
116 /* list for the ost targets in phase1 scanning. */
117 struct list_head llmd_ost_phase2_list;
119 /* list for the mdt targets involve layout verification. */
120 struct list_head llmd_mdt_list;
122 /* list for the mdt targets in phase1 scanning. */
123 struct list_head llmd_mdt_phase1_list;
125 /* list for the mdt targets in phase1 scanning. */
126 struct list_head llmd_mdt_phase2_list;
128 struct ptlrpc_thread llmd_thread;
129 __u32 llmd_touch_gen;
131 int llmd_assistant_status;
132 int llmd_post_result;
133 unsigned int llmd_to_post:1,
134 llmd_to_double_scan:1,
135 llmd_in_double_scan:1,
/* Arguments carried through an async RPC issued by the slave side. */
139 struct lfsck_layout_slave_async_args {
140 struct obd_export *llsaa_exp;
141 struct lfsck_component *llsaa_com;
142 struct lfsck_layout_slave_target *llsaa_llst;
/*
 * Allocate and initialize an lfsck_layout_object wrapper for @obj: read
 * its current attributes, take a reference on the underlying lu_object,
 * and start the wrapper refcount at 1.  Returns ERR_PTR() on failure.
 * NOTE(review): partial listing — the allocation, error cleanup and the
 * final return are not visible in this extract.
 */
145 static struct lfsck_layout_object *
146 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
149 struct lfsck_layout_object *llo;
154 return ERR_PTR(-ENOMEM);
156 rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
163 lu_object_get(&obj->do_lu);
165 /* The gen can be used to check whether some others have changed the
166 * file layout after LFSCK pre-fetching but before real verification. */
168 atomic_set(&llo->llo_ref, 1);
/*
 * Drop a reference on @llst; on the last put the target must already be
 * unlinked from the master list (asserted) and is then freed.
 */
174 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
176 if (atomic_dec_and_test(&llst->llst_ref)) {
177 LASSERT(list_empty(&llst->llst_list));
/*
 * Register master @index with the slave: allocate a new target entry and
 * append it to llsd_master_list under llsd_lock.  If an entry with the
 * same index already exists, the duplicate path (lines elided in this
 * listing) presumably frees the new one — TODO confirm against full file.
 */
184 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
186 struct lfsck_layout_slave_target *llst;
187 struct lfsck_layout_slave_target *tmp;
194 INIT_LIST_HEAD(&llst->llst_list);
196 llst->llst_index = index;
197 atomic_set(&llst->llst_ref, 1);
199 spin_lock(&llsd->llsd_lock);
200 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
201 if (tmp->llst_index == index) {
207 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
208 spin_unlock(&llsd->llsd_lock);
/*
 * Unlink @llst from the master list (if still linked) under llsd_lock,
 * then drop the list's reference via lfsck_layout_llst_put().
 */
217 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
218 struct lfsck_layout_slave_target *llst)
222 spin_lock(&llsd->llsd_lock);
223 if (!list_empty(&llst->llst_list)) {
224 list_del_init(&llst->llst_list);
227 spin_unlock(&llsd->llsd_lock);
230 lfsck_layout_llst_put(llst);
/*
 * Find the slave target for master @index; if @unlink is set, remove it
 * from the master list.  On success a new reference is taken and the
 * entry returned; returns NULL when no matching index is found (the
 * not-found return is in lines elided from this listing).
 */
233 static inline struct lfsck_layout_slave_target *
234 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
235 __u32 index, bool unlink)
237 struct lfsck_layout_slave_target *llst;
239 spin_lock(&llsd->llsd_lock);
240 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
241 if (llst->llst_index == index) {
243 list_del_init(&llst->llst_list);
245 atomic_inc(&llst->llst_ref);
246 spin_unlock(&llsd->llsd_lock);
251 spin_unlock(&llsd->llsd_lock);
/*
 * Drop a reference on @llo; on the last put release the wrapped object
 * and free the wrapper (free is in lines elided from this listing).
 */
256 static inline void lfsck_layout_object_put(const struct lu_env *env,
257 struct lfsck_layout_object *llo)
259 if (atomic_dec_and_test(&llo->llo_ref)) {
260 lfsck_object_put(env, llo->llo_obj);
/*
 * Build a verification request pairing @parent (MDT object, whose
 * reference count is bumped here) with OST-object @child located at
 * stripe @lov_idx on OST @ost_idx.  Returns ERR_PTR(-ENOMEM) when the
 * request cannot be allocated.  The request owns one ref on @parent;
 * lfsck_layout_req_fini() is its counterpart.
 */
265 static struct lfsck_layout_req *
266 lfsck_layout_req_init(struct lfsck_layout_object *parent,
267 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
269 struct lfsck_layout_req *llr;
273 return ERR_PTR(-ENOMEM);
275 INIT_LIST_HEAD(&llr->llr_list);
276 atomic_inc(&parent->llo_ref);
277 llr->llr_parent = parent;
278 llr->llr_child = child;
279 llr->llr_ost_idx = ost_idx;
280 llr->llr_lov_idx = lov_idx;
/*
 * Release a request: put the child object and drop the parent reference
 * taken in lfsck_layout_req_init() (the request free itself is in lines
 * elided from this listing).
 */
285 static inline void lfsck_layout_req_fini(const struct lu_env *env,
286 struct lfsck_layout_req *llr)
288 lu_object_put(env, &llr->llr_child->do_lu);
289 lfsck_layout_object_put(env, llr->llr_parent);
/*
 * Test under llmd_lock whether the assistant request queue is empty.
 * NOTE(review): the early-return/true branch is elided from this listing.
 */
293 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
297 spin_lock(&llmd->llmd_lock);
298 if (list_empty(&llmd->llmd_req_list))
300 spin_unlock(&llmd->llmd_lock);
/*
 * Read @obj's LOV EA (layout) into @buf, growing the buffer as needed:
 * first try the current buffer, then query the EA size with LU_BUF_NULL
 * and lu_buf_realloc()/lu_buf_alloc() to fit, updating *buflen to the
 * new capacity.  NOTE(review): the retry/return logic between the calls
 * is elided from this listing — exact error handling not visible.
 */
305 static int lfsck_layout_get_lovea(const struct lu_env *env,
306 struct dt_object *obj,
307 struct lu_buf *buf, ssize_t *buflen)
312 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
314 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
319 lu_buf_realloc(buf, rc);
321 *buflen = buf->lb_len;
323 if (buf->lb_buf == NULL)
335 if (unlikely(buf->lb_buf == NULL)) {
336 lu_buf_alloc(buf, rc);
338 *buflen = buf->lb_len;
340 if (buf->lb_buf == NULL)
/*
 * Sanity-check an on-disk LOV EA header: accept only LOV_MAGIC_V1/V3
 * magic and the LOV_PATTERN_RAID0 stripe pattern.  A crashed magic is
 * deliberately left in place for later orphan-driven repair.
 * ("patten" is a historical typo for "pattern" in the local variable.)
 */
349 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
354 magic = le32_to_cpu(lmm->lmm_magic);
355 /* If magic crashed, keep it there. Sometime later, during OST-object
356 * orphan handling, if some OST-object(s) back-point to it, it can be
357 * verified and repaired. */
358 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
361 patten = le32_to_cpu(lmm->lmm_pattern);
362 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
363 if (patten != LOV_PATTERN_RAID0)
/* Each rbtree node carries one page-sized bitmap; WIDTH is the number of
 * object IDs (bits) it covers, MASK extracts the in-node bit offset from
 * an OID.  NOTE(review): MASK is derived from SIZE, not WIDTH — it masks
 * the byte-sized page range; confirm intended usage against full file. */
369 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
370 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
371 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_SIZE - 1)
/*
 * One rbtree node covers a contiguous OID range within one sequence and
 * tracks two bitmaps: objects known to exist and objects actually
 * accessed, with matching population counters.
 */
373 struct lfsck_rbtree_node {
374 struct rb_node lrn_node;
377 atomic_t lrn_known_count;
378 atomic_t lrn_accessed_count;
379 void *lrn_known_bitmap;
380 void *lrn_accessed_bitmap;
/*
 * Three-way compare of (@seq, @oid) against @lrn's covered range: order
 * first by sequence, then by whether @oid falls below, inside, or above
 * the node's [lrn_first_oid, lrn_first_oid + BITMAP_WIDTH) window.
 * (The return statements are in lines elided from this listing.)
 */
383 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
384 __u64 seq, __u32 oid)
386 if (seq < lrn->lrn_seq)
389 if (seq > lrn->lrn_seq)
392 if (oid < lrn->lrn_first_oid)
395 if (oid >= lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH)
/* The caller should hold llsd->llsd_rb_lock. */
/*
 * Standard rbtree descent for the node covering @fid.  When no exact
 * match exists, fall forward to the next valid node (for iteration),
 * setting *exact accordingly; returns NULL for an empty tree or when
 * iteration ran off the end (those paths are elided in this listing).
 */
402 static struct lfsck_rbtree_node *
403 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
404 const struct lu_fid *fid, bool *exact)
406 struct rb_node *node = llsd->llsd_rb_root.rb_node;
407 struct rb_node *prev = NULL;
408 struct lfsck_rbtree_node *lrn = NULL;
414 while (node != NULL) {
416 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
417 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
419 node = node->rb_left;
421 node = node->rb_right;
429 /* If there is no exactly matched one, then to the next valid one. */
432 /* The rbtree is empty. */
439 node = rb_next(prev);
441 /* The end of the rbtree. */
445 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
/*
 * Allocate a new rbtree node covering the bitmap-aligned OID window that
 * contains @fid, with both bitmaps allocated and counters zeroed.  Each
 * allocation failure unwinds the earlier ones and returns
 * ERR_PTR(-ENOMEM).
 */
450 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
451 const struct lu_fid *fid)
453 struct lfsck_rbtree_node *lrn;
457 return ERR_PTR(-ENOMEM);
459 OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
460 if (lrn->lrn_known_bitmap == NULL) {
463 return ERR_PTR(-ENOMEM);
466 OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
467 if (lrn->lrn_accessed_bitmap == NULL) {
468 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
471 return ERR_PTR(-ENOMEM);
474 rb_init_node(&lrn->lrn_node);
475 lrn->lrn_seq = fid_seq(fid);
476 lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
477 atomic_set(&lrn->lrn_known_count, 0);
478 atomic_set(&lrn->lrn_accessed_count, 0);
/* Free both bitmaps of @lrn (and the node itself, in an elided line). */
483 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
485 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
486 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
/* The caller should hold lock. */
/*
 * Insert @lrn into the tree; standard rb_link_node/rb_insert_color walk.
 * If an equal-range node already exists the existing one is returned
 * instead (that branch is elided from this listing) — hence the
 * struct return rather than void.
 */
491 static struct lfsck_rbtree_node *
492 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
493 struct lfsck_rbtree_node *lrn)
495 struct rb_node **pos = &(llsd->llsd_rb_root.rb_node);
496 struct rb_node *parent = NULL;
497 struct lfsck_rbtree_node *tmp;
502 tmp = rb_entry(*pos, struct lfsck_rbtree_node, lrn_node);
503 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
505 pos = &((*pos)->rb_left);
507 pos = &((*pos)->rb_right);
512 rb_link_node(&lrn->lrn_node, parent, pos);
513 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
518 extern const struct dt_index_operations lfsck_orphan_index_ops;
/*
 * Prepare the in-RAM orphan-tracking rbtree: locate a pseudo dt_object
 * (FID_SEQ_LAYOUT_RBTREE + device index) to stand for it, mark the
 * object as existing, wire up the orphan index ops, and flip on both
 * the rbtree-valid flag and device-level FID-access recording.
 */
520 static int lfsck_rbtree_setup(const struct lu_env *env,
521 struct lfsck_component *com)
523 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
524 struct lfsck_instance *lfsck = com->lc_lfsck;
525 struct dt_device *dev = lfsck->li_bottom;
526 struct lfsck_layout_slave_data *llsd = com->lc_data;
527 struct dt_object *obj;
529 fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
530 fid->f_oid = lfsck_dev_idx(dev);
532 obj = dt_locate(env, dev, fid);
534 RETURN(PTR_ERR(obj));
536 /* Generate an in-RAM object to stand for the layout rbtree.
537 * Scanning the layout rbtree will be via the iteration over
538 * the object. In the future, the rbtree may be written onto
539 * disk with the object.
541 * Mark the object to be as exist. */
542 obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
543 obj->do_index_ops = &lfsck_orphan_index_ops;
544 llsd->llsd_rb_obj = obj;
545 llsd->llsd_rbtree_valid = 1;
546 dev->dd_record_fid_accessed = 1;
/*
 * Tear the rbtree down: stop FID-access recording, invalidate the tree
 * under the write lock so no new users enter, then erase and free every
 * node and drop the pseudo object.  The node walk runs without the lock,
 * which relies on the valid flag having excluded all other users first.
 */
551 static void lfsck_rbtree_cleanup(const struct lu_env *env,
552 struct lfsck_component *com)
554 struct lfsck_instance *lfsck = com->lc_lfsck;
555 struct lfsck_layout_slave_data *llsd = com->lc_data;
556 struct rb_node *node = rb_first(&llsd->llsd_rb_root);
557 struct rb_node *next;
558 struct lfsck_rbtree_node *lrn;
560 lfsck->li_bottom->dd_record_fid_accessed = 0;
561 /* Invalid the rbtree, then no others will use it. */
562 write_lock(&llsd->llsd_rb_lock);
563 llsd->llsd_rbtree_valid = 0;
564 write_unlock(&llsd->llsd_rb_lock);
566 while (node != NULL) {
567 next = rb_next(node);
568 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
569 rb_erase(node, &llsd->llsd_rb_root);
570 lfsck_rbtree_free(lrn);
574 if (llsd->llsd_rb_obj != NULL) {
575 lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
576 llsd->llsd_rb_obj = NULL;
/*
 * Record @fid in the orphan-tracking bitmaps: set its "known" bit (every
 * object seen is known) and, when @accessed, its "accessed" bit, bumping
 * the matching counters.  Non-sane/LAST_ID FIDs and non-IDIF/non-normal
 * sequences are ignored.  Lookup happens under the read lock; if no node
 * covers the FID, the lock is dropped, a node is allocated, and the
 * insert is retried under the write lock (re-checking the valid flag,
 * since cleanup may have run in between).  On failure with @accessed
 * set, orphan handling cannot be trusted any more, so the component is
 * flagged LF_INCOMPLETE and the whole rbtree is torn down.
 */
580 static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
581 struct lfsck_component *com,
582 const struct lu_fid *fid,
585 struct lfsck_layout_slave_data *llsd = com->lc_data;
586 struct lfsck_rbtree_node *lrn;
592 CDEBUG(D_LFSCK, "%s: update bitmap for "DFID"\n",
593 lfsck_lfsck2name(com->lc_lfsck), PFID(fid));
595 if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
598 if (!fid_is_idif(fid) && !fid_is_norm(fid))
601 read_lock(&llsd->llsd_rb_lock);
602 if (!llsd->llsd_rbtree_valid)
603 GOTO(unlock, rc = 0);
605 lrn = lfsck_rbtree_search(llsd, fid, NULL);
607 struct lfsck_rbtree_node *tmp;
611 read_unlock(&llsd->llsd_rb_lock);
612 tmp = lfsck_rbtree_new(env, fid);
614 GOTO(out, rc = PTR_ERR(tmp));
617 write_lock(&llsd->llsd_rb_lock);
618 if (!llsd->llsd_rbtree_valid) {
619 lfsck_rbtree_free(tmp);
620 GOTO(unlock, rc = 0);
623 lrn = lfsck_rbtree_insert(llsd, tmp);
625 lfsck_rbtree_free(tmp);
628 idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
629 /* Any accessed object must be a known object. */
630 if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
631 atomic_inc(&lrn->lrn_known_count);
632 if (accessed && !test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
633 atomic_inc(&lrn->lrn_accessed_count);
635 GOTO(unlock, rc = 0);
639 write_unlock(&llsd->llsd_rb_lock);
641 read_unlock(&llsd->llsd_rb_lock);
643 if (rc != 0 && accessed) {
644 struct lfsck_layout *lo = com->lc_file_ram;
646 CERROR("%s: Fail to update object accessed bitmap, will cause "
647 "incorrect LFSCK OST-object handling, so disable it to "
648 "cancel orphan handling for related device. rc = %d.\n",
649 lfsck_lfsck2name(com->lc_lfsck), rc);
650 lo->ll_flags |= LF_INCOMPLETE;
651 lfsck_rbtree_cleanup(env, com);
/* An all-zero LOV stripe entry (zero FID/oi, gen 0, ost_idx 0) is a
 * "dummy" placeholder rather than a real OST object reference. */
655 static inline bool is_dummy_lov_ost_data(struct lov_ost_data_v1 *obj)
657 if (fid_is_zero(&obj->l_ost_oi.oi_fid) &&
658 obj->l_ost_gen == 0 && obj->l_ost_idx == 0)
/*
 * Field-by-field conversion of an on-disk (little-endian) lfsck_layout
 * trace file image @src into CPU byte order in @des.  Must be kept in
 * sync with lfsck_layout_cpu_to_le() below.
 */
664 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
665 const struct lfsck_layout *src)
669 des->ll_magic = le32_to_cpu(src->ll_magic);
670 des->ll_status = le32_to_cpu(src->ll_status);
671 des->ll_flags = le32_to_cpu(src->ll_flags);
672 des->ll_success_count = le32_to_cpu(src->ll_success_count);
673 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
674 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
675 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
676 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
677 des->ll_time_last_checkpoint =
678 le64_to_cpu(src->ll_time_last_checkpoint);
679 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
680 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
681 des->ll_pos_first_inconsistent =
682 le64_to_cpu(src->ll_pos_first_inconsistent);
683 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
684 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
685 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
686 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
687 for (i = 0; i < LLIT_MAX; i++)
688 des->ll_objs_repaired[i] =
689 le64_to_cpu(src->ll_objs_repaired[i]);
690 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/*
 * Inverse of lfsck_layout_le_to_cpu(): convert the in-RAM lfsck_layout
 * @src to the little-endian on-disk format in @des before writing.
 */
693 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
694 const struct lfsck_layout *src)
698 des->ll_magic = cpu_to_le32(src->ll_magic);
699 des->ll_status = cpu_to_le32(src->ll_status);
700 des->ll_flags = cpu_to_le32(src->ll_flags);
701 des->ll_success_count = cpu_to_le32(src->ll_success_count);
702 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
703 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
704 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
705 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
706 des->ll_time_last_checkpoint =
707 cpu_to_le64(src->ll_time_last_checkpoint);
708 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
709 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
710 des->ll_pos_first_inconsistent =
711 cpu_to_le64(src->ll_pos_first_inconsistent);
712 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
713 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
714 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
715 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
716 for (i = 0; i < LLIT_MAX; i++)
717 des->ll_objs_repaired[i] =
718 cpu_to_le64(src->ll_objs_repaired[i]);
719 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/*
 * Load the layout trace file from disk into com->lc_file_disk, then
 * byte-swap it into com->lc_file_ram and validate the magic.
 */
723 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
724 * \retval 0: succeed.
725 * \retval -ve: failed cases.
727 static int lfsck_layout_load(const struct lu_env *env,
728 struct lfsck_component *com)
730 struct lfsck_layout *lo = com->lc_file_ram;
731 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
732 ssize_t size = com->lc_file_size;
736 rc = dbo->dbo_read(env, com->lc_obj,
737 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
742 CWARN("%s: failed to load lfsck_layout: rc = %d\n",
743 lfsck_lfsck2name(com->lc_lfsck), rc);
745 } else if (rc != size) {
746 CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
747 lfsck_lfsck2name(com->lc_lfsck), rc);
751 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
752 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
753 CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
754 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
755 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/*
 * Persist the layout trace file: byte-swap com->lc_file_ram into
 * com->lc_file_disk, then write it via a local transaction
 * (create/declare/start/write/stop), logging each failure stage.
 */
762 static int lfsck_layout_store(const struct lu_env *env,
763 struct lfsck_component *com)
765 struct dt_object *obj = com->lc_obj;
766 struct lfsck_instance *lfsck = com->lc_lfsck;
767 struct lfsck_layout *lo = com->lc_file_disk;
768 struct thandle *handle;
769 ssize_t size = com->lc_file_size;
774 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
775 handle = dt_trans_create(env, lfsck->li_bottom);
776 if (IS_ERR(handle)) {
777 rc = PTR_ERR(handle);
778 CERROR("%s: fail to create trans for storing lfsck_layout: "
779 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
783 rc = dt_declare_record_write(env, obj, size, pos, handle);
785 CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
786 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
790 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
792 CERROR("%s: fail to start trans for storing lfsck_layout: "
793 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
797 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
800 CERROR("%s: fail to store lfsck_layout(1): size = %d, "
801 "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);
806 dt_trans_stop(env, lfsck->li_bottom, handle);
/*
 * Reset the in-RAM trace file to a pristine LS_INIT state and store it,
 * serialized by the component semaphore.
 */
811 static int lfsck_layout_init(const struct lu_env *env,
812 struct lfsck_component *com)
814 struct lfsck_layout *lo = com->lc_file_ram;
817 memset(lo, 0, com->lc_file_size);
818 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
819 lo->ll_status = LS_INIT;
820 down_write(&com->lc_sem);
821 rc = lfsck_layout_store(env, com);
822 up_write(&com->lc_sem);
/*
 * Decide whether @fid names an OST-object.  Preferred path: FLD lookup
 * of the sequence range.  Fallbacks (when FLD cannot answer): check the
 * LMAC_FID_ON_OST bit in the object's LMA xattr, and finally probe for
 * the XATTR_NAME_FID xattr that only OST objects carry.  Return-value
 * handling between these steps is elided from this listing.
 */
827 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
828 struct dt_object *obj, const struct lu_fid *fid)
830 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
831 struct lu_seq_range range = { 0 };
832 struct lustre_mdt_attrs *lma;
835 fld_range_set_any(&range);
836 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
838 if (fld_range_is_ost(&range))
844 lma = &lfsck_env_info(env)->lti_lma;
845 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
846 XATTR_NAME_LMA, BYPASS_CAPA);
847 if (rc == sizeof(*lma)) {
848 lustre_lma_swab(lma);
850 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
853 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/*
 * Find the lfsck_layout_seq entry for @seq in the slave's (sorted)
 * sequence list; the '>' test ends the walk early since the list is
 * kept ordered by lfsck_layout_seq_insert() below.
 */
858 static struct lfsck_layout_seq *
859 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
861 struct lfsck_layout_seq *lls;
863 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
864 if (lls->lls_seq == seq)
867 if (lls->lls_seq > seq)
/*
 * Insert @lls into the slave's sequence list, keeping it sorted by
 * ascending lls_seq (insert before the first larger entry).
 */
875 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
876 struct lfsck_layout_seq *lls)
878 struct lfsck_layout_seq *tmp;
879 struct list_head *pos = &llsd->llsd_seq_list;
881 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
882 if (lls->lls_seq < tmp->lls_seq) {
883 pos = &tmp->lls_list;
887 list_add_tail(&lls->lls_list, pos);
/*
 * (Re)create a missing LAST_ID file for the sequence that @obj's FID
 * belongs to: declare create + first record write in one local
 * transaction, then create the object and write the initial lastid under
 * the object write lock (skipping the create if it raced into existence).
 * No-ops under LPF_DRYRUN.
 */
891 lfsck_layout_lastid_create(const struct lu_env *env,
892 struct lfsck_instance *lfsck,
893 struct dt_object *obj)
895 struct lfsck_thread_info *info = lfsck_env_info(env);
896 struct lu_attr *la = &info->lti_la;
897 struct dt_object_format *dof = &info->lti_dof;
898 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
899 struct dt_device *dt = lfsck->li_bottom;
906 CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
907 fid_seq(lfsck_dto2fid(obj)));
909 if (bk->lb_param & LPF_DRYRUN)
912 memset(la, 0, sizeof(*la));
913 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
914 la->la_valid = LA_MODE | LA_UID | LA_GID;
915 dof->dof_type = dt_mode_to_dft(S_IFREG);
917 th = dt_trans_create(env, dt);
919 RETURN(rc = PTR_ERR(th));
921 rc = dt_declare_create(env, obj, la, NULL, dof, th);
925 rc = dt_declare_record_write(env, obj, sizeof(lastid), pos, th);
929 rc = dt_trans_start_local(env, dt, th);
933 dt_write_lock(env, obj, 0);
934 if (likely(!dt_object_exists(obj))) {
935 rc = dt_create(env, obj, la, NULL, dof, th);
937 rc = dt_record_write(env, obj,
938 lfsck_buf_get(env, &lastid, sizeof(lastid)),
941 dt_write_unlock(env, obj);
946 dt_trans_stop(env, dt, th);
/*
 * Re-read the on-disk LAST_ID for @lls and reconcile it with what LFSCK
 * has observed: if the stored value is below the highest object ID the
 * scan has seen (lls_lastid_known), the LAST_ID file is crashed — adopt
 * the known value, notify the layout master (LE_LASTID_REBUILDING) and
 * set LF_CRASHED_LASTID; otherwise adopt the (possibly newer) stored
 * value.
 */
952 lfsck_layout_lastid_reload(const struct lu_env *env,
953 struct lfsck_component *com,
954 struct lfsck_layout_seq *lls)
960 dt_read_lock(env, lls->lls_lastid_obj, 0);
961 rc = dt_record_read(env, lls->lls_lastid_obj,
962 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
963 dt_read_unlock(env, lls->lls_lastid_obj);
964 if (unlikely(rc != 0))
967 lastid = le64_to_cpu(lastid);
968 if (lastid < lls->lls_lastid_known) {
969 struct lfsck_instance *lfsck = com->lc_lfsck;
970 struct lfsck_layout *lo = com->lc_file_ram;
972 lls->lls_lastid = lls->lls_lastid_known;
974 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
975 LASSERT(lfsck->li_out_notify != NULL);
977 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
978 LE_LASTID_REBUILDING);
979 lo->ll_flags |= LF_CRASHED_LASTID;
981 } else if (lastid >= lls->lls_lastid) {
982 lls->lls_lastid = lastid;
/*
 * Flush every dirty per-sequence LAST_ID back to disk.  Clean entries
 * are still reloaded first, because OFD updates LAST_ID ahead of object
 * pre-creation and a failed pre-create can move it backwards (see the
 * in-line comment below) — the reload may re-dirty them.  Each write is
 * its own local transaction under the LAST_ID object's write lock;
 * LPF_DRYRUN skips the writes.
 */
990 lfsck_layout_lastid_store(const struct lu_env *env,
991 struct lfsck_component *com)
993 struct lfsck_instance *lfsck = com->lc_lfsck;
994 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
995 struct dt_device *dt = lfsck->li_bottom;
996 struct lfsck_layout_slave_data *llsd = com->lc_data;
997 struct lfsck_layout_seq *lls;
1003 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
1006 /* XXX: Add the code back if we really found related
1007 * inconsistent cases in the future. */
1009 if (!lls->lls_dirty) {
1010 /* In OFD, before the pre-creation, the LAST_ID
1011 * file will be updated firstly, which may hide
1012 * some potential crashed cases. For example:
1014 * The old obj1's ID is higher than old LAST_ID
1015 * but lower than the new LAST_ID, but the LFSCK
1016 * have not touch the obj1 until the OFD updated
1017 * the LAST_ID. So the LFSCK does not regard it
1018 * as crashed case. But when OFD does not create
1019 * successfully, it will set the LAST_ID as the
1020 * real created objects' ID, then LFSCK needs to
1021 * found related inconsistency. */
1022 rc = lfsck_layout_lastid_reload(env, com, lls);
1023 if (likely(!lls->lls_dirty))
1028 CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
1029 " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);
1031 if (bk->lb_param & LPF_DRYRUN) {
1036 th = dt_trans_create(env, dt);
1039 CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
1040 lfsck_lfsck2name(com->lc_lfsck),
1045 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
1046 sizeof(lastid), pos, th);
1050 rc = dt_trans_start_local(env, dt, th);
1054 lastid = cpu_to_le64(lls->lls_lastid);
1055 dt_write_lock(env, lls->lls_lastid_obj, 0);
1056 rc = dt_record_write(env, lls->lls_lastid_obj,
1057 lfsck_buf_get(env, &lastid,
1058 sizeof(lastid)), &pos, th);
1059 dt_write_unlock(env, lls->lls_lastid_obj);
1064 dt_trans_stop(env, dt, th);
1067 CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
1068 lfsck_lfsck2name(com->lc_lfsck),
/*
 * Locate and read the LAST_ID object for @lls->lls_seq.  If the object
 * does not exist, declare it crashed (notify LE_LASTID_REBUILDING, set
 * LF_CRASHED_LASTID, honor the OBD_FAIL_LFSCK_DELAY4 fault-injection
 * wait) and recreate it.  Otherwise read the stored value; a short read
 * (rc == 0) also means a crashed LAST_ID.  On success the object ref is
 * kept in lls->lls_lastid_obj; on failure it is dropped.
 */
1077 lfsck_layout_lastid_load(const struct lu_env *env,
1078 struct lfsck_component *com,
1079 struct lfsck_layout_seq *lls)
1081 struct lfsck_instance *lfsck = com->lc_lfsck;
1082 struct lfsck_layout *lo = com->lc_file_ram;
1083 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1084 struct dt_object *obj;
1089 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
1090 obj = dt_locate(env, lfsck->li_bottom, fid);
1092 RETURN(PTR_ERR(obj));
1094 /* LAST_ID crashed, to be rebuilt */
1095 if (!dt_object_exists(obj)) {
1096 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1097 LASSERT(lfsck->li_out_notify != NULL);
1099 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1100 LE_LASTID_REBUILDING);
1101 lo->ll_flags |= LF_CRASHED_LASTID;
1103 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
1105 struct l_wait_info lwi = LWI_TIMEOUT(
1106 cfs_time_seconds(cfs_fail_val),
1109 up_write(&com->lc_sem);
1110 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1111 !thread_is_running(&lfsck->li_thread),
1113 down_write(&com->lc_sem);
1117 rc = lfsck_layout_lastid_create(env, lfsck, obj);
1119 dt_read_lock(env, obj, 0);
1120 rc = dt_read(env, obj,
1121 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
1123 dt_read_unlock(env, obj);
1124 if (rc != 0 && rc != sizeof(__u64))
1125 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
1127 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
1128 LASSERT(lfsck->li_out_notify != NULL);
1130 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1131 LE_LASTID_REBUILDING);
1132 lo->ll_flags |= LF_CRASHED_LASTID;
1135 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
1143 lfsck_object_put(env, obj);
1145 lls->lls_lastid_obj = obj;
/*
 * Interpret callback for async layout-LFSCK RPCs sent by the master.
 * Behavior depends on the event that was sent (lr->lr_event):
 *  - start: on failure mark the trace file LF_INCOMPLETE; on success
 *    link the target onto the OST/MDT "involved" and phase1 lists
 *    (unless the target already died or finished), under ltd_lock.
 *  - LE_PHASE1_DONE / LE_PHASE2_DONE (and peer-exit, in elided lines):
 *    just warn on unexpected failure (-EALREADY is benign).
 *  - query: on RPC failure or missing reply body, drop the target from
 *    both layout lists; otherwise re-file it on the phase1 or phase2
 *    list according to the slave's reported scanning status, or drop
 *    it entirely for terminal statuses.
 * Always drops the per-request target/component references afterwards
 * (unless laia_shared).  NOTE(review): several case labels and GOTO
 * lines are elided from this listing.
 */
1150 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
1151 struct ptlrpc_request *req,
1154 struct lfsck_async_interpret_args *laia = args;
1155 struct lfsck_component *com = laia->laia_com;
1156 struct lfsck_layout_master_data *llmd = com->lc_data;
1157 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
1158 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
1159 struct lfsck_request *lr = laia->laia_lr;
1161 switch (lr->lr_event) {
1164 struct lfsck_layout *lo = com->lc_file_ram;
1166 CERROR("%s: fail to notify %s %x for layout start: "
1167 "rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
1168 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1169 ltd->ltd_index, rc);
1170 lo->ll_flags |= LF_INCOMPLETE;
1174 spin_lock(&ltds->ltd_lock);
1175 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1176 spin_unlock(&ltds->ltd_lock);
1180 if (lr->lr_flags & LEF_TO_OST) {
1181 if (list_empty(&ltd->ltd_layout_list))
1182 list_add_tail(&ltd->ltd_layout_list,
1183 &llmd->llmd_ost_list);
1184 if (list_empty(&ltd->ltd_layout_phase_list))
1185 list_add_tail(&ltd->ltd_layout_phase_list,
1186 &llmd->llmd_ost_phase1_list);
1188 if (list_empty(&ltd->ltd_layout_list))
1189 list_add_tail(&ltd->ltd_layout_list,
1190 &llmd->llmd_mdt_list);
1191 if (list_empty(&ltd->ltd_layout_phase_list))
1192 list_add_tail(&ltd->ltd_layout_phase_list,
1193 &llmd->llmd_mdt_phase1_list);
1195 spin_unlock(&ltds->ltd_lock);
1198 case LE_PHASE1_DONE:
1199 case LE_PHASE2_DONE:
1201 if (rc != 0 && rc != -EALREADY)
1202 CWARN("%s: fail to notify %s %x for layout: "
1203 "event = %d, rc = %d\n",
1204 lfsck_lfsck2name(com->lc_lfsck),
1205 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1206 ltd->ltd_index, lr->lr_event, rc);
1209 struct lfsck_reply *reply;
1212 spin_lock(&ltds->ltd_lock);
1213 list_del_init(&ltd->ltd_layout_phase_list);
1214 list_del_init(&ltd->ltd_layout_list);
1215 spin_unlock(&ltds->ltd_lock);
1219 reply = req_capsule_server_get(&req->rq_pill,
1221 if (reply == NULL) {
1223 CERROR("%s: invalid return value: rc = %d\n",
1224 lfsck_lfsck2name(com->lc_lfsck), rc);
1225 spin_lock(&ltds->ltd_lock);
1226 list_del_init(&ltd->ltd_layout_phase_list);
1227 list_del_init(&ltd->ltd_layout_list);
1228 spin_unlock(&ltds->ltd_lock);
1232 switch (reply->lr_status) {
1233 case LS_SCANNING_PHASE1:
1235 case LS_SCANNING_PHASE2:
1236 spin_lock(&ltds->ltd_lock);
1237 list_del_init(&ltd->ltd_layout_phase_list);
1238 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1239 spin_unlock(&ltds->ltd_lock);
1243 if (lr->lr_flags & LEF_TO_OST)
1244 list_add_tail(&ltd->ltd_layout_phase_list,
1245 &llmd->llmd_ost_phase2_list);
1247 list_add_tail(&ltd->ltd_layout_phase_list,
1248 &llmd->llmd_mdt_phase2_list);
1249 spin_unlock(&ltds->ltd_lock);
1252 spin_lock(&ltds->ltd_lock);
1253 list_del_init(&ltd->ltd_layout_phase_list);
1254 list_del_init(&ltd->ltd_layout_list);
1255 spin_unlock(&ltds->ltd_lock);
1261 CERROR("%s: unexpected event: rc = %d\n",
1262 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
1266 if (!laia->laia_shared) {
1268 lfsck_component_put(env, com);
/*
 * Send LE_QUERY RPCs to every target still in phase1 scanning — MDTs
 * first, then OSTs (the OST pass re-enters via a label elided from this
 * listing).  llmd_touch_gen marks targets already visited this sweep;
 * each target is moved to the list tail and pinned (ltd_ref) before its
 * RPC is issued, and replies are handled by
 * lfsck_layout_master_async_interpret().  The whole set is waited on
 * with ptlrpc_set_wait().
 */
1274 static int lfsck_layout_master_query_others(const struct lu_env *env,
1275 struct lfsck_component *com)
1277 struct lfsck_thread_info *info = lfsck_env_info(env);
1278 struct lfsck_request *lr = &info->lti_lr;
1279 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1280 struct lfsck_instance *lfsck = com->lc_lfsck;
1281 struct lfsck_layout_master_data *llmd = com->lc_data;
1282 struct ptlrpc_request_set *set;
1283 struct lfsck_tgt_descs *ltds;
1284 struct lfsck_tgt_desc *ltd;
1285 struct list_head *head;
1290 set = ptlrpc_prep_set();
1294 llmd->llmd_touch_gen++;
1295 memset(lr, 0, sizeof(*lr));
1296 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1297 lr->lr_event = LE_QUERY;
1298 lr->lr_active = LT_LAYOUT;
1299 laia->laia_com = com;
1301 laia->laia_shared = 0;
1303 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1304 ltds = &lfsck->li_mdt_descs;
1306 head = &llmd->llmd_mdt_phase1_list;
1310 ltds = &lfsck->li_ost_descs;
1311 lr->lr_flags = LEF_TO_OST;
1312 head = &llmd->llmd_ost_phase1_list;
1315 laia->laia_ltds = ltds;
1316 spin_lock(&ltds->ltd_lock);
1317 while (!list_empty(head)) {
1318 ltd = list_entry(head->next,
1319 struct lfsck_tgt_desc,
1320 ltd_layout_phase_list);
1321 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1324 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1325 list_del(&ltd->ltd_layout_phase_list);
1326 list_add_tail(&ltd->ltd_layout_phase_list, head);
1327 atomic_inc(&ltd->ltd_ref);
1328 laia->laia_ltd = ltd;
1329 spin_unlock(&ltds->ltd_lock);
1330 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1331 lfsck_layout_master_async_interpret,
1334 CERROR("%s: fail to query %s %x for layout: rc = %d\n",
1335 lfsck_lfsck2name(lfsck),
1336 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1337 ltd->ltd_index, rc);
1341 spin_lock(&ltds->ltd_lock);
1343 spin_unlock(&ltds->ltd_lock);
1345 rc = ptlrpc_set_wait(set);
1347 ptlrpc_set_destroy(set);
1351 if (!(lr->lr_flags & LEF_TO_OST) &&
1352 list_empty(&llmd->llmd_mdt_phase1_list))
1355 ptlrpc_set_destroy(set);
1357 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * The master may move on to orphan handling once all MDTs have finished
 * phase1 and the OSTs have either reached phase2 or drained phase1.
 */
1361 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1363 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1364 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1365 list_empty(&llmd->llmd_ost_phase1_list));
/*
 * Broadcast a layout LFSCK event (start, stop, phase done, peer exit)
 * from the LFSCK master to the involved OSTs — and, when LPF_ALL_TGT is
 * set, to the other MDTs — via async LFSCK_NOTIFY requests gathered in
 * a single ptlrpc request set.
 *
 * NOTE(review): this chunk is a lossy extraction. Interior lines are
 * missing (the embedded original line numbers skip), and "&lt..." token
 * sequences were mangled to "<..." — e.g. "spin_lock(<ds->ltd_lock)"
 * appears to have been "spin_lock(&ltds->ltd_lock)" and
 * "list_del_init(<d->...)" appears to have been "&ltd->...".
 * Code below is left byte-identical to the extraction; confirm against
 * the upstream file before acting on it.
 */
1368 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1369 struct lfsck_component *com,
1370 struct lfsck_request *lr)
1372 struct lfsck_thread_info *info = lfsck_env_info(env);
1373 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1374 struct lfsck_instance *lfsck = com->lc_lfsck;
1375 struct lfsck_layout_master_data *llmd = com->lc_data;
1376 struct lfsck_layout *lo = com->lc_file_ram;
1377 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1378 struct ptlrpc_request_set *set;
1379 struct lfsck_tgt_descs *ltds;
1380 struct lfsck_tgt_desc *ltd;
1381 struct lfsck_tgt_desc *next;
1382 struct list_head *head;
1387 set = ptlrpc_prep_set();
1391 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1392 lr->lr_active = LT_LAYOUT;
1393 laia->laia_com = com;
1395 laia->laia_shared = 0;
/* Dispatch on the event being broadcast; each case picks the target
 * descriptor table(s) and list(s) to walk. */
1396 switch (lr->lr_event) {
1398 /* Notify OSTs firstly, then handle other MDTs if needed. */
1399 ltds = &lfsck->li_ost_descs;
1400 laia->laia_ltds = ltds;
1401 down_read(<ds->ltd_rw_sem);
1402 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1403 ltd = lfsck_tgt_get(ltds, idx);
1404 LASSERT(ltd != NULL);
1406 laia->laia_ltd = ltd;
1407 ltd->ltd_layout_done = 0;
1408 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1409 lfsck_layout_master_async_interpret,
1410 laia, LFSCK_NOTIFY);
1412 CERROR("%s: fail to notify %s %x for layout "
1414 lfsck_lfsck2name(lfsck),
1415 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* A failed notification leaves the scan incomplete rather than fatal. */
1418 lo->ll_flags |= LF_INCOMPLETE;
1421 up_read(<ds->ltd_rw_sem);
1424 rc = ptlrpc_set_wait(set);
1426 ptlrpc_set_destroy(set);
1430 if (!(bk->lb_param & LPF_ALL_TGT))
1433 /* link other MDT targets locally. */
1434 spin_lock(<ds->ltd_lock);
1435 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1436 ltd = LTD_TGT(ltds, idx);
1437 LASSERT(ltd != NULL);
1439 if (!list_empty(<d->ltd_layout_list))
1442 list_add_tail(<d->ltd_layout_list,
1443 &llmd->llmd_mdt_list);
1444 list_add_tail(<d->ltd_layout_phase_list,
1445 &llmd->llmd_mdt_phase1_list);
1447 spin_unlock(<ds->ltd_lock);
1450 case LE_PHASE2_DONE:
1451 case LE_PEER_EXIT: {
1452 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1453 if (bk->lb_param & LPF_ALL_TGT) {
1454 head = &llmd->llmd_mdt_list;
1455 ltds = &lfsck->li_mdt_descs;
1456 if (lr->lr_event == LE_STOP) {
1457 /* unlink other MDT targets locally. */
1458 spin_lock(<ds->ltd_lock);
1459 list_for_each_entry_safe(ltd, next, head,
1461 list_del_init(<d->ltd_layout_phase_list);
1462 list_del_init(<d->ltd_layout_list);
1464 spin_unlock(<ds->ltd_lock);
1466 lr->lr_flags |= LEF_TO_OST;
1467 head = &llmd->llmd_ost_list;
1468 ltds = &lfsck->li_ost_descs;
1470 lr->lr_flags &= ~LEF_TO_OST;
1473 lr->lr_flags |= LEF_TO_OST;
1474 head = &llmd->llmd_ost_list;
1475 ltds = &lfsck->li_ost_descs;
/* Drain the chosen list: take a reference, drop the lock for the RPC,
 * re-acquire before the next iteration. */
1479 laia->laia_ltds = ltds;
1480 spin_lock(<ds->ltd_lock);
1481 while (!list_empty(head)) {
1482 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1484 if (!list_empty(<d->ltd_layout_phase_list))
1485 list_del_init(<d->ltd_layout_phase_list);
1486 list_del_init(<d->ltd_layout_list);
1487 atomic_inc(<d->ltd_ref);
1488 laia->laia_ltd = ltd;
1489 spin_unlock(<ds->ltd_lock);
1490 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1491 lfsck_layout_master_async_interpret,
1492 laia, LFSCK_NOTIFY);
1494 CERROR("%s: fail to notify %s %x for layout "
1495 "stop/phase2: rc = %d\n",
1496 lfsck_lfsck2name(lfsck),
1497 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1498 "MDT", ltd->ltd_index, rc);
1501 spin_lock(<ds->ltd_lock);
1503 spin_unlock(<ds->ltd_lock);
1505 rc = ptlrpc_set_wait(set);
1507 ptlrpc_set_destroy(set);
/* After the MDT pass, loop back once more for the OST pass. */
1511 if (!(lr->lr_flags & LEF_TO_OST)) {
1512 lr->lr_flags |= LEF_TO_OST;
1513 head = &llmd->llmd_ost_list;
1514 ltds = &lfsck->li_ost_descs;
1519 case LE_PHASE1_DONE:
/* Bump the touch generation so each MDT is notified at most once
 * per round even though entries are re-queued at the tail. */
1520 llmd->llmd_touch_gen++;
1521 ltds = &lfsck->li_mdt_descs;
1522 laia->laia_ltds = ltds;
1523 spin_lock(<ds->ltd_lock);
1524 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1525 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1526 struct lfsck_tgt_desc,
1527 ltd_layout_phase_list);
1528 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1531 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1532 list_del_init(<d->ltd_layout_phase_list);
1533 list_add_tail(<d->ltd_layout_phase_list,
1534 &llmd->llmd_mdt_phase1_list);
1535 atomic_inc(<d->ltd_ref);
1536 laia->laia_ltd = ltd;
1537 spin_unlock(<ds->ltd_lock);
1538 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1539 lfsck_layout_master_async_interpret,
1540 laia, LFSCK_NOTIFY);
1542 CERROR("%s: fail to notify MDT %x for layout "
1543 "phase1 done: rc = %d\n",
1544 lfsck_lfsck2name(lfsck),
1545 ltd->ltd_index, rc);
1548 spin_lock(<ds->ltd_lock);
1550 spin_unlock(<ds->ltd_lock);
1553 CERROR("%s: unexpected LFSCK event: rc = %d\n",
1554 lfsck_lfsck2name(lfsck), lr->lr_event);
1559 rc = ptlrpc_set_wait(set);
1560 ptlrpc_set_destroy(set);
/*
 * Record the outcome of the layout LFSCK second-phase (double) scan:
 * accumulate phase2 run time and checked-object counters, translate the
 * scan result into an ll_status (COMPLETED / PARTIAL / STOPPED / FAILED),
 * move the component onto the idle list unless paused, and persist the
 * tracing file via lfsck_layout_store().
 *
 * NOTE(review): extraction dropped interior lines here (e.g. the "int rc"
 * parameter line and several braces) — code kept byte-identical.
 */
1565 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1566 struct lfsck_component *com,
1569 struct lfsck_instance *lfsck = com->lc_lfsck;
1570 struct lfsck_layout *lo = com->lc_file_ram;
1571 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1573 down_write(&com->lc_sem);
/* Phase2 statistics: elapsed time (rounded via HALF_SEC) and objects. */
1575 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1576 HALF_SEC - lfsck->li_time_last_checkpoint);
1577 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1578 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1581 com->lc_journal = 0;
/* LF_INCOMPLETE means some target could not be notified/scanned. */
1582 if (lo->ll_flags & LF_INCOMPLETE)
1583 lo->ll_status = LS_PARTIAL;
1585 lo->ll_status = LS_COMPLETED;
1586 if (!(bk->lb_param & LPF_DRYRUN))
1587 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1588 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1589 lo->ll_success_count++;
1590 } else if (rc == 0) {
1591 lo->ll_status = lfsck->li_status;
1592 if (lo->ll_status == 0)
1593 lo->ll_status = LS_STOPPED;
1595 lo->ll_status = LS_FAILED;
/* A paused component stays on its current list for later resume. */
1598 if (lo->ll_status != LS_PAUSED) {
1599 spin_lock(&lfsck->li_lock);
1600 list_del_init(&com->lc_link);
1601 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
1602 spin_unlock(&lfsck->li_lock);
1605 rc = lfsck_layout_store(env, com);
1607 up_write(&com->lc_sem);
/*
 * Take a local EX ibits ldlm lock on @obj covering the given @bits
 * (e.g. MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR) so LFSCK repair of
 * the layout EA is serialized against other users. On failure the
 * handle is cleared so lfsck_layout_unlock() is always safe to call.
 *
 * NOTE(review): extraction dropped interior lines (braces, the success
 * path after ELDLM_OK, the RETURN) — code kept byte-identical.
 */
1612 static int lfsck_layout_lock(const struct lu_env *env,
1613 struct lfsck_component *com,
1614 struct dt_object *obj,
1615 struct lustre_handle *lh, __u64 bits)
1617 struct lfsck_thread_info *info = lfsck_env_info(env);
1618 ldlm_policy_data_t *policy = &info->lti_policy;
1619 struct ldlm_res_id *resid = &info->lti_resid;
1620 struct lfsck_instance *lfsck = com->lc_lfsck;
1621 __u64 flags = LDLM_FL_ATOMIC_CB;
1624 LASSERT(lfsck->li_namespace != NULL);
1626 memset(policy, 0, sizeof(*policy));
1627 policy->l_inodebits.bits = bits;
1628 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1629 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1630 policy, LCK_EX, &flags, ldlm_blocking_ast,
1631 ldlm_completion_ast, NULL, NULL, 0,
1632 LVB_T_NONE, NULL, lh);
1633 if (rc == ELDLM_OK) {
/* On enqueue failure, zero the handle to mark it unused. */
1636 memset(lh, 0, sizeof(*lh));
1643 static void lfsck_layout_unlock(struct lustre_handle *lh)
1645 if (lustre_handle_is_used(lh)) {
1646 ldlm_lock_decref(lh, LCK_EX);
1647 memset(lh, 0, sizeof(*lh));
/*
 * Stop the transaction @handle on @dev after recording @result as the
 * transaction outcome (th_result), then return dt_trans_stop()'s code.
 *
 * NOTE(review): the function tail (result normalization and return) was
 * lost in extraction; only the visible lines are kept, byte-identical.
 */
1651 static int lfsck_layout_trans_stop(const struct lu_env *env,
1652 struct dt_device *dev,
1653 struct thandle *handle, int result)
1657 handle->th_result = result;
1658 rc = dt_trans_stop(env, dev, handle);
1668 * \retval +1: repaired
1669 * \retval 0: did nothing
1670 * \retval -ve: on error
1672 static int lfsck_layout_refill_lovea(const struct lu_env *env,
1673 struct thandle *handle,
1674 struct dt_object *parent,
1675 struct lu_fid *cfid,
1677 struct lov_ost_data_v1 *slot,
1678 int fl, __u32 ost_idx)
1680 struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
1683 fid_to_ostid(cfid, oi);
1684 ostid_cpu_to_le(oi, &slot->l_ost_oi);
1685 slot->l_ost_gen = cpu_to_le32(0);
1686 slot->l_ost_idx = cpu_to_le32(ost_idx);
1687 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
1696 * \retval +1: repaired
1697 * \retval 0: did nothing
1698 * \retval -ve: on error
/*
 * Extend (or create) the layout EA of @parent so that slot @ea_off
 * references the OST-object @cfid on @ost_idx. For LU_XATTR_CREATE a
 * fresh V1 lmm is initialized with default values; otherwise any gap
 * between the current stripe count and @ea_off is zero-filled (dummy
 * slots) and the layout generation is bumped. Finally the target slot
 * is filled via lfsck_layout_refill_lovea().
 *
 * NOTE(review): extraction dropped interior lines (braces, locals such
 * as rc, the tail of the refill call) — code kept byte-identical.
 */
1700 static int lfsck_layout_extend_lovea(const struct lu_env *env,
1701 struct thandle *handle,
1702 struct dt_object *parent,
1703 struct lu_fid *cfid,
1704 struct lu_buf *buf, int fl,
1705 __u32 ost_idx, __u32 ea_off)
1707 struct lov_mds_md_v1 *lmm = buf->lb_buf;
1708 struct lov_ost_data_v1 *objs;
1712 if (fl == LU_XATTR_CREATE) {
1713 LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1,
/* Brand-new layout EA: initialize every lmm field from scratch. */
1716 memset(lmm, 0, buf->lb_len);
1717 lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
1718 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
1719 lmm->lmm_pattern = cpu_to_le32(LOV_PATTERN_RAID0);
1720 fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
1721 lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
1722 /* XXX: We cannot know the stripe size,
1723 * then use the default value (1 MB). */
1724 lmm->lmm_stripe_size = cpu_to_le32(1024 * 1024);
1725 lmm->lmm_layout_gen = cpu_to_le16(0);
1726 objs = &(lmm->lmm_objects[ea_off]);
1728 __u16 count = le16_to_cpu(lmm->lmm_stripe_count);
1729 int gap = ea_off - count;
1730 __u32 magic = le32_to_cpu(lmm->lmm_magic);
1732 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3
1733 * which has been verified in lfsck_layout_verify_header()
1734 * already. If some new magic introduced in the future,
1735 * then layout LFSCK needs to be updated also. */
1736 if (magic == LOV_MAGIC_V1) {
1737 objs = &(lmm->lmm_objects[count]);
1739 LASSERT(magic == LOV_MAGIC_V3);
1740 objs = &((struct lov_mds_md_v3 *)lmm)->
/* Zero-fill the dummy slots between old count and ea_off, and bump
 * the layout generation to invalidate cached layouts. */
1745 memset(objs, 0, gap * sizeof(*objs));
1746 lmm->lmm_layout_gen =
1747 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
1750 LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1, magic));
1753 lmm->lmm_stripe_count = cpu_to_le16(ea_off + 1);
1754 rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
1761 * \retval +1: repaired
1762 * \retval 0: did nothing
1763 * \retval -ve: on error
/*
 * Rewrite the parent-FID ("filter_fid") xattr of the OST-object @cfid
 * on device @cdev so it points back at @parent with stripe index
 * @ea_off, inside a local transaction on the child's device.
 *
 * \retval +1: repaired, 0: did nothing, -ve: error (per file doc above).
 *
 * NOTE(review): extraction dropped interior lines (IS_ERR checks,
 * GOTO labels, RETURN) — code kept byte-identical.
 */
1765 static int lfsck_layout_update_pfid(const struct lu_env *env,
1766 struct lfsck_component *com,
1767 struct dt_object *parent,
1768 struct lu_fid *cfid,
1769 struct dt_device *cdev, __u32 ea_off)
1771 struct filter_fid *pfid = &lfsck_env_info(env)->lti_new_pfid;
1772 struct dt_object *child;
1773 struct thandle *handle;
1774 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1779 child = lfsck_object_find_by_dev(env, cdev, cfid);
1781 RETURN(PTR_ERR(child));
1783 handle = dt_trans_create(env, cdev);
1785 GOTO(out, rc = PTR_ERR(handle));
1787 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1788 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1789 /* In fact, the ff_parent::f_ver is not the real parent FID::f_ver,
1790 * instead, it is the OST-object index in its parent MDT-object
1792 pfid->ff_parent.f_ver = cpu_to_le32(ea_off);
1793 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1795 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1799 rc = dt_trans_start(env, cdev, handle);
1803 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
/* Success maps to +1 per the repair return convention. */
1806 GOTO(stop, rc = (rc == 0 ? 1 : rc));
1809 dt_trans_stop(env, cdev, handle);
1812 lu_object_put(env, &child->do_lu);
1818 * \retval +1: repaired
1819 * \retval 0: did nothing
1820 * \retval -ve: on error
/*
 * Re-create a lost MDT-object for an orphan OST-object @cfid under
 * .lustre/lost+found/MDTxxxx: allocate a parent FID if the orphan does
 * not know one, build a name from prefix + FID + postfix (retrying with
 * a bumped f_ver on collision), then in one transaction (a) update the
 * OST-object's parent xattr, (b) create the MDT-object, (c) write its
 * layout EA, and (d) insert it into lost+found.
 *
 * NOTE(review): lossy extraction — many interior lines (braces, GOTOs,
 * declarations) are missing; code kept byte-identical.
 */
1822 static int lfsck_layout_recreate_parent(const struct lu_env *env,
1823 struct lfsck_component *com,
1824 struct lfsck_tgt_desc *ltd,
1825 struct lu_orphan_rec *rec,
1826 struct lu_fid *cfid,
1828 const char *postfix,
1831 struct lfsck_thread_info *info = lfsck_env_info(env);
1832 char *name = info->lti_key;
1833 struct lu_attr *la = &info->lti_la;
1834 struct dt_object_format *dof = &info->lti_dof;
1835 struct lfsck_instance *lfsck = com->lc_lfsck;
1836 struct lu_fid *pfid = &rec->lor_fid;
1837 struct lu_fid *tfid = &info->lti_fid3;
1838 struct dt_device *next = lfsck->li_next;
1839 struct dt_object *pobj = NULL;
1840 struct dt_object *cobj = NULL;
1841 struct thandle *th = NULL;
1842 struct lu_buf *pbuf = NULL;
1843 struct lu_buf *ea_buf = &info->lti_big_buf;
1844 int buflen = ea_buf->lb_len;
1848 /* Create .lustre/lost+found/MDTxxxx when needed. */
1849 if (unlikely(lfsck->li_lpf_obj == NULL)) {
1850 rc = lfsck_create_lpf(env, lfsck);
/* Orphan with unknown parent: allocate a fresh FID and prepare the
 * filter_fid buffer so the child can be pointed at it. */
1855 if (fid_is_zero(pfid)) {
1856 struct filter_fid *ff = &info->lti_new_pfid;
1858 rc = lfsck_fid_alloc(env, lfsck, pfid, false);
1862 ff->ff_parent.f_seq = cpu_to_le64(pfid->f_seq);
1863 ff->ff_parent.f_oid = cpu_to_le32(pfid->f_oid);
1864 /* In fact, the ff_parent::f_ver is not the real parent FID::f_ver,
1865 * instead, it is the OST-object index in its parent MDT-object
1867 ff->ff_parent.f_ver = cpu_to_le32(ea_off);
1868 pbuf = lfsck_buf_get(env, ff, sizeof(struct filter_fid));
1869 cobj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, cfid);
1871 RETURN(PTR_ERR(cobj));
1874 CDEBUG(D_LFSCK, "Re-create the lost MDT-object: parent "
1875 DFID", child "DFID", OST-index %u, stripe-index %u, "
1876 "prefix %s, postfix %s\n",
1877 PFID(pfid), PFID(cfid), ltd->ltd_index, ea_off, prefix, postfix);
1879 pobj = lfsck_object_find_by_dev(env, lfsck->li_bottom, pfid);
1881 GOTO(put, rc = PTR_ERR(pobj));
1883 LASSERT(prefix != NULL);
1884 LASSERT(postfix != NULL);
1888 * 1. Use the MDT-object's FID as the name with prefix and postfix.
1890 * 1.1 prefix "C-": More than one OST-objects claim the same
1891 * MDT-object and the same slot in the layout EA.
1892 * It may be created for dangling referenced MDT
1893 * object or may be not.
1894 * 1.2 prefix "N-": The orphan OST-object does not know which one
1895 * is the real parent, so the LFSCK assign a new
1896 * FID as its parent.
1897 * 1.3 prefix "R-": The orphan OST-object know its parent FID but
1898 * does not know the position in the namespace.
1900 * 2. If there is name conflict, increase FID::f_ver for new name. */
1901 sprintf(name, "%s"DFID"%s", prefix, PFID(pfid), postfix);
1903 rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
1904 (const struct dt_key *)name, BYPASS_CAPA);
1905 if (rc != 0 && rc != -ENOENT)
1908 if (unlikely(rc == 0)) {
1909 CWARN("%s: The name %s under lost+found has been used "
1910 "by the "DFID". Try to increase the FID version "
1911 "for the new file name.\n",
1912 lfsck_lfsck2name(lfsck), name, PFID(tfid));
1915 sprintf(name, "%s"DFID"%s", prefix, PFID(tfid), postfix);
/* Minimal attributes for the re-created file: owner from the orphan
 * record, mode 0600 regular file. */
1919 memset(la, 0, sizeof(*la));
1920 la->la_uid = rec->lor_uid;
1921 la->la_gid = rec->lor_gid;
1922 la->la_mode = S_IFREG | S_IRUSR | S_IWUSR;
1923 la->la_valid = LA_MODE | LA_UID | LA_GID;
1925 memset(dof, 0, sizeof(*dof));
1926 dof->dof_type = dt_mode_to_dft(S_IFREG);
1928 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
1930 lu_buf_realloc(ea_buf, rc);
1931 buflen = ea_buf->lb_len;
1932 if (ea_buf->lb_buf == NULL)
1933 GOTO(put, rc = -ENOMEM);
1935 ea_buf->lb_len = rc;
1938 th = dt_trans_create(env, next);
1940 GOTO(put, rc = PTR_ERR(th));
1942 /* 1a. Update OST-object's parent information remotely.
1944 * If other subsequent modifications failed, then next LFSCK scanning
1945 * will process the OST-object as orphan again with known parent FID. */
1947 rc = dt_declare_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th);
1952 /* 2a. Create the MDT-object locally. */
1953 rc = dt_declare_create(env, pobj, la, NULL, dof, th);
1957 /* 3a. Add layout EA for the MDT-object. */
1958 rc = dt_declare_xattr_set(env, pobj, ea_buf, XATTR_NAME_LOV,
1959 LU_XATTR_CREATE, th);
1963 /* 4a. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
1964 rc = dt_declare_insert(env, lfsck->li_lpf_obj,
1965 (const struct dt_rec *)pfid,
1966 (const struct dt_key *)name, th);
1970 rc = dt_trans_start(env, next, th);
1974 /* 1b. Update OST-object's parent information remotely. */
1976 rc = dt_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th,
1982 dt_write_lock(env, pobj, 0);
1983 /* 2b. Create the MDT-object locally. */
1984 rc = dt_create(env, pobj, la, NULL, dof, th);
1986 /* 3b. Add layout EA for the MDT-object. */
1987 rc = lfsck_layout_extend_lovea(env, th, pobj, cfid, ea_buf,
1988 LU_XATTR_CREATE, ltd->ltd_index,
1990 dt_write_unlock(env, pobj);
1994 /* 4b. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
1995 rc = dt_insert(env, lfsck->li_lpf_obj,
1996 (const struct dt_rec *)pfid,
1997 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
2002 dt_trans_stop(env, next, th);
2004 if (cobj != NULL && !IS_ERR(cobj))
2005 lu_object_put(env, &cobj->do_lu);
2006 if (pobj != NULL && !IS_ERR(pobj))
2007 lu_object_put(env, &pobj->do_lu);
2008 ea_buf->lb_len = buflen;
/* Any non-negative outcome counts as "repaired" (+1). */
2010 return rc >= 0 ? 1 : rc;
/*
 * Ask the OST that holds the conflicting OST-object @fid (target @index)
 * to conditionally destroy it, by sending a synchronous LFSCK_NOTIFY
 * RPC carrying an LE_CONDITIONAL_DESTROY request. The slave side only
 * destroys the object if it was created by a former dangling-reference
 * repair and has not been modified since.
 *
 * NOTE(review): extraction dropped interior lines (ltd->ltd_exp
 * assignment to exp, GOTO labels, the function tail with RETURN and
 * the reference drop) — code kept byte-identical.
 */
2013 static int lfsck_layout_master_conditional_destroy(const struct lu_env *env,
2014 struct lfsck_component *com,
2015 const struct lu_fid *fid,
2018 struct lfsck_thread_info *info = lfsck_env_info(env);
2019 struct lfsck_request *lr = &info->lti_lr;
2020 struct lfsck_instance *lfsck = com->lc_lfsck;
2021 struct lfsck_tgt_desc *ltd;
2022 struct ptlrpc_request *req;
2023 struct lfsck_request *tmp;
2024 struct obd_export *exp;
2028 ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
2029 if (unlikely(ltd == NULL))
/* The peer must support the LFSCK connect flag for this RPC. */
2033 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
2034 GOTO(put, rc = -EOPNOTSUPP);
2036 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2038 GOTO(put, rc = -ENOMEM);
2040 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2042 ptlrpc_request_free(req);
2047 memset(lr, 0, sizeof(*lr));
2048 lr->lr_event = LE_CONDITIONAL_DESTROY;
2049 lr->lr_active = LT_LAYOUT;
2052 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2054 ptlrpc_request_set_replen(req);
2056 rc = ptlrpc_queue_wait(req);
2057 ptlrpc_req_finished(req);
/*
 * Slave-side handler for LE_CONDITIONAL_DESTROY: destroy the local
 * OST-object @lr->lr_fid only if it still looks like an untouched
 * dangling-repair stub (la_ctime == 0 and no S_ISUID). The check is
 * done twice — once lockless, then again under an EX extent lock on
 * [0, EOF] and the object write lock — so a concurrent write cannot
 * race the destroy.
 *
 * NOTE(review): extraction dropped interior lines (flags declaration,
 * GOTO labels, RETURN) — code kept byte-identical.
 */
2067 static int lfsck_layout_slave_conditional_destroy(const struct lu_env *env,
2068 struct lfsck_component *com,
2069 struct lfsck_request *lr)
2071 struct lfsck_thread_info *info = lfsck_env_info(env);
2072 struct lu_attr *la = &info->lti_la;
2073 ldlm_policy_data_t *policy = &info->lti_policy;
2074 struct ldlm_res_id *resid = &info->lti_resid;
2075 struct lfsck_instance *lfsck = com->lc_lfsck;
2076 struct dt_device *dev = lfsck->li_bottom;
2077 struct lu_fid *fid = &lr->lr_fid;
2078 struct dt_object *obj;
2079 struct thandle *th = NULL;
2080 struct lustre_handle lh = { 0 };
2085 obj = lfsck_object_find_by_dev(env, dev, fid);
2087 RETURN(PTR_ERR(obj));
2089 dt_read_lock(env, obj, 0);
2090 if (dt_object_exists(obj) == 0) {
2091 dt_read_unlock(env, obj);
2093 GOTO(put, rc = -ENOENT);
2096 /* Get obj's attr without lock firstly. */
2097 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2098 dt_read_unlock(env, obj);
/* Any ctime or the S_ISUID marker means the object has been used:
 * refuse the destroy with -ETXTBSY. */
2102 if (likely(la->la_ctime != 0 || la->la_mode & S_ISUID))
2103 GOTO(put, rc = -ETXTBSY);
2105 /* Acquire extent lock on [0, EOF] to sync with all possible written. */
2106 LASSERT(lfsck->li_namespace != NULL);
2108 memset(policy, 0, sizeof(*policy));
2109 policy->l_extent.end = OBD_OBJECT_EOF;
2110 ost_fid_build_resid(fid, resid);
2111 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_EXTENT,
2112 policy, LCK_EX, &flags, ldlm_blocking_ast,
2113 ldlm_completion_ast, NULL, NULL, 0,
2114 LVB_T_NONE, NULL, &lh);
2116 GOTO(put, rc = -EIO);
2118 th = dt_trans_create(env, dev);
2120 GOTO(unlock1, rc = PTR_ERR(th));
2122 rc = dt_declare_ref_del(env, obj, th);
2126 rc = dt_declare_destroy(env, obj, th);
2130 rc = dt_trans_start_local(env, dev, th);
2134 dt_write_lock(env, obj, 0);
2135 /* Get obj's attr within lock again. */
2136 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2140 if (la->la_ctime != 0)
2141 GOTO(unlock2, rc = -ETXTBSY);
2143 rc = dt_ref_del(env, obj, th);
2147 rc = dt_destroy(env, obj, th);
2149 CDEBUG(D_LFSCK, "Destroy the empty OST-object "DFID" which "
2150 "was created for reparing dangling referenced case. "
2151 "But the original missed OST-object is found now.\n",
2157 dt_write_unlock(env, obj);
2160 dt_trans_stop(env, dev, th);
2163 ldlm_lock_decref(&lh, LCK_EX);
2166 lu_object_put(env, &obj->do_lu);
2172 * Some OST-object has occupied the specified layout EA slot.
2173 * Such OST-object may be generated by the LFSCK when repair
2174 * dangling referenced MDT-object, which can be indicated by
2175 * attr::la_ctime == 0 but without S_ISUID in la_mode. If it
2176 * is true and such OST-object has not been modified yet, we
2177 * will replace it with the orphan OST-object; otherwise the
2178 * LFSCK will create new MDT-object to reference the orphan.
2180 * \retval +1: repaired
2181 * \retval 0: did nothing
2182 * \retval -ve: on error
/*
 * Resolve a layout EA slot conflict: slot @ea_off of @parent already
 * references another OST-object (@cfid2 on @ost_idx2). Try to destroy
 * the incumbent via conditional destroy; if it is busy (-ETXTBSY),
 * fall back to creating a brand-new "C-"-prefixed MDT-object for the
 * orphan instead. Otherwise overwrite the slot with the orphan @cfid
 * and bump the layout generation, under the parent's layout lock.
 *
 * NOTE(review): extraction dropped interior lines (postfix buffer
 * declaration, GOTO labels, braces) — code kept byte-identical.
 */
2184 static int lfsck_layout_conflict_create(const struct lu_env *env,
2185 struct lfsck_component *com,
2186 struct lfsck_tgt_desc *ltd,
2187 struct lu_orphan_rec *rec,
2188 struct dt_object *parent,
2189 struct lu_fid *cfid,
2190 struct lu_buf *ea_buf,
2191 struct lov_ost_data_v1 *slot,
2192 __u32 ea_off, __u32 ori_len)
2194 struct lfsck_thread_info *info = lfsck_env_info(env);
2195 struct lu_fid *cfid2 = &info->lti_fid2;
2196 struct ost_id *oi = &info->lti_oi;
2197 struct lov_mds_md_v1 *lmm = ea_buf->lb_buf;
2198 struct dt_device *dev = com->lc_lfsck->li_bottom;
2199 struct thandle *th = NULL;
2200 struct lustre_handle lh = { 0 };
2202 __u32 ost_idx2 = le32_to_cpu(slot->l_ost_idx);
/* Decode the incumbent occupant's FID from the conflicting slot. */
2206 ostid_le_to_cpu(&slot->l_ost_oi, oi);
2207 ostid_to_fid(cfid2, oi, ost_idx2);
2209 CDEBUG(D_LFSCK, "Handle layout EA conflict: parent "DFID
2210 ", cur-child "DFID" on the OST %u, orphan-child "
2211 DFID" on the OST %u, stripe-index %u\n",
2212 PFID(lfsck_dto2fid(parent)), PFID(cfid2), ost_idx2,
2213 PFID(cfid), ltd->ltd_index, ea_off);
2215 /* Hold layout lock on the parent to prevent others to access. */
2216 rc = lfsck_layout_lock(env, com, parent, &lh,
2217 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2221 rc = lfsck_layout_master_conditional_destroy(env, com, cfid2, ost_idx2);
2223 /* If the conflict OST-object is not created for fixing dangling
2224 * referenced MDT-object in former LFSCK check/repair, or it has
2225 * been modified by others, then we cannot destroy it. Re-create
2226 * a new MDT-object for the orphan OST-object. */
2227 if (rc == -ETXTBSY) {
2228 /* No need the layout lock on the original parent. */
2229 lfsck_layout_unlock(&lh);
2230 ea_buf->lb_len = ori_len;
2232 fid_zero(&rec->lor_fid);
2233 snprintf(postfix, 64, "-"DFID"-%x",
2234 PFID(lu_object_fid(&parent->do_lu)), ea_off);
2235 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2236 "C-", postfix, ea_off);
2241 if (rc != 0 && rc != -ENOENT)
2244 th = dt_trans_create(env, dev);
2246 GOTO(unlock, rc = PTR_ERR(th));
2248 rc = dt_declare_xattr_set(env, parent, ea_buf, XATTR_NAME_LOV,
2249 LU_XATTR_REPLACE, th);
2253 rc = dt_trans_start_local(env, dev, th);
/* Incumbent destroyed (or already gone): take over the slot. */
2257 dt_write_lock(env, parent, 0);
2258 lmm->lmm_layout_gen = cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2259 rc = lfsck_layout_refill_lovea(env, th, parent, cfid, ea_buf, slot,
2260 LU_XATTR_REPLACE, ltd->ltd_index);
2261 dt_write_unlock(env, parent);
2266 dt_trans_stop(env, dev, th);
2269 lfsck_layout_unlock(&lh);
2272 ea_buf->lb_len = ori_len;
2274 return rc >= 0 ? 1 : rc;
2278 * \retval +1: repaired
2279 * \retval 0: did nothing
2280 * \retval -ve: on error
/*
 * Re-insert the orphan OST-object @cfid (on @ost_idx, stripe @ea_off)
 * into the layout EA of its existing parent MDT-object @parent.
 * Outcomes, all under the parent's layout lock:
 *   - no layout EA yet        -> create one via extend_lovea;
 *   - EA too short            -> extend it to cover ea_off;
 *   - slot holds a dummy      -> refill it in place;
 *   - slot already holds cfid -> fix the child's pfid xattr if the
 *                                index disagrees, else nothing to do;
 *   - slot held by another    -> delegate to conflict_create.
 * Dry-run mode short-circuits each repair with rc = 1.
 *
 * NOTE(review): lossy extraction — interior lines (locals rc/rc1/fl/
 * magic/count/i, several braces and GOTOs) are missing; code kept
 * byte-identical.
 */
2282 static int lfsck_layout_recreate_lovea(const struct lu_env *env,
2283 struct lfsck_component *com,
2284 struct lfsck_tgt_desc *ltd,
2285 struct lu_orphan_rec *rec,
2286 struct dt_object *parent,
2287 struct lu_fid *cfid,
2288 __u32 ost_idx, __u32 ea_off)
2290 struct lfsck_thread_info *info = lfsck_env_info(env);
2291 struct lu_buf *buf = &info->lti_big_buf;
2292 struct lu_fid *fid = &info->lti_fid2;
2293 struct ost_id *oi = &info->lti_oi;
2294 struct lfsck_instance *lfsck = com->lc_lfsck;
2295 struct dt_device *dt = lfsck->li_bottom;
2296 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2297 struct thandle *handle = NULL;
2298 size_t buflen = buf->lb_len;
2299 struct lov_mds_md_v1 *lmm;
2300 struct lov_ost_data_v1 *objs;
2301 struct lustre_handle lh = { 0 };
2310 CDEBUG(D_LFSCK, "Re-create the crashed layout EA: parent "
2311 DFID", child "DFID", OST-index %u, stripe-index %u\n",
2312 PFID(lfsck_dto2fid(parent)), PFID(cfid), ost_idx, ea_off);
2314 rc = lfsck_layout_lock(env, com, parent, &lh,
2315 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2320 if (!(bk->lb_param & LPF_DRYRUN)) {
2321 handle = dt_trans_create(env, dt);
2323 GOTO(unlock_layout, rc = PTR_ERR(handle));
2325 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2330 rc = dt_trans_start_local(env, dt, handle);
/* Read the current layout EA; -ERANGE means our buffer is too small,
 * so query the needed size, drop locks/transaction and retry. */
2335 dt_write_lock(env, parent, 0);
2336 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2337 if (rc == -ERANGE) {
2338 rc = dt_xattr_get(env, parent, &LU_BUF_NULL, XATTR_NAME_LOV,
2342 dt_write_unlock(env, parent);
2343 if (handle != NULL) {
2344 dt_trans_stop(env, dt, handle);
2349 GOTO(unlock_layout, rc);
2351 lu_buf_realloc(buf, rc);
2352 buflen = buf->lb_len;
2353 if (buf->lb_buf == NULL)
2354 GOTO(unlock_layout, rc = -ENOMEM);
2356 fl = LU_XATTR_REPLACE;
2358 } else if (rc == -ENODATA || rc == 0) {
2359 fl = LU_XATTR_CREATE;
2360 } else if (rc < 0) {
2361 GOTO(unlock_parent, rc);
2362 } else if (unlikely(buf->lb_len == 0)) {
2363 dt_write_unlock(env, parent);
2364 if (handle != NULL) {
2365 dt_trans_stop(env, dt, handle);
2369 lu_buf_alloc(buf, rc);
2370 buflen = buf->lb_len;
2371 if (buf->lb_buf == NULL)
2372 GOTO(unlock_layout, rc = -ENOMEM);
2374 fl = LU_XATTR_REPLACE;
2377 fl = LU_XATTR_REPLACE;
/* No EA at all: build one from scratch covering slot ea_off. */
2380 if (fl == LU_XATTR_CREATE) {
2381 if (bk->lb_param & LPF_DRYRUN)
2382 GOTO(unlock_parent, rc = 1);
2384 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2385 /* If the declared is not big enough, re-try. */
2386 if (buf->lb_len < rc) {
2387 dt_write_unlock(env, parent);
2388 if (handle != NULL) {
2389 dt_trans_stop(env, dt, handle);
2393 lu_buf_realloc(buf, rc);
2394 buflen = buf->lb_len;
2395 if (buf->lb_buf == NULL)
2396 GOTO(unlock_layout, rc = -ENOMEM);
2402 rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
2403 fl, ost_idx, ea_off);
2405 GOTO(unlock_parent, rc);
2409 rc1 = lfsck_layout_verify_header(lmm);
2411 GOTO(unlock_parent, rc = rc1);
2413 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2414 * been verified in lfsck_layout_verify_header() already. If some
2415 * new magic introduced in the future, then layout LFSCK needs to
2416 * be updated also. */
2417 magic = le32_to_cpu(lmm->lmm_magic);
2418 if (magic == LOV_MAGIC_V1) {
2419 objs = &(lmm->lmm_objects[0]);
2421 LASSERT(magic == LOV_MAGIC_V3);
2422 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2425 count = le16_to_cpu(lmm->lmm_stripe_count);
2427 GOTO(unlock_parent, rc = -EINVAL);
2430 /* Exceed the current end of MDT-object layout EA. Then extend it. */
2431 if (count <= ea_off) {
2432 if (bk->lb_param & LPF_DRYRUN)
2433 GOTO(unlock_parent, rc = 1);
2435 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2436 /* If the declared is not big enough, re-try. */
2437 if (buf->lb_len < rc) {
2438 dt_write_unlock(env, parent);
2439 if (handle != NULL) {
2440 dt_trans_stop(env, dt, handle);
2444 lu_buf_realloc(buf, rc);
2445 buflen = buf->lb_len;
2446 if (buf->lb_buf == NULL)
2447 GOTO(unlock_layout, rc = -ENOMEM);
2453 rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
2454 fl, ost_idx, ea_off);
2455 GOTO(unlock_parent, rc);
2458 LASSERTF(rc > 0, "invalid rc = %d\n", rc);
/* Walk the existing slots looking for the one that matches ea_off. */
2461 for (i = 0; i < count; i++, objs++) {
2462 /* The MDT-object was created via lfsck_layout_recover_create()
2463 * by others before, and we fill the dummy layout EA. */
2464 if (is_dummy_lov_ost_data(objs)) {
2468 if (bk->lb_param & LPF_DRYRUN)
2469 GOTO(unlock_parent, rc = 1);
2471 lmm->lmm_layout_gen =
2472 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2473 rc = lfsck_layout_refill_lovea(env, handle, parent,
2474 cfid, buf, objs, fl,
2476 GOTO(unlock_parent, rc);
2479 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2480 ostid_to_fid(fid, oi, le32_to_cpu(objs->l_ost_idx));
2481 /* It should be rare case, the slot is there, but the LFSCK
2482 * does not handle it during the first-phase cycle scanning. */
2483 if (unlikely(lu_fid_eq(fid, cfid))) {
2485 GOTO(unlock_parent, rc = 0);
2487 /* Rare case that the OST-object index
2488 * does not match the parent MDT-object
2489 * layout EA. We trust the later one. */
2490 if (bk->lb_param & LPF_DRYRUN)
2491 GOTO(unlock_parent, rc = 1);
2493 dt_write_unlock(env, parent);
2495 dt_trans_stop(env, dt, handle);
2496 lfsck_layout_unlock(&lh);
2497 buf->lb_len = buflen;
2498 rc = lfsck_layout_update_pfid(env, com, parent,
2499 cfid, ltd->ltd_tgt, i);
2506 /* The MDT-object exists, but related layout EA slot is occupied
2508 if (bk->lb_param & LPF_DRYRUN)
2509 GOTO(unlock_parent, rc = 1);
/* Hand the occupied-slot case off to the conflict resolver. */
2511 dt_write_unlock(env, parent);
2513 dt_trans_stop(env, dt, handle);
2514 lfsck_layout_unlock(&lh);
2515 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
2516 objs = &(lmm->lmm_objects[ea_off]);
2518 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
2519 rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
2520 buf, objs, ea_off, buflen);
2525 dt_write_unlock(env, parent);
2529 dt_trans_stop(env, dt, handle);
2532 lfsck_layout_unlock(&lh);
2533 buf->lb_len = buflen;
/*
 * Handle one orphan OST-object record @rec/@cfid reported by an OST:
 * validate the FIDs, then either re-create a missing parent MDT-object
 * (unknown or nonexistent parent) or re-insert the orphan into the
 * existing parent's layout EA. Updates the phase2 repaired/failed
 * counters under com->lc_sem.
 *
 * NOTE(review): extraction dropped interior lines (prefix arguments to
 * the recreate_parent calls, labels, RETURN) — code kept byte-identical.
 */
2538 static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
2539 struct lfsck_component *com,
2540 struct lfsck_tgt_desc *ltd,
2541 struct lu_orphan_rec *rec,
2542 struct lu_fid *cfid)
2544 struct lfsck_layout *lo = com->lc_file_ram;
2545 struct lu_fid *pfid = &rec->lor_fid;
2546 struct dt_object *parent = NULL;
/* By convention pfid->f_ver carries the stripe index, not a version. */
2547 __u32 ea_off = pfid->f_ver;
2551 if (!fid_is_sane(cfid))
2552 GOTO(out, rc = -EINVAL);
2554 if (fid_is_zero(pfid)) {
2555 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2561 if (!fid_is_sane(pfid))
2562 GOTO(out, rc = -EINVAL);
2564 parent = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
2566 GOTO(out, rc = PTR_ERR(parent));
2568 if (unlikely(dt_object_remote(parent) != 0))
2569 GOTO(put, rc = -EXDEV);
2571 if (dt_object_exists(parent) == 0) {
2572 lu_object_put(env, &parent->do_lu);
2573 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2578 if (!S_ISREG(lu_object_attr(&parent->do_lu)))
2579 GOTO(put, rc = -EISDIR);
2581 rc = lfsck_layout_recreate_lovea(env, com, ltd, rec, parent, cfid,
2582 ltd->ltd_index, ea_off);
2588 lu_object_put(env, &parent->do_lu);
2590 /* The layout EA is changed, need to be reloaded next time. */
2591 lu_object_put_nocache(env, &parent->do_lu);
/* Account the result: rc > 0 paths count as repaired orphans. */
2594 down_write(&com->lc_sem);
2595 com->lc_new_scanned++;
2596 com->lc_new_checked++;
2598 lo->ll_objs_repaired[LLIT_ORPHAN - 1]++;
2600 } else if (rc < 0) {
2601 lo->ll_objs_failed_phase2++;
2603 up_write(&com->lc_sem);
/*
 * Iterate the orphan OST-object index exported by OST @ltd (via the
 * dt_lfsck_orphan_features index API) and feed every record to
 * lfsck_layout_scan_orphan_one(). An -ESRCH from load() marks the scan
 * incomplete (the OST's orphan rbtree was lost, e.g. OSS restart).
 *
 * NOTE(review): extraction dropped interior lines (locals di/key/rc,
 * the do-loop head, GOTO labels) — code kept byte-identical.
 */
2608 static int lfsck_layout_scan_orphan(const struct lu_env *env,
2609 struct lfsck_component *com,
2610 struct lfsck_tgt_desc *ltd)
2612 struct lfsck_layout *lo = com->lc_file_ram;
2613 struct lfsck_instance *lfsck = com->lc_lfsck;
2614 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2615 struct lfsck_thread_info *info = lfsck_env_info(env);
2616 struct ost_id *oi = &info->lti_oi;
2617 struct lu_fid *fid = &info->lti_fid;
2618 struct dt_object *obj;
2619 const struct dt_it_ops *iops;
2624 CDEBUG(D_LFSCK, "%s: start the orphan scanning for OST%04x\n",
2625 lfsck_lfsck2name(lfsck), ltd->ltd_index);
/* Build the per-OST anchor FID (IDIF seq, id 0) for the orphan index. */
2627 ostid_set_seq(oi, FID_SEQ_IDIF);
2628 ostid_set_id(oi, 0);
2629 ostid_to_fid(fid, oi, ltd->ltd_index);
2630 obj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, fid);
2631 if (unlikely(IS_ERR(obj)))
2632 RETURN(PTR_ERR(obj));
2634 rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_orphan_features);
2638 iops = &obj->do_index_ops->dio_it;
2639 di = iops->init(env, obj, 0, BYPASS_CAPA);
2641 GOTO(put, rc = PTR_ERR(di));
2643 rc = iops->load(env, di, 0);
2645 /* -ESRCH means that the orphan OST-objects rbtree has been
2646 * cleanup because of the OSS server restart or other errors. */
2647 lo->ll_flags |= LF_INCOMPLETE;
2652 rc = iops->next(env, di);
2664 struct lu_orphan_rec *rec = &info->lti_rec;
/* Fault-injection hook: optionally stall between records for tests. */
2666 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY3) &&
2668 struct ptlrpc_thread *thread = &lfsck->li_thread;
2669 struct l_wait_info lwi;
2671 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
2673 l_wait_event(thread->t_ctl_waitq,
2674 !thread_is_running(thread),
/* Remember the last scanned FID so a restart can resume from it. */
2678 key = iops->key(env, di);
2679 com->lc_fid_latest_scanned_phase2 = *(struct lu_fid *)key;
2680 rc = iops->rec(env, di, (struct dt_rec *)rec, 0);
2682 rc = lfsck_layout_scan_orphan_one(env, com, ltd, rec,
2683 &com->lc_fid_latest_scanned_phase2);
2684 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
2687 lfsck_control_speed_by_self(com);
2689 rc = iops->next(env, di);
2690 } while (rc < 0 && !(bk->lb_param & LPF_FAILOUT));
2697 iops->fini(env, di);
2699 lu_object_put(env, &obj->do_lu);
2701 CDEBUG(D_LFSCK, "%s: finish the orphan scanning for OST%04x, rc = %d\n",
2702 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
2704 return rc > 0 ? 0 : rc;
2707 /* For the MDT-object with dangling reference, we need to re-create
2708 * the missed OST-object with the known FID/owner information. */
2709 static int lfsck_layout_recreate_ostobj(const struct lu_env *env,
2710 struct lfsck_component *com,
2711 struct lfsck_layout_req *llr,
2714 struct lfsck_thread_info *info = lfsck_env_info(env);
2715 struct filter_fid *pfid = &info->lti_new_pfid;
2716 struct dt_allocation_hint *hint = &info->lti_hint;
2717 struct dt_object *parent = llr->llr_parent->llo_obj;
2718 struct dt_object *child = llr->llr_child;
2719 struct dt_device *dev = lfsck_obj2dt_dev(child);
2720 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2721 struct thandle *handle;
2723 struct lustre_handle lh = { 0 };
2727 CDEBUG(D_LFSCK, "Repair dangling reference for: parent "DFID
2728 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
2729 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2730 llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid);
/* Serialize against concurrent layout/xattr changes on the parent. */
2732 rc = lfsck_layout_lock(env, com, parent, &lh,
2733 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2737 handle = dt_trans_create(env, dev);
2739 GOTO(unlock1, rc = PTR_ERR(handle));
2741 hint->dah_parent = NULL;
/* Build the filter_fid back-pointer; f_ver carries the stripe index,
 * not the parent's real FID version (see repair_unmatched_pair). */
2743 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2744 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2745 pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
2746 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
/* Declare both the create and the XATTR_NAME_FID set in one transaction. */
2748 rc = dt_declare_create(env, child, la, hint, NULL, handle);
2752 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
2753 LU_XATTR_CREATE, handle);
2757 rc = dt_trans_start(env, dev, handle);
2761 dt_read_lock(env, parent, 0);
/* Parent destroyed concurrently: nothing to repair, rc = 1 (skipped). */
2762 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2763 GOTO(unlock2, rc = 1);
2765 rc = dt_create(env, child, la, hint, NULL, handle);
2769 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
2770 handle, BYPASS_CAPA);
2775 dt_read_unlock(env, parent);
2778 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2781 lfsck_layout_unlock(&lh);
2786 /* If the OST-object does not recognize the MDT-object as its parent, and
2787 * there is no other MDT-object claims as its parent, then just trust the
2788 * given MDT-object as its parent. So update the OST-object filter_fid. */
2789 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
2790 struct lfsck_component *com,
2791 struct lfsck_layout_req *llr,
2792 const struct lu_attr *pla)
2794 struct lfsck_thread_info *info = lfsck_env_info(env);
2795 struct filter_fid *pfid = &info->lti_new_pfid;
2796 struct lu_attr *tla = &info->lti_la3;
2797 struct dt_object *parent = llr->llr_parent->llo_obj;
2798 struct dt_object *child = llr->llr_child;
2799 struct dt_device *dev = lfsck_obj2dt_dev(child);
2800 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2801 struct thandle *handle;
2803 struct lustre_handle lh = { 0 };
2807 CDEBUG(D_LFSCK, "Repair unmatched MDT-OST pair for: parent "DFID
2808 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
2809 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2810 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
/* Hold the layout/xattr lock on the parent while rewriting the
 * child's back-pointer and owner. */
2812 rc = lfsck_layout_lock(env, com, parent, &lh,
2813 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2817 handle = dt_trans_create(env, dev);
2819 GOTO(unlock1, rc = PTR_ERR(handle));
2821 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2822 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2823 /* The ff_parent->f_ver is not the real parent fid->f_ver. Instead,
2824 * it is the OST-object index in the parent MDT-object layout. */
2825 pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
2826 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
/* Declare both repairs: filter_fid rewrite and owner sync. */
2828 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
2832 tla->la_valid = LA_UID | LA_GID;
2833 tla->la_uid = pla->la_uid;
2834 tla->la_gid = pla->la_gid;
2835 rc = dt_declare_attr_set(env, child, tla, handle);
2839 rc = dt_trans_start(env, dev, handle);
2843 dt_write_lock(env, parent, 0);
/* Parent destroyed concurrently: treat as "nothing to do" (rc = 1). */
2844 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2845 GOTO(unlock2, rc = 1);
2847 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
2852 /* Get the latest parent's owner. */
2853 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
2857 tla->la_valid = LA_UID | LA_GID;
2858 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
2863 dt_write_unlock(env, parent);
2866 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2869 lfsck_layout_unlock(&lh);
2874 /* If there are more than one MDT-objects claim as the OST-object's parent,
2875 * and the OST-object only recognizes one of them, then we need to generate
2876 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
2877 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
2878 struct lfsck_component *com,
2879 struct lfsck_layout_req *llr,
2883 struct lfsck_thread_info *info = lfsck_env_info(env);
2884 struct dt_allocation_hint *hint = &info->lti_hint;
2885 struct dt_object_format *dof = &info->lti_dof;
2886 struct dt_device *pdev = com->lc_lfsck->li_next;
2887 struct ost_id *oi = &info->lti_oi;
2888 struct dt_object *parent = llr->llr_parent->llo_obj;
2889 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
2890 struct dt_object *child = NULL;
2891 struct lu_device *d = &cdev->dd_lu_dev;
2892 struct lu_object *o = NULL;
2893 struct thandle *handle;
2894 struct lov_mds_md_v1 *lmm;
2895 struct lov_ost_data_v1 *objs;
2896 struct lustre_handle lh = { 0 };
2901 CDEBUG(D_LFSCK, "Repair multiple references for: parent "DFID
2902 ", OST-index %u, stripe-index %u, owner %u:%u\n",
2903 PFID(lfsck_dto2fid(parent)), llr->llr_ost_idx,
2904 llr->llr_lov_idx, la->la_uid, la->la_gid);
2906 rc = lfsck_layout_lock(env, com, parent, &lh,
2907 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
/* The transaction runs on the parent-side device (li_next). */
2911 handle = dt_trans_create(env, pdev);
2913 GOTO(unlock1, rc = PTR_ERR(handle));
/* Allocate an anonymous object on the child device: this becomes the
 * replacement OST-object with a brand new FID. */
2915 o = lu_object_anon(env, d, NULL);
2917 GOTO(stop, rc = PTR_ERR(o));
2919 child = container_of(o, struct dt_object, do_lu);
2920 o = lu_object_locate(o->lo_header, d->ld_type);
2921 if (unlikely(o == NULL))
2922 GOTO(stop, rc = -EINVAL);
2924 child = container_of(o, struct dt_object, do_lu);
2925 la->la_valid = LA_UID | LA_GID;
2926 hint->dah_parent = NULL;
2928 dof->dof_type = DFT_REGULAR;
2929 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
/* Also declare the parent layout EA update pointing at the new child. */
2933 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2934 LU_XATTR_REPLACE, handle);
2938 rc = dt_trans_start(env, pdev, handle);
2942 dt_write_lock(env, parent, 0);
2943 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2944 GOTO(unlock2, rc = 0);
/* Re-read the layout under lock; bail out if it vanished meanwhile. */
2946 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2947 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
2948 GOTO(unlock2, rc = 0);
2951 rc = lfsck_layout_verify_header(lmm);
2955 /* Someone change layout during the LFSCK, no need to repair then. */
2956 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
2957 GOTO(unlock2, rc = 0);
2959 rc = dt_create(env, child, la, hint, dof, handle);
2963 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2964 * been verified in lfsck_layout_verify_header() already. If some
2965 * new magic introduced in the future, then layout LFSCK needs to
2966 * be updated also. */
2967 magic = le32_to_cpu(lmm->lmm_magic);
2968 if (magic == LOV_MAGIC_V1) {
2969 objs = &(lmm->lmm_objects[0]);
2971 LASSERT(magic == LOV_MAGIC_V3);
2972 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Splice the new OST-object into the stripe slot and bump the
 * layout generation, then write back the layout EA (little-endian). */
2975 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
2976 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
2977 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
2978 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
2979 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
2980 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2981 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
/* Success is reported as rc = 1 ("one object repaired"). */
2983 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
2986 dt_write_unlock(env, parent);
2990 lu_object_put(env, &child->do_lu);
2992 dt_trans_stop(env, pdev, handle);
2995 lfsck_layout_unlock(&lh);
3000 /* If the MDT-object and the OST-object have different owner information,
3001 * then trust the MDT-object, because the normal chown/chgrp handle order
3002 * is from MDT to OST, and it is possible that some chown/chgrp operation
3003 * is partly done. */
3004 static int lfsck_layout_repair_owner(const struct lu_env *env,
3005 struct lfsck_component *com,
3006 struct lfsck_layout_req *llr,
3007 struct lu_attr *pla)
3009 struct lfsck_thread_info *info = lfsck_env_info(env);
3010 struct lu_attr *tla = &info->lti_la3;
3011 struct dt_object *parent = llr->llr_parent->llo_obj;
3012 struct dt_object *child = llr->llr_child;
3013 struct dt_device *dev = lfsck_obj2dt_dev(child);
3014 struct thandle *handle;
3018 CDEBUG(D_LFSCK, "Repair inconsistent file owner for: parent "DFID
3019 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
3020 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
3021 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
3023 handle = dt_trans_create(env, dev);
3025 RETURN(PTR_ERR(handle));
/* Copy the parent's (MDT-side) owner into the child's attrs. */
3027 tla->la_uid = pla->la_uid;
3028 tla->la_gid = pla->la_gid;
3029 tla->la_valid = LA_UID | LA_GID;
3030 rc = dt_declare_attr_set(env, child, tla, handle);
3034 rc = dt_trans_start(env, dev, handle);
3038 /* Use the dt_object lock to serialize with destroy and attr_set. */
3039 dt_read_lock(env, parent, 0);
3040 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3041 GOTO(unlock, rc = 1);
3043 /* Get the latest parent's owner. */
3044 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3046 CWARN("%s: fail to get the latest parent's ("DFID") owner, "
3047 "not sure whether some others chown/chgrp during the "
3048 "LFSCK: rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
3049 PFID(lfsck_dto2fid(parent)), rc);
3054 /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
3055 if (unlikely(tla->la_uid != pla->la_uid ||
3056 tla->la_gid != pla->la_gid))
3057 GOTO(unlock, rc = 1)3057;
3059 tla->la_valid = LA_UID | LA_GID;
3060 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3065 dt_read_unlock(env, parent);
3068 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3073 /* Check whether the OST-object correctly back points to the
3074 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
3075 static int lfsck_layout_check_parent(const struct lu_env *env,
3076 struct lfsck_component *com,
3077 struct dt_object *parent,
3078 const struct lu_fid *pfid,
3079 const struct lu_fid *cfid,
3080 const struct lu_attr *pla,
3081 const struct lu_attr *cla,
3082 struct lfsck_layout_req *llr,
3083 struct lu_buf *lov_ea, __u32 idx)
3085 struct lfsck_thread_info *info = lfsck_env_info(env);
3086 struct lu_buf *buf = &info->lti_big_buf;
3087 struct dt_object *tobj;
3088 struct lov_mds_md_v1 *lmm;
3089 struct lov_ost_data_v1 *objs;
/* Returns an lfsck_layout_inconsistency_type value (>= 0) describing
 * the verdict, or a negative errno on failure. */
3096 if (fid_is_zero(pfid)) {
3097 /* client never wrote. */
3098 if (cla->la_size == 0 && cla->la_blocks == 0) {
3099 if (unlikely(cla->la_uid != pla->la_uid ||
3100 cla->la_gid != pla->la_gid))
3101 RETURN (LLIT_INCONSISTENT_OWNER);
3106 RETURN(LLIT_UNMATCHED_PAIR);
3109 if (unlikely(!fid_is_sane(pfid)))
3110 RETURN(LLIT_UNMATCHED_PAIR);
/* Back-pointer names this very parent: consistent iff the stripe
 * index recorded in pfid->f_ver (passed as @idx) matches. */
3112 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
3113 if (llr->llr_lov_idx == idx)
3116 RETURN(LLIT_UNMATCHED_PAIR);
/* Back-pointer names some other MDT-object: look it up. */
3119 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
3121 RETURN(LLIT_UNMATCHED_PAIR);
3124 RETURN(PTR_ERR(tobj));
3126 if (!dt_object_exists(tobj))
3127 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3129 /* Load the tobj's layout EA, in spite of it is a local MDT-object or
3130 * remote one on another MDT. Then check whether the given OST-object
3131 * is in such layout. If yes, it is multiple referenced, otherwise it
3132 * is unmatched referenced case. */
3133 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
3135 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3141 rc = lfsck_layout_verify_header(lmm);
3145 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3146 * been verified in lfsck_layout_verify_header() already. If some
3147 * new magic introduced in the future, then layout LFSCK needs to
3148 * be updated also. */
3149 magic = le32_to_cpu(lmm->lmm_magic);
3150 if (magic == LOV_MAGIC_V1) {
3151 objs = &(lmm->lmm_objects[0]);
3153 LASSERT(magic == LOV_MAGIC_V3);
3154 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Walk the other parent's stripes looking for our child's FID. */
3157 count = le16_to_cpu(lmm->lmm_stripe_count);
3158 for (i = 0; i < count; i++, objs++) {
3159 struct lu_fid *tfid = &info->lti_fid2;
3160 struct ost_id *oi = &info->lti_oi;
3162 if (is_dummy_lov_ost_data(objs))
3165 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3166 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
3167 if (lu_fid_eq(cfid, tfid)) {
3170 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
3174 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3177 lfsck_object_put(env, tobj);
/*
 * Verify one MDT-object/OST-object pair queued for the layout LFSCK
 * assistant thread: classify the inconsistency (dangling reference,
 * unmatched pair, multiple references, inconsistent owner) and invoke
 * the matching repair routine, then account the result in the per-run
 * statistics under com->lc_sem.
 */
3182 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
3183 struct lfsck_component *com,
3184 struct lfsck_layout_req *llr)
3186 struct lfsck_layout *lo = com->lc_file_ram;
3187 struct lfsck_thread_info *info = lfsck_env_info(env);
3188 struct filter_fid_old *pea = &info->lti_old_pfid;
3189 struct lu_fid *pfid = &info->lti_fid;
3190 struct lu_buf *buf = NULL;
3191 struct dt_object *parent = llr->llr_parent->llo_obj;
3192 struct dt_object *child = llr->llr_child;
3193 struct lu_attr *pla = &info->lti_la;
3194 struct lu_attr *cla = &info->lti_la2;
3195 struct lfsck_instance *lfsck = com->lc_lfsck;
3196 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3197 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
3202 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
3204 if (lu_object_is_dying(parent->do_lu.lo_header))
/* Child attrs missing (-ENOENT) while parent alive => the OST-object
 * referenced by the layout does not exist: dangling reference. */
3210 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
3211 if (rc == -ENOENT) {
3212 if (lu_object_is_dying(parent->do_lu.lo_header))
3215 type = LLIT_DANGLING;
/* Read the child's back-pointer xattr; both the old (larger) and the
 * current filter_fid on-disk sizes are accepted. */
3222 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
3223 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
3224 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
3225 rc != sizeof(struct filter_fid))) {
3226 type = LLIT_UNMATCHED_PAIR;
3230 if (rc < 0 && rc != -ENODATA)
3233 if (rc == -ENODATA) {
3236 fid_le_to_cpu(pfid, &pea->ff_parent);
3237 /* OST-object does not save parent FID::f_ver, instead,
3238 * the OST-object index in the parent MDT-object layout
3239 * EA reuses the pfid->f_ver. */
3244 rc = lfsck_layout_check_parent(env, com, parent, pfid,
3245 lu_object_fid(&child->do_lu),
3246 pla, cla, llr, buf, idx);
/* Owners diverged between MDT and OST copies. */
3255 if (unlikely(cla->la_uid != pla->la_uid ||
3256 cla->la_gid != pla->la_gid)) {
3257 type = LLIT_INCONSISTENT_OWNER;
/* Dry-run mode: record the finding, do not repair. */
3262 if (bk->lb_param & LPF_DRYRUN) {
3263 if (type != LLIT_NONE)
/* Dangling repair: recreate the OST-object with the parent's owner
 * and a default regular-file mode. */
3271 memset(cla, 0, sizeof(*cla));
3272 cla->la_uid = pla->la_uid;
3273 cla->la_gid = pla->la_gid;
3274 cla->la_mode = S_IFREG | 0666;
3275 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
3276 LA_ATIME | LA_MTIME | LA_CTIME;
3277 rc = lfsck_layout_recreate_ostobj(env, com, llr, cla);
3279 case LLIT_UNMATCHED_PAIR:
3280 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
3282 case LLIT_MULTIPLE_REFERENCED:
3283 rc = lfsck_layout_repair_multiple_references(env, com, llr,
3286 case LLIT_INCONSISTENT_OWNER:
3287 rc = lfsck_layout_repair_owner(env, com, llr, pla);
/* Statistics update is protected by the component semaphore. */
3297 down_write(&com->lc_sem);
3299 /* If cannot touch the target server,
3300 * mark the LFSCK as INCOMPLETE. */
3301 if (rc == -ENOTCONN || rc == -ESHUTDOWN || rc == -ETIMEDOUT ||
3302 rc == -EHOSTDOWN || rc == -EHOSTUNREACH) {
3303 CERROR("%s: Fail to talk with OST %x: rc = %d.\n",
3304 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
3305 lo->ll_flags |= LF_INCOMPLETE;
3306 lo->ll_objs_skipped++;
3309 lo->ll_objs_failed_phase1++;
3311 } else if (rc > 0) {
3312 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
3313 "unknown type = %d\n", type);
/* ll_objs_repaired[] is indexed from 0, types from 1. */
3315 lo->ll_objs_repaired[type - 1]++;
3317 up_write(&com->lc_sem);
/*
 * Layout LFSCK assistant thread (master side).
 *
 * Phase 1: notify all targets (LE_START), then drain the request list
 * filled by the main engine, handling each item via
 * lfsck_layout_assistant_handle_one().  Phase 2: poll/wait on the other
 * targets, scanning orphans on OSTs that reach phase 2.  On exit, clean
 * up unfinished requests and broadcast the final event
 * (LE_PHASE2_DONE / LE_STOP / LE_PEER_EXIT).
 *
 * FIX(review): restored "&l" sequences lost to entity-mangling:
 * "<a->lta_env" -> "&lta->lta_env", "spin_lock(<ds->ltd_lock)" ->
 * "spin_lock(&ltds->ltd_lock)", "<d->ltd_layout_phase_list" ->
 * "&ltd->ltd_layout_phase_list".  No other code changed.
 */
3322 static int lfsck_layout_assistant(void *args)
3324 struct lfsck_thread_args *lta = args;
3325 struct lu_env *env = &lta->lta_env;
3326 struct lfsck_component *com = lta->lta_com;
3327 struct lfsck_instance *lfsck = lta->lta_lfsck;
3328 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3329 struct lfsck_position *pos = &com->lc_pos_start;
3330 struct lfsck_thread_info *info = lfsck_env_info(env);
3331 struct lfsck_request *lr = &info->lti_lr;
3332 struct lfsck_layout_master_data *llmd = com->lc_data;
3333 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3334 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3335 struct lfsck_layout_req *llr;
3336 struct l_wait_info lwi = { 0 };
/* Broadcast LE_START with the bookmark parameters to all targets. */
3341 memset(lr, 0, sizeof(*lr));
3342 lr->lr_event = LE_START;
3343 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
3345 lr->lr_speed = bk->lb_speed_limit;
3346 lr->lr_version = bk->lb_version;
3347 lr->lr_param = bk->lb_param;
3348 lr->lr_async_windows = bk->lb_async_windows;
3349 lr->lr_flags = LEF_TO_OST;
3350 if (pos->lp_oit_cookie <= 1)
3351 lr->lr_param |= LPF_RESET;
3353 rc = lfsck_layout_master_notify_others(env, com, lr);
3355 CERROR("%s: fail to notify others for layout start: rc = %d\n",
3356 lfsck_lfsck2name(lfsck), rc);
/* Mark ourselves running and wake the main engine waiting on us. */
3360 spin_lock(&llmd->llmd_lock);
3361 thread_set_flags(athread, SVC_RUNNING);
3362 spin_unlock(&llmd->llmd_lock);
3363 wake_up_all(&mthread->t_ctl_waitq);
3366 while (!list_empty(&llmd->llmd_req_list)) {
3367 bool wakeup = false;
3369 if (unlikely(llmd->llmd_exit))
3370 GOTO(cleanup1, rc = llmd->llmd_post_result);
3372 llr = list_entry(llmd->llmd_req_list.next,
3373 struct lfsck_layout_req,
3375 /* Only the lfsck_layout_assistant thread itself can
3376 * remove the "llr" from the head of the list, LFSCK
3377 * engine thread only inserts other new "lld" at the
3378 * end of the list. So it is safe to handle current
3379 * "llr" without the spin_lock. */
3380 rc = lfsck_layout_assistant_handle_one(env, com, llr);
3381 spin_lock(&llmd->llmd_lock);
3382 list_del_init(&llr->llr_list);
3383 llmd->llmd_prefetched--;
3384 /* Wake up the main engine thread only when the list
3385 * is empty or half of the prefetched items have been
3386 * handled to avoid too frequent thread schedule. */
3387 if (llmd->llmd_prefetched == 0 ||
3388 (bk->lb_async_windows != 0 &&
3389 (bk->lb_async_windows >> 1) ==
3390 llmd->llmd_prefetched))
3392 spin_unlock(&llmd->llmd_lock);
3394 wake_up_all(&mthread->t_ctl_waitq);
3396 lfsck_layout_req_fini(env, llr);
3397 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* Idle: sleep until new work, exit, post or double-scan request. */
3401 l_wait_event(athread->t_ctl_waitq,
3402 !lfsck_layout_req_empty(llmd) ||
3404 llmd->llmd_to_post ||
3405 llmd->llmd_to_double_scan,
3408 if (unlikely(llmd->llmd_exit))
3409 GOTO(cleanup1, rc = llmd->llmd_post_result);
3411 if (!list_empty(&llmd->llmd_req_list))
3414 if (llmd->llmd_to_post) {
3415 llmd->llmd_to_post = 0;
3416 LASSERT(llmd->llmd_post_result > 0);
/* Phase 1 finished: tell the targets and wake the master engine. */
3418 memset(lr, 0, sizeof(*lr));
3419 lr->lr_event = LE_PHASE1_DONE;
3420 lr->lr_status = llmd->llmd_post_result;
3421 rc = lfsck_layout_master_notify_others(env, com, lr);
3423 CERROR("%s: failed to notify others "
3424 "for layout post: rc = %d\n",
3425 lfsck_lfsck2name(lfsck), rc);
3427 /* Wakeup the master engine to go ahead. */
3428 wake_up_all(&mthread->t_ctl_waitq);
3431 if (llmd->llmd_to_double_scan) {
3432 llmd->llmd_to_double_scan = 0;
3433 atomic_inc(&lfsck->li_double_scan_count);
3434 llmd->llmd_in_double_scan = 1;
3435 wake_up_all(&mthread->t_ctl_waitq);
3437 com->lc_new_checked = 0;
3438 com->lc_new_scanned = 0;
3439 com->lc_time_last_checkpoint = cfs_time_current();
3440 com->lc_time_next_checkpoint =
3441 com->lc_time_last_checkpoint +
3442 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
3444 while (llmd->llmd_in_double_scan) {
3445 struct lfsck_tgt_descs *ltds =
3446 &lfsck->li_ost_descs;
3447 struct lfsck_tgt_desc *ltd;
3449 rc = lfsck_layout_master_query_others(env, com);
3450 if (lfsck_layout_master_to_orphan(llmd))
3456 /* Pull LFSCK status on related targets once
3457 * per 30 seconds if we are not notified. */
3458 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
3459 cfs_time_seconds(1),
3461 rc = l_wait_event(athread->t_ctl_waitq,
3462 lfsck_layout_master_to_orphan(llmd) ||
3464 !thread_is_running(mthread),
3467 if (unlikely(llmd->llmd_exit ||
3468 !thread_is_running(mthread)))
3469 GOTO(cleanup2, rc = 0);
3471 if (rc == -ETIMEDOUT)
/* Drain the list of OSTs that reached phase 2 and scan their
 * orphan indexes one at a time. */
3478 spin_lock(&ltds->ltd_lock);
3480 &llmd->llmd_ost_phase2_list)) {
3482 llmd->llmd_ost_phase2_list.next,
3483 struct lfsck_tgt_desc,
3484 ltd_layout_phase_list);
3486 &ltd->ltd_layout_phase_list);
3487 spin_unlock(&ltds->ltd_lock);
3489 if (bk->lb_param & LPF_ALL_TGT) {
3490 rc = lfsck_layout_scan_orphan(
3493 bk->lb_param & LPF_FAILOUT)
3497 if (unlikely(llmd->llmd_exit ||
3498 !thread_is_running(mthread)))
3499 GOTO(cleanup2, rc = 0);
3501 spin_lock(&ltds->ltd_lock);
/* No target left in phase 1: double scan is complete. */
3504 if (list_empty(&llmd->llmd_ost_phase1_list)) {
3505 spin_unlock(&ltds->ltd_lock);
3506 GOTO(cleanup2, rc = 1);
3508 spin_unlock(&ltds->ltd_lock);
3514 /* Cleanup the unfinished requests. */
3515 spin_lock(&llmd->llmd_lock);
3517 llmd->llmd_assistant_status = rc;
3519 while (!list_empty(&llmd->llmd_req_list)) {
3520 llr = list_entry(llmd->llmd_req_list.next,
3521 struct lfsck_layout_req,
3523 list_del_init(&llr->llr_list);
3524 llmd->llmd_prefetched--;
/* Drop the lock around req_fini: it may block. */
3525 spin_unlock(&llmd->llmd_lock);
3526 lfsck_layout_req_fini(env, llr);
3527 spin_lock(&llmd->llmd_lock);
3529 spin_unlock(&llmd->llmd_lock);
3531 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
3532 llmd->llmd_prefetched);
/* Choose the final event/status to broadcast based on how we exited. */
3535 memset(lr, 0, sizeof(*lr));
3537 lr->lr_event = LE_PHASE2_DONE;
3539 } else if (rc == 0) {
3540 if (lfsck->li_flags & LPF_ALL_TGT) {
3541 lr->lr_event = LE_STOP;
3542 lr->lr_status = LS_STOPPED;
3544 lr->lr_event = LE_PEER_EXIT;
3545 switch (lfsck->li_status) {
3548 lr->lr_status = LS_CO_PAUSED;
3552 lr->lr_status = LS_CO_STOPPED;
3555 CERROR("%s: unknown status: rc = %d\n",
3556 lfsck_lfsck2name(lfsck),
3558 lr->lr_status = LS_CO_FAILED;
3563 if (lfsck->li_flags & LPF_ALL_TGT) {
3564 lr->lr_event = LE_STOP;
3565 lr->lr_status = LS_FAILED;
3567 lr->lr_event = LE_PEER_EXIT;
3568 lr->lr_status = LS_CO_FAILED;
3572 rc1 = lfsck_layout_master_notify_others(env, com, lr);
3574 CERROR("%s: failed to notify others for layout quit: rc = %d\n",
3575 lfsck_lfsck2name(lfsck), rc1);
3579 /* Under force exit case, some requests may be just freed without
3580 * verification, those objects should be re-handled when next run.
3581 * So not update the on-disk tracing file under such case. */
3582 if (!llmd->llmd_exit)
3583 rc1 = lfsck_layout_double_scan_result(env, com, rc);
3586 if (llmd->llmd_in_double_scan)
3587 atomic_dec(&lfsck->li_double_scan_count);
/* Publish our final status and signal the master engine we stopped. */
3589 spin_lock(&llmd->llmd_lock);
3590 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
3591 thread_set_flags(athread, SVC_STOPPED);
3592 wake_up_all(&mthread->t_ctl_waitq);
3593 spin_unlock(&llmd->llmd_lock);
3594 lfsck_thread_args_fini(lta);
/*
 * Interpret callback for the async LE_QUERY RPC sent by an OST (slave)
 * to the LFSCK master.  If the master reports it is no longer scanning
 * (or the RPC failed, presumed crash), drop this master target from the
 * slave's tracking list.  Releases the references taken when the
 * request was queued (component, export, llst).
 */
3600 lfsck_layout_slave_async_interpret(const struct lu_env *env,
3601 struct ptlrpc_request *req,
3604 struct lfsck_layout_slave_async_args *llsaa = args;
3605 struct obd_export *exp = llsaa->llsaa_exp;
3606 struct lfsck_component *com = llsaa->llsaa_com;
3607 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
3608 struct lfsck_layout_slave_data *llsd = com->lc_data;
3612 /* It is quite probably caused by target crash,
3613 * to make the LFSCK can go ahead, assume that
3614 * the target finished the LFSCK prcoessing. */
3617 struct lfsck_reply *lr;
3619 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
/* Master still scanning: keep tracking it; otherwise fall through
 * to the removal below. */
3620 if (lr->lr_status != LS_SCANNING_PHASE1 &&
3621 lr->lr_status != LS_SCANNING_PHASE2)
3625 lfsck_layout_llst_del(llsd, llst);
3626 lfsck_layout_llst_put(llst);
3627 lfsck_component_put(env, com);
3628 class_export_put(exp);
/*
 * Pack and queue an asynchronous LFSCK_QUERY RPC to the master
 * identified by @exp, to be interpreted by
 * lfsck_layout_slave_async_interpret().  The request is added to @set;
 * the caller waits on the set.  Caller's references on @com/@exp/@llst
 * are transferred to the interpret callback on success.
 */
3633 static int lfsck_layout_async_query(const struct lu_env *env,
3634 struct lfsck_component *com,
3635 struct obd_export *exp,
3636 struct lfsck_layout_slave_target *llst,
3637 struct lfsck_request *lr,
3638 struct ptlrpc_request_set *set)
3640 struct lfsck_layout_slave_async_args *llsaa;
3641 struct ptlrpc_request *req;
3642 struct lfsck_request *tmp;
3646 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
3650 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
3652 ptlrpc_request_free(req);
/* Copy the caller-prepared request body into the RPC capsule. */
3656 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3658 ptlrpc_request_set_replen(req);
/* Stash the async args for the interpret callback. */
3660 llsaa = ptlrpc_req_async_args(req);
3661 llsaa->llsaa_exp = exp;
3662 llsaa->llsaa_com = lfsck_component_get(com);
3663 llsaa->llsaa_llst = llst;
3664 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
3665 ptlrpc_set_add_req(set, req);
/*
 * Pack and queue an asynchronous LFSCK_NOTIFY RPC carrying @lr to the
 * peer identified by @exp.  Fire-and-forget: no interpret callback is
 * installed; the caller waits on @set.
 */
3670 static int lfsck_layout_async_notify(const struct lu_env *env,
3671 struct obd_export *exp,
3672 struct lfsck_request *lr,
3673 struct ptlrpc_request_set *set)
3675 struct ptlrpc_request *req;
3676 struct lfsck_request *tmp;
3680 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
3684 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
3686 ptlrpc_request_free(req);
/* Copy the caller-prepared request body into the RPC capsule. */
3690 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3692 ptlrpc_request_set_replen(req);
3693 ptlrpc_set_add_req(set, req);
/*
 * Slave-side: send an LE_QUERY to every tracked LFSCK master in
 * llsd_master_list and wait for all replies.
 *
 * The llsd_touch_gen counter plus move-to-tail under llsd_lock
 * implements a single pass over the list that is safe against
 * concurrent insert/delete: an entry already stamped with the current
 * generation has been visited, so the walk terminates.
 */
3699 lfsck_layout_slave_query_master(const struct lu_env *env,
3700 struct lfsck_component *com)
3702 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3703 struct lfsck_instance *lfsck = com->lc_lfsck;
3704 struct lfsck_layout_slave_data *llsd = com->lc_data;
3705 struct lfsck_layout_slave_target *llst;
3706 struct obd_export *exp;
3707 struct ptlrpc_request_set *set;
3712 set = ptlrpc_prep_set();
3716 memset(lr, 0, sizeof(*lr));
3717 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3718 lr->lr_event = LE_QUERY;
3719 lr->lr_active = LT_LAYOUT;
3721 llsd->llsd_touch_gen++;
3722 spin_lock(&llsd->llsd_lock);
3723 while (!list_empty(&llsd->llsd_master_list)) {
3724 llst = list_entry(llsd->llsd_master_list.next,
3725 struct lfsck_layout_slave_target,
/* Already visited in this pass: the walk is done. */
3727 if (llst->llst_gen == llsd->llsd_touch_gen)
3730 llst->llst_gen = llsd->llsd_touch_gen;
3731 list_del(&llst->llst_list);
3732 list_add_tail(&llst->llst_list,
3733 &llsd->llsd_master_list);
/* Hold a ref across the unlocked RPC-queuing section. */
3734 atomic_inc(&llst->llst_ref);
3735 spin_unlock(&llsd->llsd_lock);
3737 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP connection to this master: stop tracking it. */
3740 lfsck_layout_llst_del(llsd, llst);
3741 lfsck_layout_llst_put(llst);
3742 spin_lock(&llsd->llsd_lock);
3746 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
3748 CERROR("%s: slave fail to query %s for layout: "
3749 "rc = %d\n", lfsck_lfsck2name(lfsck),
3750 exp->exp_obd->obd_name, rc);
3752 lfsck_layout_llst_put(llst);
3753 class_export_put(exp);
3755 spin_lock(&llsd->llsd_lock);
3757 spin_unlock(&llsd->llsd_lock);
3759 rc = ptlrpc_set_wait(set);
3760 ptlrpc_set_destroy(set);
3762 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * Slave-side: broadcast @event/@result (LEF_FROM_OST) to every tracked
 * LFSCK master in llsd_master_list and wait for the RPCs to complete.
 * Uses the same generation-stamped single-pass list walk as
 * lfsck_layout_slave_query_master().
 */
3766 lfsck_layout_slave_notify_master(const struct lu_env *env,
3767 struct lfsck_component *com,
3768 enum lfsck_events event, int result)
3770 struct lfsck_instance *lfsck = com->lc_lfsck;
3771 struct lfsck_layout_slave_data *llsd = com->lc_data;
3772 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3773 struct lfsck_layout_slave_target *llst;
3774 struct obd_export *exp;
3775 struct ptlrpc_request_set *set;
3779 set = ptlrpc_prep_set();
3783 memset(lr, 0, sizeof(*lr));
3784 lr->lr_event = event;
3785 lr->lr_flags = LEF_FROM_OST;
3786 lr->lr_status = result;
3787 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3788 lr->lr_active = LT_LAYOUT;
3789 llsd->llsd_touch_gen++;
3790 spin_lock(&llsd->llsd_lock);
3791 while (!list_empty(&llsd->llsd_master_list)) {
3792 llst = list_entry(llsd->llsd_master_list.next,
3793 struct lfsck_layout_slave_target,
/* Already visited in this pass: the walk is done. */
3795 if (llst->llst_gen == llsd->llsd_touch_gen)
3798 llst->llst_gen = llsd->llsd_touch_gen;
3799 list_del(&llst->llst_list);
3800 list_add_tail(&llst->llst_list,
3801 &llsd->llsd_master_list);
3802 atomic_inc(&llst->llst_ref);
3803 spin_unlock(&llsd->llsd_lock);
3805 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP connection to this master: stop tracking it. */
3808 lfsck_layout_llst_del(llsd, llst);
3809 lfsck_layout_llst_put(llst);
3810 spin_lock(&llsd->llsd_lock);
3814 rc = lfsck_layout_async_notify(env, exp, lr, set);
3816 CERROR("%s: slave fail to notify %s for layout: "
3817 "rc = %d\n", lfsck_lfsck2name(lfsck),
3818 exp->exp_obd->obd_name, rc);
3819 lfsck_layout_llst_put(llst);
3820 class_export_put(exp);
3821 spin_lock(&llsd->llsd_lock);
3823 spin_unlock(&llsd->llsd_lock);
3825 ptlrpc_set_wait(set);
3826 ptlrpc_set_destroy(set);
/*
 * Reset the in-memory layout LFSCK tracing file to a pristine LS_INIT
 * state and persist it.  When @init is false, the cumulative success
 * count and last-completion time survive the reset.
 */
3833 static int lfsck_layout_reset(const struct lu_env *env,
3834 struct lfsck_component *com, bool init)
3836 struct lfsck_layout *lo = com->lc_file_ram;
3839 down_write(&com->lc_sem);
3841 memset(lo, 0, com->lc_file_size);
/* Non-init reset: preserve historical counters across the wipe. */
3843 __u32 count = lo->ll_success_count;
3844 __u64 last_time = lo->ll_time_last_complete;
3846 memset(lo, 0, com->lc_file_size);
3847 lo->ll_success_count = count;
3848 lo->ll_time_last_complete = last_time;
3851 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
3852 lo->ll_status = LS_INIT;
3854 rc = lfsck_layout_store(env, com);
3855 up_write(&com->lc_sem);
/*
 * Account a phase-1 failure.  Optionally counts a newly checked object
 * and, on the first inconsistency of the run, records the current OIT
 * iterator position so a later dry-run re-scan can start from it.
 */
3860 static void lfsck_layout_fail(const struct lu_env *env,
3861 struct lfsck_component *com, bool new_checked)
3863 struct lfsck_layout *lo = com->lc_file_ram;
3865 down_write(&com->lc_sem);
3867 com->lc_new_checked++;
3868 lo->ll_objs_failed_phase1++;
3869 if (lo->ll_pos_first_inconsistent == 0) {
3870 struct lfsck_instance *lfsck = com->lc_lfsck;
/* Remember where the first inconsistency was found. */
3872 lo->ll_pos_first_inconsistent =
3873 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
3876 up_write(&com->lc_sem);
/*
 * Master-side checkpoint: wait for the assistant to drain its request
 * list, then persist the current scan position, accumulated phase-1
 * runtime and checked-object counters into the tracing file.  When
 * @init is true, records the latest start position instead.
 */
3879 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
3880 struct lfsck_component *com, bool init)
3882 struct lfsck_instance *lfsck = com->lc_lfsck;
3883 struct lfsck_layout *lo = com->lc_file_ram;
3884 struct lfsck_layout_master_data *llmd = com->lc_data;
3885 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3886 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3887 struct l_wait_info lwi = { 0 };
/* Nothing new since the last checkpoint: skip the store. */
3890 if (com->lc_new_checked == 0 && !init)
/* Let the assistant catch up so the stored position is consistent. */
3893 l_wait_event(mthread->t_ctl_waitq,
3894 list_empty(&llmd->llmd_req_list) ||
3895 !thread_is_running(mthread) ||
3896 thread_is_stopped(athread),
3899 if (!thread_is_running(mthread) || thread_is_stopped(athread))
3902 down_write(&com->lc_sem);
3904 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
3906 lo->ll_pos_last_checkpoint =
3907 lfsck->li_pos_current.lp_oit_cookie;
3908 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
3909 HALF_SEC - lfsck->li_time_last_checkpoint);
3910 lo->ll_time_last_checkpoint = cfs_time_current_sec();
3911 lo->ll_objs_checked_phase1 += com->lc_new_checked;
3912 com->lc_new_checked = 0;
3915 rc = lfsck_layout_store(env, com);
3916 up_write(&com->lc_sem);
/*
 * Slave-side checkpoint: same bookkeeping as the master variant but
 * without an assistant thread to synchronize with — persist position,
 * phase-1 runtime and checked counters into the tracing file.
 */
3921 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
3922 struct lfsck_component *com, bool init)
3924 struct lfsck_instance *lfsck = com->lc_lfsck;
3925 struct lfsck_layout *lo = com->lc_file_ram;
/* Nothing new since the last checkpoint: skip the store. */
3928 if (com->lc_new_checked == 0 && !init)
3931 down_write(&com->lc_sem);
3934 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
3936 lo->ll_pos_last_checkpoint =
3937 lfsck->li_pos_current.lp_oit_cookie;
3938 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
3939 HALF_SEC - lfsck->li_time_last_checkpoint);
3940 lo->ll_time_last_checkpoint = cfs_time_current_sec();
3941 lo->ll_objs_checked_phase1 += com->lc_new_checked;
3942 com->lc_new_checked = 0;
3945 rc = lfsck_layout_store(env, com);
3947 up_write(&com->lc_sem);
/*
 * Prepare the layout component for a new scan: pick the start position
 * and phase based on the previous run's status.  A completed/partial
 * previous run (or an orphan-handling request) forces a reset; a
 * previously fully-scanned run may go straight to phase 2 unless a
 * dry-run result is being replayed from the first inconsistency.
 */
3952 static int lfsck_layout_prep(const struct lu_env *env,
3953 struct lfsck_component *com,
3954 struct lfsck_start *start)
3956 struct lfsck_instance *lfsck = com->lc_lfsck;
3957 struct lfsck_layout *lo = com->lc_file_ram;
3958 struct lfsck_position *pos = &com->lc_pos_start;
3960 fid_zero(&pos->lp_dir_parent);
3961 pos->lp_dir_cookie = 0;
3962 if (lo->ll_status == LS_COMPLETED ||
3963 lo->ll_status == LS_PARTIAL ||
3964 /* To handle orphan, must scan from the beginning. */
3965 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
3968 rc = lfsck_layout_reset(env, com, false);
3973 down_write(&com->lc_sem);
3974 lo->ll_time_latest_start = cfs_time_current_sec();
3975 spin_lock(&lfsck->li_lock);
3976 if (lo->ll_flags & LF_SCANNED_ONCE) {
3977 if (!lfsck->li_drop_dryrun ||
3978 lo->ll_pos_first_inconsistent == 0) {
/* Full first pass already done: resume at phase 2. */
3979 lo->ll_status = LS_SCANNING_PHASE2;
3980 list_del_init(&com->lc_link);
3981 list_add_tail(&com->lc_link,
3982 &lfsck->li_list_double_scan);
3983 pos->lp_oit_cookie = 0;
/* Dry-run replay: redo phase 1 from the first recorded
 * inconsistency with counters cleared. */
3987 lo->ll_status = LS_SCANNING_PHASE1;
3988 lo->ll_run_time_phase1 = 0;
3989 lo->ll_run_time_phase2 = 0;
3990 lo->ll_objs_checked_phase1 = 0;
3991 lo->ll_objs_checked_phase2 = 0;
3992 lo->ll_objs_failed_phase1 = 0;
3993 lo->ll_objs_failed_phase2 = 0;
3994 for (i = 0; i < LLIT_MAX; i++)
3995 lo->ll_objs_repaired[i] = 0;
3997 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
3998 fid_zero(&com->lc_fid_latest_scanned_phase2);
/* Fresh or resumed phase-1 scan. */
4001 lo->ll_status = LS_SCANNING_PHASE1;
4002 if (!lfsck->li_drop_dryrun ||
4003 lo->ll_pos_first_inconsistent == 0)
4004 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
4006 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4008 spin_unlock(&lfsck->li_lock);
4009 up_write(&com->lc_sem);
/*
 * Slave-side preparation: run the common prep, register the requesting
 * master (lsp->lsp_index) on the slave's master list, and for orphan
 * handling build the in-memory rbtree used to track accessed FIDs.
 *
 * NOTE(review): the early RETURNs and rc declaration are elided in this
 * excerpt.
 */
4014 static int lfsck_layout_slave_prep(const struct lu_env *env,
4015 struct lfsck_component *com,
4016 struct lfsck_start_param *lsp)
4018 struct lfsck_layout_slave_data *llsd = com->lc_data;
4019 struct lfsck_start *start = lsp->lsp_start;
4022 rc = lfsck_layout_prep(env, com, start);
4023 if (rc != 0 || !lsp->lsp_index_valid)
/* Remember which master (MDT) triggered this scan. */
4026 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
4027 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
4028 LASSERT(!llsd->llsd_rbtree_valid);
/* Set up the FID-access bitmap rbtree under the write lock so
 * concurrent readers see a consistent tree. */
4030 write_lock(&llsd->llsd_rb_lock);
4031 rc = lfsck_rbtree_setup(env, com);
4032 write_unlock(&llsd->llsd_rb_lock);
/*
 * Master-side preparation: run the common prep, reset the assistant
 * state, then spawn the "lfsck_layout" assistant thread and wait until
 * it is running (or has stopped with an error).
 *
 * NOTE(review): some lines (IS_ERR(lta) check, GOTOs, RETURN) are elided
 * in this excerpt.
 */
4038 static int lfsck_layout_master_prep(const struct lu_env *env,
4039 struct lfsck_component *com,
4040 struct lfsck_start_param *lsp)
4042 struct lfsck_instance *lfsck = com->lc_lfsck;
4043 struct lfsck_layout_master_data *llmd = com->lc_data;
4044 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4045 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4046 struct lfsck_thread_args *lta;
4050 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
/* Reset all assistant-thread handshake state before starting it. */
4054 llmd->llmd_assistant_status = 0;
4055 llmd->llmd_post_result = 0;
4056 llmd->llmd_to_post = 0;
4057 llmd->llmd_to_double_scan = 0;
4058 llmd->llmd_in_double_scan = 0;
4059 llmd->llmd_exit = 0;
4060 thread_set_flags(athread, 0);
4062 lta = lfsck_thread_args_init(lfsck, com, lsp);
4064 RETURN(PTR_ERR(lta));
/* kthread_run() returns a task_struct pointer or ERR_PTR; only the
 * error case is interesting here, hence the PTR_ERR/IS_ERR_VALUE. */
4066 rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
4067 if (IS_ERR_VALUE(rc)) {
4068 CERROR("%s: Cannot start LFSCK layout assistant thread: "
4069 "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
4070 lfsck_thread_args_fini(lta);
4072 struct l_wait_info lwi = { 0 };
/* Wait for the assistant to come up before the master proceeds. */
4074 l_wait_event(mthread->t_ctl_waitq,
4075 thread_is_running(athread) ||
4076 thread_is_stopped(athread),
/* Assistant stopped immediately: propagate its failure status. */
4078 if (unlikely(!thread_is_running(athread)))
4079 rc = llmd->llmd_assistant_status;
4087 /* Pre-fetch the attribute for each stripe in the given layout EA. */
/*
 * For every OST stripe referenced by @lmm (the parent MDT-object's layout
 * EA), locate the target OST-object, declare the attr/xattr fetches, and
 * queue an lfsck_layout_req on llmd_req_list for the assistant thread to
 * process asynchronously. Throttled by the bookmark's lb_async_windows so
 * the pre-fetch pipeline does not run arbitrarily far ahead.
 *
 * NOTE(review): many lines (declarations, GOTOs, labels, RETURN) are
 * elided in this excerpt; comments cover only the visible statements.
 */
4088 static int lfsck_layout_scan_stripes(const struct lu_env *env,
4089 struct lfsck_component *com,
4090 struct dt_object *parent,
4091 struct lov_mds_md_v1 *lmm)
4093 struct lfsck_thread_info *info = lfsck_env_info(env);
4094 struct lfsck_instance *lfsck = com->lc_lfsck;
4095 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4096 struct lfsck_layout *lo = com->lc_file_ram;
4097 struct lfsck_layout_master_data *llmd = com->lc_data;
4098 struct lfsck_layout_object *llo = NULL;
4099 struct lov_ost_data_v1 *objs;
4100 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
4101 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4102 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4103 struct l_wait_info lwi = { 0 };
/* Buffer sized for the old filter_fid format when reading the FID EA. */
4112 buf = lfsck_buf_get(env, &info->lti_old_pfid,
4113 sizeof(struct filter_fid_old));
4114 count = le16_to_cpu(lmm->lmm_stripe_count);
4115 gen = le16_to_cpu(lmm->lmm_layout_gen);
4116 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4117 * been verified in lfsck_layout_verify_header() already. If some
4118 * new magic introduced in the future, then layout LFSCK needs to
4119 * be updated also. */
4120 magic = le32_to_cpu(lmm->lmm_magic);
4121 if (magic == LOV_MAGIC_V1) {
4122 objs = &(lmm->lmm_objects[0]);
4124 LASSERT(magic == LOV_MAGIC_V3);
/* V3 carries a pool name before the objects array; skip over it. */
4125 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
4128 for (i = 0; i < count; i++, objs++) {
4129 struct lu_fid *fid = &info->lti_fid;
4130 struct ost_id *oi = &info->lti_oi;
4131 struct lfsck_layout_req *llr;
4132 struct lfsck_tgt_desc *tgt = NULL;
4133 struct dt_object *cobj = NULL;
4135 le32_to_cpu(objs->l_ost_idx);
4136 bool wakeup = false;
/* Holes in the layout (dummy entries) carry no OST-object. */
4138 if (is_dummy_lov_ost_data(objs))
/* Flow control: wait until the assistant drains below the async
 * window (0 means unlimited) before pre-fetching more stripes. */
4141 l_wait_event(mthread->t_ctl_waitq,
4142 bk->lb_async_windows == 0 ||
4143 llmd->llmd_prefetched < bk->lb_async_windows ||
4144 !thread_is_running(mthread) ||
4145 thread_is_stopped(athread),
4148 if (unlikely(!thread_is_running(mthread)) ||
4149 thread_is_stopped(athread))
4152 ostid_le_to_cpu(&objs->l_ost_oi, oi);
4153 ostid_to_fid(fid, oi, index);
4154 tgt = lfsck_tgt_get(ltds, index);
4155 if (unlikely(tgt == NULL)) {
4156 CERROR("%s: Cannot talk with OST %x which did not join "
4157 "the layout LFSCK.\n",
4158 lfsck_lfsck2name(lfsck), index);
/* Mark the scan incomplete rather than failing outright. */
4159 lo->ll_flags |= LF_INCOMPLETE;
4163 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
/* Declare the attr/FID-xattr reads so the assistant's later
 * fetches are pre-warmed. */
4169 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
4173 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
4179 llo = lfsck_layout_object_init(env, parent, gen);
4186 llr = lfsck_layout_req_init(llo, cobj, index, i);
4193 spin_lock(&llmd->llmd_lock);
/* Assistant already failed: drop this request and bail out with
 * its status. */
4194 if (llmd->llmd_assistant_status < 0) {
4195 spin_unlock(&llmd->llmd_lock);
4196 lfsck_layout_req_fini(env, llr);
4198 RETURN(llmd->llmd_assistant_status);
4201 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
/* List transitioned empty -> non-empty: assistant may be asleep. */
4202 if (llmd->llmd_prefetched == 0)
4205 llmd->llmd_prefetched++;
4206 spin_unlock(&llmd->llmd_lock);
4208 wake_up_all(&athread->t_ctl_waitq);
/* Per-stripe statistics, maintained under the component semaphore. */
4211 down_write(&com->lc_sem);
4212 com->lc_new_checked++;
4214 lo->ll_objs_failed_phase1++;
4215 up_write(&com->lc_sem);
4217 if (cobj != NULL && !IS_ERR(cobj))
4218 lu_object_put(env, &cobj->do_lu);
4220 if (likely(tgt != NULL))
4223 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
4230 if (llo != NULL && !IS_ERR(llo))
4231 lfsck_layout_object_put(env, llo);
4236 /* For the given object, read its layout EA locally. For each stripe, pre-fetch
4237 * the OST-object's attribute and generate a structure lfsck_layout_req on the
4238 * list ::llmd_req_list.
4240 * For each request on above list, the lfsck_layout_assistant thread compares
4241 * the OST side attribute with local attribute, if inconsistent, then repair it.
4243 * All above processing is async mode with pipeline. */
/*
 * NOTE(review): several lines (declarations, GOTO targets/labels, the
 * IS_ERR(handle) check body, RETURN) are elided in this excerpt.
 */
4244 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
4245 struct lfsck_component *com,
4246 struct dt_object *obj)
4248 struct lfsck_thread_info *info = lfsck_env_info(env);
4249 struct ost_id *oi = &info->lti_oi;
4250 struct lfsck_layout *lo = com->lc_file_ram;
4251 struct lfsck_layout_master_data *llmd = com->lc_data;
4252 struct lfsck_instance *lfsck = com->lc_lfsck;
4253 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4254 struct thandle *handle = NULL;
4255 struct lu_buf *buf = &info->lti_big_buf;
4256 struct lov_mds_md_v1 *lmm = NULL;
4257 struct dt_device *dev = lfsck->li_bottom;
4258 struct lustre_handle lh = { 0 };
/* Remember the shared buffer's original length; restored on exit. */
4259 ssize_t buflen = buf->lb_len;
4261 bool locked = false;
4262 bool stripe = false;
/* Only regular files carry a LOV layout EA. */
4265 if (!S_ISREG(lfsck_object_type(obj)))
4268 if (llmd->llmd_assistant_status < 0)
4269 GOTO(out, rc = -ESRCH);
/* Expected lmm_oi, in little-endian, derived from the object's FID. */
4271 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
4272 lmm_oi_cpu_to_le(oi, oi);
4273 dt_read_lock(env, obj, 0);
4277 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
4283 rc = lfsck_layout_verify_header(lmm);
/* lmm_oi matches the FID: the EA header is consistent, go scan
 * the stripes. */
4287 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
4288 GOTO(out, stripe = true);
4290 /* Inconsistent lmm_oi, should be repaired. */
4291 CDEBUG(D_LFSCK, "Repair bad lmm_oi for "DFID"\n",
4292 PFID(lfsck_dto2fid(obj)));
/* Dryrun: count the would-be repair but do not modify anything. */
4294 if (bk->lb_param & LPF_DRYRUN) {
4295 down_write(&com->lc_sem);
4296 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4297 up_write(&com->lc_sem);
4299 GOTO(out, stripe = true);
/* Repair path: drop the read lock, take the LDLM layout/xattr lock,
 * then rewrite the LOV EA in a local transaction. */
4302 if (!lustre_handle_is_used(&lh)) {
4303 dt_read_unlock(env, obj);
4305 buf->lb_len = buflen;
4306 rc = lfsck_layout_lock(env, com, obj, &lh,
4307 MDS_INODELOCK_LAYOUT |
4308 MDS_INODELOCK_XATTR);
4312 handle = dt_trans_create(env, dev);
4314 GOTO(out, rc = PTR_ERR(handle));
4316 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4317 LU_XATTR_REPLACE, handle);
4321 rc = dt_trans_start_local(env, dev, handle);
4325 dt_write_lock(env, obj, 0);
4332 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4333 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
4337 down_write(&com->lc_sem);
4338 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4339 up_write(&com->lc_sem);
4341 GOTO(out, stripe = true);
/* Cleanup: lustre_handle_is_used() distinguishes the write-locked
 * repair path from the read-locked fast path. */
4345 if (lustre_handle_is_used(&lh))
4346 dt_write_unlock(env, obj);
4348 dt_read_unlock(env, obj);
4351 if (handle != NULL && !IS_ERR(handle))
4352 dt_trans_stop(env, dev, handle);
4354 lfsck_layout_unlock(&lh);
4356 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
4358 down_write(&com->lc_sem);
4359 com->lc_new_checked++;
4361 lo->ll_objs_failed_phase1++;
4362 up_write(&com->lc_sem);
/* Restore the shared buffer's length for the next caller. */
4364 buf->lb_len = buflen;
/*
 * Slave-side per-object handler for the OI-table iteration: track the
 * largest object ID seen per sequence so crashed/stale LAST_ID files can
 * be detected and rebuilt, and feed the FID into the orphan-detection
 * rbtree bitmap.
 *
 * NOTE(review): several lines (seq/oid extraction, allocation of lls,
 * GOTO targets, RETURN) are elided in this excerpt.
 */
4369 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
4370 struct lfsck_component *com,
4371 struct dt_object *obj)
4373 struct lfsck_instance *lfsck = com->lc_lfsck;
4374 struct lfsck_layout *lo = com->lc_file_ram;
4375 const struct lu_fid *fid = lfsck_dto2fid(obj);
4376 struct lfsck_layout_slave_data *llsd = com->lc_data;
4377 struct lfsck_layout_seq *lls;
4383 LASSERT(llsd != NULL);
/* Record this FID as "seen locally" for later orphan matching. */
4385 lfsck_rbtree_update_bitmap(env, com, fid, false);
4387 down_write(&com->lc_sem);
4388 if (fid_is_idif(fid))
/* Skip FIDs that are neither IDIF nor normal OST-object FIDs. */
4390 else if (!fid_is_norm(fid) ||
4391 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
4392 GOTO(unlock, rc = 0);
4395 com->lc_new_checked++;
/* Per-sequence bookkeeping: find or create the lfsck_layout_seq. */
4397 lls = lfsck_layout_seq_lookup(llsd, seq);
4400 if (unlikely(lls == NULL))
4401 GOTO(unlock, rc = -ENOMEM);
4403 INIT_LIST_HEAD(&lls->lls_list);
4405 rc = lfsck_layout_lastid_load(env, com, lls);
4407 lo->ll_objs_failed_phase1++;
4412 lfsck_layout_seq_insert(llsd, lls);
4415 if (unlikely(fid_is_last_id(fid)))
4416 GOTO(unlock, rc = 0);
4419 if (oid > lls->lls_lastid_known)
4420 lls->lls_lastid_known = oid;
/* Object ID beyond the on-disk LAST_ID: either OFD created new
 * objects during the scan (reload and recheck) or LAST_ID crashed. */
4422 if (oid > lls->lls_lastid) {
4423 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
4424 /* OFD may create new objects during LFSCK scanning. */
4425 rc = lfsck_layout_lastid_reload(env, com, lls);
4426 if (unlikely(rc != 0))
4427 CWARN("%s: failed to reload LAST_ID for "LPX64
4429 lfsck_lfsck2name(com->lc_lfsck),
4431 if (oid <= lls->lls_lastid)
4432 GOTO(unlock, rc = 0);
4434 LASSERT(lfsck->li_out_notify != NULL);
/* Tell the upper layer LAST_ID needs rebuilding. */
4436 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4437 LE_LASTID_REBUILDING);
4438 lo->ll_flags |= LF_CRASHED_LASTID;
4441 lls->lls_lastid = oid;
4445 GOTO(unlock, rc = 0);
4448 up_write(&com->lc_sem);
/*
 * Directory-entry hook for the layout component. Layout LFSCK works on
 * the OI table, not on namespace entries, so this is a no-op stub
 * (body elided in this excerpt; presumably returns 0 — TODO confirm).
 */
4453 static int lfsck_layout_exec_dir(const struct lu_env *env,
4454 struct lfsck_component *com,
4455 struct dt_object *obj,
4456 struct lu_dirent *ent)
/*
 * Master-side post-processing after phase 1: hand the result to the
 * assistant thread, wait for it to drain the request list (or stop),
 * then update the component status/lists and persist the trace file.
 *
 * NOTE(review): a few lines (lwi for the wait, init-branch bodies,
 * RETURN) are elided in this excerpt.
 */
4461 static int lfsck_layout_master_post(const struct lu_env *env,
4462 struct lfsck_component *com,
4463 int result, bool init)
4465 struct lfsck_instance *lfsck = com->lc_lfsck;
4466 struct lfsck_layout *lo = com->lc_file_ram;
4467 struct lfsck_layout_master_data *llmd = com->lc_data;
4468 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4469 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4470 struct l_wait_info lwi = { 0 };
4475 llmd->llmd_post_result = result;
4476 llmd->llmd_to_post = 1;
/* Failure/stop: ask the assistant to exit instead of double-scan. */
4477 if (llmd->llmd_post_result <= 0)
4478 llmd->llmd_exit = 1;
4480 wake_up_all(&athread->t_ctl_waitq);
/* On success wait for the assistant to drain the pipeline; on
 * failure just wait for it to stop. */
4481 l_wait_event(mthread->t_ctl_waitq,
4482 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
4483 thread_is_stopped(athread),
4486 if (llmd->llmd_assistant_status < 0)
4487 result = llmd->llmd_assistant_status;
4489 down_write(&com->lc_sem);
4490 spin_lock(&lfsck->li_lock);
4491 /* When LFSCK failed, there may be some prefetched objects those are
4492 * not been processed yet, we do not know the exactly position, then
4493 * just restart from last check-point next time. */
4494 if (!init && !llmd->llmd_exit)
4495 lo->ll_pos_last_checkpoint =
4496 lfsck->li_pos_current.lp_oit_cookie;
/* result > 0: phase 1 finished; move on to phase-2 double scan. */
4499 lo->ll_status = LS_SCANNING_PHASE2;
4500 lo->ll_flags |= LF_SCANNED_ONCE;
4501 lo->ll_flags &= ~LF_UPGRADE;
4502 list_del_init(&com->lc_link);
4503 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
4504 } else if (result == 0) {
/* Stopped/paused: inherit the instance status. */
4505 lo->ll_status = lfsck->li_status;
4506 if (lo->ll_status == 0)
4507 lo->ll_status = LS_STOPPED;
4508 if (lo->ll_status != LS_PAUSED) {
4509 list_del_init(&com->lc_link);
4510 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4513 lo->ll_status = LS_FAILED;
4514 list_del_init(&com->lc_link);
4515 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4517 spin_unlock(&lfsck->li_lock);
/* Roll phase-1 statistics into the trace file (same pattern as the
 * checkpoint functions). */
4520 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4521 HALF_SEC - lfsck->li_time_last_checkpoint);
4522 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4523 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4524 com->lc_new_checked = 0;
4527 rc = lfsck_layout_store(env, com);
4528 up_write(&com->lc_sem);
/*
 * Slave-side post-processing after phase 1: flush LAST_ID files, update
 * status/lists, persist the trace file, notify the master that phase 1
 * is done, and (on failure) tear down the orphan rbtree.
 *
 * NOTE(review): some lines (done flag, notify-event selection, RETURN)
 * are elided in this excerpt.
 */
4533 static int lfsck_layout_slave_post(const struct lu_env *env,
4534 struct lfsck_component *com,
4535 int result, bool init)
4537 struct lfsck_instance *lfsck = com->lc_lfsck;
4538 struct lfsck_layout *lo = com->lc_file_ram;
/* Make sure rebuilt/updated LAST_ID values hit the disk first. */
4542 rc = lfsck_layout_lastid_store(env, com);
4546 LASSERT(lfsck->li_out_notify != NULL);
4548 down_write(&com->lc_sem);
4550 spin_lock(&lfsck->li_lock);
4552 lo->ll_pos_last_checkpoint =
4553 lfsck->li_pos_current.lp_oit_cookie;
4555 lo->ll_status = LS_SCANNING_PHASE2;
4556 lo->ll_flags |= LF_SCANNED_ONCE;
/* LAST_ID files were rebuilt during phase 1; clear the flag. */
4557 if (lo->ll_flags & LF_CRASHED_LASTID) {
4559 lo->ll_flags &= ~LF_CRASHED_LASTID;
4561 lo->ll_flags &= ~LF_UPGRADE;
4562 list_del_init(&com->lc_link);
4563 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
4564 } else if (result == 0) {
4565 lo->ll_status = lfsck->li_status;
4566 if (lo->ll_status == 0)
4567 lo->ll_status = LS_STOPPED;
4568 if (lo->ll_status != LS_PAUSED) {
4569 list_del_init(&com->lc_link);
4570 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4573 lo->ll_status = LS_FAILED;
4574 list_del_init(&com->lc_link);
4575 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4577 spin_unlock(&lfsck->li_lock);
/* Notify the upper layer (event argument elided in this excerpt). */
4580 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
/* Accumulate phase-1 statistics into the trace file. */
4584 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4585 HALF_SEC - lfsck->li_time_last_checkpoint);
4586 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4587 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4588 com->lc_new_checked = 0;
4591 rc = lfsck_layout_store(env, com);
4593 up_write(&com->lc_sem);
/* Tell the master this slave finished (or failed) phase 1. */
4595 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
4598 lfsck_rbtree_cleanup(env, com);
/*
 * Format the layout LFSCK state into @buf for procfs consumption:
 * status, flags, parameters, timestamps, positions, repair statistics,
 * and (while scanning) live speed estimates.
 *
 * NOTE(review): many lines (save/len bookkeeping, GOTOs, several format
 * arguments) are elided in this excerpt; runs under com->lc_sem read
 * lock for a consistent snapshot.
 */
4603 static int lfsck_layout_dump(const struct lu_env *env,
4604 struct lfsck_component *com, char *buf, int len)
4606 struct lfsck_instance *lfsck = com->lc_lfsck;
4607 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4608 struct lfsck_layout *lo = com->lc_file_ram;
4613 down_read(&com->lc_sem);
4614 rc = snprintf(buf, len,
4615 "name: lfsck_layout\n"
4621 lfsck_status2names(lo->ll_status));
4627 rc = lfsck_bits_dump(&buf, &len, lo->ll_flags, lfsck_flags_names,
4632 rc = lfsck_bits_dump(&buf, &len, bk->lb_param, lfsck_param_names,
4637 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_complete,
4638 "time_since_last_completed");
4642 rc = lfsck_time_dump(&buf, &len, lo->ll_time_latest_start,
4643 "time_since_latest_start");
4647 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_checkpoint,
4648 "time_since_last_checkpoint");
4652 rc = snprintf(buf, len,
4653 "latest_start_position: "LPU64"\n"
4654 "last_checkpoint_position: "LPU64"\n"
4655 "first_failure_position: "LPU64"\n",
4656 lo->ll_pos_latest_start,
4657 lo->ll_pos_last_checkpoint,
4658 lo->ll_pos_first_inconsistent);
/* Repair counters, one per LLIT_* inconsistency type (1-based). */
4665 rc = snprintf(buf, len,
4666 "success_count: %u\n"
4667 "repaired_dangling: "LPU64"\n"
4668 "repaired_unmatched_pair: "LPU64"\n"
4669 "repaired_multiple_referenced: "LPU64"\n"
4670 "repaired_orphan: "LPU64"\n"
4671 "repaired_inconsistent_owner: "LPU64"\n"
4672 "repaired_others: "LPU64"\n"
4673 "skipped: "LPU64"\n"
4674 "failed_phase1: "LPU64"\n"
4675 "failed_phase2: "LPU64"\n",
4676 lo->ll_success_count,
4677 lo->ll_objs_repaired[LLIT_DANGLING - 1],
4678 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
4679 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
4680 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
4681 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
4682 lo->ll_objs_repaired[LLIT_OTHERS - 1],
4683 lo->ll_objs_skipped,
4684 lo->ll_objs_failed_phase1,
4685 lo->ll_objs_failed_phase2);
/* While scanning phase 1: average speed over accumulated run time,
 * plus a real-time speed over the interval since last checkpoint. */
4692 if (lo->ll_status == LS_SCANNING_PHASE1) {
4694 const struct dt_it_ops *iops;
4695 cfs_duration_t duration = cfs_time_current() -
4696 lfsck->li_time_last_checkpoint;
4697 __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
4698 __u64 speed = checked;
4699 __u64 new_checked = com->lc_new_checked * HZ;
4700 __u32 rtime = lo->ll_run_time_phase1 +
4701 cfs_duration_sec(duration + HALF_SEC);
4704 do_div(new_checked, duration);
4706 do_div(speed, rtime);
4707 rc = snprintf(buf, len,
4708 "checked_phase1: "LPU64"\n"
4709 "checked_phase2: "LPU64"\n"
4710 "run_time_phase1: %u seconds\n"
4711 "run_time_phase2: %u seconds\n"
4712 "average_speed_phase1: "LPU64" items/sec\n"
4713 "average_speed_phase2: N/A\n"
4714 "real-time_speed_phase1: "LPU64" items/sec\n"
4715 "real-time_speed_phase2: N/A\n",
4717 lo->ll_objs_checked_phase2,
4719 lo->ll_run_time_phase2,
4728 LASSERT(lfsck->li_di_oit != NULL);
4730 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
4732 /* The low layer otable-based iteration position may NOT
4733 * exactly match the layout-based directory traversal
4734 * cookie. Generally, it is not a serious issue. But the
4735 * caller should NOT make assumption on that. */
4736 pos = iops->store(env, lfsck->li_di_oit);
4737 if (!lfsck->li_current_oit_processed)
4739 rc = snprintf(buf, len, "current_position: "LPU64"\n", pos);
4745 } else if (lo->ll_status == LS_SCANNING_PHASE2) {
4746 cfs_duration_t duration = cfs_time_current() -
4747 lfsck->li_time_last_checkpoint;
4748 __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
4749 __u64 speed = checked;
4750 __u64 new_checked = com->lc_new_checked * HZ;
4751 __u32 rtime = lo->ll_run_time_phase1 +
4752 cfs_duration_sec(duration + HALF_SEC);
4755 do_div(new_checked, duration);
4757 do_div(speed, rtime);
4758 rc = snprintf(buf, len,
4759 "checked_phase1: "LPU64"\n"
4760 "checked_phase2: "LPU64"\n"
4761 "run_time_phase1: %u seconds\n"
4762 "run_time_phase2: %u seconds\n"
4763 "average_speed_phase1: "LPU64" items/sec\n"
4764 "average_speed_phase2: N/A\n"
4765 "real-time_speed_phase1: "LPU64" items/sec\n"
4766 "real-time_speed_phase2: N/A\n"
4767 "current_position: "DFID"\n",
4769 lo->ll_objs_checked_phase2,
4771 lo->ll_run_time_phase2,
4774 PFID(&com->lc_fid_latest_scanned_phase2));
/* Idle/completed: report only final averages, no live position. */
4781 __u64 speed1 = lo->ll_objs_checked_phase1;
4782 __u64 speed2 = lo->ll_objs_checked_phase2;
4784 if (lo->ll_run_time_phase1 != 0)
4785 do_div(speed1, lo->ll_run_time_phase1);
4786 if (lo->ll_run_time_phase2 != 0)
4787 do_div(speed2, lo->ll_run_time_phase2);
4788 rc = snprintf(buf, len,
4789 "checked_phase1: "LPU64"\n"
4790 "checked_phase2: "LPU64"\n"
4791 "run_time_phase1: %u seconds\n"
4792 "run_time_phase2: %u seconds\n"
4793 "average_speed_phase1: "LPU64" items/sec\n"
4794 "average_speed_phase2: "LPU64" objs/sec\n"
4795 "real-time_speed_phase1: N/A\n"
4796 "real-time_speed_phase2: N/A\n"
4797 "current_position: N/A\n",
4798 lo->ll_objs_checked_phase1,
4799 lo->ll_objs_checked_phase2,
4800 lo->ll_run_time_phase1,
4801 lo->ll_run_time_phase2,
4813 up_read(&com->lc_sem);
/*
 * Kick the assistant thread into phase-2 (double scan) mode and wait for
 * it to acknowledge (llmd_in_double_scan) or stop. Returns the
 * assistant's status if it failed; other return paths are elided in this
 * excerpt.
 */
4818 static int lfsck_layout_master_double_scan(const struct lu_env *env,
4819 struct lfsck_component *com)
4821 struct lfsck_layout_master_data *llmd = com->lc_data;
4822 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
4823 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4824 struct lfsck_layout *lo = com->lc_file_ram;
4825 struct l_wait_info lwi = { 0 };
/* Nothing to do unless phase 1 concluded with "go to phase 2". */
4827 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
4830 llmd->llmd_to_double_scan = 1;
4831 wake_up_all(&athread->t_ctl_waitq);
4832 l_wait_event(mthread->t_ctl_waitq,
4833 llmd->llmd_in_double_scan ||
4834 thread_is_stopped(athread),
4836 if (llmd->llmd_assistant_status < 0)
4837 return llmd->llmd_assistant_status;
/*
 * Slave-side phase 2: periodically (30s timeout) query the master(s) and
 * wait until every registered master has finished, then report the final
 * result upward and to the master, and release the orphan rbtree.
 *
 * NOTE(review): the loop construct, several GOTO targets and the RETURN
 * are elided in this excerpt.
 */
4842 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
4843 struct lfsck_component *com)
4845 struct lfsck_instance *lfsck = com->lc_lfsck;
4846 struct lfsck_layout_slave_data *llsd = com->lc_data;
4847 struct lfsck_layout *lo = com->lc_file_ram;
4848 struct ptlrpc_thread *thread = &lfsck->li_thread;
/* Phase 1 did not finish normally: nothing to double-scan. */
4852 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
4853 lfsck_rbtree_cleanup(env, com);
4854 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
4858 atomic_inc(&lfsck->li_double_scan_count);
4860 com->lc_new_checked = 0;
4861 com->lc_new_scanned = 0;
4862 com->lc_time_last_checkpoint = cfs_time_current();
4863 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
4864 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Re-poll the master(s) every 30 seconds. */
4867 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
4870 rc = lfsck_layout_slave_query_master(env, com);
/* All masters done (none left on the list): finish phase 2. */
4871 if (list_empty(&llsd->llsd_master_list)) {
4872 if (unlikely(!thread_is_running(thread)))
4883 rc = l_wait_event(thread->t_ctl_waitq,
4884 !thread_is_running(thread) ||
4885 list_empty(&llsd->llsd_master_list),
4887 if (unlikely(!thread_is_running(thread)))
/* Timeout just means "poll again"; anything else ends the loop. */
4890 if (rc == -ETIMEDOUT)
4893 GOTO(done, rc = (rc < 0 ? rc : 1));
4897 rc = lfsck_layout_double_scan_result(env, com, rc);
4899 lfsck_rbtree_cleanup(env, com);
4900 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
/* Wake anyone waiting for all double scans to drain. */
4901 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
4902 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/*
 * Release the master-side private data: detach every OST/MDT target
 * descriptor from the phase/target lists under the descs lock, then free
 * the llmd itself (OBD_FREE and the per-entry tgt puts are elided in
 * this excerpt). The assistant thread must already be init or stopped
 * and the request list drained — asserted below.
 */
4907 static void lfsck_layout_master_data_release(const struct lu_env *env,
4908 struct lfsck_component *com)
4910 struct lfsck_layout_master_data *llmd = com->lc_data;
4911 struct lfsck_instance *lfsck = com->lc_lfsck;
4912 struct lfsck_tgt_descs *ltds;
4913 struct lfsck_tgt_desc *ltd;
4914 struct lfsck_tgt_desc *next;
4916 LASSERT(llmd != NULL);
4917 LASSERT(thread_is_init(&llmd->llmd_thread) ||
4918 thread_is_stopped(&llmd->llmd_thread));
4919 LASSERT(list_empty(&llmd->llmd_req_list));
4921 com->lc_data = NULL;
/* Drain the three OST lists under the OST descs lock... */
4923 ltds = &lfsck->li_ost_descs;
4924 spin_lock(&ltds->ltd_lock);
4925 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
4926 ltd_layout_phase_list) {
4927 list_del_init(&ltd->ltd_layout_phase_list);
4929 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
4930 ltd_layout_phase_list) {
4931 list_del_init(&ltd->ltd_layout_phase_list);
4933 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
4935 list_del_init(&ltd->ltd_layout_list);
/* ...then the three MDT lists. NOTE(review): the switch to the MDT
 * descs lock appears to be elided in this excerpt — confirm the full
 * source drops the OST lock and takes li_mdt_descs' lock here. */
4937 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
4938 ltd_layout_phase_list) {
4939 list_del_init(&ltd->ltd_layout_phase_list);
4941 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
4942 ltd_layout_phase_list) {
4943 list_del_init(&ltd->ltd_layout_phase_list);
4945 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
4947 list_del_init(&ltd->ltd_layout_list);
4949 spin_unlock(&ltds->ltd_lock);
/*
 * Release the slave-side private data: free each per-sequence entry
 * (dropping its LAST_ID object reference), drop each registered master
 * target, tear down the orphan rbtree, and clear com->lc_data. The
 * OBD_FREE calls for lls/llst/llsd are elided in this excerpt.
 */
4954 static void lfsck_layout_slave_data_release(const struct lu_env *env,
4955 struct lfsck_component *com)
4957 struct lfsck_layout_slave_data *llsd = com->lc_data;
4958 struct lfsck_layout_seq *lls;
4959 struct lfsck_layout_seq *next;
4960 struct lfsck_layout_slave_target *llst;
4961 struct lfsck_layout_slave_target *tmp;
4963 LASSERT(llsd != NULL);
4965 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
4967 list_del_init(&lls->lls_list);
/* Drop the reference on the per-sequence LAST_ID object. */
4968 lfsck_object_put(env, lls->lls_lastid_obj);
4972 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
4974 list_del_init(&llst->llst_list);
4978 lfsck_rbtree_cleanup(env, com);
4979 com->lc_data = NULL;
/*
 * Ask the assistant thread to exit and wait until it has either never
 * started (init) or fully stopped. Used on LFSCK shutdown/abort.
 */
4983 static void lfsck_layout_master_quit(const struct lu_env *env,
4984 struct lfsck_component *com)
4986 struct lfsck_layout_master_data *llmd = com->lc_data;
4987 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
4988 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4989 struct l_wait_info lwi = { 0 };
4991 llmd->llmd_exit = 1;
4992 wake_up_all(&athread->t_ctl_waitq);
4993 l_wait_event(mthread->t_ctl_waitq,
4994 thread_is_init(athread) ||
4995 thread_is_stopped(athread),
/* Slave-side quit: only the orphan rbtree needs to be torn down. */
4999 static void lfsck_layout_slave_quit(const struct lu_env *env,
5000 struct lfsck_component *com)
5002 lfsck_rbtree_cleanup(env, com);
/*
 * Handle an incoming event from a slave (OST or remote MDT): move the
 * sender's target descriptor between the phase lists according to the
 * event, and on failure either stop the whole LFSCK (failout mode) or
 * mark the run incomplete and continue.
 *
 * NOTE(review): declarations (fail flag), several break/GOTO lines and
 * the RETURN are elided in this excerpt.
 */
5005 static int lfsck_layout_master_in_notify(const struct lu_env *env,
5006 struct lfsck_component *com,
5007 struct lfsck_request *lr)
5009 struct lfsck_instance *lfsck = com->lc_lfsck;
5010 struct lfsck_layout *lo = com->lc_file_ram;
5011 struct lfsck_layout_master_data *llmd = com->lc_data;
5012 struct lfsck_tgt_descs *ltds;
5013 struct lfsck_tgt_desc *ltd;
/* Only these three events are meaningful to the master. */
5017 if (lr->lr_event != LE_PHASE1_DONE &&
5018 lr->lr_event != LE_PHASE2_DONE &&
5019 lr->lr_event != LE_PEER_EXIT)
5022 if (lr->lr_flags & LEF_FROM_OST)
5023 ltds = &lfsck->li_ost_descs;
5025 ltds = &lfsck->li_mdt_descs;
5026 spin_lock(&ltds->ltd_lock);
5027 ltd = LTD_TGT(ltds, lr->lr_index);
5029 spin_unlock(&ltds->ltd_lock);
5034 list_del_init(&ltd->ltd_layout_phase_list);
5035 switch (lr->lr_event) {
5036 case LE_PHASE1_DONE:
/* lr_status <= 0 means the peer failed or stopped in phase 1. */
5037 if (lr->lr_status <= 0) {
5038 ltd->ltd_layout_done = 1;
5039 list_del_init(&ltd->ltd_layout_list);
5040 CWARN("%s: %s %x failed/stopped at phase1: rc = %d.\n",
5041 lfsck_lfsck2name(lfsck),
5042 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5043 ltd->ltd_index, lr->lr_status);
5044 lo->ll_flags |= LF_INCOMPLETE;
/* Peer finished phase 1 successfully: move it to phase-2 lists. */
5049 if (lr->lr_flags & LEF_FROM_OST) {
5050 if (list_empty(&ltd->ltd_layout_list))
5051 list_add_tail(&ltd->ltd_layout_list,
5052 &llmd->llmd_ost_list);
5053 list_add_tail(&ltd->ltd_layout_phase_list,
5054 &llmd->llmd_ost_phase2_list);
5056 if (list_empty(&ltd->ltd_layout_list))
5057 list_add_tail(&ltd->ltd_layout_list,
5058 &llmd->llmd_mdt_list);
5059 list_add_tail(&ltd->ltd_layout_phase_list,
5060 &llmd->llmd_mdt_phase2_list);
5063 case LE_PHASE2_DONE:
5064 ltd->ltd_layout_done = 1;
5065 list_del_init(&ltd->ltd_layout_list);
/* LE_PEER_EXIT: peer left the scan early. */
5069 ltd->ltd_layout_done = 1;
5070 list_del_init(&ltd->ltd_layout_list);
5071 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT)) {
5072 CWARN("%s: the peer %s %x exit layout LFSCK.\n",
5073 lfsck_lfsck2name(lfsck),
5074 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5076 lo->ll_flags |= LF_INCOMPLETE;
5082 spin_unlock(&ltds->ltd_lock);
/* Failout mode: propagate the peer's failure by stopping LFSCK. */
5084 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5085 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5087 memset(stop, 0, sizeof(*stop));
5088 stop->ls_status = lr->lr_status;
5089 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5090 lfsck_stop(env, lfsck->li_bottom, stop);
5091 } else if (lfsck_layout_master_to_orphan(llmd)) {
/* All peers done with phase 1: wake assistant for orphan handling. */
5092 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Handle an incoming event from the master: record FID accesses for
 * orphan detection, process conditional-destroy requests, and on
 * phase-2-done / peer-exit remove the master from the tracking list
 * (stopping locally in failout mode on peer exit).
 *
 * NOTE(review): a few RETURN lines and the llst == NULL branch are
 * elided in this excerpt.
 */
5098 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
5099 struct lfsck_component *com,
5100 struct lfsck_request *lr)
5102 struct lfsck_instance *lfsck = com->lc_lfsck;
5103 struct lfsck_layout_slave_data *llsd = com->lc_data;
5104 struct lfsck_layout_slave_target *llst;
/* Master saw this FID referenced: mark it accessed in the bitmap. */
5107 if (lr->lr_event == LE_FID_ACCESSED) {
5108 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
5113 if (lr->lr_event == LE_CONDITIONAL_DESTROY) {
5116 rc = lfsck_layout_slave_conditional_destroy(env, com, lr);
5121 if (lr->lr_event != LE_PHASE2_DONE && lr->lr_event != LE_PEER_EXIT)
/* Remove the sender from the master list and drop its reference. */
5124 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
5128 lfsck_layout_llst_put(llst);
/* Last master gone: wake the slave main thread (see double_scan). */
5129 if (list_empty(&llsd->llsd_master_list))
5130 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
5132 if (lr->lr_event == LE_PEER_EXIT &&
5133 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5134 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5136 memset(stop, 0, sizeof(*stop));
5137 stop->ls_status = lr->lr_status;
5138 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5139 lfsck_stop(env, lfsck->li_bottom, stop);
/* Report the component's current status (LS_* value) from the in-RAM
 * trace file; shared by master and slave operation tables. */
5145 static int lfsck_layout_query(const struct lu_env *env,
5146 struct lfsck_component *com)
5148 struct lfsck_layout *lo = com->lc_file_ram;
5150 return lo->ll_status;
/*
 * Asynchronously notify one target that the master is stopping
 * (LE_PEER_EXIT / LS_CO_PAUSED): unlink the target from the layout
 * lists under the descs lock, then queue an LFSCK_NOTIFY RPC on @set.
 * On send failure the target reference is dropped (put elided in this
 * excerpt) and the error is logged.
 */
5153 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
5154 struct lfsck_component *com,
5155 struct lfsck_tgt_descs *ltds,
5156 struct lfsck_tgt_desc *ltd,
5157 struct ptlrpc_request_set *set)
5159 struct lfsck_thread_info *info = lfsck_env_info(env);
5160 struct lfsck_async_interpret_args *laia = &info->lti_laia;
5161 struct lfsck_request *lr = &info->lti_lr;
5162 struct lfsck_instance *lfsck = com->lc_lfsck;
5165 spin_lock(&ltds->ltd_lock);
/* Target not participating (not on the layout list): nothing to do. */
5166 if (list_empty(&ltd->ltd_layout_list)) {
5167 LASSERT(list_empty(&ltd->ltd_layout_phase_list));
5168 spin_unlock(&ltds->ltd_lock);
5173 list_del_init(&ltd->ltd_layout_phase_list);
5174 list_del_init(&ltd->ltd_layout_list);
5175 spin_unlock(&ltds->ltd_lock);
5177 memset(lr, 0, sizeof(*lr));
5178 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
5179 lr->lr_event = LE_PEER_EXIT;
5180 lr->lr_active = LT_LAYOUT;
5181 lr->lr_status = LS_CO_PAUSED;
5182 if (ltds == &lfsck->li_ost_descs)
5183 lr->lr_flags = LEF_TO_OST;
5185 laia->laia_com = com;
5186 laia->laia_ltds = ltds;
/* The async interpret callback owns this reference. */
5187 atomic_inc(&ltd->ltd_ref);
5188 laia->laia_ltd = ltd;
5190 laia->laia_shared = 0;
5192 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
5193 lfsck_layout_master_async_interpret,
5194 laia, LFSCK_NOTIFY);
5196 CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
5197 lfsck_lfsck2name(lfsck),
5198 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5199 ltd->ltd_index, rc);
5206 /* with lfsck::li_lock held */
/*
 * Let a master join an already-running slave scan: register its index
 * on the master list (briefly dropping li_lock for the allocation). If
 * the local thread stopped in the meantime, undo the registration.
 *
 * NOTE(review): return-value lines are elided in this excerpt.
 */
5207 static int lfsck_layout_slave_join(const struct lu_env *env,
5208 struct lfsck_component *com,
5209 struct lfsck_start_param *lsp)
5211 struct lfsck_instance *lfsck = com->lc_lfsck;
5212 struct lfsck_layout_slave_data *llsd = com->lc_data;
5213 struct lfsck_layout_slave_target *llst;
5214 struct lfsck_start *start = lsp->lsp_start;
/* Joining only makes sense for an all-targets scan from a known
 * master index. */
5218 if (!lsp->lsp_index_valid || start == NULL ||
5219 !(start->ls_flags & LPF_ALL_TGT) ||
5220 !(lfsck->li_bookmark_ram.lb_param & LPF_ALL_TGT))
/* llst_add may allocate/sleep: drop the spinlock around it. */
5223 spin_unlock(&lfsck->li_lock);
5224 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
5225 spin_lock(&lfsck->li_lock);
/* Raced with a stop: remove the entry we just added. */
5226 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
5227 spin_unlock(&lfsck->li_lock);
5228 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index,
5231 lfsck_layout_llst_put(llst);
5232 spin_lock(&lfsck->li_lock);
/* Operation table for the layout LFSCK master (MDT side). */
5239 static struct lfsck_operations lfsck_layout_master_ops = {
5240 .lfsck_reset = lfsck_layout_reset,
5241 .lfsck_fail = lfsck_layout_fail,
5242 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
5243 .lfsck_prep = lfsck_layout_master_prep,
5244 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
5245 .lfsck_exec_dir = lfsck_layout_exec_dir,
5246 .lfsck_post = lfsck_layout_master_post,
5247 .lfsck_interpret = lfsck_layout_master_async_interpret,
5248 .lfsck_dump = lfsck_layout_dump,
5249 .lfsck_double_scan = lfsck_layout_master_double_scan,
5250 .lfsck_data_release = lfsck_layout_master_data_release,
5251 .lfsck_quit = lfsck_layout_master_quit,
5252 .lfsck_in_notify = lfsck_layout_master_in_notify,
5253 .lfsck_query = lfsck_layout_query,
5254 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/*
 * Operation vtable used when this node is an LFSCK slave (OST side):
 * shares reset/fail/exec_dir/dump/query with the master table but uses
 * slave-specific prep/scan/post handlers, and supports late join via
 * lfsck_layout_slave_join (no .lfsck_interpret / .lfsck_stop_notify).
 * NOTE(review): the closing "};" is not visible in this excerpt.
 */
5257 static struct lfsck_operations lfsck_layout_slave_ops = {
5258 .lfsck_reset = lfsck_layout_reset,
5259 .lfsck_fail = lfsck_layout_fail,
5260 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
5261 .lfsck_prep = lfsck_layout_slave_prep,
5262 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
5263 .lfsck_exec_dir = lfsck_layout_exec_dir,
5264 .lfsck_post = lfsck_layout_slave_post,
5265 .lfsck_dump = lfsck_layout_dump,
5266 .lfsck_double_scan = lfsck_layout_slave_double_scan,
5267 .lfsck_data_release = lfsck_layout_slave_data_release,
5268 .lfsck_quit = lfsck_layout_slave_quit,
5269 .lfsck_in_notify = lfsck_layout_slave_in_notify,
5270 .lfsck_query = lfsck_layout_query,
5271 .lfsck_join = lfsck_layout_slave_join,
/*
 * Set up the layout LFSCK component for @lfsck: allocate the component
 * and its role-specific (master/slave) private data, load or initialize
 * the on-disk trace file, and queue the component on either the idle or
 * the scan list depending on the persisted status.
 *
 * \param[in] env   execution environment
 * \param[in] lfsck the LFSCK instance this component belongs to
 *
 * \retval 0 on success, negative errno on failure (allocation, locating
 *         the local root, or creating/loading the trace file); on error
 *         lfsck_component_cleanup() releases everything allocated here.
 *
 * NOTE(review): this excerpt has dropped lines (the component
 * allocation, several "if (IS_ERR(...))"/"if (rc ...)" guards, switch
 * case labels, and the final return) — the comments below describe only
 * what the visible lines show.
 */
5274 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
5276 struct lfsck_component *com;
5277 struct lfsck_layout *lo;
5278 struct dt_object *root = NULL;
5279 struct dt_object *obj;
/* Generic component init: lists, semaphore, refcount, back pointer. */
5287 INIT_LIST_HEAD(&com->lc_link);
5288 INIT_LIST_HEAD(&com->lc_link_dir);
5289 init_rwsem(&com->lc_sem);
5290 atomic_set(&com->lc_ref, 1);
5291 com->lc_lfsck = lfsck;
5292 com->lc_type = LT_LAYOUT;
5293 if (lfsck->li_master) {
/* Master role: allocate llmd with the request list, the per-phase
 * OST/MDT target lists, and the assistant thread's wait queue. */
5294 struct lfsck_layout_master_data *llmd;
5296 com->lc_ops = &lfsck_layout_master_ops;
5297 OBD_ALLOC_PTR(llmd);
5299 GOTO(out, rc = -ENOMEM);
5301 INIT_LIST_HEAD(&llmd->llmd_req_list);
5302 spin_lock_init(&llmd->llmd_lock);
5303 INIT_LIST_HEAD(&llmd->llmd_ost_list);
5304 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
5305 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
5306 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
5307 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
5308 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
5309 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
5310 com->lc_data = llmd;
/* Slave role: allocate llsd with the LAST_ID sequence list, the
 * master-target list, and the accessed-FID rbtree. */
5312 struct lfsck_layout_slave_data *llsd;
5314 com->lc_ops = &lfsck_layout_slave_ops;
5315 OBD_ALLOC_PTR(llsd);
5317 GOTO(out, rc = -ENOMEM);
5319 INIT_LIST_HEAD(&llsd->llsd_seq_list);
5320 INIT_LIST_HEAD(&llsd->llsd_master_list);
5321 spin_lock_init(&llsd->llsd_lock);
5322 llsd->llsd_rb_root = RB_ROOT;
5323 rwlock_init(&llsd->llsd_rb_lock);
5324 com->lc_data = llsd;
/* In-RAM and on-disk copies of the persistent lfsck_layout record. */
5326 com->lc_file_size = sizeof(*lo);
5327 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
5328 if (com->lc_file_ram == NULL)
5329 GOTO(out, rc = -ENOMEM);
5331 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
5332 if (com->lc_file_disk == NULL)
5333 GOTO(out, rc = -ENOMEM);
/* Find/create the trace file under the local root directory. */
5335 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
5337 GOTO(out, rc = PTR_ERR(root));
5339 if (unlikely(!dt_try_as_dir(env, root)))
5340 GOTO(out, rc = -ENOTDIR);
5342 obj = local_file_find_or_create(env, lfsck->li_los, root,
5344 S_IFREG | S_IRUGO | S_IWUSR);
5346 GOTO(out, rc = PTR_ERR(obj));
/* Load the persisted state; reset on corruption, init when absent. */
5349 rc = lfsck_layout_load(env, com);
5351 rc = lfsck_layout_reset(env, com, true);
5352 else if (rc == -ENOENT)
5353 rc = lfsck_layout_init(env, com);
/* Queue the component according to the persisted status. */
5358 lo = com->lc_file_ram;
5359 switch (lo->ll_status) {
5365 spin_lock(&lfsck->li_lock);
5366 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5367 spin_unlock(&lfsck->li_lock);
5370 CERROR("%s: unknown lfsck_layout status: rc = %u\n",
5371 lfsck_lfsck2name(lfsck), lo->ll_status);
5373 case LS_SCANNING_PHASE1:
5374 case LS_SCANNING_PHASE2:
5375 /* No need to store the status to disk right now.
5376 * If the system crashed before the status stored,
5377 * it will be loaded back when next time. */
5378 lo->ll_status = LS_CRASHED;
5379 lo->ll_flags |= LF_INCOMPLETE;
5386 spin_lock(&lfsck->li_lock);
5387 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
5388 spin_unlock(&lfsck->li_lock);
/* LAST_ID files were marked crashed: tell the upper layer so it can
 * announce the rebuilding (li_out_notify must be registered). */
5392 if (lo->ll_flags & LF_CRASHED_LASTID) {
5393 LASSERT(lfsck->li_out_notify != NULL);
5395 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5396 LE_LASTID_REBUILDING);
/* Cleanup path: drop the root reference; on error tear down the
 * partially-built component. */
5402 if (root != NULL && !IS_ERR(root))
5403 lu_object_put(env, &root->do_lu);
5406 lfsck_component_cleanup(env, com);
/*
 * Iterator state for the orphan OST-object index scan (walks the slave
 * data's accessed-FID rbtree on behalf of one querying MDT target).
 * NOTE(review): the closing "};" and at least one member (the hash used
 * by store/load, presumably "loi_hash") are not visible in this excerpt.
 */
5411 struct lfsck_orphan_it {
5412 struct lfsck_component *loi_com;
/* Current rbtree node being scanned. */
5413 struct lfsck_rbtree_node *loi_lrn;
/* The MDT target this iteration serves (holds resume key/hash). */
5414 struct lfsck_layout_slave_target *loi_llst;
/* Current position: FID of the orphan candidate. */
5415 struct lu_fid loi_key;
/* Record returned to the caller for the current position. */
5416 struct lu_orphan_rec loi_rec;
/* Set once the iteration has run past the last rbtree node. */
5418 unsigned int loi_over:1;
/*
 * Check whether @fid's sequence is served by the MDT with index @idx,
 * using the local FLD server lookup.
 *
 * Abnormal FIDs (non-normal sequence) are attributed to MDT0.
 *
 * NOTE(review): the excerpt drops the return statements, so the exact
 * return convention (match / no-match / error) cannot be confirmed here
 * — the caller in lfsck_orphan_it_next treats it as a tri-state.
 */
5421 static int lfsck_fid_match_idx(const struct lu_env *env,
5422 struct lfsck_instance *lfsck,
5423 const struct lu_fid *fid, int idx)
5425 struct seq_server_site *ss;
5426 struct lu_server_fld *sf;
5427 struct lu_seq_range range = { 0 };
5430 /* All abnormal cases will be returned to MDT0. */
5431 if (!fid_is_norm(fid)) {
5438 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
5439 if (unlikely(ss == NULL))
5442 sf = ss->ss_server_fld;
5443 LASSERT(sf != NULL);
/* Ask FLD which target range owns this FID's sequence. */
5445 fld_range_set_any(&range);
5446 rc = fld_server_lookup(env, sf, fid_seq(fid), &range);
5450 if (!fld_range_is_mdt(&range))
5453 if (range.lsr_index == idx)
/*
 * Destroy an orphan OST-object @obj on device @dev inside a single
 * local transaction: declare ref_del + destroy, start the transaction,
 * then drop the nlink and destroy the object under its write lock.
 *
 * Best-effort: the function returns void; errors presumably abort via
 * the dropped GOTO/stop paths (not visible in this excerpt).
 *
 * NOTE(review): the "if (IS_ERR(handle))" and "if (rc != 0)" guards
 * between the declare/start/stop steps were dropped from this excerpt.
 */
5459 static void lfsck_layout_destroy_orphan(const struct lu_env *env,
5460 struct dt_device *dev,
5461 struct dt_object *obj)
5463 struct thandle *handle;
5467 handle = dt_trans_create(env, dev);
5471 rc = dt_declare_ref_del(env, obj, handle);
5475 rc = dt_declare_destroy(env, obj, handle);
5479 rc = dt_trans_start_local(env, dev, handle);
5483 dt_write_lock(env, obj, 0);
5484 rc = dt_ref_del(env, obj, handle);
5486 rc = dt_destroy(env, obj, handle);
5487 dt_write_unlock(env, obj);
/* Always stop the transaction that was started above. */
5492 dt_trans_stop(env, dev, handle);
/*
 * Stub: the orphan "index" is iteration-only; direct key lookup is not
 * supported (body — presumably a fixed error return — is not visible in
 * this excerpt).
 */
5497 static int lfsck_orphan_index_lookup(const struct lu_env *env,
5498 struct dt_object *dt,
5500 const struct dt_key *key,
5501 struct lustre_capa *capa)
/*
 * Stub: insertion into the orphan index is not supported (body not
 * visible in this excerpt; presumably a fixed error return).
 */
5506 static int lfsck_orphan_index_declare_insert(const struct lu_env *env,
5507 struct dt_object *dt,
5508 const struct dt_rec *rec,
5509 const struct dt_key *key,
5510 struct thandle *handle)
/*
 * Stub: insertion into the orphan index is not supported (body not
 * visible in this excerpt; presumably a fixed error return).
 */
5515 static int lfsck_orphan_index_insert(const struct lu_env *env,
5516 struct dt_object *dt,
5517 const struct dt_rec *rec,
5518 const struct dt_key *key,
5519 struct thandle *handle,
5520 struct lustre_capa *capa,
/*
 * Stub: deletion from the orphan index is not supported (body not
 * visible in this excerpt; presumably a fixed error return).
 */
5526 static int lfsck_orphan_index_declare_delete(const struct lu_env *env,
5527 struct dt_object *dt,
5528 const struct dt_key *key,
5529 struct thandle *handle)
/*
 * Stub: deletion from the orphan index is not supported (body not
 * visible in this excerpt; presumably a fixed error return).
 */
5534 static int lfsck_orphan_index_delete(const struct lu_env *env,
5535 struct dt_object *dt,
5536 const struct dt_key *key,
5537 struct thandle *handle,
5538 struct lustre_capa *capa)
/*
 * Start an orphan iteration for the MDT identified by @attr: find the
 * LFSCK instance and its layout component, allocate the iterator, claim
 * the matching slave-target entry, optionally prune the rbtree on the
 * first-ever iteration, and take the rbtree read lock (released in
 * lfsck_orphan_it_fini).
 *
 * \retval iterator pointer on success, ERR_PTR(-errno) on failure.
 *
 * NOTE(review): dropped lines include the iterator allocation
 * (OBD_ALLOC_PTR(it), before line 5570), the inner loop's node advance,
 * and the error-path puts of com/lfsck — comments below cover only the
 * visible code.
 */
5543 static struct dt_it *lfsck_orphan_it_init(const struct lu_env *env,
5544 struct dt_object *dt,
5546 struct lustre_capa *capa)
5548 struct dt_device *dev = lu2dt_dev(dt->do_lu.lo_dev);
5549 struct lfsck_instance *lfsck;
5550 struct lfsck_component *com = NULL;
5551 struct lfsck_layout_slave_data *llsd;
5552 struct lfsck_orphan_it *it = NULL;
5556 lfsck = lfsck_instance_find(dev, true, false);
5557 if (unlikely(lfsck == NULL))
5558 RETURN(ERR_PTR(-ENODEV));
5560 com = lfsck_component_find(lfsck, LT_LAYOUT);
5561 if (unlikely(com == NULL))
5562 GOTO(out, rc = -ENOENT);
/* No valid rbtree means the slave never recorded accessed FIDs for
 * this run: nothing to iterate. */
5564 llsd = com->lc_data;
5565 if (!llsd->llsd_rbtree_valid)
5566 GOTO(out, rc = -ESRCH);
5570 GOTO(out, rc = -ENOMEM);
/* Claim (remove) the per-MDT target entry; it holds the resume
 * position and is put back in it_fini. */
5572 it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
5573 if (it->loi_llst == NULL)
5574 GOTO(out, rc = -ENODEV);
5576 if (dev->dd_record_fid_accessed) {
5577 /* The first iteration against the rbtree, scan the whole rbtree
5578 * to remove the nodes which do NOT need to be handled. */
5579 write_lock(&llsd->llsd_rb_lock);
/* Re-check under the write lock: another iterator may have
 * already done the pruning. */
5580 if (dev->dd_record_fid_accessed) {
5581 struct rb_node *node;
5582 struct rb_node *next;
5583 struct lfsck_rbtree_node *lrn;
5585 /* No need to record the fid accessing anymore. */
5586 dev->dd_record_fid_accessed = 0;
5588 node = rb_first(&llsd->llsd_rb_root);
5589 while (node != NULL) {
5590 next = rb_next(node);
5591 lrn = rb_entry(node, struct lfsck_rbtree_node,
/* Every known FID in this node was also accessed:
 * no orphans here, free the whole node. */
5593 if (atomic_read(&lrn->lrn_known_count) <=
5594 atomic_read(&lrn->lrn_accessed_count)) {
5595 rb_erase(node, &llsd->llsd_rb_root);
5596 lfsck_rbtree_free(lrn);
5601 write_unlock(&llsd->llsd_rb_lock);
5604 /* read lock the rbtree when init, and unlock when fini */
5605 read_lock(&llsd->llsd_rb_lock);
5613 lfsck_component_put(env, com);
5614 lfsck_instance_put(env, lfsck);
/* Error path: hand the errno back to the caller as an ERR_PTR. */
5619 it = (struct lfsck_orphan_it *)ERR_PTR(rc);
5622 return (struct dt_it *)it;
/*
 * Finish an orphan iteration: drop the rbtree read lock taken in
 * it_init, save the resume position (key + hash) back into the slave
 * target entry, and release the references on the target and the
 * component.
 *
 * NOTE(review): dropped lines likely include a NULL check on @com and
 * the freeing of @it itself — verify against the full file.
 */
5625 static void lfsck_orphan_it_fini(const struct lu_env *env,
5628 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5629 struct lfsck_component *com = it->loi_com;
5630 struct lfsck_layout_slave_data *llsd;
5631 struct lfsck_layout_slave_target *llst;
5634 llsd = com->lc_data;
/* Pairs with read_lock() in lfsck_orphan_it_init(). */
5635 read_unlock(&llsd->llsd_rb_lock);
5636 llst = it->loi_llst;
5637 LASSERT(llst != NULL);
5639 /* Save the key and hash for iterate next. */
5640 llst->llst_fid = it->loi_key;
5641 llst->llst_hash = it->loi_hash;
5642 lfsck_layout_llst_put(llst);
5643 lfsck_component_put(env, com);
/*
 * Advance the orphan iterator to the next orphan OST-object: walk the
 * accessed-FID rbtree from the current key, skip FIDs that were
 * accessed during the scan, look up each remaining candidate object,
 * and fill it->loi_rec with the parent FID / owner for the MDT.
 */
5649 * \retval +1: the iteration finished
5650 * \retval 0: on success, not finished
5651 * \retval -ve: on error
/* NOTE(review): this excerpt drops many lines of this function
 * (the "again"-style loop labels, several GOTO targets, increments of
 * key->f_oid/pos, and the final returns). The comments below annotate
 * only the visible statements. */
5653 static int lfsck_orphan_it_next(const struct lu_env *env,
5656 struct lfsck_thread_info *info = lfsck_env_info(env);
5657 struct filter_fid_old *pfid = &info->lti_old_pfid;
5658 struct lu_attr *la = &info->lti_la;
5659 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5660 struct lu_fid *key = &it->loi_key;
5661 struct lu_orphan_rec *rec = &it->loi_rec;
5662 struct lfsck_component *com = it->loi_com;
5663 struct lfsck_instance *lfsck = com->lc_lfsck;
5664 struct lfsck_layout_slave_data *llsd = com->lc_data;
5665 struct dt_object *obj;
5666 struct lfsck_rbtree_node *lrn;
/* Index of the MDT this iteration serves. */
5670 __u32 idx = it->loi_llst->llst_index;
/* Locate the rbtree node covering the current key. */
5680 lrn = lfsck_rbtree_search(llsd, key, &exact);
/* Not an exact hit: restart from the node's first OID. */
5688 key->f_seq = lrn->lrn_seq;
5689 key->f_oid = lrn->lrn_first_oid;
5694 if (unlikely(key->f_oid == 0)) {
/* Key ran past this node's bitmap range. */
5701 lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH) {
/* Node fully accessed: advance to the next node that still has
 * unaccessed (potential orphan) entries. */
5707 if (unlikely(atomic_read(&lrn->lrn_known_count) <=
5708 atomic_read(&lrn->lrn_accessed_count))) {
5709 struct rb_node *next = rb_next(&lrn->lrn_node);
5711 while (next != NULL) {
5712 lrn = rb_entry(next, struct lfsck_rbtree_node,
5714 if (atomic_read(&lrn->lrn_known_count) >
5715 atomic_read(&lrn->lrn_accessed_count))
5717 next = rb_next(next);
5726 key->f_seq = lrn->lrn_seq;
5727 key->f_oid = lrn->lrn_first_oid;
/* Scan the known-bitmap from the current offset within the node. */
5731 pos = key->f_oid - lrn->lrn_first_oid;
5734 pos = find_next_bit(lrn->lrn_known_bitmap,
5735 LFSCK_RBTREE_BITMAP_WIDTH, pos);
5736 if (pos >= LFSCK_RBTREE_BITMAP_WIDTH) {
5737 key->f_oid = lrn->lrn_first_oid + pos;
/* OID wrap-around guard. */
5738 if (unlikely(key->f_oid < lrn->lrn_first_oid)) {
/* Accessed during the scan => not an orphan, skip it. */
5746 if (test_bit(pos, lrn->lrn_accessed_bitmap)) {
/* Candidate orphan: load the object and inspect it. */
5751 key->f_oid = lrn->lrn_first_oid + pos;
5752 obj = lfsck_object_find(env, lfsck, key);
5755 if (rc == -ENOENT) {
5762 dt_read_lock(env, obj, 0);
5763 if (!dt_object_exists(obj)) {
5764 dt_read_unlock(env, obj);
5765 lfsck_object_put(env, obj);
5770 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Fetch the stored parent FID (filter_fid xattr). */
5774 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, pfid, sizeof(*pfid)),
5775 XATTR_NAME_FID, BYPASS_CAPA);
5776 if (rc == -ENODATA) {
5777 /* For the pre-created OST-object, update the bitmap to avoid
5778 * others LFSCK (second phase) iteration to touch it again. */
5779 if (la->la_ctime == 0) {
5780 if (!test_and_set_bit(pos, lrn->lrn_accessed_bitmap))
5781 atomic_inc(&lrn->lrn_accessed_count);
5783 /* For the race between repairing dangling referenced
5784 * MDT-object and unlink the file, it may left orphan
5785 * OST-object there. Destroy it now! */
5786 if (unlikely(!(la->la_mode & S_ISUID))) {
5787 dt_read_unlock(env, obj);
5788 lfsck_layout_destroy_orphan(env,
5791 lfsck_object_put(env, obj);
5795 } else if (idx == 0) {
5796 /* If the orphan OST-object has no parent information,
5797 * regard it as referenced by the MDT-object on MDT0. */
5798 fid_zero(&rec->lor_fid);
5799 rec->lor_uid = la->la_uid;
5800 rec->lor_gid = la->la_gid;
5804 dt_read_unlock(env, obj);
5805 lfsck_object_put(env, obj);
/* The xattr must be one of the two known filter_fid layouts. */
5813 if (rc != sizeof(struct filter_fid) &&
5814 rc != sizeof(struct filter_fid_old))
5815 GOTO(out, rc = -EINVAL);
5817 fid_le_to_cpu(&rec->lor_fid, &pfid->ff_parent);
5818 /* In fact, the ff_parent::f_ver is not the real parent FID::f_ver,
5819 * instead, it is the OST-object index in its parent MDT-object
5821 save = rec->lor_fid.f_ver;
5822 rec->lor_fid.f_ver = 0;
5823 rc = lfsck_fid_match_idx(env, lfsck, &rec->lor_fid, idx);
5824 /* If the orphan OST-object does not claim the MDT, then next.
5826 * If we do not know whether it matches or not, then return it
5827 * to the MDT for further check. */
5829 dt_read_unlock(env, obj);
5830 lfsck_object_put(env, obj);
/* Match (or unknown): hand the record back with the stripe index
 * restored in f_ver and the owner ids filled in. */
5835 rec->lor_fid.f_ver = save;
5836 rec->lor_uid = la->la_uid;
5837 rec->lor_gid = la->la_gid;
5839 CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u\n",
5840 lfsck_lfsck2name(com->lc_lfsck), PFID(key), PFID(&rec->lor_fid),
5841 rec->lor_uid, rec->lor_gid);
5846 dt_read_unlock(env, obj);
5847 lfsck_object_put(env, obj);
/*
 * Position the iterator at @key and delegate to it_next() to move to
 * the first valid (orphan) position at or after it.
 */
5855 * \retval +1: locate to the exactly position
5856 * \retval 0: cannot locate to the exactly position,
5857 * call next() to move to a valid position.
5858 * \retval -ve: on error
5860 static int lfsck_orphan_it_get(const struct lu_env *env,
5862 const struct dt_key *key)
5864 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5867 it->loi_key = *(struct lu_fid *)key;
5868 rc = lfsck_orphan_it_next(env, di);
/* No-op "put": the orphan iterator keeps no per-position reference
 * (body not visible in this excerpt; presumably empty). */
5878 static void lfsck_orphan_it_put(const struct lu_env *env,
/* Return the current iteration key (the candidate orphan's FID). */
5883 static struct dt_key *lfsck_orphan_it_key(const struct lu_env *env,
5884 const struct dt_it *di)
5886 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5888 return (struct dt_key *)&it->loi_key;
/* Keys in the orphan index are always full lu_fid values. */
5891 static int lfsck_orphan_it_key_size(const struct lu_env *env,
5892 const struct dt_it *di)
5894 return sizeof(struct lu_fid);
/* Copy the current orphan record (parent FID + owner) to the caller's
 * buffer. NOTE(review): the return statement is not visible in this
 * excerpt. */
5897 static int lfsck_orphan_it_rec(const struct lu_env *env,
5898 const struct dt_it *di,
5902 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5904 *(struct lu_orphan_rec *)rec = it->loi_rec;
/* Return the opaque cookie (hash) for the current position; it_load()
 * validates this cookie when the iteration is resumed. */
5909 static __u64 lfsck_orphan_it_store(const struct lu_env *env,
5910 const struct dt_it *di)
5912 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5914 return it->loi_hash;
/*
 * Resume an iteration from the cookie @hash: if it matches the hash
 * saved in the slave-target entry at fini time, restore the saved
 * key/hash; otherwise warn and restart from the beginning (zero FID).
 * Either way, it_next() moves to the first valid position.
 */
5918 * \retval +1: locate to the exactly position
5919 * \retval 0: cannot locate to the exactly position,
5920 * call next() to move to a valid position.
5921 * \retval -ve: on error
5923 static int lfsck_orphan_it_load(const struct lu_env *env,
5924 const struct dt_it *di,
5927 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5928 struct lfsck_layout_slave_target *llst = it->loi_llst;
5931 LASSERT(llst != NULL);
5933 if (hash != llst->llst_hash) {
5934 CWARN("%s: the given hash "LPU64" for orphan iteration does "
5935 "not match the one when fini "LPU64", to be reset.\n",
5936 lfsck_lfsck2name(it->loi_com->lc_lfsck), hash,
/* Cookie mismatch: restart the scan from the beginning. */
5938 fid_zero(&llst->llst_fid);
5939 llst->llst_hash = 0;
5942 it->loi_key = llst->llst_fid;
5943 it->loi_hash = llst->llst_hash;
5944 rc = lfsck_orphan_it_next(env, (struct dt_it *)di);
/* Stub key_rec handler for the orphan index (body not visible in this
 * excerpt; presumably a trivial return). */
5954 static int lfsck_orphan_it_key_rec(const struct lu_env *env,
5955 const struct dt_it *di,
5961 const struct dt_index_operations lfsck_orphan_index_ops = {
5962 .dio_lookup = lfsck_orphan_index_lookup,
5963 .dio_declare_insert = lfsck_orphan_index_declare_insert,
5964 .dio_insert = lfsck_orphan_index_insert,
5965 .dio_declare_delete = lfsck_orphan_index_declare_delete,
5966 .dio_delete = lfsck_orphan_index_delete,
5968 .init = lfsck_orphan_it_init,
5969 .fini = lfsck_orphan_it_fini,
5970 .get = lfsck_orphan_it_get,
5971 .put = lfsck_orphan_it_put,
5972 .next = lfsck_orphan_it_next,
5973 .key = lfsck_orphan_it_key,
5974 .key_size = lfsck_orphan_it_key_size,
5975 .rec = lfsck_orphan_it_rec,
5976 .store = lfsck_orphan_it_store,
5977 .load = lfsck_orphan_it_load,
5978 .key_rec = lfsck_orphan_it_key_rec,