4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_linkea.h>
43 #include <lustre_fid.h>
44 #include <lustre_lib.h>
45 #include <lustre_net.h>
46 #include <lustre/lustre_user.h>
47 #include <md_object.h>
48 #include <obd_class.h>
50 #include "lfsck_internal.h"
/* Magic value identifying the on-disk lfsck_layout trace/state file. */
52 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Component name of the layout LFSCK, used for registration/logging. */
54 static const char lfsck_layout_name[] = "lfsck_layout";
/* Per-FID-sequence state tracked by the layout LFSCK slave (OST side).
 * NOTE(review): this chunk is truncated — some members are missing here. */
56 struct lfsck_layout_seq {
/* Link into lfsck_layout_slave_data::llsd_seq_list. */
57 struct list_head lls_list;
/* Highest object ID known for this sequence, used to detect a crashed
 * LAST_ID (see lfsck_layout_lastid_reload()). */
60 __u64 lls_lastid_known;
/* The LAST_ID object for this sequence. */
61 struct dt_object *lls_lastid_obj;
/* Set when the in-RAM LAST_ID differs from disk and needs to be synced. */
62 unsigned int lls_dirty:1;
/* One LFSCK master (MDT) that this slave is serving orphan iteration for.
 * Reference counted via llst_ref (see lfsck_layout_llst_put()). */
65 struct lfsck_layout_slave_target {
66 /* link into lfsck_layout_slave_data::llsd_master_list. */
67 struct list_head llst_list;
68 /* The position for next record in the rbtree for iteration. */
69 struct lu_fid llst_fid;
70 /* Dummy hash for iteration against the rbtree. */
/* Private data of the layout LFSCK slave component (runs on the OST). */
77 struct lfsck_layout_slave_data {
78 /* list for lfsck_layout_seq */
79 struct list_head llsd_seq_list;
81 /* list for the masters involved in layout verification. */
82 struct list_head llsd_master_list;
/* In-RAM object standing for the rbtree; iterated via
 * lfsck_orphan_index_ops (see lfsck_rbtree_setup()). */
85 struct dt_object *llsd_rb_obj;
/* rbtree of lfsck_rbtree_node recording known/accessed OST-objects. */
86 struct rb_root llsd_rb_root;
/* Protects llsd_rb_root and llsd_rbtree_valid. */
87 rwlock_t llsd_rb_lock;
/* Cleared under write-lock by lfsck_rbtree_cleanup() to fence users. */
88 unsigned int llsd_rbtree_valid:1;
/* Reference-counted wrapper for an MDT-object whose layout is being
 * verified; llo_attr is pre-fetched by lfsck_layout_object_init(). */
91 struct lfsck_layout_object {
92 struct dt_object *llo_obj;
93 struct lu_attr llo_attr;
/* One verification request queued for the layout LFSCK assistant thread:
 * check consistency between a parent (MDT-object) and one OST child. */
98 struct lfsck_layout_req {
/* Link into lfsck_layout_master_data::llmd_req_list. */
99 struct list_head llr_list;
100 struct lfsck_layout_object *llr_parent;
101 struct dt_object *llr_child;
103 __u32 llr_lov_idx; /* offset in LOV EA */
/* Private data of the layout LFSCK master component (runs on the MDT). */
106 struct lfsck_layout_master_data {
/* Protects llmd_req_list. */
107 spinlock_t llmd_lock;
108 struct list_head llmd_req_list;
110 /* list for the ost targets involve layout verification. */
111 struct list_head llmd_ost_list;
113 /* list for the ost targets in phase1 scanning. */
114 struct list_head llmd_ost_phase1_list;
116 /* list for the ost targets in phase2 scanning. */
117 struct list_head llmd_ost_phase2_list;
119 /* list for the mdt targets involve layout verification. */
120 struct list_head llmd_mdt_list;
122 /* list for the mdt targets in phase1 scanning. */
123 struct list_head llmd_mdt_phase1_list;
125 /* list for the mdt targets in phase2 scanning. */
126 struct list_head llmd_mdt_phase2_list;
/* The assistant thread that consumes llmd_req_list. */
128 struct ptlrpc_thread llmd_thread;
/* Generation bumped per query round to avoid touching a target twice
 * (see lfsck_layout_master_query_others()). */
129 __u32 llmd_touch_gen;
131 int llmd_assistant_status;
132 int llmd_post_result;
/* State flags for the assistant's post/double-scan handshake. */
133 unsigned int llmd_to_post:1,
134 llmd_to_double_scan:1,
135 llmd_in_double_scan:1,
/* Arguments carried by async RPCs sent from the layout LFSCK slave. */
139 struct lfsck_layout_slave_async_args {
140 struct obd_export *llsaa_exp;
141 struct lfsck_component *llsaa_com;
142 struct lfsck_layout_slave_target *llsaa_llst;
/* Allocate and initialize an lfsck_layout_object wrapping @obj: pre-fetch
 * its attributes, take a reference on the lu_object and set refcount to 1.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): source chunk is truncated — allocation and error-path
 * lines are missing here. */
145 static struct lfsck_layout_object *
146 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
149 struct lfsck_layout_object *llo;
154 return ERR_PTR(-ENOMEM);
156 rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
163 lu_object_get(&obj->do_lu);
165 /* The gen can be used to check whether some others have changed the
166 * file layout after LFSCK pre-fetching but before real verification. */
168 atomic_set(&llo->llo_ref, 1);
/* Drop a reference on @llst; on the last put the target must already be
 * unlinked from llsd_master_list (asserted) before being freed. */
174 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
176 if (atomic_dec_and_test(&llst->llst_ref)) {
177 LASSERT(list_empty(&llst->llst_list));
/* Register the master with MDT @index on llsd_master_list. The list is
 * scanned under llsd_lock first so a duplicate index is not added twice.
 * NOTE(review): truncated — allocation and the duplicate-found path are
 * not visible in this chunk. */
184 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
186 struct lfsck_layout_slave_target *llst;
187 struct lfsck_layout_slave_target *tmp;
194 INIT_LIST_HEAD(&llst->llst_list);
196 llst->llst_index = index;
197 atomic_set(&llst->llst_ref, 1);
199 spin_lock(&llsd->llsd_lock);
200 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
201 if (tmp->llst_index == index) {
207 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
208 spin_unlock(&llsd->llsd_lock);
/* Unlink @llst from llsd_master_list (if still linked) under llsd_lock,
 * then drop the list's reference via lfsck_layout_llst_put(). */
217 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
218 struct lfsck_layout_slave_target *llst)
222 spin_lock(&llsd->llsd_lock);
223 if (!list_empty(&llst->llst_list)) {
224 list_del_init(&llst->llst_list);
227 spin_unlock(&llsd->llsd_lock);
230 lfsck_layout_llst_put(llst);
/* Find the slave target with MDT @index; take a reference on it and,
 * when @unlink is set, remove it from llsd_master_list. Returns the
 * target, or (per the visible fall-through) nothing was found after
 * the final unlock. NOTE(review): truncated — the @unlink conditional
 * and return statements are missing from this chunk. */
233 static inline struct lfsck_layout_slave_target *
234 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
235 __u32 index, bool unlink)
237 struct lfsck_layout_slave_target *llst;
239 spin_lock(&llsd->llsd_lock);
240 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
241 if (llst->llst_index == index) {
243 list_del_init(&llst->llst_list);
245 atomic_inc(&llst->llst_ref);
246 spin_unlock(&llsd->llsd_lock);
251 spin_unlock(&llsd->llsd_lock);
/* Drop a reference on @llo; the last put releases the wrapped object. */
256 static inline void lfsck_layout_object_put(const struct lu_env *env,
257 struct lfsck_layout_object *llo)
259 if (atomic_dec_and_test(&llo->llo_ref)) {
260 lfsck_object_put(env, llo->llo_obj);
/* Build a verification request pairing @parent (takes a new reference on
 * it) with OST @child at stripe @lov_idx on OST @ost_idx.
 * Returns ERR_PTR(-ENOMEM) if the request cannot be allocated.
 * NOTE(review): truncated — the allocation itself is not visible. */
265 static struct lfsck_layout_req *
266 lfsck_layout_req_init(struct lfsck_layout_object *parent,
267 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
269 struct lfsck_layout_req *llr;
273 return ERR_PTR(-ENOMEM);
275 INIT_LIST_HEAD(&llr->llr_list);
276 atomic_inc(&parent->llo_ref);
277 llr->llr_parent = parent;
278 llr->llr_child = child;
279 llr->llr_ost_idx = ost_idx;
280 llr->llr_lov_idx = lov_idx;
/* Release a request: put the child object and the parent wrapper taken
 * by lfsck_layout_req_init(). */
285 static inline void lfsck_layout_req_fini(const struct lu_env *env,
286 struct lfsck_layout_req *llr)
288 lu_object_put(env, &llr->llr_child->do_lu);
289 lfsck_layout_object_put(env, llr->llr_parent);
/* Check, under llmd_lock, whether the assistant's request queue is empty. */
293 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
297 spin_lock(&llmd->llmd_lock);
298 if (list_empty(&llmd->llmd_req_list))
300 spin_unlock(&llmd->llmd_lock);
/* Fetch @obj's LOV EA into @buf, growing (or allocating) the buffer when
 * the first dt_xattr_get() reports it is too small: a probe with
 * LU_BUF_NULL returns the needed size, the buffer is (re)allocated to it,
 * and *buflen is updated to the new capacity.
 * NOTE(review): truncated — the retry/return paths between the visible
 * lines are missing from this chunk. */
305 static int lfsck_layout_get_lovea(const struct lu_env *env,
306 struct dt_object *obj,
307 struct lu_buf *buf, ssize_t *buflen)
312 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
314 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
319 lu_buf_realloc(buf, rc);
321 *buflen = buf->lb_len;
323 if (buf->lb_buf == NULL)
335 if (unlikely(buf->lb_buf == NULL)) {
336 lu_buf_alloc(buf, rc);
338 *buflen = buf->lb_len;
340 if (buf->lb_buf == NULL)
/* Sanity-check a LOV EA header: accept only LOV_MAGIC_V1/V3 magics and
 * the RAID0 stripe pattern; anything else is left for the orphan-handling
 * pass to repair. */
349 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
354 magic = le32_to_cpu(lmm->lmm_magic);
355 /* If magic crashed, keep it there. Sometime later, during OST-object
356 * orphan handling, if some OST-object(s) back-point to it, it can be
357 * verified and repaired. */
358 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
361 patten = le32_to_cpu(lmm->lmm_pattern);
362 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
363 if (patten != LOV_PATTERN_RAID0)
/* Each rbtree node carries one page-sized bitmap: SIZE bytes, hence
 * WIDTH = SIZE * 8 object IDs per node; MASK extracts the bit index of
 * an OID within its node. */
369 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
370 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
371 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1)
/* One rbtree node covering LFSCK_RBTREE_BITMAP_WIDTH consecutive OIDs of
 * a single FID sequence: per-OID "known" and "accessed" bits plus counts. */
373 struct lfsck_rbtree_node {
374 struct rb_node lrn_node;
377 atomic_t lrn_known_count;
378 atomic_t lrn_accessed_count;
379 void *lrn_known_bitmap;
380 void *lrn_accessed_bitmap;
/* Three-way compare of (@seq, @oid) against the OID range covered by
 * @lrn: first by sequence, then by whether @oid falls inside the node's
 * LFSCK_RBTREE_BITMAP_WIDTH-wide window starting at lrn_first_oid.
 * NOTE(review): truncated — the return statements are missing here. */
383 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
384 __u64 seq, __u32 oid)
386 if (seq < lrn->lrn_seq)
389 if (seq > lrn->lrn_seq)
392 if (oid < lrn->lrn_first_oid)
395 if (oid - lrn->lrn_first_oid >= LFSCK_RBTREE_BITMAP_WIDTH)
401 /* The caller should hold llsd->llsd_rb_lock.
 *
 * Standard rbtree lookup keyed by lfsck_rbtree_cmp(). On an exact hit the
 * covering node is returned; otherwise the walk falls through to the next
 * valid node after the last visited one (via rb_next), so callers can use
 * it as a "find greater-or-equal" for iteration. @exact, when non-NULL,
 * presumably reports which case occurred — TODO confirm, the assignments
 * are not visible in this truncated chunk. */
402 static struct lfsck_rbtree_node *
403 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
404 const struct lu_fid *fid, bool *exact)
406 struct rb_node *node = llsd->llsd_rb_root.rb_node;
407 struct rb_node *prev = NULL;
408 struct lfsck_rbtree_node *lrn = NULL;
414 while (node != NULL) {
416 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
417 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
419 node = node->rb_left;
421 node = node->rb_right;
429 /* If there is no exactly matched one, then to the next valid one. */
432 /* The rbtree is empty. */
439 node = rb_next(prev);
441 /* The end of the rbtree. */
445 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
/* Allocate a new rbtree node covering the bitmap window that contains
 * @fid's OID (first_oid is the OID rounded down to the window boundary).
 * Both bitmaps are allocated; on partial failure the already-allocated
 * pieces are freed and ERR_PTR(-ENOMEM) is returned. */
450 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
451 const struct lu_fid *fid)
453 struct lfsck_rbtree_node *lrn;
457 return ERR_PTR(-ENOMEM);
459 OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
460 if (lrn->lrn_known_bitmap == NULL) {
463 return ERR_PTR(-ENOMEM);
466 OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
467 if (lrn->lrn_accessed_bitmap == NULL) {
468 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
471 return ERR_PTR(-ENOMEM);
474 rb_init_node(&lrn->lrn_node);
475 lrn->lrn_seq = fid_seq(fid);
/* Align the window start down to an LFSCK_RBTREE_BITMAP_WIDTH boundary. */
476 lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
477 atomic_set(&lrn->lrn_known_count, 0);
478 atomic_set(&lrn->lrn_accessed_count, 0);
/* Free both bitmaps of @lrn (the node itself is freed in the truncated
 * remainder of this function — not visible in this chunk). */
483 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
485 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
486 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
490 /* The caller should hold lock (llsd->llsd_rb_lock, write side).
 *
 * Standard rbtree insertion of @lrn keyed by lfsck_rbtree_cmp(). If an
 * existing node already covers the same window, that node is presumably
 * returned instead of inserting — TODO confirm, the equal-key branch is
 * not visible in this truncated chunk. */
491 static struct lfsck_rbtree_node *
492 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
493 struct lfsck_rbtree_node *lrn)
495 struct rb_node **pos = &llsd->llsd_rb_root.rb_node;
496 struct rb_node *parent = NULL;
497 struct lfsck_rbtree_node *tmp;
500 while (*pos != NULL) {
502 tmp = rb_entry(parent, struct lfsck_rbtree_node, lrn_node);
503 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
505 pos = &(*pos)->rb_left;
507 pos = &(*pos)->rb_right;
512 rb_link_node(&lrn->lrn_node, parent, pos);
513 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
518 extern const struct dt_index_operations lfsck_orphan_index_ops;
/* Create the in-RAM object that represents the layout rbtree and enable
 * FID-accessed recording on the bottom device. The object uses a special
 * FID (FID_SEQ_LAYOUT_RBTREE, oid = device index) and is iterated through
 * lfsck_orphan_index_ops. Returns PTR_ERR() if dt_locate() fails. */
520 static int lfsck_rbtree_setup(const struct lu_env *env,
521 struct lfsck_component *com)
523 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
524 struct lfsck_instance *lfsck = com->lc_lfsck;
525 struct dt_device *dev = lfsck->li_bottom;
526 struct lfsck_layout_slave_data *llsd = com->lc_data;
527 struct dt_object *obj;
529 fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
530 fid->f_oid = lfsck_dev_idx(dev);
532 obj = dt_locate(env, dev, fid);
534 RETURN(PTR_ERR(obj));
536 /* Generate an in-RAM object to stand for the layout rbtree.
537 * Scanning the layout rbtree will be via the iteration over
538 * the object. In the future, the rbtree may be written onto
539 * disk with the object.
541 * Mark the object to be as exist. */
542 obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
543 obj->do_index_ops = &lfsck_orphan_index_ops;
544 llsd->llsd_rb_obj = obj;
545 llsd->llsd_rbtree_valid = 1;
546 dev->dd_record_fid_accessed = 1;
/* Tear down the layout rbtree: stop FID-accessed recording, invalidate
 * the tree under write-lock so concurrent users bail out, free every
 * node, and release the in-RAM stand-in object. */
551 static void lfsck_rbtree_cleanup(const struct lu_env *env,
552 struct lfsck_component *com)
554 struct lfsck_instance *lfsck = com->lc_lfsck;
555 struct lfsck_layout_slave_data *llsd = com->lc_data;
556 struct rb_node *node = rb_first(&llsd->llsd_rb_root);
557 struct rb_node *next;
558 struct lfsck_rbtree_node *lrn;
560 lfsck->li_bottom->dd_record_fid_accessed = 0;
561 /* Invalidate the rbtree, then no others will use it. */
562 write_lock(&llsd->llsd_rb_lock);
563 llsd->llsd_rbtree_valid = 0;
564 write_unlock(&llsd->llsd_rb_lock);
566 while (node != NULL) {
567 next = rb_next(node);
568 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
569 rb_erase(node, &llsd->llsd_rb_root);
570 lfsck_rbtree_free(lrn);
574 if (llsd->llsd_rb_obj != NULL) {
575 lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
576 llsd->llsd_rb_obj = NULL;
/* Record @fid in the layout rbtree bitmaps: set its "known" bit and, when
 * @accessed, its "accessed" bit, creating the covering node on demand.
 * Node creation drops the read lock and retakes the write lock, re-checking
 * llsd_rbtree_valid in between. If a bitmap update fails while @accessed,
 * orphan handling for this device is disabled (LF_INCOMPLETE + cleanup)
 * rather than risking wrong orphan decisions. */
580 static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
581 struct lfsck_component *com,
582 const struct lu_fid *fid,
585 struct lfsck_layout_slave_data *llsd = com->lc_data;
586 struct lfsck_rbtree_node *lrn;
592 CDEBUG(D_LFSCK, "%s: update bitmap for "DFID"\n",
593 lfsck_lfsck2name(com->lc_lfsck), PFID(fid));
/* Ignore insane FIDs, LAST_ID objects, and non-IDIF/non-normal sequences. */
595 if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
598 if (!fid_is_idif(fid) && !fid_is_norm(fid))
601 read_lock(&llsd->llsd_rb_lock);
602 if (!llsd->llsd_rbtree_valid)
603 GOTO(unlock, rc = 0);
605 lrn = lfsck_rbtree_search(llsd, fid, NULL);
607 struct lfsck_rbtree_node *tmp;
/* No covering node: allocate one outside the lock, then re-validate. */
611 read_unlock(&llsd->llsd_rb_lock);
612 tmp = lfsck_rbtree_new(env, fid);
614 GOTO(out, rc = PTR_ERR(tmp));
617 write_lock(&llsd->llsd_rb_lock);
618 if (!llsd->llsd_rbtree_valid) {
619 lfsck_rbtree_free(tmp);
620 GOTO(unlock, rc = 0);
/* Insert may return an existing node; free our copy in that case. */
623 lrn = lfsck_rbtree_insert(llsd, tmp);
625 lfsck_rbtree_free(tmp);
628 idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
629 /* Any accessed object must be a known object. */
630 if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
631 atomic_inc(&lrn->lrn_known_count);
632 if (accessed && !test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
633 atomic_inc(&lrn->lrn_accessed_count);
635 GOTO(unlock, rc = 0);
639 write_unlock(&llsd->llsd_rb_lock);
641 read_unlock(&llsd->llsd_rb_lock);
643 if (rc != 0 && accessed) {
644 struct lfsck_layout *lo = com->lc_file_ram;
646 CERROR("%s: Fail to update object accessed bitmap, will cause "
647 "incorrect LFSCK OST-object handling, so disable it to "
648 "cancel orphan handling for related device. rc = %d.\n",
649 lfsck_lfsck2name(com->lc_lfsck), rc);
650 lo->ll_flags |= LF_INCOMPLETE;
651 lfsck_rbtree_cleanup(env, com);
/* A LOV stripe slot is "dummy" (hole) when its FID, generation, and OST
 * index are all zero. */
655 static inline bool is_dummy_lov_ost_data(struct lov_ost_data_v1 *obj)
657 if (fid_is_zero(&obj->l_ost_oi.oi_fid) &&
658 obj->l_ost_gen == 0 && obj->l_ost_idx == 0)
/* Convert an on-disk (little-endian) lfsck_layout record @src into CPU
 * byte order in @des, field by field. Mirror of lfsck_layout_cpu_to_le(). */
664 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
665 const struct lfsck_layout *src)
669 des->ll_magic = le32_to_cpu(src->ll_magic);
670 des->ll_status = le32_to_cpu(src->ll_status);
671 des->ll_flags = le32_to_cpu(src->ll_flags);
672 des->ll_success_count = le32_to_cpu(src->ll_success_count);
673 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
674 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
675 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
676 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
677 des->ll_time_last_checkpoint =
678 le64_to_cpu(src->ll_time_last_checkpoint);
679 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
680 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
681 des->ll_pos_first_inconsistent =
682 le64_to_cpu(src->ll_pos_first_inconsistent);
683 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
684 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
685 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
686 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
687 for (i = 0; i < LLIT_MAX; i++)
688 des->ll_objs_repaired[i] =
689 le64_to_cpu(src->ll_objs_repaired[i]);
690 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/* Convert a CPU byte-order lfsck_layout record @src into little-endian
 * on-disk form in @des. Mirror of lfsck_layout_le_to_cpu(). */
693 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
694 const struct lfsck_layout *src)
698 des->ll_magic = cpu_to_le32(src->ll_magic);
699 des->ll_status = cpu_to_le32(src->ll_status);
700 des->ll_flags = cpu_to_le32(src->ll_flags);
701 des->ll_success_count = cpu_to_le32(src->ll_success_count);
702 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
703 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
704 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
705 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
706 des->ll_time_last_checkpoint =
707 cpu_to_le64(src->ll_time_last_checkpoint);
708 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
709 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
710 des->ll_pos_first_inconsistent =
711 cpu_to_le64(src->ll_pos_first_inconsistent);
712 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
713 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
714 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
715 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
716 for (i = 0; i < LLIT_MAX; i++)
717 des->ll_objs_repaired[i] =
718 cpu_to_le64(src->ll_objs_repaired[i]);
719 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/* Load the lfsck_layout trace file from disk into com->lc_file_ram,
 * converting from little-endian and validating the magic.
 *
723 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
724 * \retval 0: succeed.
725 * \retval -ve: failed cases.
 */
727 static int lfsck_layout_load(const struct lu_env *env,
728 struct lfsck_component *com)
730 struct lfsck_layout *lo = com->lc_file_ram;
731 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
732 ssize_t size = com->lc_file_size;
736 rc = dbo->dbo_read(env, com->lc_obj,
737 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
742 CWARN("%s: failed to load lfsck_layout: rc = %d\n",
743 lfsck_lfsck2name(com->lc_lfsck), rc);
/* A short read means the record is corrupted: report "reset needed". */
745 } else if (rc != size) {
746 CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
747 lfsck_lfsck2name(com->lc_lfsck), rc);
751 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
752 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
753 CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
754 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
755 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/* Persist com->lc_file_ram to the trace file: convert to little-endian
 * into com->lc_file_disk, then declare + start a local transaction on the
 * bottom device and write the record. Each failure is logged; the
 * transaction is stopped on all paths. */
762 static int lfsck_layout_store(const struct lu_env *env,
763 struct lfsck_component *com)
765 struct dt_object *obj = com->lc_obj;
766 struct lfsck_instance *lfsck = com->lc_lfsck;
767 struct lfsck_layout *lo = com->lc_file_disk;
768 struct thandle *handle;
769 ssize_t size = com->lc_file_size;
774 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
775 handle = dt_trans_create(env, lfsck->li_bottom);
776 if (IS_ERR(handle)) {
777 rc = PTR_ERR(handle);
778 CERROR("%s: fail to create trans for storing lfsck_layout: "
779 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
783 rc = dt_declare_record_write(env, obj, lfsck_buf_get(env, lo, size),
786 CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
787 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
791 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
793 CERROR("%s: fail to start trans for storing lfsck_layout: "
794 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
798 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
801 CERROR("%s: fail to store lfsck_layout(1): size = %d, "
802 "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);
807 dt_trans_stop(env, lfsck->li_bottom, handle);
/* Reset the in-RAM lfsck_layout record to a fresh LS_INIT state and
 * persist it, serialized against other users via com->lc_sem. */
812 static int lfsck_layout_init(const struct lu_env *env,
813 struct lfsck_component *com)
815 struct lfsck_layout *lo = com->lc_file_ram;
818 memset(lo, 0, com->lc_file_size);
819 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
820 lo->ll_status = LS_INIT;
821 down_write(&com->lc_sem);
822 rc = lfsck_layout_store(env, com);
823 up_write(&com->lc_sem);
/* Decide whether @fid names an OST-object: first ask the FLD server for
 * the sequence's range type; if that is inconclusive, fall back to the
 * object's LMA (LMAC_FID_ON_OST compat flag), and finally probe for the
 * XATTR_NAME_FID filter fid. NOTE(review): truncated — the return values
 * of the FLD and final-probe paths are not fully visible here. */
828 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
829 struct dt_object *obj, const struct lu_fid *fid)
831 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
832 struct lu_seq_range range = { 0 };
833 struct lustre_mdt_attrs *lma;
836 fld_range_set_any(&range);
837 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
839 if (fld_range_is_ost(&range))
845 lma = &lfsck_env_info(env)->lti_lma;
846 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
847 XATTR_NAME_LMA, BYPASS_CAPA);
848 if (rc == sizeof(*lma)) {
849 lustre_lma_swab(lma);
851 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
854 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/* Find the lfsck_layout_seq for @seq in the (sorted ascending) per-slave
 * sequence list; the scan stops early once a larger sequence is seen. */
859 static struct lfsck_layout_seq *
860 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
862 struct lfsck_layout_seq *lls;
864 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
865 if (lls->lls_seq == seq)
868 if (lls->lls_seq > seq)
/* Insert @lls into the per-slave sequence list, keeping it sorted by
 * lls_seq ascending (insert before the first larger entry, else at tail). */
876 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
877 struct lfsck_layout_seq *lls)
879 struct lfsck_layout_seq *tmp;
880 struct list_head *pos = &llsd->llsd_seq_list;
882 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
883 if (lls->lls_seq < tmp->lls_seq) {
884 pos = &tmp->lls_list;
888 list_add_tail(&lls->lls_list, pos);
/* (Re)create a missing LAST_ID file for the sequence of @obj: declare the
 * create plus the initial record write in one local transaction, then
 * create and write under the object's write lock (skipping the create if
 * the object appeared concurrently). A dry-run LFSCK skips the repair. */
892 lfsck_layout_lastid_create(const struct lu_env *env,
893 struct lfsck_instance *lfsck,
894 struct dt_object *obj)
896 struct lfsck_thread_info *info = lfsck_env_info(env);
897 struct lu_attr *la = &info->lti_la;
898 struct dt_object_format *dof = &info->lti_dof;
899 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
900 struct dt_device *dt = lfsck->li_bottom;
907 CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
908 fid_seq(lfsck_dto2fid(obj)));
910 if (bk->lb_param & LPF_DRYRUN)
913 memset(la, 0, sizeof(*la));
914 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
915 la->la_valid = LA_MODE | LA_UID | LA_GID;
916 dof->dof_type = dt_mode_to_dft(S_IFREG);
918 th = dt_trans_create(env, dt);
920 RETURN(rc = PTR_ERR(th));
922 rc = dt_declare_create(env, obj, la, NULL, dof, th);
926 rc = dt_declare_record_write(env, obj,
927 lfsck_buf_get(env, &lastid,
933 rc = dt_trans_start_local(env, dt, th);
937 dt_write_lock(env, obj, 0);
938 if (likely(!dt_object_exists(obj))) {
939 rc = dt_create(env, obj, la, NULL, dof, th);
941 rc = dt_record_write(env, obj,
942 lfsck_buf_get(env, &lastid, sizeof(lastid)),
945 dt_write_unlock(env, obj);
950 dt_trans_stop(env, dt, th);
/* Re-read LAST_ID from disk for @lls. If the on-disk value fell behind
 * the highest object ID the LFSCK has seen (lls_lastid_known), adopt the
 * known value, flag LF_CRASHED_LASTID (notifying the layer above once),
 * and leave the entry to be re-synced; otherwise take the larger on-disk
 * value. NOTE(review): truncated — lls_dirty updates and the return are
 * not visible in this chunk. */
956 lfsck_layout_lastid_reload(const struct lu_env *env,
957 struct lfsck_component *com,
958 struct lfsck_layout_seq *lls)
964 dt_read_lock(env, lls->lls_lastid_obj, 0);
965 rc = dt_record_read(env, lls->lls_lastid_obj,
966 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
967 dt_read_unlock(env, lls->lls_lastid_obj);
968 if (unlikely(rc != 0))
971 lastid = le64_to_cpu(lastid);
972 if (lastid < lls->lls_lastid_known) {
973 struct lfsck_instance *lfsck = com->lc_lfsck;
974 struct lfsck_layout *lo = com->lc_file_ram;
976 lls->lls_lastid = lls->lls_lastid_known;
978 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
979 LASSERT(lfsck->li_out_notify != NULL);
981 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
982 LE_LASTID_REBUILDING);
983 lo->ll_flags |= LF_CRASHED_LASTID;
985 } else if (lastid >= lls->lls_lastid) {
986 lls->lls_lastid = lastid;
/* Walk every tracked sequence and write back its LAST_ID when dirty.
 * Clean entries are first re-validated via lfsck_layout_lastid_reload()
 * to catch OFD pre-creation races that can hide a crashed LAST_ID.
 * Dry-run mode skips the writes. Each write runs in its own local
 * transaction; failures are logged per-sequence and do not stop the walk
 * (per the visible structure — the loop tail is truncated). */
994 lfsck_layout_lastid_store(const struct lu_env *env,
995 struct lfsck_component *com)
997 struct lfsck_instance *lfsck = com->lc_lfsck;
998 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
999 struct dt_device *dt = lfsck->li_bottom;
1000 struct lfsck_layout_slave_data *llsd = com->lc_data;
1001 struct lfsck_layout_seq *lls;
1007 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
1010 /* XXX: Add the code back if we really found related
1011 * inconsistent cases in the future. */
1013 if (!lls->lls_dirty) {
1014 /* In OFD, before the pre-creation, the LAST_ID
1015 * file will be updated firstly, which may hide
1016 * some potential crashed cases. For example:
1018 * The old obj1's ID is higher than old LAST_ID
1019 * but lower than the new LAST_ID, but the LFSCK
1020 * have not touch the obj1 until the OFD updated
1021 * the LAST_ID. So the LFSCK does not regard it
1022 * as crashed case. But when OFD does not create
1023 * successfully, it will set the LAST_ID as the
1024 * real created objects' ID, then LFSCK needs to
1025 * find related inconsistency. */
1026 rc = lfsck_layout_lastid_reload(env, com, lls);
1027 if (likely(!lls->lls_dirty))
1032 CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
1033 " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);
1035 if (bk->lb_param & LPF_DRYRUN) {
1040 th = dt_trans_create(env, dt);
1043 CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
1044 lfsck_lfsck2name(com->lc_lfsck),
1049 lastid = cpu_to_le64(lls->lls_lastid);
1050 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
1051 lfsck_buf_get(env, &lastid,
1057 rc = dt_trans_start_local(env, dt, th);
1061 dt_write_lock(env, lls->lls_lastid_obj, 0);
1062 rc = dt_record_write(env, lls->lls_lastid_obj,
1063 lfsck_buf_get(env, &lastid,
1064 sizeof(lastid)), &pos, th);
1065 dt_write_unlock(env, lls->lls_lastid_obj);
1070 dt_trans_stop(env, dt, th);
1073 CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
1074 lfsck_lfsck2name(com->lc_lfsck),
/* Locate and read the LAST_ID file for @lls->lls_seq. A missing file or
 * a short/zero read marks the LAST_ID as crashed (LF_CRASHED_LASTID +
 * one-shot notify) and triggers recreation via
 * lfsck_layout_lastid_create(); the OBD_FAIL_LFSCK_DELAY4 fault point can
 * pause here (dropping lc_sem) for testing. On success the little-endian
 * value is converted in place and the object cached in lls_lastid_obj. */
1083 lfsck_layout_lastid_load(const struct lu_env *env,
1084 struct lfsck_component *com,
1085 struct lfsck_layout_seq *lls)
1087 struct lfsck_instance *lfsck = com->lc_lfsck;
1088 struct lfsck_layout *lo = com->lc_file_ram;
1089 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1090 struct dt_object *obj;
1095 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
1096 obj = dt_locate(env, lfsck->li_bottom, fid);
1098 RETURN(PTR_ERR(obj));
1100 /* LAST_ID crashed, to be rebuilt */
1101 if (!dt_object_exists(obj)) {
1102 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1103 LASSERT(lfsck->li_out_notify != NULL);
1105 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1106 LE_LASTID_REBUILDING);
1107 lo->ll_flags |= LF_CRASHED_LASTID;
1109 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
1111 struct l_wait_info lwi = LWI_TIMEOUT(
1112 cfs_time_seconds(cfs_fail_val),
/* Drop lc_sem while waiting so the main thread is not blocked. */
1115 up_write(&com->lc_sem);
1116 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1117 !thread_is_running(&lfsck->li_thread),
1119 down_write(&com->lc_sem);
1123 rc = lfsck_layout_lastid_create(env, lfsck, obj);
1125 dt_read_lock(env, obj, 0);
1126 rc = dt_read(env, obj,
1127 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
1129 dt_read_unlock(env, obj);
/* Anything other than a full 8-byte read is corruption. */
1130 if (rc != 0 && rc != sizeof(__u64))
1131 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
1133 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
1134 LASSERT(lfsck->li_out_notify != NULL);
1136 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1137 LE_LASTID_REBUILDING);
1138 lo->ll_flags |= LF_CRASHED_LASTID;
1141 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
1149 lfsck_object_put(env, obj);
1151 lls->lls_lastid_obj = obj;
/* Account one phase-1 failure and remember the iterator position of the
 * first inconsistency (only if none was recorded yet). */
1156 static void lfsck_layout_record_failure(const struct lu_env *env,
1157 struct lfsck_instance *lfsck,
1158 struct lfsck_layout *lo)
1160 lo->ll_objs_failed_phase1++;
1161 if (unlikely(lo->ll_pos_first_inconsistent == 0))
1162 lo->ll_pos_first_inconsistent =
1163 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
/* Completion callback for async LFSCK RPCs sent by the layout master.
 * Per lr_event:
 *  - start: on failure mark LF_INCOMPLETE; on success link the target
 *    onto the proper OST/MDT tracking and phase-1 lists (unless it died
 *    or already finished);
 *  - phase1/phase2 done, peer exit: just warn on unexpected errors;
 *  - query: parse the reply status — a target still in phase 1 stays
 *    put, one in phase 2 is moved to the phase-2 list, anything else is
 *    dropped from both lists; a missing reply drops the target too.
 * Finally releases the per-RPC references unless laia_shared.
 * NOTE(review): lines are dropped throughout (case labels, unlock/put
 * calls); the mangled "<d"/"<ds" tokens are artifacts of this extract. */
1167 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
1168 struct ptlrpc_request *req,
1171 struct lfsck_async_interpret_args *laia = args;
1172 struct lfsck_component *com = laia->laia_com;
1173 struct lfsck_layout_master_data *llmd = com->lc_data;
1174 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
1175 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
1176 struct lfsck_request *lr = laia->laia_lr;
1178 switch (lr->lr_event) {
1181 struct lfsck_layout *lo = com->lc_file_ram;
1183 CERROR("%s: fail to notify %s %x for layout start: "
1184 "rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
1185 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1186 ltd->ltd_index, rc);
1187 lo->ll_flags |= LF_INCOMPLETE;
1191 spin_lock(<ds->ltd_lock);
1192 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1193 spin_unlock(<ds->ltd_lock);
1197 if (lr->lr_flags & LEF_TO_OST) {
1198 if (list_empty(<d->ltd_layout_list))
1199 list_add_tail(<d->ltd_layout_list,
1200 &llmd->llmd_ost_list);
1201 if (list_empty(<d->ltd_layout_phase_list))
1202 list_add_tail(<d->ltd_layout_phase_list,
1203 &llmd->llmd_ost_phase1_list);
1205 if (list_empty(<d->ltd_layout_list))
1206 list_add_tail(<d->ltd_layout_list,
1207 &llmd->llmd_mdt_list);
1208 if (list_empty(<d->ltd_layout_phase_list))
1209 list_add_tail(<d->ltd_layout_phase_list,
1210 &llmd->llmd_mdt_phase1_list);
1212 spin_unlock(<ds->ltd_lock);
1215 case LE_PHASE1_DONE:
1216 case LE_PHASE2_DONE:
1218 if (rc != 0 && rc != -EALREADY)
1219 CWARN("%s: fail to notify %s %x for layout: "
1220 "event = %d, rc = %d\n",
1221 lfsck_lfsck2name(com->lc_lfsck),
1222 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1223 ltd->ltd_index, lr->lr_event, rc);
1226 struct lfsck_reply *reply;
1229 spin_lock(<ds->ltd_lock);
1230 list_del_init(<d->ltd_layout_phase_list);
1231 list_del_init(<d->ltd_layout_list);
1232 spin_unlock(<ds->ltd_lock);
1236 reply = req_capsule_server_get(&req->rq_pill,
1238 if (reply == NULL) {
1240 CERROR("%s: invalid return value: rc = %d\n",
1241 lfsck_lfsck2name(com->lc_lfsck), rc);
1242 spin_lock(<ds->ltd_lock);
1243 list_del_init(<d->ltd_layout_phase_list);
1244 list_del_init(<d->ltd_layout_list);
1245 spin_unlock(<ds->ltd_lock);
1249 switch (reply->lr_status) {
1250 case LS_SCANNING_PHASE1:
1252 case LS_SCANNING_PHASE2:
1253 spin_lock(<ds->ltd_lock);
1254 list_del_init(<d->ltd_layout_phase_list);
1255 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1256 spin_unlock(<ds->ltd_lock);
1260 if (lr->lr_flags & LEF_TO_OST)
1261 list_add_tail(<d->ltd_layout_phase_list,
1262 &llmd->llmd_ost_phase2_list);
1264 list_add_tail(<d->ltd_layout_phase_list,
1265 &llmd->llmd_mdt_phase2_list);
1266 spin_unlock(<ds->ltd_lock);
1269 spin_lock(<ds->ltd_lock);
1270 list_del_init(<d->ltd_layout_phase_list);
1271 list_del_init(<d->ltd_layout_list);
1272 spin_unlock(<ds->ltd_lock);
1278 CERROR("%s: unexpected event: rc = %d\n",
1279 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
1283 if (!laia->laia_shared) {
1285 lfsck_component_put(env, com);
/* Send an LE_QUERY to every target still in phase 1 (MDTs first, then
 * OSTs if no MDTs remain) and wait for all replies. The llmd_touch_gen
 * generation stamp plus the move-to-tail under ltd_lock ensure each
 * target is queried exactly once per round even though the lock is
 * dropped around each async send. Replies are handled by
 * lfsck_layout_master_async_interpret(). Returns the first error, if
 * any, else the wait result. NOTE(review): truncated — the MDT→OST
 * fall-through and ref-drop lines are only partially visible. */
1291 static int lfsck_layout_master_query_others(const struct lu_env *env,
1292 struct lfsck_component *com)
1294 struct lfsck_thread_info *info = lfsck_env_info(env);
1295 struct lfsck_request *lr = &info->lti_lr;
1296 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1297 struct lfsck_instance *lfsck = com->lc_lfsck;
1298 struct lfsck_layout_master_data *llmd = com->lc_data;
1299 struct ptlrpc_request_set *set;
1300 struct lfsck_tgt_descs *ltds;
1301 struct lfsck_tgt_desc *ltd;
1302 struct list_head *head;
1307 set = ptlrpc_prep_set();
1311 llmd->llmd_touch_gen++;
1312 memset(lr, 0, sizeof(*lr));
1313 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1314 lr->lr_event = LE_QUERY;
1315 lr->lr_active = LT_LAYOUT;
1316 laia->laia_com = com;
1318 laia->laia_shared = 0;
1320 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1321 ltds = &lfsck->li_mdt_descs;
1323 head = &llmd->llmd_mdt_phase1_list;
1327 ltds = &lfsck->li_ost_descs;
1328 lr->lr_flags = LEF_TO_OST;
1329 head = &llmd->llmd_ost_phase1_list;
1332 laia->laia_ltds = ltds;
1333 spin_lock(<ds->ltd_lock);
1334 while (!list_empty(head)) {
1335 ltd = list_entry(head->next,
1336 struct lfsck_tgt_desc,
1337 ltd_layout_phase_list);
/* Already touched in this round — every remaining entry has been too. */
1338 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1341 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1342 list_del(<d->ltd_layout_phase_list);
1343 list_add_tail(<d->ltd_layout_phase_list, head);
1344 atomic_inc(<d->ltd_ref);
1345 laia->laia_ltd = ltd;
1346 spin_unlock(<ds->ltd_lock);
1347 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1348 lfsck_layout_master_async_interpret,
1351 CERROR("%s: fail to query %s %x for layout: rc = %d\n",
1352 lfsck_lfsck2name(lfsck),
1353 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1354 ltd->ltd_index, rc);
1358 spin_lock(<ds->ltd_lock);
1360 spin_unlock(<ds->ltd_lock);
1362 rc = ptlrpc_set_wait(set);
1364 ptlrpc_set_destroy(set);
/* After the MDT round, repeat for the OSTs if the MDT list drained. */
1368 if (!(lr->lr_flags & LEF_TO_OST) &&
1369 list_empty(&llmd->llmd_mdt_phase1_list))
1372 ptlrpc_set_destroy(set);
1374 RETURN(rc1 != 0 ? rc1 : rc);
/* The master may move on to orphan handling once no MDT is still in
 * phase 1 and the OSTs have either reached phase 2 or all finished. */
1378 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1380 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1381 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1382 list_empty(&llmd->llmd_ost_phase1_list));
/* Broadcast the LFSCK event in @lr (dispatched on lr->lr_event below) from
 * the layout master to the other targets via async LFSCK_NOTIFY requests,
 * then wait for the request set to complete.
 *
 * NOTE(review): this listing is elided (original line numbers jump), so the
 * error paths and several statements between the visible lines are missing.
 * NOTE(review): sequences like "<ds->" / "<d->" appear to be HTML-mangled
 * "&ltds->" / "&ltd->" (address-of operator) — restore before compiling. */
1385 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1386 struct lfsck_component *com,
1387 struct lfsck_request *lr)
1389 struct lfsck_thread_info *info = lfsck_env_info(env);
1390 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1391 struct lfsck_instance *lfsck = com->lc_lfsck;
1392 struct lfsck_layout_master_data *llmd = com->lc_data;
1393 struct lfsck_layout *lo = com->lc_file_ram;
1394 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1395 struct ptlrpc_request_set *set;
1396 struct lfsck_tgt_descs *ltds;
1397 struct lfsck_tgt_desc *ltd;
1398 struct lfsck_tgt_desc *next;
1399 struct list_head *head;
1404 set = ptlrpc_prep_set();
/* Common request fields: sender index and the layout component. */
1408 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1409 lr->lr_active = LT_LAYOUT;
1410 laia->laia_com = com;
1412 laia->laia_shared = 0;
1413 switch (lr->lr_event) {
1415 /* Notify OSTs firstly, then handle other MDTs if needed. */
1416 ltds = &lfsck->li_ost_descs;
1417 laia->laia_ltds = ltds;
1418 down_read(<ds->ltd_rw_sem);
1419 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1420 ltd = lfsck_tgt_get(ltds, idx);
1421 LASSERT(ltd != NULL);
1423 laia->laia_ltd = ltd;
1424 ltd->ltd_layout_done = 0;
1425 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1426 lfsck_layout_master_async_interpret,
1427 laia, LFSCK_NOTIFY);
1429 CERROR("%s: fail to notify %s %x for layout "
1431 lfsck_lfsck2name(lfsck),
1432 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* A target we failed to notify makes the scan incomplete. */
1435 lo->ll_flags |= LF_INCOMPLETE;
1438 up_read(<ds->ltd_rw_sem);
1441 rc = ptlrpc_set_wait(set);
1443 ptlrpc_set_destroy(set);
1447 if (!(bk->lb_param & LPF_ALL_TGT))
1450 /* link other MDT targets locally. */
1451 spin_lock(<ds->ltd_lock);
1452 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1453 ltd = LTD_TGT(ltds, idx);
1454 LASSERT(ltd != NULL);
1456 if (!list_empty(<d->ltd_layout_list))
1459 list_add_tail(<d->ltd_layout_list,
1460 &llmd->llmd_mdt_list);
1461 list_add_tail(<d->ltd_layout_phase_list,
1462 &llmd->llmd_mdt_phase1_list);
1464 spin_unlock(<ds->ltd_lock);
1467 case LE_PHASE2_DONE:
1468 case LE_PEER_EXIT: {
1469 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1470 if (bk->lb_param & LPF_ALL_TGT) {
1471 head = &llmd->llmd_mdt_list;
1472 ltds = &lfsck->li_mdt_descs;
1473 if (lr->lr_event == LE_STOP) {
1474 /* unlink other MDT targets locally. */
1475 spin_lock(<ds->ltd_lock);
1476 list_for_each_entry_safe(ltd, next, head,
1478 list_del_init(<d->ltd_layout_phase_list);
1479 list_del_init(<d->ltd_layout_list);
1481 spin_unlock(<ds->ltd_lock);
1483 lr->lr_flags |= LEF_TO_OST;
1484 head = &llmd->llmd_ost_list;
1485 ltds = &lfsck->li_ost_descs;
1487 lr->lr_flags &= ~LEF_TO_OST;
1490 lr->lr_flags |= LEF_TO_OST;
1491 head = &llmd->llmd_ost_list;
1492 ltds = &lfsck->li_ost_descs;
/* Drain @head: pop each target, drop the lock while issuing the
 * async notify, re-take it for the next iteration. */
1496 laia->laia_ltds = ltds;
1497 spin_lock(<ds->ltd_lock);
1498 while (!list_empty(head)) {
1499 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1501 if (!list_empty(<d->ltd_layout_phase_list))
1502 list_del_init(<d->ltd_layout_phase_list);
1503 list_del_init(<d->ltd_layout_list);
1504 atomic_inc(<d->ltd_ref);
1505 laia->laia_ltd = ltd;
1506 spin_unlock(<ds->ltd_lock);
1507 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1508 lfsck_layout_master_async_interpret,
1509 laia, LFSCK_NOTIFY);
1511 CERROR("%s: fail to notify %s %x for layout "
1512 "stop/phase2: rc = %d\n",
1513 lfsck_lfsck2name(lfsck),
1514 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1515 "MDT", ltd->ltd_index, rc);
1518 spin_lock(<ds->ltd_lock);
1520 spin_unlock(<ds->ltd_lock);
1522 rc = ptlrpc_set_wait(set);
1524 ptlrpc_set_destroy(set);
/* After the MDT pass, repeat the loop for the OST list. */
1528 if (!(lr->lr_flags & LEF_TO_OST)) {
1529 lr->lr_flags |= LEF_TO_OST;
1530 head = &llmd->llmd_ost_list;
1531 ltds = &lfsck->li_ost_descs;
1536 case LE_PHASE1_DONE:
/* Bump the touch generation so each MDT in phase1 is visited
 * exactly once even though entries are re-queued at the tail. */
1537 llmd->llmd_touch_gen++;
1538 ltds = &lfsck->li_mdt_descs;
1539 laia->laia_ltds = ltds;
1540 spin_lock(<ds->ltd_lock);
1541 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1542 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1543 struct lfsck_tgt_desc,
1544 ltd_layout_phase_list);
1545 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1548 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1549 list_del_init(<d->ltd_layout_phase_list);
1550 list_add_tail(<d->ltd_layout_phase_list,
1551 &llmd->llmd_mdt_phase1_list);
1552 atomic_inc(<d->ltd_ref);
1553 laia->laia_ltd = ltd;
1554 spin_unlock(<ds->ltd_lock);
1555 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1556 lfsck_layout_master_async_interpret,
1557 laia, LFSCK_NOTIFY);
1559 CERROR("%s: fail to notify MDT %x for layout "
1560 "phase1 done: rc = %d\n",
1561 lfsck_lfsck2name(lfsck),
1562 ltd->ltd_index, rc);
1565 spin_lock(<ds->ltd_lock);
1567 spin_unlock(<ds->ltd_lock);
1570 CERROR("%s: unexpected LFSCK event: rc = %d\n",
1571 lfsck_lfsck2name(lfsck), lr->lr_event);
1576 rc = ptlrpc_set_wait(set);
1577 ptlrpc_set_destroy(set);
/* Record the outcome of the layout double-scan (phase2) under com->lc_sem:
 * accumulate phase2 run time and checked-object counters, derive the final
 * status (LS_PARTIAL / LS_COMPLETED / LS_STOPPED / LS_FAILED) from the
 * LF_INCOMPLETE flag and @rc, then persist via lfsck_layout_store().
 * NOTE(review): listing is elided; some branches/braces are not visible. */
1582 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1583 struct lfsck_component *com,
1586 struct lfsck_instance *lfsck = com->lc_lfsck;
1587 struct lfsck_layout *lo = com->lc_file_ram;
1588 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1590 down_write(&com->lc_sem);
/* HALF_SEC is presumably added for rounding — confirm against helpers. */
1591 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1592 HALF_SEC - lfsck->li_time_last_checkpoint);
1593 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1594 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1597 com->lc_journal = 0;
1598 if (lo->ll_flags & LF_INCOMPLETE)
1599 lo->ll_status = LS_PARTIAL;
1601 lo->ll_status = LS_COMPLETED;
/* A real (non-dryrun) completed run clears the inconsistency flags. */
1602 if (!(bk->lb_param & LPF_DRYRUN))
1603 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1604 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1605 lo->ll_success_count++;
1606 } else if (rc == 0) {
1607 lo->ll_status = lfsck->li_status;
1608 if (lo->ll_status == 0)
1609 lo->ll_status = LS_STOPPED;
1611 lo->ll_status = LS_FAILED;
1614 rc = lfsck_layout_store(env, com);
1615 up_write(&com->lc_sem);
/* Take a local server-side LDLM EX inodebits lock (@bits) on @obj, storing
 * the handle in @lh. On failure the handle is zeroed so that
 * lfsck_layout_unlock() on it is a no-op. */
1620 static int lfsck_layout_lock(const struct lu_env *env,
1621 struct lfsck_component *com,
1622 struct dt_object *obj,
1623 struct lustre_handle *lh, __u64 bits)
1625 struct lfsck_thread_info *info = lfsck_env_info(env);
1626 ldlm_policy_data_t *policy = &info->lti_policy;
1627 struct ldlm_res_id *resid = &info->lti_resid;
1628 struct lfsck_instance *lfsck = com->lc_lfsck;
1629 __u64 flags = LDLM_FL_ATOMIC_CB;
1632 LASSERT(lfsck->li_namespace != NULL);
1634 memset(policy, 0, sizeof(*policy));
1635 policy->l_inodebits.bits = bits;
/* Build the resource id from the object's FID, then enqueue locally. */
1636 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1637 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1638 policy, LCK_EX, &flags, ldlm_blocking_ast,
1639 ldlm_completion_ast, NULL, NULL, 0,
1640 LVB_T_NONE, NULL, lh);
1641 if (rc == ELDLM_OK) {
1644 memset(lh, 0, sizeof(*lh));
/* Release the EX lock taken by lfsck_layout_lock(); zero the handle so a
 * second call is harmless. No-op when the handle is unused. */
1651 static void lfsck_layout_unlock(struct lustre_handle *lh)
1653 if (lustre_handle_is_used(lh)) {
1654 ldlm_lock_decref(lh, LCK_EX);
1655 memset(lh, 0, sizeof(*lh));
/* Stop @handle after recording @result in th_result so the transaction
 * commit/abort decision reflects the caller's outcome.
 * NOTE(review): the tail of the function (return handling) is elided. */
1659 static int lfsck_layout_trans_stop(const struct lu_env *env,
1660 struct dt_device *dev,
1661 struct thandle *handle, int result)
1665 handle->th_result = result;
1666 rc = dt_trans_stop(env, dev, handle);
/* Fill one lov_ost_data_v1 slot of @parent's layout EA with the OST-object
 * @cfid / @ost_idx and write the EA back with dt_xattr_set(@fl).
 *
 * \retval +1: repaired
 * \retval 0: did nothing
 * \retval -ve: on error
 */
1680 static int lfsck_layout_refill_lovea(const struct lu_env *env,
1681 struct thandle *handle,
1682 struct dt_object *parent,
1683 struct lu_fid *cfid,
1685 struct lov_ost_data_v1 *slot,
1686 int fl, __u32 ost_idx)
1688 struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
/* Convert the child FID to an ostid and store it little-endian in @slot. */
1691 fid_to_ostid(cfid, oi);
1692 ostid_cpu_to_le(oi, &slot->l_ost_oi);
1693 slot->l_ost_gen = cpu_to_le32(0);
1694 slot->l_ost_idx = cpu_to_le32(ost_idx);
1695 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
/* Extend (or create, when @fl == LU_XATTR_CREATE) @parent's layout EA so
 * that stripe slot @ea_off references the OST-object @cfid on @ost_idx;
 * any gap slots between the old stripe count and @ea_off are zero-filled
 * dummies. Finishes via lfsck_layout_refill_lovea().
 *
 * \retval +1: repaired
 * \retval 0: did nothing
 * \retval -ve: on error
 */
1708 static int lfsck_layout_extend_lovea(const struct lu_env *env,
1709 struct thandle *handle,
1710 struct dt_object *parent,
1711 struct lu_fid *cfid,
1712 struct lu_buf *buf, int fl,
1713 __u32 ost_idx, __u32 ea_off)
1715 struct lov_mds_md_v1 *lmm = buf->lb_buf;
1716 struct lov_ost_data_v1 *objs;
1720 if (fl == LU_XATTR_CREATE) {
1721 LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1,
/* Fresh EA: build a minimal LOV_MAGIC_V1 header from scratch. */
1724 memset(lmm, 0, buf->lb_len);
1725 lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
1726 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
1727 lmm->lmm_pattern = cpu_to_le32(LOV_PATTERN_RAID0);
1728 fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
1729 lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
1730 /* XXX: We cannot know the stripe size,
1731 * then use the default value (1 MB). */
1732 lmm->lmm_stripe_size =
1733 cpu_to_le32(LOV_DESC_STRIPE_SIZE_DEFAULT);
1734 objs = &(lmm->lmm_objects[ea_off]);
1736 __u16 count = le16_to_cpu(lmm->lmm_stripe_count);
1737 int gap = ea_off - count;
1738 __u32 magic = le32_to_cpu(lmm->lmm_magic);
1740 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3
1741 * which has been verified in lfsck_layout_verify_header()
1742 * already. If some new magic introduced in the future,
1743 * then layout LFSCK needs to be updated also. */
1744 if (magic == LOV_MAGIC_V1) {
1745 objs = &(lmm->lmm_objects[count]);
1747 LASSERT(magic == LOV_MAGIC_V3);
1748 objs = &((struct lov_mds_md_v3 *)lmm)->
/* Zero the gap slots and bump the layout generation. */
1753 memset(objs, 0, gap * sizeof(*objs));
1754 lmm->lmm_layout_gen =
1755 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
1758 LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1, magic));
1761 lmm->lmm_stripe_count = cpu_to_le16(ea_off + 1);
1762 rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
/* Rewrite the OST-object @cfid's parent-FID xattr (XATTR_NAME_FID) so that
 * it points back at @parent with stripe index @ea_off, inside a transaction
 * on the child's device @cdev.
 *
 * \retval +1: repaired
 * \retval 0: did nothing
 * \retval -ve: on error
 */
1773 static int lfsck_layout_update_pfid(const struct lu_env *env,
1774 struct lfsck_component *com,
1775 struct dt_object *parent,
1776 struct lu_fid *cfid,
1777 struct dt_device *cdev, __u32 ea_off)
1779 struct filter_fid *pfid = &lfsck_env_info(env)->lti_new_pfid;
1780 struct dt_object *child;
1781 struct thandle *handle;
1782 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1787 child = lfsck_object_find_by_dev(env, cdev, cfid);
1789 RETURN(PTR_ERR(child));
1791 handle = dt_trans_create(env, cdev);
1793 GOTO(out, rc = PTR_ERR(handle));
1795 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1796 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1797 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
1798 * MDT-object's FID::f_ver, instead it is the OST-object index in its
1799 * parent MDT-object's layout EA. */
1800 pfid->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
1801 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
/* Usual declare / start / execute transaction sequence. */
1803 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1807 rc = dt_trans_start(env, cdev, handle);
1811 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
1814 GOTO(stop, rc = (rc == 0 ? 1 : rc));
1817 dt_trans_stop(env, cdev, handle);
1820 lu_object_put(env, &child->do_lu);
/* Re-create a lost MDT-object under .lustre/lost+found/MDTxxxx/ for the
 * orphan OST-object @cfid: allocate/choose the parent FID, build a name from
 * prefix + FID + postfix (retrying on name collision), then in one
 * transaction (a) stamp the child's parent xattr, (b) create the MDT-object,
 * (c) install its layout EA, (d) insert it into lost+found.
 *
 * \retval +1: repaired
 * \retval 0: did nothing
 * \retval -ve: on error
 *
 * NOTE(review): this listing is elided; most error-branch lines and some
 * declarations (e.g. @prefix in the parameter list) are not visible. */
1830 static int lfsck_layout_recreate_parent(const struct lu_env *env,
1831 struct lfsck_component *com,
1832 struct lfsck_tgt_desc *ltd,
1833 struct lu_orphan_rec *rec,
1834 struct lu_fid *cfid,
1836 const char *postfix,
1839 struct lfsck_thread_info *info = lfsck_env_info(env);
1840 char *name = info->lti_key;
1841 struct lu_attr *la = &info->lti_la;
1842 struct dt_object_format *dof = &info->lti_dof;
1843 struct lfsck_instance *lfsck = com->lc_lfsck;
1844 struct lu_fid *pfid = &rec->lor_fid;
1845 struct lu_fid *tfid = &info->lti_fid3;
1846 struct dt_device *next = lfsck->li_next;
1847 struct dt_object *pobj = NULL;
1848 struct dt_object *cobj = NULL;
1849 struct thandle *th = NULL;
1850 struct lu_buf *pbuf = NULL;
1851 struct lu_buf *ea_buf = &info->lti_big_buf;
1852 struct lustre_handle lh = { 0 };
1853 int buflen = ea_buf->lb_len;
1858 /* Create .lustre/lost+found/MDTxxxx when needed. */
1859 if (unlikely(lfsck->li_lpf_obj == NULL)) {
1860 rc = lfsck_create_lpf(env, lfsck);
/* No known parent FID: allocate a fresh one and prepare the child's
 * parent-FID xattr buffer so the OST-object can be re-linked to it. */
1865 if (fid_is_zero(pfid)) {
1866 struct filter_fid *ff = &info->lti_new_pfid;
1868 rc = lfsck_fid_alloc(env, lfsck, pfid, false);
1872 ff->ff_parent.f_seq = cpu_to_le64(pfid->f_seq);
1873 ff->ff_parent.f_oid = cpu_to_le32(pfid->f_oid);
1874 /* Currently, the filter_fid::ff_parent::f_ver is not the
1875 * real parent MDT-object's FID::f_ver, instead it is the
1876 * OST-object index in its parent MDT-object's layout EA. */
1877 ff->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
1878 pbuf = lfsck_buf_get(env, ff, sizeof(struct filter_fid));
1879 cobj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, cfid);
1881 RETURN(PTR_ERR(cobj));
1884 CDEBUG(D_LFSCK, "Re-create the lost MDT-object: parent "
1885 DFID", child "DFID", OST-index %u, stripe-index %u, "
1886 "prefix %s, postfix %s\n",
1887 PFID(pfid), PFID(cfid), ltd->ltd_index, ea_off, prefix, postfix);
1889 pobj = lfsck_object_find_by_dev(env, lfsck->li_bottom, pfid);
1891 GOTO(put, rc = PTR_ERR(pobj));
1893 LASSERT(prefix != NULL);
1894 LASSERT(postfix != NULL);
1898 * 1. Use the MDT-object's FID as the name with prefix and postfix.
1900 * 1.1 prefix "C-": More than one OST-objects claim the same
1901 * MDT-object and the same slot in the layout EA.
1902 * It may be created for dangling referenced MDT
1903 * object or may be not.
1904 * 1.2 prefix "N-": The orphan OST-object does not know which one
1905 * is the real parent, so the LFSCK assign a new
1906 * FID as its parent.
1907 * 1.3 prefix "R-": The orphan OST-object know its parent FID but
1908 * does not know the position in the namespace.
1910 * 2. If there is name conflict, append more index for new name. */
1911 sprintf(name, "%s"DFID"%s", prefix, PFID(pfid), postfix);
1913 rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
1914 (const struct dt_key *)name, BYPASS_CAPA);
1915 if (rc != 0 && rc != -ENOENT)
1918 if (unlikely(rc == 0)) {
1919 CWARN("%s: The name %s under lost+found has been used "
1920 "by the "DFID". Try to increase the FID version "
1921 "for the new file name.\n",
1922 lfsck_lfsck2name(lfsck), name, PFID(tfid));
1923 sprintf(name, "%s"DFID"%s-%d", prefix, PFID(pfid),
/* Ownership/mode for the re-created MDT-object come from the orphan
 * record; mode is a plain regular file. */
1928 memset(la, 0, sizeof(*la));
1929 la->la_uid = rec->lor_uid;
1930 la->la_gid = rec->lor_gid;
1931 la->la_mode = S_IFREG | S_IRUSR | S_IWUSR;
1932 la->la_valid = LA_MODE | LA_UID | LA_GID;
1934 memset(dof, 0, sizeof(*dof));
1935 dof->dof_type = dt_mode_to_dft(S_IFREG);
/* Size the layout EA buffer for ea_off + 1 stripes; the original
 * lb_len is restored at the end (see buflen). */
1937 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
1939 lu_buf_realloc(ea_buf, rc);
1940 buflen = ea_buf->lb_len;
1941 if (ea_buf->lb_buf == NULL)
1942 GOTO(put, rc = -ENOMEM);
1944 ea_buf->lb_len = rc;
1947 /* Hold update lock on the .lustre/lost+found/MDTxxxx/.
1949 * XXX: Currently, we do not grab the PDO lock as normal create cases,
1950 * because creating MDT-object for orphan OST-object is rare, we
1951 * do not much care about the performance. It can be improved in
1952 * the future when needed. */
1953 rc = lfsck_layout_lock(env, com, lfsck->li_lpf_obj, &lh,
1954 MDS_INODELOCK_UPDATE);
1958 th = dt_trans_create(env, next);
1960 GOTO(unlock, rc = PTR_ERR(th));
1962 /* 1a. Update OST-object's parent information remotely.
1964 * If other subsequent modifications failed, then next LFSCK scanning
1965 * will process the OST-object as orphan again with known parent FID. */
1967 rc = dt_declare_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th);
1972 /* 2a. Create the MDT-object locally. */
1973 rc = dt_declare_create(env, pobj, la, NULL, dof, th);
1977 /* 3a. Add layout EA for the MDT-object. */
1978 rc = dt_declare_xattr_set(env, pobj, ea_buf, XATTR_NAME_LOV,
1979 LU_XATTR_CREATE, th);
1983 /* 4a. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
1984 rc = dt_declare_insert(env, lfsck->li_lpf_obj,
1985 (const struct dt_rec *)pfid,
1986 (const struct dt_key *)name, th);
1990 rc = dt_trans_start(env, next, th);
1994 /* 1b. Update OST-object's parent information remotely. */
1996 rc = dt_xattr_set(env, cobj, pbuf, XATTR_NAME_FID, 0, th,
2002 dt_write_lock(env, pobj, 0);
2003 /* 2b. Create the MDT-object locally. */
2004 rc = dt_create(env, pobj, la, NULL, dof, th);
2006 /* 3b. Add layout EA for the MDT-object. */
2007 rc = lfsck_layout_extend_lovea(env, th, pobj, cfid, ea_buf,
2008 LU_XATTR_CREATE, ltd->ltd_index,
2010 dt_write_unlock(env, pobj);
2014 /* 4b. Insert the MDT-object to .lustre/lost+found/MDTxxxx/ */
2015 rc = dt_insert(env, lfsck->li_lpf_obj,
2016 (const struct dt_rec *)pfid,
2017 (const struct dt_key *)name, th, BYPASS_CAPA, 1);
2022 dt_trans_stop(env, next, th);
2025 lfsck_layout_unlock(&lh);
2028 if (cobj != NULL && !IS_ERR(cobj))
2029 lu_object_put(env, &cobj->do_lu);
2030 if (pobj != NULL && !IS_ERR(pobj))
2031 lu_object_put(env, &pobj->do_lu);
2032 ea_buf->lb_len = buflen;
2034 return rc >= 0 ? 1 : rc;
/* Ask the OST @index (synchronously, via an LFSCK_NOTIFY RPC carrying
 * LE_CONDITIONAL_DESTROY) to destroy the OST-object @fid if — and only if —
 * the slave-side checks allow it. Requires the export to support
 * OBD_CONNECT_LFSCK.
 * NOTE(review): listing is elided; @exp assignment and the reply handling
 * after ptlrpc_queue_wait() are not visible. */
2037 static int lfsck_layout_master_conditional_destroy(const struct lu_env *env,
2038 struct lfsck_component *com,
2039 const struct lu_fid *fid,
2042 struct lfsck_thread_info *info = lfsck_env_info(env);
2043 struct lfsck_request *lr = &info->lti_lr;
2044 struct lfsck_instance *lfsck = com->lc_lfsck;
2045 struct lfsck_tgt_desc *ltd;
2046 struct ptlrpc_request *req;
2047 struct lfsck_request *tmp;
2048 struct obd_export *exp;
2052 ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
2053 if (unlikely(ltd == NULL))
2057 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
2058 GOTO(put, rc = -EOPNOTSUPP);
2060 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2062 GOTO(put, rc = -ENOMEM);
2064 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
2066 ptlrpc_request_free(req);
2071 memset(lr, 0, sizeof(*lr));
2072 lr->lr_event = LE_CONDITIONAL_DESTROY;
2073 lr->lr_active = LT_LAYOUT;
2076 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2078 ptlrpc_request_set_replen(req);
2080 rc = ptlrpc_queue_wait(req);
2081 ptlrpc_req_finished(req);
/* Slave-side handler of LE_CONDITIONAL_DESTROY: destroy the local OST-object
 * lr->lr_fid only if it still looks like an LFSCK-created placeholder
 * (la_ctime == 0 without S_ISUID) and has not been written since. Takes a
 * server-side EX extent lock over [0, EOF] to serialize against writers and
 * re-checks the attributes under the object write lock before deleting.
 * Returns -ETXTBSY when the object must be kept.
 * NOTE(review): listing is elided; @flags initialization and several error
 * branches are not visible. */
2091 static int lfsck_layout_slave_conditional_destroy(const struct lu_env *env,
2092 struct lfsck_component *com,
2093 struct lfsck_request *lr)
2095 struct lfsck_thread_info *info = lfsck_env_info(env);
2096 struct lu_attr *la = &info->lti_la;
2097 ldlm_policy_data_t *policy = &info->lti_policy;
2098 struct ldlm_res_id *resid = &info->lti_resid;
2099 struct lfsck_instance *lfsck = com->lc_lfsck;
2100 struct dt_device *dev = lfsck->li_bottom;
2101 struct lu_fid *fid = &lr->lr_fid;
2102 struct dt_object *obj;
2103 struct thandle *th = NULL;
2104 struct lustre_handle lh = { 0 };
2109 obj = lfsck_object_find_by_dev(env, dev, fid);
2111 RETURN(PTR_ERR(obj));
2113 dt_read_lock(env, obj, 0);
2114 if (dt_object_exists(obj) == 0) {
2115 dt_read_unlock(env, obj);
2117 GOTO(put, rc = -ENOENT);
2120 /* Get obj's attr without lock firstly. */
2121 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2122 dt_read_unlock(env, obj);
/* Modified (ctime set) or S_ISUID-marked objects are not placeholders. */
2126 if (likely(la->la_ctime != 0 || la->la_mode & S_ISUID))
2127 GOTO(put, rc = -ETXTBSY);
2129 /* Acquire extent lock on [0, EOF] to sync with all possible written. */
2130 LASSERT(lfsck->li_namespace != NULL);
2132 memset(policy, 0, sizeof(*policy));
2133 policy->l_extent.end = OBD_OBJECT_EOF;
2134 ost_fid_build_resid(fid, resid);
2135 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_EXTENT,
2136 policy, LCK_EX, &flags, ldlm_blocking_ast,
2137 ldlm_completion_ast, NULL, NULL, 0,
2138 LVB_T_NONE, NULL, &lh);
2140 GOTO(put, rc = -EIO);
2142 dt_write_lock(env, obj, 0);
2143 /* Get obj's attr within lock again. */
2144 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
2148 if (la->la_ctime != 0)
2149 GOTO(unlock, rc = -ETXTBSY);
2151 th = dt_trans_create(env, dev);
2153 GOTO(unlock, rc = PTR_ERR(th));
2155 rc = dt_declare_ref_del(env, obj, th);
2159 rc = dt_declare_destroy(env, obj, th);
2163 rc = dt_trans_start_local(env, dev, th);
2167 rc = dt_ref_del(env, obj, th);
2171 rc = dt_destroy(env, obj, th);
2173 CDEBUG(D_LFSCK, "Destroy the empty OST-object "DFID" which "
2174 "was created for reparing dangling referenced case. "
2175 "But the original missed OST-object is found now.\n",
2181 dt_trans_stop(env, dev, th);
2184 dt_write_unlock(env, obj);
2185 ldlm_lock_decref(&lh, LCK_EX);
2188 lu_object_put(env, &obj->do_lu);
/*
 * Some OST-object has occupied the specified layout EA slot.
 * Such OST-object may be generated by the LFSCK when repair
 * dangling referenced MDT-object, which can be indicated by
 * attr::la_ctime == 0 but without S_ISUID in la_mode. If it
 * is true and such OST-object has not been modified yet, we
 * will replace it with the orphan OST-object; otherwise the
 * LFSCK will create new MDT-object to reference the orphan.
 *
 * \retval +1: repaired
 * \retval 0: did nothing
 * \retval -ve: on error
 *
 * NOTE(review): listing is elided; sequences like "<ds->"/"<d->" elsewhere
 * in this file are HTML-mangled "&ltds->"/"&ltd->" — watch for the same here.
 */
2206 static int lfsck_layout_conflict_create(const struct lu_env *env,
2207 struct lfsck_component *com,
2208 struct lfsck_tgt_desc *ltd,
2209 struct lu_orphan_rec *rec,
2210 struct dt_object *parent,
2211 struct lu_fid *cfid,
2212 struct lu_buf *ea_buf,
2213 struct lov_ost_data_v1 *slot,
2214 __u32 ea_off, __u32 ori_len)
2216 struct lfsck_thread_info *info = lfsck_env_info(env);
2217 struct lu_fid *cfid2 = &info->lti_fid2;
2218 struct ost_id *oi = &info->lti_oi;
2219 char *postfix = info->lti_tmpbuf;
2220 struct lov_mds_md_v1 *lmm = ea_buf->lb_buf;
2221 struct dt_device *dev = com->lc_lfsck->li_bottom;
2222 struct thandle *th = NULL;
2223 struct lustre_handle lh = { 0 };
2224 __u32 ost_idx2 = le32_to_cpu(slot->l_ost_idx);
/* Decode the FID of the OST-object currently occupying the slot. */
2228 ostid_le_to_cpu(&slot->l_ost_oi, oi);
2229 ostid_to_fid(cfid2, oi, ost_idx2);
2231 CDEBUG(D_LFSCK, "Handle layout EA conflict: parent "DFID
2232 ", cur-child "DFID" on the OST %u, orphan-child "
2233 DFID" on the OST %u, stripe-index %u\n",
2234 PFID(lfsck_dto2fid(parent)), PFID(cfid2), ost_idx2,
2235 PFID(cfid), ltd->ltd_index, ea_off);
2237 /* Hold layout lock on the parent to prevent others to access. */
2238 rc = lfsck_layout_lock(env, com, parent, &lh,
2239 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2243 rc = lfsck_layout_master_conditional_destroy(env, com, cfid2, ost_idx2);
2245 /* If the conflict OST-object is not created for fixing dangling
2246 * referenced MDT-object in former LFSCK check/repair, or it has
2247 * been modified by others, then we cannot destroy it. Re-create
2248 * a new MDT-object for the orphan OST-object. */
2249 if (rc == -ETXTBSY) {
2250 /* No need the layout lock on the original parent. */
2251 lfsck_layout_unlock(&lh);
2252 ea_buf->lb_len = ori_len;
/* "C-" prefix: conflicting claims on the same parent/slot. */
2254 fid_zero(&rec->lor_fid);
2255 snprintf(postfix, LFSCK_TMPBUF_LEN, "-"DFID"-%x",
2256 PFID(lu_object_fid(&parent->do_lu)), ea_off);
2257 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2258 "C-", postfix, ea_off);
2263 if (rc != 0 && rc != -ENOENT)
2266 th = dt_trans_create(env, dev);
2268 GOTO(unlock, rc = PTR_ERR(th));
2270 rc = dt_declare_xattr_set(env, parent, ea_buf, XATTR_NAME_LOV,
2271 LU_XATTR_REPLACE, th);
2275 rc = dt_trans_start_local(env, dev, th);
/* The conflicting object is gone: take over the slot for the orphan
 * under the parent write lock, bumping the layout generation. */
2279 dt_write_lock(env, parent, 0);
2280 lmm->lmm_layout_gen = cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2281 rc = lfsck_layout_refill_lovea(env, th, parent, cfid, ea_buf, slot,
2282 LU_XATTR_REPLACE, ltd->ltd_index);
2283 dt_write_unlock(env, parent);
2288 dt_trans_stop(env, dev, th);
2291 lfsck_layout_unlock(&lh);
2294 ea_buf->lb_len = ori_len;
2296 return rc >= 0 ? 1 : rc;
/* Repair @parent's layout EA so that stripe slot @ea_off references the
 * orphan OST-object @cfid on @ost_idx. Depending on the current EA state
 * this creates a fresh EA, extends it, refills a dummy slot, hands off to
 * lfsck_layout_update_pfid() (index mismatch) or to
 * lfsck_layout_conflict_create() (slot occupied by another object).
 *
 * \retval +1: repaired
 * \retval 0: did nothing
 * \retval -ve: on error
 *
 * NOTE(review): listing is elided — the retry label that the "re-try"
 * comments jump back to, and several declarations (fl, magic, count, i,
 * rc, rc1), are not visible here. */
2304 static int lfsck_layout_recreate_lovea(const struct lu_env *env,
2305 struct lfsck_component *com,
2306 struct lfsck_tgt_desc *ltd,
2307 struct lu_orphan_rec *rec,
2308 struct dt_object *parent,
2309 struct lu_fid *cfid,
2310 __u32 ost_idx, __u32 ea_off)
2312 struct lfsck_thread_info *info = lfsck_env_info(env);
2313 struct lu_buf *buf = &info->lti_big_buf;
2314 struct lu_fid *fid = &info->lti_fid2;
2315 struct ost_id *oi = &info->lti_oi;
2316 struct lfsck_instance *lfsck = com->lc_lfsck;
2317 struct dt_device *dt = lfsck->li_bottom;
2318 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2319 struct thandle *handle = NULL;
2320 size_t buflen = buf->lb_len;
2321 struct lov_mds_md_v1 *lmm;
2322 struct lov_ost_data_v1 *objs;
2323 struct lustre_handle lh = { 0 };
2330 bool locked = false;
2333 CDEBUG(D_LFSCK, "Re-create the crashed layout EA: parent "
2334 DFID", child "DFID", OST-index %u, stripe-index %u\n",
2335 PFID(lfsck_dto2fid(parent)), PFID(cfid), ost_idx, ea_off);
2337 rc = lfsck_layout_lock(env, com, parent, &lh,
2338 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
/* Retry path: drop locks/transaction before re-sizing the buffer. */
2344 dt_write_unlock(env, parent);
2348 if (handle != NULL) {
2349 dt_trans_stop(env, dt, handle);
2354 GOTO(unlock_layout, rc);
2356 if (buf->lb_len < rc) {
2357 lu_buf_realloc(buf, rc);
2358 buflen = buf->lb_len;
2359 if (buf->lb_buf == NULL)
2360 GOTO(unlock_layout, rc = -ENOMEM);
2363 if (!(bk->lb_param & LPF_DRYRUN)) {
2364 handle = dt_trans_create(env, dt);
2366 GOTO(unlock_layout, rc = PTR_ERR(handle));
2368 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2373 rc = dt_trans_start_local(env, dt, handle);
2378 dt_write_lock(env, parent, 0);
/* Inspect the current layout EA to decide create vs. replace. */
2380 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2381 if (rc == -ERANGE) {
2382 rc = dt_xattr_get(env, parent, &LU_BUF_NULL, XATTR_NAME_LOV,
2386 } else if (rc == -ENODATA || rc == 0) {
2387 rc = lov_mds_md_size(ea_off + 1, LOV_MAGIC_V1);
2388 /* If the declared is not big enough, re-try. */
2389 if (buf->lb_len < rc)
2392 fl = LU_XATTR_CREATE;
2393 } else if (rc < 0) {
2394 GOTO(unlock_parent, rc);
2395 } else if (unlikely(buf->lb_len == 0)) {
2398 fl = LU_XATTR_REPLACE;
2401 if (fl == LU_XATTR_CREATE) {
2402 if (bk->lb_param & LPF_DRYRUN)
2403 GOTO(unlock_parent, rc = 1);
2405 LASSERT(buf->lb_len >= rc);
2408 rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
2409 fl, ost_idx, ea_off);
2411 GOTO(unlock_parent, rc);
2415 rc1 = lfsck_layout_verify_header(lmm);
2417 GOTO(unlock_parent, rc = rc1);
2419 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2420 * been verified in lfsck_layout_verify_header() already. If some
2421 * new magic introduced in the future, then layout LFSCK needs to
2422 * be updated also. */
2423 magic = le32_to_cpu(lmm->lmm_magic);
2424 if (magic == LOV_MAGIC_V1) {
2425 objs = &(lmm->lmm_objects[0]);
2427 LASSERT(magic == LOV_MAGIC_V3);
2428 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2431 count = le16_to_cpu(lmm->lmm_stripe_count);
2433 GOTO(unlock_parent, rc = -EINVAL);
2436 /* Exceed the current end of MDT-object layout EA. Then extend it. */
2437 if (count <= ea_off) {
2438 if (bk->lb_param & LPF_DRYRUN)
2439 GOTO(unlock_parent, rc = 1);
2441 rc = lov_mds_md_size(ea_off + 1, magic);
2442 /* If the declared is not big enough, re-try. */
2443 if (buf->lb_len < rc)
2447 rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
2448 fl, ost_idx, ea_off);
2449 GOTO(unlock_parent, rc);
2452 LASSERTF(rc > 0, "invalid rc = %d\n", rc);
/* Walk the existing stripe slots looking at slot @ea_off / @cfid. */
2455 for (i = 0; i < count; i++, objs++) {
2456 /* The MDT-object was created via lfsck_layout_recover_create()
2457 * by others before, and we fill the dummy layout EA. */
2458 if (is_dummy_lov_ost_data(objs)) {
2462 if (bk->lb_param & LPF_DRYRUN)
2463 GOTO(unlock_parent, rc = 1);
2465 lmm->lmm_layout_gen =
2466 cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
2467 rc = lfsck_layout_refill_lovea(env, handle, parent,
2468 cfid, buf, objs, fl,
2470 GOTO(unlock_parent, rc);
2473 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2474 ostid_to_fid(fid, oi, le32_to_cpu(objs->l_ost_idx));
2475 /* It should be rare case, the slot is there, but the LFSCK
2476 * does not handle it during the first-phase cycle scanning. */
2477 if (unlikely(lu_fid_eq(fid, cfid))) {
2479 GOTO(unlock_parent, rc = 0);
2481 /* Rare case that the OST-object index
2482 * does not match the parent MDT-object
2483 * layout EA. We trust the later one. */
2484 if (bk->lb_param & LPF_DRYRUN)
2485 GOTO(unlock_parent, rc = 1);
2487 dt_write_unlock(env, parent);
2489 dt_trans_stop(env, dt, handle);
2490 lfsck_layout_unlock(&lh);
2491 buf->lb_len = buflen;
2492 rc = lfsck_layout_update_pfid(env, com, parent,
2493 cfid, ltd->ltd_tgt, i);
2500 /* The MDT-object exists, but related layout EA slot is occupied
2502 if (bk->lb_param & LPF_DRYRUN)
2503 GOTO(unlock_parent, rc = 1);
/* Slot conflict: drop our locks/transaction and delegate. */
2505 dt_write_unlock(env, parent);
2507 dt_trans_stop(env, dt, handle);
2508 lfsck_layout_unlock(&lh);
2509 if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
2510 objs = &(lmm->lmm_objects[ea_off]);
2512 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
2513 rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
2514 buf, objs, ea_off, buflen);
2520 dt_write_unlock(env, parent);
2524 dt_trans_stop(env, dt, handle);
2527 lfsck_layout_unlock(&lh);
2528 buf->lb_len = buflen;
/* Process one orphan OST-object record from OST @ltd: recreate a parent
 * MDT-object when the parent FID is zero/missing, otherwise repair the
 * existing parent's layout EA via lfsck_layout_recreate_lovea(). Updates
 * the per-component repaired/failed phase2 counters under com->lc_sem.
 * NOTE(review): listing is elided; some branches between the visible
 * lines (including the "N-"/"R-" recreate-parent arguments) are missing. */
2533 static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
2534 struct lfsck_component *com,
2535 struct lfsck_tgt_desc *ltd,
2536 struct lu_orphan_rec *rec,
2537 struct lu_fid *cfid)
2539 struct lfsck_layout *lo = com->lc_file_ram;
2540 struct lu_fid *pfid = &rec->lor_fid;
2541 struct dt_object *parent = NULL;
/* The stripe index is carried in the parent FID's f_ver field. */
2542 __u32 ea_off = pfid->f_stripe_idx;
2546 if (!fid_is_sane(cfid))
2547 GOTO(out, rc = -EINVAL);
2549 if (fid_is_zero(pfid)) {
2550 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2556 if (!fid_is_sane(pfid))
2557 GOTO(out, rc = -EINVAL);
2559 parent = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
2561 GOTO(out, rc = PTR_ERR(parent));
2563 if (unlikely(dt_object_remote(parent) != 0))
2564 GOTO(put, rc = -EXDEV);
2566 if (dt_object_exists(parent) == 0) {
2567 lu_object_put(env, &parent->do_lu);
2568 rc = lfsck_layout_recreate_parent(env, com, ltd, rec, cfid,
2573 if (!S_ISREG(lu_object_attr(&parent->do_lu)))
2574 GOTO(put, rc = -EISDIR);
2576 rc = lfsck_layout_recreate_lovea(env, com, ltd, rec, parent, cfid,
2577 ltd->ltd_index, ea_off);
2583 lu_object_put(env, &parent->do_lu);
2585 /* The layout EA is changed, need to be reloaded next time. */
2586 lu_object_put_nocache(env, &parent->do_lu);
2589 down_write(&com->lc_sem);
2590 com->lc_new_scanned++;
2591 com->lc_new_checked++;
2593 lo->ll_objs_repaired[LLIT_ORPHAN - 1]++;
2595 } else if (rc < 0) {
2596 lo->ll_objs_failed_phase2++;
2598 up_write(&com->lc_sem);
/* Iterate the orphan OST-objects index of OST @ltd (a per-OST virtual index
 * addressed by an FID built from FID_SEQ_IDIF + the OST index) and feed each
 * record to lfsck_layout_scan_orphan_one(). -ESRCH from load() means the
 * slave's orphan rbtree was discarded (e.g. OSS restart) and the scan is
 * marked LF_INCOMPLETE.
 * NOTE(review): listing is elided; iterator error checks and the loop
 * structure between the visible lines are missing. */
2603 static int lfsck_layout_scan_orphan(const struct lu_env *env,
2604 struct lfsck_component *com,
2605 struct lfsck_tgt_desc *ltd)
2607 struct lfsck_layout *lo = com->lc_file_ram;
2608 struct lfsck_instance *lfsck = com->lc_lfsck;
2609 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2610 struct lfsck_thread_info *info = lfsck_env_info(env);
2611 struct ost_id *oi = &info->lti_oi;
2612 struct lu_fid *fid = &info->lti_fid;
2613 struct dt_object *obj;
2614 const struct dt_it_ops *iops;
2619 CDEBUG(D_LFSCK, "%s: start the orphan scanning for OST%04x\n",
2620 lfsck_lfsck2name(lfsck), ltd->ltd_index);
/* Build the virtual orphan-index FID for this OST. */
2622 ostid_set_seq(oi, FID_SEQ_IDIF);
2623 ostid_set_id(oi, 0);
2624 ostid_to_fid(fid, oi, ltd->ltd_index);
2625 obj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, fid);
2626 if (unlikely(IS_ERR(obj)))
2627 RETURN(PTR_ERR(obj));
2629 rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_orphan_features);
2633 iops = &obj->do_index_ops->dio_it;
2634 di = iops->init(env, obj, 0, BYPASS_CAPA);
2636 GOTO(put, rc = PTR_ERR(di));
2638 rc = iops->load(env, di, 0);
2640 /* -ESRCH means that the orphan OST-objects rbtree has been
2641 * cleanup because of the OSS server restart or other errors. */
2642 lo->ll_flags |= LF_INCOMPLETE;
2647 rc = iops->next(env, di);
2659 struct lu_orphan_rec *rec = &info->lti_rec;
/* Fault-injection hook: optionally stall until the thread stops. */
2661 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY3) &&
2663 struct ptlrpc_thread *thread = &lfsck->li_thread;
2664 struct l_wait_info lwi;
2666 lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
2668 l_wait_event(thread->t_ctl_waitq,
2669 !thread_is_running(thread),
/* Remember the scan position, fetch the record, process it. */
2673 key = iops->key(env, di);
2674 com->lc_fid_latest_scanned_phase2 = *(struct lu_fid *)key;
2675 rc = iops->rec(env, di, (struct dt_rec *)rec, 0);
2677 rc = lfsck_layout_scan_orphan_one(env, com, ltd, rec,
2678 &com->lc_fid_latest_scanned_phase2);
2679 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
2682 lfsck_control_speed_by_self(com);
2684 rc = iops->next(env, di);
2685 } while (rc < 0 && !(bk->lb_param & LPF_FAILOUT));
2692 iops->fini(env, di);
2694 lu_object_put(env, &obj->do_lu);
2696 CDEBUG(D_LFSCK, "%s: finish the orphan scanning for OST%04x, rc = %d\n",
2697 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
2699 return rc > 0 ? 0 : rc;
2702 /* For the MDT-object with dangling reference, we need to re-create
2703 * the missed OST-object with the known FID/owner information. */
/*
 * NOTE(review): this extract is missing interior source lines (the fourth
 * parameter — apparently 'struct lu_attr *la' given its use below — plus
 * 'rc', 'buf' declarations and several error checks are not visible).
 * Visible flow: take the layout/xattr ldlm lock on the parent, declare and
 * execute the child create + XATTR_NAME_FID set in one transaction, all
 * under the parent's read lock so a dying parent aborts the repair (rc=1).
 */
2704 static int lfsck_layout_recreate_ostobj(const struct lu_env *env,
2705 struct lfsck_component *com,
2706 struct lfsck_layout_req *llr,
2709 struct lfsck_thread_info *info = lfsck_env_info(env);
2710 struct filter_fid *pfid = &info->lti_new_pfid;
2711 struct dt_allocation_hint *hint = &info->lti_hint;
2712 struct dt_object *parent = llr->llr_parent->llo_obj;
2713 struct dt_object *child = llr->llr_child;
2714 struct dt_device *dev = lfsck_obj2dt_dev(child);
2715 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2716 struct thandle *handle;
2718 struct lustre_handle lh = { 0 };
2722 CDEBUG(D_LFSCK, "Repair dangling reference for: parent "DFID
2723 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
2724 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2725 llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid);
/* Serialize with other layout users by locking the parent MDT-object. */
2727 rc = lfsck_layout_lock(env, com, parent, &lh,
2728 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2732 handle = dt_trans_create(env, dev);
2734 GOTO(unlock1, rc = PTR_ERR(handle));
2736 hint->dah_parent = NULL;
/* Compose the filter_fid (parent back-pointer) for the new OST-object. */
2738 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2739 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2740 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2741 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2742 * parent MDT-object's layout EA. */
2743 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2744 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
2746 rc = dt_declare_create(env, child, la, hint, NULL, handle);
2750 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
2751 LU_XATTR_CREATE, handle);
2755 rc = dt_trans_start(env, dev, handle);
/* Hold the parent's read lock while creating, so a concurrently dying
 * parent makes us bail out with rc = 1 (nothing to repair). */
2759 dt_read_lock(env, parent, 0);
2760 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2761 GOTO(unlock2, rc = 1);
2763 rc = dt_create(env, child, la, hint, NULL, handle);
2767 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
2768 handle, BYPASS_CAPA);
2773 dt_read_unlock(env, parent);
2776 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2779 lfsck_layout_unlock(&lh);
2784 /* If the OST-object does not recognize the MDT-object as its parent, and
2785 * there is no other MDT-object claims as its parent, then just trust the
2786 * given MDT-object as its parent. So update the OST-object filter_fid. */
/*
 * NOTE(review): interior source lines are missing from this extract ('rc',
 * 'buf' declarations, several error checks and the closing RETURN are not
 * visible). Visible flow: under the parent's layout/xattr ldlm lock and a
 * single transaction, rewrite the child's XATTR_NAME_FID back-pointer and
 * resync the child's uid/gid from the parent's latest attributes.
 */
2787 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
2788 struct lfsck_component *com,
2789 struct lfsck_layout_req *llr,
2790 const struct lu_attr *pla)
2792 struct lfsck_thread_info *info = lfsck_env_info(env);
2793 struct filter_fid *pfid = &info->lti_new_pfid;
2794 struct lu_attr *tla = &info->lti_la3;
2795 struct dt_object *parent = llr->llr_parent->llo_obj;
2796 struct dt_object *child = llr->llr_child;
2797 struct dt_device *dev = lfsck_obj2dt_dev(child);
2798 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
2799 struct thandle *handle;
2801 struct lustre_handle lh = { 0 };
2805 CDEBUG(D_LFSCK, "Repair unmatched MDT-OST pair for: parent "DFID
2806 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
2807 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2808 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
2810 rc = lfsck_layout_lock(env, com, parent, &lh,
2811 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2815 handle = dt_trans_create(env, dev);
2817 GOTO(unlock1, rc = PTR_ERR(handle));
/* New filter_fid pointing the child back at this parent/stripe. */
2819 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
2820 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
2821 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
2822 * MDT-object's FID::f_ver, instead it is the OST-object index in its
2823 * parent MDT-object's layout EA. */
2824 pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
2825 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
2827 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
2831 tla->la_valid = LA_UID | LA_GID;
2832 tla->la_uid = pla->la_uid;
2833 tla->la_gid = pla->la_gid;
2834 rc = dt_declare_attr_set(env, child, tla, handle);
2838 rc = dt_trans_start(env, dev, handle);
/* Take the parent's write lock; a dying parent means nothing to repair. */
2842 dt_write_lock(env, parent, 0);
2843 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2844 GOTO(unlock2, rc = 1);
2846 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
2851 /* Get the latest parent's owner. */
2852 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
/* Re-assert LA_UID|LA_GID (dt_attr_get may have rewritten la_valid),
 * then copy the fresh owner onto the child. */
2856 tla->la_valid = LA_UID | LA_GID;
2857 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
2862 dt_write_unlock(env, parent);
2865 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2868 lfsck_layout_unlock(&lh);
2873 /* If there are more than one MDT-objects claim as the OST-object's parent,
2874 * and the OST-object only recognizes one of them, then we need to generate
2875 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
/*
 * NOTE(review): interior lines are missing from this extract (trailing
 * parameters — 'la' and 'buf' are used below but not declared here — plus
 * 'rc', 'magic' and several error checks). Also note 'child' is assigned
 * twice (orig lines 2918 and 2923); the intervening IS_ERR-style check that
 * presumably justifies the first assignment is not visible — confirm
 * against upstream before drawing conclusions.
 *
 * Visible flow: under the parent's layout/xattr lock, allocate an anonymous
 * object on the child's device, create it with the parent's owner info, and
 * splice its OST id into the parent's LOV EA slot for this stripe (bumping
 * lmm_layout_gen), skipping the repair if the layout changed meanwhile.
 */
2876 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
2877 struct lfsck_component *com,
2878 struct lfsck_layout_req *llr,
2882 struct lfsck_thread_info *info = lfsck_env_info(env);
2883 struct dt_allocation_hint *hint = &info->lti_hint;
2884 struct dt_object_format *dof = &info->lti_dof;
2885 struct dt_device *pdev = com->lc_lfsck->li_next;
2886 struct ost_id *oi = &info->lti_oi;
2887 struct dt_object *parent = llr->llr_parent->llo_obj;
2888 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
2889 struct dt_object *child = NULL;
2890 struct lu_device *d = &cdev->dd_lu_dev;
2891 struct lu_object *o = NULL;
2892 struct thandle *handle;
2893 struct lov_mds_md_v1 *lmm;
2894 struct lov_ost_data_v1 *objs;
2895 struct lustre_handle lh = { 0 };
2900 CDEBUG(D_LFSCK, "Repair multiple references for: parent "DFID
2901 ", OST-index %u, stripe-index %u, owner %u:%u\n",
2902 PFID(lfsck_dto2fid(parent)), llr->llr_ost_idx,
2903 llr->llr_lov_idx, la->la_uid, la->la_gid);
2905 rc = lfsck_layout_lock(env, com, parent, &lh,
2906 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
2910 handle = dt_trans_create(env, pdev);
2912 GOTO(unlock1, rc = PTR_ERR(handle));
/* Allocate a brand-new (anonymous) object on the child's device to stand
 * in as the replacement OST-object for this stripe. */
2914 o = lu_object_anon(env, d, NULL);
2916 GOTO(stop, rc = PTR_ERR(o));
2918 child = container_of(o, struct dt_object, do_lu);
2919 o = lu_object_locate(o->lo_header, d->ld_type);
2920 if (unlikely(o == NULL))
2921 GOTO(stop, rc = -EINVAL);
2923 child = container_of(o, struct dt_object, do_lu);
2924 la->la_valid = LA_UID | LA_GID;
2925 hint->dah_parent = NULL;
2927 dof->dof_type = DFT_REGULAR;
2928 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
2932 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2933 LU_XATTR_REPLACE, handle);
2937 rc = dt_trans_start(env, pdev, handle);
2941 dt_write_lock(env, parent, 0);
2942 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2943 GOTO(unlock2, rc = 0);
/* Re-read the LOV EA under lock; if it vanished or is invalid there is
 * nothing left to repair. */
2945 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2946 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
2947 GOTO(unlock2, rc = 0);
2950 rc = lfsck_layout_verify_header(lmm);
2954 /* Someone change layout during the LFSCK, no need to repair then. */
2955 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
2956 GOTO(unlock2, rc = 0);
2958 rc = dt_create(env, child, la, hint, dof, handle);
2962 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2963 * been verified in lfsck_layout_verify_header() already. If some
2964 * new magic introduced in the future, then layout LFSCK needs to
2965 * be updated also. */
2966 magic = le32_to_cpu(lmm->lmm_magic);
2967 if (magic == LOV_MAGIC_V1) {
2968 objs = &(lmm->lmm_objects[0]);
2970 LASSERT(magic == LOV_MAGIC_V3);
2971 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Bump the layout generation and point this stripe slot at the new
 * child object, then store the updated EA back on the parent. */
2974 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
2975 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
2976 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
2977 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
2978 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
2979 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2980 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
2982 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
2985 dt_write_unlock(env, parent);
2989 lu_object_put(env, &child->do_lu);
2991 dt_trans_stop(env, pdev, handle);
2994 lfsck_layout_unlock(&lh);
2999 /* If the MDT-object and the OST-object have different owner information,
3000 * then trust the MDT-object, because the normal chown/chgrp handle order
3001 * is from MDT to OST, and it is possible that some chown/chgrp operation
3002 * is partly done. */
/*
 * NOTE(review): interior lines are missing from this extract ('rc'
 * declaration, several error checks and the final RETURN are not visible).
 * Visible flow: in one transaction, re-read the parent's current uid/gid
 * under the parent's read lock and, if it still differs from what LFSCK
 * observed (pla), copy it onto the child. rc = 1 paths mean "raced with a
 * concurrent change, nothing to do".
 */
3003 static int lfsck_layout_repair_owner(const struct lu_env *env,
3004 struct lfsck_component *com,
3005 struct lfsck_layout_req *llr,
3006 struct lu_attr *pla)
3008 struct lfsck_thread_info *info = lfsck_env_info(env);
3009 struct lu_attr *tla = &info->lti_la3;
3010 struct dt_object *parent = llr->llr_parent->llo_obj;
3011 struct dt_object *child = llr->llr_child;
3012 struct dt_device *dev = lfsck_obj2dt_dev(child);
3013 struct thandle *handle;
3017 CDEBUG(D_LFSCK, "Repair inconsistent file owner for: parent "DFID
3018 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
3019 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
3020 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
3022 handle = dt_trans_create(env, dev);
3024 RETURN(PTR_ERR(handle));
3026 tla->la_uid = pla->la_uid;
3027 tla->la_gid = pla->la_gid;
3028 tla->la_valid = LA_UID | LA_GID;
3029 rc = dt_declare_attr_set(env, child, tla, handle);
3033 rc = dt_trans_start(env, dev, handle);
3037 /* Use the dt_object lock to serialize with destroy and attr_set. */
3038 dt_read_lock(env, parent, 0);
3039 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
3040 GOTO(unlock, rc = 1);
3042 /* Get the latest parent's owner. */
3043 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
3045 CWARN("%s: fail to get the latest parent's ("DFID") owner, "
3046 "not sure whether some others chown/chgrp during the "
3047 "LFSCK: rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
3048 PFID(lfsck_dto2fid(parent)), rc);
3053 /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
3054 if (unlikely(tla->la_uid != pla->la_uid ||
3055 tla->la_gid != pla->la_gid))
3056 GOTO(unlock, rc = 1);
/* Owner confirmed unchanged: propagate LA_UID|LA_GID to the child. */
3058 tla->la_valid = LA_UID | LA_GID;
3059 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
3064 dt_read_unlock(env, parent);
3067 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
3072 /* Check whether the OST-object correctly back points to the
3073 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
/*
 * NOTE(review): interior lines are missing from this extract ('rc', 'i',
 * 'count', 'magic' declarations, the RETURN(0) match path at orig 3113 and
 * some checks are not visible).
 *
 * Visible outcomes: returns an LLIT_* inconsistency code (positive) such as
 * LLIT_UNMATCHED_PAIR / LLIT_MULTIPLE_REFERENCED / LLIT_INCONSISTENT_OWNER,
 * or a negative errno on lookup failure.
 */
3074 static int lfsck_layout_check_parent(const struct lu_env *env,
3075 struct lfsck_component *com,
3076 struct dt_object *parent,
3077 const struct lu_fid *pfid,
3078 const struct lu_fid *cfid,
3079 const struct lu_attr *pla,
3080 const struct lu_attr *cla,
3081 struct lfsck_layout_req *llr,
3082 struct lu_buf *lov_ea, __u32 idx)
3084 struct lfsck_thread_info *info = lfsck_env_info(env);
3085 struct lu_buf *buf = &info->lti_big_buf;
3086 struct dt_object *tobj;
3087 struct lov_mds_md_v1 *lmm;
3088 struct lov_ost_data_v1 *objs;
/* A zero back-pointer FID: if the OST-object was never written, only the
 * owner can be inconsistent; otherwise the pair is unmatched. */
3095 if (fid_is_zero(pfid)) {
3096 /* client never wrote. */
3097 if (cla->la_size == 0 && cla->la_blocks == 0) {
3098 if (unlikely(cla->la_uid != pla->la_uid ||
3099 cla->la_gid != pla->la_gid))
3100 RETURN (LLIT_INCONSISTENT_OWNER);
3105 RETURN(LLIT_UNMATCHED_PAIR);
3108 if (unlikely(!fid_is_sane(pfid)))
3109 RETURN(LLIT_UNMATCHED_PAIR);
/* Back-pointer names this very parent: it matches only if the recorded
 * stripe index equals the one LFSCK expects. */
3111 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
3112 if (llr->llr_lov_idx == idx)
3115 RETURN(LLIT_UNMATCHED_PAIR);
3118 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
3120 RETURN(LLIT_UNMATCHED_PAIR);
3123 RETURN(PTR_ERR(tobj));
3125 if (!dt_object_exists(tobj))
3126 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3128 /* Load the tobj's layout EA, in spite of it is a local MDT-object or
3129 * remote one on another MDT. Then check whether the given OST-object
3130 * is in such layout. If yes, it is multiple referenced, otherwise it
3131 * is unmatched referenced case. */
3132 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
3134 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3140 rc = lfsck_layout_verify_header(lmm);
3144 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3145 * been verified in lfsck_layout_verify_header() already. If some
3146 * new magic introduced in the future, then layout LFSCK needs to
3147 * be updated also. */
3148 magic = le32_to_cpu(lmm->lmm_magic);
3149 if (magic == LOV_MAGIC_V1) {
3150 objs = &(lmm->lmm_objects[0]);
3152 LASSERT(magic == LOV_MAGIC_V3);
3153 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Walk the claimed parent's stripes; if the child FID appears there, the
 * OST-object is referenced by more than one MDT-object. */
3156 count = le16_to_cpu(lmm->lmm_stripe_count);
3157 for (i = 0; i < count; i++, objs++) {
3158 struct lu_fid *tfid = &info->lti_fid2;
3159 struct ost_id *oi = &info->lti_oi;
3161 if (is_dummy_lov_ost_data(objs))
3164 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3165 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
3166 if (lu_fid_eq(cfid, tfid)) {
3169 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
3173 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
3176 lfsck_object_put(env, tobj);
/*
 * Diagnose one MDT-object/OST-object pair and dispatch the matching repair:
 * classify the inconsistency (dangling / unmatched pair / multiple
 * references / owner mismatch), repair it unless in dry-run mode, and
 * account the result into the layout LFSCK statistics.
 *
 * NOTE(review): interior lines are missing from this extract ('rc', 'idx'
 * declarations, the switch(type) opener, several GOTO targets and error
 * checks are not visible) — confirm control flow against upstream.
 */
3181 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
3182 struct lfsck_component *com,
3183 struct lfsck_layout_req *llr)
3185 struct lfsck_layout *lo = com->lc_file_ram;
3186 struct lfsck_thread_info *info = lfsck_env_info(env);
3187 struct filter_fid_old *pea = &info->lti_old_pfid;
3188 struct lu_fid *pfid = &info->lti_fid;
3189 struct lu_buf *buf = NULL;
3190 struct dt_object *parent = llr->llr_parent->llo_obj;
3191 struct dt_object *child = llr->llr_child;
3192 struct lu_attr *pla = &info->lti_la;
3193 struct lu_attr *cla = &info->lti_la2;
3194 struct lfsck_instance *lfsck = com->lc_lfsck;
3195 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3196 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
3201 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
3203 if (lu_object_is_dying(parent->do_lu.lo_header))
/* Child attr fetch: -ENOENT on a live parent means the stripe points at
 * a missing OST-object, i.e. a dangling reference. */
3209 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
3210 if (rc == -ENOENT) {
3211 if (lu_object_is_dying(parent->do_lu.lo_header))
3214 type = LLIT_DANGLING;
/* Read the child's XATTR_NAME_FID back-pointer; accept both the old
 * (filter_fid_old) and current (filter_fid) on-disk sizes. */
3221 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
3222 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
3223 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
3224 rc != sizeof(struct filter_fid))) {
3225 type = LLIT_UNMATCHED_PAIR;
3229 if (rc < 0 && rc != -ENODATA)
3232 if (rc == -ENODATA) {
3235 fid_le_to_cpu(pfid, &pea->ff_parent);
3236 /* Currently, the filter_fid::ff_parent::f_ver is not the
3237 * real parent MDT-object's FID::f_ver, instead it is the
3238 * OST-object index in its parent MDT-object's layout EA. */
3239 idx = pfid->f_stripe_idx;
3243 rc = lfsck_layout_check_parent(env, com, parent, pfid,
3244 lu_object_fid(&child->do_lu),
3245 pla, cla, llr, buf, idx);
3254 if (unlikely(cla->la_uid != pla->la_uid ||
3255 cla->la_gid != pla->la_gid)) {
3256 type = LLIT_INCONSISTENT_OWNER;
3261 if (bk->lb_param & LPF_DRYRUN) {
3262 if (type != LLIT_NONE)
/* Repair dispatch — the LLIT_DANGLING case rebuilds child attributes
 * from the parent's owner and a default regular-file mode. (switch
 * opener not visible in this extract.) */
3270 memset(cla, 0, sizeof(*cla));
3271 cla->la_uid = pla->la_uid;
3272 cla->la_gid = pla->la_gid;
3273 cla->la_mode = S_IFREG | 0666;
3274 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
3275 LA_ATIME | LA_MTIME | LA_CTIME;
3276 rc = lfsck_layout_recreate_ostobj(env, com, llr, cla);
3278 case LLIT_UNMATCHED_PAIR:
3279 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
3281 case LLIT_MULTIPLE_REFERENCED:
3282 rc = lfsck_layout_repair_multiple_references(env, com, llr,
3285 case LLIT_INCONSISTENT_OWNER:
3286 rc = lfsck_layout_repair_owner(env, com, llr, pla);
/* Statistics accounting, serialized by com->lc_sem. */
3296 down_write(&com->lc_sem);
3298 struct lfsck_layout_master_data *llmd = com->lc_data;
3300 if (unlikely(llmd->llmd_exit)) {
3302 } else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
3303 rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
3304 rc == -EHOSTUNREACH) {
3305 /* If cannot touch the target server,
3306 * mark the LFSCK as INCOMPLETE. */
3307 CERROR("%s: Fail to talk with OST %x: rc = %d.\n",
3308 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
3309 lo->ll_flags |= LF_INCOMPLETE;
3310 lo->ll_objs_skipped++;
3313 lfsck_layout_record_failure(env, lfsck, lo);
3315 } else if (rc > 0) {
3316 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
3317 "unknown type = %d\n", type);
3319 lo->ll_objs_repaired[type - 1]++;
3320 if (bk->lb_param & LPF_DRYRUN &&
3321 unlikely(lo->ll_pos_first_inconsistent == 0))
3322 lo->ll_pos_first_inconsistent =
3323 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
3326 up_write(&com->lc_sem);
/*
 * The layout LFSCK assistant thread (master side): announces LE_START to
 * all targets, drains the llmd_req_list produced by the main engine thread
 * (phase 1), announces LE_PHASE1_DONE, then drives the phase-2 double scan
 * including orphan scanning per OST, and finally broadcasts the terminal
 * event (LE_PHASE2_DONE / LE_STOP / LE_PEER_EXIT) before stopping.
 *
 * NOTE(review): interior lines are missing from this extract (outer loop
 * openers, 'rc'/'rc1' declarations, GOTO labels and several closing braces
 * are not visible). Additionally, several tokens look HTML-entity mangled:
 * '<a->lta_env' (orig 3334), '<ds->ltd_lock' (orig 3488/3497/3511/3515/3518)
 * and '<d->ltd_layout_phase_list' (orig 3496) were presumably '&lta',
 * '&ltds' and '&ltd' in the real source — restore from upstream.
 */
3331 static int lfsck_layout_assistant(void *args)
3333 struct lfsck_thread_args *lta = args;
3334 struct lu_env *env = <a->lta_env;
3335 struct lfsck_component *com = lta->lta_com;
3336 struct lfsck_instance *lfsck = lta->lta_lfsck;
3337 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3338 struct lfsck_position *pos = &com->lc_pos_start;
3339 struct lfsck_thread_info *info = lfsck_env_info(env);
3340 struct lfsck_request *lr = &info->lti_lr;
3341 struct lfsck_layout_master_data *llmd = com->lc_data;
3342 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3343 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3344 struct lfsck_layout_req *llr;
3345 struct l_wait_info lwi = { 0 };
/* Broadcast LE_START (with speed/param settings from the bookmark) to all
 * involved targets before doing any work. */
3350 memset(lr, 0, sizeof(*lr));
3351 lr->lr_event = LE_START;
3352 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
3354 lr->lr_speed = bk->lb_speed_limit;
3355 lr->lr_version = bk->lb_version;
3356 lr->lr_param = bk->lb_param;
3357 lr->lr_async_windows = bk->lb_async_windows;
3358 lr->lr_flags = LEF_TO_OST;
3359 if (pos->lp_oit_cookie <= 1)
3360 lr->lr_param |= LPF_RESET;
3362 rc = lfsck_layout_master_notify_others(env, com, lr);
3364 CERROR("%s: fail to notify others for layout start: rc = %d\n",
3365 lfsck_lfsck2name(lfsck), rc);
3369 spin_lock(&llmd->llmd_lock);
3370 thread_set_flags(athread, SVC_RUNNING);
3371 spin_unlock(&llmd->llmd_lock);
3372 wake_up_all(&mthread->t_ctl_waitq);
/* Phase 1: consume prefetched requests queued by the engine thread. */
3375 while (!list_empty(&llmd->llmd_req_list)) {
3376 bool wakeup = false;
3378 if (unlikely(llmd->llmd_exit ||
3379 !thread_is_running(mthread)))
3380 GOTO(cleanup1, rc = llmd->llmd_post_result);
3382 llr = list_entry(llmd->llmd_req_list.next,
3383 struct lfsck_layout_req,
3385 /* Only the lfsck_layout_assistant thread itself can
3386 * remove the "llr" from the head of the list, LFSCK
3387 * engine thread only inserts other new "lld" at the
3388 * end of the list. So it is safe to handle current
3389 * "llr" without the spin_lock. */
3390 rc = lfsck_layout_assistant_handle_one(env, com, llr);
3391 spin_lock(&llmd->llmd_lock);
3392 list_del_init(&llr->llr_list);
3393 llmd->llmd_prefetched--;
3394 /* Wake up the main engine thread only when the list
3395 * is empty or half of the prefetched items have been
3396 * handled to avoid too frequent thread schedule. */
3397 if (llmd->llmd_prefetched == 0 ||
3398 (bk->lb_async_windows != 0 &&
3399 bk->lb_async_windows / 2 ==
3400 llmd->llmd_prefetched))
3402 spin_unlock(&llmd->llmd_lock);
3404 wake_up_all(&mthread->t_ctl_waitq);
3406 lfsck_layout_req_fini(env, llr);
3407 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
/* Idle: sleep until new requests, post, or double-scan is signalled. */
3411 l_wait_event(athread->t_ctl_waitq,
3412 !lfsck_layout_req_empty(llmd) ||
3414 llmd->llmd_to_post ||
3415 llmd->llmd_to_double_scan,
3418 if (unlikely(llmd->llmd_exit))
3419 GOTO(cleanup1, rc = llmd->llmd_post_result);
3421 if (!list_empty(&llmd->llmd_req_list))
3424 if (llmd->llmd_to_post) {
3425 llmd->llmd_to_post = 0;
3426 LASSERT(llmd->llmd_post_result > 0);
3428 memset(lr, 0, sizeof(*lr));
3429 lr->lr_event = LE_PHASE1_DONE;
3430 lr->lr_status = llmd->llmd_post_result;
3431 rc = lfsck_layout_master_notify_others(env, com, lr);
3433 CERROR("%s: failed to notify others "
3434 "for layout post: rc = %d\n",
3435 lfsck_lfsck2name(lfsck), rc);
3437 /* Wakeup the master engine to go ahead. */
3438 wake_up_all(&mthread->t_ctl_waitq);
3441 if (llmd->llmd_to_double_scan) {
3442 llmd->llmd_to_double_scan = 0;
3443 atomic_inc(&lfsck->li_double_scan_count);
3444 llmd->llmd_in_double_scan = 1;
3445 wake_up_all(&mthread->t_ctl_waitq);
3447 com->lc_new_checked = 0;
3448 com->lc_new_scanned = 0;
3449 com->lc_time_last_checkpoint = cfs_time_current();
3450 com->lc_time_next_checkpoint =
3451 com->lc_time_last_checkpoint +
3452 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Phase 2 (double scan): poll target status, and scan orphans on each
 * OST that reaches phase 2, until the phase-1 list drains. */
3454 while (llmd->llmd_in_double_scan) {
3455 struct lfsck_tgt_descs *ltds =
3456 &lfsck->li_ost_descs;
3457 struct lfsck_tgt_desc *ltd;
3459 rc = lfsck_layout_master_query_others(env, com);
3460 if (lfsck_layout_master_to_orphan(llmd))
3466 /* Pull LFSCK status on related targets once
3467 * per 30 seconds if we are not notified. */
3468 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
3469 cfs_time_seconds(1),
3471 rc = l_wait_event(athread->t_ctl_waitq,
3472 lfsck_layout_master_to_orphan(llmd) ||
3474 !thread_is_running(mthread),
3477 if (unlikely(llmd->llmd_exit ||
3478 !thread_is_running(mthread)))
3479 GOTO(cleanup2, rc = 0);
3481 if (rc == -ETIMEDOUT)
3488 spin_lock(<ds->ltd_lock);
3490 &llmd->llmd_ost_phase2_list)) {
3492 llmd->llmd_ost_phase2_list.next,
3493 struct lfsck_tgt_desc,
3494 ltd_layout_phase_list);
3496 <d->ltd_layout_phase_list);
3497 spin_unlock(<ds->ltd_lock);
3499 if (bk->lb_param & LPF_ALL_TGT) {
3500 rc = lfsck_layout_scan_orphan(
3503 bk->lb_param & LPF_FAILOUT)
3507 if (unlikely(llmd->llmd_exit ||
3508 !thread_is_running(mthread)))
3509 GOTO(cleanup2, rc = 0);
3511 spin_lock(<ds->ltd_lock);
3514 if (list_empty(&llmd->llmd_ost_phase1_list)) {
3515 spin_unlock(<ds->ltd_lock);
3516 GOTO(cleanup2, rc = 1);
3518 spin_unlock(<ds->ltd_lock);
3524 /* Cleanup the unfinished requests. */
3525 spin_lock(&llmd->llmd_lock);
3527 llmd->llmd_assistant_status = rc;
3529 while (!list_empty(&llmd->llmd_req_list)) {
3530 llr = list_entry(llmd->llmd_req_list.next,
3531 struct lfsck_layout_req,
3533 list_del_init(&llr->llr_list);
3534 llmd->llmd_prefetched--;
3535 spin_unlock(&llmd->llmd_lock);
3536 lfsck_layout_req_fini(env, llr);
3537 spin_lock(&llmd->llmd_lock);
3539 spin_unlock(&llmd->llmd_lock);
3541 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
3542 llmd->llmd_prefetched);
/* Choose the terminal event/status to broadcast, based on how we exited:
 * rc > 0 (phase 2 done), rc == 0 (stopped/co-stopped/paused) or failure. */
3545 memset(lr, 0, sizeof(*lr));
3547 lr->lr_event = LE_PHASE2_DONE;
3549 } else if (rc == 0) {
3550 if (lfsck->li_flags & LPF_ALL_TGT) {
3551 lr->lr_event = LE_STOP;
3552 lr->lr_status = LS_STOPPED;
3554 lr->lr_event = LE_PEER_EXIT;
3555 switch (lfsck->li_status) {
3558 lr->lr_status = LS_CO_PAUSED;
3562 lr->lr_status = LS_CO_STOPPED;
3565 CERROR("%s: unknown status: rc = %d\n",
3566 lfsck_lfsck2name(lfsck),
3568 lr->lr_status = LS_CO_FAILED;
3573 if (lfsck->li_flags & LPF_ALL_TGT) {
3574 lr->lr_event = LE_STOP;
3575 lr->lr_status = LS_FAILED;
3577 lr->lr_event = LE_PEER_EXIT;
3578 lr->lr_status = LS_CO_FAILED;
3582 rc1 = lfsck_layout_master_notify_others(env, com, lr);
3584 CERROR("%s: failed to notify others for layout quit: rc = %d\n",
3585 lfsck_lfsck2name(lfsck), rc1);
3589 /* Under force exit case, some requests may be just freed without
3590 * verification, those objects should be re-handled when next run.
3591 * So not update the on-disk tracing file under such case. */
3592 if (llmd->llmd_in_double_scan && !llmd->llmd_exit)
3593 rc1 = lfsck_layout_double_scan_result(env, com, rc);
3596 if (llmd->llmd_in_double_scan)
3597 atomic_dec(&lfsck->li_double_scan_count);
3599 spin_lock(&llmd->llmd_lock);
3600 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
3601 thread_set_flags(athread, SVC_STOPPED);
3602 wake_up_all(&mthread->t_ctl_waitq);
3603 spin_unlock(&llmd->llmd_lock);
3604 lfsck_thread_args_fini(lta);
/*
 * Async interpret callback for a slave's LFSCK_QUERY RPC to the master.
 * If the master replies that it is no longer scanning (or the RPC failed —
 * treated as master crash), drop the master from the slave's tracking list.
 * Always releases the references taken when the request was queued.
 *
 * NOTE(review): the return type / 'static' line and the rc-check lines
 * around orig 3621/3626 are missing from this extract.
 */
3610 lfsck_layout_slave_async_interpret(const struct lu_env *env,
3611 struct ptlrpc_request *req,
3614 struct lfsck_layout_slave_async_args *llsaa = args;
3615 struct obd_export *exp = llsaa->llsaa_exp;
3616 struct lfsck_component *com = llsaa->llsaa_com;
3617 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
3618 struct lfsck_layout_slave_data *llsd = com->lc_data;
3622 /* It is quite probably caused by target crash,
3623 * to make the LFSCK can go ahead, assume that
3624 * the target finished the LFSCK prcoessing. */
3627 struct lfsck_reply *lr;
3629 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
3630 if (lr->lr_status != LS_SCANNING_PHASE1 &&
3631 lr->lr_status != LS_SCANNING_PHASE2)
/* Master no longer scanning: stop tracking it. */
3635 lfsck_layout_llst_del(llsd, llst);
3636 lfsck_layout_llst_put(llst);
3637 lfsck_component_put(env, com);
3638 class_export_put(exp);
/*
 * Pack and queue one asynchronous LFSCK_QUERY RPC to the given export,
 * recording @com/@llst in the request's async args so the interpret
 * callback (lfsck_layout_slave_async_interpret) can release them.
 * NOTE(review): the '*tmp = *lr' copy and rc checks between the visible
 * lines are missing from this extract.
 */
3643 static int lfsck_layout_async_query(const struct lu_env *env,
3644 struct lfsck_component *com,
3645 struct obd_export *exp,
3646 struct lfsck_layout_slave_target *llst,
3647 struct lfsck_request *lr,
3648 struct ptlrpc_request_set *set)
3650 struct lfsck_layout_slave_async_args *llsaa;
3651 struct ptlrpc_request *req;
3652 struct lfsck_request *tmp;
3656 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
3660 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
3662 ptlrpc_request_free(req);
3666 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3668 ptlrpc_request_set_replen(req);
/* Stash context for the interpret callback; a component reference is
 * taken here and dropped in the callback. */
3670 llsaa = ptlrpc_req_async_args(req);
3671 llsaa->llsaa_exp = exp;
3672 llsaa->llsaa_com = lfsck_component_get(com);
3673 llsaa->llsaa_llst = llst;
3674 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
3675 ptlrpc_set_add_req(set, req);
/*
 * Pack and queue one asynchronous LFSCK_NOTIFY RPC (no interpret callback)
 * to the given export. NOTE(review): the '*tmp = *lr' copy and rc checks
 * between the visible lines are missing from this extract.
 */
3680 static int lfsck_layout_async_notify(const struct lu_env *env,
3681 struct obd_export *exp,
3682 struct lfsck_request *lr,
3683 struct ptlrpc_request_set *set)
3685 struct ptlrpc_request *req;
3686 struct lfsck_request *tmp;
3690 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
3694 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
3696 ptlrpc_request_free(req);
3700 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3702 ptlrpc_request_set_replen(req);
3703 ptlrpc_set_add_req(set, req);
/*
 * Slave side: send an async LE_QUERY to every tracked master in
 * llsd_master_list, then wait for all replies. The llsd_touch_gen counter
 * plus move-to-tail rotation ensures each target is visited exactly once
 * per pass even while the spinlock is dropped for the RPC send.
 *
 * NOTE(review): the return type / 'static' line, 'rc'/'rc1' declarations
 * and some error paths are missing from this extract.
 */
3709 lfsck_layout_slave_query_master(const struct lu_env *env,
3710 struct lfsck_component *com)
3712 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3713 struct lfsck_instance *lfsck = com->lc_lfsck;
3714 struct lfsck_layout_slave_data *llsd = com->lc_data;
3715 struct lfsck_layout_slave_target *llst;
3716 struct obd_export *exp;
3717 struct ptlrpc_request_set *set;
3722 set = ptlrpc_prep_set();
3726 memset(lr, 0, sizeof(*lr));
3727 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3728 lr->lr_event = LE_QUERY;
3729 lr->lr_active = LT_LAYOUT;
/* One pass over the master list: generation tag stops the walk once the
 * rotated list comes back around to an already-touched entry. */
3731 llsd->llsd_touch_gen++;
3732 spin_lock(&llsd->llsd_lock);
3733 while (!list_empty(&llsd->llsd_master_list)) {
3734 llst = list_entry(llsd->llsd_master_list.next,
3735 struct lfsck_layout_slave_target,
3737 if (llst->llst_gen == llsd->llsd_touch_gen)
3740 llst->llst_gen = llsd->llsd_touch_gen;
3741 list_del(&llst->llst_list);
3742 list_add_tail(&llst->llst_list,
3743 &llsd->llsd_master_list);
3744 atomic_inc(&llst->llst_ref);
3745 spin_unlock(&llsd->llsd_lock);
/* Resolve the LWP export for this master; if gone, stop tracking it. */
3747 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
3750 lfsck_layout_llst_del(llsd, llst);
3751 lfsck_layout_llst_put(llst);
3752 spin_lock(&llsd->llsd_lock);
3756 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
3758 CERROR("%s: slave fail to query %s for layout: "
3759 "rc = %d\n", lfsck_lfsck2name(lfsck),
3760 exp->exp_obd->obd_name, rc);
3762 lfsck_layout_llst_put(llst);
3763 class_export_put(exp);
3765 spin_lock(&llsd->llsd_lock);
3767 spin_unlock(&llsd->llsd_lock);
3769 rc = ptlrpc_set_wait(set);
3770 ptlrpc_set_destroy(set);
3772 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * Slave side: broadcast the given event/result (flagged LEF_FROM_OST) to
 * every tracked master in llsd_master_list via async LFSCK_NOTIFY RPCs,
 * then wait for completion. Uses the same llsd_touch_gen rotation as
 * lfsck_layout_slave_query_master() to visit each target once per pass.
 *
 * NOTE(review): the return type / 'static' line, 'rc' declaration and
 * the function's closing lines are missing from this extract.
 */
3776 lfsck_layout_slave_notify_master(const struct lu_env *env,
3777 struct lfsck_component *com,
3778 enum lfsck_events event, int result)
3780 struct lfsck_instance *lfsck = com->lc_lfsck;
3781 struct lfsck_layout_slave_data *llsd = com->lc_data;
3782 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
3783 struct lfsck_layout_slave_target *llst;
3784 struct obd_export *exp;
3785 struct ptlrpc_request_set *set;
3789 set = ptlrpc_prep_set();
3793 memset(lr, 0, sizeof(*lr));
3794 lr->lr_event = event;
3795 lr->lr_flags = LEF_FROM_OST;
3796 lr->lr_status = result;
3797 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
3798 lr->lr_active = LT_LAYOUT;
3799 llsd->llsd_touch_gen++;
3800 spin_lock(&llsd->llsd_lock);
3801 while (!list_empty(&llsd->llsd_master_list)) {
3802 llst = list_entry(llsd->llsd_master_list.next,
3803 struct lfsck_layout_slave_target,
3805 if (llst->llst_gen == llsd->llsd_touch_gen)
3808 llst->llst_gen = llsd->llsd_touch_gen;
3809 list_del(&llst->llst_list);
3810 list_add_tail(&llst->llst_list,
3811 &llsd->llsd_master_list);
3812 atomic_inc(&llst->llst_ref);
3813 spin_unlock(&llsd->llsd_lock);
3815 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
3818 lfsck_layout_llst_del(llsd, llst);
3819 lfsck_layout_llst_put(llst);
3820 spin_lock(&llsd->llsd_lock);
3824 rc = lfsck_layout_async_notify(env, exp, lr, set);
3826 CERROR("%s: slave fail to notify %s for layout: "
3827 "rc = %d\n", lfsck_lfsck2name(lfsck),
3828 exp->exp_obd->obd_name, rc);
3829 lfsck_layout_llst_put(llst);
3830 class_export_put(exp);
3831 spin_lock(&llsd->llsd_lock);
3833 spin_unlock(&llsd->llsd_lock);
/* Fire-and-forget: the wait result is intentionally ignored here. */
3835 ptlrpc_set_wait(set);
3836 ptlrpc_set_destroy(set);
/* NOTE(review): the opening of this doc comment is missing from the
 * extract; only the \ret lines survive. */
3842 * \ret -ENODATA: unrecognized stripe
3843 * \ret = 0 : recognized stripe
3844 * \ret < 0 : other failures
/*
 * Master side of LE_PAIRS_VERIFY: load the claimed parent MDT-object's
 * LOV EA and check whether @cfid occupies stripe slot @idx in it.
 * NOTE(review): 'rc'/'i'/'count'/'magic' declarations and some error
 * checks are missing from this extract.
 */
3846 static int lfsck_layout_master_check_pairs(const struct lu_env *env,
3847 struct lfsck_component *com,
3848 struct lu_fid *cfid,
3849 struct lu_fid *pfid)
3851 struct lfsck_thread_info *info = lfsck_env_info(env);
3852 struct lu_buf *buf = &info->lti_big_buf;
3853 struct ost_id *oi = &info->lti_oi;
3854 struct dt_object *obj;
3855 struct lov_mds_md_v1 *lmm;
3856 struct lov_ost_data_v1 *objs;
3857 __u32 idx = pfid->f_stripe_idx;
3865 obj = lfsck_object_find_by_dev(env, com->lc_lfsck->li_bottom, pfid);
3867 RETURN(PTR_ERR(obj));
3869 dt_read_lock(env, obj, 0);
3870 if (unlikely(!dt_object_exists(obj)))
3871 GOTO(unlock, rc = -ENOENT);
3873 rc = lfsck_layout_get_lovea(env, obj, buf, NULL);
3878 GOTO(unlock, rc = -ENODATA);
3881 rc = lfsck_layout_verify_header(lmm);
3885 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3886 * been verified in lfsck_layout_verify_header() already. If some
3887 * new magic introduced in the future, then layout LFSCK needs to
3888 * be updated also. */
3889 magic = le32_to_cpu(lmm->lmm_magic);
3890 if (magic == LOV_MAGIC_V1) {
3891 objs = &(lmm->lmm_objects[0]);
3893 LASSERT(magic == LOV_MAGIC_V3);
3894 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Compare by ost_id: a match at the expected index is a recognized
 * stripe (0); a match elsewhere, or no match, yields -ENODATA. */
3897 fid_to_ostid(cfid, oi);
3898 count = le16_to_cpu(lmm->lmm_stripe_count);
3899 for (i = 0; i < count; i++, objs++) {
3902 ostid_le_to_cpu(&objs->l_ost_oi, &oi2);
3903 if (memcmp(oi, &oi2, sizeof(*oi)) == 0)
3904 GOTO(unlock, rc = (i != idx ? -ENODATA : 0));
3907 GOTO(unlock, rc = -ENODATA);
3910 dt_read_unlock(env, obj);
3911 lu_object_put(env, &obj->do_lu);
/* NOTE(review): the opening of this doc comment is missing from the
 * extract; the text below is its surviving body. */
3917 * The LFSCK-on-OST will ask the LFSCK-on-MDT to check whether the given
3918 * MDT-object/OST-object pairs match or not to aviod transfer MDT-object
3919 * layout EA from MDT to OST. On one hand, the OST no need to understand
3920 * the layout EA structure; on the other hand, it may cause trouble when
3921 * transfer large layout EA from MDT to OST via normal OUT RPC.
3923 * \ret > 0: unrecognized stripe
3924 * \ret = 0: recognized stripe
3925 * \ret < 0: other failures
/*
 * NOTE(review): 'rc' declaration, some RETURN paths (e.g. for the IDIF and
 * non-MDT range cases) and intermediate checks are missing from this
 * extract. Visible flow: resolve the claimed parent FID's sequence to an
 * MDT via FLD, find the LWP export to that MDT, and synchronously send an
 * LE_PAIRS_VERIFY LFSCK_NOTIFY carrying the child/parent FID pair;
 * -ENOENT/-ENODATA replies are normalized (mapped around orig 3983).
 */
3927 static int lfsck_layout_slave_check_pairs(const struct lu_env *env,
3928 struct lfsck_component *com,
3929 struct lu_fid *cfid,
3930 struct lu_fid *pfid)
3932 struct lfsck_instance *lfsck = com->lc_lfsck;
3933 struct obd_device *obd = lfsck->li_obd;
3934 struct seq_server_site *ss =
3935 lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
3936 struct obd_export *exp = NULL;
3937 struct ptlrpc_request *req = NULL;
3938 struct lfsck_request *lr;
3939 struct lu_seq_range range = { 0 };
3943 if (unlikely(fid_is_idif(pfid)))
3946 fld_range_set_any(&range);
3947 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(pfid), &range);
3949 RETURN(rc == -ENOENT ? 1 : rc);
3951 if (unlikely(!fld_range_is_mdt(&range)))
3954 exp = lustre_find_lwp_by_index(obd->obd_name, range.lsr_index);
3955 if (unlikely(exp == NULL))
/* The target MDT must support the LFSCK connect flag to verify pairs. */
3958 if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
3959 GOTO(out, rc = -EOPNOTSUPP);
3961 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
3963 GOTO(out, rc = -ENOMEM);
3965 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
3967 ptlrpc_request_free(req);
3972 lr = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
3973 memset(lr, 0, sizeof(*lr));
3974 lr->lr_event = LE_PAIRS_VERIFY;
3975 lr->lr_active = LT_LAYOUT;
3976 lr->lr_fid = *cfid; /* OST-object itself FID. */
3977 lr->lr_fid2 = *pfid; /* The claimed parent FID. */
3979 ptlrpc_request_set_replen(req);
3980 rc = ptlrpc_queue_wait(req);
3981 ptlrpc_req_finished(req);
3983 if (rc == -ENOENT || rc == -ENODATA)
3990 class_export_put(exp);
3995 static int lfsck_layout_slave_repair_pfid(const struct lu_env *env,
3996 struct lfsck_component *com,
3997 struct lfsck_request *lr)
3999 struct lfsck_thread_info *info = lfsck_env_info(env);
4000 struct filter_fid *ff = &info->lti_new_pfid;
4002 struct dt_device *dev = com->lc_lfsck->li_bottom;
4003 struct dt_object *obj;
4004 struct thandle *th = NULL;
4008 obj = lfsck_object_find_by_dev(env, dev, &lr->lr_fid);
4010 RETURN(PTR_ERR(obj));
4012 fid_cpu_to_le(&ff->ff_parent, &lr->lr_fid2);
4013 buf = lfsck_buf_get(env, ff, sizeof(*ff));
4014 dt_write_lock(env, obj, 0);
4015 if (unlikely(!dt_object_exists(obj)))
4016 GOTO(unlock, rc = 0);
4018 th = dt_trans_create(env, dev);
4020 GOTO(unlock, rc = PTR_ERR(th));
4022 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th);
4026 rc = dt_trans_start_local(env, dev, th);
4030 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_FID, 0, th, BYPASS_CAPA);
4035 dt_trans_stop(env, dev, th);
4038 dt_write_unlock(env, obj);
4039 lu_object_put(env, &obj->do_lu);
4046 static int lfsck_layout_reset(const struct lu_env *env,
4047 struct lfsck_component *com, bool init)
4049 struct lfsck_layout *lo = com->lc_file_ram;
4052 down_write(&com->lc_sem);
4054 memset(lo, 0, com->lc_file_size);
4056 __u32 count = lo->ll_success_count;
4057 __u64 last_time = lo->ll_time_last_complete;
4059 memset(lo, 0, com->lc_file_size);
4060 lo->ll_success_count = count;
4061 lo->ll_time_last_complete = last_time;
4064 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
4065 lo->ll_status = LS_INIT;
4067 rc = lfsck_layout_store(env, com);
4068 up_write(&com->lc_sem);
4073 static void lfsck_layout_fail(const struct lu_env *env,
4074 struct lfsck_component *com, bool new_checked)
4076 struct lfsck_layout *lo = com->lc_file_ram;
4078 down_write(&com->lc_sem);
4080 com->lc_new_checked++;
4081 lfsck_layout_record_failure(env, com->lc_lfsck, lo);
4082 up_write(&com->lc_sem);
4085 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
4086 struct lfsck_component *com, bool init)
4088 struct lfsck_instance *lfsck = com->lc_lfsck;
4089 struct lfsck_layout *lo = com->lc_file_ram;
4090 struct lfsck_layout_master_data *llmd = com->lc_data;
4091 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4092 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4093 struct l_wait_info lwi = { 0 };
4096 if (com->lc_new_checked == 0 && !init)
4099 l_wait_event(mthread->t_ctl_waitq,
4100 list_empty(&llmd->llmd_req_list) ||
4101 !thread_is_running(mthread) ||
4102 thread_is_stopped(athread),
4105 if (!thread_is_running(mthread) || thread_is_stopped(athread))
4108 down_write(&com->lc_sem);
4110 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4112 lo->ll_pos_last_checkpoint =
4113 lfsck->li_pos_current.lp_oit_cookie;
4114 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4115 HALF_SEC - lfsck->li_time_last_checkpoint);
4116 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4117 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4118 com->lc_new_checked = 0;
4121 rc = lfsck_layout_store(env, com);
4122 up_write(&com->lc_sem);
4127 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
4128 struct lfsck_component *com, bool init)
4130 struct lfsck_instance *lfsck = com->lc_lfsck;
4131 struct lfsck_layout *lo = com->lc_file_ram;
4134 if (com->lc_new_checked == 0 && !init)
4137 down_write(&com->lc_sem);
4140 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
4142 lo->ll_pos_last_checkpoint =
4143 lfsck->li_pos_current.lp_oit_cookie;
4144 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4145 HALF_SEC - lfsck->li_time_last_checkpoint);
4146 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4147 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4148 com->lc_new_checked = 0;
4151 rc = lfsck_layout_store(env, com);
4153 up_write(&com->lc_sem);
4158 static int lfsck_layout_prep(const struct lu_env *env,
4159 struct lfsck_component *com,
4160 struct lfsck_start *start)
4162 struct lfsck_instance *lfsck = com->lc_lfsck;
4163 struct lfsck_layout *lo = com->lc_file_ram;
4164 struct lfsck_position *pos = &com->lc_pos_start;
4166 fid_zero(&pos->lp_dir_parent);
4167 pos->lp_dir_cookie = 0;
4168 if (lo->ll_status == LS_COMPLETED ||
4169 lo->ll_status == LS_PARTIAL ||
4170 /* To handle orphan, must scan from the beginning. */
4171 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
4174 rc = lfsck_layout_reset(env, com, false);
4179 down_write(&com->lc_sem);
4180 lo->ll_time_latest_start = cfs_time_current_sec();
4181 spin_lock(&lfsck->li_lock);
4182 if (lo->ll_flags & LF_SCANNED_ONCE) {
4183 if (!lfsck->li_drop_dryrun ||
4184 lo->ll_pos_first_inconsistent == 0) {
4185 lo->ll_status = LS_SCANNING_PHASE2;
4186 list_del_init(&com->lc_link);
4187 list_add_tail(&com->lc_link,
4188 &lfsck->li_list_double_scan);
4189 pos->lp_oit_cookie = 0;
4193 lo->ll_status = LS_SCANNING_PHASE1;
4194 lo->ll_run_time_phase1 = 0;
4195 lo->ll_run_time_phase2 = 0;
4196 lo->ll_objs_checked_phase1 = 0;
4197 lo->ll_objs_checked_phase2 = 0;
4198 lo->ll_objs_failed_phase1 = 0;
4199 lo->ll_objs_failed_phase2 = 0;
4200 for (i = 0; i < LLIT_MAX; i++)
4201 lo->ll_objs_repaired[i] = 0;
4203 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4204 fid_zero(&com->lc_fid_latest_scanned_phase2);
4207 lo->ll_status = LS_SCANNING_PHASE1;
4208 if (!lfsck->li_drop_dryrun ||
4209 lo->ll_pos_first_inconsistent == 0)
4210 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
4212 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
4214 spin_unlock(&lfsck->li_lock);
4215 up_write(&com->lc_sem);
4220 static int lfsck_layout_slave_prep(const struct lu_env *env,
4221 struct lfsck_component *com,
4222 struct lfsck_start_param *lsp)
4224 struct lfsck_layout_slave_data *llsd = com->lc_data;
4225 struct lfsck_start *start = lsp->lsp_start;
4228 rc = lfsck_layout_prep(env, com, start);
4229 if (rc != 0 || !lsp->lsp_index_valid)
4232 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
4233 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
4234 LASSERT(!llsd->llsd_rbtree_valid);
4236 write_lock(&llsd->llsd_rb_lock);
4237 rc = lfsck_rbtree_setup(env, com);
4238 write_unlock(&llsd->llsd_rb_lock);
4244 static int lfsck_layout_master_prep(const struct lu_env *env,
4245 struct lfsck_component *com,
4246 struct lfsck_start_param *lsp)
4248 struct lfsck_instance *lfsck = com->lc_lfsck;
4249 struct lfsck_layout_master_data *llmd = com->lc_data;
4250 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4251 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4252 struct lfsck_thread_args *lta;
4256 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
4260 llmd->llmd_assistant_status = 0;
4261 llmd->llmd_post_result = 0;
4262 llmd->llmd_to_post = 0;
4263 llmd->llmd_to_double_scan = 0;
4264 llmd->llmd_in_double_scan = 0;
4265 llmd->llmd_exit = 0;
4266 thread_set_flags(athread, 0);
4268 lta = lfsck_thread_args_init(lfsck, com, lsp);
4270 RETURN(PTR_ERR(lta));
4272 rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
4273 if (IS_ERR_VALUE(rc)) {
4274 CERROR("%s: Cannot start LFSCK layout assistant thread: "
4275 "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
4276 lfsck_thread_args_fini(lta);
4278 struct l_wait_info lwi = { 0 };
4280 l_wait_event(mthread->t_ctl_waitq,
4281 thread_is_running(athread) ||
4282 thread_is_stopped(athread),
4284 if (unlikely(!thread_is_running(athread)))
4285 rc = llmd->llmd_assistant_status;
4293 /* Pre-fetch the attribute for each stripe in the given layout EA. */
4294 static int lfsck_layout_scan_stripes(const struct lu_env *env,
4295 struct lfsck_component *com,
4296 struct dt_object *parent,
4297 struct lov_mds_md_v1 *lmm)
4299 struct lfsck_thread_info *info = lfsck_env_info(env);
4300 struct lfsck_instance *lfsck = com->lc_lfsck;
4301 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4302 struct lfsck_layout *lo = com->lc_file_ram;
4303 struct lfsck_layout_master_data *llmd = com->lc_data;
4304 struct lfsck_layout_object *llo = NULL;
4305 struct lov_ost_data_v1 *objs;
4306 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
4307 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4308 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4309 struct l_wait_info lwi = { 0 };
4318 buf = lfsck_buf_get(env, &info->lti_old_pfid,
4319 sizeof(struct filter_fid_old));
4320 count = le16_to_cpu(lmm->lmm_stripe_count);
4321 gen = le16_to_cpu(lmm->lmm_layout_gen);
4322 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
4323 * been verified in lfsck_layout_verify_header() already. If some
4324 * new magic introduced in the future, then layout LFSCK needs to
4325 * be updated also. */
4326 magic = le32_to_cpu(lmm->lmm_magic);
4327 if (magic == LOV_MAGIC_V1) {
4328 objs = &(lmm->lmm_objects[0]);
4330 LASSERT(magic == LOV_MAGIC_V3);
4331 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
4334 for (i = 0; i < count; i++, objs++) {
4335 struct lu_fid *fid = &info->lti_fid;
4336 struct ost_id *oi = &info->lti_oi;
4337 struct lfsck_layout_req *llr;
4338 struct lfsck_tgt_desc *tgt = NULL;
4339 struct dt_object *cobj = NULL;
4341 le32_to_cpu(objs->l_ost_idx);
4342 bool wakeup = false;
4344 if (is_dummy_lov_ost_data(objs))
4347 l_wait_event(mthread->t_ctl_waitq,
4348 bk->lb_async_windows == 0 ||
4349 llmd->llmd_prefetched < bk->lb_async_windows ||
4350 !thread_is_running(mthread) ||
4351 thread_is_stopped(athread),
4354 if (unlikely(!thread_is_running(mthread)) ||
4355 thread_is_stopped(athread))
4358 ostid_le_to_cpu(&objs->l_ost_oi, oi);
4359 ostid_to_fid(fid, oi, index);
4360 tgt = lfsck_tgt_get(ltds, index);
4361 if (unlikely(tgt == NULL)) {
4362 CERROR("%s: Cannot talk with OST %x which did not join "
4363 "the layout LFSCK.\n",
4364 lfsck_lfsck2name(lfsck), index);
4365 lo->ll_flags |= LF_INCOMPLETE;
4369 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
4375 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
4379 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
4385 llo = lfsck_layout_object_init(env, parent, gen);
4392 llr = lfsck_layout_req_init(llo, cobj, index, i);
4399 spin_lock(&llmd->llmd_lock);
4400 if (llmd->llmd_assistant_status < 0) {
4401 spin_unlock(&llmd->llmd_lock);
4402 lfsck_layout_req_fini(env, llr);
4404 RETURN(llmd->llmd_assistant_status);
4407 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
4408 if (llmd->llmd_prefetched == 0)
4411 llmd->llmd_prefetched++;
4412 spin_unlock(&llmd->llmd_lock);
4414 wake_up_all(&athread->t_ctl_waitq);
4417 down_write(&com->lc_sem);
4418 com->lc_new_checked++;
4420 lfsck_layout_record_failure(env, lfsck, lo);
4421 up_write(&com->lc_sem);
4423 if (cobj != NULL && !IS_ERR(cobj))
4424 lu_object_put(env, &cobj->do_lu);
4426 if (likely(tgt != NULL))
4429 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
4436 if (llo != NULL && !IS_ERR(llo))
4437 lfsck_layout_object_put(env, llo);
4442 /* For the given object, read its layout EA locally. For each stripe, pre-fetch
4443 * the OST-object's attribute and generate a struct lfsck_layout_req on the
4444 * list ::llmd_req_list.
4446 * For each request on the above list, the lfsck_layout_assistant thread compares
4447 * the OST-side attribute with the local attribute; if inconsistent, it repairs it.
4449 * All of the above processing is asynchronous and pipelined. */
4450 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
4451 struct lfsck_component *com,
4452 struct dt_object *obj)
4454 struct lfsck_thread_info *info = lfsck_env_info(env);
4455 struct ost_id *oi = &info->lti_oi;
4456 struct lfsck_layout *lo = com->lc_file_ram;
4457 struct lfsck_layout_master_data *llmd = com->lc_data;
4458 struct lfsck_instance *lfsck = com->lc_lfsck;
4459 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4460 struct thandle *handle = NULL;
4461 struct lu_buf *buf = &info->lti_big_buf;
4462 struct lov_mds_md_v1 *lmm = NULL;
4463 struct dt_device *dev = lfsck->li_bottom;
4464 struct lustre_handle lh = { 0 };
4465 ssize_t buflen = buf->lb_len;
4467 bool locked = false;
4468 bool stripe = false;
4471 if (!S_ISREG(lfsck_object_type(obj)))
4474 if (llmd->llmd_assistant_status < 0)
4475 GOTO(out, rc = -ESRCH);
4477 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
4478 lmm_oi_cpu_to_le(oi, oi);
4479 dt_read_lock(env, obj, 0);
4483 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
4489 rc = lfsck_layout_verify_header(lmm);
4493 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
4494 GOTO(out, stripe = true);
4496 /* Inconsistent lmm_oi, should be repaired. */
4497 CDEBUG(D_LFSCK, "Repair bad lmm_oi for "DFID"\n",
4498 PFID(lfsck_dto2fid(obj)));
4500 if (bk->lb_param & LPF_DRYRUN) {
4501 down_write(&com->lc_sem);
4502 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4503 up_write(&com->lc_sem);
4505 GOTO(out, stripe = true);
4508 if (!lustre_handle_is_used(&lh)) {
4509 dt_read_unlock(env, obj);
4511 buf->lb_len = buflen;
4512 rc = lfsck_layout_lock(env, com, obj, &lh,
4513 MDS_INODELOCK_LAYOUT |
4514 MDS_INODELOCK_XATTR);
4518 handle = dt_trans_create(env, dev);
4520 GOTO(out, rc = PTR_ERR(handle));
4522 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4523 LU_XATTR_REPLACE, handle);
4527 rc = dt_trans_start_local(env, dev, handle);
4531 dt_write_lock(env, obj, 0);
4538 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
4539 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
4543 down_write(&com->lc_sem);
4544 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
4545 up_write(&com->lc_sem);
4547 GOTO(out, stripe = true);
4551 if (lustre_handle_is_used(&lh))
4552 dt_write_unlock(env, obj);
4554 dt_read_unlock(env, obj);
4557 if (handle != NULL && !IS_ERR(handle))
4558 dt_trans_stop(env, dev, handle);
4560 lfsck_layout_unlock(&lh);
4562 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
4564 down_write(&com->lc_sem);
4565 com->lc_new_checked++;
4567 lfsck_layout_record_failure(env, lfsck, lo);
4568 up_write(&com->lc_sem);
4570 buf->lb_len = buflen;
4575 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
4576 struct lfsck_component *com,
4577 struct dt_object *obj)
4579 struct lfsck_instance *lfsck = com->lc_lfsck;
4580 struct lfsck_layout *lo = com->lc_file_ram;
4581 const struct lu_fid *fid = lfsck_dto2fid(obj);
4582 struct lfsck_layout_slave_data *llsd = com->lc_data;
4583 struct lfsck_layout_seq *lls;
4589 LASSERT(llsd != NULL);
4591 lfsck_rbtree_update_bitmap(env, com, fid, false);
4593 down_write(&com->lc_sem);
4594 if (fid_is_idif(fid))
4596 else if (!fid_is_norm(fid) ||
4597 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
4598 GOTO(unlock, rc = 0);
4601 com->lc_new_checked++;
4603 lls = lfsck_layout_seq_lookup(llsd, seq);
4606 if (unlikely(lls == NULL))
4607 GOTO(unlock, rc = -ENOMEM);
4609 INIT_LIST_HEAD(&lls->lls_list);
4611 rc = lfsck_layout_lastid_load(env, com, lls);
4613 lo->ll_objs_failed_phase1++;
4618 lfsck_layout_seq_insert(llsd, lls);
4621 if (unlikely(fid_is_last_id(fid)))
4622 GOTO(unlock, rc = 0);
4625 if (oid > lls->lls_lastid_known)
4626 lls->lls_lastid_known = oid;
4628 if (oid > lls->lls_lastid) {
4629 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
4630 /* OFD may create new objects during LFSCK scanning. */
4631 rc = lfsck_layout_lastid_reload(env, com, lls);
4632 if (unlikely(rc != 0))
4633 CWARN("%s: failed to reload LAST_ID for "LPX64
4635 lfsck_lfsck2name(com->lc_lfsck),
4637 if (oid <= lls->lls_lastid)
4638 GOTO(unlock, rc = 0);
4640 LASSERT(lfsck->li_out_notify != NULL);
4642 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4643 LE_LASTID_REBUILDING);
4644 lo->ll_flags |= LF_CRASHED_LASTID;
4647 lls->lls_lastid = oid;
4651 GOTO(unlock, rc = 0);
4654 up_write(&com->lc_sem);
4659 static int lfsck_layout_exec_dir(const struct lu_env *env,
4660 struct lfsck_component *com,
4661 struct dt_object *obj,
4662 struct lu_dirent *ent)
4667 static int lfsck_layout_master_post(const struct lu_env *env,
4668 struct lfsck_component *com,
4669 int result, bool init)
4671 struct lfsck_instance *lfsck = com->lc_lfsck;
4672 struct lfsck_layout *lo = com->lc_file_ram;
4673 struct lfsck_layout_master_data *llmd = com->lc_data;
4674 struct ptlrpc_thread *mthread = &lfsck->li_thread;
4675 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4676 struct l_wait_info lwi = { 0 };
4681 llmd->llmd_post_result = result;
4682 llmd->llmd_to_post = 1;
4683 if (llmd->llmd_post_result <= 0)
4684 llmd->llmd_exit = 1;
4686 wake_up_all(&athread->t_ctl_waitq);
4687 l_wait_event(mthread->t_ctl_waitq,
4688 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
4689 thread_is_stopped(athread),
4692 if (llmd->llmd_assistant_status < 0)
4693 result = llmd->llmd_assistant_status;
4695 down_write(&com->lc_sem);
4696 spin_lock(&lfsck->li_lock);
4697 /* When LFSCK failed, there may be some prefetched objects those are
4698 * not been processed yet, we do not know the exactly position, then
4699 * just restart from last check-point next time. */
4700 if (!init && !llmd->llmd_exit)
4701 lo->ll_pos_last_checkpoint =
4702 lfsck->li_pos_current.lp_oit_cookie;
4705 lo->ll_status = LS_SCANNING_PHASE2;
4706 lo->ll_flags |= LF_SCANNED_ONCE;
4707 lo->ll_flags &= ~LF_UPGRADE;
4708 list_del_init(&com->lc_link);
4709 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
4710 } else if (result == 0) {
4711 lo->ll_status = lfsck->li_status;
4712 if (lo->ll_status == 0)
4713 lo->ll_status = LS_STOPPED;
4714 if (lo->ll_status != LS_PAUSED) {
4715 list_del_init(&com->lc_link);
4716 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4719 lo->ll_status = LS_FAILED;
4720 list_del_init(&com->lc_link);
4721 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4723 spin_unlock(&lfsck->li_lock);
4726 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4727 HALF_SEC - lfsck->li_time_last_checkpoint);
4728 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4729 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4730 com->lc_new_checked = 0;
4733 rc = lfsck_layout_store(env, com);
4734 up_write(&com->lc_sem);
4739 static int lfsck_layout_slave_post(const struct lu_env *env,
4740 struct lfsck_component *com,
4741 int result, bool init)
4743 struct lfsck_instance *lfsck = com->lc_lfsck;
4744 struct lfsck_layout *lo = com->lc_file_ram;
4748 rc = lfsck_layout_lastid_store(env, com);
4752 LASSERT(lfsck->li_out_notify != NULL);
4754 down_write(&com->lc_sem);
4756 spin_lock(&lfsck->li_lock);
4758 lo->ll_pos_last_checkpoint =
4759 lfsck->li_pos_current.lp_oit_cookie;
4761 lo->ll_status = LS_SCANNING_PHASE2;
4762 lo->ll_flags |= LF_SCANNED_ONCE;
4763 if (lo->ll_flags & LF_CRASHED_LASTID) {
4765 lo->ll_flags &= ~LF_CRASHED_LASTID;
4767 lo->ll_flags &= ~LF_UPGRADE;
4768 list_del_init(&com->lc_link);
4769 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
4770 } else if (result == 0) {
4771 lo->ll_status = lfsck->li_status;
4772 if (lo->ll_status == 0)
4773 lo->ll_status = LS_STOPPED;
4774 if (lo->ll_status != LS_PAUSED) {
4775 list_del_init(&com->lc_link);
4776 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4779 lo->ll_status = LS_FAILED;
4780 list_del_init(&com->lc_link);
4781 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4783 spin_unlock(&lfsck->li_lock);
4786 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4790 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
4791 HALF_SEC - lfsck->li_time_last_checkpoint);
4792 lo->ll_time_last_checkpoint = cfs_time_current_sec();
4793 lo->ll_objs_checked_phase1 += com->lc_new_checked;
4794 com->lc_new_checked = 0;
4797 rc = lfsck_layout_store(env, com);
4799 up_write(&com->lc_sem);
4801 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
4804 lfsck_rbtree_cleanup(env, com);
4809 static int lfsck_layout_dump(const struct lu_env *env,
4810 struct lfsck_component *com, char *buf, int len)
4812 struct lfsck_instance *lfsck = com->lc_lfsck;
4813 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
4814 struct lfsck_layout *lo = com->lc_file_ram;
4819 down_read(&com->lc_sem);
4820 rc = snprintf(buf, len,
4821 "name: lfsck_layout\n"
4827 lfsck_status2names(lo->ll_status));
4833 rc = lfsck_bits_dump(&buf, &len, lo->ll_flags, lfsck_flags_names,
4838 rc = lfsck_bits_dump(&buf, &len, bk->lb_param, lfsck_param_names,
4843 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_complete,
4844 "time_since_last_completed");
4848 rc = lfsck_time_dump(&buf, &len, lo->ll_time_latest_start,
4849 "time_since_latest_start");
4853 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_checkpoint,
4854 "time_since_last_checkpoint");
4858 rc = snprintf(buf, len,
4859 "latest_start_position: "LPU64"\n"
4860 "last_checkpoint_position: "LPU64"\n"
4861 "first_failure_position: "LPU64"\n",
4862 lo->ll_pos_latest_start,
4863 lo->ll_pos_last_checkpoint,
4864 lo->ll_pos_first_inconsistent);
4871 rc = snprintf(buf, len,
4872 "success_count: %u\n"
4873 "repaired_dangling: "LPU64"\n"
4874 "repaired_unmatched_pair: "LPU64"\n"
4875 "repaired_multiple_referenced: "LPU64"\n"
4876 "repaired_orphan: "LPU64"\n"
4877 "repaired_inconsistent_owner: "LPU64"\n"
4878 "repaired_others: "LPU64"\n"
4879 "skipped: "LPU64"\n"
4880 "failed_phase1: "LPU64"\n"
4881 "failed_phase2: "LPU64"\n",
4882 lo->ll_success_count,
4883 lo->ll_objs_repaired[LLIT_DANGLING - 1],
4884 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
4885 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
4886 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
4887 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
4888 lo->ll_objs_repaired[LLIT_OTHERS - 1],
4889 lo->ll_objs_skipped,
4890 lo->ll_objs_failed_phase1,
4891 lo->ll_objs_failed_phase2);
4898 if (lo->ll_status == LS_SCANNING_PHASE1) {
4900 const struct dt_it_ops *iops;
4901 cfs_duration_t duration = cfs_time_current() -
4902 lfsck->li_time_last_checkpoint;
4903 __u64 checked = lo->ll_objs_checked_phase1 +
4904 com->lc_new_checked;
4905 __u64 speed = checked;
4906 __u64 new_checked = com->lc_new_checked * HZ;
4907 __u32 rtime = lo->ll_run_time_phase1 +
4908 cfs_duration_sec(duration + HALF_SEC);
4911 do_div(new_checked, duration);
4913 do_div(speed, rtime);
4914 rc = snprintf(buf, len,
4915 "checked_phase1: "LPU64"\n"
4916 "checked_phase2: "LPU64"\n"
4917 "run_time_phase1: %u seconds\n"
4918 "run_time_phase2: %u seconds\n"
4919 "average_speed_phase1: "LPU64" items/sec\n"
4920 "average_speed_phase2: N/A\n"
4921 "real-time_speed_phase1: "LPU64" items/sec\n"
4922 "real-time_speed_phase2: N/A\n",
4924 lo->ll_objs_checked_phase2,
4926 lo->ll_run_time_phase2,
4935 LASSERT(lfsck->li_di_oit != NULL);
4937 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
4939 /* The low layer otable-based iteration position may NOT
4940 * exactly match the layout-based directory traversal
4941 * cookie. Generally, it is not a serious issue. But the
4942 * caller should NOT make assumption on that. */
4943 pos = iops->store(env, lfsck->li_di_oit);
4944 if (!lfsck->li_current_oit_processed)
4946 rc = snprintf(buf, len, "current_position: "LPU64"\n", pos);
4952 } else if (lo->ll_status == LS_SCANNING_PHASE2) {
4953 cfs_duration_t duration = cfs_time_current() -
4954 lfsck->li_time_last_checkpoint;
4955 __u64 checked = lo->ll_objs_checked_phase2 +
4956 com->lc_new_checked;
4957 __u64 speed1 = lo->ll_objs_checked_phase1;
4958 __u64 speed2 = checked;
4959 __u64 new_checked = com->lc_new_checked * HZ;
4960 __u32 rtime = lo->ll_run_time_phase2 +
4961 cfs_duration_sec(duration + HALF_SEC);
4964 do_div(new_checked, duration);
4965 if (lo->ll_run_time_phase1 != 0)
4966 do_div(speed1, lo->ll_run_time_phase1);
4968 do_div(speed2, rtime);
4969 rc = snprintf(buf, len,
4970 "checked_phase1: "LPU64"\n"
4971 "checked_phase2: "LPU64"\n"
4972 "run_time_phase1: %u seconds\n"
4973 "run_time_phase2: %u seconds\n"
4974 "average_speed_phase1: "LPU64" items/sec\n"
4975 "average_speed_phase2: "LPU64" items/sec\n"
4976 "real-time_speed_phase1: N/A\n"
4977 "real-time_speed_phase2: "LPU64" items/sec\n"
4978 "current_position: "DFID"\n",
4979 lo->ll_objs_checked_phase1,
4981 lo->ll_run_time_phase1,
4986 PFID(&com->lc_fid_latest_scanned_phase2));
4993 __u64 speed1 = lo->ll_objs_checked_phase1;
4994 __u64 speed2 = lo->ll_objs_checked_phase2;
4996 if (lo->ll_run_time_phase1 != 0)
4997 do_div(speed1, lo->ll_run_time_phase1);
4998 if (lo->ll_run_time_phase2 != 0)
4999 do_div(speed2, lo->ll_run_time_phase2);
5000 rc = snprintf(buf, len,
5001 "checked_phase1: "LPU64"\n"
5002 "checked_phase2: "LPU64"\n"
5003 "run_time_phase1: %u seconds\n"
5004 "run_time_phase2: %u seconds\n"
5005 "average_speed_phase1: "LPU64" items/sec\n"
5006 "average_speed_phase2: "LPU64" objs/sec\n"
5007 "real-time_speed_phase1: N/A\n"
5008 "real-time_speed_phase2: N/A\n"
5009 "current_position: N/A\n",
5010 lo->ll_objs_checked_phase1,
5011 lo->ll_objs_checked_phase2,
5012 lo->ll_run_time_phase1,
5013 lo->ll_run_time_phase2,
5025 up_read(&com->lc_sem);
5030 static int lfsck_layout_master_double_scan(const struct lu_env *env,
5031 struct lfsck_component *com)
5033 struct lfsck_layout_master_data *llmd = com->lc_data;
5034 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5035 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5036 struct lfsck_layout *lo = com->lc_file_ram;
5037 struct l_wait_info lwi = { 0 };
5039 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
5042 llmd->llmd_to_double_scan = 1;
5043 wake_up_all(&athread->t_ctl_waitq);
5044 l_wait_event(mthread->t_ctl_waitq,
5045 llmd->llmd_in_double_scan ||
5046 thread_is_stopped(athread),
5048 if (llmd->llmd_assistant_status < 0)
5049 return llmd->llmd_assistant_status;
5054 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
5055 struct lfsck_component *com)
5057 struct lfsck_instance *lfsck = com->lc_lfsck;
5058 struct lfsck_layout_slave_data *llsd = com->lc_data;
5059 struct lfsck_layout *lo = com->lc_file_ram;
5060 struct ptlrpc_thread *thread = &lfsck->li_thread;
5064 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
5065 lfsck_rbtree_cleanup(env, com);
5066 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
5070 atomic_inc(&lfsck->li_double_scan_count);
5072 com->lc_new_checked = 0;
5073 com->lc_new_scanned = 0;
5074 com->lc_time_last_checkpoint = cfs_time_current();
5075 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
5076 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
5079 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
5082 rc = lfsck_layout_slave_query_master(env, com);
5083 if (list_empty(&llsd->llsd_master_list)) {
5084 if (unlikely(!thread_is_running(thread)))
5095 rc = l_wait_event(thread->t_ctl_waitq,
5096 !thread_is_running(thread) ||
5097 list_empty(&llsd->llsd_master_list),
5099 if (unlikely(!thread_is_running(thread)))
5102 if (rc == -ETIMEDOUT)
5105 GOTO(done, rc = (rc < 0 ? rc : 1));
5109 rc = lfsck_layout_double_scan_result(env, com, rc);
5111 lfsck_rbtree_cleanup(env, com);
5112 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
5113 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
5114 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
5119 static void lfsck_layout_master_data_release(const struct lu_env *env,
5120 struct lfsck_component *com)
5122 struct lfsck_layout_master_data *llmd = com->lc_data;
5123 struct lfsck_instance *lfsck = com->lc_lfsck;
5124 struct lfsck_tgt_descs *ltds;
5125 struct lfsck_tgt_desc *ltd;
5126 struct lfsck_tgt_desc *next;
5128 LASSERT(llmd != NULL);
5129 LASSERT(thread_is_init(&llmd->llmd_thread) ||
5130 thread_is_stopped(&llmd->llmd_thread));
5131 LASSERT(list_empty(&llmd->llmd_req_list));
5133 com->lc_data = NULL;
5135 ltds = &lfsck->li_ost_descs;
5136 spin_lock(<ds->ltd_lock);
5137 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
5138 ltd_layout_phase_list) {
5139 list_del_init(<d->ltd_layout_phase_list);
5141 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
5142 ltd_layout_phase_list) {
5143 list_del_init(<d->ltd_layout_phase_list);
5145 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
5147 list_del_init(<d->ltd_layout_list);
5149 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
5150 ltd_layout_phase_list) {
5151 list_del_init(<d->ltd_layout_phase_list);
5153 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
5154 ltd_layout_phase_list) {
5155 list_del_init(<d->ltd_layout_phase_list);
5157 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
5159 list_del_init(<d->ltd_layout_list);
5161 spin_unlock(<ds->ltd_lock);
5166 static void lfsck_layout_slave_data_release(const struct lu_env *env,
5167 struct lfsck_component *com)
5169 struct lfsck_layout_slave_data *llsd = com->lc_data;
5170 struct lfsck_layout_seq *lls;
5171 struct lfsck_layout_seq *next;
5172 struct lfsck_layout_slave_target *llst;
5173 struct lfsck_layout_slave_target *tmp;
5175 LASSERT(llsd != NULL);
5177 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
5179 list_del_init(&lls->lls_list);
5180 lfsck_object_put(env, lls->lls_lastid_obj);
5184 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
5186 list_del_init(&llst->llst_list);
5190 lfsck_rbtree_cleanup(env, com);
5191 com->lc_data = NULL;
5195 static void lfsck_layout_master_quit(const struct lu_env *env,
5196 struct lfsck_component *com)
5198 struct lfsck_layout_master_data *llmd = com->lc_data;
5199 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
5200 struct ptlrpc_thread *athread = &llmd->llmd_thread;
5201 struct l_wait_info lwi = { 0 };
5203 llmd->llmd_exit = 1;
5204 wake_up_all(&athread->t_ctl_waitq);
5205 l_wait_event(mthread->t_ctl_waitq,
5206 thread_is_init(athread) ||
5207 thread_is_stopped(athread),
5211 static void lfsck_layout_slave_quit(const struct lu_env *env,
5212 struct lfsck_component *com)
5214 lfsck_rbtree_cleanup(env, com);
/*
 * Handle an incoming LFSCK notification on the layout master.
 *
 * LE_PAIRS_VERIFY requests are checked against the claimed MDT-object
 * pair; LE_PHASE1_DONE/LE_PHASE2_DONE/LE_PEER_EXIT update the
 * per-target bookkeeping lists.  Depending on the result and the
 * LPF_FAILOUT policy, the LFSCK may be stopped or the assistant
 * thread woken to start orphan handling.
 */
5217 static int lfsck_layout_master_in_notify(const struct lu_env *env,
5218 struct lfsck_component *com,
5219 struct lfsck_request *lr)
5221 struct lfsck_instance *lfsck = com->lc_lfsck;
5222 struct lfsck_layout *lo = com->lc_file_ram;
5223 struct lfsck_layout_master_data *llmd = com->lc_data;
5224 struct lfsck_tgt_descs *ltds;
5225 struct lfsck_tgt_desc *ltd;
/* Verify the MDT-object/OST-object pairing named in the request. */
5229 if (lr->lr_event == LE_PAIRS_VERIFY) {
5232 rc = lfsck_layout_master_check_pairs(env, com, &lr->lr_fid,
/* Only the three peer life-cycle events below are handled from here on. */
5238 if (lr->lr_event != LE_PHASE1_DONE &&
5239 lr->lr_event != LE_PHASE2_DONE &&
5240 lr->lr_event != LE_PEER_EXIT)
/* Choose the OST or MDT descriptor table according to the sender. */
5243 if (lr->lr_flags & LEF_FROM_OST)
5244 ltds = &lfsck->li_ost_descs;
5246 ltds = &lfsck->li_mdt_descs;
5247 spin_lock(<ds->ltd_lock);
5248 ltd = LTD_TGT(ltds, lr->lr_index);
5250 spin_unlock(<ds->ltd_lock);
5255 list_del_init(<d->ltd_layout_phase_list);
5256 switch (lr->lr_event) {
5257 case LE_PHASE1_DONE:
5258 if (lr->lr_status <= 0) {
/* Peer failed or stopped during phase1: drop it from the layout
 * list and mark the overall scan as incomplete. */
5259 ltd->ltd_layout_done = 1;
5260 list_del_init(<d->ltd_layout_list);
5261 CWARN("%s: %s %x failed/stopped at phase1: rc = %d.\n",
5262 lfsck_lfsck2name(lfsck),
5263 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5264 ltd->ltd_index, lr->lr_status);
5265 lo->ll_flags |= LF_INCOMPLETE;
/* Peer finished phase1 successfully: queue it for phase2. */
5270 if (lr->lr_flags & LEF_FROM_OST) {
5271 if (list_empty(<d->ltd_layout_list))
5272 list_add_tail(<d->ltd_layout_list,
5273 &llmd->llmd_ost_list);
5274 list_add_tail(<d->ltd_layout_phase_list,
5275 &llmd->llmd_ost_phase2_list);
5277 if (list_empty(<d->ltd_layout_list))
5278 list_add_tail(<d->ltd_layout_list,
5279 &llmd->llmd_mdt_list);
5280 list_add_tail(<d->ltd_layout_phase_list,
5281 &llmd->llmd_mdt_phase2_list);
5284 case LE_PHASE2_DONE:
5285 ltd->ltd_layout_done = 1;
5286 list_del_init(<d->ltd_layout_list);
/* LE_PEER_EXIT: the peer gave up; unless failout is requested,
 * warn and mark the scan incomplete. */
5290 ltd->ltd_layout_done = 1;
5291 list_del_init(<d->ltd_layout_list);
5292 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT)) {
5293 CWARN("%s: the peer %s %x exit layout LFSCK.\n",
5294 lfsck_lfsck2name(lfsck),
5295 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5297 lo->ll_flags |= LF_INCOMPLETE;
5303 spin_unlock(<ds->ltd_lock);
/* Under LPF_FAILOUT a failure stops the whole LFSCK; otherwise wake
 * the assistant if it can now move on to orphan processing. */
5305 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5306 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5308 memset(stop, 0, sizeof(*stop));
5309 stop->ls_status = lr->lr_status;
5310 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5311 lfsck_stop(env, lfsck->li_bottom, stop);
5312 } else if (lfsck_layout_master_to_orphan(llmd)) {
5313 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Handle an incoming LFSCK notification on a layout slave (OST side):
 * record accessed FIDs, conditionally destroy orphans, verify/repair
 * an OST-object's PFID xattr, and process master phase2-done or
 * peer-exit events.
 */
5319 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
5320 struct lfsck_component *com,
5321 struct lfsck_request *lr)
5323 struct lfsck_instance *lfsck = com->lc_lfsck;
5324 struct lfsck_layout_slave_data *llsd = com->lc_data;
5325 struct lfsck_layout_slave_target *llst;
5329 switch (lr->lr_event) {
5330 case LE_FID_ACCESSED:
/* Mark this FID as accessed in the rbtree bitmap. */
5331 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
5333 case LE_CONDITIONAL_DESTROY:
5334 rc = lfsck_layout_slave_conditional_destroy(env, com, lr);
5336 case LE_PAIRS_VERIFY: {
5337 lr->lr_status = LPVS_INIT;
5338 /* Firstly, if the MDT-object which is claimed via OST-object
5339 * local stored PFID xattr recognizes the OST-object, then it
5340 * must be that the client given PFID is wrong. */
5341 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5346 lr->lr_status = LPVS_INCONSISTENT;
5347 /* The OST-object local stored PFID xattr is stale. We need to
5348 * check whether the MDT-object that is claimed via the client
5349 * given PFID information recognizes the OST-object or not. If
5350 * matches, then need to update the OST-object's PFID xattr. */
5351 rc = lfsck_layout_slave_check_pairs(env, com, &lr->lr_fid,
5354 * We are not sure whether the client given PFID information
5355 * is correct or not, do nothing to avoid improper fixing.
5358 * The client given PFID information is also invalid, we can
5359 * NOT fix the OST-object inconsistency.
5364 lr->lr_status = LPVS_INCONSISTENT_TOFIX;
5365 rc = lfsck_layout_slave_repair_pfid(env, com, lr);
5369 case LE_PHASE2_DONE:
/* The named master finished (or exited): drop its slave-target
 * record and wake the local thread if no masters remain. */
5376 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
5380 lfsck_layout_llst_put(llst);
5381 if (list_empty(&llsd->llsd_master_list))
5382 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/* A peer exit under the failout policy stops the local LFSCK too. */
5384 if (lr->lr_event == LE_PEER_EXIT &&
5385 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
5386 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
5388 memset(stop, 0, sizeof(*stop));
5389 stop->ls_status = lr->lr_status;
5390 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
5391 lfsck_stop(env, lfsck->li_bottom, stop);
/* Report the current layout LFSCK status from the in-RAM copy of the
 * tracking file. */
5397 static int lfsck_layout_query(const struct lu_env *env,
5398 struct lfsck_component *com)
5400 struct lfsck_layout *lo = com->lc_file_ram;
5402 return lo->ll_status;
/*
 * Notify one target that this layout LFSCK is stopping: remove the
 * target from the layout tracking lists and send it an asynchronous
 * LE_PEER_EXIT / LS_CO_PAUSED request via the given request set.
 */
5405 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
5406 struct lfsck_component *com,
5407 struct lfsck_tgt_descs *ltds,
5408 struct lfsck_tgt_desc *ltd,
5409 struct ptlrpc_request_set *set)
5411 struct lfsck_thread_info *info = lfsck_env_info(env);
5412 struct lfsck_async_interpret_args *laia = &info->lti_laia;
5413 struct lfsck_request *lr = &info->lti_lr;
5414 struct lfsck_instance *lfsck = com->lc_lfsck;
/* Target already off the layout list: nothing to notify. */
5417 spin_lock(<ds->ltd_lock);
5418 if (list_empty(<d->ltd_layout_list)) {
5419 LASSERT(list_empty(<d->ltd_layout_phase_list));
5420 spin_unlock(<ds->ltd_lock);
5425 list_del_init(<d->ltd_layout_phase_list);
5426 list_del_init(<d->ltd_layout_list);
5427 spin_unlock(<ds->ltd_lock);
/* Build the co-stop request for the peer. */
5429 memset(lr, 0, sizeof(*lr));
5430 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
5431 lr->lr_event = LE_PEER_EXIT;
5432 lr->lr_active = LT_LAYOUT;
5433 lr->lr_status = LS_CO_PAUSED;
5434 if (ltds == &lfsck->li_ost_descs)
5435 lr->lr_flags = LEF_TO_OST;
5437 laia->laia_com = com;
5438 laia->laia_ltds = ltds;
/* Hold a reference on the target for the async interpret callback. */
5439 atomic_inc(<d->ltd_ref);
5440 laia->laia_ltd = ltd;
5442 laia->laia_shared = 0;
5444 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
5445 lfsck_layout_master_async_interpret,
5446 laia, LFSCK_NOTIFY);
5448 CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
5449 lfsck_lfsck2name(lfsck),
5450 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
5451 ltd->ltd_index, rc);
5458 /* with lfsck::li_lock held */
/*
 * Join this slave into an already-running layout LFSCK that was
 * started with LPF_ALL_TGT.  The li_lock is dropped around the llst
 * insertion, so the running state must be re-checked afterwards.
 */
5459 static int lfsck_layout_slave_join(const struct lu_env *env,
5460 struct lfsck_component *com,
5461 struct lfsck_start_param *lsp)
5463 struct lfsck_instance *lfsck = com->lc_lfsck;
5464 struct lfsck_layout_slave_data *llsd = com->lc_data;
5465 struct lfsck_layout_slave_target *llst;
5466 struct lfsck_start *start = lsp->lsp_start;
/* Only join when a valid index was given and both the new request and
 * the stored bookmark use all-targets mode. */
5470 if (!lsp->lsp_index_valid || start == NULL ||
5471 !(start->ls_flags & LPF_ALL_TGT) ||
5472 !(lfsck->li_bookmark_ram.lb_param & LPF_ALL_TGT))
5475 spin_unlock(&lfsck->li_lock);
5476 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
5477 spin_lock(&lfsck->li_lock);
/* The LFSCK stopped while the lock was dropped: undo the insertion. */
5478 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
5479 spin_unlock(&lfsck->li_lock);
5480 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index,
5483 lfsck_layout_llst_put(llst);
5484 spin_lock(&lfsck->li_lock);
/* Operation table for the layout LFSCK on the master (MDT) side. */
5491 static struct lfsck_operations lfsck_layout_master_ops = {
5492 .lfsck_reset = lfsck_layout_reset,
5493 .lfsck_fail = lfsck_layout_fail,
5494 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
5495 .lfsck_prep = lfsck_layout_master_prep,
5496 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
5497 .lfsck_exec_dir = lfsck_layout_exec_dir,
5498 .lfsck_post = lfsck_layout_master_post,
5499 .lfsck_interpret = lfsck_layout_master_async_interpret,
5500 .lfsck_dump = lfsck_layout_dump,
5501 .lfsck_double_scan = lfsck_layout_master_double_scan,
5502 .lfsck_data_release = lfsck_layout_master_data_release,
5503 .lfsck_quit = lfsck_layout_master_quit,
5504 .lfsck_in_notify = lfsck_layout_master_in_notify,
5505 .lfsck_query = lfsck_layout_query,
5506 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/* Operation table for the layout LFSCK on a slave (OST) side; note it
 * has no stop_notify/interpret handlers but adds a join handler. */
5509 static struct lfsck_operations lfsck_layout_slave_ops = {
5510 .lfsck_reset = lfsck_layout_reset,
5511 .lfsck_fail = lfsck_layout_fail,
5512 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
5513 .lfsck_prep = lfsck_layout_slave_prep,
5514 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
5515 .lfsck_exec_dir = lfsck_layout_exec_dir,
5516 .lfsck_post = lfsck_layout_slave_post,
5517 .lfsck_dump = lfsck_layout_dump,
5518 .lfsck_double_scan = lfsck_layout_slave_double_scan,
5519 .lfsck_data_release = lfsck_layout_slave_data_release,
5520 .lfsck_quit = lfsck_layout_slave_quit,
5521 .lfsck_in_notify = lfsck_layout_slave_in_notify,
5522 .lfsck_query = lfsck_layout_query,
5523 .lfsck_join = lfsck_layout_slave_join,
/*
 * Set up the layout LFSCK component for @lfsck: allocate the component
 * and its master/slave private data, load (or initialize/reset) the
 * on-disk tracking file, and link the component onto the idle or scan
 * list according to the stored status.
 */
5526 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
5528 struct lfsck_component *com;
5529 struct lfsck_layout *lo;
5530 struct dt_object *root = NULL;
5531 struct dt_object *obj;
5539 INIT_LIST_HEAD(&com->lc_link);
5540 INIT_LIST_HEAD(&com->lc_link_dir);
5541 init_rwsem(&com->lc_sem);
5542 atomic_set(&com->lc_ref, 1);
5543 com->lc_lfsck = lfsck;
5544 com->lc_type = LT_LAYOUT;
/* Master (MDT) side: allocate and initialize the master data. */
5545 if (lfsck->li_master) {
5546 struct lfsck_layout_master_data *llmd;
5548 com->lc_ops = &lfsck_layout_master_ops;
5549 OBD_ALLOC_PTR(llmd);
5551 GOTO(out, rc = -ENOMEM);
5553 INIT_LIST_HEAD(&llmd->llmd_req_list);
5554 spin_lock_init(&llmd->llmd_lock);
5555 INIT_LIST_HEAD(&llmd->llmd_ost_list);
5556 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
5557 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
5558 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
5559 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
5560 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
5561 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
5562 com->lc_data = llmd;
/* Slave (OST) side: allocate and initialize the slave data. */
5564 struct lfsck_layout_slave_data *llsd;
5566 com->lc_ops = &lfsck_layout_slave_ops;
5567 OBD_ALLOC_PTR(llsd);
5569 GOTO(out, rc = -ENOMEM);
5571 INIT_LIST_HEAD(&llsd->llsd_seq_list);
5572 INIT_LIST_HEAD(&llsd->llsd_master_list);
5573 spin_lock_init(&llsd->llsd_lock);
5574 llsd->llsd_rb_root = RB_ROOT;
5575 rwlock_init(&llsd->llsd_rb_lock);
5576 com->lc_data = llsd;
/* RAM and disk copies of the lfsck_layout tracking file. */
5578 com->lc_file_size = sizeof(*lo);
5579 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
5580 if (com->lc_file_ram == NULL)
5581 GOTO(out, rc = -ENOMEM);
5583 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
5584 if (com->lc_file_disk == NULL)
5585 GOTO(out, rc = -ENOMEM);
/* Locate (or create) the on-disk tracking file under the local root. */
5587 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
5589 GOTO(out, rc = PTR_ERR(root));
5591 if (unlikely(!dt_try_as_dir(env, root)))
5592 GOTO(out, rc = -ENOTDIR);
5594 obj = local_file_find_or_create(env, lfsck->li_los, root,
5596 S_IFREG | S_IRUGO | S_IWUSR);
5598 GOTO(out, rc = PTR_ERR(obj));
/* Load the stored state; reset on corruption, initialize if absent. */
5601 rc = lfsck_layout_load(env, com);
5603 rc = lfsck_layout_reset(env, com, true);
5604 else if (rc == -ENOENT)
5605 rc = lfsck_layout_init(env, com);
/* Choose the idle or scan list based on the recorded status. */
5610 lo = com->lc_file_ram;
5611 switch (lo->ll_status) {
5617 spin_lock(&lfsck->li_lock);
5618 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
5619 spin_unlock(&lfsck->li_lock);
5622 CERROR("%s: unknown lfsck_layout status: rc = %u\n",
5623 lfsck_lfsck2name(lfsck), lo->ll_status);
5625 case LS_SCANNING_PHASE1:
5626 case LS_SCANNING_PHASE2:
5627 /* No need to store the status to disk right now.
5628 * If the system crashed before the status stored,
5629 * it will be loaded back when next time. */
5630 lo->ll_status = LS_CRASHED;
5631 lo->ll_flags |= LF_INCOMPLETE;
5638 spin_lock(&lfsck->li_lock);
5639 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
5640 spin_unlock(&lfsck->li_lock);
/* The LAST_ID files were lost: ask the upper layer to rebuild them. */
5644 if (lo->ll_flags & LF_CRASHED_LASTID) {
5645 LASSERT(lfsck->li_out_notify != NULL);
5647 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
5648 LE_LASTID_REBUILDING);
5654 if (root != NULL && !IS_ERR(root))
5655 lu_object_put(env, &root->do_lu);
/* On error, release everything allocated above. */
5658 lfsck_component_cleanup(env, com);
/* Iterator state for scanning orphan OST-objects via the slave rbtree. */
5663 struct lfsck_orphan_it {
5664 struct lfsck_component *loi_com;
/* Current rbtree node being walked. */
5665 struct lfsck_rbtree_node *loi_lrn;
/* Slave target (requesting peer) this iteration belongs to. */
5666 struct lfsck_layout_slave_target *loi_llst;
/* Current key (FID) and the orphan record built for it. */
5667 struct lu_fid loi_key;
5668 struct lu_orphan_rec loi_rec;
/* Set once the iteration has passed the end of the rbtree. */
5670 unsigned int loi_over:1;
/*
 * Check whether @fid belongs to the MDT with index @idx, using an FLD
 * lookup on the FID's sequence.  Non-normal FIDs are treated as
 * belonging to MDT0.
 */
5673 static int lfsck_fid_match_idx(const struct lu_env *env,
5674 struct lfsck_instance *lfsck,
5675 const struct lu_fid *fid, int idx)
5677 struct seq_server_site *ss;
5678 struct lu_server_fld *sf;
5679 struct lu_seq_range range = { 0 };
5682 /* All abnormal cases will be returned to MDT0. */
5683 if (!fid_is_norm(fid)) {
5690 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
5691 if (unlikely(ss == NULL))
5694 sf = ss->ss_server_fld;
5695 LASSERT(sf != NULL);
5697 fld_range_set_any(&range);
5698 rc = fld_server_lookup(env, sf, fid_seq(fid), &range);
/* The sequence must map to an MDT for a positive match. */
5702 if (!fld_range_is_mdt(&range))
5705 if (range.lsr_index == idx)
/*
 * Destroy an orphan OST-object in a single local transaction: declare
 * the nlink drop and the destroy, then execute both under the object's
 * write lock.
 */
5711 static void lfsck_layout_destroy_orphan(const struct lu_env *env,
5712 struct dt_device *dev,
5713 struct dt_object *obj)
5715 struct thandle *handle;
5719 handle = dt_trans_create(env, dev);
5723 rc = dt_declare_ref_del(env, obj, handle);
5727 rc = dt_declare_destroy(env, obj, handle);
5731 rc = dt_trans_start_local(env, dev, handle);
5735 dt_write_lock(env, obj, 0);
5736 rc = dt_ref_del(env, obj, handle);
5738 rc = dt_destroy(env, obj, handle);
5739 dt_write_unlock(env, obj);
5744 dt_trans_stop(env, dev, handle);
/* dio_lookup handler for the orphan pseudo-index (body not visible in
 * this excerpt; presumably lookup is not supported — TODO confirm). */
5749 static int lfsck_orphan_index_lookup(const struct lu_env *env,
5750 struct dt_object *dt,
5752 const struct dt_key *key,
5753 struct lustre_capa *capa)
/* dio_declare_insert handler for the orphan pseudo-index (body not
 * visible in this excerpt; presumably not supported — TODO confirm). */
5758 static int lfsck_orphan_index_declare_insert(const struct lu_env *env,
5759 struct dt_object *dt,
5760 const struct dt_rec *rec,
5761 const struct dt_key *key,
5762 struct thandle *handle)
/* dio_insert handler for the orphan pseudo-index (body not visible in
 * this excerpt; presumably not supported — TODO confirm). */
5767 static int lfsck_orphan_index_insert(const struct lu_env *env,
5768 struct dt_object *dt,
5769 const struct dt_rec *rec,
5770 const struct dt_key *key,
5771 struct thandle *handle,
5772 struct lustre_capa *capa,
/* dio_declare_delete handler for the orphan pseudo-index (body not
 * visible in this excerpt; presumably not supported — TODO confirm). */
5778 static int lfsck_orphan_index_declare_delete(const struct lu_env *env,
5779 struct dt_object *dt,
5780 const struct dt_key *key,
5781 struct thandle *handle)
/* dio_delete handler for the orphan pseudo-index (body not visible in
 * this excerpt; presumably not supported — TODO confirm). */
5786 static int lfsck_orphan_index_delete(const struct lu_env *env,
5787 struct dt_object *dt,
5788 const struct dt_key *key,
5789 struct thandle *handle,
5790 struct lustre_capa *capa)
/*
 * Initialize an orphan iterator for the requesting peer (identified
 * via @attr).  Takes a read lock on the rbtree that is held until
 * fini; on the first iteration it also prunes fully-accessed nodes
 * and turns off FID-access recording on the device.
 */
5795 static struct dt_it *lfsck_orphan_it_init(const struct lu_env *env,
5796 struct dt_object *dt,
5798 struct lustre_capa *capa)
5800 struct dt_device *dev = lu2dt_dev(dt->do_lu.lo_dev);
5801 struct lfsck_instance *lfsck;
5802 struct lfsck_component *com = NULL;
5803 struct lfsck_layout_slave_data *llsd;
5804 struct lfsck_orphan_it *it = NULL;
5808 lfsck = lfsck_instance_find(dev, true, false);
5809 if (unlikely(lfsck == NULL))
5810 RETURN(ERR_PTR(-ENXIO));
5812 com = lfsck_component_find(lfsck, LT_LAYOUT);
5813 if (unlikely(com == NULL))
5814 GOTO(out, rc = -ENOENT);
5816 llsd = com->lc_data;
/* Without a valid rbtree there is nothing to iterate. */
5817 if (!llsd->llsd_rbtree_valid)
5818 GOTO(out, rc = -ESRCH);
5822 GOTO(out, rc = -ENOMEM);
5824 it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
5825 if (it->loi_llst == NULL)
5826 GOTO(out, rc = -ENXIO);
5828 if (dev->dd_record_fid_accessed) {
5829 /* The first iteration against the rbtree, scan the whole rbtree
5830 * to remove the nodes which do NOT need to be handled. */
5831 write_lock(&llsd->llsd_rb_lock);
/* Re-check under the write lock to serialize with other initializers. */
5832 if (dev->dd_record_fid_accessed) {
5833 struct rb_node *node;
5834 struct rb_node *next;
5835 struct lfsck_rbtree_node *lrn;
5837 /* No need to record the fid accessing anymore. */
5838 dev->dd_record_fid_accessed = 0;
5840 node = rb_first(&llsd->llsd_rb_root);
5841 while (node != NULL) {
5842 next = rb_next(node);
5843 lrn = rb_entry(node, struct lfsck_rbtree_node,
/* Every known FID in this node was accessed: drop the node. */
5845 if (atomic_read(&lrn->lrn_known_count) <=
5846 atomic_read(&lrn->lrn_accessed_count)) {
5847 rb_erase(node, &llsd->llsd_rb_root);
5848 lfsck_rbtree_free(lrn);
5853 write_unlock(&llsd->llsd_rb_lock);
5856 /* read lock the rbtree when init, and unlock when fini */
5857 read_lock(&llsd->llsd_rb_lock);
5865 lfsck_component_put(env, com);
5866 lfsck_instance_put(env, lfsck);
/* On error, return the error code encoded as a pointer. */
5871 it = (struct lfsck_orphan_it *)ERR_PTR(rc);
5874 return (struct dt_it *)it;
/*
 * Finish an orphan iteration: drop the rbtree read lock taken in
 * init, stash the current key/hash in the slave target so a later
 * iteration can resume, and release the held references.
 */
5877 static void lfsck_orphan_it_fini(const struct lu_env *env,
5880 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5881 struct lfsck_component *com = it->loi_com;
5882 struct lfsck_layout_slave_data *llsd;
5883 struct lfsck_layout_slave_target *llst;
5886 llsd = com->lc_data;
/* Pairs with the read_lock taken in lfsck_orphan_it_init(). */
5887 read_unlock(&llsd->llsd_rb_lock);
5888 llst = it->loi_llst;
5889 LASSERT(llst != NULL);
5891 /* Save the key and hash for iterate next. */
5892 llst->llst_fid = it->loi_key;
5893 llst->llst_hash = it->loi_hash;
5894 lfsck_layout_llst_put(llst);
5895 lfsck_component_put(env, com);
5901 * \retval +1: the iteration finished
5902 * \retval 0: on success, not finished
5903 * \retval -ve: on error
/*
 * Advance the orphan iterator to the next orphan OST-object relevant
 * to the requesting peer (index @idx): walk the rbtree bitmaps for
 * known-but-not-accessed FIDs, load each candidate object, read its
 * PFID xattr, and fill it->loi_rec with the parent FID and ownership.
 */
5905 static int lfsck_orphan_it_next(const struct lu_env *env,
5908 struct lfsck_thread_info *info = lfsck_env_info(env);
5909 struct filter_fid_old *pfid = &info->lti_old_pfid;
5910 struct lu_attr *la = &info->lti_la;
5911 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
5912 struct lu_fid *key = &it->loi_key;
5913 struct lu_orphan_rec *rec = &it->loi_rec;
5914 struct lfsck_component *com = it->loi_com;
5915 struct lfsck_instance *lfsck = com->lc_lfsck;
5916 struct lfsck_layout_slave_data *llsd = com->lc_data;
5917 struct dt_object *obj;
5918 struct lfsck_rbtree_node *lrn;
5922 __u32 idx = it->loi_llst->llst_index;
/* Locate the rbtree node covering the current key. */
5932 lrn = lfsck_rbtree_search(llsd, key, &exact);
5940 key->f_seq = lrn->lrn_seq;
5941 key->f_oid = lrn->lrn_first_oid;
5946 if (unlikely(key->f_oid == 0)) {
5953 lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH) {
/* This node is fully accessed: advance to the next node with work. */
5959 if (unlikely(atomic_read(&lrn->lrn_known_count) <=
5960 atomic_read(&lrn->lrn_accessed_count))) {
5961 struct rb_node *next = rb_next(&lrn->lrn_node);
5963 while (next != NULL) {
5964 lrn = rb_entry(next, struct lfsck_rbtree_node,
5966 if (atomic_read(&lrn->lrn_known_count) >
5967 atomic_read(&lrn->lrn_accessed_count))
5969 next = rb_next(next);
5978 key->f_seq = lrn->lrn_seq;
5979 key->f_oid = lrn->lrn_first_oid;
5983 pos = key->f_oid - lrn->lrn_first_oid;
/* Find the next known FID at or after the current bitmap position. */
5986 pos = find_next_bit(lrn->lrn_known_bitmap,
5987 LFSCK_RBTREE_BITMAP_WIDTH, pos);
5988 if (pos >= LFSCK_RBTREE_BITMAP_WIDTH) {
5989 key->f_oid = lrn->lrn_first_oid + pos;
/* Guard against f_oid wrap-around. */
5990 if (unlikely(key->f_oid < lrn->lrn_first_oid)) {
/* Already accessed during the scan: not an orphan candidate. */
5998 if (test_bit(pos, lrn->lrn_accessed_bitmap)) {
6003 key->f_oid = lrn->lrn_first_oid + pos;
6004 obj = lfsck_object_find(env, lfsck, key);
6007 if (rc == -ENOENT) {
6014 dt_read_lock(env, obj, 0);
6015 if (!dt_object_exists(obj)) {
6016 dt_read_unlock(env, obj);
6017 lfsck_object_put(env, obj);
6022 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Read the object's parent-FID (PFID) xattr. */
6026 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, pfid, sizeof(*pfid)),
6027 XATTR_NAME_FID, BYPASS_CAPA);
6028 if (rc == -ENODATA) {
6029 /* For the pre-created OST-object, update the bitmap to avoid
6030 * others LFSCK (second phase) iteration to touch it again. */
6031 if (la->la_ctime == 0) {
6032 if (!test_and_set_bit(pos, lrn->lrn_accessed_bitmap))
6033 atomic_inc(&lrn->lrn_accessed_count);
6035 /* For the race between repairing dangling referenced
6036 * MDT-object and unlink the file, it may left orphan
6037 * OST-object there. Destroy it now! */
6038 if (unlikely(!(la->la_mode & S_ISUID))) {
6039 dt_read_unlock(env, obj);
6040 lfsck_layout_destroy_orphan(env,
6043 lfsck_object_put(env, obj);
6047 } else if (idx == 0) {
6048 /* If the orphan OST-object has no parent information,
6049 * regard it as referenced by the MDT-object on MDT0. */
6050 fid_zero(&rec->lor_fid);
6051 rec->lor_uid = la->la_uid;
6052 rec->lor_gid = la->la_gid;
6056 dt_read_unlock(env, obj);
6057 lfsck_object_put(env, obj);
/* The PFID xattr must be one of the two known on-disk layouts. */
6065 if (rc != sizeof(struct filter_fid) &&
6066 rc != sizeof(struct filter_fid_old))
6067 GOTO(out, rc = -EINVAL);
6069 fid_le_to_cpu(&rec->lor_fid, &pfid->ff_parent);
6070 /* Currently, the filter_fid::ff_parent::f_ver is not the real parent
6071 * MDT-object's FID::f_ver, instead it is the OST-object index in its
6072 * parent MDT-object's layout EA. */
6073 save = rec->lor_fid.f_stripe_idx;
6074 rec->lor_fid.f_ver = 0;
6075 rc = lfsck_fid_match_idx(env, lfsck, &rec->lor_fid, idx);
6076 /* If the orphan OST-object does not claim the MDT, then next.
6078 * If we do not know whether it matches or not, then return it
6079 * to the MDT for further check. */
6081 dt_read_unlock(env, obj);
6082 lfsck_object_put(env, obj);
/* Candidate accepted: record stripe index and ownership. */
6087 rec->lor_fid.f_stripe_idx = save;
6088 rec->lor_uid = la->la_uid;
6089 rec->lor_gid = la->la_gid;
6091 CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u\n",
6092 lfsck_lfsck2name(com->lc_lfsck), PFID(key), PFID(&rec->lor_fid),
6093 rec->lor_uid, rec->lor_gid);
6098 dt_read_unlock(env, obj);
6099 lfsck_object_put(env, obj);
6107 * \retval +1: locate to the exactly position
6108 * \retval 0: cannot locate to the exactly position,
6109 * call next() to move to a valid position.
6110 * \retval -ve: on error
/* Position the iterator at (or after) the given FID key by copying
 * the key in and delegating to next(). */
6112 static int lfsck_orphan_it_get(const struct lu_env *env,
6114 const struct dt_key *key)
6116 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6119 it->loi_key = *(struct lu_fid *)key;
6120 rc = lfsck_orphan_it_next(env, di);
/* Release the current iterator position (body not visible in this
 * excerpt; presumably a no-op — TODO confirm). */
6130 static void lfsck_orphan_it_put(const struct lu_env *env,
/* Return the current iteration key: the orphan OST-object's FID. */
6135 static struct dt_key *lfsck_orphan_it_key(const struct lu_env *env,
6136 const struct dt_it *di)
6138 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6140 return (struct dt_key *)&it->loi_key;
/* The iteration key is always a struct lu_fid. */
6143 static int lfsck_orphan_it_key_size(const struct lu_env *env,
6144 const struct dt_it *di)
6146 return sizeof(struct lu_fid);
/* Copy the current orphan record (parent FID plus ownership) into the
 * caller-supplied record buffer. */
6149 static int lfsck_orphan_it_rec(const struct lu_env *env,
6150 const struct dt_it *di,
6154 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6156 *(struct lu_orphan_rec *)rec = it->loi_rec;
/* Return the iterator's resume cookie (current hash). */
6161 static __u64 lfsck_orphan_it_store(const struct lu_env *env,
6162 const struct dt_it *di)
6164 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6166 return it->loi_hash;
6170 * \retval +1: locate to the exactly position
6171 * \retval 0: cannot locate to the exactly position,
6172 * call next() to move to a valid position.
6173 * \retval -ve: on error
/*
 * Resume iteration from the cookie @hash: restore the key/hash saved
 * at fini time, or restart from scratch if the cookie does not match.
 */
6175 static int lfsck_orphan_it_load(const struct lu_env *env,
6176 const struct dt_it *di,
6179 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
6180 struct lfsck_layout_slave_target *llst = it->loi_llst;
6183 LASSERT(llst != NULL);
/* Stale cookie: reset the saved position and iterate from the start. */
6185 if (hash != llst->llst_hash) {
6186 CWARN("%s: the given hash "LPU64" for orphan iteration does "
6187 "not match the one when fini "LPU64", to be reset.\n",
6188 lfsck_lfsck2name(it->loi_com->lc_lfsck), hash,
6190 fid_zero(&llst->llst_fid);
6191 llst->llst_hash = 0;
6194 it->loi_key = llst->llst_fid;
6195 it->loi_hash = llst->llst_hash;
6196 rc = lfsck_orphan_it_next(env, (struct dt_it *)di);
/* key_rec handler for the orphan iterator (body not visible in this
 * excerpt; presumably a no-op — TODO confirm). */
6206 static int lfsck_orphan_it_key_rec(const struct lu_env *env,
6207 const struct dt_it *di,
6213 const struct dt_index_operations lfsck_orphan_index_ops = {
6214 .dio_lookup = lfsck_orphan_index_lookup,
6215 .dio_declare_insert = lfsck_orphan_index_declare_insert,
6216 .dio_insert = lfsck_orphan_index_insert,
6217 .dio_declare_delete = lfsck_orphan_index_declare_delete,
6218 .dio_delete = lfsck_orphan_index_delete,
6220 .init = lfsck_orphan_it_init,
6221 .fini = lfsck_orphan_it_fini,
6222 .get = lfsck_orphan_it_get,
6223 .put = lfsck_orphan_it_put,
6224 .next = lfsck_orphan_it_next,
6225 .key = lfsck_orphan_it_key,
6226 .key_size = lfsck_orphan_it_key_size,
6227 .rec = lfsck_orphan_it_rec,
6228 .store = lfsck_orphan_it_store,
6229 .load = lfsck_orphan_it_load,
6230 .key_rec = lfsck_orphan_it_key_rec,