4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2013, Intel Corporation.
26 * lustre/lfsck/lfsck_layout.c
28 * Author: Fan, Yong <fan.yong@intel.com>
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_LFSCK
36 #include <linux/bitops.h>
37 #include <linux/rbtree.h>
39 #include <lustre/lustre_idl.h>
40 #include <lu_object.h>
41 #include <dt_object.h>
42 #include <lustre_linkea.h>
43 #include <lustre_fid.h>
44 #include <lustre_lib.h>
45 #include <lustre_net.h>
46 #include <lustre/lustre_user.h>
47 #include <md_object.h>
48 #include <obd_class.h>
50 #include "lfsck_internal.h"
52 #define LFSCK_LAYOUT_MAGIC 0xB173AE14
/* Well-known name under which the layout LFSCK component registers. */
54 static const char lfsck_layout_name[] = "lfsck_layout";
/*
 * Per-sequence state tracked by the layout LFSCK slave.  Entries live on
 * lfsck_layout_slave_data::llsd_seq_list, kept sorted by sequence
 * (see lfsck_layout_seq_lookup()/lfsck_layout_seq_insert()).
 * NOTE(review): listing is truncated; some members are not visible here.
 */
56 struct lfsck_layout_seq {
/* Linkage into llsd_seq_list (ascending lls_seq order). */
57 struct list_head lls_list;
/* Highest object ID known (by LFSCK) to exist in this sequence. */
60 __u64 lls_lastid_known;
/* The on-disk LAST_ID file object for this sequence. */
61 struct dt_object *lls_lastid_obj;
/* Set when the in-memory LAST_ID needs writing back; consumed by
 * lfsck_layout_lastid_store(). */
62 unsigned int lls_dirty:1;
/*
 * Per-master (MDT) tracking entry kept by the OST-side layout LFSCK slave.
 * Refcounted: see lfsck_layout_llst_put()/lfsck_layout_llst_add().
 */
65 struct lfsck_layout_slave_target {
66 /* link into lfsck_layout_slave_data::llsd_master_list. */
67 struct list_head llst_list;
68 /* The position for next record in the rbtree for iteration. */
69 struct lu_fid llst_fid;
70 /* Dummy hash for iteration against the rbtree. */
/*
 * Slave-side (OST) state for the layout LFSCK component: per-sequence
 * LAST_ID tracking, the list of interested masters, and the OST-object
 * rbtree used for orphan detection.
 */
77 struct lfsck_layout_slave_data {
78 /* list for lfsck_layout_seq */
79 struct list_head llsd_seq_list;
81 /* list for the masters involved in layout verification. */
82 struct list_head llsd_master_list;
/* In-RAM stand-in object for iterating the rbtree
 * (set up by lfsck_rbtree_setup()). */
85 struct dt_object *llsd_rb_obj;
86 struct rb_root llsd_rb_root;
/* Protects llsd_rb_root and llsd_rbtree_valid. */
87 rwlock_t llsd_rb_lock;
/* Cleared (under write lock) by lfsck_rbtree_cleanup() so that no new
 * users touch the tree while it is being torn down. */
88 unsigned int llsd_rbtree_valid:1;
/*
 * Refcounted wrapper around a pre-fetched parent (MDT) object together
 * with its cached attributes; released via lfsck_layout_object_put().
 * NOTE(review): listing is truncated; refcount/generation members are
 * referenced by the code below but not visible here.
 */
91 struct lfsck_layout_object {
92 struct dt_object *llo_obj;
/* Attributes cached at pre-fetch time (dt_attr_get()). */
93 struct lu_attr llo_attr;
/*
 * One layout verification request queued on llmd_req_list: a parent
 * MDT-object plus a single OST-object (child) occupying slot
 * llr_lov_idx of the parent's LOV EA.
 */
98 struct lfsck_layout_req {
99 struct list_head llr_list;
/* Holds its own reference on the parent wrapper
 * (see lfsck_layout_req_init()). */
100 struct lfsck_layout_object *llr_parent;
101 struct dt_object *llr_child;
103 __u32 llr_lov_idx; /* offset in LOV EA */
/*
 * Master-side (MDT) state for the layout LFSCK: the request queue
 * consumed by the assistant thread, plus the per-target bookkeeping
 * lists for OSTs and other MDTs in the two scanning phases.
 */
106 struct lfsck_layout_master_data {
/* Protects llmd_req_list and related state. */
107 spinlock_t llmd_lock;
108 struct list_head llmd_req_list;
110 /* list for the ost targets involved in layout verification. */
111 struct list_head llmd_ost_list;
113 /* list for the ost targets in phase1 scanning. */
114 struct list_head llmd_ost_phase1_list;
116 /* list for the ost targets in phase2 scanning. */
117 struct list_head llmd_ost_phase2_list;
119 /* list for the mdt targets involved in layout verification. */
120 struct list_head llmd_mdt_list;
122 /* list for the mdt targets in phase1 scanning. */
123 struct list_head llmd_mdt_phase1_list;
125 /* list for the mdt targets in phase2 scanning. */
126 struct list_head llmd_mdt_phase2_list;
/* The assistant thread handling llmd_req_list. */
128 struct ptlrpc_thread llmd_thread;
/* Generation counter used to avoid re-querying the same target twice
 * in one pass (see lfsck_layout_master_query_others()). */
129 __u32 llmd_touch_gen;
131 int llmd_assistant_status;
132 int llmd_post_result;
133 unsigned int llmd_to_post:1,
134 llmd_to_double_scan:1,
135 llmd_in_double_scan:1,
/*
 * Argument bundle carried through slave-side async RPCs: the export the
 * request went to, the owning component, and the master-target entry.
 */
139 struct lfsck_layout_slave_async_args {
140 struct obd_export *llsaa_exp;
141 struct lfsck_component *llsaa_com;
142 struct lfsck_layout_slave_target *llsaa_llst;
/*
 * Build a refcounted wrapper around pre-fetched parent object @obj:
 * cache its attributes, take a reference on the underlying lu_object,
 * and start the wrapper refcount at 1.  Returns ERR_PTR(-ENOMEM) when
 * the wrapper cannot be allocated.
 * NOTE(review): listing is truncated; allocation and some error paths
 * are not visible here.
 */
145 static struct lfsck_layout_object *
146 lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
149 struct lfsck_layout_object *llo;
154 return ERR_PTR(-ENOMEM);
156 rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
163 lu_object_get(&obj->do_lu);
165 /* The gen can be used to check whether some others have changed the
166 * file layout after LFSCK pre-fetching but before real verification. */
168 atomic_set(&llo->llo_ref, 1);
/*
 * Drop one reference on @llst.  On the final put the entry must already
 * be unlinked from llsd_master_list (asserted); presumably it is then
 * freed — the freeing lines are not visible in this listing.
 */
174 lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
176 if (atomic_dec_and_test(&llst->llst_ref)) {
177 LASSERT(list_empty(&llst->llst_list));
/*
 * Register master @index with slave data @llsd: allocate a new
 * slave-target entry (refcount 1) and append it to llsd_master_list,
 * unless an entry with the same index already exists (the duplicate
 * branch at old line 201 — its body is not visible in this listing).
 */
184 lfsck_layout_llst_add(struct lfsck_layout_slave_data *llsd, __u32 index)
186 struct lfsck_layout_slave_target *llst;
187 struct lfsck_layout_slave_target *tmp;
194 INIT_LIST_HEAD(&llst->llst_list);
196 llst->llst_index = index;
197 atomic_set(&llst->llst_ref, 1);
/* Scan under llsd_lock so add/lookup of the same index serialize. */
199 spin_lock(&llsd->llsd_lock);
200 list_for_each_entry(tmp, &llsd->llsd_master_list, llst_list) {
201 if (tmp->llst_index == index) {
207 list_add_tail(&llst->llst_list, &llsd->llsd_master_list);
208 spin_unlock(&llsd->llsd_lock);
/*
 * Unlink @llst from llsd_master_list (if still linked, under llsd_lock)
 * and drop the list's reference on it.
 */
217 lfsck_layout_llst_del(struct lfsck_layout_slave_data *llsd,
218 struct lfsck_layout_slave_target *llst)
222 spin_lock(&llsd->llsd_lock);
223 if (!list_empty(&llst->llst_list)) {
224 list_del_init(&llst->llst_list);
227 spin_unlock(&llsd->llsd_lock);
/* Drop the reference that the list held. */
230 lfsck_layout_llst_put(llst);
/*
 * Look up the slave-target entry for master @index under llsd_lock.
 * On a hit: take a reference and return it; the list_del_init() is
 * presumably guarded by @unlink (the guard line is not visible in this
 * listing).  Returns with the lock dropped; the miss path (after old
 * line 251) is also not visible here.
 */
233 static inline struct lfsck_layout_slave_target *
234 lfsck_layout_llst_find_and_del(struct lfsck_layout_slave_data *llsd,
235 __u32 index, bool unlink)
237 struct lfsck_layout_slave_target *llst;
239 spin_lock(&llsd->llsd_lock);
240 list_for_each_entry(llst, &llsd->llsd_master_list, llst_list) {
241 if (llst->llst_index == index) {
243 list_del_init(&llst->llst_list);
245 atomic_inc(&llst->llst_ref);
246 spin_unlock(&llsd->llsd_lock);
251 spin_unlock(&llsd->llsd_lock);
/*
 * Drop one reference on the parent-object wrapper @llo; on the last put
 * release the wrapped object (and presumably free @llo itself — the
 * freeing line is not visible in this listing).
 */
256 static inline void lfsck_layout_object_put(const struct lu_env *env,
257 struct lfsck_layout_object *llo)
259 if (atomic_dec_and_test(&llo->llo_ref)) {
260 lfsck_object_put(env, llo->llo_obj);
/*
 * Allocate a verification request pairing @parent (MDT-object wrapper)
 * with OST-object @child at slot @lov_idx / OST index @ost_idx.  Takes
 * its own reference on @parent (dropped in lfsck_layout_req_fini());
 * returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): listing truncated; allocation line not visible.
 */
265 static struct lfsck_layout_req *
266 lfsck_layout_req_init(struct lfsck_layout_object *parent,
267 struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
269 struct lfsck_layout_req *llr;
273 return ERR_PTR(-ENOMEM);
275 INIT_LIST_HEAD(&llr->llr_list);
/* The request owns a parent reference for its whole lifetime. */
276 atomic_inc(&parent->llo_ref);
277 llr->llr_parent = parent;
278 llr->llr_child = child;
279 llr->llr_ost_idx = ost_idx;
280 llr->llr_lov_idx = lov_idx;
/*
 * Release a verification request: drop the child object reference and
 * the parent wrapper reference taken in lfsck_layout_req_init()
 * (the freeing of @llr itself is not visible in this listing).
 */
285 static inline void lfsck_layout_req_fini(const struct lu_env *env,
286 struct lfsck_layout_req *llr)
288 lu_object_put(env, &llr->llr_child->do_lu);
289 lfsck_layout_object_put(env, llr->llr_parent);
/*
 * Test, under llmd_lock, whether the assistant's request queue is
 * empty.  NOTE(review): listing truncated — the empty-branch body and
 * the return statements are not visible here.
 */
293 static inline bool lfsck_layout_req_empty(struct lfsck_layout_master_data *llmd)
297 spin_lock(&llmd->llmd_lock);
298 if (list_empty(&llmd->llmd_req_list))
300 spin_unlock(&llmd->llmd_lock);
/*
 * Read the LOV EA (XATTR_NAME_LOV) of @obj into @buf, growing the
 * buffer on demand: a probe with LU_BUF_NULL returns the required
 * size, after which the buffer is (re)allocated and the read retried.
 * The (possibly new) buffer capacity is reported through *buflen.
 * NOTE(review): listing truncated — the retry loop structure and
 * return paths are only partially visible.
 */
305 static int lfsck_layout_get_lovea(const struct lu_env *env,
306 struct dt_object *obj,
307 struct lu_buf *buf, ssize_t *buflen)
312 rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
/* Buffer too small (or unset): probe for the EA size. */
314 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
319 lu_buf_realloc(buf, rc);
321 *buflen = buf->lb_len;
323 if (buf->lb_buf == NULL)
/* First call with no buffer yet: allocate to the probed size. */
335 if (unlikely(buf->lb_buf == NULL)) {
336 lu_buf_alloc(buf, rc);
338 *buflen = buf->lb_len;
340 if (buf->lb_buf == NULL)
/*
 * Sanity-check a LOV EA header: only LOV_MAGIC_V1/V3 magics and the
 * LOV_PATTERN_RAID0 pattern are accepted.  (Return statements for the
 * reject branches are not visible in this truncated listing.)
 */
349 static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
354 magic = le32_to_cpu(lmm->lmm_magic);
355 /* If magic crashed, keep it there. Sometime later, during OST-object
356 * orphan handling, if some OST-object(s) back-point to it, it can be
357 * verified and repaired. */
358 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
361 patten = le32_to_cpu(lmm->lmm_pattern);
362 /* XXX: currently, we only support LOV_PATTERN_RAID0. */
363 if (patten != LOV_PATTERN_RAID0)
/* Each rbtree node carries a page-sized bitmap: SIZE bytes => WIDTH
 * (= SIZE * 8) object IDs per node; MASK extracts the in-node offset
 * of an OID (see lfsck_rbtree_update_bitmap()). */
369 #define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
370 #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
371 #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_SIZE - 1)
/*
 * One rbtree node covers LFSCK_RBTREE_BITMAP_WIDTH consecutive OIDs of
 * a single sequence (key: lrn_seq + lrn_first_oid, not visible in this
 * truncated listing).  Two bitmaps track which OIDs are known to exist
 * and which were actually accessed, with matching counters.
 */
373 struct lfsck_rbtree_node {
374 struct rb_node lrn_node;
377 atomic_t lrn_known_count;
378 atomic_t lrn_accessed_count;
379 void *lrn_known_bitmap;
380 void *lrn_accessed_bitmap;
/*
 * Three-way comparison of (@seq, @oid) against the OID range covered by
 * node @lrn: negative/positive when the key sorts before/after the
 * node, zero when @oid falls inside [lrn_first_oid,
 * lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH).  (The return statements
 * themselves are not visible in this truncated listing.)
 */
383 static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
384 __u64 seq, __u32 oid)
386 if (seq < lrn->lrn_seq)
389 if (seq > lrn->lrn_seq)
392 if (oid < lrn->lrn_first_oid)
395 if (oid >= lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH)
401 /* The caller should hold llsd->llsd_rb_lock. */
/*
 * Find the rbtree node covering @fid, or — when no node covers it —
 * the next valid node in key order (via rb_next() from the last node
 * visited).  @exact presumably reports which case occurred — the
 * assignment is not visible in this truncated listing.  Returns NULL
 * when the tree is empty or the key is past the last node.
 */
402 static struct lfsck_rbtree_node *
403 lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
404 const struct lu_fid *fid, bool *exact)
406 struct rb_node *node = llsd->llsd_rb_root.rb_node;
407 struct rb_node *prev = NULL;
408 struct lfsck_rbtree_node *lrn = NULL;
414 while (node != NULL) {
416 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
417 rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
419 node = node->rb_left;
421 node = node->rb_right;
429 /* If there is no exactly matched one, then to the next valid one. */
432 /* The rbtree is empty. */
439 node = rb_next(prev);
441 /* The end of the rbtree. */
445 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
/*
 * Allocate a fresh rbtree node for the range containing @fid: the node
 * plus its two page-sized bitmaps.  The covered range starts at the
 * fid's OID rounded down to the bitmap width; counters start at zero.
 * Returns ERR_PTR(-ENOMEM) on any allocation failure, with everything
 * already-allocated freed.
 */
450 static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
451 const struct lu_fid *fid)
453 struct lfsck_rbtree_node *lrn;
457 return ERR_PTR(-ENOMEM);
459 OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
460 if (lrn->lrn_known_bitmap == NULL) {
463 return ERR_PTR(-ENOMEM);
466 OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
467 if (lrn->lrn_accessed_bitmap == NULL) {
468 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
471 return ERR_PTR(-ENOMEM);
474 rb_init_node(&lrn->lrn_node);
475 lrn->lrn_seq = fid_seq(fid);
/* Align the node's base OID down to a bitmap-width boundary. */
476 lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
477 atomic_set(&lrn->lrn_known_count, 0);
478 atomic_set(&lrn->lrn_accessed_count, 0);
/*
 * Free both bitmaps of @lrn (the node itself is presumably freed on a
 * line not visible in this truncated listing).
 */
483 static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
485 OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
486 OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
490 /* The caller should hold lock. */
/*
 * Standard rbtree insertion of @lrn keyed by (lrn_seq, lrn_first_oid).
 * The equal-key branch (returning the pre-existing node instead of
 * inserting) is not visible in this truncated listing; callers such as
 * lfsck_rbtree_update_bitmap() free their node when the returned
 * pointer differs from the one passed in.
 */
491 static struct lfsck_rbtree_node *
492 lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
493 struct lfsck_rbtree_node *lrn)
495 struct rb_node **pos = &(llsd->llsd_rb_root.rb_node);
496 struct rb_node *parent = NULL;
497 struct lfsck_rbtree_node *tmp;
502 tmp = rb_entry(*pos, struct lfsck_rbtree_node, lrn_node);
503 rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
505 pos = &((*pos)->rb_left);
507 pos = &((*pos)->rb_right);
512 rb_link_node(&lrn->lrn_node, parent, pos);
513 rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
518 extern const struct dt_index_operations lfsck_orphan_index_ops;
/*
 * Prepare the slave-side rbtree machinery: locate (create in RAM) a
 * stand-in object under FID_SEQ_LAYOUT_RBTREE keyed by this device's
 * index, mark it existing, attach the orphan-index operations so the
 * tree can be iterated like an index, and switch on FID-accessed
 * recording on the bottom device.
 */
520 static int lfsck_rbtree_setup(const struct lu_env *env,
521 struct lfsck_component *com)
523 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
524 struct lfsck_instance *lfsck = com->lc_lfsck;
525 struct dt_device *dev = lfsck->li_bottom;
526 struct lfsck_layout_slave_data *llsd = com->lc_data;
527 struct dt_object *obj;
529 fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
530 fid->f_oid = lfsck_dev_idx(dev);
532 obj = dt_locate(env, dev, fid);
534 RETURN(PTR_ERR(obj));
536 /* Generate an in-RAM object to stand for the layout rbtree.
537 * Scanning the layout rbtree will be via the iteration over
538 * the object. In the future, the rbtree may be written onto
539 * disk with the object.
541 * Mark the object to be as exist. */
542 obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
543 obj->do_index_ops = &lfsck_orphan_index_ops;
544 llsd->llsd_rb_obj = obj;
545 llsd->llsd_rbtree_valid = 1;
546 dev->dd_record_fid_accessed = 1;
/*
 * Tear down the slave-side rbtree: stop FID-accessed recording,
 * invalidate the tree under the write lock (so concurrent users bail
 * out), free every node, and drop the in-RAM stand-in object.
 */
551 static void lfsck_rbtree_cleanup(const struct lu_env *env,
552 struct lfsck_component *com)
554 struct lfsck_instance *lfsck = com->lc_lfsck;
555 struct lfsck_layout_slave_data *llsd = com->lc_data;
556 struct rb_node *node = rb_first(&llsd->llsd_rb_root);
557 struct rb_node *next;
558 struct lfsck_rbtree_node *lrn;
560 lfsck->li_bottom->dd_record_fid_accessed = 0;
561 /* Invalid the rbtree, then no others will use it. */
562 write_lock(&llsd->llsd_rb_lock);
563 llsd->llsd_rbtree_valid = 0;
564 write_unlock(&llsd->llsd_rb_lock);
/* Safe to walk unlocked now: the tree was just invalidated. */
566 while (node != NULL) {
567 next = rb_next(node);
568 lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
569 rb_erase(node, &llsd->llsd_rb_root);
570 lfsck_rbtree_free(lrn);
574 if (llsd->llsd_rb_obj != NULL) {
575 lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
576 llsd->llsd_rb_obj = NULL;
/*
 * Record @fid in the rbtree bitmaps: always set its "known" bit, and
 * also its "accessed" bit when @accessed (4th parameter, declaration
 * line not visible in this truncated listing).  Only sane, non-LAST_ID
 * fids in IDIF/normal sequences are tracked.  If no node covers the
 * fid, drop the read lock, allocate one, and retake the write lock to
 * insert it (rechecking validity after the lock gap).  On failure with
 * @accessed set, orphan handling would be unreliable, so the whole
 * rbtree is torn down and LF_INCOMPLETE is flagged.
 */
580 static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
581 struct lfsck_component *com,
582 const struct lu_fid *fid,
585 struct lfsck_layout_slave_data *llsd = com->lc_data;
586 struct lfsck_rbtree_node *lrn;
592 CDEBUG(D_LFSCK, "%s: update bitmap for "DFID"\n",
593 lfsck_lfsck2name(com->lc_lfsck), PFID(fid));
595 if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
598 if (!fid_is_idif(fid) && !fid_is_norm(fid))
601 read_lock(&llsd->llsd_rb_lock);
602 if (!llsd->llsd_rbtree_valid)
603 GOTO(unlock, rc = 0);
605 lrn = lfsck_rbtree_search(llsd, fid, NULL);
607 struct lfsck_rbtree_node *tmp;
/* No covering node: allocate outside any lock, then re-lock
 * for write and re-validate before inserting. */
611 read_unlock(&llsd->llsd_rb_lock);
612 tmp = lfsck_rbtree_new(env, fid);
614 GOTO(out, rc = PTR_ERR(tmp));
617 write_lock(&llsd->llsd_rb_lock);
618 if (!llsd->llsd_rbtree_valid) {
619 lfsck_rbtree_free(tmp);
620 GOTO(unlock, rc = 0);
623 lrn = lfsck_rbtree_insert(llsd, tmp);
/* Somebody raced us in; use theirs and free ours. */
625 lfsck_rbtree_free(tmp);
628 idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
629 /* Any accessed object must be a known object. */
630 if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
631 atomic_inc(&lrn->lrn_known_count);
632 if (accessed && !test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
633 atomic_inc(&lrn->lrn_accessed_count);
635 GOTO(unlock, rc = 0);
639 write_unlock(&llsd->llsd_rb_lock);
641 read_unlock(&llsd->llsd_rb_lock);
643 if (rc != 0 && accessed) {
644 struct lfsck_layout *lo = com->lc_file_ram;
646 CERROR("%s: Fail to update object accessed bitmap, will cause "
647 "incorrect LFSCK OST-object handling, so disable it to "
648 "cancel orphan handling for related device. rc = %d.\n",
649 lfsck_lfsck2name(com->lc_lfsck), rc);
650 lo->ll_flags |= LF_INCOMPLETE;
651 lfsck_rbtree_cleanup(env, com);
/*
 * Convert an on-disk (little-endian) lfsck_layout record @src into the
 * CPU-endian in-memory copy @des, field by field.  Must be kept in
 * sync with lfsck_layout_cpu_to_le() below.
 */
655 static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
656 const struct lfsck_layout *src)
660 des->ll_magic = le32_to_cpu(src->ll_magic);
661 des->ll_status = le32_to_cpu(src->ll_status);
662 des->ll_flags = le32_to_cpu(src->ll_flags);
663 des->ll_success_count = le32_to_cpu(src->ll_success_count);
664 des->ll_run_time_phase1 = le32_to_cpu(src->ll_run_time_phase1);
665 des->ll_run_time_phase2 = le32_to_cpu(src->ll_run_time_phase2);
666 des->ll_time_last_complete = le64_to_cpu(src->ll_time_last_complete);
667 des->ll_time_latest_start = le64_to_cpu(src->ll_time_latest_start);
668 des->ll_time_last_checkpoint =
669 le64_to_cpu(src->ll_time_last_checkpoint);
670 des->ll_pos_latest_start = le64_to_cpu(src->ll_pos_latest_start);
671 des->ll_pos_last_checkpoint = le64_to_cpu(src->ll_pos_last_checkpoint);
672 des->ll_pos_first_inconsistent =
673 le64_to_cpu(src->ll_pos_first_inconsistent);
674 des->ll_objs_checked_phase1 = le64_to_cpu(src->ll_objs_checked_phase1);
675 des->ll_objs_failed_phase1 = le64_to_cpu(src->ll_objs_failed_phase1);
676 des->ll_objs_checked_phase2 = le64_to_cpu(src->ll_objs_checked_phase2);
677 des->ll_objs_failed_phase2 = le64_to_cpu(src->ll_objs_failed_phase2);
678 for (i = 0; i < LLIT_MAX; i++)
679 des->ll_objs_repaired[i] =
680 le64_to_cpu(src->ll_objs_repaired[i]);
681 des->ll_objs_skipped = le64_to_cpu(src->ll_objs_skipped);
/*
 * Convert an in-memory (CPU-endian) lfsck_layout record @src into the
 * on-disk little-endian form @des — the exact inverse of
 * lfsck_layout_le_to_cpu() above; keep the two in sync.
 */
684 static void lfsck_layout_cpu_to_le(struct lfsck_layout *des,
685 const struct lfsck_layout *src)
689 des->ll_magic = cpu_to_le32(src->ll_magic);
690 des->ll_status = cpu_to_le32(src->ll_status);
691 des->ll_flags = cpu_to_le32(src->ll_flags);
692 des->ll_success_count = cpu_to_le32(src->ll_success_count);
693 des->ll_run_time_phase1 = cpu_to_le32(src->ll_run_time_phase1);
694 des->ll_run_time_phase2 = cpu_to_le32(src->ll_run_time_phase2);
695 des->ll_time_last_complete = cpu_to_le64(src->ll_time_last_complete);
696 des->ll_time_latest_start = cpu_to_le64(src->ll_time_latest_start);
697 des->ll_time_last_checkpoint =
698 cpu_to_le64(src->ll_time_last_checkpoint);
699 des->ll_pos_latest_start = cpu_to_le64(src->ll_pos_latest_start);
700 des->ll_pos_last_checkpoint = cpu_to_le64(src->ll_pos_last_checkpoint);
701 des->ll_pos_first_inconsistent =
702 cpu_to_le64(src->ll_pos_first_inconsistent);
703 des->ll_objs_checked_phase1 = cpu_to_le64(src->ll_objs_checked_phase1);
704 des->ll_objs_failed_phase1 = cpu_to_le64(src->ll_objs_failed_phase1);
705 des->ll_objs_checked_phase2 = cpu_to_le64(src->ll_objs_checked_phase2);
706 des->ll_objs_failed_phase2 = cpu_to_le64(src->ll_objs_failed_phase2);
707 for (i = 0; i < LLIT_MAX; i++)
708 des->ll_objs_repaired[i] =
709 cpu_to_le64(src->ll_objs_repaired[i]);
710 des->ll_objs_skipped = cpu_to_le64(src->ll_objs_skipped);
/* Load the persistent lfsck_layout trace file into com->lc_file_ram,
 * converting from little-endian and validating the magic. */
714 * \retval +ve: the lfsck_layout is broken, the caller should reset it.
715 * \retval 0: succeed.
716 * \retval -ve: failed cases.
718 static int lfsck_layout_load(const struct lu_env *env,
719 struct lfsck_component *com)
721 struct lfsck_layout *lo = com->lc_file_ram;
722 const struct dt_body_operations *dbo = com->lc_obj->do_body_ops;
723 ssize_t size = com->lc_file_size;
727 rc = dbo->dbo_read(env, com->lc_obj,
728 lfsck_buf_get(env, com->lc_file_disk, size), &pos,
/* Negative rc: real read failure. */
733 CWARN("%s: failed to load lfsck_layout: rc = %d\n",
734 lfsck_lfsck2name(com->lc_lfsck), rc);
/* Short read: record is corrupt/incomplete — caller resets it. */
736 } else if (rc != size) {
737 CWARN("%s: crashed lfsck_layout, to be reset: rc = %d\n",
738 lfsck_lfsck2name(com->lc_lfsck), rc);
742 lfsck_layout_le_to_cpu(lo, com->lc_file_disk);
743 if (lo->ll_magic != LFSCK_LAYOUT_MAGIC) {
744 CWARN("%s: invalid lfsck_layout magic %#x != %#x, "
745 "to be reset\n", lfsck_lfsck2name(com->lc_lfsck),
746 lo->ll_magic, LFSCK_LAYOUT_MAGIC);
/*
 * Persist the in-memory lfsck_layout state: convert lc_file_ram to
 * little-endian into lc_file_disk, then write it to the trace object
 * in one local transaction (create trans -> declare -> start -> write
 * -> stop).  Each failure point logs and falls through to the cleanup
 * label (GOTO targets not visible in this truncated listing).
 */
753 static int lfsck_layout_store(const struct lu_env *env,
754 struct lfsck_component *com)
756 struct dt_object *obj = com->lc_obj;
757 struct lfsck_instance *lfsck = com->lc_lfsck;
758 struct lfsck_layout *lo = com->lc_file_disk;
759 struct thandle *handle;
760 ssize_t size = com->lc_file_size;
765 lfsck_layout_cpu_to_le(lo, com->lc_file_ram);
766 handle = dt_trans_create(env, lfsck->li_bottom);
767 if (IS_ERR(handle)) {
768 rc = PTR_ERR(handle);
769 CERROR("%s: fail to create trans for storing lfsck_layout: "
770 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
774 rc = dt_declare_record_write(env, obj, size, pos, handle);
776 CERROR("%s: fail to declare trans for storing lfsck_layout(1): "
777 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
781 rc = dt_trans_start_local(env, lfsck->li_bottom, handle);
783 CERROR("%s: fail to start trans for storing lfsck_layout: "
784 "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
788 rc = dt_record_write(env, obj, lfsck_buf_get(env, lo, size), &pos,
791 CERROR("%s: fail to store lfsck_layout(1): size = %d, "
792 "rc = %d\n", lfsck_lfsck2name(lfsck), (int)size, rc);
797 dt_trans_stop(env, lfsck->li_bottom, handle);
/*
 * (Re)initialize the layout trace file: zero the in-memory record, set
 * the magic and LS_INIT status, and persist it under the component
 * semaphore.
 */
802 static int lfsck_layout_init(const struct lu_env *env,
803 struct lfsck_component *com)
805 struct lfsck_layout *lo = com->lc_file_ram;
808 memset(lo, 0, com->lc_file_size);
809 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
810 lo->ll_status = LS_INIT;
811 down_write(&com->lc_sem);
812 rc = lfsck_layout_store(env, com);
813 up_write(&com->lc_sem);
/*
 * Decide whether @fid names an OST-object.  Tries, in order:
 * 1) an FLD lookup of the sequence (authoritative when it succeeds);
 * 2) the object's LMA xattr, checking the LMAC_FID_ON_OST compat flag;
 * 3) presence of the XATTR_NAME_FID ("filter fid") xattr, which only
 *    OST-objects carry (return handling for this probe is not visible
 *    in this truncated listing).
 */
818 static int fid_is_for_ostobj(const struct lu_env *env, struct dt_device *dt,
819 struct dt_object *obj, const struct lu_fid *fid)
821 struct seq_server_site *ss = lu_site2seq(dt->dd_lu_dev.ld_site);
822 struct lu_seq_range range = { 0 };
823 struct lustre_mdt_attrs *lma;
826 fld_range_set_any(&range);
827 rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), &range);
829 if (fld_range_is_ost(&range))
/* FLD could not answer: fall back to on-object evidence. */
835 lma = &lfsck_env_info(env)->lti_lma;
836 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, lma, sizeof(*lma)),
837 XATTR_NAME_LMA, BYPASS_CAPA);
838 if (rc == sizeof(*lma)) {
839 lustre_lma_swab(lma);
841 return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
844 rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_FID, BYPASS_CAPA);
/*
 * Linear lookup of @seq in the (sorted, ascending) per-sequence list;
 * the second test lets the scan stop early once entries exceed @seq.
 * Return statements are not visible in this truncated listing.
 */
849 static struct lfsck_layout_seq *
850 lfsck_layout_seq_lookup(struct lfsck_layout_slave_data *llsd, __u64 seq)
852 struct lfsck_layout_seq *lls;
854 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
855 if (lls->lls_seq == seq)
858 if (lls->lls_seq > seq)
/*
 * Insert @lls into llsd_seq_list keeping ascending lls_seq order:
 * find the first existing entry with a larger sequence and link the
 * new one just before it (or at the tail when none is larger).
 */
866 lfsck_layout_seq_insert(struct lfsck_layout_slave_data *llsd,
867 struct lfsck_layout_seq *lls)
869 struct lfsck_layout_seq *tmp;
870 struct list_head *pos = &llsd->llsd_seq_list;
872 list_for_each_entry(tmp, &llsd->llsd_seq_list, lls_list) {
873 if (lls->lls_seq < tmp->lls_seq) {
874 pos = &tmp->lls_list;
878 list_add_tail(&lls->lls_list, pos);
/*
 * Rebuild a missing LAST_ID file: create @obj as a regular file and
 * write the initial lastid value, all in one local transaction
 * (declare create + declare write -> start -> create + write -> stop).
 * Skipped entirely under LPF_DRYRUN.  The creation is re-checked under
 * the object write lock in case another thread raced us.
 */
882 lfsck_layout_lastid_create(const struct lu_env *env,
883 struct lfsck_instance *lfsck,
884 struct dt_object *obj)
886 struct lfsck_thread_info *info = lfsck_env_info(env);
887 struct lu_attr *la = &info->lti_la;
888 struct dt_object_format *dof = &info->lti_dof;
889 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
890 struct dt_device *dt = lfsck->li_bottom;
897 CDEBUG(D_LFSCK, "To create LAST_ID for <seq> "LPX64"\n",
898 fid_seq(lfsck_dto2fid(obj)));
900 if (bk->lb_param & LPF_DRYRUN)
903 memset(la, 0, sizeof(*la));
904 la->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
905 la->la_valid = LA_MODE | LA_UID | LA_GID;
906 dof->dof_type = dt_mode_to_dft(S_IFREG);
908 th = dt_trans_create(env, dt);
910 RETURN(rc = PTR_ERR(th));
912 rc = dt_declare_create(env, obj, la, NULL, dof, th);
916 rc = dt_declare_record_write(env, obj, sizeof(lastid), pos, th);
920 rc = dt_trans_start_local(env, dt, th);
/* Re-check under lock: another thread may have created it. */
924 dt_write_lock(env, obj, 0);
925 if (likely(!dt_object_exists(obj))) {
926 rc = dt_create(env, obj, la, NULL, dof, th);
928 rc = dt_record_write(env, obj,
929 lfsck_buf_get(env, &lastid, sizeof(lastid)),
932 dt_write_unlock(env, obj);
937 dt_trans_stop(env, dt, th);
/*
 * Re-read a sequence's LAST_ID value from disk.  If the on-disk value
 * is lower than the highest object ID the LFSCK has already seen
 * (lls_lastid_known), the LAST_ID file is crashed: adopt the known
 * value, notify the out-of-band listener (LE_LASTID_REBUILDING) and
 * flag LF_CRASHED_LASTID; otherwise adopt the (possibly larger)
 * on-disk value.
 */
943 lfsck_layout_lastid_reload(const struct lu_env *env,
944 struct lfsck_component *com,
945 struct lfsck_layout_seq *lls)
951 dt_read_lock(env, lls->lls_lastid_obj, 0);
952 rc = dt_record_read(env, lls->lls_lastid_obj,
953 lfsck_buf_get(env, &lastid, sizeof(lastid)), &pos);
954 dt_read_unlock(env, lls->lls_lastid_obj);
955 if (unlikely(rc != 0))
/* On-disk value is little-endian. */
958 lastid = le64_to_cpu(lastid);
959 if (lastid < lls->lls_lastid_known) {
960 struct lfsck_instance *lfsck = com->lc_lfsck;
961 struct lfsck_layout *lo = com->lc_file_ram;
963 lls->lls_lastid = lls->lls_lastid_known;
965 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
966 LASSERT(lfsck->li_out_notify != NULL);
968 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
969 LE_LASTID_REBUILDING);
970 lo->ll_flags |= LF_CRASHED_LASTID;
972 } else if (lastid >= lls->lls_lastid) {
973 lls->lls_lastid = lastid;
/*
 * Write back every dirty per-sequence LAST_ID value.  Sequences that
 * look clean are first reloaded from disk (see the comment below for
 * why), and skipped when still clean afterwards.  Under LPF_DRYRUN the
 * write is skipped.  Each write runs in its own local transaction;
 * failures are logged but the loop presumably continues with the next
 * sequence (control flow lines not visible in this truncated listing).
 */
981 lfsck_layout_lastid_store(const struct lu_env *env,
982 struct lfsck_component *com)
984 struct lfsck_instance *lfsck = com->lc_lfsck;
985 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
986 struct dt_device *dt = lfsck->li_bottom;
987 struct lfsck_layout_slave_data *llsd = com->lc_data;
988 struct lfsck_layout_seq *lls;
994 list_for_each_entry(lls, &llsd->llsd_seq_list, lls_list) {
997 /* XXX: Add the code back if we really found related
998 * inconsistent cases in the future. */
1000 if (!lls->lls_dirty) {
1001 /* In OFD, before the pre-creation, the LAST_ID
1002 * file will be updated firstly, which may hide
1003 * some potential crashed cases. For example:
1005 * The old obj1's ID is higher than old LAST_ID
1006 * but lower than the new LAST_ID, but the LFSCK
1007 * have not touch the obj1 until the OFD updated
1008 * the LAST_ID. So the LFSCK does not regard it
1009 * as crashed case. But when OFD does not create
1010 * successfully, it will set the LAST_ID as the
1011 * real created objects' ID, then LFSCK needs to
1012 * found related inconsistency. */
1013 rc = lfsck_layout_lastid_reload(env, com, lls);
1014 if (likely(!lls->lls_dirty))
1019 CDEBUG(D_LFSCK, "To sync the LAST_ID for <seq> "LPX64
1020 " as <oid> "LPU64"\n", lls->lls_seq, lls->lls_lastid);
1022 if (bk->lb_param & LPF_DRYRUN) {
1027 th = dt_trans_create(env, dt);
1030 CERROR("%s: (1) failed to store "LPX64": rc = %d\n",
1031 lfsck_lfsck2name(com->lc_lfsck),
1036 rc = dt_declare_record_write(env, lls->lls_lastid_obj,
1037 sizeof(lastid), pos, th);
1041 rc = dt_trans_start_local(env, dt, th);
/* LAST_ID is stored little-endian on disk. */
1045 lastid = cpu_to_le64(lls->lls_lastid);
1046 dt_write_lock(env, lls->lls_lastid_obj, 0);
1047 rc = dt_record_write(env, lls->lls_lastid_obj,
1048 lfsck_buf_get(env, &lastid,
1049 sizeof(lastid)), &pos, th);
1050 dt_write_unlock(env, lls->lls_lastid_obj);
1055 dt_trans_stop(env, dt, th);
1058 CERROR("%s: (2) failed to store "LPX64": rc = %d\n",
1059 lfsck_lfsck2name(com->lc_lfsck),
/*
 * Locate and load the LAST_ID file for sequence lls->lls_seq.  When
 * the file does not exist it is treated as crashed: the out-of-band
 * listener is notified (LE_LASTID_REBUILDING), LF_CRASHED_LASTID is
 * set, an optional fault-injection delay (OBD_FAIL_LFSCK_DELAY4) is
 * honoured with lc_sem temporarily released, and the file is
 * recreated.  When it exists, the little-endian value is read and a
 * zero-length read (rc == 0) is also treated as a crashed LAST_ID.
 * On success the located object is cached in lls_lastid_obj; on error
 * it is released.
 */
1068 lfsck_layout_lastid_load(const struct lu_env *env,
1069 struct lfsck_component *com,
1070 struct lfsck_layout_seq *lls)
1072 struct lfsck_instance *lfsck = com->lc_lfsck;
1073 struct lfsck_layout *lo = com->lc_file_ram;
1074 struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
1075 struct dt_object *obj;
1080 lu_last_id_fid(fid, lls->lls_seq, lfsck_dev_idx(lfsck->li_bottom));
1081 obj = dt_locate(env, lfsck->li_bottom, fid);
1083 RETURN(PTR_ERR(obj));
1085 /* LAST_ID crashed, to be rebuilt */
1086 if (!dt_object_exists(obj)) {
1087 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
1088 LASSERT(lfsck->li_out_notify != NULL);
1090 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1091 LE_LASTID_REBUILDING);
1092 lo->ll_flags |= LF_CRASHED_LASTID;
/* Fault injection: optionally pause (with lc_sem dropped so
 * others can make progress) before rebuilding. */
1094 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
1096 struct l_wait_info lwi = LWI_TIMEOUT(
1097 cfs_time_seconds(cfs_fail_val),
1100 up_write(&com->lc_sem);
1101 l_wait_event(lfsck->li_thread.t_ctl_waitq,
1102 !thread_is_running(&lfsck->li_thread),
1104 down_write(&com->lc_sem);
1108 rc = lfsck_layout_lastid_create(env, lfsck, obj);
1110 dt_read_lock(env, obj, 0);
1111 rc = dt_read(env, obj,
1112 lfsck_buf_get(env, &lls->lls_lastid, sizeof(__u64)),
1114 dt_read_unlock(env, obj);
/* A partial read is corruption, not a transient error. */
1115 if (rc != 0 && rc != sizeof(__u64))
1116 GOTO(out, rc = (rc > 0 ? -EFAULT : rc));
1118 if (rc == 0 && !(lo->ll_flags & LF_CRASHED_LASTID)) {
1119 LASSERT(lfsck->li_out_notify != NULL);
1121 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
1122 LE_LASTID_REBUILDING);
1123 lo->ll_flags |= LF_CRASHED_LASTID;
1126 lls->lls_lastid = le64_to_cpu(lls->lls_lastid);
1134 lfsck_object_put(env, obj);
1136 lls->lls_lastid_obj = obj;
/*
 * Completion callback for async layout-LFSCK RPCs sent by the master,
 * dispatched on lr->lr_event:
 *
 * - start (first case, label not visible in this truncated listing):
 *   on RPC failure mark the scan LF_INCOMPLETE; on success link the
 *   target onto the proper llmd_{ost,mdt}_list and phase1 list, unless
 *   it died or already finished.
 * - LE_PHASE1_DONE / LE_PHASE2_DONE (and the case sharing their body):
 *   just warn on unexpected errors (-EALREADY is benign).
 * - query (later case): on error, or on a reply that cannot be
 *   unpacked, unlink the target from the layout lists; otherwise move
 *   it to the phase2 list when the peer reports LS_SCANNING_PHASE2,
 *   keep it in phase1 when still LS_SCANNING_PHASE1, and unlink it for
 *   any other status (scan finished on that peer).
 *
 * Unless the args are shared, the component reference is dropped at
 * the end.  The `rc` parameter comes from the declaration lines that
 * are not visible in this truncated listing.
 */
1141 static int lfsck_layout_master_async_interpret(const struct lu_env *env,
1142 struct ptlrpc_request *req,
1145 struct lfsck_async_interpret_args *laia = args;
1146 struct lfsck_component *com = laia->laia_com;
1147 struct lfsck_layout_master_data *llmd = com->lc_data;
1148 struct lfsck_tgt_descs *ltds = laia->laia_ltds;
1149 struct lfsck_tgt_desc *ltd = laia->laia_ltd;
1150 struct lfsck_request *lr = laia->laia_lr;
1152 switch (lr->lr_event) {
1155 struct lfsck_layout *lo = com->lc_file_ram;
1157 CERROR("%s: fail to notify %s %x for layout start: "
1158 "rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
1159 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1160 ltd->ltd_index, rc);
1161 lo->ll_flags |= LF_INCOMPLETE;
1165 spin_lock(<ds->ltd_lock);
1166 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1167 spin_unlock(<ds->ltd_lock);
/* Link the fresh target into the global + phase1 lists. */
1171 if (lr->lr_flags & LEF_TO_OST) {
1172 if (list_empty(<d->ltd_layout_list))
1173 list_add_tail(<d->ltd_layout_list,
1174 &llmd->llmd_ost_list);
1175 if (list_empty(<d->ltd_layout_phase_list))
1176 list_add_tail(<d->ltd_layout_phase_list,
1177 &llmd->llmd_ost_phase1_list);
1179 if (list_empty(<d->ltd_layout_list))
1180 list_add_tail(<d->ltd_layout_list,
1181 &llmd->llmd_mdt_list);
1182 if (list_empty(<d->ltd_layout_phase_list))
1183 list_add_tail(<d->ltd_layout_phase_list,
1184 &llmd->llmd_mdt_phase1_list);
1186 spin_unlock(<ds->ltd_lock);
1189 case LE_PHASE1_DONE:
1190 case LE_PHASE2_DONE:
1192 if (rc != 0 && rc != -EALREADY)
1193 CWARN("%s: fail to notify %s %x for layout: "
1194 "event = %d, rc = %d\n",
1195 lfsck_lfsck2name(com->lc_lfsck),
1196 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1197 ltd->ltd_index, lr->lr_event, rc);
1200 struct lfsck_reply *reply;
/* RPC failed: this target can no longer participate. */
1203 spin_lock(<ds->ltd_lock);
1204 list_del_init(<d->ltd_layout_phase_list);
1205 list_del_init(<d->ltd_layout_list);
1206 spin_unlock(<ds->ltd_lock);
1210 reply = req_capsule_server_get(&req->rq_pill,
1212 if (reply == NULL) {
1214 CERROR("%s: invalid return value: rc = %d\n",
1215 lfsck_lfsck2name(com->lc_lfsck), rc);
1216 spin_lock(<ds->ltd_lock);
1217 list_del_init(<d->ltd_layout_phase_list);
1218 list_del_init(<d->ltd_layout_list);
1219 spin_unlock(<ds->ltd_lock);
1223 switch (reply->lr_status) {
1224 case LS_SCANNING_PHASE1:
1226 case LS_SCANNING_PHASE2:
/* Peer moved to phase2: migrate it to the phase2 list. */
1227 spin_lock(<ds->ltd_lock);
1228 list_del_init(<d->ltd_layout_phase_list);
1229 if (ltd->ltd_dead || ltd->ltd_layout_done) {
1230 spin_unlock(<ds->ltd_lock);
1234 if (lr->lr_flags & LEF_TO_OST)
1235 list_add_tail(<d->ltd_layout_phase_list,
1236 &llmd->llmd_ost_phase2_list);
1238 list_add_tail(<d->ltd_layout_phase_list,
1239 &llmd->llmd_mdt_phase2_list);
1240 spin_unlock(<ds->ltd_lock);
/* Any other status: the peer is done — drop it. */
1243 spin_lock(<ds->ltd_lock);
1244 list_del_init(<d->ltd_layout_phase_list);
1245 list_del_init(<d->ltd_layout_list);
1246 spin_unlock(<ds->ltd_lock);
1252 CERROR("%s: unexpected event: rc = %d\n",
1253 lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
1257 if (!laia->laia_shared) {
1259 lfsck_component_put(env, com);
/*
 * Send an LE_QUERY RPC to every target still in a phase1 list to learn
 * its scanning status (replies are handled asynchronously by
 * lfsck_layout_master_async_interpret()).  MDT targets are queried
 * first, then the routine falls through to the OST phase1 list (with
 * LEF_TO_OST set).  llmd_touch_gen marks each target as visited so the
 * move-to-tail rotation inside the loop terminates.
 */
1265 static int lfsck_layout_master_query_others(const struct lu_env *env,
1266 struct lfsck_component *com)
1268 struct lfsck_thread_info *info = lfsck_env_info(env);
1269 struct lfsck_request *lr = &info->lti_lr;
1270 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1271 struct lfsck_instance *lfsck = com->lc_lfsck;
1272 struct lfsck_layout_master_data *llmd = com->lc_data;
1273 struct ptlrpc_request_set *set;
1274 struct lfsck_tgt_descs *ltds;
1275 struct lfsck_tgt_desc *ltd;
1276 struct list_head *head;
1281 set = ptlrpc_prep_set();
/* New generation: lets us detect targets already visited below. */
1285 llmd->llmd_touch_gen++;
1286 memset(lr, 0, sizeof(*lr));
1287 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1288 lr->lr_event = LE_QUERY;
1289 lr->lr_active = LT_LAYOUT;
1290 laia->laia_com = com;
1292 laia->laia_shared = 0;
1294 if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1295 ltds = &lfsck->li_mdt_descs;
1297 head = &llmd->llmd_mdt_phase1_list;
1301 ltds = &lfsck->li_ost_descs;
1302 lr->lr_flags = LEF_TO_OST;
1303 head = &llmd->llmd_ost_phase1_list;
1306 laia->laia_ltds = ltds;
1307 spin_lock(<ds->ltd_lock);
1308 while (!list_empty(head)) {
1309 ltd = list_entry(head->next,
1310 struct lfsck_tgt_desc,
1311 ltd_layout_phase_list);
/* Already touched this pass: every entry has been visited. */
1312 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1315 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
/* Rotate to tail so the next iteration picks a new head. */
1316 list_del(<d->ltd_layout_phase_list);
1317 list_add_tail(<d->ltd_layout_phase_list, head);
1318 atomic_inc(<d->ltd_ref);
1319 laia->laia_ltd = ltd;
1320 spin_unlock(<ds->ltd_lock);
1321 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1322 lfsck_layout_master_async_interpret,
1325 CERROR("%s: fail to query %s %x for layout: rc = %d\n",
1326 lfsck_lfsck2name(lfsck),
1327 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
1328 ltd->ltd_index, rc);
1332 spin_lock(<ds->ltd_lock);
1334 spin_unlock(<ds->ltd_lock);
1336 rc = ptlrpc_set_wait(set);
1338 ptlrpc_set_destroy(set);
/* MDTs handled; loop back once more for the OST phase1 list. */
1342 if (!(lr->lr_flags & LEF_TO_OST) &&
1343 list_empty(&llmd->llmd_mdt_phase1_list))
1346 ptlrpc_set_destroy(set);
1348 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * The master may move on to orphan handling once no MDT is still in
 * phase1 scanning AND either some OST has reached phase2 or no OST
 * remains in phase1 (i.e. all OST-side phase1 work is over).
 */
1352 lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
1354 return list_empty(&llmd->llmd_mdt_phase1_list) &&
1355 (!list_empty(&llmd->llmd_ost_phase2_list) ||
1356 list_empty(&llmd->llmd_ost_phase1_list));
/*
 * Broadcast an LFSCK layout event (lr->lr_event) from the master to the
 * other targets via async LFSCK_NOTIFY RPCs gathered in a ptlrpc request
 * set.  Ordering depends on the event: for start, OSTs are notified first
 * and other MDTs linked locally afterwards; for stop/phase2-done/peer-exit,
 * MDTs are drained first and then the OSTs; for phase1-done only the MDTs
 * on the phase1 list are touched.  RPC send failures are logged and (for
 * the start case) mark the scan LF_INCOMPLETE, but iteration continues.
 *
 * NOTE(review): this listing elides many original lines (case labels,
 * closing braces, error checks, RETURNs); comments below describe only the
 * visible code.  Also, "&lt..." sequences were mangled to "<..." by entity
 * decoding — e.g. "<ds->ltd_lock" should read "&ltds->ltd_lock" and
 * "<d->ltd_ref" should read "&ltd->ltd_ref"; restore before compiling.
 */
1359 static int lfsck_layout_master_notify_others(const struct lu_env *env,
1360 struct lfsck_component *com,
1361 struct lfsck_request *lr)
1363 struct lfsck_thread_info *info = lfsck_env_info(env);
1364 struct lfsck_async_interpret_args *laia = &info->lti_laia;
1365 struct lfsck_instance *lfsck = com->lc_lfsck;
1366 struct lfsck_layout_master_data *llmd = com->lc_data;
1367 struct lfsck_layout *lo = com->lc_file_ram;
1368 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1369 struct ptlrpc_request_set *set;
1370 struct lfsck_tgt_descs *ltds;
1371 struct lfsck_tgt_desc *ltd;
1372 struct lfsck_tgt_desc *next;
1373 struct list_head *head;
/* Request set collects the async notifications; waited on per phase. */
1378 set = ptlrpc_prep_set();
1382 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
1383 lr->lr_active = LT_LAYOUT;
1384 laia->laia_com = com;
1386 laia->laia_shared = 0;
1387 switch (lr->lr_event) {
/* --- LE_START handling (case label elided in this listing) --- */
1389 /* Notify OSTs firstly, then handle other MDTs if needed. */
1390 ltds = &lfsck->li_ost_descs;
1391 laia->laia_ltds = ltds;
1392 down_read(<ds->ltd_rw_sem);
1393 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1394 ltd = lfsck_tgt_get(ltds, idx);
1395 LASSERT(ltd != NULL);
1397 laia->laia_ltd = ltd;
1398 ltd->ltd_layout_done = 0;
1399 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1400 lfsck_layout_master_async_interpret,
1401 laia, LFSCK_NOTIFY);
1403 CERROR("%s: fail to notify %s %x for layout "
1405 lfsck_lfsck2name(lfsck),
1406 (lr->lr_flags & LEF_TO_OST) ? "OST" :
/* A target we cannot notify means the scan result will be partial. */
1409 lo->ll_flags |= LF_INCOMPLETE;
1412 up_read(<ds->ltd_rw_sem);
1415 rc = ptlrpc_set_wait(set);
1417 ptlrpc_set_destroy(set);
/* Without LPF_ALL_TGT there are no other MDTs to involve. */
1421 if (!(bk->lb_param & LPF_ALL_TGT))
1424 /* link other MDT targets locallly. */
1425 spin_lock(<ds->ltd_lock);
1426 cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
1427 ltd = LTD_TGT(ltds, idx);
1428 LASSERT(ltd != NULL);
1430 if (!list_empty(<d->ltd_layout_list))
1433 list_add_tail(<d->ltd_layout_list,
1434 &llmd->llmd_mdt_list);
1435 list_add_tail(<d->ltd_layout_phase_list,
1436 &llmd->llmd_mdt_phase1_list);
1438 spin_unlock(<ds->ltd_lock);
1441 case LE_PHASE2_DONE:
1442 case LE_PEER_EXIT: {
1443 /* Handle other MDTs firstly if needed, then notify the OSTs. */
1444 if (bk->lb_param & LPF_ALL_TGT) {
1445 head = &llmd->llmd_mdt_list;
1446 ltds = &lfsck->li_mdt_descs;
1447 if (lr->lr_event == LE_STOP) {
1448 /* unlink other MDT targets locallly. */
1449 spin_lock(<ds->ltd_lock);
1450 list_for_each_entry_safe(ltd, next, head,
1452 list_del_init(<d->ltd_layout_phase_list);
1453 list_del_init(<d->ltd_layout_list);
1455 spin_unlock(<ds->ltd_lock);
1457 lr->lr_flags |= LEF_TO_OST;
1458 head = &llmd->llmd_ost_list;
1459 ltds = &lfsck->li_ost_descs;
1461 lr->lr_flags &= ~LEF_TO_OST;
1464 lr->lr_flags |= LEF_TO_OST;
1465 head = &llmd->llmd_ost_list;
1466 ltds = &lfsck->li_ost_descs;
/* Drain whichever list was chosen above, one async RPC per target. */
1470 laia->laia_ltds = ltds;
1471 spin_lock(<ds->ltd_lock);
1472 while (!list_empty(head)) {
1473 ltd = list_entry(head->next, struct lfsck_tgt_desc,
1475 if (!list_empty(<d->ltd_layout_phase_list))
1476 list_del_init(<d->ltd_layout_phase_list);
1477 list_del_init(<d->ltd_layout_list);
/* Hold a ref across the unlocked RPC send; dropped by the interpret cb. */
1478 atomic_inc(<d->ltd_ref);
1479 laia->laia_ltd = ltd;
1480 spin_unlock(<ds->ltd_lock);
1481 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1482 lfsck_layout_master_async_interpret,
1483 laia, LFSCK_NOTIFY);
1485 CERROR("%s: fail to notify %s %x for layout "
1486 "stop/phase2: rc = %d\n",
1487 lfsck_lfsck2name(lfsck),
1488 (lr->lr_flags & LEF_TO_OST) ? "OST" :
1489 "MDT", ltd->ltd_index, rc);
1492 spin_lock(<ds->ltd_lock);
1494 spin_unlock(<ds->ltd_lock);
1496 rc = ptlrpc_set_wait(set);
1498 ptlrpc_set_destroy(set);
/* After the MDT pass, repeat the same drain for the OST list. */
1502 if (!(lr->lr_flags & LEF_TO_OST)) {
1503 lr->lr_flags |= LEF_TO_OST;
1504 head = &llmd->llmd_ost_list;
1505 ltds = &lfsck->li_ost_descs;
1510 case LE_PHASE1_DONE:
/* Generation counter marks which MDTs were already touched this round. */
1511 llmd->llmd_touch_gen++;
1512 ltds = &lfsck->li_mdt_descs;
1513 laia->laia_ltds = ltds;
1514 spin_lock(<ds->ltd_lock);
1515 while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
1516 ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
1517 struct lfsck_tgt_desc,
1518 ltd_layout_phase_list);
1519 if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
1522 ltd->ltd_layout_gen = llmd->llmd_touch_gen;
1523 list_del_init(<d->ltd_layout_phase_list);
1524 list_add_tail(<d->ltd_layout_phase_list,
1525 &llmd->llmd_mdt_phase1_list);
1526 atomic_inc(<d->ltd_ref);
1527 laia->laia_ltd = ltd;
1528 spin_unlock(<ds->ltd_lock);
1529 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
1530 lfsck_layout_master_async_interpret,
1531 laia, LFSCK_NOTIFY);
1533 CERROR("%s: fail to notify MDT %x for layout "
1534 "phase1 done: rc = %d\n",
1535 lfsck_lfsck2name(lfsck),
1536 ltd->ltd_index, rc);
1539 spin_lock(<ds->ltd_lock);
1541 spin_unlock(<ds->ltd_lock);
1544 CERROR("%s: unexpected LFSCK event: rc = %d\n",
1545 lfsck_lfsck2name(lfsck), lr->lr_event);
1550 rc = ptlrpc_set_wait(set);
1551 ptlrpc_set_destroy(set);
/*
 * Record the outcome of the layout double-scan (phase2) in the in-RAM
 * tracing file and persist it: accumulate phase2 run time and checked
 * counters, map rc to an ll_status (completed/partial, stopped, or
 * failed), clear the scanned/inconsistent flags on a real (non-dryrun)
 * completion, move the component to the idle list unless paused, then
 * store via lfsck_layout_store() — all under com->lc_sem write lock.
 *
 * NOTE(review): the rc parameter line and several braces/returns are
 * elided from this listing; the rc > 0 branch head is among them.
 */
1556 static int lfsck_layout_double_scan_result(const struct lu_env *env,
1557 struct lfsck_component *com,
1560 struct lfsck_instance *lfsck = com->lc_lfsck;
1561 struct lfsck_layout *lo = com->lc_file_ram;
1562 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1564 down_write(&com->lc_sem);
/* HALF_SEC rounds the elapsed-time bookkeeping to the nearest second. */
1566 lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
1567 HALF_SEC - lfsck->li_time_last_checkpoint);
1568 lo->ll_time_last_checkpoint = cfs_time_current_sec();
1569 lo->ll_objs_checked_phase2 += com->lc_new_checked;
1572 com->lc_journal = 0;
/* Success path: partial if any target was unreachable, else complete. */
1573 if (lo->ll_flags & LF_INCOMPLETE)
1574 lo->ll_status = LS_PARTIAL;
1576 lo->ll_status = LS_COMPLETED;
1577 if (!(bk->lb_param & LPF_DRYRUN))
1578 lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
1579 lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
1580 lo->ll_success_count++;
1581 } else if (rc == 0) {
/* rc == 0: inherit the instance status; zero means a plain stop. */
1582 lo->ll_status = lfsck->li_status;
1583 if (lo->ll_status == 0)
1584 lo->ll_status = LS_STOPPED;
1586 lo->ll_status = LS_FAILED;
1589 if (lo->ll_status != LS_PAUSED) {
1590 spin_lock(&lfsck->li_lock);
1591 list_del_init(&com->lc_link);
1592 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
1593 spin_unlock(&lfsck->li_lock);
1596 rc = lfsck_layout_store(env, com);
1598 up_write(&com->lc_sem);
/*
 * Take a server-side (local) LDLM EX ibits lock on @obj covering @bits,
 * storing the handle in @lh.  Uses LDLM_FL_ATOMIC_CB so blocking ASTs run
 * atomically.  On enqueue failure the handle is zeroed so callers can
 * test it with lustre_handle_is_used(); pairs with lfsck_layout_unlock().
 *
 * NOTE(review): the tail after the rc == ELDLM_OK check (the success
 * return and memset placement) is partially elided from this listing.
 */
1603 static int lfsck_layout_lock(const struct lu_env *env,
1604 struct lfsck_component *com,
1605 struct dt_object *obj,
1606 struct lustre_handle *lh, __u64 bits)
1608 struct lfsck_thread_info *info = lfsck_env_info(env);
1609 ldlm_policy_data_t *policy = &info->lti_policy;
1610 struct ldlm_res_id *resid = &info->lti_resid;
1611 struct lfsck_instance *lfsck = com->lc_lfsck;
1612 __u64 flags = LDLM_FL_ATOMIC_CB;
1615 LASSERT(lfsck->li_namespace != NULL);
1617 memset(policy, 0, sizeof(*policy));
1618 policy->l_inodebits.bits = bits;
/* Resource name is derived from the object's FID. */
1619 fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
1620 rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
1621 policy, LCK_EX, &flags, ldlm_blocking_ast,
1622 ldlm_completion_ast, NULL, NULL, 0,
1623 LVB_T_NONE, NULL, lh);
1624 if (rc == ELDLM_OK) {
1627 memset(lh, 0, sizeof(*lh));
/*
 * Release the EX lock taken by lfsck_layout_lock() and invalidate the
 * handle so a repeated call is a harmless no-op.
 */
1634 static void lfsck_layout_unlock(struct lustre_handle *lh)
1636 if (lustre_handle_is_used(lh)) {
1637 ldlm_lock_decref(lh, LCK_EX);
1638 memset(lh, 0, sizeof(*lh));
/*
 * Stop the transaction @handle after recording @result in th_result, so
 * the commit/abort decision reflects the caller's outcome.  Returns the
 * dt_trans_stop() result (the final return line is elided here).
 */
1642 static int lfsck_layout_trans_stop(const struct lu_env *env,
1643 struct dt_device *dev,
1644 struct thandle *handle, int result)
1648 handle->th_result = result;
1649 rc = dt_trans_stop(env, dev, handle);
/*
 * Account one orphan OST-object record from the phase2 scan: bump the
 * scanned/checked counters and, depending on the (elided) repair result,
 * either the LLIT_ORPHAN repaired counter or the phase2 failure counter,
 * under com->lc_sem.  The actual repair logic is explicitly deferred
 * ("To be extended in other patch").
 */
1658 static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
1659 struct lfsck_component *com,
1660 struct lfsck_tgt_desc *ltd,
1661 struct lu_orphan_rec *rec,
1662 struct lu_fid *cfid)
1664 struct lfsck_layout *lo = com->lc_file_ram;
1667 /* XXX: To be extended in other patch. */
1669 down_write(&com->lc_sem);
1670 com->lc_new_scanned++;
1671 com->lc_new_checked++;
/* NOTE(review): the conditional guarding this increment is elided. */
1673 lo->ll_objs_repaired[LLIT_ORPHAN - 1]++;
1675 } else if (rc < 0) {
1676 lo->ll_objs_failed_phase2++;
1678 up_write(&com->lc_sem);
/*
 * Iterate the orphan OST-object index exported by OST @ltd: build the
 * per-OST "virtual" index FID from FID_SEQ_IDIF + the OST index, attach
 * the dt_lfsck_orphan_features index operations, then walk the iterator
 * handing each record to lfsck_layout_scan_orphan_one().  Records the
 * last scanned FID in lc_fid_latest_scanned_phase2 for checkpointing and
 * throttles itself via lfsck_control_speed_by_self().  Returns 0 on a
 * clean walk (positive iterator results are normalized to 0).
 */
1683 static int lfsck_layout_scan_orphan(const struct lu_env *env,
1684 struct lfsck_component *com,
1685 struct lfsck_tgt_desc *ltd)
1687 struct lfsck_layout *lo = com->lc_file_ram;
1688 struct lfsck_instance *lfsck = com->lc_lfsck;
1689 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
1690 struct lfsck_thread_info *info = lfsck_env_info(env);
1691 struct ost_id *oi = &info->lti_oi;
1692 struct lu_fid *fid = &info->lti_fid;
1693 struct dt_object *obj;
1694 const struct dt_it_ops *iops;
1699 CDEBUG(D_LFSCK, "%s: start the orphan scanning for OST%04x\n",
1700 lfsck_lfsck2name(lfsck), ltd->ltd_index);
/* Anchor object for the orphan index: IDIF seq, id 0, this OST index. */
1702 ostid_set_seq(oi, FID_SEQ_IDIF);
1703 ostid_set_id(oi, 0);
1704 ostid_to_fid(fid, oi, ltd->ltd_index);
1705 obj = lfsck_object_find_by_dev(env, ltd->ltd_tgt, fid);
1706 if (unlikely(IS_ERR(obj)))
1707 RETURN(PTR_ERR(obj));
1709 rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_orphan_features);
1713 iops = &obj->do_index_ops->dio_it;
1714 di = iops->init(env, obj, 0, BYPASS_CAPA);
1716 GOTO(put, rc = PTR_ERR(di));
1718 rc = iops->load(env, di, 0);
1720 /* -ESRCH means that the orphan OST-objects rbtree has been
1721 * cleanup because of the OSS server restart or other errors. */
1722 lo->ll_flags |= LF_INCOMPLETE;
1727 rc = iops->next(env, di);
1739 struct lu_orphan_rec *rec = &info->lti_rec;
/* Remember the current key so a restart can resume from here. */
1741 key = iops->key(env, di);
1742 com->lc_fid_latest_scanned_phase2 = *(struct lu_fid *)key;
1743 rc = iops->rec(env, di, (struct dt_rec *)rec, 0);
1745 rc = lfsck_layout_scan_orphan_one(env, com, ltd, rec,
1746 &com->lc_fid_latest_scanned_phase2);
1747 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
1750 lfsck_control_speed_by_self(com);
1752 rc = iops->next(env, di);
1753 } while (rc < 0 && !(bk->lb_param & LPF_FAILOUT));
1760 iops->fini(env, di);
1762 lu_object_put(env, &obj->do_lu);
1764 CDEBUG(D_LFSCK, "%s: finish the orphan scanning for OST%04x, rc = %d\n",
1765 lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
1767 return rc > 0 ? 0 : rc;
1770 /* For the MDT-object with dangling reference, we need to re-create
1771 * the missed OST-object with the known FID/owner information. */
/*
 * Repair path for LLIT_DANGLING: under an EX layout+xattr lock on the
 * parent MDT-object, create the missing OST-object (dt_create) and stamp
 * it with a filter_fid pointing back at the parent; ff_parent.f_ver
 * carries the stripe index, not a real FID version.  Skipped (rc = 1) if
 * the parent is already dying.  Transaction is stopped through
 * lfsck_layout_trans_stop() so th_result reflects the outcome.
 *
 * NOTE(review): the la parameter line and several brace/label lines are
 * elided from this listing.
 */
1772 static int lfsck_layout_recreate_ostobj(const struct lu_env *env,
1773 struct lfsck_component *com,
1774 struct lfsck_layout_req *llr,
1777 struct lfsck_thread_info *info = lfsck_env_info(env);
1778 struct filter_fid *pfid = &info->lti_new_pfid;
1779 struct dt_allocation_hint *hint = &info->lti_hint;
1780 struct dt_object *parent = llr->llr_parent->llo_obj;
1781 struct dt_object *child = llr->llr_child;
1782 struct dt_device *dev = lfsck_obj2dt_dev(child);
1783 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1784 struct thandle *handle;
1786 struct lustre_handle lh = { 0 };
1790 CDEBUG(D_LFSCK, "Repair dangling reference for: parent "DFID
1791 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
1792 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
1793 llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid);
/* Serialize against concurrent layout/xattr changes on the parent. */
1795 rc = lfsck_layout_lock(env, com, parent, &lh,
1796 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
1800 handle = dt_trans_create(env, dev);
1802 GOTO(unlock1, rc = PTR_ERR(handle));
1804 hint->dah_parent = NULL;
/* Back-pointer xattr: f_ver is reused as the stripe (LOV) index. */
1806 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1807 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1808 pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
1809 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1811 rc = dt_declare_create(env, child, la, hint, NULL, handle);
1815 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
1816 LU_XATTR_CREATE, handle);
1820 rc = dt_trans_start(env, dev, handle);
1824 dt_read_lock(env, parent, 0);
/* Parent being destroyed concurrently: nothing to repair, rc = 1. */
1825 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
1826 GOTO(unlock2, rc = 1);
1828 rc = dt_create(env, child, la, hint, NULL, handle);
1832 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
1833 handle, BYPASS_CAPA);
1838 dt_read_unlock(env, parent);
1841 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
1844 lfsck_layout_unlock(&lh);
1849 /* If the OST-object does not recognize the MDT-object as its parent, and
1850 * there is no other MDT-object claims as its parent, then just trust the
1851 * given MDT-object as its parent. So update the OST-object filter_fid. */
/*
 * Repair path for LLIT_UNMATCHED_PAIR: under an EX layout+xattr lock on
 * the parent, rewrite the child's XATTR_NAME_FID back-pointer to the
 * parent's FID (with f_ver carrying the stripe index) and re-sync the
 * child's uid/gid from the parent's latest attributes.  Skipped (rc = 1)
 * if the parent is dying.
 *
 * NOTE(review): several brace/label/return lines are elided from this
 * listing.
 */
1852 static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
1853 struct lfsck_component *com,
1854 struct lfsck_layout_req *llr,
1855 const struct lu_attr *pla)
1857 struct lfsck_thread_info *info = lfsck_env_info(env);
1858 struct filter_fid *pfid = &info->lti_new_pfid;
1859 struct lu_attr *tla = &info->lti_la3;
1860 struct dt_object *parent = llr->llr_parent->llo_obj;
1861 struct dt_object *child = llr->llr_child;
1862 struct dt_device *dev = lfsck_obj2dt_dev(child);
1863 const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
1864 struct thandle *handle;
1866 struct lustre_handle lh = { 0 };
1870 CDEBUG(D_LFSCK, "Repair unmatched MDT-OST pair for: parent "DFID
1871 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
1872 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
1873 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
1875 rc = lfsck_layout_lock(env, com, parent, &lh,
1876 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
1880 handle = dt_trans_create(env, dev);
1882 GOTO(unlock1, rc = PTR_ERR(handle));
1884 pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
1885 pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
1886 /* The ff_parent->f_ver is not the real parent fid->f_ver. Instead,
1887 * it is the OST-object index in the parent MDT-object layout. */
1888 pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
1889 buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
1891 rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
1895 tla->la_valid = LA_UID | LA_GID;
1896 tla->la_uid = pla->la_uid;
1897 tla->la_gid = pla->la_gid;
1898 rc = dt_declare_attr_set(env, child, tla, handle);
1902 rc = dt_trans_start(env, dev, handle);
1906 dt_write_lock(env, parent, 0);
1907 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
1908 GOTO(unlock2, rc = 1);
1910 rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
/* Re-read the parent's owner in case it changed since pla was sampled. */
1915 /* Get the latest parent's owner. */
1916 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
1920 tla->la_valid = LA_UID | LA_GID;
1921 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
1926 dt_write_unlock(env, parent);
1929 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
1932 lfsck_layout_unlock(&lh);
1937 /* If there are more than one MDT-objects claim as the OST-object's parent,
1938 * and the OST-object only recognizes one of them, then we need to generate
1939 * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s). */
/*
 * Repair path for LLIT_MULTIPLE_REFERENCED: under an EX layout+xattr lock
 * on the parent, allocate an anonymous new child object on the OST device,
 * re-read and verify the parent's LOV EA, and — only if the layout
 * generation is unchanged since the request was built — create the new
 * child and splice its ostid into the stripe slot llr_lov_idx, bumping
 * lmm_layout_gen and writing the EA back with LU_XATTR_REPLACE.  Any
 * concurrent layout change makes this a no-op (rc = 0).
 *
 * NOTE(review): the la/buf parameter lines and various brace/label lines
 * are elided; "<d" below is a mangled "&ltd"-style artifact of entity
 * decoding and should be restored before compiling.
 */
1940 static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
1941 struct lfsck_component *com,
1942 struct lfsck_layout_req *llr,
1946 struct lfsck_thread_info *info = lfsck_env_info(env);
1947 struct dt_allocation_hint *hint = &info->lti_hint;
1948 struct dt_object_format *dof = &info->lti_dof;
1949 struct dt_device *pdev = com->lc_lfsck->li_next;
1950 struct ost_id *oi = &info->lti_oi;
1951 struct dt_object *parent = llr->llr_parent->llo_obj;
1952 struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
1953 struct dt_object *child = NULL;
1954 struct lu_device *d = &cdev->dd_lu_dev;
1955 struct lu_object *o = NULL;
1956 struct thandle *handle;
1957 struct lov_mds_md_v1 *lmm;
1958 struct lov_ost_data_v1 *objs;
1959 struct lustre_handle lh = { 0 };
1964 CDEBUG(D_LFSCK, "Repair multiple references for: parent "DFID
1965 ", OST-index %u, stripe-index %u, owner %u:%u\n",
1966 PFID(lfsck_dto2fid(parent)), llr->llr_ost_idx,
1967 llr->llr_lov_idx, la->la_uid, la->la_gid);
1969 rc = lfsck_layout_lock(env, com, parent, &lh,
1970 MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
1974 handle = dt_trans_create(env, pdev);
1976 GOTO(unlock1, rc = PTR_ERR(handle));
/* Anonymous object: a brand-new FID is allocated for the replacement. */
1978 o = lu_object_anon(env, d, NULL);
1980 GOTO(stop, rc = PTR_ERR(o));
1982 child = container_of(o, struct dt_object, do_lu);
1983 o = lu_object_locate(o->lo_header, d->ld_type);
1984 if (unlikely(o == NULL))
1985 GOTO(stop, rc = -EINVAL);
1987 child = container_of(o, struct dt_object, do_lu);
1988 la->la_valid = LA_UID | LA_GID;
1989 hint->dah_parent = NULL;
1991 dof->dof_type = DFT_REGULAR;
1992 rc = dt_declare_create(env, child, la, NULL, NULL, handle);
1996 rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
1997 LU_XATTR_REPLACE, handle);
2001 rc = dt_trans_start(env, pdev, handle);
2005 dt_write_lock(env, parent, 0);
2006 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2007 GOTO(unlock2, rc = 0);
2009 rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
2010 if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
2011 GOTO(unlock2, rc = 0);
2014 rc = lfsck_layout_verify_header(lmm);
2018 /* Someone change layout during the LFSCK, no need to repair then. */
2019 if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
2020 GOTO(unlock2, rc = 0);
2022 rc = dt_create(env, child, la, hint, dof, handle);
2026 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2027 * been verified in lfsck_layout_verify_header() already. If some
2028 * new magic introduced in the future, then layout LFSCK needs to
2029 * be updated also. */
2030 magic = le32_to_cpu(lmm->lmm_magic);
2031 if (magic == LOV_MAGIC_V1) {
2032 objs = &(lmm->lmm_objects[0]);
2034 LASSERT(magic == LOV_MAGIC_V3);
2035 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
/* Patch the stripe slot and bump the layout generation in the EA. */
2038 lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
2039 fid_to_ostid(lu_object_fid(&child->do_lu), oi);
2040 ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
2041 objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
2042 objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
2043 rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
2044 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
2046 GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
2049 dt_write_unlock(env, parent);
2053 lu_object_put(env, &child->do_lu);
2055 dt_trans_stop(env, pdev, handle);
2058 lfsck_layout_unlock(&lh);
2063 /* If the MDT-object and the OST-object have different owner information,
2064 * then trust the MDT-object, because the normal chown/chgrp handle order
2065 * is from MDT to OST, and it is possible that some chown/chgrp operation
2066 * is partly done. */
/*
 * Repair path for LLIT_INCONSISTENT_OWNER: copy the parent's uid/gid onto
 * the child inside a transaction, but first re-read the parent's owner
 * under its dt read lock and bail out (rc = 1) if it no longer matches
 * @pla — i.e. someone chowned the file while the LFSCK was looking.
 */
2067 static int lfsck_layout_repair_owner(const struct lu_env *env,
2068 struct lfsck_component *com,
2069 struct lfsck_layout_req *llr,
2070 struct lu_attr *pla)
2072 struct lfsck_thread_info *info = lfsck_env_info(env);
2073 struct lu_attr *tla = &info->lti_la3;
2074 struct dt_object *parent = llr->llr_parent->llo_obj;
2075 struct dt_object *child = llr->llr_child;
2076 struct dt_device *dev = lfsck_obj2dt_dev(child);
2077 struct thandle *handle;
2081 CDEBUG(D_LFSCK, "Repair inconsistent file owner for: parent "DFID
2082 ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
2083 PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
2084 llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
2086 handle = dt_trans_create(env, dev);
2088 RETURN(PTR_ERR(handle));
2090 tla->la_uid = pla->la_uid;
2091 tla->la_gid = pla->la_gid;
2092 tla->la_valid = LA_UID | LA_GID;
2093 rc = dt_declare_attr_set(env, child, tla, handle);
2097 rc = dt_trans_start(env, dev, handle);
2101 /* Use the dt_object lock to serialize with destroy and attr_set. */
2102 dt_read_lock(env, parent, 0);
2103 if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
2104 GOTO(unlock, rc = 1);
2106 /* Get the latest parent's owner. */
2107 rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
2109 CWARN("%s: fail to get the latest parent's ("DFID") owner, "
2110 "not sure whether some others chown/chgrp during the "
2111 "LFSCK: rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
2112 PFID(lfsck_dto2fid(parent)), rc);
2117 /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
2118 if (unlikely(tla->la_uid != pla->la_uid ||
2119 tla->la_gid != pla->la_gid))
2120 GOTO(unlock, rc = 1);
2122 tla->la_valid = LA_UID | LA_GID;
2123 rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
2128 dt_read_unlock(env, parent);
2131 rc = lfsck_layout_trans_stop(env, dev, handle, rc);
2136 /* Check whether the OST-object correctly back points to the
2137 * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
/*
 * Classify the parent/child relationship, returning an
 * lfsck_layout_inconsistency_type (positive) or a negative errno:
 *  - zero pfid + never-written child: only an owner mismatch can matter;
 *  - insane pfid, or pfid == parent but wrong stripe index: unmatched pair;
 *  - pfid pointing at some other object: load that object's LOV EA and
 *    scan its stripes for the child's FID — found means the child is
 *    multiply referenced, not found means unmatched pair.
 *
 * NOTE(review): several lines (returns for the matching cases, loop
 * variable declarations, the out label) are elided from this listing.
 */
2138 static int lfsck_layout_check_parent(const struct lu_env *env,
2139 struct lfsck_component *com,
2140 struct dt_object *parent,
2141 const struct lu_fid *pfid,
2142 const struct lu_fid *cfid,
2143 const struct lu_attr *pla,
2144 const struct lu_attr *cla,
2145 struct lfsck_layout_req *llr,
2146 struct lu_buf *lov_ea, __u32 idx)
2148 struct lfsck_thread_info *info = lfsck_env_info(env);
2149 struct lu_buf *buf = &info->lti_big_buf;
2150 struct dt_object *tobj;
2151 struct lov_mds_md_v1 *lmm;
2152 struct lov_ost_data_v1 *objs;
2159 if (fid_is_zero(pfid)) {
2160 /* client never wrote. */
2161 if (cla->la_size == 0 && cla->la_blocks == 0) {
2162 if (unlikely(cla->la_uid != pla->la_uid ||
2163 cla->la_gid != pla->la_gid))
2164 RETURN (LLIT_INCONSISTENT_OWNER);
/* Zero pfid but the child has data: the back-pointer is missing. */
2169 RETURN(LLIT_UNMATCHED_PAIR);
2172 if (unlikely(!fid_is_sane(pfid)))
2173 RETURN(LLIT_UNMATCHED_PAIR);
2175 if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
2176 if (llr->llr_lov_idx == idx)
2179 RETURN(LLIT_UNMATCHED_PAIR);
/* pfid names a different MDT-object: inspect that object's layout. */
2182 tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
2184 RETURN(LLIT_UNMATCHED_PAIR);
2187 RETURN(PTR_ERR(tobj));
2189 if (!dt_object_exists(tobj))
2190 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
2192 /* Load the tobj's layout EA, in spite of it is a local MDT-object or
2193 * remote one on another MDT. Then check whether the given OST-object
2194 * is in such layout. If yes, it is multiple referenced, otherwise it
2195 * is unmatched referenced case. */
2196 rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
2198 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
2204 rc = lfsck_layout_verify_header(lmm);
2208 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
2209 * been verified in lfsck_layout_verify_header() already. If some
2210 * new magic introduced in the future, then layout LFSCK needs to
2211 * be updated also. */
2212 magic = le32_to_cpu(lmm->lmm_magic);
2213 if (magic == LOV_MAGIC_V1) {
2214 objs = &(lmm->lmm_objects[0]);
2216 LASSERT(magic == LOV_MAGIC_V3);
2217 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
2220 count = le16_to_cpu(lmm->lmm_stripe_count);
2221 for (i = 0; i < count; i++, objs++) {
2222 struct lu_fid *tfid = &info->lti_fid2;
2223 struct ost_id *oi = &info->lti_oi;
2225 ostid_le_to_cpu(&objs->l_ost_oi, oi);
2226 ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
2227 if (lu_fid_eq(cfid, tfid)) {
2230 GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
2234 GOTO(out, rc = LLIT_UNMATCHED_PAIR);
2237 lfsck_object_put(env, tobj);
/*
 * Verify one MDT-object/OST-object pair from the assistant's request
 * queue and dispatch the matching repair: read both objects' attrs,
 * classify the inconsistency (dangling child, unmatched pair via the
 * XATTR_NAME_FID back-pointer, multiple references, owner mismatch),
 * then — unless in dryrun mode — call the corresponding
 * lfsck_layout_repair_* helper.  Statistics and LF_INCOMPLETE marking
 * (for unreachable OSTs) are updated at the end under com->lc_sem.
 *
 * NOTE(review): many lines (GOTO targets, the dryrun/repair switch head,
 * idx derivation from pfid->f_ver) are elided from this listing.  Also
 * "rc= dt_xattr_get" below is missing a space around '='; cosmetic only.
 */
2242 static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
2243 struct lfsck_component *com,
2244 struct lfsck_layout_req *llr)
2246 struct lfsck_layout *lo = com->lc_file_ram;
2247 struct lfsck_thread_info *info = lfsck_env_info(env);
2248 struct filter_fid_old *pea = &info->lti_old_pfid;
2249 struct lu_fid *pfid = &info->lti_fid;
2250 struct lu_buf *buf = NULL;
2251 struct dt_object *parent = llr->llr_parent->llo_obj;
2252 struct dt_object *child = llr->llr_child;
2253 struct lu_attr *pla = &info->lti_la;
2254 struct lu_attr *cla = &info->lti_la2;
2255 struct lfsck_instance *lfsck = com->lc_lfsck;
2256 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2257 enum lfsck_layout_inconsistency_type type = LLIT_NONE;
2262 rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
2264 if (lu_object_is_dying(parent->do_lu.lo_header))
2270 rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
2271 if (rc == -ENOENT) {
2272 if (lu_object_is_dying(parent->do_lu.lo_header))
/* Child missing while parent lives: dangling reference. */
2275 type = LLIT_DANGLING;
/* Read the child's back-pointer xattr (old or new filter_fid size). */
2282 buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
2283 rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
2284 if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
2285 rc != sizeof(struct filter_fid))) {
2286 type = LLIT_UNMATCHED_PAIR;
2290 if (rc < 0 && rc != -ENODATA)
2293 if (rc == -ENODATA) {
2296 fid_le_to_cpu(pfid, &pea->ff_parent);
2297 /* OST-object does not save parent FID::f_ver, instead,
2298 * the OST-object index in the parent MDT-object layout
2299 * EA reuses the pfid->f_ver. */
2304 rc = lfsck_layout_check_parent(env, com, parent, pfid,
2305 lu_object_fid(&child->do_lu),
2306 pla, cla, llr, buf, idx);
2315 if (unlikely(cla->la_uid != pla->la_uid ||
2316 cla->la_gid != pla->la_gid)) {
2317 type = LLIT_INCONSISTENT_OWNER;
/* Dryrun: record the finding, perform no repair. */
2322 if (bk->lb_param & LPF_DRYRUN) {
2323 if (type != LLIT_NONE)
/* Dangling repair creates the child with these default attributes. */
2331 memset(cla, 0, sizeof(*cla));
2332 cla->la_uid = pla->la_uid;
2333 cla->la_gid = pla->la_gid;
2334 cla->la_mode = S_IFREG | 0666;
2335 cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
2336 LA_ATIME | LA_MTIME | LA_CTIME;
2337 rc = lfsck_layout_recreate_ostobj(env, com, llr, cla);
2339 case LLIT_UNMATCHED_PAIR:
2340 rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
2342 case LLIT_MULTIPLE_REFERENCED:
2343 rc = lfsck_layout_repair_multiple_references(env, com, llr,
2346 case LLIT_INCONSISTENT_OWNER:
2347 rc = lfsck_layout_repair_owner(env, com, llr, pla);
2357 down_write(&com->lc_sem);
2359 /* If cannot touch the target server,
2360 * mark the LFSCK as INCOMPLETE. */
2361 if (rc == -ENOTCONN || rc == -ESHUTDOWN || rc == -ETIMEDOUT ||
2362 rc == -EHOSTDOWN || rc == -EHOSTUNREACH) {
2363 CERROR("%s: Fail to talk with OST %x: rc = %d.\n",
2364 lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
2365 lo->ll_flags |= LF_INCOMPLETE;
2366 lo->ll_objs_skipped++;
2369 lo->ll_objs_failed_phase1++;
2371 } else if (rc > 0) {
/* Positive rc from a repair helper means "repaired (or no-op)". */
2372 LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
2373 "unknown type = %d\n", type);
2375 lo->ll_objs_repaired[type - 1]++;
2377 up_write(&com->lc_sem);
/*
 * Main loop of the layout LFSCK assistant thread on the master.  Life
 * cycle:
 *   1. broadcast LE_START to the other targets and flag itself
 *      SVC_RUNNING;
 *   2. phase1: drain llmd_req_list, verifying each request via
 *      lfsck_layout_assistant_handle_one(); only this thread removes
 *      from the list head, so entries are handled without the lock;
 *   3. on llmd_to_post: broadcast LE_PHASE1_DONE;
 *   4. on llmd_to_double_scan: poll remote targets (every 30s at most)
 *      until lfsck_layout_master_to_orphan() allows orphan handling,
 *      then scan each OST on llmd_ost_phase2_list via
 *      lfsck_layout_scan_orphan() when LPF_ALL_TGT is set;
 *   5. cleanup: free unhandled requests, broadcast the final event
 *      (LE_PHASE2_DONE / LE_STOP / LE_PEER_EXIT with a CO_* status),
 *      store the double-scan result unless forced to exit, and flag
 *      SVC_STOPPED.
 *
 * NOTE(review): many lines (labels cleanup1/cleanup2, case labels, some
 * conditionals) are elided from this listing, and "&lt..." sequences were
 * mangled to "<..." ("<a->lta_env" should read "&lta->lta_env", "<ds" /
 * "<d" likewise); restore before compiling.
 */
2382 static int lfsck_layout_assistant(void *args)
2384 struct lfsck_thread_args *lta = args;
2385 struct lu_env *env = <a->lta_env;
2386 struct lfsck_component *com = lta->lta_com;
2387 struct lfsck_instance *lfsck = lta->lta_lfsck;
2388 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
2389 struct lfsck_position *pos = &com->lc_pos_start;
2390 struct lfsck_thread_info *info = lfsck_env_info(env);
2391 struct lfsck_request *lr = &info->lti_lr;
2392 struct lfsck_layout_master_data *llmd = com->lc_data;
2393 struct ptlrpc_thread *mthread = &lfsck->li_thread;
2394 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2395 struct lfsck_layout_req *llr;
2396 struct l_wait_info lwi = { 0 };
/* Build the LE_START request from the bookmark parameters. */
2401 memset(lr, 0, sizeof(*lr));
2402 lr->lr_event = LE_START;
2403 lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
2405 lr->lr_speed = bk->lb_speed_limit;
2406 lr->lr_version = bk->lb_version;
2407 lr->lr_param = bk->lb_param;
2408 lr->lr_async_windows = bk->lb_async_windows;
2409 lr->lr_flags = LEF_TO_OST;
2410 if (pos->lp_oit_cookie <= 1)
2411 lr->lr_param |= LPF_RESET;
2413 rc = lfsck_layout_master_notify_others(env, com, lr);
2415 CERROR("%s: fail to notify others for layout start: rc = %d\n",
2416 lfsck_lfsck2name(lfsck), rc);
2420 spin_lock(&llmd->llmd_lock);
2421 thread_set_flags(athread, SVC_RUNNING);
2422 spin_unlock(&llmd->llmd_lock);
2423 wake_up_all(&mthread->t_ctl_waitq);
/* Phase1: consume queued verification requests. */
2426 while (!list_empty(&llmd->llmd_req_list)) {
2427 bool wakeup = false;
2429 if (unlikely(llmd->llmd_exit))
2430 GOTO(cleanup1, rc = llmd->llmd_post_result);
2432 llr = list_entry(llmd->llmd_req_list.next,
2433 struct lfsck_layout_req,
2435 /* Only the lfsck_layout_assistant thread itself can
2436 * remove the "llr" from the head of the list, LFSCK
2437 * engine thread only inserts other new "lld" at the
2438 * end of the list. So it is safe to handle current
2439 * "llr" without the spin_lock. */
2440 rc = lfsck_layout_assistant_handle_one(env, com, llr);
2441 spin_lock(&llmd->llmd_lock);
2442 list_del_init(&llr->llr_list);
/* Wake the producer if the async window was full and has now drained. */
2443 if (bk->lb_async_windows != 0 &&
2444 llmd->llmd_prefetched >= bk->lb_async_windows)
2447 llmd->llmd_prefetched--;
2448 spin_unlock(&llmd->llmd_lock);
2450 wake_up_all(&mthread->t_ctl_waitq);
2452 lfsck_layout_req_fini(env, llr);
2453 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
2457 /* Wakeup the master engine if it is waiting in checkpoint. */
2458 wake_up_all(&mthread->t_ctl_waitq);
2460 l_wait_event(athread->t_ctl_waitq,
2461 !lfsck_layout_req_empty(llmd) ||
2463 llmd->llmd_to_post ||
2464 llmd->llmd_to_double_scan,
2467 if (unlikely(llmd->llmd_exit))
2468 GOTO(cleanup1, rc = llmd->llmd_post_result);
2470 if (!list_empty(&llmd->llmd_req_list))
2473 if (llmd->llmd_to_post) {
2474 llmd->llmd_to_post = 0;
2475 LASSERT(llmd->llmd_post_result > 0);
2477 memset(lr, 0, sizeof(*lr));
2478 lr->lr_event = LE_PHASE1_DONE;
2479 lr->lr_status = llmd->llmd_post_result;
2480 rc = lfsck_layout_master_notify_others(env, com, lr);
2482 CERROR("%s: failed to notify others "
2483 "for layout post: rc = %d\n",
2484 lfsck_lfsck2name(lfsck), rc);
2486 /* Wakeup the master engine to go ahead. */
2487 wake_up_all(&mthread->t_ctl_waitq);
2490 if (llmd->llmd_to_double_scan) {
2491 llmd->llmd_to_double_scan = 0;
2492 atomic_inc(&lfsck->li_double_scan_count);
2493 llmd->llmd_in_double_scan = 1;
2494 wake_up_all(&mthread->t_ctl_waitq);
2496 com->lc_new_checked = 0;
2497 com->lc_new_scanned = 0;
2498 com->lc_time_last_checkpoint = cfs_time_current();
2499 com->lc_time_next_checkpoint =
2500 com->lc_time_last_checkpoint +
2501 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Phase2: wait for remote targets, then process orphan OSTs. */
2503 while (llmd->llmd_in_double_scan) {
2504 struct lfsck_tgt_descs *ltds =
2505 &lfsck->li_ost_descs;
2506 struct lfsck_tgt_desc *ltd;
2508 rc = lfsck_layout_master_query_others(env, com);
2509 if (lfsck_layout_master_to_orphan(llmd))
2515 /* Pull LFSCK status on related targets once
2516 * per 30 seconds if we are not notified. */
2517 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(30),
2518 cfs_time_seconds(1),
2520 rc = l_wait_event(athread->t_ctl_waitq,
2521 lfsck_layout_master_to_orphan(llmd) ||
2523 !thread_is_running(mthread),
2526 if (unlikely(llmd->llmd_exit ||
2527 !thread_is_running(mthread)))
2528 GOTO(cleanup2, rc = 0);
2530 if (rc == -ETIMEDOUT)
/* Pop each OST that reached phase2 and scan its orphan index. */
2537 spin_lock(<ds->ltd_lock);
2539 &llmd->llmd_ost_phase2_list)) {
2541 llmd->llmd_ost_phase2_list.next,
2542 struct lfsck_tgt_desc,
2543 ltd_layout_phase_list);
2545 <d->ltd_layout_phase_list);
2546 spin_unlock(<ds->ltd_lock);
2548 if (bk->lb_param & LPF_ALL_TGT) {
2549 rc = lfsck_layout_scan_orphan(
2552 bk->lb_param & LPF_FAILOUT)
2556 if (unlikely(llmd->llmd_exit ||
2557 !thread_is_running(mthread)))
2558 GOTO(cleanup2, rc = 0);
2560 spin_lock(<ds->ltd_lock);
2563 if (list_empty(&llmd->llmd_ost_phase1_list)) {
2564 spin_unlock(<ds->ltd_lock);
2565 GOTO(cleanup2, rc = 1);
2567 spin_unlock(<ds->ltd_lock);
2573 /* Cleanup the unfinished requests. */
2574 spin_lock(&llmd->llmd_lock);
2576 llmd->llmd_assistant_status = rc;
2578 while (!list_empty(&llmd->llmd_req_list)) {
2579 llr = list_entry(llmd->llmd_req_list.next,
2580 struct lfsck_layout_req,
2582 list_del_init(&llr->llr_list);
2583 llmd->llmd_prefetched--;
/* Drop the lock around req_fini; it may do blocking work. */
2584 spin_unlock(&llmd->llmd_lock);
2585 lfsck_layout_req_fini(env, llr);
2586 spin_lock(&llmd->llmd_lock);
2588 spin_unlock(&llmd->llmd_lock);
2590 LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
2591 llmd->llmd_prefetched);
/* Choose the final broadcast event from how the run ended. */
2594 memset(lr, 0, sizeof(*lr));
2596 lr->lr_event = LE_PHASE2_DONE;
2598 } else if (rc == 0) {
2599 if (lfsck->li_flags & LPF_ALL_TGT) {
2600 lr->lr_event = LE_STOP;
2601 lr->lr_status = LS_STOPPED;
2603 lr->lr_event = LE_PEER_EXIT;
2604 switch (lfsck->li_status) {
2607 lr->lr_status = LS_CO_PAUSED;
2611 lr->lr_status = LS_CO_STOPPED;
2614 CERROR("%s: unknown status: rc = %d\n",
2615 lfsck_lfsck2name(lfsck),
2617 lr->lr_status = LS_CO_FAILED;
2622 if (lfsck->li_flags & LPF_ALL_TGT) {
2623 lr->lr_event = LE_STOP;
2624 lr->lr_status = LS_FAILED;
2626 lr->lr_event = LE_PEER_EXIT;
2627 lr->lr_status = LS_CO_FAILED;
2631 rc1 = lfsck_layout_master_notify_others(env, com, lr);
2633 CERROR("%s: failed to notify others for layout quit: rc = %d\n",
2634 lfsck_lfsck2name(lfsck), rc1);
2638 /* Under force exit case, some requests may be just freed without
2639 * verification, those objects should be re-handled when next run.
2640 * So not update the on-disk tracing file under such case. */
2641 if (!llmd->llmd_exit)
2642 rc1 = lfsck_layout_double_scan_result(env, com, rc);
2645 if (llmd->llmd_in_double_scan)
2646 atomic_dec(&lfsck->li_double_scan_count);
2648 spin_lock(&llmd->llmd_lock);
2649 llmd->llmd_assistant_status = (rc1 != 0 ? rc1 : rc);
2650 thread_set_flags(athread, SVC_STOPPED);
2651 wake_up_all(&mthread->t_ctl_waitq);
2652 spin_unlock(&llmd->llmd_lock);
2653 lfsck_thread_args_fini(lta);
/*
 * Completion callback for the async LFSCK_QUERY RPC a slave (OST) sends to
 * the layout LFSCK master.  On a reply showing the master is no longer in a
 * scanning phase, the queried target is dropped from the slave's master
 * list.  The references pinned when the request was packed (llst, com, exp)
 * are always released here.
 * NOTE(review): this numbered listing has gaps (return-type line, error
 * branches, braces are missing); only the visible behavior is documented.
 */
2659 lfsck_layout_slave_async_interpret(const struct lu_env *env,
2660 struct ptlrpc_request *req,
2663 struct lfsck_layout_slave_async_args *llsaa = args;
2664 struct obd_export *exp = llsaa->llsaa_exp;
2665 struct lfsck_component *com = llsaa->llsaa_com;
2666 struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
2667 struct lfsck_layout_slave_data *llsd = com->lc_data;
2671 /* It is quite probably caused by target crash,
2672 * to make the LFSCK can go ahead, assume that
2673 * the target finished the LFSCK prcoessing. */
2676 struct lfsck_reply *lr;
2678 lr = req_capsule_server_get(&req->rq_pill, &RMF_LFSCK_REPLY);
/* Master still scanning (phase 1 or 2): presumably the target stays
 * on the list; the branch body is not visible in this listing. */
2679 if (lr->lr_status != LS_SCANNING_PHASE1 &&
2680 lr->lr_status != LS_SCANNING_PHASE2)
2684 lfsck_layout_llst_del(llsd, llst);
/* Drop the references taken when the request was built. */
2685 lfsck_layout_llst_put(llst);
2686 lfsck_component_put(env, com);
2687 class_export_put(exp);
/*
 * Build and enqueue an async LFSCK_QUERY request to one master target.
 * The request carries @lr, stashes (exp, com-ref, llst) in its async args,
 * and installs lfsck_layout_slave_async_interpret() as the reply handler
 * before being added to @set.  Error-return lines are missing from this
 * listing (NOTE(review): alloc/pack failure paths not visible).
 */
2692 static int lfsck_layout_async_query(const struct lu_env *env,
2693 struct lfsck_component *com,
2694 struct obd_export *exp,
2695 struct lfsck_layout_slave_target *llst,
2696 struct lfsck_request *lr,
2697 struct ptlrpc_request_set *set)
2699 struct lfsck_layout_slave_async_args *llsaa;
2700 struct ptlrpc_request *req;
2701 struct lfsck_request *tmp;
2705 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_QUERY);
2709 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_QUERY);
/* Pack failure: free the unsent request. */
2711 ptlrpc_request_free(req);
2715 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2717 ptlrpc_request_set_replen(req);
/* Stash context for the interpret callback; takes a component ref. */
2719 llsaa = ptlrpc_req_async_args(req);
2720 llsaa->llsaa_exp = exp;
2721 llsaa->llsaa_com = lfsck_component_get(com);
2722 llsaa->llsaa_llst = llst;
2723 req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
2724 ptlrpc_set_add_req(set, req);
/*
 * Build and enqueue a one-way LFSCK_NOTIFY request carrying @lr to the
 * peer behind @exp, adding it to @set.  Unlike the query path, no reply
 * interpret callback is installed.  NOTE(review): listing gaps hide the
 * alloc/pack error branches.
 */
2729 static int lfsck_layout_async_notify(const struct lu_env *env,
2730 struct obd_export *exp,
2731 struct lfsck_request *lr,
2732 struct ptlrpc_request_set *set)
2734 struct ptlrpc_request *req;
2735 struct lfsck_request *tmp;
2739 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LFSCK_NOTIFY);
2743 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, LFSCK_NOTIFY);
/* Pack failure: free the unsent request. */
2745 ptlrpc_request_free(req);
2749 tmp = req_capsule_client_get(&req->rq_pill, &RMF_LFSCK_REQUEST);
2751 ptlrpc_request_set_replen(req);
2752 ptlrpc_set_add_req(set, req);
/*
 * Slave-side poll of all known masters: walk llsd_master_list once
 * (bounded by a per-pass generation counter), send each target an async
 * LE_QUERY via lfsck_layout_async_query(), then wait for the whole
 * request set.  Targets whose LWP export cannot be found are removed
 * from the list.  Returns the first notification error if any, else the
 * set-wait result.  NOTE(review): the listing omits the return-type
 * line, RETURN/GOTO lines and some braces.
 */
2758 lfsck_layout_slave_query_master(const struct lu_env *env,
2759 struct lfsck_component *com)
2761 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
2762 struct lfsck_instance *lfsck = com->lc_lfsck;
2763 struct lfsck_layout_slave_data *llsd = com->lc_data;
2764 struct lfsck_layout_slave_target *llst;
2765 struct obd_export *exp;
2766 struct ptlrpc_request_set *set;
2771 set = ptlrpc_prep_set();
2775 memset(lr, 0, sizeof(*lr));
2776 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
2777 lr->lr_event = LE_QUERY;
2778 lr->lr_active = LT_LAYOUT;
/* Bump the generation so each target is visited at most once per pass. */
2780 llsd->llsd_touch_gen++;
2781 spin_lock(&llsd->llsd_lock);
2782 while (!list_empty(&llsd->llsd_master_list)) {
2783 llst = list_entry(llsd->llsd_master_list.next,
2784 struct lfsck_layout_slave_target,
/* Already touched this pass: the whole list has been walked. */
2786 if (llst->llst_gen == llsd->llsd_touch_gen)
2789 llst->llst_gen = llsd->llsd_touch_gen;
/* Move to tail so the untouched entries stay at the head. */
2790 list_del(&llst->llst_list);
2791 list_add_tail(&llst->llst_list,
2792 &llsd->llsd_master_list);
2793 atomic_inc(&llst->llst_ref);
2794 spin_unlock(&llsd->llsd_lock);
2796 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP export for this index: drop the target from the list. */
2799 lfsck_layout_llst_del(llsd, llst);
2800 lfsck_layout_llst_put(llst);
2801 spin_lock(&llsd->llsd_lock);
2805 rc = lfsck_layout_async_query(env, com, exp, llst, lr, set);
2807 CERROR("%s: slave fail to query %s for layout: "
2808 "rc = %d\n", lfsck_lfsck2name(lfsck),
2809 exp->exp_obd->obd_name, rc);
2811 lfsck_layout_llst_put(llst);
2812 class_export_put(exp);
2814 spin_lock(&llsd->llsd_lock);
2816 spin_unlock(&llsd->llsd_lock);
/* Wait for all queued queries, then tear the set down. */
2818 rc = ptlrpc_set_wait(set);
2819 ptlrpc_set_destroy(set);
2821 RETURN(rc1 != 0 ? rc1 : rc);
/*
 * Slave-side broadcast of @event/@result (flagged LEF_FROM_OST) to every
 * master on llsd_master_list, using the same single-pass generation walk
 * as lfsck_layout_slave_query_master(), but via one-way LE notify RPCs.
 * The set is waited on and destroyed before returning; per-target send
 * failures are logged and the target's reference dropped.
 * NOTE(review): listing gaps hide the return type and some branches.
 */
2825 lfsck_layout_slave_notify_master(const struct lu_env *env,
2826 struct lfsck_component *com,
2827 enum lfsck_events event, int result)
2829 struct lfsck_instance *lfsck = com->lc_lfsck;
2830 struct lfsck_layout_slave_data *llsd = com->lc_data;
2831 struct lfsck_request *lr = &lfsck_env_info(env)->lti_lr;
2832 struct lfsck_layout_slave_target *llst;
2833 struct obd_export *exp;
2834 struct ptlrpc_request_set *set;
2838 set = ptlrpc_prep_set();
2842 memset(lr, 0, sizeof(*lr));
2843 lr->lr_event = event;
2844 lr->lr_flags = LEF_FROM_OST;
2845 lr->lr_status = result;
2846 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
2847 lr->lr_active = LT_LAYOUT;
/* Single-pass walk: bump the generation, rotate touched entries to tail. */
2848 llsd->llsd_touch_gen++;
2849 spin_lock(&llsd->llsd_lock);
2850 while (!list_empty(&llsd->llsd_master_list)) {
2851 llst = list_entry(llsd->llsd_master_list.next,
2852 struct lfsck_layout_slave_target,
2854 if (llst->llst_gen == llsd->llsd_touch_gen)
2857 llst->llst_gen = llsd->llsd_touch_gen;
2858 list_del(&llst->llst_list);
2859 list_add_tail(&llst->llst_list,
2860 &llsd->llsd_master_list);
2861 atomic_inc(&llst->llst_ref);
2862 spin_unlock(&llsd->llsd_lock);
2864 exp = lustre_find_lwp_by_index(lfsck->li_obd->obd_name,
/* No LWP export: remove this target permanently. */
2867 lfsck_layout_llst_del(llsd, llst);
2868 lfsck_layout_llst_put(llst);
2869 spin_lock(&llsd->llsd_lock);
2873 rc = lfsck_layout_async_notify(env, exp, lr, set);
2875 CERROR("%s: slave fail to notify %s for layout: "
2876 "rc = %d\n", lfsck_lfsck2name(lfsck),
2877 exp->exp_obd->obd_name, rc);
2878 lfsck_layout_llst_put(llst);
2879 class_export_put(exp);
2880 spin_lock(&llsd->llsd_lock);
2882 spin_unlock(&llsd->llsd_lock);
/* Fire-and-forget: result of the set wait is not propagated here. */
2884 ptlrpc_set_wait(set);
2885 ptlrpc_set_destroy(set);
/*
 * Reset the in-memory layout LFSCK trace file under lc_sem and persist it.
 * Two memset() paths are visible: one clears everything, the other
 * preserves ll_success_count and ll_time_last_complete across the wipe —
 * presumably selected by @init (the if/else lines are missing from this
 * listing; TODO confirm).  Magic and LS_INIT status are then restored and
 * the file stored via lfsck_layout_store().
 */
2892 static int lfsck_layout_reset(const struct lu_env *env,
2893 struct lfsck_component *com, bool init)
2895 struct lfsck_layout *lo = com->lc_file_ram;
2898 down_write(&com->lc_sem);
2900 memset(lo, 0, com->lc_file_size);
/* Non-init path: carry the historical counters over the wipe. */
2902 __u32 count = lo->ll_success_count;
2903 __u64 last_time = lo->ll_time_last_complete;
2905 memset(lo, 0, com->lc_file_size);
2906 lo->ll_success_count = count;
2907 lo->ll_time_last_complete = last_time;
2910 lo->ll_magic = LFSCK_LAYOUT_MAGIC;
2911 lo->ll_status = LS_INIT;
2913 rc = lfsck_layout_store(env, com);
2914 up_write(&com->lc_sem);
/*
 * Record a phase-1 failure: bump the failed counter (and the new-checked
 * counter when @new_checked — the guarding if-line is missing from this
 * listing), and remember the otable-iterator position of the first
 * inconsistency so a later dry-run re-scan can start there.
 */
2919 static void lfsck_layout_fail(const struct lu_env *env,
2920 struct lfsck_component *com, bool new_checked)
2922 struct lfsck_layout *lo = com->lc_file_ram;
2924 down_write(&com->lc_sem);
2926 com->lc_new_checked++;
2927 lo->ll_objs_failed_phase1++;
/* Only the first inconsistency position is recorded. */
2928 if (lo->ll_pos_first_inconsistent == 0) {
2929 struct lfsck_instance *lfsck = com->lc_lfsck;
2931 lo->ll_pos_first_inconsistent =
2932 lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
2935 up_write(&com->lc_sem);
/*
 * Master-side checkpoint: wait until the assistant drained llmd_req_list
 * (or either thread stops), then under lc_sem record the current oit
 * position, accumulate phase-1 run time and checked-object counts, and
 * persist the trace file.  Skipped entirely when nothing new was checked
 * and this is not the @init checkpoint.
 */
2938 static int lfsck_layout_master_checkpoint(const struct lu_env *env,
2939 struct lfsck_component *com, bool init)
2941 struct lfsck_instance *lfsck = com->lc_lfsck;
2942 struct lfsck_layout *lo = com->lc_file_ram;
2943 struct lfsck_layout_master_data *llmd = com->lc_data;
2944 struct ptlrpc_thread *mthread = &lfsck->li_thread;
2945 struct ptlrpc_thread *athread = &llmd->llmd_thread;
2946 struct l_wait_info lwi = { 0 };
2949 if (com->lc_new_checked == 0 && !init)
/* Let the assistant catch up before snapshotting the position. */
2952 l_wait_event(mthread->t_ctl_waitq,
2953 list_empty(&llmd->llmd_req_list) ||
2954 !thread_is_running(mthread) ||
2955 thread_is_stopped(athread),
2958 if (!thread_is_running(mthread) || thread_is_stopped(athread))
2961 down_write(&com->lc_sem);
/* init records the latest-start position, otherwise the checkpoint one
 * (the if/else lines are missing from this listing). */
2963 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
2965 lo->ll_pos_last_checkpoint =
2966 lfsck->li_pos_current.lp_oit_cookie;
2967 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2968 HALF_SEC - lfsck->li_time_last_checkpoint);
2969 lo->ll_time_last_checkpoint = cfs_time_current_sec();
2970 lo->ll_objs_checked_phase1 += com->lc_new_checked;
2971 com->lc_new_checked = 0;
2974 rc = lfsck_layout_store(env, com);
2975 up_write(&com->lc_sem);
/*
 * Slave-side checkpoint: same bookkeeping as the master variant (position,
 * phase-1 run time, checked count, persist) but with no assistant thread
 * to synchronize with.  Skipped when nothing new was checked and !@init.
 */
2980 static int lfsck_layout_slave_checkpoint(const struct lu_env *env,
2981 struct lfsck_component *com, bool init)
2983 struct lfsck_instance *lfsck = com->lc_lfsck;
2984 struct lfsck_layout *lo = com->lc_file_ram;
2987 if (com->lc_new_checked == 0 && !init)
2990 down_write(&com->lc_sem);
/* init vs. regular checkpoint position (if/else lines missing here). */
2993 lo->ll_pos_latest_start = lfsck->li_pos_current.lp_oit_cookie;
2995 lo->ll_pos_last_checkpoint =
2996 lfsck->li_pos_current.lp_oit_cookie;
2997 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
2998 HALF_SEC - lfsck->li_time_last_checkpoint);
2999 lo->ll_time_last_checkpoint = cfs_time_current_sec();
3000 lo->ll_objs_checked_phase1 += com->lc_new_checked;
3001 com->lc_new_checked = 0;
3004 rc = lfsck_layout_store(env, com);
3006 up_write(&com->lc_sem);
/*
 * Common preparation before a layout LFSCK run: decide status and the
 * otable-iterator start position (com->lc_pos_start).  A completed or
 * partial previous run — or an orphan-handling request — resets the trace
 * file first.  If the namespace was already scanned once, the run may go
 * straight to phase 2, or restart phase 1 from the first recorded
 * inconsistency when a dry run is being converted to a repairing run
 * (li_drop_dryrun); otherwise phase 1 resumes after the last checkpoint.
 */
3011 static int lfsck_layout_prep(const struct lu_env *env,
3012 struct lfsck_component *com,
3013 struct lfsck_start *start)
3015 struct lfsck_instance *lfsck = com->lc_lfsck;
3016 struct lfsck_layout *lo = com->lc_file_ram;
3017 struct lfsck_position *pos = &com->lc_pos_start;
3019 fid_zero(&pos->lp_dir_parent);
3020 pos->lp_dir_cookie = 0;
3021 if (lo->ll_status == LS_COMPLETED ||
3022 lo->ll_status == LS_PARTIAL ||
3023 /* To handle orphan, must scan from the beginning. */
3024 (start != NULL && start->ls_flags & LPF_ORPHAN)) {
3027 rc = lfsck_layout_reset(env, com, false);
3032 down_write(&com->lc_sem);
3033 lo->ll_time_latest_start = cfs_time_current_sec();
3034 spin_lock(&lfsck->li_lock);
3035 if (lo->ll_flags & LF_SCANNED_ONCE) {
3036 if (!lfsck->li_drop_dryrun ||
3037 lo->ll_pos_first_inconsistent == 0) {
/* Already scanned once and nothing to redo: go to phase 2. */
3038 lo->ll_status = LS_SCANNING_PHASE2;
3039 list_del_init(&com->lc_link);
3040 list_add_tail(&com->lc_link,
3041 &lfsck->li_list_double_scan);
3042 pos->lp_oit_cookie = 0;
/* Dry run converted to repair: redo phase 1 with fresh statistics,
 * starting from the first recorded inconsistency. */
3046 lo->ll_status = LS_SCANNING_PHASE1;
3047 lo->ll_run_time_phase1 = 0;
3048 lo->ll_run_time_phase2 = 0;
3049 lo->ll_objs_checked_phase1 = 0;
3050 lo->ll_objs_checked_phase2 = 0;
3051 lo->ll_objs_failed_phase1 = 0;
3052 lo->ll_objs_failed_phase2 = 0;
3053 for (i = 0; i < LLIT_MAX; i++)
3054 lo->ll_objs_repaired[i] = 0;
3056 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
3057 fid_zero(&com->lc_fid_latest_scanned_phase2);
/* Never fully scanned yet: resume phase 1 after the last checkpoint
 * (or at the first inconsistency for a dropped dry run). */
3060 lo->ll_status = LS_SCANNING_PHASE1;
3061 if (!lfsck->li_drop_dryrun ||
3062 lo->ll_pos_first_inconsistent == 0)
3063 pos->lp_oit_cookie = lo->ll_pos_last_checkpoint + 1;
3065 pos->lp_oit_cookie = lo->ll_pos_first_inconsistent;
3067 spin_unlock(&lfsck->li_lock);
3068 up_write(&com->lc_sem);
/*
 * Slave (OST) preparation: run the common prep, then — when the start
 * parameters carry a valid requester index — register that master in the
 * slave target list, and for orphan handling build the rbtree used to
 * track in-use OST objects (under the rbtree write lock).
 */
3073 static int lfsck_layout_slave_prep(const struct lu_env *env,
3074 struct lfsck_component *com,
3075 struct lfsck_start_param *lsp)
3077 struct lfsck_layout_slave_data *llsd = com->lc_data;
3078 struct lfsck_start *start = lsp->lsp_start;
3081 rc = lfsck_layout_prep(env, com, start);
3082 if (rc != 0 || !lsp->lsp_index_valid)
3085 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
3086 if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
3087 LASSERT(!llsd->llsd_rbtree_valid);
3089 write_lock(&llsd->llsd_rb_lock);
3090 rc = lfsck_rbtree_setup(env, com);
3091 write_unlock(&llsd->llsd_rb_lock);
/*
 * Master (MDT) preparation: run the common prep, reset the assistant
 * state flags, then spawn the "lfsck_layout" assistant kthread and wait
 * until it reports running (or stopped, in which case its status is
 * returned).  The thread args are freed on spawn failure.
 */
3097 static int lfsck_layout_master_prep(const struct lu_env *env,
3098 struct lfsck_component *com,
3099 struct lfsck_start_param *lsp)
3101 struct lfsck_instance *lfsck = com->lc_lfsck;
3102 struct lfsck_layout_master_data *llmd = com->lc_data;
3103 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3104 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3105 struct lfsck_thread_args *lta;
3109 rc = lfsck_layout_prep(env, com, lsp->lsp_start);
/* Fresh assistant state for this run. */
3113 llmd->llmd_assistant_status = 0;
3114 llmd->llmd_post_result = 0;
3115 llmd->llmd_to_post = 0;
3116 llmd->llmd_to_double_scan = 0;
3117 llmd->llmd_in_double_scan = 0;
3118 llmd->llmd_exit = 0;
3119 thread_set_flags(athread, 0);
3121 lta = lfsck_thread_args_init(lfsck, com, lsp);
3123 RETURN(PTR_ERR(lta));
3125 rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
3126 if (IS_ERR_VALUE(rc)) {
3127 CERROR("%s: Cannot start LFSCK layout assistant thread: "
3128 "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
3129 lfsck_thread_args_fini(lta);
3131 struct l_wait_info lwi = { 0 };
/* Block until the assistant is running or has already stopped. */
3133 l_wait_event(mthread->t_ctl_waitq,
3134 thread_is_running(athread) ||
3135 thread_is_stopped(athread),
3137 if (unlikely(!thread_is_running(athread)))
3138 rc = llmd->llmd_assistant_status;
3146 /* Pre-fetch the attribute for each stripe in the given layout EA. */
/*
 * For every stripe object referenced by @parent's LOV EA (@lmm, already
 * verified as LOV_MAGIC_V1/V3): locate the OST target, declare async
 * attr/pfid fetches on the remote object, and queue a lfsck_layout_req
 * on llmd_req_list for the assistant thread.  The producer throttles on
 * lb_async_windows so the prefetch pipeline stays bounded.  Missing OSTs
 * mark the scan LF_INCOMPLETE; per-stripe failures bump the phase-1
 * failure counter and, with LPF_FAILOUT, abort the scan.
 * NOTE(review): several error/cleanup lines are missing from this
 * listing, so the exact failure flow is partly inferred.
 */
3147 static int lfsck_layout_scan_stripes(const struct lu_env *env,
3148 struct lfsck_component *com,
3149 struct dt_object *parent,
3150 struct lov_mds_md_v1 *lmm)
3152 struct lfsck_thread_info *info = lfsck_env_info(env);
3153 struct lfsck_instance *lfsck = com->lc_lfsck;
3154 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3155 struct lfsck_layout *lo = com->lc_file_ram;
3156 struct lfsck_layout_master_data *llmd = com->lc_data;
3157 struct lfsck_layout_object *llo = NULL;
3158 struct lov_ost_data_v1 *objs;
3159 struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
3160 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3161 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3162 struct l_wait_info lwi = { 0 };
3171 buf = lfsck_buf_get(env, &info->lti_old_pfid,
3172 sizeof(struct filter_fid_old));
3173 count = le16_to_cpu(lmm->lmm_stripe_count);
3174 gen = le16_to_cpu(lmm->lmm_layout_gen);
3175 /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
3176 * been verified in lfsck_layout_verify_header() already. If some
3177 * new magic introduced in the future, then layout LFSCK needs to
3178 * be updated also. */
3179 magic = le32_to_cpu(lmm->lmm_magic);
3180 if (magic == LOV_MAGIC_V1) {
3181 objs = &(lmm->lmm_objects[0]);
3183 LASSERT(magic == LOV_MAGIC_V3);
3184 objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
3187 for (i = 0; i < count; i++, objs++) {
3188 struct lu_fid *fid = &info->lti_fid;
3189 struct ost_id *oi = &info->lti_oi;
3190 struct lfsck_layout_req *llr;
3191 struct lfsck_tgt_desc *tgt = NULL;
3192 struct dt_object *cobj = NULL;
3194 le32_to_cpu(objs->l_ost_idx);
3195 bool wakeup = false;
/* Throttle: do not let prefetched requests exceed the async window. */
3197 l_wait_event(mthread->t_ctl_waitq,
3198 bk->lb_async_windows == 0 ||
3199 llmd->llmd_prefetched < bk->lb_async_windows ||
3200 !thread_is_running(mthread) ||
3201 thread_is_stopped(athread),
3204 if (unlikely(!thread_is_running(mthread)) ||
3205 thread_is_stopped(athread))
/* Map the stripe's ost_id to a FID on the given OST index. */
3208 ostid_le_to_cpu(&objs->l_ost_oi, oi);
3209 ostid_to_fid(fid, oi, index);
3210 tgt = lfsck_tgt_get(ltds, index);
3211 if (unlikely(tgt == NULL)) {
3212 CERROR("%s: Cannot talk with OST %x which did not join "
3213 "the layout LFSCK.\n",
3214 lfsck_lfsck2name(lfsck), index);
3215 lo->ll_flags |= LF_INCOMPLETE;
3219 cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
/* Declare the async fetches the assistant will execute later. */
3225 rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
3229 rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
/* One lfsck_layout_object per parent/generation, created lazily. */
3235 llo = lfsck_layout_object_init(env, parent, gen);
3242 llr = lfsck_layout_req_init(llo, cobj, index, i);
3249 spin_lock(&llmd->llmd_lock);
/* Assistant already failed: free this request and bail out. */
3250 if (llmd->llmd_assistant_status < 0) {
3251 spin_unlock(&llmd->llmd_lock);
3252 lfsck_layout_req_fini(env, llr);
3254 RETURN(llmd->llmd_assistant_status);
3257 list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
3258 if (llmd->llmd_prefetched == 0)
3261 llmd->llmd_prefetched++;
3262 spin_unlock(&llmd->llmd_lock);
/* First request after an empty list: wake the assistant. */
3264 wake_up_all(&athread->t_ctl_waitq);
3267 down_write(&com->lc_sem);
3268 com->lc_new_checked++;
3270 lo->ll_objs_failed_phase1++;
3271 up_write(&com->lc_sem);
3273 if (cobj != NULL && !IS_ERR(cobj))
3274 lu_object_put(env, &cobj->do_lu);
3276 if (likely(tgt != NULL))
3279 if (rc < 0 && bk->lb_param & LPF_FAILOUT)
3286 if (llo != NULL && !IS_ERR(llo))
3287 lfsck_layout_object_put(env, llo);
3292 /* For the given object, read its layout EA locally. For each stripe, pre-fetch
3293 * the OST-object's attribute and generate an structure lfsck_layout_req on the
3294 * list ::llmd_req_list.
3296 * For each request on above list, the lfsck_layout_assistant thread compares
3297 * the OST side attribute with local attribute, if inconsistent, then repair it.
3299 * All above processing is async mode with pipeline. */
/*
 * Master-side per-object hook for the otable iteration: regular files
 * only.  Reads the LOV EA, verifies its header, and repairs an
 * inconsistent lmm_oi in place (lock, transaction, xattr replace) unless
 * LPF_DRYRUN, in which case only the repair counter is bumped.  On
 * success falls through to lfsck_layout_scan_stripes().
 * NOTE(review): listing gaps hide several GOTO/error lines.
 */
3300 static int lfsck_layout_master_exec_oit(const struct lu_env *env,
3301 struct lfsck_component *com,
3302 struct dt_object *obj)
3304 struct lfsck_thread_info *info = lfsck_env_info(env);
3305 struct ost_id *oi = &info->lti_oi;
3306 struct lfsck_layout *lo = com->lc_file_ram;
3307 struct lfsck_layout_master_data *llmd = com->lc_data;
3308 struct lfsck_instance *lfsck = com->lc_lfsck;
3309 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3310 struct thandle *handle = NULL;
3311 struct lu_buf *buf = &info->lti_big_buf;
3312 struct lov_mds_md_v1 *lmm = NULL;
3313 struct dt_device *dev = lfsck->li_bottom;
3314 struct lustre_handle lh = { 0 };
3315 ssize_t buflen = buf->lb_len;
3317 bool locked = false;
3318 bool stripe = false;
/* Only regular files carry a LOV EA worth checking. */
3321 if (!S_ISREG(lfsck_object_type(obj)))
3324 if (llmd->llmd_assistant_status < 0)
3325 GOTO(out, rc = -ESRCH);
/* Expected lmm_oi is derived from the object's own FID. */
3327 fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
3328 lmm_oi_cpu_to_le(oi, oi);
3329 dt_read_lock(env, obj, 0);
3333 rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
3339 rc = lfsck_layout_verify_header(lmm);
/* lmm_oi matches the FID: nothing to repair, go scan the stripes. */
3343 if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
3344 GOTO(out, stripe = true);
3346 /* Inconsistent lmm_oi, should be repaired. */
3347 CDEBUG(D_LFSCK, "Repair bad lmm_oi for "DFID"\n",
3348 PFID(lfsck_dto2fid(obj)));
3350 if (bk->lb_param & LPF_DRYRUN) {
3351 down_write(&com->lc_sem);
3352 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
3353 up_write(&com->lc_sem);
3355 GOTO(out, stripe = true);
/* Take the layout/xattr ldlm lock before rewriting the EA. */
3358 if (!lustre_handle_is_used(&lh)) {
3359 dt_read_unlock(env, obj);
3361 buf->lb_len = buflen;
3362 rc = lfsck_layout_lock(env, com, obj, &lh,
3363 MDS_INODELOCK_LAYOUT |
3364 MDS_INODELOCK_XATTR);
3368 handle = dt_trans_create(env, dev);
3370 GOTO(out, rc = PTR_ERR(handle));
3372 rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
3373 LU_XATTR_REPLACE, handle);
3377 rc = dt_trans_start_local(env, dev, handle);
3381 dt_write_lock(env, obj, 0);
3388 rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
3389 LU_XATTR_REPLACE, handle, BYPASS_CAPA);
3393 down_write(&com->lc_sem);
3394 lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
3395 up_write(&com->lc_sem);
3397 GOTO(out, stripe = true);
/* Cleanup: drop whichever object lock is held, stop the transaction,
 * release the ldlm lock. */
3401 if (lustre_handle_is_used(&lh))
3402 dt_write_unlock(env, obj);
3404 dt_read_unlock(env, obj);
3407 if (handle != NULL && !IS_ERR(handle))
3408 dt_trans_stop(env, dev, handle);
3410 lfsck_layout_unlock(&lh);
3412 rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
3414 down_write(&com->lc_sem);
3415 com->lc_new_checked++;
3417 lo->ll_objs_failed_phase1++;
3418 up_write(&com->lc_sem);
3420 buf->lb_len = buflen;
/*
 * Slave-side per-object hook for the otable iteration: record the FID in
 * the orphan rbtree bitmap, then track per-sequence LAST_ID consistency.
 * A visible object id larger than the on-disk LAST_ID means LAST_ID is
 * stale; after an optional reload (OFD may create objects concurrently)
 * the LF_CRASHED_LASTID flag is raised and the layer above is notified
 * that LAST_ID files need rebuilding.
 * NOTE(review): seq/oid extraction lines are missing from this listing.
 */
3425 static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
3426 struct lfsck_component *com,
3427 struct dt_object *obj)
3429 struct lfsck_instance *lfsck = com->lc_lfsck;
3430 struct lfsck_layout *lo = com->lc_file_ram;
3431 const struct lu_fid *fid = lfsck_dto2fid(obj);
3432 struct lfsck_layout_slave_data *llsd = com->lc_data;
3433 struct lfsck_layout_seq *lls;
3439 LASSERT(llsd != NULL);
3441 lfsck_rbtree_update_bitmap(env, com, fid, false);
3443 down_write(&com->lc_sem);
/* Only IDIF and normal OST-object FIDs are LAST_ID-tracked. */
3444 if (fid_is_idif(fid))
3446 else if (!fid_is_norm(fid) ||
3447 !fid_is_for_ostobj(env, lfsck->li_next, obj, fid))
3448 GOTO(unlock, rc = 0);
3451 com->lc_new_checked++;
3453 lls = lfsck_layout_seq_lookup(llsd, seq);
/* First object seen in this sequence: allocate and load its LAST_ID. */
3456 if (unlikely(lls == NULL))
3457 GOTO(unlock, rc = -ENOMEM);
3459 INIT_LIST_HEAD(&lls->lls_list);
3461 rc = lfsck_layout_lastid_load(env, com, lls);
3463 lo->ll_objs_failed_phase1++;
3468 lfsck_layout_seq_insert(llsd, lls);
/* The LAST_ID object itself is not an ordinary OST object. */
3471 if (unlikely(fid_is_last_id(fid)))
3472 GOTO(unlock, rc = 0);
3475 if (oid > lls->lls_lastid_known)
3476 lls->lls_lastid_known = oid;
3478 if (oid > lls->lls_lastid) {
3479 if (!(lo->ll_flags & LF_CRASHED_LASTID)) {
3480 /* OFD may create new objects during LFSCK scanning. */
3481 rc = lfsck_layout_lastid_reload(env, com, lls);
3482 if (unlikely(rc != 0))
3483 CWARN("%s: failed to reload LAST_ID for "LPX64
3485 lfsck_lfsck2name(com->lc_lfsck),
3487 if (oid <= lls->lls_lastid)
3488 GOTO(unlock, rc = 0);
3490 LASSERT(lfsck->li_out_notify != NULL);
/* LAST_ID really is behind: flag the crash and tell the OFD layer. */
3492 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
3493 LE_LASTID_REBUILDING);
3494 lo->ll_flags |= LF_CRASHED_LASTID;
3497 lls->lls_lastid = oid;
3501 GOTO(unlock, rc = 0);
3504 up_write(&com->lc_sem);
/*
 * Directory-entry hook of the layout component.  The body is not visible
 * in this listing; presumably a no-op for layout LFSCK (directory entries
 * are a namespace concern) — TODO confirm against the full source.
 */
3509 static int lfsck_layout_exec_dir(const struct lu_env *env,
3510 struct lfsck_component *com,
3511 struct dt_object *obj,
3512 struct lu_dirent *ent)
/*
 * Master-side end-of-phase-1 processing: hand @result to the assistant
 * (forcing llmd_exit on failure), wait for it to drain or stop, then
 * under lc_sem/li_lock transition the component — success moves it to the
 * double-scan list as LS_SCANNING_PHASE2 (setting LF_SCANNED_ONCE),
 * result==0 adopts the instance status (stopped/paused), failure parks it
 * idle as LS_FAILED — accumulate phase-1 statistics and persist.
 */
3517 static int lfsck_layout_master_post(const struct lu_env *env,
3518 struct lfsck_component *com,
3519 int result, bool init)
3521 struct lfsck_instance *lfsck = com->lc_lfsck;
3522 struct lfsck_layout *lo = com->lc_file_ram;
3523 struct lfsck_layout_master_data *llmd = com->lc_data;
3524 struct ptlrpc_thread *mthread = &lfsck->li_thread;
3525 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3526 struct l_wait_info lwi = { 0 };
3531 llmd->llmd_post_result = result;
3532 llmd->llmd_to_post = 1;
/* A non-positive result forces the assistant to quit. */
3533 if (llmd->llmd_post_result <= 0)
3534 llmd->llmd_exit = 1;
3536 wake_up_all(&athread->t_ctl_waitq);
3537 l_wait_event(mthread->t_ctl_waitq,
3538 (result > 0 && list_empty(&llmd->llmd_req_list)) ||
3539 thread_is_stopped(athread),
3542 if (llmd->llmd_assistant_status < 0)
3543 result = llmd->llmd_assistant_status;
3545 down_write(&com->lc_sem);
3546 spin_lock(&lfsck->li_lock);
3547 /* When LFSCK failed, there may be some prefetched objects those are
3548 * not been processed yet, we do not know the exactly position, then
3549 * just restart from last check-point next time. */
3550 if (!init && !llmd->llmd_exit)
3551 lo->ll_pos_last_checkpoint =
3552 lfsck->li_pos_current.lp_oit_cookie;
/* result > 0: phase 1 finished cleanly, move on to phase 2. */
3555 lo->ll_status = LS_SCANNING_PHASE2;
3556 lo->ll_flags |= LF_SCANNED_ONCE;
3557 lo->ll_flags &= ~LF_UPGRADE;
3558 list_del_init(&com->lc_link);
3559 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
3560 } else if (result == 0) {
/* Stopped/paused by the instance rather than failed. */
3561 lo->ll_status = lfsck->li_status;
3562 if (lo->ll_status == 0)
3563 lo->ll_status = LS_STOPPED;
3564 if (lo->ll_status != LS_PAUSED) {
3565 list_del_init(&com->lc_link);
3566 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
3569 lo->ll_status = LS_FAILED;
3570 list_del_init(&com->lc_link);
3571 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
3573 spin_unlock(&lfsck->li_lock);
3576 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
3577 HALF_SEC - lfsck->li_time_last_checkpoint);
3578 lo->ll_time_last_checkpoint = cfs_time_current_sec();
3579 lo->ll_objs_checked_phase1 += com->lc_new_checked;
3580 com->lc_new_checked = 0;
3583 rc = lfsck_layout_store(env, com);
3584 up_write(&com->lc_sem);
/*
 * Slave-side end-of-phase-1 processing: flush LAST_ID files, perform the
 * same status transition as the master variant (phase 2 / stopped-paused /
 * failed), clear LF_CRASHED_LASTID on success, notify the OFD layer,
 * persist the trace file, and tell the master(s) phase 1 is done.  On
 * failure the orphan rbtree is torn down (the guarding condition line is
 * missing from this listing).
 */
3589 static int lfsck_layout_slave_post(const struct lu_env *env,
3590 struct lfsck_component *com,
3591 int result, bool init)
3593 struct lfsck_instance *lfsck = com->lc_lfsck;
3594 struct lfsck_layout *lo = com->lc_file_ram;
3598 rc = lfsck_layout_lastid_store(env, com);
3602 LASSERT(lfsck->li_out_notify != NULL);
3604 down_write(&com->lc_sem);
3606 spin_lock(&lfsck->li_lock);
3608 lo->ll_pos_last_checkpoint =
3609 lfsck->li_pos_current.lp_oit_cookie;
/* Success: enter phase 2; rebuilt LAST_ID state can be cleared. */
3611 lo->ll_status = LS_SCANNING_PHASE2;
3612 lo->ll_flags |= LF_SCANNED_ONCE;
3613 if (lo->ll_flags & LF_CRASHED_LASTID) {
3615 lo->ll_flags &= ~LF_CRASHED_LASTID;
3617 lo->ll_flags &= ~LF_UPGRADE;
3618 list_del_init(&com->lc_link);
3619 list_add_tail(&com->lc_link, &lfsck->li_list_double_scan);
3620 } else if (result == 0) {
3621 lo->ll_status = lfsck->li_status;
3622 if (lo->ll_status == 0)
3623 lo->ll_status = LS_STOPPED;
3624 if (lo->ll_status != LS_PAUSED) {
3625 list_del_init(&com->lc_link);
3626 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
3629 lo->ll_status = LS_FAILED;
3630 list_del_init(&com->lc_link);
3631 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
3633 spin_unlock(&lfsck->li_lock);
/* Inform the OFD layer (event argument line missing in listing). */
3636 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
3640 lo->ll_run_time_phase1 += cfs_duration_sec(cfs_time_current() +
3641 HALF_SEC - lfsck->li_time_last_checkpoint);
3642 lo->ll_time_last_checkpoint = cfs_time_current_sec();
3643 lo->ll_objs_checked_phase1 += com->lc_new_checked;
3644 com->lc_new_checked = 0;
3647 rc = lfsck_layout_store(env, com);
3649 up_write(&com->lc_sem);
/* Tell all registered masters that this slave finished phase 1. */
3651 lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
3654 lfsck_rbtree_cleanup(env, com);
/*
 * Format the layout LFSCK state into @buf (up to @len bytes) for procfs
 * consumption, under a read lock on lc_sem: header/status, flag and
 * parameter bit names, three timestamps, positions, repair statistics,
 * then a status-dependent speed section — live phase-1, live phase-2, or
 * the post-run summary.  NOTE(review): the listing omits the buf/len
 * advance and overflow-check lines between sections, and several
 * printf-argument lines.
 */
3659 static int lfsck_layout_dump(const struct lu_env *env,
3660 struct lfsck_component *com, char *buf, int len)
3662 struct lfsck_instance *lfsck = com->lc_lfsck;
3663 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
3664 struct lfsck_layout *lo = com->lc_file_ram;
3669 down_read(&com->lc_sem);
3670 rc = snprintf(buf, len,
3671 "name: lfsck_layout\n"
3677 lfsck_status2names(lo->ll_status));
3683 rc = lfsck_bits_dump(&buf, &len, lo->ll_flags, lfsck_flags_names,
3688 rc = lfsck_bits_dump(&buf, &len, bk->lb_param, lfsck_param_names,
3693 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_complete,
3694 "time_since_last_completed");
3698 rc = lfsck_time_dump(&buf, &len, lo->ll_time_latest_start,
3699 "time_since_latest_start");
3703 rc = lfsck_time_dump(&buf, &len, lo->ll_time_last_checkpoint,
3704 "time_since_last_checkpoint");
3708 rc = snprintf(buf, len,
3709 "latest_start_position: "LPU64"\n"
3710 "last_checkpoint_position: "LPU64"\n"
3711 "first_failure_position: "LPU64"\n",
3712 lo->ll_pos_latest_start,
3713 lo->ll_pos_last_checkpoint,
3714 lo->ll_pos_first_inconsistent);
3721 rc = snprintf(buf, len,
3722 "success_count: %u\n"
3723 "repaired_dangling: "LPU64"\n"
3724 "repaired_unmatched_pair: "LPU64"\n"
3725 "repaired_multiple_referenced: "LPU64"\n"
3726 "repaired_orphan: "LPU64"\n"
3727 "repaired_inconsistent_owner: "LPU64"\n"
3728 "repaired_others: "LPU64"\n"
3729 "skipped: "LPU64"\n"
3730 "failed_phase1: "LPU64"\n"
3731 "failed_phase2: "LPU64"\n",
3732 lo->ll_success_count,
3733 lo->ll_objs_repaired[LLIT_DANGLING - 1],
3734 lo->ll_objs_repaired[LLIT_UNMATCHED_PAIR - 1],
3735 lo->ll_objs_repaired[LLIT_MULTIPLE_REFERENCED - 1],
3736 lo->ll_objs_repaired[LLIT_ORPHAN - 1],
3737 lo->ll_objs_repaired[LLIT_INCONSISTENT_OWNER - 1],
3738 lo->ll_objs_repaired[LLIT_OTHERS - 1],
3739 lo->ll_objs_skipped,
3740 lo->ll_objs_failed_phase1,
3741 lo->ll_objs_failed_phase2);
/* Live phase 1: compute average and real-time speeds and show the
 * current otable-iterator position. */
3748 if (lo->ll_status == LS_SCANNING_PHASE1) {
3750 const struct dt_it_ops *iops;
3751 cfs_duration_t duration = cfs_time_current() -
3752 lfsck->li_time_last_checkpoint;
3753 __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
3754 __u64 speed = checked;
3755 __u64 new_checked = com->lc_new_checked * HZ;
3756 __u32 rtime = lo->ll_run_time_phase1 +
3757 cfs_duration_sec(duration + HALF_SEC);
3760 do_div(new_checked, duration);
3762 do_div(speed, rtime);
3763 rc = snprintf(buf, len,
3764 "checked_phase1: "LPU64"\n"
3765 "checked_phase2: "LPU64"\n"
3766 "run_time_phase1: %u seconds\n"
3767 "run_time_phase2: %u seconds\n"
3768 "average_speed_phase1: "LPU64" items/sec\n"
3769 "average_speed_phase2: N/A\n"
3770 "real-time_speed_phase1: "LPU64" items/sec\n"
3771 "real-time_speed_phase2: N/A\n",
3773 lo->ll_objs_checked_phase2,
3775 lo->ll_run_time_phase2,
3784 LASSERT(lfsck->li_di_oit != NULL);
3786 iops = &lfsck->li_obj_oit->do_index_ops->dio_it;
3788 /* The low layer otable-based iteration position may NOT
3789 * exactly match the layout-based directory traversal
3790 * cookie. Generally, it is not a serious issue. But the
3791 * caller should NOT make assumption on that. */
3792 pos = iops->store(env, lfsck->li_di_oit);
3793 if (!lfsck->li_current_oit_processed)
3795 rc = snprintf(buf, len, "current_position: "LPU64"\n", pos);
/* Live phase 2: same speed math, position reported as a FID. */
3801 } else if (lo->ll_status == LS_SCANNING_PHASE2) {
3802 cfs_duration_t duration = cfs_time_current() -
3803 lfsck->li_time_last_checkpoint;
3804 __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
3805 __u64 speed = checked;
3806 __u64 new_checked = com->lc_new_checked * HZ;
3807 __u32 rtime = lo->ll_run_time_phase1 +
3808 cfs_duration_sec(duration + HALF_SEC);
3811 do_div(new_checked, duration);
3813 do_div(speed, rtime);
3814 rc = snprintf(buf, len,
3815 "checked_phase1: "LPU64"\n"
3816 "checked_phase2: "LPU64"\n"
3817 "run_time_phase1: %u seconds\n"
3818 "run_time_phase2: %u seconds\n"
3819 "average_speed_phase1: "LPU64" items/sec\n"
3820 "average_speed_phase2: N/A\n"
3821 "real-time_speed_phase1: "LPU64" items/sec\n"
3822 "real-time_speed_phase2: N/A\n"
3823 "current_position: "DFID"\n",
3825 lo->ll_objs_checked_phase2,
3827 lo->ll_run_time_phase2,
3830 PFID(&com->lc_fid_latest_scanned_phase2));
/* Not running: report historical per-phase averages only. */
3837 __u64 speed1 = lo->ll_objs_checked_phase1;
3838 __u64 speed2 = lo->ll_objs_checked_phase2;
3840 if (lo->ll_run_time_phase1 != 0)
3841 do_div(speed1, lo->ll_run_time_phase1);
3842 if (lo->ll_run_time_phase2 != 0)
3843 do_div(speed2, lo->ll_run_time_phase2);
3844 rc = snprintf(buf, len,
3845 "checked_phase1: "LPU64"\n"
3846 "checked_phase2: "LPU64"\n"
3847 "run_time_phase1: %u seconds\n"
3848 "run_time_phase2: %u seconds\n"
3849 "average_speed_phase1: "LPU64" items/sec\n"
3850 "average_speed_phase2: "LPU64" objs/sec\n"
3851 "real-time_speed_phase1: N/A\n"
3852 "real-time_speed_phase2: N/A\n"
3853 "current_position: N/A\n",
3854 lo->ll_objs_checked_phase1,
3855 lo->ll_objs_checked_phase2,
3856 lo->ll_run_time_phase1,
3857 lo->ll_run_time_phase2,
3869 up_read(&com->lc_sem);
/*
 * Master-side phase-2 kickoff: only valid in LS_SCANNING_PHASE2.  Sets
 * llmd_to_double_scan, wakes the assistant, and waits until it either
 * enters double scan or stops; a negative assistant status is returned
 * to the caller.
 */
3874 static int lfsck_layout_master_double_scan(const struct lu_env *env,
3875 struct lfsck_component *com)
3877 struct lfsck_layout_master_data *llmd = com->lc_data;
3878 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
3879 struct ptlrpc_thread *athread = &llmd->llmd_thread;
3880 struct lfsck_layout *lo = com->lc_file_ram;
3881 struct l_wait_info lwi = { 0 };
3883 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
3886 llmd->llmd_to_double_scan = 1;
3887 wake_up_all(&athread->t_ctl_waitq);
3888 l_wait_event(mthread->t_ctl_waitq,
3889 llmd->llmd_in_double_scan ||
3890 thread_is_stopped(athread),
3892 if (llmd->llmd_assistant_status < 0)
3893 return llmd->llmd_assistant_status;
/*
 * Slave-side phase 2: while masters remain on llsd_master_list, poll them
 * every 30 seconds via lfsck_layout_slave_query_master() until the list
 * empties or the local thread stops.  Then record the double-scan result,
 * tear down the orphan rbtree, notify the masters LE_PHASE2_DONE, and
 * release the instance's double-scan count (waking waiters when it hits
 * zero).  If not in LS_SCANNING_PHASE2 at entry, cleanup/notify happens
 * immediately.
 */
3898 static int lfsck_layout_slave_double_scan(const struct lu_env *env,
3899 struct lfsck_component *com)
3901 struct lfsck_instance *lfsck = com->lc_lfsck;
3902 struct lfsck_layout_slave_data *llsd = com->lc_data;
3903 struct lfsck_layout *lo = com->lc_file_ram;
3904 struct ptlrpc_thread *thread = &lfsck->li_thread;
3908 if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
3909 lfsck_rbtree_cleanup(env, com);
3910 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, 0);
3914 atomic_inc(&lfsck->li_double_scan_count);
3916 com->lc_new_checked = 0;
3917 com->lc_new_scanned = 0;
3918 com->lc_time_last_checkpoint = cfs_time_current();
3919 com->lc_time_next_checkpoint = com->lc_time_last_checkpoint +
3920 cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
/* Poll loop: re-query the masters on a 30-second timeout. */
3923 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
3926 rc = lfsck_layout_slave_query_master(env, com);
3927 if (list_empty(&llsd->llsd_master_list)) {
3928 if (unlikely(!thread_is_running(thread)))
3939 rc = l_wait_event(thread->t_ctl_waitq,
3940 !thread_is_running(thread) ||
3941 list_empty(&llsd->llsd_master_list),
3943 if (unlikely(!thread_is_running(thread)))
/* Timeout just means "poll again"; any other result ends the loop. */
3946 if (rc == -ETIMEDOUT)
3949 GOTO(done, rc = (rc < 0 ? rc : 1));
3953 rc = lfsck_layout_double_scan_result(env, com, rc);
3955 lfsck_rbtree_cleanup(env, com);
3956 lfsck_layout_slave_notify_master(env, com, LE_PHASE2_DONE, rc);
3957 if (atomic_dec_and_test(&lfsck->li_double_scan_count))
3958 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/*
 * Release the master-side private data: requires the assistant thread to
 * be init or stopped and the request list drained, then detaches every
 * OST/MDT target from the six per-phase tracking lists under ltd_lock.
 * The final free of llmd is outside this listing's visible lines.
 * NOTE(review): several "&lt" sequences were mangled to "<" by text
 * extraction (e.g. "spin_lock(<ds->ltd_lock)" should read
 * "spin_lock(&ltds->ltd_lock)"); the original tokens are preserved
 * here byte-for-byte.
 */
3963 static void lfsck_layout_master_data_release(const struct lu_env *env,
3964 struct lfsck_component *com)
3966 struct lfsck_layout_master_data *llmd = com->lc_data;
3967 struct lfsck_instance *lfsck = com->lc_lfsck;
3968 struct lfsck_tgt_descs *ltds;
3969 struct lfsck_tgt_desc *ltd;
3970 struct lfsck_tgt_desc *next;
3972 LASSERT(llmd != NULL);
3973 LASSERT(thread_is_init(&llmd->llmd_thread) ||
3974 thread_is_stopped(&llmd->llmd_thread));
3975 LASSERT(list_empty(&llmd->llmd_req_list));
3977 com->lc_data = NULL;
/* Detach all OST targets from the phase1/phase2/plain lists. */
3979 ltds = &lfsck->li_ost_descs;
3980 spin_lock(<ds->ltd_lock);
3981 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase1_list,
3982 ltd_layout_phase_list) {
3983 list_del_init(<d->ltd_layout_phase_list);
3985 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_phase2_list,
3986 ltd_layout_phase_list) {
3987 list_del_init(<d->ltd_layout_phase_list);
3989 list_for_each_entry_safe(ltd, next, &llmd->llmd_ost_list,
3991 list_del_init(<d->ltd_layout_list);
/* Same for the MDT tracking lists. */
3993 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
3994 ltd_layout_phase_list) {
3995 list_del_init(<d->ltd_layout_phase_list);
3997 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
3998 ltd_layout_phase_list) {
3999 list_del_init(<d->ltd_layout_phase_list);
4001 list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
4003 list_del_init(<d->ltd_layout_list);
4005 spin_unlock(<ds->ltd_lock);
/*
 * Release the layout LFSCK slave private data (com->lc_data):
 * drop every per-sequence LAST_ID object, drop every registered
 * master target, free the in-RAM rbtree, and detach lc_data.
 */
4010 static void lfsck_layout_slave_data_release(const struct lu_env *env,
4011 struct lfsck_component *com)
4013 struct lfsck_layout_slave_data *llsd = com->lc_data;
4014 struct lfsck_layout_seq *lls;
4015 struct lfsck_layout_seq *next;
4016 struct lfsck_layout_slave_target *llst;
4017 struct lfsck_layout_slave_target *tmp;
4019 LASSERT(llsd != NULL);
/* Put the LAST_ID object referenced by each tracked sequence. */
4021 list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
4023 list_del_init(&lls->lls_list);
4024 lfsck_object_put(env, lls->lls_lastid_obj);
/* Drop the remaining master targets. */
4028 list_for_each_entry_safe(llst, tmp, &llsd->llsd_master_list,
4030 list_del_init(&llst->llst_list);
4034 lfsck_rbtree_cleanup(env, com);
4035 com->lc_data = NULL;
/*
 * Ask the layout LFSCK assistant thread to exit and wait (on the main
 * engine's waitqueue) until it has reached init or stopped state.
 */
4039 static void lfsck_layout_master_quit(const struct lu_env *env,
4040 struct lfsck_component *com)
4042 struct lfsck_layout_master_data *llmd = com->lc_data;
4043 struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
4044 struct ptlrpc_thread *athread = &llmd->llmd_thread;
4045 struct l_wait_info lwi = { 0 };
/* Signal the assistant, then block until it acknowledges. */
4047 llmd->llmd_exit = 1;
4048 wake_up_all(&athread->t_ctl_waitq);
4049 l_wait_event(mthread->t_ctl_waitq,
4050 thread_is_init(athread) ||
4051 thread_is_stopped(athread),
/* Slave-side quit: only the in-RAM rbtree needs to be released. */
4055 static void lfsck_layout_slave_quit(const struct lu_env *env,
4056 struct lfsck_component *com)
4058 lfsck_rbtree_cleanup(env, com);
/*
 * Handle a notification sent to the layout LFSCK master by a peer
 * OST/MDT: LE_PHASE1_DONE, LE_PHASE2_DONE or LE_PEER_EXIT.
 *
 * Phase1 success moves the target onto the phase2 lists; phase1
 * failure, phase2 completion and peer exit mark the target done and
 * drop it from the scan lists.  Afterwards, a failure with LPF_FAILOUT
 * set stops the whole LFSCK locally; otherwise the assistant thread is
 * woken if it can move on to orphan handling.
 *
 * Fix: restored identifiers mangled by HTML-entity decoding --
 * "<ds->" / "<d->" were originally "&ltds->" / "&ltd->".
 */
4061 static int lfsck_layout_master_in_notify(const struct lu_env *env,
4062 struct lfsck_component *com,
4063 struct lfsck_request *lr)
4065 struct lfsck_instance *lfsck = com->lc_lfsck;
4066 struct lfsck_layout *lo = com->lc_file_ram;
4067 struct lfsck_layout_master_data *llmd = com->lc_data;
4068 struct lfsck_tgt_descs *ltds;
4069 struct lfsck_tgt_desc *ltd;
/* Only the three peer events below are handled here. */
4073 if (lr->lr_event != LE_PHASE1_DONE &&
4074 lr->lr_event != LE_PHASE2_DONE &&
4075 lr->lr_event != LE_PEER_EXIT)
4078 if (lr->lr_flags & LEF_FROM_OST)
4079 ltds = &lfsck->li_ost_descs;
4081 ltds = &lfsck->li_mdt_descs;
4082 spin_lock(&ltds->ltd_lock);
4083 ltd = LTD_TGT(ltds, lr->lr_index);
4085 spin_unlock(&ltds->ltd_lock);
4090 list_del_init(&ltd->ltd_layout_phase_list);
4091 switch (lr->lr_event) {
4092 case LE_PHASE1_DONE:
4093 if (lr->lr_status <= 0) {
/* Peer failed/stopped during phase1: exclude it and mark
 * the overall scan incomplete. */
4094 ltd->ltd_layout_done = 1;
4095 list_del_init(&ltd->ltd_layout_list);
4096 CWARN("%s: %s %x failed/stopped at phase1: rc = %d.\n",
4097 lfsck_lfsck2name(lfsck),
4098 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
4099 ltd->ltd_index, lr->lr_status);
4100 lo->ll_flags |= LF_INCOMPLETE;
/* Phase1 succeeded on the peer: queue it for phase2. */
4105 if (lr->lr_flags & LEF_FROM_OST) {
4106 if (list_empty(&ltd->ltd_layout_list))
4107 list_add_tail(&ltd->ltd_layout_list,
4108 &llmd->llmd_ost_list);
4109 list_add_tail(&ltd->ltd_layout_phase_list,
4110 &llmd->llmd_ost_phase2_list);
4112 if (list_empty(&ltd->ltd_layout_list))
4113 list_add_tail(&ltd->ltd_layout_list,
4114 &llmd->llmd_mdt_list);
4115 list_add_tail(&ltd->ltd_layout_phase_list,
4116 &llmd->llmd_mdt_phase2_list);
4119 case LE_PHASE2_DONE:
4120 ltd->ltd_layout_done = 1;
4121 list_del_init(&ltd->ltd_layout_list);
/* LE_PEER_EXIT handling (the case label itself is not visible
 * in this extract). */
4125 ltd->ltd_layout_done = 1;
4126 list_del_init(&ltd->ltd_layout_list);
4127 if (!(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT)) {
4128 CWARN("%s: the peer %s %x exit layout LFSCK.\n",
4129 lfsck_lfsck2name(lfsck),
4130 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
4132 lo->ll_flags |= LF_INCOMPLETE;
4138 spin_unlock(&ltds->ltd_lock);
/* Failure + FAILOUT: stop the whole LFSCK on this node, forwarding
 * the peer's status but never re-broadcasting. */
4140 if (fail && lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
4141 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
4143 memset(stop, 0, sizeof(*stop));
4144 stop->ls_status = lr->lr_status;
4145 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
4146 lfsck_stop(env, lfsck->li_bottom, stop);
4147 } else if (lfsck_layout_master_to_orphan(llmd)) {
4148 wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
/*
 * Handle a notification sent to the layout LFSCK slave:
 * LE_FID_ACCESSED marks the FID in the rbtree bitmap; LE_PHASE2_DONE /
 * LE_PEER_EXIT detach the sending master from llsd_master_list, waking
 * the local thread when the last master is gone.  A peer exit with
 * LPF_FAILOUT set also stops the local LFSCK.
 */
4154 static int lfsck_layout_slave_in_notify(const struct lu_env *env,
4155 struct lfsck_component *com,
4156 struct lfsck_request *lr)
4158 struct lfsck_instance *lfsck = com->lc_lfsck;
4159 struct lfsck_layout_slave_data *llsd = com->lc_data;
4160 struct lfsck_layout_slave_target *llst;
/* Record the accessed FID in the bitmap and return. */
4163 if (lr->lr_event == LE_FID_ACCESSED) {
4164 lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
4169 if (lr->lr_event != LE_PHASE2_DONE && lr->lr_event != LE_PEER_EXIT)
/* Remove the sender from the tracked master list. */
4172 llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
4176 lfsck_layout_llst_put(llst);
4177 if (list_empty(&llsd->llsd_master_list))
4178 wake_up_all(&lfsck->li_thread.t_ctl_waitq);
/* Peer exit + FAILOUT: stop locally without re-broadcasting. */
4180 if (lr->lr_event == LE_PEER_EXIT &&
4181 lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT) {
4182 struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
4184 memset(stop, 0, sizeof(*stop));
4185 stop->ls_status = lr->lr_status;
4186 stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
4187 lfsck_stop(env, lfsck->li_bottom, stop);
/* Report the current layout LFSCK status from the in-RAM file. */
4193 static int lfsck_layout_query(const struct lu_env *env,
4194 struct lfsck_component *com)
4196 struct lfsck_layout *lo = com->lc_file_ram;
4198 return lo->ll_status;
/*
 * Notify one target (asynchronously, via \a set) that the layout LFSCK
 * master is co-stopping (LE_PEER_EXIT / LS_CO_PAUSED).  The target is
 * first removed from the scan lists under ltd_lock; a target already
 * off the lists needs no notification.
 *
 * Fix: restored identifiers mangled by HTML-entity decoding --
 * "<ds->" / "<d->" were originally "&ltds->" / "&ltd->".
 */
4201 static int lfsck_layout_master_stop_notify(const struct lu_env *env,
4202 struct lfsck_component *com,
4203 struct lfsck_tgt_descs *ltds,
4204 struct lfsck_tgt_desc *ltd,
4205 struct ptlrpc_request_set *set)
4207 struct lfsck_thread_info *info = lfsck_env_info(env);
4208 struct lfsck_async_interpret_args *laia = &info->lti_laia;
4209 struct lfsck_request *lr = &info->lti_lr;
4210 struct lfsck_instance *lfsck = com->lc_lfsck;
/* Already off the lists: nothing to tell this target. */
4213 spin_lock(&ltds->ltd_lock);
4214 if (list_empty(&ltd->ltd_layout_list)) {
4215 LASSERT(list_empty(&ltd->ltd_layout_phase_list));
4216 spin_unlock(&ltds->ltd_lock);
4221 list_del_init(&ltd->ltd_layout_phase_list);
4222 list_del_init(&ltd->ltd_layout_list);
4223 spin_unlock(&ltds->ltd_lock);
/* Build the co-stop request. */
4225 memset(lr, 0, sizeof(*lr));
4226 lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
4227 lr->lr_event = LE_PEER_EXIT;
4228 lr->lr_active = LT_LAYOUT;
4229 lr->lr_status = LS_CO_PAUSED;
4230 if (ltds == &lfsck->li_ost_descs)
4231 lr->lr_flags = LEF_TO_OST;
/* The interpret callback receives laia; hold a target reference for
 * the in-flight request. */
4233 laia->laia_com = com;
4234 laia->laia_ltds = ltds;
4235 atomic_inc(&ltd->ltd_ref);
4236 laia->laia_ltd = ltd;
4238 laia->laia_shared = 0;
4240 rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
4241 lfsck_layout_master_async_interpret,
4242 laia, LFSCK_NOTIFY);
4244 CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
4245 lfsck_lfsck2name(lfsck),
4246 (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
4247 ltd->ltd_index, rc);
4254 /* with lfsck::li_lock held */
/*
 * Let a master join the slave-side layout LFSCK: register its index on
 * llsd_master_list.  Only allowed for LPF_ALL_TGT runs with a valid
 * sender index.  If the local thread stopped while li_lock was
 * temporarily dropped, the registration is rolled back.
 */
4255 static int lfsck_layout_slave_join(const struct lu_env *env,
4256 struct lfsck_component *com,
4257 struct lfsck_start_param *lsp)
4259 struct lfsck_instance *lfsck = com->lc_lfsck;
4260 struct lfsck_layout_slave_data *llsd = com->lc_data;
4261 struct lfsck_layout_slave_target *llst;
4262 struct lfsck_start *start = lsp->lsp_start;
4266 if (!lsp->lsp_index_valid || start == NULL ||
4267 !(start->ls_flags & LPF_ALL_TGT) ||
4268 !(lfsck->li_bookmark_ram.lb_param & LPF_ALL_TGT))
/* llst_add may allocate/sleep: drop li_lock around it. */
4271 spin_unlock(&lfsck->li_lock);
4272 rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
4273 spin_lock(&lfsck->li_lock);
/* Thread stopped while unlocked: undo the registration. */
4274 if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
4275 spin_unlock(&lfsck->li_lock);
4276 llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index,
4279 lfsck_layout_llst_put(llst);
4280 spin_lock(&lfsck->li_lock);
/* Operation table for the layout LFSCK on the master (MDT) side. */
4287 static struct lfsck_operations lfsck_layout_master_ops = {
4288 .lfsck_reset = lfsck_layout_reset,
4289 .lfsck_fail = lfsck_layout_fail,
4290 .lfsck_checkpoint = lfsck_layout_master_checkpoint,
4291 .lfsck_prep = lfsck_layout_master_prep,
4292 .lfsck_exec_oit = lfsck_layout_master_exec_oit,
4293 .lfsck_exec_dir = lfsck_layout_exec_dir,
4294 .lfsck_post = lfsck_layout_master_post,
4295 .lfsck_interpret = lfsck_layout_master_async_interpret,
4296 .lfsck_dump = lfsck_layout_dump,
4297 .lfsck_double_scan = lfsck_layout_master_double_scan,
4298 .lfsck_data_release = lfsck_layout_master_data_release,
4299 .lfsck_quit = lfsck_layout_master_quit,
4300 .lfsck_in_notify = lfsck_layout_master_in_notify,
4301 .lfsck_query = lfsck_layout_query,
4302 .lfsck_stop_notify = lfsck_layout_master_stop_notify,
/* Operation table for the layout LFSCK on the slave (OST) side. */
4305 static struct lfsck_operations lfsck_layout_slave_ops = {
4306 .lfsck_reset = lfsck_layout_reset,
4307 .lfsck_fail = lfsck_layout_fail,
4308 .lfsck_checkpoint = lfsck_layout_slave_checkpoint,
4309 .lfsck_prep = lfsck_layout_slave_prep,
4310 .lfsck_exec_oit = lfsck_layout_slave_exec_oit,
4311 .lfsck_exec_dir = lfsck_layout_exec_dir,
4312 .lfsck_post = lfsck_layout_slave_post,
4313 .lfsck_dump = lfsck_layout_dump,
4314 .lfsck_double_scan = lfsck_layout_slave_double_scan,
4315 .lfsck_data_release = lfsck_layout_slave_data_release,
4316 .lfsck_quit = lfsck_layout_slave_quit,
4317 .lfsck_in_notify = lfsck_layout_slave_in_notify,
4318 .lfsck_query = lfsck_layout_query,
4319 .lfsck_join = lfsck_layout_slave_join,
/*
 * Set up the layout LFSCK component for an lfsck instance: allocate the
 * component and its master/slave private data, allocate the in-RAM and
 * on-disk trace file buffers, locate/create the trace file under the
 * local root, load (or reset/init) the trace file, and link the
 * component onto the idle or scan list according to its saved status.
 *
 * \retval 0 on success, negative errno on failure (component cleaned up
 * via lfsck_component_cleanup on the error path).
 */
4322 int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
4324 struct lfsck_component *com;
4325 struct lfsck_layout *lo;
4326 struct dt_object *root = NULL;
4327 struct dt_object *obj;
4335 INIT_LIST_HEAD(&com->lc_link);
4336 INIT_LIST_HEAD(&com->lc_link_dir);
4337 init_rwsem(&com->lc_sem);
4338 atomic_set(&com->lc_ref, 1);
4339 com->lc_lfsck = lfsck;
4340 com->lc_type = LT_LAYOUT;
/* Master (MDT) side: assistant-thread lists and waitqueue. */
4341 if (lfsck->li_master) {
4342 struct lfsck_layout_master_data *llmd;
4344 com->lc_ops = &lfsck_layout_master_ops;
4345 OBD_ALLOC_PTR(llmd);
4347 GOTO(out, rc = -ENOMEM);
4349 INIT_LIST_HEAD(&llmd->llmd_req_list);
4350 spin_lock_init(&llmd->llmd_lock);
4351 INIT_LIST_HEAD(&llmd->llmd_ost_list);
4352 INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
4353 INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
4354 INIT_LIST_HEAD(&llmd->llmd_mdt_list);
4355 INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
4356 INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
4357 init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
4358 com->lc_data = llmd;
/* Slave (OST) side: sequence list, master list and FID rbtree. */
4360 struct lfsck_layout_slave_data *llsd;
4362 com->lc_ops = &lfsck_layout_slave_ops;
4363 OBD_ALLOC_PTR(llsd);
4365 GOTO(out, rc = -ENOMEM);
4367 INIT_LIST_HEAD(&llsd->llsd_seq_list);
4368 INIT_LIST_HEAD(&llsd->llsd_master_list);
4369 spin_lock_init(&llsd->llsd_lock);
4370 llsd->llsd_rb_root = RB_ROOT;
4371 rwlock_init(&llsd->llsd_rb_lock);
4372 com->lc_data = llsd;
4374 com->lc_file_size = sizeof(*lo);
4375 OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
4376 if (com->lc_file_ram == NULL)
4377 GOTO(out, rc = -ENOMEM);
4379 OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
4380 if (com->lc_file_disk == NULL)
4381 GOTO(out, rc = -ENOMEM);
/* Find or create the layout trace file under the local root. */
4383 root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
4385 GOTO(out, rc = PTR_ERR(root));
4387 if (unlikely(!dt_try_as_dir(env, root)))
4388 GOTO(out, rc = -ENOTDIR);
4390 obj = local_file_find_or_create(env, lfsck->li_los, root,
4392 S_IFREG | S_IRUGO | S_IWUSR);
4394 GOTO(out, rc = PTR_ERR(obj));
/* Load the saved state; reset on corruption, init when absent --
 * NOTE(review): the conditions guarding reset/init are not fully
 * visible in this extract. */
4397 rc = lfsck_layout_load(env, com);
4399 rc = lfsck_layout_reset(env, com, true);
4400 else if (rc == -ENOENT)
4401 rc = lfsck_layout_init(env, com);
4406 lo = com->lc_file_ram;
4407 switch (lo->ll_status) {
4413 spin_lock(&lfsck->li_lock);
4414 list_add_tail(&com->lc_link, &lfsck->li_list_idle);
4415 spin_unlock(&lfsck->li_lock);
4418 CERROR("%s: unknown lfsck_layout status: rc = %u\n",
4419 lfsck_lfsck2name(lfsck), lo->ll_status);
4421 case LS_SCANNING_PHASE1:
4422 case LS_SCANNING_PHASE2:
4423 /* No need to store the status to disk right now.
4424 * If the system crashed before the status stored,
4425 * it will be loaded back when next time. */
4426 lo->ll_status = LS_CRASHED;
4427 lo->ll_flags |= LF_INCOMPLETE;
4434 spin_lock(&lfsck->li_lock);
4435 list_add_tail(&com->lc_link, &lfsck->li_list_scan);
4436 spin_unlock(&lfsck->li_lock);
/* LAST_ID files were lost: tell the upper layer to rebuild them. */
4440 if (lo->ll_flags & LF_CRASHED_LASTID) {
4441 LASSERT(lfsck->li_out_notify != NULL);
4443 lfsck->li_out_notify(env, lfsck->li_out_notify_data,
4444 LE_LASTID_REBUILDING);
4450 if (root != NULL && !IS_ERR(root))
4451 lu_object_put(env, &root->do_lu);
4454 lfsck_component_cleanup(env, com);
/* Iterator state for walking orphan OST-objects via the FID rbtree. */
4459 struct lfsck_orphan_it {
4460 struct lfsck_component *loi_com; /* owning layout component */
4461 struct lfsck_rbtree_node *loi_lrn; /* current rbtree node */
4462 struct lfsck_layout_slave_target *loi_llst; /* requesting master */
4463 struct lu_fid loi_key; /* current iteration position */
4464 struct lu_orphan_rec *loi_rec -- current record; see it_rec() */
4466 unsigned int loi_over:1; /* iteration has passed the end */
/*
 * Check whether \a fid belongs to the MDT with index \a idx by looking
 * the FID's sequence up in the server-side FLD.
 *
 * Abnormal (non-normal) FIDs are attributed to MDT0.  The exact
 * return-value convention (match / no-match / unknown) depends on
 * lines not visible in this extract -- see the caller's comment in
 * lfsck_orphan_it_next().
 */
4469 static int lfsck_fid_match_idx(const struct lu_env *env,
4470 struct lfsck_instance *lfsck,
4471 const struct lu_fid *fid, int idx)
4473 struct seq_server_site *ss;
4474 struct lu_server_fld *sf;
4475 struct lu_seq_range range = { 0 };
4478 /* All abnormal cases will be returned to MDT0. */
4479 if (!fid_is_norm(fid)) {
4486 ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
4487 if (unlikely(ss == NULL))
4490 sf = ss->ss_server_fld;
4491 LASSERT(sf != NULL);
/* Resolve which MDT owns the FID's sequence. */
4493 fld_range_set_any(&range);
4494 rc = fld_server_lookup(env, sf, fid_seq(fid), &range);
4498 if (!fld_range_is_mdt(&range))
4501 if (range.lsr_index == idx)
/*
 * Destroy an orphan OST-object in a single local transaction:
 * declare ref_del + destroy, start the transaction, then perform the
 * ref_del and destroy under the object write lock.
 */
4507 static void lfsck_layout_destroy_orphan(const struct lu_env *env,
4508 struct dt_device *dev,
4509 struct dt_object *obj)
4511 struct thandle *handle;
4515 handle = dt_trans_create(env, dev);
4519 rc = dt_declare_ref_del(env, obj, handle);
4523 rc = dt_declare_destroy(env, obj, handle);
4527 rc = dt_trans_start_local(env, dev, handle);
4531 dt_write_lock(env, obj, 0);
4532 rc = dt_ref_del(env, obj, handle);
4534 rc = dt_destroy(env, obj, handle);
4535 dt_write_unlock(env, obj);
4540 dt_trans_stop(env, dev, handle);
/* The orphan pseudo-index does not support lookup; presumably returns
 * an error code (body not visible in this extract). */
4545 static int lfsck_orphan_index_lookup(const struct lu_env *env,
4546 struct dt_object *dt,
4548 const struct dt_key *key,
4549 struct lustre_capa *capa)
/* The orphan pseudo-index does not support insert declaration;
 * presumably returns an error code (body not visible). */
4554 static int lfsck_orphan_index_declare_insert(const struct lu_env *env,
4555 struct dt_object *dt,
4556 const struct dt_rec *rec,
4557 const struct dt_key *key,
4558 struct thandle *handle)
/* The orphan pseudo-index does not support insert; presumably returns
 * an error code (body not visible). */
4563 static int lfsck_orphan_index_insert(const struct lu_env *env,
4564 struct dt_object *dt,
4565 const struct dt_rec *rec,
4566 const struct dt_key *key,
4567 struct thandle *handle,
4568 struct lustre_capa *capa,
/* The orphan pseudo-index does not support delete declaration;
 * presumably returns an error code (body not visible). */
4574 static int lfsck_orphan_index_declare_delete(const struct lu_env *env,
4575 struct dt_object *dt,
4576 const struct dt_key *key,
4577 struct thandle *handle)
/* The orphan pseudo-index does not support delete; presumably returns
 * an error code (body not visible). */
4582 static int lfsck_orphan_index_delete(const struct lu_env *env,
4583 struct dt_object *dt,
4584 const struct dt_key *key,
4585 struct thandle *handle,
4586 struct lustre_capa *capa)
/*
 * Initialize an orphan iterator for the requesting master (identified
 * via \a attr): find the lfsck instance and layout component, allocate
 * the iterator, claim the master's slave-target slot, prune rbtree
 * nodes whose FIDs were all accessed (first iteration only), and take
 * the rbtree read lock -- held until lfsck_orphan_it_fini().
 *
 * \retval iterator pointer on success, ERR_PTR(-errno) on failure.
 */
4591 static struct dt_it *lfsck_orphan_it_init(const struct lu_env *env,
4592 struct dt_object *dt,
4594 struct lustre_capa *capa)
4596 struct dt_device *dev = lu2dt_dev(dt->do_lu.lo_dev);
4597 struct lfsck_instance *lfsck;
4598 struct lfsck_component *com = NULL;
4599 struct lfsck_layout_slave_data *llsd;
4600 struct lfsck_orphan_it *it = NULL;
4604 lfsck = lfsck_instance_find(dev, true, false);
4605 if (unlikely(lfsck == NULL))
4606 RETURN(ERR_PTR(-ENODEV));
4608 com = lfsck_component_find(lfsck, LT_LAYOUT);
4609 if (unlikely(com == NULL))
4610 GOTO(out, rc = -ENOENT);
4612 llsd = com->lc_data;
4613 if (!llsd->llsd_rbtree_valid)
4614 GOTO(out, rc = -ESRCH);
4618 GOTO(out, rc = -ENOMEM);
4620 it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
4621 if (it->loi_llst == NULL)
4622 GOTO(out, rc = -ENODEV);
4624 if (dev->dd_record_fid_accessed) {
4625 /* The first iteration against the rbtree, scan the whole rbtree
4626 * to remove the nodes which do NOT need to be handled. */
4627 write_lock(&llsd->llsd_rb_lock);
/* Re-check under the write lock (double-checked against racers). */
4628 if (dev->dd_record_fid_accessed) {
4629 struct rb_node *node;
4630 struct rb_node *next;
4631 struct lfsck_rbtree_node *lrn;
4633 /* No need to record the fid accessing anymore. */
4634 dev->dd_record_fid_accessed = 0;
4636 node = rb_first(&llsd->llsd_rb_root);
4637 while (node != NULL) {
4638 next = rb_next(node);
4639 lrn = rb_entry(node, struct lfsck_rbtree_node,
/* Every known FID in this node was accessed: drop the node. */
4641 if (atomic_read(&lrn->lrn_known_count) <=
4642 atomic_read(&lrn->lrn_accessed_count)) {
4643 rb_erase(node, &llsd->llsd_rb_root);
4644 lfsck_rbtree_free(lrn);
4649 write_unlock(&llsd->llsd_rb_lock);
4652 /* read lock the rbtree when init, and unlock when fini */
4653 read_lock(&llsd->llsd_rb_lock);
4661 lfsck_component_put(env, com);
4662 lfsck_instance_put(env, lfsck);
4667 it = (struct lfsck_orphan_it *)ERR_PTR(rc);
4670 return (struct dt_it *)it;
/*
 * Finish an orphan iteration: release the rbtree read lock taken in
 * it_init(), save the current key/hash into the slave target so a
 * later load() can resume, and drop the target/component references.
 */
4673 static void lfsck_orphan_it_fini(const struct lu_env *env,
4676 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
4677 struct lfsck_component *com = it->loi_com;
4678 struct lfsck_layout_slave_data *llsd;
4679 struct lfsck_layout_slave_target *llst;
4682 llsd = com->lc_data;
4683 read_unlock(&llsd->llsd_rb_lock);
4684 llst = it->loi_llst;
4685 LASSERT(llst != NULL);
4687 /* Save the key and hash for iterate next. */
4688 llst->llst_fid = it->loi_key;
4689 llst->llst_hash = it->loi_hash;
4690 lfsck_layout_llst_put(llst);
4691 lfsck_component_put(env, com);
4697 * \retval +1: the iteration finished
4698 * \retval 0: on success, not finished
4699 * \retval -ve: on error
/*
 * Advance the orphan iterator to the next orphan OST-object: walk the
 * known-FID bitmaps of the rbtree nodes, skip FIDs already accessed,
 * load each candidate object, and decide from its ctime/mode/PFID
 * xattr whether it is an orphan that the requesting MDT (\a idx)
 * should reclaim.  On success it->loi_key / it->loi_rec describe the
 * orphan to return.
 */
4701 static int lfsck_orphan_it_next(const struct lu_env *env,
4704 struct lfsck_thread_info *info = lfsck_env_info(env);
4705 struct filter_fid_old *pfid = &info->lti_old_pfid;
4706 struct lu_attr *la = &info->lti_la;
4707 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
4708 struct lu_fid *key = &it->loi_key;
4709 struct lu_orphan_rec *rec = &it->loi_rec;
4710 struct lfsck_component *com = it->loi_com;
4711 struct lfsck_instance *lfsck = com->lc_lfsck;
4712 struct lfsck_layout_slave_data *llsd = com->lc_data;
4713 struct dt_object *obj;
4714 struct lfsck_rbtree_node *lrn;
4718 __u32 idx = it->loi_llst->llst_index;
/* Locate the rbtree node covering the current key. */
4728 lrn = lfsck_rbtree_search(llsd, key, &exact);
4736 key->f_seq = lrn->lrn_seq;
4737 key->f_oid = lrn->lrn_first_oid;
4742 if (unlikely(key->f_oid == 0)) {
4749 lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH) {
/* This node is exhausted (all known FIDs accessed): advance to
 * the next node that still has unaccessed FIDs. */
4755 if (unlikely(atomic_read(&lrn->lrn_known_count) <=
4756 atomic_read(&lrn->lrn_accessed_count))) {
4757 struct rb_node *next = rb_next(&lrn->lrn_node);
4759 while (next != NULL) {
4760 lrn = rb_entry(next, struct lfsck_rbtree_node,
4762 if (atomic_read(&lrn->lrn_known_count) >
4763 atomic_read(&lrn->lrn_accessed_count))
4765 next = rb_next(next);
4774 key->f_seq = lrn->lrn_seq;
4775 key->f_oid = lrn->lrn_first_oid;
/* Scan the known bitmap for the next candidate OID. */
4779 pos = key->f_oid - lrn->lrn_first_oid;
4782 pos = find_next_bit(lrn->lrn_known_bitmap,
4783 LFSCK_RBTREE_BITMAP_WIDTH, pos);
4784 if (pos >= LFSCK_RBTREE_BITMAP_WIDTH) {
4785 key->f_oid = lrn->lrn_first_oid + pos;
/* Guard against OID wrap-around. */
4786 if (unlikely(key->f_oid < lrn->lrn_first_oid)) {
/* Already accessed during the scan: not an orphan candidate. */
4794 if (test_bit(pos, lrn->lrn_accessed_bitmap)) {
4799 key->f_oid = lrn->lrn_first_oid + pos;
4800 obj = lfsck_object_find(env, lfsck, key);
4803 if (rc == -ENOENT) {
4810 dt_read_lock(env, obj, 0);
4811 if (!dt_object_exists(obj)) {
4812 dt_read_unlock(env, obj);
4813 lfsck_object_put(env, obj);
4818 rc = dt_attr_get(env, obj, la, BYPASS_CAPA);
/* Read the parent-FID (PFID) xattr to learn the claimed owner. */
4822 rc = dt_xattr_get(env, obj, lfsck_buf_get(env, pfid, sizeof(*pfid)),
4823 XATTR_NAME_FID, BYPASS_CAPA);
4824 if (rc == -ENODATA) {
4825 /* For the pre-created OST-object, update the bitmap to avoid
4826 * others LFSCK (second phase) iteration to touch it again. */
4827 if (la->la_ctime == 0) {
4828 if (!test_and_set_bit(pos, lrn->lrn_accessed_bitmap))
4829 atomic_inc(&lrn->lrn_accessed_count);
4831 /* For the race between repairing dangling referenced
4832 * MDT-object and unlink the file, it may left orphan
4833 * OST-object there. Destroy it now! */
4834 if (unlikely(!(la->la_mode & S_ISUID))) {
4835 dt_read_unlock(env, obj);
4836 lfsck_layout_destroy_orphan(env,
4839 lfsck_object_put(env, obj);
4843 } else if (idx == 0) {
4844 /* If the orphan OST-object has no parent information,
4845 * regard it as referenced by the MDT-object on MDT0. */
4846 fid_zero(&rec->lor_fid);
4847 rec->lor_uid = la->la_uid;
4848 rec->lor_gid = la->la_gid;
4852 dt_read_unlock(env, obj);
4853 lfsck_object_put(env, obj);
/* The PFID xattr must be one of the two known on-disk layouts. */
4861 if (rc != sizeof(struct filter_fid) &&
4862 rc != sizeof(struct filter_fid_old))
4863 GOTO(out, rc = -EINVAL);
4865 fid_le_to_cpu(&rec->lor_fid, &pfid->ff_parent);
4866 /* In fact, the ff_parent::f_ver is not the real parent FID::f_ver,
4867 * instead, it is the OST-object index in its parent MDT-object
4869 save = rec->lor_fid.f_ver;
4870 rec->lor_fid.f_ver = 0;
4871 rc = lfsck_fid_match_idx(env, lfsck, &rec->lor_fid, idx);
4872 /* If the orphan OST-object does not claim the MDT, then next.
4874 * If we do not know whether it matches or not, then return it
4875 * to the MDT for further check. */
4877 dt_read_unlock(env, obj);
4878 lfsck_object_put(env, obj);
/* Matched (or unknown): publish the record for the caller. */
4883 rec->lor_fid.f_ver = save;
4884 rec->lor_uid = la->la_uid;
4885 rec->lor_gid = la->la_gid;
4887 CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u\n",
4888 lfsck_lfsck2name(com->lc_lfsck), PFID(key), PFID(&rec->lor_fid),
4889 rec->lor_uid, rec->lor_gid);
4894 dt_read_unlock(env, obj);
4895 lfsck_object_put(env, obj);
4903 * \retval +1: locate to the exactly position
4904 * \retval 0: cannot locate to the exactly position,
4905 * call next() to move to a valid position.
4906 * \retval -ve: on error
/* Position the iterator at \a key, then delegate to next(). */
4908 static int lfsck_orphan_it_get(const struct lu_env *env,
4910 const struct dt_key *key)
4912 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
4915 it->loi_key = *(struct lu_fid *)key;
4916 rc = lfsck_orphan_it_next(env, di);
/* No per-position resources to release (body not visible here). */
4926 static void lfsck_orphan_it_put(const struct lu_env *env,
/* Return the current iteration key (the orphan's FID). */
4931 static struct dt_key *lfsck_orphan_it_key(const struct lu_env *env,
4932 const struct dt_it *di)
4934 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
4936 return (struct dt_key *)&it->loi_key;
/* Keys for the orphan index are FIDs: fixed size. */
4939 static int lfsck_orphan_it_key_size(const struct lu_env *env,
4940 const struct dt_it *di)
4942 return sizeof(struct lu_fid);
/* Copy out the current orphan record (parent FID + owner). */
4945 static int lfsck_orphan_it_rec(const struct lu_env *env,
4946 const struct dt_it *di,
4950 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
4952 *(struct lu_orphan_rec *)rec = it->loi_rec;
/* Return the opaque cookie (hash) identifying the current position. */
4957 static __u64 lfsck_orphan_it_store(const struct lu_env *env,
4958 const struct dt_it *di)
4960 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
4962 return it->loi_hash;
4966 * \retval +1: locate to the exactly position
4967 * \retval 0: cannot locate to the exactly position,
4968 * call next() to move to a valid position.
4969 * \retval -ve: on error
/*
 * Resume iteration at the position saved by fini().  If the caller's
 * hash cookie does not match the saved one, restart from the
 * beginning (zeroed FID/hash) with a warning.
 */
4971 static int lfsck_orphan_it_load(const struct lu_env *env,
4972 const struct dt_it *di,
4975 struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
4976 struct lfsck_layout_slave_target *llst = it->loi_llst;
4979 LASSERT(llst != NULL);
4981 if (hash != llst->llst_hash) {
4982 CWARN("%s: the given hash "LPU64" for orphan iteration does "
4983 "not match the one when fini "LPU64", to be reset.\n",
4984 lfsck_lfsck2name(it->loi_com->lc_lfsck), hash,
4986 fid_zero(&llst->llst_fid);
4987 llst->llst_hash = 0;
/* Restore the saved position and advance to the first record. */
4990 it->loi_key = llst->llst_fid;
4991 it->loi_hash = llst->llst_hash;
4992 rc = lfsck_orphan_it_next(env, (struct dt_it *)di);
/* Combined key+rec fetch; no-op here (body not visible in extract). */
5002 static int lfsck_orphan_it_key_rec(const struct lu_env *env,
5003 const struct dt_it *di,
5009 const struct dt_index_operations lfsck_orphan_index_ops = {
5010 .dio_lookup = lfsck_orphan_index_lookup,
5011 .dio_declare_insert = lfsck_orphan_index_declare_insert,
5012 .dio_insert = lfsck_orphan_index_insert,
5013 .dio_declare_delete = lfsck_orphan_index_declare_delete,
5014 .dio_delete = lfsck_orphan_index_delete,
5016 .init = lfsck_orphan_it_init,
5017 .fini = lfsck_orphan_it_fini,
5018 .get = lfsck_orphan_it_get,
5019 .put = lfsck_orphan_it_put,
5020 .next = lfsck_orphan_it_next,
5021 .key = lfsck_orphan_it_key,
5022 .key_size = lfsck_orphan_it_key_size,
5023 .rec = lfsck_orphan_it_rec,
5024 .store = lfsck_orphan_it_store,
5025 .load = lfsck_orphan_it_load,
5026 .key_rec = lfsck_orphan_it_key_rec,