#define DEBUG_SUBSYSTEM S_LFSCK
#include <linux/bitops.h>
+#include <linux/rbtree.h>
#include <lustre/lustre_idl.h>
#include <lu_object.h>
struct list_head llsd_master_list;
spinlock_t llsd_lock;
__u64 llsd_touch_gen;
+ struct dt_object *llsd_rb_obj;
+ struct rb_root llsd_rb_root;
+ rwlock_t llsd_rb_lock;
+ unsigned int llsd_rbtree_valid:1;
};
struct lfsck_layout_object {
/* list for the ost targets in phase1 scanning. */
struct list_head llmd_ost_phase2_list;
+ /* list for the mdt targets involve layout verification. */
+ struct list_head llmd_mdt_list;
+
+ /* list for the mdt targets in phase1 scanning. */
+ struct list_head llmd_mdt_phase1_list;
+
+ /* list for the mdt targets in phase1 scanning. */
+ struct list_head llmd_mdt_phase2_list;
+
struct ptlrpc_thread llmd_thread;
- atomic_t llmd_rpcs_in_flight;
__u32 llmd_touch_gen;
int llmd_prefetched;
int llmd_assistant_status;
struct lfsck_layout_slave_target *llsaa_llst;
};
+/* Allocate a lfsck_layout_object wrapper for the MDT-object \a obj.
+ *
+ * Fetches and caches the object's attributes, takes an extra reference
+ * on \a obj (kept in llo_obj until the wrapper is released), and records
+ * the layout generation \a gen observed at pre-fetch time. The wrapper
+ * starts with a reference count of 1 owned by the caller.
+ *
+ * Returns the new wrapper on success, ERR_PTR(-ENOMEM) on allocation
+ * failure, or ERR_PTR(rc) if the attribute fetch fails. */
+static struct lfsck_layout_object *
+lfsck_layout_object_init(const struct lu_env *env, struct dt_object *obj,
+ __u16 gen)
+{
+ struct lfsck_layout_object *llo;
+ int rc;
+
+ OBD_ALLOC_PTR(llo);
+ if (llo == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rc = dt_attr_get(env, obj, &llo->llo_attr, BYPASS_CAPA);
+ if (rc != 0) {
+ OBD_FREE_PTR(llo);
+
+ return ERR_PTR(rc);
+ }
+
+ lu_object_get(&obj->do_lu);
+ llo->llo_obj = obj;
+ /* The gen can be used to check whether some others have changed the
+ * file layout after LFSCK pre-fetching but before real verification. */
+ llo->llo_gen = gen;
+ atomic_set(&llo->llo_ref, 1);
+
+ return llo;
+}
+
static inline void
lfsck_layout_llst_put(struct lfsck_layout_slave_target *llst)
{
}
}
+/* Build a layout verification request pairing the parent MDT-object
+ * wrapper with one of its OST-object stripes.
+ *
+ * An extra reference is taken on \a parent; note that no reference is
+ * taken here on \a child — presumably ownership of the caller's child
+ * reference transfers to the request and is dropped by
+ * lfsck_layout_req_fini() — TODO confirm against the fini path.
+ *
+ * \param[in] parent  wrapper of the MDT-object that owns the stripe
+ * \param[in] child   the OST-object (stripe) to be verified
+ * \param[in] ost_idx OST target index holding the stripe
+ * \param[in] lov_idx stripe index within the parent's LOV layout
+ *
+ * Returns the new request, or ERR_PTR(-ENOMEM). */
+static struct lfsck_layout_req *
+lfsck_layout_req_init(struct lfsck_layout_object *parent,
+ struct dt_object *child, __u32 ost_idx, __u32 lov_idx)
+{
+ struct lfsck_layout_req *llr;
+
+ OBD_ALLOC_PTR(llr);
+ if (llr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&llr->llr_list);
+ atomic_inc(&parent->llo_ref);
+ llr->llr_parent = parent;
+ llr->llr_child = child;
+ llr->llr_ost_idx = ost_idx;
+ llr->llr_lov_idx = lov_idx;
+
+ return llr;
+}
+
static inline void lfsck_layout_req_fini(const struct lu_env *env,
struct lfsck_layout_req *llr)
{
return empty;
}
+/* Read the LOV EA (layout) of \a obj into \a buf, growing the buffer
+ * as needed.
+ *
+ * If the current buffer is too small (-ERANGE) the real EA size is
+ * probed with LU_BUF_NULL and the buffer is reallocated, then the read
+ * is retried. A missing LOV EA (-ENODATA) is mapped to 0. On any
+ * (re)allocation, \a *buflen (if non-NULL) is updated with the new
+ * buffer length.
+ *
+ * \retval > 0  size of the LOV EA actually read
+ * \retval 0    the object has no LOV EA
+ * \retval < 0  negative errno on failure (-ENOMEM on allocation failure)
+ */
+static int lfsck_layout_get_lovea(const struct lu_env *env,
+ struct dt_object *obj,
+ struct lu_buf *buf, ssize_t *buflen)
+{
+ int rc;
+
+again:
+ rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LOV, BYPASS_CAPA);
+ if (rc == -ERANGE) {
+ /* Probe the EA size without copying any data. */
+ rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV,
+ BYPASS_CAPA);
+ if (rc <= 0)
+ return rc;
+
+ lu_buf_realloc(buf, rc);
+ if (buflen != NULL)
+ *buflen = buf->lb_len;
+
+ if (buf->lb_buf == NULL)
+ return -ENOMEM;
+
+ goto again;
+ }
+
+ if (rc == -ENODATA)
+ rc = 0;
+
+ if (rc <= 0)
+ return rc;
+
+ /* First call with an unallocated buffer: rc is the EA size,
+ * allocate and retry to fetch the data. */
+ if (unlikely(buf->lb_buf == NULL)) {
+ lu_buf_alloc(buf, rc);
+ if (buflen != NULL)
+ *buflen = buf->lb_len;
+
+ if (buf->lb_buf == NULL)
+ return -ENOMEM;
+
+ goto again;
+ }
+
+ return rc;
+}
+
+/* Sanity-check an on-disk (little-endian) LOV EA header.
+ *
+ * Only LOV_MAGIC_V1/V3 with LOV_PATTERN_RAID0 are handled by the
+ * layout LFSCK for now. A corrupted magic is left in place: later
+ * OST-object orphan handling may still repair the file via the
+ * back-pointers stored on the OST-objects.
+ *
+ * \retval 0            header looks valid
+ * \retval -EINVAL      unknown/corrupted magic
+ * \retval -EOPNOTSUPP  unsupported stripe pattern */
+static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
+{
+ __u32 magic = le32_to_cpu(lmm->lmm_magic);
+ __u32 pattern = le32_to_cpu(lmm->lmm_pattern);
+
+ if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
+ return -EINVAL;
+
+ if (pattern != LOV_PATTERN_RAID0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/* Each rbtree node carries two bitmaps of one page each; every bit
+ * stands for one OID within the node's OID range. */
+#define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
+/* Number of OIDs covered by one node: bits per bitmap page. */
+#define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
+/* Mask to extract the bit index of an OID inside a node's bitmap.
+ * NOTE(review): this masks by the byte size, not the bit width —
+ * confirm the intended OID-range granularity. */
+#define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_SIZE - 1)
+
+/* One node of the per-device rbtree tracking which OST-objects are
+ * known/accessed during the layout LFSCK, keyed by (seq, OID range). */
+struct lfsck_rbtree_node {
+ struct rb_node lrn_node;
+ /* FID sequence shared by all OIDs tracked in this node. */
+ __u64 lrn_seq;
+ /* First OID of the range covered by the bitmaps below. */
+ __u32 lrn_first_oid;
+ /* Count of bits set in lrn_known_bitmap. */
+ atomic_t lrn_known_count;
+ /* Count of bits set in lrn_accessed_bitmap. */
+ atomic_t lrn_accessed_count;
+ /* Bit set once the OST-object is known to exist. */
+ void *lrn_known_bitmap;
+ /* Bit set once the OST-object has been accessed. */
+ void *lrn_accessed_bitmap;
+};
+
+/* Three-way comparison of (\a seq, \a oid) against the key range of
+ * rbtree node \a lrn: order by sequence first, then by whether the OID
+ * falls below, inside, or above the node's covered OID range
+ * [lrn_first_oid, lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH). */
+static inline int lfsck_rbtree_cmp(struct lfsck_rbtree_node *lrn,
+ __u64 seq, __u32 oid)
+{
+ if (seq != lrn->lrn_seq)
+ return seq < lrn->lrn_seq ? -1 : 1;
+
+ if (oid < lrn->lrn_first_oid)
+ return -1;
+
+ if (oid >= lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH)
+ return 1;
+
+ return 0;
+}
+
+/* The caller should hold lock. */
+static struct lfsck_rbtree_node *
+lfsck_rbtree_search(struct lfsck_layout_slave_data *llsd,
+ const struct lu_fid *fid)
+{
+ struct rb_node *node = llsd->llsd_rb_root.rb_node;
+ struct lfsck_rbtree_node *lrn;
+ int rc;
+
+ while (node != NULL) {
+ lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
+ rc = lfsck_rbtree_cmp(lrn, fid_seq(fid), fid_oid(fid));
+ if (rc < 0)
+ node = node->rb_left;
+ else if (rc > 0)
+ node = node->rb_right;
+ else
+ return lrn;
+ }
+
+ return NULL;
+}
+
+/* Allocate a new rbtree node covering the OID range that contains
+ * \a fid. The node and both bitmaps start zeroed; lrn_first_oid is
+ * aligned down to the range boundary via LFSCK_RBTREE_BITMAP_MASK.
+ *
+ * Returns the new node or ERR_PTR(-ENOMEM); on partial allocation
+ * failure everything already allocated is freed. */
+static struct lfsck_rbtree_node *lfsck_rbtree_new(const struct lu_env *env,
+ const struct lu_fid *fid)
+{
+ struct lfsck_rbtree_node *lrn;
+
+ OBD_ALLOC_PTR(lrn);
+ if (lrn == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ OBD_ALLOC(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
+ if (lrn->lrn_known_bitmap == NULL) {
+ OBD_FREE_PTR(lrn);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ OBD_ALLOC(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
+ if (lrn->lrn_accessed_bitmap == NULL) {
+ OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
+ OBD_FREE_PTR(lrn);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rb_init_node(&lrn->lrn_node);
+ lrn->lrn_seq = fid_seq(fid);
+ lrn->lrn_first_oid = fid_oid(fid) & ~LFSCK_RBTREE_BITMAP_MASK;
+ atomic_set(&lrn->lrn_known_count, 0);
+ atomic_set(&lrn->lrn_accessed_count, 0);
+
+ return lrn;
+}
+
+/* Release a rbtree node together with both of its bitmaps. */
+static void lfsck_rbtree_free(struct lfsck_rbtree_node *lrn)
+{
+ OBD_FREE(lrn->lrn_known_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
+ OBD_FREE(lrn->lrn_accessed_bitmap, LFSCK_RBTREE_BITMAP_SIZE);
+ OBD_FREE_PTR(lrn);
+}
+
+/* The caller should hold lock. */
+static struct lfsck_rbtree_node *
+lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
+ struct lfsck_rbtree_node *lrn)
+{
+ struct rb_node **pos = &(llsd->llsd_rb_root.rb_node);
+ struct rb_node *parent = NULL;
+ struct lfsck_rbtree_node *tmp;
+ int rc;
+
+ while (*pos) {
+ parent = *pos;
+ tmp = rb_entry(*pos, struct lfsck_rbtree_node, lrn_node);
+ rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
+ if (rc < 0)
+ pos = &((*pos)->rb_left);
+ else if (rc > 0)
+ pos = &((*pos)->rb_right);
+ else
+ return tmp;
+ }
+
+ rb_link_node(&lrn->lrn_node, parent, pos);
+ rb_insert_color(&lrn->lrn_node, &llsd->llsd_rb_root);
+
+ return lrn;
+}
+
+/* Set up the per-device rbtree used to record which OST-objects are
+ * known/accessed, and enable FID-access recording on the bottom device.
+ *
+ * A special in-RAM object (FID_SEQ_LAYOUT_RBTREE, oid = device index)
+ * is located to stand for the rbtree so it can be iterated like any
+ * other object.
+ *
+ * NOTE(review): RETURN() is used on the error path but there is no
+ * matching ENTRY in this function — confirm against the ENTRY/RETURN
+ * debug-macro convention (the success path uses a plain return). */
+static int lfsck_rbtree_setup(const struct lu_env *env,
+ struct lfsck_component *com)
+{
+ struct lu_fid *fid = &lfsck_env_info(env)->lti_fid;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct dt_device *dev = lfsck->li_bottom;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct dt_object *obj;
+
+ fid->f_seq = FID_SEQ_LAYOUT_RBTREE;
+ fid->f_oid = lfsck_dev_idx(dev);
+ fid->f_ver = 0;
+ obj = dt_locate(env, dev, fid);
+ if (IS_ERR(obj))
+ RETURN(PTR_ERR(obj));
+
+ /* XXX: Generate an in-RAM object to stand for the layout rbtree.
+ * Scanning the layout rbtree will be via the iteration over
+ * the object. In the future, the rbtree may be written onto
+ * disk with the object.
+ *
+ * Mark the object to be as exist. */
+ obj->do_lu.lo_header->loh_attr |= LOHA_EXISTS;
+ llsd->llsd_rb_obj = obj;
+ llsd->llsd_rbtree_valid = 1;
+ dev->dd_record_fid_accessed = 1;
+
+ return 0;
+}
+
+/* Tear down the per-device rbtree: stop FID-access recording, mark the
+ * tree invalid under the write lock (so concurrent users back off),
+ * free every node, and drop the stand-in object reference.
+ *
+ * Nodes are walked without the lock; this relies on llsd_rbtree_valid
+ * having been cleared first so no one else touches the tree. */
+static void lfsck_rbtree_cleanup(const struct lu_env *env,
+ struct lfsck_component *com)
+{
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct rb_node *node = rb_first(&llsd->llsd_rb_root);
+ struct rb_node *next;
+ struct lfsck_rbtree_node *lrn;
+
+ lfsck->li_bottom->dd_record_fid_accessed = 0;
+ /* Invalid the rbtree, then no others will use it. */
+ write_lock(&llsd->llsd_rb_lock);
+ llsd->llsd_rbtree_valid = 0;
+ write_unlock(&llsd->llsd_rb_lock);
+
+ while (node != NULL) {
+ next = rb_next(node);
+ lrn = rb_entry(node, struct lfsck_rbtree_node, lrn_node);
+ rb_erase(node, &llsd->llsd_rb_root);
+ lfsck_rbtree_free(lrn);
+ node = next;
+ }
+
+ if (llsd->llsd_rb_obj != NULL) {
+ lu_object_put(env, &llsd->llsd_rb_obj->do_lu);
+ llsd->llsd_rb_obj = NULL;
+ }
+}
+
+/* Record in the rbtree that the OST-object \a fid is known to exist,
+ * and (if \a accessed) that it has been accessed.
+ *
+ * Locking: the lookup runs under the read lock. On a miss the read
+ * lock is dropped, a node is allocated, and the write lock is taken
+ * for the insert; 'insert' remembers which lock we hold so the unlock
+ * path releases the right one. The bit updates themselves use atomic
+ * test_and_set_bit, so performing them under only the read lock is
+ * safe.
+ *
+ * On any failure while 'accessed' is requested, the orphan-handling
+ * bitmap would be incomplete, so LF_INCOMPLETE is set and the whole
+ * rbtree is torn down to cancel orphan handling for this device. */
+static void lfsck_rbtree_update_bitmap(const struct lu_env *env,
+ struct lfsck_component *com,
+ const struct lu_fid *fid,
+ bool accessed)
+{
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct lfsck_rbtree_node *lrn;
+ bool insert = false;
+ int idx;
+ int rc = 0;
+ ENTRY;
+
+ CDEBUG(D_LFSCK, "%s: update bitmap for "DFID"\n",
+ lfsck_lfsck2name(com->lc_lfsck), PFID(fid));
+
+ /* Only sane, non-LAST_ID, IDIF or normal FIDs are tracked. */
+ if (unlikely(!fid_is_sane(fid) || fid_is_last_id(fid)))
+ RETURN_EXIT;
+
+ if (!fid_is_idif(fid) && !fid_is_norm(fid))
+ RETURN_EXIT;
+
+ read_lock(&llsd->llsd_rb_lock);
+ if (!llsd->llsd_rbtree_valid)
+ GOTO(unlock, rc = 0);
+
+ lrn = lfsck_rbtree_search(llsd, fid);
+ if (lrn == NULL) {
+ struct lfsck_rbtree_node *tmp;
+
+ LASSERT(!insert);
+
+ read_unlock(&llsd->llsd_rb_lock);
+ tmp = lfsck_rbtree_new(env, fid);
+ if (IS_ERR(tmp))
+ GOTO(out, rc = PTR_ERR(tmp));
+
+ insert = true;
+ write_lock(&llsd->llsd_rb_lock);
+ /* Re-check validity: the tree may have been torn down
+ * while we were unlocked. */
+ if (!llsd->llsd_rbtree_valid) {
+ lfsck_rbtree_free(tmp);
+ GOTO(unlock, rc = 0);
+ }
+
+ /* Someone may have inserted the same range meanwhile. */
+ lrn = lfsck_rbtree_insert(llsd, tmp);
+ if (lrn != tmp)
+ lfsck_rbtree_free(tmp);
+ }
+
+ idx = fid_oid(fid) & LFSCK_RBTREE_BITMAP_MASK;
+ /* Any accessed object must be a known object. */
+ if (!test_and_set_bit(idx, lrn->lrn_known_bitmap))
+ atomic_inc(&lrn->lrn_known_count);
+ if (accessed) {
+ if (!test_and_set_bit(idx, lrn->lrn_accessed_bitmap))
+ atomic_inc(&lrn->lrn_accessed_count);
+ }
+
+ GOTO(unlock, rc = 0);
+
+unlock:
+ if (insert)
+ write_unlock(&llsd->llsd_rb_lock);
+ else
+ read_unlock(&llsd->llsd_rb_lock);
+out:
+ if (rc != 0 && accessed) {
+ struct lfsck_layout *lo = com->lc_file_ram;
+
+ CERROR("%s: Fail to update object accessed bitmap, will cause "
+ "incorrect LFSCK OST-object handling, so disable it to "
+ "cancel orphan handling for related device. rc = %d.\n",
+ lfsck_lfsck2name(com->lc_lfsck), rc);
+ lo->ll_flags |= LF_INCOMPLETE;
+ lfsck_rbtree_cleanup(env, com);
+ }
+}
+
static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
const struct lfsck_layout *src)
{
if (rc == sizeof(*lma)) {
lustre_lma_swab(lma);
- /* Generally, the low layer OSD create handler or OI scrub
- * will set the LMAC_FID_ON_OST for all external visible
- * OST-objects. But to make the otable-based iteration to
- * be independent from OI scrub in spite of it got failure
- * or not, we check the LMAC_FID_ON_OST here to guarantee
- * that the LFSCK will not repair something by wrong. */
return lma->lma_compat & LMAC_FID_ON_OST ? 1 : 0;
}
switch (lr->lr_event) {
case LE_START:
- if (rc == 0) {
- spin_lock(<ds->ltd_lock);
- if (!ltd->ltd_dead && !ltd->ltd_layout_done) {
- if (list_empty(<d->ltd_layout_list))
- list_add_tail(
- <d->ltd_layout_list,
- &llmd->llmd_ost_list);
- if (list_empty(<d->ltd_layout_phase_list))
- list_add_tail(
- <d->ltd_layout_phase_list,
- &llmd->llmd_ost_phase1_list);
- }
- spin_unlock(<ds->ltd_lock);
- } else {
+ if (rc != 0) {
struct lfsck_layout *lo = com->lc_file_ram;
lo->ll_flags |= LF_INCOMPLETE;
+ lfsck_tgt_put(ltd);
+ break;
+ }
+
+ spin_lock(<ds->ltd_lock);
+ if (ltd->ltd_dead || ltd->ltd_layout_done) {
+ spin_unlock(<ds->ltd_lock);
+ lfsck_tgt_put(ltd);
+ break;
+ }
+
+ if (lr->lr_flags & LEF_TO_OST) {
+ if (list_empty(<d->ltd_layout_list))
+ list_add_tail(<d->ltd_layout_list,
+ &llmd->llmd_ost_list);
+ if (list_empty(<d->ltd_layout_phase_list))
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_ost_phase1_list);
+ } else {
+ if (list_empty(<d->ltd_layout_list))
+ list_add_tail(<d->ltd_layout_list,
+ &llmd->llmd_mdt_list);
+ if (list_empty(<d->ltd_layout_phase_list))
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase1_list);
}
+ spin_unlock(<ds->ltd_lock);
lfsck_tgt_put(ltd);
break;
case LE_STOP:
+ case LE_PHASE1_DONE:
case LE_PHASE2_DONE:
+ if (rc != 0)
+ CERROR("%s: fail to notify %s %x for layout: "
+ "event = %d, rc = %d\n",
+ lfsck_lfsck2name(com->lc_lfsck),
+ (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
+ ltd->ltd_index, lr->lr_event, rc);
break;
- case LE_QUERY:
- spin_lock(<ds->ltd_lock);
- if (rc == 0 && !ltd->ltd_dead && !ltd->ltd_layout_done) {
- struct lfsck_reply *reply;
+ case LE_QUERY: {
+ struct lfsck_reply *reply;
+
+ if (rc != 0) {
+ spin_lock(<ds->ltd_lock);
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ spin_unlock(<ds->ltd_lock);
+ lfsck_tgt_put(ltd);
+ break;
+ }
+
+ reply = req_capsule_server_get(&req->rq_pill,
+ &RMF_LFSCK_REPLY);
+ if (reply == NULL) {
+ rc = -EPROTO;
+ CERROR("%s: invalid return value: rc = %d\n",
+ lfsck_lfsck2name(com->lc_lfsck), rc);
+ spin_lock(<ds->ltd_lock);
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ spin_unlock(<ds->ltd_lock);
+ lfsck_tgt_put(ltd);
+ break;
+ }
- reply = req_capsule_server_get(&req->rq_pill,
- &RMF_LFSCK_REPLY);
- switch (reply->lr_status) {
- case LS_SCANNING_PHASE1:
+ switch (reply->lr_status) {
+ case LS_SCANNING_PHASE1:
+ break;
+ case LS_SCANNING_PHASE2:
+ spin_lock(<ds->ltd_lock);
+ list_del_init(<d->ltd_layout_phase_list);
+ if (ltd->ltd_dead || ltd->ltd_layout_done) {
+ spin_unlock(<ds->ltd_lock);
break;
- case LS_SCANNING_PHASE2:
- list_del(<d->ltd_layout_phase_list);
+ }
+
+ if (lr->lr_flags & LEF_TO_OST)
list_add_tail(<d->ltd_layout_phase_list,
&llmd->llmd_ost_phase2_list);
- break;
- default:
- list_del_init(<d->ltd_layout_phase_list);
- list_del_init(<d->ltd_layout_list);
- break;
- }
+ else
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase2_list);
+ spin_unlock(<ds->ltd_lock);
+ break;
+ default:
+ spin_lock(<ds->ltd_lock);
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ spin_unlock(<ds->ltd_lock);
+ break;
}
- spin_unlock(<ds->ltd_lock);
lfsck_tgt_put(ltd);
break;
+ }
default:
CERROR("%s: unexpected event: rc = %d\n",
lfsck_lfsck2name(com->lc_lfsck), lr->lr_event);
break;
}
+ lfsck_component_put(env, com);
+
return 0;
}
struct ptlrpc_request_set *set;
struct lfsck_tgt_descs *ltds;
struct lfsck_tgt_desc *ltd;
+ struct list_head *head;
__u32 cnt = 0;
int rc = 0;
int rc1 = 0;
RETURN(-ENOMEM);
llmd->llmd_touch_gen++;
- ltds = &lfsck->li_ost_descs;
memset(lr, 0, sizeof(*lr));
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
lr->lr_event = LE_QUERY;
lr->lr_active = LT_LAYOUT;
-
laia->laia_com = com;
- laia->laia_ltds = ltds;
laia->laia_lr = lr;
+
+ if (!list_empty(&llmd->llmd_mdt_phase1_list)) {
+ ltds = &lfsck->li_mdt_descs;
+ lr->lr_flags = 0;
+ head = &llmd->llmd_mdt_phase1_list;
+ } else {
+
+again:
+ ltds = &lfsck->li_ost_descs;
+ lr->lr_flags = LEF_TO_OST;
+ head = &llmd->llmd_ost_phase1_list;
+ }
+
+ laia->laia_ltds = ltds;
spin_lock(<ds->ltd_lock);
- while (!list_empty(&llmd->llmd_ost_phase1_list)) {
- ltd = list_entry(llmd->llmd_ost_phase1_list.next,
+ while (!list_empty(head)) {
+ ltd = list_entry(head->next,
struct lfsck_tgt_desc,
ltd_layout_phase_list);
if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
ltd->ltd_layout_gen = llmd->llmd_touch_gen;
list_del(<d->ltd_layout_phase_list);
- list_add_tail(<d->ltd_layout_phase_list,
- &llmd->llmd_ost_phase1_list);
+ list_add_tail(<d->ltd_layout_phase_list, head);
atomic_inc(<d->ltd_ref);
laia->laia_ltd = ltd;
spin_unlock(<ds->ltd_lock);
lfsck_layout_master_async_interpret,
laia, LFSCK_QUERY);
if (rc != 0) {
- CERROR("%s: fail to query OST %x for layout: rc = %d\n",
- lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
+ CERROR("%s: fail to query %s %x for layout: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
+ ltd->ltd_index, rc);
lfsck_tgt_put(ltd);
rc1 = rc;
} else {
}
spin_unlock(<ds->ltd_lock);
- if (cnt > 0)
+ if (cnt > 0) {
rc = ptlrpc_set_wait(set);
+ if (rc < 0) {
+ ptlrpc_set_destroy(set);
+ RETURN(rc);
+ }
+ cnt = 0;
+ }
+
+ if (!(lr->lr_flags & LEF_TO_OST) &&
+ list_empty(&llmd->llmd_mdt_phase1_list))
+ goto again;
+
ptlrpc_set_destroy(set);
RETURN(rc1 != 0 ? rc1 : rc);
static inline bool
lfsck_layout_master_to_orphan(struct lfsck_layout_master_data *llmd)
{
- return !list_empty(&llmd->llmd_ost_phase2_list) ||
- list_empty(&llmd->llmd_ost_phase1_list);
+ return list_empty(&llmd->llmd_mdt_phase1_list) &&
+ (!list_empty(&llmd->llmd_ost_phase2_list) ||
+ list_empty(&llmd->llmd_ost_phase1_list));
}
static int lfsck_layout_master_notify_others(const struct lu_env *env,
struct lfsck_component *com,
- struct lfsck_request *lr)
+ struct lfsck_request *lr,
+ __u32 flags)
{
struct lfsck_thread_info *info = lfsck_env_info(env);
struct lfsck_async_interpret_args *laia = &info->lti_laia;
struct ptlrpc_request_set *set;
struct lfsck_tgt_descs *ltds;
struct lfsck_tgt_desc *ltd;
+ struct lfsck_tgt_desc *next;
+ struct list_head *head;
__u32 idx;
__u32 cnt = 0;
int rc = 0;
lr->lr_active = LT_LAYOUT;
laia->laia_com = com;
laia->laia_lr = lr;
+ lr->lr_flags = 0;
switch (lr->lr_event) {
case LE_START:
+ /* Notify OSTs firstly, then other MDTs if needed. */
+ lr->lr_flags |= LEF_TO_OST;
ltds = &lfsck->li_ost_descs;
+
+lable1:
laia->laia_ltds = ltds;
down_read(<ds->ltd_rw_sem);
cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
lfsck_layout_master_async_interpret,
laia, LFSCK_NOTIFY);
if (rc != 0) {
- CERROR("%s: fail to notify OST %x for layout "
+ CERROR("%s: fail to notify %s %x for layout "
"start: rc = %d\n",
- lfsck_lfsck2name(lfsck), idx, rc);
+ lfsck_lfsck2name(lfsck),
+ (lr->lr_flags & LEF_TO_OST) ? "OST" :
+ "MDT", idx, rc);
lfsck_tgt_put(ltd);
lo->ll_flags |= LF_INCOMPLETE;
} else {
}
}
up_read(<ds->ltd_rw_sem);
+
+ /* Sync up */
+ if (cnt > 0) {
+ rc = ptlrpc_set_wait(set);
+ if (rc < 0) {
+ ptlrpc_set_destroy(set);
+ RETURN(rc);
+ }
+ cnt = 0;
+ }
+
+ if (!(flags & LPF_ALL_MDT))
+ break;
+
+ ltds = &lfsck->li_mdt_descs;
+ /* The sponsor broadcasts the request to other MDTs. */
+ if (flags & LPF_BROADCAST) {
+ flags &= ~LPF_ALL_MDT;
+ lr->lr_flags &= ~LEF_TO_OST;
+ goto lable1;
+ }
+
+ /* non-sponsors link other MDT targets locallly. */
+ spin_lock(<ds->ltd_lock);
+ cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
+ ltd = LTD_TGT(ltds, idx);
+ LASSERT(ltd != NULL);
+
+ if (!list_empty(<d->ltd_layout_list))
+ continue;
+
+ list_add_tail(<d->ltd_layout_list,
+ &llmd->llmd_mdt_list);
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase1_list);
+ }
+ spin_unlock(<ds->ltd_lock);
+
break;
case LE_STOP:
+ if (flags & LPF_BROADCAST)
+ lr->lr_flags |= LEF_FORCE_STOP;
case LE_PHASE2_DONE:
+ /* Notify other MDTs if needed, then the OSTs. */
+ if (flags & LPF_ALL_MDT) {
+ /* The sponsor broadcasts the request to other MDTs. */
+ if (flags & LPF_BROADCAST) {
+ lr->lr_flags &= ~LEF_TO_OST;
+ head = &llmd->llmd_mdt_list;
+ ltds = &lfsck->li_mdt_descs;
+ goto lable3;
+ }
+
+ /* non-sponsors unlink other MDT targets locallly. */
+ ltds = &lfsck->li_mdt_descs;
+ spin_lock(<ds->ltd_lock);
+ list_for_each_entry_safe(ltd, next,
+ &llmd->llmd_mdt_list,
+ ltd_layout_list) {
+ list_del_init(<d->ltd_layout_phase_list);
+ list_del_init(<d->ltd_layout_list);
+ }
+ spin_unlock(<ds->ltd_lock);
+ }
+
+lable2:
+ lr->lr_flags |= LEF_TO_OST;
+ head = &llmd->llmd_ost_list;
ltds = &lfsck->li_ost_descs;
+
+lable3:
laia->laia_ltds = ltds;
spin_lock(<ds->ltd_lock);
- while (!list_empty(&llmd->llmd_ost_list)) {
- ltd = list_entry(llmd->llmd_ost_list.next,
- struct lfsck_tgt_desc,
+ while (!list_empty(head)) {
+ ltd = list_entry(head->next, struct lfsck_tgt_desc,
ltd_layout_list);
- list_del_init(<d->ltd_layout_phase_list);
+ if (!list_empty(<d->ltd_layout_phase_list))
+ list_del_init(<d->ltd_layout_phase_list);
list_del_init(<d->ltd_layout_list);
laia->laia_ltd = ltd;
spin_unlock(<ds->ltd_lock);
lfsck_layout_master_async_interpret,
laia, LFSCK_NOTIFY);
if (rc != 0)
- CERROR("%s: fail to notify OST %x for layout "
- "stop/done: rc = %d\n",
+ CERROR("%s: fail to notify %s %x for layout "
+ "stop/phase2: rc = %d\n",
lfsck_lfsck2name(lfsck),
- ltd->ltd_index, rc);
+ (lr->lr_flags & LEF_TO_OST) ? "OST" :
+ "MDT", ltd->ltd_index, rc);
else
cnt++;
spin_lock(<ds->ltd_lock);
}
spin_unlock(<ds->ltd_lock);
- break;
+
+ if (!(flags & LPF_BROADCAST))
+ break;
+
+ /* Sync up */
+ if (cnt > 0) {
+ rc = ptlrpc_set_wait(set);
+ if (rc < 0) {
+ ptlrpc_set_destroy(set);
+ RETURN(rc);
+ }
+ cnt = 0;
+ }
+
+ flags &= ~LPF_BROADCAST;
+ goto lable2;
case LE_PHASE1_DONE:
+ llmd->llmd_touch_gen++;
+ lr->lr_flags &= ~LEF_TO_OST;
+ ltds = &lfsck->li_mdt_descs;
+ laia->laia_ltds = ltds;
+ spin_lock(<ds->ltd_lock);
+ while (!list_empty(&llmd->llmd_mdt_phase1_list)) {
+ ltd = list_entry(llmd->llmd_mdt_phase1_list.next,
+ struct lfsck_tgt_desc,
+ ltd_layout_phase_list);
+ if (ltd->ltd_layout_gen == llmd->llmd_touch_gen)
+ break;
+
+ ltd->ltd_layout_gen = llmd->llmd_touch_gen;
+ list_del_init(<d->ltd_layout_phase_list);
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase1_list);
+ laia->laia_ltd = ltd;
+ spin_unlock(<ds->ltd_lock);
+ rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
+ lfsck_layout_master_async_interpret,
+ laia, LFSCK_NOTIFY);
+ if (rc != 0)
+ CERROR("%s: fail to notify MDT %x for layout "
+ "phase1 done: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ ltd->ltd_index, rc);
+ else
+ cnt++;
+ spin_lock(<ds->ltd_lock);
+ }
+ spin_unlock(<ds->ltd_lock);
break;
default:
CERROR("%s: unexpected LFSCK event: rc = %d\n",
lo->ll_time_last_checkpoint = cfs_time_current_sec();
lo->ll_objs_checked_phase2 += com->lc_new_checked;
- if (rc > 0) {
- com->lc_journal = 0;
- if (lo->ll_flags & LF_INCOMPLETE)
- lo->ll_status = LS_PARTIAL;
+ if (rc > 0) {
+ com->lc_journal = 0;
+ if (lo->ll_flags & LF_INCOMPLETE)
+ lo->ll_status = LS_PARTIAL;
+ else
+ lo->ll_status = LS_COMPLETED;
+ if (!(bk->lb_param & LPF_DRYRUN))
+ lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
+ lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
+ lo->ll_success_count++;
+ } else if (rc == 0) {
+ lo->ll_status = lfsck->li_status;
+ if (lo->ll_status == 0)
+ lo->ll_status = LS_STOPPED;
+ } else {
+ lo->ll_status = LS_FAILED;
+ }
+
+ if (lo->ll_status != LS_PAUSED) {
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
+ }
+
+ rc = lfsck_layout_store(env, com);
+
+ up_write(&com->lc_sem);
+
+ return rc;
+}
+
+/* Take a local server-side EX inodebits lock on \a obj covering the
+ * ibits in \a bits (e.g. LAYOUT | XATTR), so the repair transaction
+ * cannot race with client-visible layout changes.
+ *
+ * On success the lock handle is stored in \a lh for a later
+ * lfsck_layout_unlock(); on failure the handle is zeroed and -EIO is
+ * returned (the LDLM status is not propagated). */
+static int lfsck_layout_lock(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct dt_object *obj,
+ struct lustre_handle *lh, __u64 bits)
+{
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ ldlm_policy_data_t *policy = &info->lti_policy;
+ struct ldlm_res_id *resid = &info->lti_resid;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ __u64 flags = LDLM_FL_ATOMIC_CB;
+ int rc;
+
+ LASSERT(lfsck->li_namespace != NULL);
+
+ memset(policy, 0, sizeof(*policy));
+ policy->l_inodebits.bits = bits;
+ fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
+ rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_IBITS,
+ policy, LCK_EX, &flags, ldlm_blocking_ast,
+ ldlm_completion_ast, NULL, NULL, 0,
+ LVB_T_NONE, NULL, lh);
+ if (rc == ELDLM_OK) {
+ rc = 0;
+ } else {
+ memset(lh, 0, sizeof(*lh));
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+/* Release the EX lock obtained by lfsck_layout_lock() and clear the
+ * handle; a handle that was never filled in is silently ignored. */
+static void lfsck_layout_unlock(struct lustre_handle *lh)
+{
+ if (!lustre_handle_is_used(lh))
+ return;
+
+ ldlm_lock_decref(lh, LCK_EX);
+ memset(lh, 0, sizeof(*lh));
+}
+
+/* Record \a result in the transaction handle and stop it, translating
+ * the stop status for LFSCK repair callers:
+ *
+ * \retval 1    dt_trans_stop() returned 0 (fully committed)
+ * \retval 0    dt_trans_stop() returned a positive value
+ * \retval < 0  negative errno from dt_trans_stop() */
+static int lfsck_layout_trans_stop(const struct lu_env *env,
+ struct dt_device *dev,
+ struct thandle *handle, int result)
+{
+ int rc;
+
+ handle->th_result = result;
+ rc = dt_trans_stop(env, dev, handle);
+ if (rc < 0)
+ return rc;
+
+ return rc == 0 ? 1 : 0;
+}
+
+/* Scan the OST target \a ltd for orphan OST-objects during phase 2.
+ * Currently a placeholder that always succeeds; the real scanning is
+ * implemented by a follow-on patch. */
+static int lfsck_layout_scan_orphan(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_tgt_desc *ltd)
+{
+ /* XXX: To be extended in other patch. */
+
+ return 0;
+}
+
+/* For the MDT-object with dangling reference, we need to re-create
+ * the missed OST-object with the known FID/owner information.
+ *
+ * Sequence: take an EX LAYOUT|XATTR lock on the parent, then inside a
+ * single transaction create the child OST-object and set its
+ * filter_fid back-pointer (ff_parent.f_ver carries the stripe index,
+ * not the real parent version). The parent is re-checked for death
+ * under its read lock before any change is applied.
+ *
+ * \retval 1    the parent disappeared meanwhile, nothing to repair
+ *              (via lfsck_layout_trans_stop() translation)
+ * \retval 0/1  translated status from lfsck_layout_trans_stop()
+ * \retval < 0  negative errno on failure */
+static int lfsck_layout_recreate_ostobj(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_layout_req *llr,
+ struct lu_attr *la)
+{
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct filter_fid *pfid = &info->lti_new_pfid;
+ struct dt_allocation_hint *hint = &info->lti_hint;
+ struct dt_object *parent = llr->llr_parent->llo_obj;
+ struct dt_object *child = llr->llr_child;
+ struct dt_device *dev = lfsck_obj2dt_dev(child);
+ const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
+ struct thandle *handle;
+ struct lu_buf *buf;
+ struct lustre_handle lh = { 0 };
+ int rc;
+ ENTRY;
+
+ CDEBUG(D_LFSCK, "Repair dangling reference for: parent "DFID
+ ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
+ PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
+ llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid);
+
+ rc = lfsck_layout_lock(env, com, parent, &lh,
+ MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
+ if (rc != 0)
+ RETURN(rc);
+
+ handle = dt_trans_create(env, dev);
+ if (IS_ERR(handle))
+ GOTO(unlock1, rc = PTR_ERR(handle));
+
+ hint->dah_parent = NULL;
+ hint->dah_mode = 0;
+ pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
+ pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
+ /* f_ver stores the stripe index within the parent's layout. */
+ pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
+ buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
+
+ rc = dt_declare_create(env, child, la, hint, NULL, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID,
+ LU_XATTR_CREATE, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = dt_trans_start(env, dev, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ dt_read_lock(env, parent, 0);
+ /* Parent may have been removed while we waited for the lock. */
+ if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
+ GOTO(unlock2, rc = 1);
+
+ rc = dt_create(env, child, la, hint, NULL, handle);
+ if (rc != 0)
+ GOTO(unlock2, rc);
+
+ rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, LU_XATTR_CREATE,
+ handle, BYPASS_CAPA);
+
+ GOTO(unlock2, rc);
+
+unlock2:
+ dt_read_unlock(env, parent);
+
+stop:
+ rc = lfsck_layout_trans_stop(env, dev, handle, rc);
+
+unlock1:
+ lfsck_layout_unlock(&lh);
+
+ return rc;
+}
+
+/* If the OST-object does not recognize the MDT-object as its parent, and
+ * there is no other MDT-object claims as its parent, then just trust the
+ * given MDT-object as its parent. So update the OST-object filter_fid. */
+/* If the OST-object does not recognize the MDT-object as its parent, and
+ * there is no other MDT-object claims as its parent, then just trust the
+ * given MDT-object as its parent. So update the OST-object filter_fid.
+ *
+ * Within one transaction (under the parent's EX LAYOUT|XATTR lock and
+ * write lock): rewrite the child's filter_fid back-pointer, then copy
+ * the parent's current UID/GID onto the child. The parent's owner is
+ * re-read after the xattr update so the child gets the latest values.
+ *
+ * \retval 1    parent disappeared meanwhile (via trans_stop translation)
+ * \retval 0/1  translated status from lfsck_layout_trans_stop()
+ * \retval < 0  negative errno on failure */
+static int lfsck_layout_repair_unmatched_pair(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_layout_req *llr,
+ const struct lu_attr *pla)
+{
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct filter_fid *pfid = &info->lti_new_pfid;
+ struct lu_attr *tla = &info->lti_la3;
+ struct dt_object *parent = llr->llr_parent->llo_obj;
+ struct dt_object *child = llr->llr_child;
+ struct dt_device *dev = lfsck_obj2dt_dev(child);
+ const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
+ struct thandle *handle;
+ struct lu_buf *buf;
+ struct lustre_handle lh = { 0 };
+ int rc;
+ ENTRY;
+
+ CDEBUG(D_LFSCK, "Repair unmatched MDT-OST pair for: parent "DFID
+ ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
+ PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
+ llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
+
+ rc = lfsck_layout_lock(env, com, parent, &lh,
+ MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
+ if (rc != 0)
+ RETURN(rc);
+
+ handle = dt_trans_create(env, dev);
+ if (IS_ERR(handle))
+ GOTO(unlock1, rc = PTR_ERR(handle));
+
+ pfid->ff_parent.f_seq = cpu_to_le64(tfid->f_seq);
+ pfid->ff_parent.f_oid = cpu_to_le32(tfid->f_oid);
+ /* The ff_parent->f_ver is not the real parent fid->f_ver. Instead,
+ * it is the OST-object index in the parent MDT-object layout. */
+ pfid->ff_parent.f_ver = cpu_to_le32(llr->llr_lov_idx);
+ buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
+
+ rc = dt_declare_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ tla->la_valid = LA_UID | LA_GID;
+ tla->la_uid = pla->la_uid;
+ tla->la_gid = pla->la_gid;
+ rc = dt_declare_attr_set(env, child, tla, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = dt_trans_start(env, dev, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ dt_write_lock(env, parent, 0);
+ /* Parent may have been removed while we waited for the lock. */
+ if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
+ GOTO(unlock2, rc = 1);
+
+ rc = dt_xattr_set(env, child, buf, XATTR_NAME_FID, 0, handle,
+ BYPASS_CAPA);
+ if (rc != 0)
+ GOTO(unlock2, rc);
+
+ /* Get the latest parent's owner. */
+ rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
+ if (rc != 0)
+ GOTO(unlock2, rc);
+
+ tla->la_valid = LA_UID | LA_GID;
+ rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
+
+ GOTO(unlock2, rc);
+
+unlock2:
+ dt_write_unlock(env, parent);
+
+stop:
+ rc = lfsck_layout_trans_stop(env, dev, handle, rc);
+
+unlock1:
+ lfsck_layout_unlock(&lh);
+
+ return rc;
+}
+
+/* If there are more than one MDT-objects claim as the OST-object's parent,
+ * and the OST-object only recognizes one of them, then we need to generate
+ * new OST-object(s) with new fid(s) for the non-recognized MDT-object(s).
+ *
+ * Under the parent's EX LAYOUT|XATTR lock and a single transaction on
+ * the MDT stack: allocate an anonymous child object on the OST device,
+ * create it, and rewrite the corresponding slot of the parent's LOV EA
+ * to point at the new OST-object. The repair is skipped (rc = 0) if the
+ * parent dies, loses its LOV EA, or its layout generation changed since
+ * pre-fetch.
+ *
+ * \retval 1    the LOV EA was successfully updated
+ * \retval 0    nothing to repair (layout changed / parent gone)
+ * \retval < 0  negative errno on failure */
+static int lfsck_layout_repair_multiple_references(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_layout_req *llr,
+ struct lu_attr *la,
+ struct lu_buf *buf)
+{
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct dt_allocation_hint *hint = &info->lti_hint;
+ struct dt_object_format *dof = &info->lti_dof;
+ struct dt_device *pdev = com->lc_lfsck->li_next;
+ struct ost_id *oi = &info->lti_oi;
+ struct dt_object *parent = llr->llr_parent->llo_obj;
+ struct dt_device *cdev = lfsck_obj2dt_dev(llr->llr_child);
+ struct dt_object *child = NULL;
+ struct lu_device *d = &cdev->dd_lu_dev;
+ struct lu_object *o = NULL;
+ struct thandle *handle;
+ struct lov_mds_md_v1 *lmm;
+ struct lov_ost_data_v1 *objs;
+ struct lustre_handle lh = { 0 };
+ __u32 magic;
+ int rc;
+ ENTRY;
+
+ CDEBUG(D_LFSCK, "Repair multiple references for: parent "DFID
+ ", OST-index %u, stripe-index %u, owner %u:%u\n",
+ PFID(lfsck_dto2fid(parent)), llr->llr_ost_idx,
+ llr->llr_lov_idx, la->la_uid, la->la_gid);
+
+ rc = lfsck_layout_lock(env, com, parent, &lh,
+ MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
+ if (rc != 0)
+ RETURN(rc);
+
+ handle = dt_trans_create(env, pdev);
+ if (IS_ERR(handle))
+ GOTO(unlock1, rc = PTR_ERR(handle));
+
+ o = lu_object_anon(env, d, NULL);
+ if (IS_ERR(o))
+ GOTO(stop, rc = PTR_ERR(o));
+
+ /* Assign 'child' before lu_object_locate() so the cleanup path
+ * can release the anonymous object even if locating fails. */
+ child = container_of(o, struct dt_object, do_lu);
+ o = lu_object_locate(o->lo_header, d->ld_type);
+ if (unlikely(o == NULL))
+ GOTO(stop, rc = -EINVAL);
+
+ child = container_of(o, struct dt_object, do_lu);
+ la->la_valid = LA_UID | LA_GID;
+ hint->dah_parent = NULL;
+ hint->dah_mode = 0;
+ dof->dof_type = DFT_REGULAR;
+ /* NOTE(review): hint/dof are initialized but NULL is passed to
+ * dt_declare_create() while dt_create() below uses hint/dof —
+ * confirm the declare/create arguments are meant to differ. */
+ rc = dt_declare_create(env, child, la, NULL, NULL, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = dt_declare_xattr_set(env, parent, buf, XATTR_NAME_LOV,
+ LU_XATTR_REPLACE, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = dt_trans_start(env, pdev, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ dt_write_lock(env, parent, 0);
+ if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
+ GOTO(unlock2, rc = 0);
+
+ rc = dt_xattr_get(env, parent, buf, XATTR_NAME_LOV, BYPASS_CAPA);
+ if (unlikely(rc == 0 || rc == -ENODATA || rc == -ERANGE))
+ GOTO(unlock2, rc = 0);
+
+ lmm = buf->lb_buf;
+ rc = lfsck_layout_verify_header(lmm);
+ if (rc != 0)
+ GOTO(unlock2, rc);
+
+ /* Someone change layout during the LFSCK, no need to repair then. */
+ if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
+ GOTO(unlock2, rc = 0);
+
+ rc = dt_create(env, child, la, hint, dof, handle);
+ if (rc != 0)
+ GOTO(unlock2, rc);
+
+ /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
+ * been verified in lfsck_layout_verify_header() already. If some
+ * new magic introduced in the future, then layout LFSCK needs to
+ * be updated also. */
+ magic = le32_to_cpu(lmm->lmm_magic);
+ if (magic == LOV_MAGIC_V1) {
+ objs = &(lmm->lmm_objects[0]);
+ } else {
+ LASSERT(magic == LOV_MAGIC_V3);
+ objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
+ }
+
+ /* Bump the layout generation and point the repaired stripe slot
+ * at the newly created OST-object. */
+ lmm->lmm_layout_gen = cpu_to_le16(llr->llr_parent->llo_gen + 1);
+ fid_to_ostid(lu_object_fid(&child->do_lu), oi);
+ ostid_cpu_to_le(oi, &objs[llr->llr_lov_idx].l_ost_oi);
+ objs[llr->llr_lov_idx].l_ost_gen = cpu_to_le32(0);
+ objs[llr->llr_lov_idx].l_ost_idx = cpu_to_le32(llr->llr_ost_idx);
+ rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV,
+ LU_XATTR_REPLACE, handle, BYPASS_CAPA);
+
+ GOTO(unlock2, rc = (rc == 0 ? 1 : rc));
+
+unlock2:
+ dt_write_unlock(env, parent);
+
+stop:
+ if (child != NULL)
+ lu_object_put(env, &child->do_lu);
+
+ dt_trans_stop(env, pdev, handle);
+
+unlock1:
+ lfsck_layout_unlock(&lh);
+
+ return rc;
+}
+
+/* If the MDT-object and the OST-object have different owner information,
+ * then trust the MDT-object, because the normal chown/chgrp handle order
+ * is from MDT to OST, and it is possible that some chown/chgrp operation
+ * is partly done. */
+static int lfsck_layout_repair_owner(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_layout_req *llr,
+ struct lu_attr *pla)
+{
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct lu_attr *tla = &info->lti_la3;
+ struct dt_object *parent = llr->llr_parent->llo_obj;
+ struct dt_object *child = llr->llr_child;
+ struct dt_device *dev = lfsck_obj2dt_dev(child);
+ struct thandle *handle;
+ int rc;
+ ENTRY;
+
+ CDEBUG(D_LFSCK, "Repair inconsistent file owner for: parent "DFID
+ ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
+ PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
+ llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid);
+
+ handle = dt_trans_create(env, dev);
+ if (IS_ERR(handle))
+ RETURN(PTR_ERR(handle));
+
+ /* Copy the MDT-object's (authoritative) UID/GID to the OST-object. */
+ tla->la_uid = pla->la_uid;
+ tla->la_gid = pla->la_gid;
+ tla->la_valid = LA_UID | LA_GID;
+ rc = dt_declare_attr_set(env, child, tla, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = dt_trans_start(env, dev, handle);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ /* Use the dt_object lock to serialize with destroy and attr_set. */
+ dt_read_lock(env, parent, 0);
+ if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
+ GOTO(unlock, rc = 1);
+
+ /* Get the latest parent's owner. */
+ rc = dt_attr_get(env, parent, tla, BYPASS_CAPA);
+ if (rc != 0) {
+ CWARN("%s: fail to get the latest parent's ("DFID") owner, "
+ "not sure whether some others chown/chgrp during the "
+ "LFSCK: rc = %d\n", lfsck_lfsck2name(com->lc_lfsck),
+ PFID(lfsck_dto2fid(parent)), rc);
+
+ GOTO(unlock, rc);
+ }
+
+ /* Some others chown/chgrp during the LFSCK, needs to do nothing. */
+ if (unlikely(tla->la_uid != pla->la_uid ||
+ tla->la_gid != pla->la_gid))
+ GOTO(unlock, rc = 1);
+
+ /* tla was refilled by dt_attr_get() above; restore the valid flags
+ * before applying the owner to the OST-object. */
+ tla->la_valid = LA_UID | LA_GID;
+ rc = dt_attr_set(env, child, tla, handle, BYPASS_CAPA);
+
+ GOTO(unlock, rc);
+
+unlock:
+ dt_read_unlock(env, parent);
+
+stop:
+ rc = lfsck_layout_trans_stop(env, dev, handle, rc);
+
+ return rc;
+}
+
+/* Check whether the OST-object correctly back points to the
+ * MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
+static int lfsck_layout_check_parent(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct dt_object *parent,
+ const struct lu_fid *pfid,
+ const struct lu_fid *cfid,
+ const struct lu_attr *pla,
+ const struct lu_attr *cla,
+ struct lfsck_layout_req *llr,
+ struct lu_buf *lov_ea, __u32 idx)
+{
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct lu_buf *buf = &info->lti_big_buf;
+ struct dt_object *tobj;
+ struct lov_mds_md_v1 *lmm;
+ struct lov_ost_data_v1 *objs;
+ int rc;
+ int i;
+ __u32 magic;
+ __u16 count;
+ ENTRY;
+
+ /* Returns 0 when the back-pointer is consistent, a positive LLIT_*
+ * inconsistency type when a repairable problem is found, or a
+ * negative errno on failure. */
+ if (fid_is_zero(pfid)) {
+ /* client never wrote. */
+ if (cla->la_size == 0 && cla->la_blocks == 0) {
+ if (unlikely(cla->la_uid != pla->la_uid ||
+ cla->la_gid != pla->la_gid))
+ RETURN (LLIT_INCONSISTENT_OWNER);
+
+ RETURN(0);
+ }
+
+ RETURN(LLIT_UNMATCHED_PAIR);
+ }
+
+ if (unlikely(!fid_is_sane(pfid)))
+ RETURN(LLIT_UNMATCHED_PAIR);
+
+ /* The back-pointer references this very parent; the stripe index
+ * recorded on the OST side must match as well. */
+ if (lu_fid_eq(pfid, lu_object_fid(&parent->do_lu))) {
+ if (llr->llr_lov_idx == idx)
+ RETURN(0);
+
+ RETURN(LLIT_UNMATCHED_PAIR);
+ }
+
+ /* The back-pointer references some other MDT-object. */
+ tobj = lfsck_object_find(env, com->lc_lfsck, pfid);
+ if (tobj == NULL)
+ RETURN(LLIT_UNMATCHED_PAIR);
+
+ if (IS_ERR(tobj))
+ RETURN(PTR_ERR(tobj));
+
+ if (!dt_object_exists(tobj))
+ GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+
+ /* Load the tobj's layout EA, in spite of it is a local MDT-object or
+ * remote one on another MDT. Then check whether the given OST-object
+ * is in such layout. If yes, it is multiple referenced, otherwise it
+ * is unmatched referenced case. */
+ rc = lfsck_layout_get_lovea(env, tobj, buf, NULL);
+ if (rc == 0)
+ GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+
+ if (rc < 0)
+ GOTO(out, rc);
+
+ lmm = buf->lb_buf;
+ rc = lfsck_layout_verify_header(lmm);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
+ * been verified in lfsck_layout_verify_header() already. If some
+ * new magic introduced in the future, then layout LFSCK needs to
+ * be updated also. */
+ magic = le32_to_cpu(lmm->lmm_magic);
+ if (magic == LOV_MAGIC_V1) {
+ objs = &(lmm->lmm_objects[0]);
+ } else {
+ LASSERT(magic == LOV_MAGIC_V3);
+ objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
+ }
+
+ count = le16_to_cpu(lmm->lmm_stripe_count);
+ for (i = 0; i < count; i++, objs++) {
+ struct lu_fid *tfid = &info->lti_fid2;
+ struct ost_id *oi = &info->lti_oi;
+
+ ostid_le_to_cpu(&objs->l_ost_oi, oi);
+ ostid_to_fid(tfid, oi, le32_to_cpu(objs->l_ost_idx));
+ if (lu_fid_eq(cfid, tfid)) {
+ /* Hand the other object's layout EA back to the
+ * caller for the multiple-references repair. */
+ *lov_ea = *buf;
+
+ GOTO(out, rc = LLIT_MULTIPLE_REFERENCED);
+ }
+ }
+
+ GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+
+out:
+ lfsck_object_put(env, tobj);
+
+ return rc;
+}
+
+static int lfsck_layout_assistant_handle_one(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_layout_req *llr)
+{
+ struct lfsck_layout *lo = com->lc_file_ram;
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct filter_fid_old *pea = &info->lti_old_pfid;
+ struct lu_fid *pfid = &info->lti_fid;
+ struct lu_buf *buf = NULL;
+ struct dt_object *parent = llr->llr_parent->llo_obj;
+ struct dt_object *child = llr->llr_child;
+ struct lu_attr *pla = &info->lti_la;
+ struct lu_attr *cla = &info->lti_la2;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+ enum lfsck_layout_inconsistency_type type = LLIT_NONE;
+ __u32 idx = 0;
+ int rc;
+ ENTRY;
+
+ /* If the MDT-object vanished during the scan, there is nothing to
+ * verify for this request. */
+ rc = dt_attr_get(env, parent, pla, BYPASS_CAPA);
+ if (rc != 0) {
+ if (lu_object_is_dying(parent->do_lu.lo_header))
+ RETURN(0);
+
+ GOTO(out, rc);
+ }
+
+ /* Missing OST-object while the parent is alive: dangling reference. */
+ rc = dt_attr_get(env, child, cla, BYPASS_CAPA);
+ if (rc == -ENOENT) {
+ if (lu_object_is_dying(parent->do_lu.lo_header))
+ RETURN(0);
+
+ type = LLIT_DANGLING;
+ goto repair;
+ }
+
+ if (rc != 0)
+ GOTO(out, rc);
+
+ /* Read the OST-object's parent FID ("filter fid") xattr. A size that
+ * matches neither the old nor the new filter_fid layout means the
+ * back-pointer is corrupt => unmatched pair. */
+ buf = lfsck_buf_get(env, pea, sizeof(struct filter_fid_old));
+ rc= dt_xattr_get(env, child, buf, XATTR_NAME_FID, BYPASS_CAPA);
+ if (unlikely(rc >= 0 && rc != sizeof(struct filter_fid_old) &&
+ rc != sizeof(struct filter_fid))) {
+ type = LLIT_UNMATCHED_PAIR;
+ goto repair;
+ }
+
+ if (rc < 0 && rc != -ENODATA)
+ GOTO(out, rc);
+
+ if (rc == -ENODATA) {
+ fid_zero(pfid);
+ } else {
+ fid_le_to_cpu(pfid, &pea->ff_parent);
+ /* OST-object does not save parent FID::f_ver, instead,
+ * the OST-object index in the parent MDT-object layout
+ * EA reuses the pfid->f_ver. */
+ idx = pfid->f_ver;
+ pfid->f_ver = 0;
+ }
+
+ rc = lfsck_layout_check_parent(env, com, parent, pfid,
+ lu_object_fid(&child->do_lu),
+ pla, cla, llr, buf, idx);
+ if (rc > 0) {
+ type = rc;
+ goto repair;
+ }
+
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (unlikely(cla->la_uid != pla->la_uid ||
+ cla->la_gid != pla->la_gid)) {
+ type = LLIT_INCONSISTENT_OWNER;
+ goto repair;
+ }
+
+repair:
+ /* In dry-run mode only account the found inconsistency (rc = 1),
+ * do not really repair it. */
+ if (bk->lb_param & LPF_DRYRUN) {
+ if (type != LLIT_NONE)
+ GOTO(out, rc = 1);
 else
- lo->ll_status = LS_COMPLETED;
- if (!(bk->lb_param & LPF_DRYRUN))
- lo->ll_flags &= ~(LF_SCANNED_ONCE | LF_INCONSISTENT);
- lo->ll_time_last_complete = lo->ll_time_last_checkpoint;
- lo->ll_success_count++;
- } else if (rc == 0) {
- lo->ll_status = lfsck->li_status;
- if (lo->ll_status == 0)
- lo->ll_status = LS_STOPPED;
- } else {
- lo->ll_status = LS_FAILED;
+ GOTO(out, rc = 0);
 }
- if (lo->ll_status != LS_PAUSED) {
- spin_lock(&lfsck->li_lock);
- list_del_init(&com->lc_link);
- list_add_tail(&com->lc_link, &lfsck->li_list_idle);
- spin_unlock(&lfsck->li_lock);
+ /* Dispatch to the type-specific repair routine; positive rc from
+ * the repair means the inconsistency was really fixed. */
+ switch (type) {
+ case LLIT_DANGLING:
+ memset(cla, 0, sizeof(*cla));
+ cla->la_uid = pla->la_uid;
+ cla->la_gid = pla->la_gid;
+ cla->la_mode = S_IFREG | 0666;
+ cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
+ LA_ATIME | LA_MTIME | LA_CTIME;
+ rc = lfsck_layout_recreate_ostobj(env, com, llr, cla);
+ break;
+ case LLIT_UNMATCHED_PAIR:
+ rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
+ break;
+ case LLIT_MULTIPLE_REFERENCED:
+ rc = lfsck_layout_repair_multiple_references(env, com, llr,
+ pla, buf);
+ break;
+ case LLIT_INCONSISTENT_OWNER:
+ rc = lfsck_layout_repair_owner(env, com, llr, pla);
+ break;
+ default:
+ rc = 0;
+ break;
 }
- rc = lfsck_layout_store(env, com);
+ GOTO(out, rc);
+
+out:
+ down_write(&com->lc_sem);
+ if (rc < 0) {
+ /* If cannot touch the target server,
+ * mark the LFSCK as INCOMPLETE. */
+ if (rc == -ENOTCONN || rc == -ESHUTDOWN || rc == -ETIMEDOUT ||
+ rc == -EHOSTDOWN || rc == -EHOSTUNREACH) {
+ lo->ll_flags |= LF_INCOMPLETE;
+ lo->ll_objs_skipped++;
+ rc = 0;
+ } else {
+ lo->ll_objs_failed_phase1++;
+ }
+ } else if (rc > 0) {
+ LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
+ "unknown type = %d\n", type);
+ lo->ll_objs_repaired[type - 1]++;
+ }
 up_write(&com->lc_sem);
 return rc;
 }
-static int lfsck_layout_scan_orphan(const struct lu_env *env,
- struct lfsck_component *com,
- struct lfsck_tgt_desc *ltd)
-{
- /* XXX: To be extended in other patch. */
-
- return 0;
-}
-
static int lfsck_layout_assistant(void *args)
{
struct lfsck_thread_args *lta = args;
struct l_wait_info lwi = { 0 };
int rc = 0;
int rc1 = 0;
+ __u32 flags;
ENTRY;
+ if (lta->lta_lsp->lsp_start != NULL)
+ flags = lta->lta_lsp->lsp_start->ls_flags;
+ else
+ flags = bk->lb_param;
memset(lr, 0, sizeof(*lr));
lr->lr_event = LE_START;
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
if (pos->lp_oit_cookie <= 1)
lr->lr_param |= LPF_RESET;
- rc = lfsck_layout_master_notify_others(env, com, lr);
+ rc = lfsck_layout_master_notify_others(env, com, lr, flags);
if (rc != 0) {
CERROR("%s: fail to notify others for layout start: rc = %d\n",
lfsck_lfsck2name(lfsck), rc);
while (!list_empty(&llmd->llmd_req_list)) {
bool wakeup = false;
- l_wait_event(athread->t_ctl_waitq,
- bk->lb_async_windows == 0 ||
- atomic_read(&llmd->llmd_rpcs_in_flight) <
- bk->lb_async_windows ||
- llmd->llmd_exit,
- &lwi);
-
if (unlikely(llmd->llmd_exit))
GOTO(cleanup1, rc = llmd->llmd_post_result);
- /* XXX: To be extended in other patch.
- *
- * Compare the OST side attribute with local attribute,
- * and fix it if found inconsistency. */
-
- spin_lock(&llmd->llmd_lock);
llr = list_entry(llmd->llmd_req_list.next,
struct lfsck_layout_req,
llr_list);
+ /* Only the lfsck_layout_assistant thread itself can
+ * remove the "llr" from the head of the list; the LFSCK
+ * engine thread only inserts new "llr" entries at the
+ * end of the list. So it is safe to handle the current
+ * "llr" without holding the spin_lock. */
+ rc = lfsck_layout_assistant_handle_one(env, com, llr);
+ spin_lock(&llmd->llmd_lock);
list_del_init(&llr->llr_list);
if (bk->lb_async_windows != 0 &&
llmd->llmd_prefetched >= bk->lb_async_windows)
wake_up_all(&mthread->t_ctl_waitq);
lfsck_layout_req_fini(env, llr);
+ if (rc < 0 && bk->lb_param & LPF_FAILOUT)
+ GOTO(cleanup1, rc);
}
/* Wakeup the master engine if it is waiting in checkpoint. */
- if (atomic_read(&llmd->llmd_rpcs_in_flight) == 0)
- wake_up_all(&mthread->t_ctl_waitq);
+ wake_up_all(&mthread->t_ctl_waitq);
l_wait_event(athread->t_ctl_waitq,
!lfsck_layout_req_empty(llmd) ||
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
lr->lr_event = LE_PHASE1_DONE;
lr->lr_status = llmd->llmd_post_result;
- rc = lfsck_layout_master_notify_others(env, com, lr);
+ rc = lfsck_layout_master_notify_others(env, com, lr, 0);
if (rc != 0)
CERROR("%s: failed to notify others "
"for layout post: rc = %d\n",
cleanup1:
/* Cleanup the unfinished requests. */
spin_lock(&llmd->llmd_lock);
+ if (rc < 0)
+ llmd->llmd_assistant_status = rc;
+
while (!list_empty(&llmd->llmd_req_list)) {
llr = list_entry(llmd->llmd_req_list.next,
struct lfsck_layout_req,
LASSERTF(llmd->llmd_prefetched == 0, "unmatched prefeteched objs %d\n",
llmd->llmd_prefetched);
- l_wait_event(athread->t_ctl_waitq,
- atomic_read(&llmd->llmd_rpcs_in_flight) == 0,
- &lwi);
-
cleanup2:
memset(lr, 0, sizeof(*lr));
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
if (rc > 0) {
lr->lr_event = LE_PHASE2_DONE;
+ flags = 0;
lr->lr_status = rc;
} else if (rc == 0) {
lr->lr_event = LE_STOP;
if (lfsck->li_status == LS_PAUSED ||
- lfsck->li_status == LS_CO_PAUSED)
+ lfsck->li_status == LS_CO_PAUSED) {
+ flags = 0;
lr->lr_status = LS_CO_PAUSED;
- else if (lfsck->li_status == LS_STOPPED ||
- lfsck->li_status == LS_CO_STOPPED)
- lr->lr_status = LS_CO_STOPPED;
- else
+ } else if (lfsck->li_status == LS_STOPPED ||
+ lfsck->li_status == LS_CO_STOPPED) {
+ flags = lfsck->li_flags;
+ if (flags & LPF_BROADCAST)
+ lr->lr_status = LS_STOPPED;
+ else
+ lr->lr_status = LS_CO_STOPPED;
+ } else {
LBUG();
+ }
} else {
lr->lr_event = LE_STOP;
+ flags = 0;
lr->lr_status = LS_CO_FAILED;
}
- rc1 = lfsck_layout_master_notify_others(env, com, lr);
+ rc1 = lfsck_layout_master_notify_others(env, com, lr, flags);
if (rc1 != 0) {
CERROR("%s: failed to notify others for layout quit: rc = %d\n",
lfsck_lfsck2name(lfsck), rc1);
memset(lr, 0, sizeof(*lr));
lr->lr_event = event;
+ lr->lr_flags = LEF_FROM_OST;
lr->lr_status = result;
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
lr->lr_active = LT_LAYOUT;
return 0;
l_wait_event(mthread->t_ctl_waitq,
- (list_empty(&llmd->llmd_req_list) &&
- atomic_read(&llmd->llmd_rpcs_in_flight) == 0) ||
+ list_empty(&llmd->llmd_req_list) ||
!thread_is_running(mthread) ||
thread_is_stopped(athread),
&lwi);
struct lfsck_layout_slave_data *llsd = com->lc_data;
int rc;
- /* XXX: For a new scanning, generate OST-objects
- * bitmap for orphan detection. */
-
rc = lfsck_layout_prep(env, com);
if (rc != 0 || lo->ll_status != LS_SCANNING_PHASE1 ||
!lsp->lsp_index_valid)
return rc;
rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
+ if (rc == 0 && !(lo->ll_flags & LF_INCOMPLETE)) {
+ LASSERT(!llsd->llsd_rbtree_valid);
+
+ write_lock(&llsd->llsd_rb_lock);
+ rc = lfsck_rbtree_setup(env, com);
+ write_unlock(&llsd->llsd_rb_lock);
+ }
return rc;
}
RETURN(rc);
}
+/* Pre-fetch the attribute for each stripe in the given layout EA. */
+static int lfsck_layout_scan_stripes(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct dt_object *parent,
+ struct lov_mds_md_v1 *lmm)
+{
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+ struct lfsck_layout *lo = com->lc_file_ram;
+ struct lfsck_layout_master_data *llmd = com->lc_data;
+ struct lfsck_layout_object *llo = NULL;
+ struct lov_ost_data_v1 *objs;
+ struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
+ struct ptlrpc_thread *mthread = &lfsck->li_thread;
+ struct ptlrpc_thread *athread = &llmd->llmd_thread;
+ struct l_wait_info lwi = { 0 };
+ struct lu_buf *buf;
+ int rc = 0;
+ int i;
+ __u32 magic;
+ __u16 count;
+ __u16 gen;
+ ENTRY;
+
+ buf = lfsck_buf_get(env, &info->lti_old_pfid,
+ sizeof(struct filter_fid_old));
+ count = le16_to_cpu(lmm->lmm_stripe_count);
+ gen = le16_to_cpu(lmm->lmm_layout_gen);
+ /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
+ * been verified in lfsck_layout_verify_header() already. If some
+ * new magic introduced in the future, then layout LFSCK needs to
+ * be updated also. */
+ magic = le32_to_cpu(lmm->lmm_magic);
+ if (magic == LOV_MAGIC_V1) {
+ objs = &(lmm->lmm_objects[0]);
+ } else {
+ LASSERT(magic == LOV_MAGIC_V3);
+ objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
+ }
+
+ for (i = 0; i < count; i++, objs++) {
+ struct lu_fid *fid = &info->lti_fid;
+ struct ost_id *oi = &info->lti_oi;
+ struct lfsck_layout_req *llr;
+ struct lfsck_tgt_desc *tgt = NULL;
+ struct dt_object *cobj = NULL;
+ __u32 index =
+ le32_to_cpu(objs->l_ost_idx);
+ bool wakeup = false;
+
+ /* Throttle pre-fetching: wait until the async window has
+ * room for more requests, or the scan is being stopped. */
+ l_wait_event(mthread->t_ctl_waitq,
+ bk->lb_async_windows == 0 ||
+ llmd->llmd_prefetched < bk->lb_async_windows ||
+ !thread_is_running(mthread) ||
+ thread_is_stopped(athread),
+ &lwi);
+
+ if (unlikely(!thread_is_running(mthread)) ||
+ thread_is_stopped(athread))
+ GOTO(out, rc = 0);
+
+ ostid_le_to_cpu(&objs->l_ost_oi, oi);
+ ostid_to_fid(fid, oi, index);
+ tgt = lfsck_tgt_get(ltds, index);
+ if (unlikely(tgt == NULL)) {
+ /* Unknown OST index: the scan cannot cover all
+ * stripes, mark the LFSCK as incomplete. */
+ lo->ll_flags |= LF_INCOMPLETE;
+ goto next;
+ }
+
+ cobj = lfsck_object_find_by_dev(env, tgt->ltd_tgt, fid);
+ if (IS_ERR(cobj)) {
+ rc = PTR_ERR(cobj);
+ goto next;
+ }
+
+ rc = dt_declare_attr_get(env, cobj, BYPASS_CAPA);
+ if (rc != 0)
+ goto next;
+
+ rc = dt_declare_xattr_get(env, cobj, buf, XATTR_NAME_FID,
+ BYPASS_CAPA);
+ if (rc != 0)
+ goto next;
+
+ /* Lazily create the shared parent wrapper on the first
+ * usable stripe; all requests for this file reference it. */
+ if (llo == NULL) {
+ llo = lfsck_layout_object_init(env, parent, gen);
+ if (IS_ERR(llo)) {
+ rc = PTR_ERR(llo);
+ goto next;
+ }
+ }
+
+ llr = lfsck_layout_req_init(llo, cobj, index, i);
+ if (IS_ERR(llr)) {
+ rc = PTR_ERR(llr);
+ goto next;
+ }
+
+ /* The request now owns the cobj reference; prevent the
+ * put in the "next" cleanup below. */
+ cobj = NULL;
+ spin_lock(&llmd->llmd_lock);
+ if (llmd->llmd_assistant_status < 0) {
+ spin_unlock(&llmd->llmd_lock);
+ lfsck_layout_req_fini(env, llr);
+ lfsck_tgt_put(tgt);
+ RETURN(llmd->llmd_assistant_status);
+ }
+
+ list_add_tail(&llr->llr_list, &llmd->llmd_req_list);
+ if (llmd->llmd_prefetched == 0)
+ wakeup = true;
+
+ llmd->llmd_prefetched++;
+ spin_unlock(&llmd->llmd_lock);
+ if (wakeup)
+ wake_up_all(&athread->t_ctl_waitq);
+
+next:
+ /* Account this stripe as checked; count a failure if rc < 0. */
+ down_write(&com->lc_sem);
+ com->lc_new_checked++;
+ if (rc < 0)
+ lo->ll_objs_failed_phase1++;
+ up_write(&com->lc_sem);
+
+ if (cobj != NULL && !IS_ERR(cobj))
+ lu_object_put(env, &cobj->do_lu);
+
+ if (likely(tgt != NULL))
+ lfsck_tgt_put(tgt);
+
+ if (rc < 0 && bk->lb_param & LPF_FAILOUT)
+ GOTO(out, rc);
+ }
+
+ GOTO(out, rc = 0);
+
+out:
+ if (llo != NULL && !IS_ERR(llo))
+ lfsck_layout_object_put(env, llo);
+
+ return rc;
+}
+
+/* For the given object, read its layout EA locally. For each stripe, pre-fetch
+ * the OST-object's attribute and generate a structure lfsck_layout_req on the
+ * list ::llmd_req_list.
+ *
+ * For each request on the above list, the lfsck_layout_assistant thread
+ * compares the OST side attribute with the local attribute and, if they are
+ * inconsistent, repairs it.
+ *
+ * All above processing is async mode with pipeline. */
static int lfsck_layout_master_exec_oit(const struct lu_env *env,
 struct lfsck_component *com,
 struct dt_object *obj)
{
- /* XXX: To be implemented in other patches.
- *
- * For the given object, read its layout EA locally. For each stripe,
- * pre-fetch the OST-object's attribute and generate an structure
- * lfsck_layout_req on the list ::llmd_req_list.
- *
- * For each request on the ::llmd_req_list, the lfsck_layout_assistant
- * thread will compare the OST side attribute with local attribute,
- * if inconsistent, then repair it.
- *
- * All above processing is async mode with pipeline. */
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct ost_id *oi = &info->lti_oi;
+ struct lfsck_layout *lo = com->lc_file_ram;
+ struct lfsck_layout_master_data *llmd = com->lc_data;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+ struct thandle *handle = NULL;
+ struct lu_buf *buf = &info->lti_big_buf;
+ struct lov_mds_md_v1 *lmm = NULL;
+ struct dt_device *dev = lfsck->li_bottom;
+ struct lustre_handle lh = { 0 };
+ ssize_t buflen = buf->lb_len;
+ int rc = 0;
+ bool locked = false;
+ bool stripe = false;
+ ENTRY;
- return 0;
+ /* Layout verification only applies to regular files. */
+ if (!S_ISREG(lfsck_object_type(obj)))
+ GOTO(out, rc = 0);
+
+ if (llmd->llmd_assistant_status < 0)
+ GOTO(out, rc = -ESRCH);
+
+ fid_to_lmm_oi(lfsck_dto2fid(obj), oi);
+ lmm_oi_cpu_to_le(oi, oi);
+ dt_read_lock(env, obj, 0);
+ locked = true;
+
+ /* We may jump back here after upgrading to the write lock below. */
+again:
+ rc = lfsck_layout_get_lovea(env, obj, buf, &buflen);
+ if (rc <= 0)
+ GOTO(out, rc);
+
+ buf->lb_len = rc;
+ lmm = buf->lb_buf;
+ rc = lfsck_layout_verify_header(lmm);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ /* The stored lmm_oi matches the object's FID: no repair needed,
+ * go pre-fetch the stripes. */
+ if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) == 0)
+ GOTO(out, stripe = true);
+
+ /* Inconsistent lmm_oi, should be repaired. */
+ CDEBUG(D_LFSCK, "Repair bad lmm_oi for "DFID"\n",
+ PFID(lfsck_dto2fid(obj)));
+
+ if (bk->lb_param & LPF_DRYRUN) {
+ down_write(&com->lc_sem);
+ lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
+ up_write(&com->lc_sem);
+
+ GOTO(out, stripe = true);
+ }
+
+ /* First pass: take the layout ldlm lock and open a transaction,
+ * then re-check the EA under the object write lock (goto again). */
+ if (!lustre_handle_is_used(&lh)) {
+ dt_read_unlock(env, obj);
+ locked = false;
+ buf->lb_len = buflen;
+ rc = lfsck_layout_lock(env, com, obj, &lh,
+ MDS_INODELOCK_LAYOUT |
+ MDS_INODELOCK_XATTR);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ handle = dt_trans_create(env, dev);
+ if (IS_ERR(handle))
+ GOTO(out, rc = PTR_ERR(handle));
+
+ rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LOV,
+ LU_XATTR_REPLACE, handle);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ rc = dt_trans_start_local(env, dev, handle);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ dt_write_lock(env, obj, 0);
+ locked = true;
+
+ goto again;
+ }
+
+ lmm->lmm_oi = *oi;
+ rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LOV,
+ LU_XATTR_REPLACE, handle, BYPASS_CAPA);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ down_write(&com->lc_sem);
+ lo->ll_objs_repaired[LLIT_OTHERS - 1]++;
+ up_write(&com->lc_sem);
+
+ GOTO(out, stripe = true);
+
+out:
+ if (locked) {
+ if (lustre_handle_is_used(&lh))
+ dt_write_unlock(env, obj);
+ else
+ dt_read_unlock(env, obj);
+ }
+
+ if (handle != NULL && !IS_ERR(handle))
+ dt_trans_stop(env, dev, handle);
+
+ lfsck_layout_unlock(&lh);
+ /* With a valid layout EA, pre-fetch each stripe's OST-object;
+ * otherwise just account the object as checked/failed. */
+ if (stripe) {
+ rc = lfsck_layout_scan_stripes(env, com, obj, lmm);
+ } else {
+ down_write(&com->lc_sem);
+ com->lc_new_checked++;
+ if (rc < 0)
+ lo->ll_objs_failed_phase1++;
+ up_write(&com->lc_sem);
+ }
+ buf->lb_len = buflen;
+
+ return rc;
}
static int lfsck_layout_slave_exec_oit(const struct lu_env *env,
int rc;
ENTRY;
- /* XXX: Update OST-objects bitmap for orphan detection. */
-
LASSERT(llsd != NULL);
+ lfsck_rbtree_update_bitmap(env, com, fid, false);
+
down_write(&com->lc_sem);
if (fid_is_idif(fid))
seq = 0;
wake_up_all(&athread->t_ctl_waitq);
l_wait_event(mthread->t_ctl_waitq,
- (result > 0 && list_empty(&llmd->llmd_req_list) &&
- atomic_read(&llmd->llmd_rpcs_in_flight) == 0) ||
+ (result > 0 && list_empty(&llmd->llmd_req_list)) ||
thread_is_stopped(athread),
&lwi);
lfsck_layout_slave_notify_master(env, com, LE_PHASE1_DONE, result);
+ if (result <= 0)
+ lfsck_rbtree_cleanup(env, com);
+
return rc;
}
int rc;
ENTRY;
- if (unlikely(lo->ll_status != LS_SCANNING_PHASE2))
+ if (unlikely(lo->ll_status != LS_SCANNING_PHASE2)) {
+ lfsck_rbtree_cleanup(env, com);
RETURN(0);
+ }
atomic_inc(&lfsck->li_double_scan_count);
done:
rc = lfsck_layout_double_scan_result(env, com, rc);
+ lfsck_rbtree_cleanup(env, com);
if (atomic_dec_and_test(&lfsck->li_double_scan_count))
wake_up_all(&lfsck->li_thread.t_ctl_waitq);
LASSERT(thread_is_init(&llmd->llmd_thread) ||
thread_is_stopped(&llmd->llmd_thread));
LASSERT(list_empty(&llmd->llmd_req_list));
- LASSERT(atomic_read(&llmd->llmd_rpcs_in_flight) == 0);
com->lc_data = NULL;
ltd_layout_list) {
list_del_init(<d->ltd_layout_list);
}
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase1_list,
+ ltd_layout_phase_list) {
+ list_del_init(<d->ltd_layout_phase_list);
+ }
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_phase2_list,
+ ltd_layout_phase_list) {
+ list_del_init(<d->ltd_layout_phase_list);
+ }
+ list_for_each_entry_safe(ltd, next, &llmd->llmd_mdt_list,
+ ltd_layout_list) {
+ list_del_init(<d->ltd_layout_list);
+ }
spin_unlock(<ds->ltd_lock);
OBD_FREE_PTR(llmd);
LASSERT(llsd != NULL);
- com->lc_data = NULL;
-
list_for_each_entry_safe(lls, next, &llsd->llsd_seq_list,
lls_list) {
list_del_init(&lls->lls_list);
OBD_FREE_PTR(llst);
}
+ lfsck_rbtree_cleanup(env, com);
+ com->lc_data = NULL;
OBD_FREE_PTR(llsd);
}
&lwi);
}
+/* Quit callback on an LFSCK slave: release the rbtree-based FID bitmap
+ * state (see lfsck_rbtree_update_bitmap()/lfsck_rbtree_cleanup()). */
+static void lfsck_layout_slave_quit(const struct lu_env *env,
+ struct lfsck_component *com)
+{
+ lfsck_rbtree_cleanup(env, com);
+}
+
static int lfsck_layout_master_in_notify(const struct lu_env *env,
struct lfsck_component *com,
struct lfsck_request *lr)
struct lfsck_tgt_desc *ltd;
ENTRY;
- if (lr->lr_event != LE_PHASE1_DONE)
+ if (lr->lr_event != LE_PHASE1_DONE &&
+ lr->lr_event != LE_PHASE2_DONE &&
+ lr->lr_event != LE_STOP)
RETURN(-EINVAL);
- ltds = &lfsck->li_ost_descs;
+ if (lr->lr_flags & LEF_FROM_OST)
+ ltds = &lfsck->li_ost_descs;
+ else
+ ltds = &lfsck->li_mdt_descs;
spin_lock(<ds->ltd_lock);
ltd = LTD_TGT(ltds, lr->lr_index);
if (ltd == NULL) {
}
list_del_init(<d->ltd_layout_phase_list);
- if (lr->lr_status > 0) {
- if (list_empty(<d->ltd_layout_list))
- list_add_tail(<d->ltd_layout_list,
- &llmd->llmd_ost_list);
- list_add_tail(<d->ltd_layout_phase_list,
- &llmd->llmd_ost_phase2_list);
- } else {
+ switch (lr->lr_event) {
+ case LE_PHASE1_DONE:
+ if (lr->lr_status <= 0) {
+ ltd->ltd_layout_done = 1;
+ list_del_init(<d->ltd_layout_list);
+ lo->ll_flags |= LF_INCOMPLETE;
+ break;
+ }
+
+ if (lr->lr_flags & LEF_FROM_OST) {
+ if (list_empty(<d->ltd_layout_list))
+ list_add_tail(<d->ltd_layout_list,
+ &llmd->llmd_ost_list);
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_ost_phase2_list);
+ } else {
+ if (list_empty(<d->ltd_layout_list))
+ list_add_tail(<d->ltd_layout_list,
+ &llmd->llmd_mdt_list);
+ list_add_tail(<d->ltd_layout_phase_list,
+ &llmd->llmd_mdt_phase2_list);
+ }
+ break;
+ case LE_PHASE2_DONE:
ltd->ltd_layout_done = 1;
list_del_init(<d->ltd_layout_list);
- lo->ll_flags |= LF_INCOMPLETE;
+ break;
+ case LE_STOP:
+ ltd->ltd_layout_done = 1;
+ list_del_init(<d->ltd_layout_list);
+ if (!(lr->lr_flags & LEF_FORCE_STOP))
+ lo->ll_flags |= LF_INCOMPLETE;
+ break;
+ default:
+ break;
}
spin_unlock(<ds->ltd_lock);
- if (lfsck_layout_master_to_orphan(llmd))
+ if (lr->lr_flags & LEF_FORCE_STOP) {
+ struct lfsck_stop *stop = &lfsck_env_info(env)->lti_stop;
+
+ memset(stop, 0, sizeof(*stop));
+ stop->ls_status = lr->lr_status;
+ stop->ls_flags = lr->lr_param;
+ lfsck_stop(env, lfsck->li_bottom, stop);
+ } else if (lfsck_layout_master_to_orphan(llmd)) {
wake_up_all(&llmd->llmd_thread.t_ctl_waitq);
+ }
RETURN(0);
}
struct lfsck_layout_slave_target *llst;
ENTRY;
+ if (lr->lr_event == LE_FID_ACCESSED) {
+ lfsck_rbtree_update_bitmap(env, com, &lr->lr_fid, true);
+
+ RETURN(0);
+ }
+
if (lr->lr_event != LE_PHASE2_DONE &&
lr->lr_event != LE_STOP)
RETURN(-EINVAL);
lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
lr->lr_event = LE_STOP;
lr->lr_active = LT_LAYOUT;
+ if (ltds == &lfsck->li_ost_descs) {
+ lr->lr_flags = LEF_TO_OST;
+ } else {
+ if (ltd->ltd_index == lfsck_dev_idx(lfsck->li_bottom))
+ return 0;
+
+ lr->lr_flags = 0;
+ }
lr->lr_status = LS_CO_STOPPED;
laia->laia_com = com;
lfsck_layout_master_async_interpret,
laia, LFSCK_NOTIFY);
if (rc != 0)
- CERROR("%s: Fail to notify OST %x for stop: rc = %d\n",
- lfsck_lfsck2name(lfsck), ltd->ltd_index, rc);
+ CERROR("%s: Fail to notify %s %x for co-stop: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ (lr->lr_flags & LEF_TO_OST) ? "OST" : "MDT",
+ ltd->ltd_index, rc);
return rc;
}
+/* with lfsck::li_lock held */
+static int lfsck_layout_slave_join(const struct lu_env *env,
+ struct lfsck_component *com,
+ struct lfsck_start_param *lsp)
+{
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct lfsck_layout_slave_target *llst;
+ struct lfsck_start *start = lsp->lsp_start;
+ int rc = 0;
+ ENTRY;
+
+ /* Only an LFSCK started on all MDTs (LPF_ALL_MDT) with a valid
+ * requester index can be joined; otherwise return -EALREADY
+ * (presumably the caller falls back to a normal start -- TODO
+ * confirm against the lfsck_join caller). */
+ if (!lsp->lsp_index_valid || start == NULL ||
+ !(start->ls_flags & LPF_ALL_MDT))
+ RETURN(-EALREADY);
+
+ /* llst_add() presumably may block/allocate, so drop li_lock
+ * (held by the caller, see comment above) around the call. */
+ spin_unlock(&lfsck->li_lock);
+ rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
+ spin_lock(&lfsck->li_lock);
+ if (rc == 0 && !thread_is_running(&lfsck->li_thread)) {
+ /* The LFSCK stopped while li_lock was dropped: undo the
+ * registration and ask the caller to retry. */
+ spin_unlock(&lfsck->li_lock);
+ llst = lfsck_layout_llst_find_and_del(llsd, lsp->lsp_index);
+ if (llst != NULL)
+ lfsck_layout_llst_put(llst);
+ spin_lock(&lfsck->li_lock);
+ rc = -EAGAIN;
+ }
+
+ RETURN(rc);
+}
+
static struct lfsck_operations lfsck_layout_master_ops = {
.lfsck_reset = lfsck_layout_reset,
.lfsck_fail = lfsck_layout_fail,
.lfsck_dump = lfsck_layout_dump,
.lfsck_double_scan = lfsck_layout_slave_double_scan,
.lfsck_data_release = lfsck_layout_slave_data_release,
+ .lfsck_quit = lfsck_layout_slave_quit,
.lfsck_in_notify = lfsck_layout_slave_in_notify,
.lfsck_query = lfsck_layout_query,
+ .lfsck_join = lfsck_layout_slave_join,
};
int lfsck_layout_setup(const struct lu_env *env, struct lfsck_instance *lfsck)
GOTO(out, rc = -ENOMEM);
INIT_LIST_HEAD(&llmd->llmd_req_list);
+ spin_lock_init(&llmd->llmd_lock);
INIT_LIST_HEAD(&llmd->llmd_ost_list);
INIT_LIST_HEAD(&llmd->llmd_ost_phase1_list);
INIT_LIST_HEAD(&llmd->llmd_ost_phase2_list);
- spin_lock_init(&llmd->llmd_lock);
+ INIT_LIST_HEAD(&llmd->llmd_mdt_list);
+ INIT_LIST_HEAD(&llmd->llmd_mdt_phase1_list);
+ INIT_LIST_HEAD(&llmd->llmd_mdt_phase2_list);
init_waitqueue_head(&llmd->llmd_thread.t_ctl_waitq);
- atomic_set(&llmd->llmd_rpcs_in_flight, 0);
com->lc_data = llmd;
} else {
struct lfsck_layout_slave_data *llsd;
INIT_LIST_HEAD(&llsd->llsd_seq_list);
INIT_LIST_HEAD(&llsd->llsd_master_list);
spin_lock_init(&llsd->llsd_lock);
+ llsd->llsd_rb_root = RB_ROOT;
+ rwlock_init(&llsd->llsd_rb_lock);
com->lc_data = llsd;
}
com->lc_file_size = sizeof(*lo);