static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
{
__u32 magic;
- __u32 patten;
+ __u32 pattern;
magic = le32_to_cpu(lmm->lmm_magic);
/* If magic crashed, keep it there. Sometime later, during OST-object
* orphan handling, if some OST-object(s) back-point to it, it can be
* verified and repaired. */
- if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
- return -EINVAL;
+ if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3) {
+ struct ost_id oi;
+ int rc;
+
+ lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
+ if ((magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
+ rc = -EOPNOTSUPP;
+ else
+ rc = -EINVAL;
+
+ CDEBUG(D_LFSCK, "%s LOV EA magic %u on "DOSTID"\n",
+ rc == -EINVAL ? "Unknown" : "Unsupported",
+ magic, POSTID(&oi));
+
+ return rc;
+ }
- patten = le32_to_cpu(lmm->lmm_pattern);
+ pattern = le32_to_cpu(lmm->lmm_pattern);
/* XXX: currently, we only support LOV_PATTERN_RAID0. */
- if (patten != LOV_PATTERN_RAID0)
+ if (lov_pattern(pattern) != LOV_PATTERN_RAID0) {
+ struct ost_id oi;
+
+ lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
+ CDEBUG(D_LFSCK, "Unsupported LOV EA pattern %u on "DOSTID"\n",
+ pattern, POSTID(&oi));
+
return -EOPNOTSUPP;
+ }
return 0;
}
#define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
#define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
-#define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_SIZE - 1)
+#define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1)
struct lfsck_rbtree_node {
struct rb_node lrn_node;
if (oid < lrn->lrn_first_oid)
return -1;
- if (oid >= lrn->lrn_first_oid + LFSCK_RBTREE_BITMAP_WIDTH)
+ if (oid - lrn->lrn_first_oid >= LFSCK_RBTREE_BITMAP_WIDTH)
return 1;
return 0;
lfsck_rbtree_insert(struct lfsck_layout_slave_data *llsd,
struct lfsck_rbtree_node *lrn)
{
- struct rb_node **pos = &(llsd->llsd_rb_root.rb_node);
+ struct rb_node **pos = &llsd->llsd_rb_root.rb_node;
struct rb_node *parent = NULL;
struct lfsck_rbtree_node *tmp;
int rc;
- while (*pos) {
+ while (*pos != NULL) {
parent = *pos;
- tmp = rb_entry(*pos, struct lfsck_rbtree_node, lrn_node);
+ tmp = rb_entry(parent, struct lfsck_rbtree_node, lrn_node);
rc = lfsck_rbtree_cmp(tmp, lrn->lrn_seq, lrn->lrn_first_oid);
if (rc < 0)
- pos = &((*pos)->rb_left);
+ pos = &(*pos)->rb_left;
else if (rc > 0)
- pos = &((*pos)->rb_right);
+ pos = &(*pos)->rb_right;
else
return tmp;
}
}
}
-static inline bool is_dummy_lov_ost_data(struct lov_ost_data_v1 *obj)
-{
- if (fid_is_zero(&obj->l_ost_oi.oi_fid) &&
- obj->l_ost_gen == 0 && obj->l_ost_idx == 0)
- return true;
-
- return false;
-}
-
static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
const struct lfsck_layout *src)
{
return rc;
}
+static void lfsck_layout_record_failure(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ struct lfsck_layout *lo)
+{
+ lo->ll_objs_failed_phase1++;
+ if (unlikely(lo->ll_pos_first_inconsistent == 0))
+ lo->ll_pos_first_inconsistent =
+ lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
+ lfsck->li_di_oit);
+}
+
static int lfsck_layout_master_async_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
void *args, int rc)
struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
down_write(&com->lc_sem);
-
lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
HALF_SEC - lfsck->li_time_last_checkpoint);
lo->ll_time_last_checkpoint = cfs_time_current_sec();
lo->ll_status = LS_FAILED;
}
- if (lo->ll_status != LS_PAUSED) {
- spin_lock(&lfsck->li_lock);
- list_del_init(&com->lc_link);
- list_add_tail(&com->lc_link, &lfsck->li_list_idle);
- spin_unlock(&lfsck->li_lock);
- }
-
rc = lfsck_layout_store(env, com);
-
up_write(&com->lc_sem);
return rc;
}
/**
+ * Get the system default stripe size.
+ *
+ * \param[in] env pointer to the thread context
+ * \param[in] lfsck pointer to the lfsck instance
+ * \param[out] size pointer to the default stripe size
+ *
+ * \retval 0 for success
+ * \retval negative error number on failure
+ */
+static int lfsck_layout_get_def_stripesize(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ __u32 *size)
+{
+ struct lov_user_md *lum = &lfsck_env_info(env)->lti_lum;
+ struct dt_object *root;
+ int rc;
+
+ root = dt_locate(env, lfsck->li_next, &lfsck->li_local_root_fid);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ /* Get the default stripe size via xattr_get on the backend root. */
+ rc = dt_xattr_get(env, root, lfsck_buf_get(env, lum, sizeof(*lum)),
+ XATTR_NAME_LOV, BYPASS_CAPA);
+ if (rc > 0) {
+ /* The lum->lmm_stripe_size is LE mode. The *size also
+ * should be LE mode. So it is unnecessary to convert. */
+ *size = lum->lmm_stripe_size;
+ rc = 0;
+ } else if (unlikely(rc == 0)) {
+ rc = -EINVAL;
+ }
+
+ lfsck_object_put(env, root);
+
+ return rc;
+}
+
+/**
* \retval +1: repaired
* \retval 0: did nothing
* \retval -ve: on error
struct lov_ost_data_v1 *slot,
int fl, __u32 ost_idx)
{
- struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
- int rc;
+ struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
+ struct lov_mds_md_v1 *lmm = buf->lb_buf;
+ int rc;
fid_to_ostid(cfid, oi);
ostid_cpu_to_le(oi, &slot->l_ost_oi);
slot->l_ost_gen = cpu_to_le32(0);
slot->l_ost_idx = cpu_to_le32(ost_idx);
+
+ if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE) {
+ struct lov_ost_data_v1 *objs;
+ int i;
+ __u16 count;
+
+ count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
+ objs = &lmm->lmm_objects[0];
+ else
+ objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
+ for (i = 0; i < count; i++, objs++) {
+ if (objs != slot && lovea_slot_is_dummy(objs))
+ break;
+ }
+
+ /* If the @slot is the last dummy slot to be refilled,
+ * then drop LOV_PATTERN_F_HOLE from lmm::lmm_pattern. */
+ if (i == count)
+ lmm->lmm_pattern &= ~cpu_to_le32(LOV_PATTERN_F_HOLE);
+ }
+
rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
BYPASS_CAPA);
if (rc == 0)
* \retval -ve: on error
*/
static int lfsck_layout_extend_lovea(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
struct thandle *handle,
struct dt_object *parent,
struct lu_fid *cfid,
struct lu_buf *buf, int fl,
- __u32 ost_idx, __u32 ea_off)
+ __u32 ost_idx, __u32 ea_off, bool reset)
{
struct lov_mds_md_v1 *lmm = buf->lb_buf;
struct lov_ost_data_v1 *objs;
int rc;
+ __u16 count;
ENTRY;
- if (fl == LU_XATTR_CREATE) {
- LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1,
- LOV_MAGIC_V1));
+ if (fl == LU_XATTR_CREATE || reset) {
+ __u32 pattern = LOV_PATTERN_RAID0;
+
+ count = ea_off + 1;
+ LASSERT(buf->lb_len == lov_mds_md_size(count, LOV_MAGIC_V1));
+
+ if (ea_off != 0 || reset)
+ pattern |= LOV_PATTERN_F_HOLE;
memset(lmm, 0, buf->lb_len);
lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
- /* XXX: currently, we only support LOV_PATTERN_RAID0. */
- lmm->lmm_pattern = cpu_to_le32(LOV_PATTERN_RAID0);
+ lmm->lmm_pattern = cpu_to_le32(pattern);
fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
- /* XXX: We cannot know the stripe size,
- * then use the default value (1 MB). */
- lmm->lmm_stripe_size =
- cpu_to_le32(LOV_DESC_STRIPE_SIZE_DEFAULT);
- objs = &(lmm->lmm_objects[ea_off]);
+
+ rc = lfsck_layout_get_def_stripesize(env, lfsck,
+ &lmm->lmm_stripe_size);
+ if (rc != 0)
+ RETURN(rc);
+
+ objs = &lmm->lmm_objects[ea_off];
} else {
- __u16 count = le16_to_cpu(lmm->lmm_stripe_count);
- int gap = ea_off - count;
__u32 magic = le32_to_cpu(lmm->lmm_magic);
+ int gap;
- /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3
- * which has been verified in lfsck_layout_verify_header()
- * already. If some new magic introduced in the future,
- * then layout LFSCK needs to be updated also. */
- if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[count]);
- } else {
- LASSERT(magic == LOV_MAGIC_V3);
+ count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (magic == LOV_MAGIC_V1)
+ objs = &lmm->lmm_objects[count];
+ else
objs = &((struct lov_mds_md_v3 *)lmm)->
lmm_objects[count];
- }
- if (gap > 0)
+ gap = ea_off - count;
+ if (gap >= 0)
+ count = ea_off + 1;
+ LASSERT(buf->lb_len == lov_mds_md_size(count, magic));
+
+ if (gap > 0) {
memset(objs, 0, gap * sizeof(*objs));
+ lmm->lmm_pattern |= cpu_to_le32(LOV_PATTERN_F_HOLE);
+ }
+
lmm->lmm_layout_gen =
cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
objs += gap;
-
- LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1, magic));
}
- lmm->lmm_stripe_count = cpu_to_le16(ea_off + 1);
+ lmm->lmm_stripe_count = cpu_to_le16(count);
rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
fl, ost_idx);
memset(la, 0, sizeof(*la));
la->la_uid = rec->lor_uid;
la->la_gid = rec->lor_gid;
- la->la_mode = S_IFREG | S_IRUSR | S_IWUSR;
+ la->la_mode = S_IFREG | S_IRUSR;
la->la_valid = LA_MODE | LA_UID | LA_GID;
memset(dof, 0, sizeof(*dof));
rc = dt_create(env, pobj, la, NULL, dof, th);
if (rc == 0)
/* 3b. Add layout EA for the MDT-object. */
- rc = lfsck_layout_extend_lovea(env, th, pobj, cfid, ea_buf,
- LU_XATTR_CREATE, ltd->ltd_index,
- ea_off);
+ rc = lfsck_layout_extend_lovea(env, lfsck, th, pobj, cfid,
+ ea_buf, LU_XATTR_CREATE,
+ ltd->ltd_index, ea_off, false);
dt_write_unlock(env, pobj);
if (rc < 0)
GOTO(stop, rc);
ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
if (unlikely(ltd == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
exp = ltd->ltd_exp;
if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
LASSERT(buf->lb_len >= rc);
buf->lb_len = rc;
- rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
- fl, ost_idx, ea_off);
+ rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
+ buf, fl, ost_idx, ea_off, false);
GOTO(unlock_parent, rc);
}
lmm = buf->lb_buf;
rc1 = lfsck_layout_verify_header(lmm);
+
+	/* If the LOV EA crashed, then rebuild it. */
+ if (rc1 == -EINVAL) {
+ if (bk->lb_param & LPF_DRYRUN)
+ GOTO(unlock_parent, rc = 1);
+
+ LASSERT(buf->lb_len >= rc);
+
+ buf->lb_len = rc;
+ memset(lmm, 0, buf->lb_len);
+ rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
+ buf, fl, ost_idx, ea_off, true);
+
+ GOTO(unlock_parent, rc);
+ }
+
+ /* For other unknown magic/pattern, keep the current LOV EA. */
if (rc1 != 0)
GOTO(unlock_parent, rc = rc1);
* be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
goto again;
buf->lb_len = rc;
- rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
- fl, ost_idx, ea_off);
+ rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
+ buf, fl, ost_idx, ea_off, false);
+
GOTO(unlock_parent, rc);
}
for (i = 0; i < count; i++, objs++) {
/* The MDT-object was created via lfsck_layout_recover_create()
* by others before, and we fill the dummy layout EA. */
- if (is_dummy_lov_ost_data(objs)) {
+ if (lovea_slot_is_dummy(objs)) {
if (i != ea_off)
continue;
dt_trans_stop(env, dt, handle);
lfsck_layout_unlock(&lh);
if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
- objs = &(lmm->lmm_objects[ea_off]);
+ objs = &lmm->lmm_objects[ea_off];
else
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
return rc > 0 ? 0 : rc;
}
-/* For the MDT-object with dangling reference, we need to re-create
- * the missed OST-object with the known FID/owner information. */
-static int lfsck_layout_recreate_ostobj(const struct lu_env *env,
+/* For the MDT-object with dangling reference, we need to repair the
+ * inconsistency according to the LFSCK sponsor's requirement:
+ *
+ * 1) Keep the inconsistency there and report the inconsistency case,
+ * then give the chance to the application to find related issues,
+ * and the users can make the decision about how to handle it with
+ *    more human knowledge. (by default)
+ *
+ * 2) Re-create the missed OST-object with the FID/owner information. */
+static int lfsck_layout_repair_dangling(const struct lu_env *env,
struct lfsck_component *com,
struct lfsck_layout_req *llr,
- struct lu_attr *la)
+ const struct lu_attr *pla)
{
struct lfsck_thread_info *info = lfsck_env_info(env);
struct filter_fid *pfid = &info->lti_new_pfid;
struct dt_allocation_hint *hint = &info->lti_hint;
+ struct lu_attr *cla = &info->lti_la2;
struct dt_object *parent = llr->llr_parent->llo_obj;
struct dt_object *child = llr->llr_child;
struct dt_device *dev = lfsck_obj2dt_dev(child);
struct lu_buf *buf;
struct lustre_handle lh = { 0 };
int rc;
+ bool create;
ENTRY;
- CDEBUG(D_LFSCK, "Repair dangling reference for: parent "DFID
- ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
+ if (com->lc_lfsck->li_bookmark_ram.lb_param & LPF_CREATE_OSTOBJ)
+ create = true;
+ else
+ create = false;
+
+ CDEBUG(D_LFSCK, "Found dangling reference for: parent "DFID
+ ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u. %s",
PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
- llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid);
+ llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid,
+ create ? "Create the lost OST-object as required.\n" :
+ "Keep the MDT-object there by default.\n");
+
+ if (!create)
+ RETURN(1);
+
+ memset(cla, 0, sizeof(*cla));
+ cla->la_uid = pla->la_uid;
+ cla->la_gid = pla->la_gid;
+ cla->la_mode = S_IFREG | 0666;
+ cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
+ LA_ATIME | LA_MTIME | LA_CTIME;
rc = lfsck_layout_lock(env, com, parent, &lh,
MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
- rc = dt_declare_create(env, child, la, hint, NULL, handle);
+ rc = dt_declare_create(env, child, cla, hint, NULL, handle);
if (rc != 0)
GOTO(stop, rc);
if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
GOTO(unlock2, rc = 1);
- rc = dt_create(env, child, la, hint, NULL, handle);
+ rc = dt_create(env, child, cla, hint, NULL, handle);
if (rc != 0)
GOTO(unlock2, rc);
GOTO(unlock2, rc = 0);
lmm = buf->lb_buf;
- rc = lfsck_layout_verify_header(lmm);
- if (rc != 0)
- GOTO(unlock2, rc);
-
/* Someone change layout during the LFSCK, no need to repair then. */
if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
GOTO(unlock2, rc = 0);
* be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
GOTO(out, rc);
lmm = buf->lb_buf;
- rc = lfsck_layout_verify_header(lmm);
- if (rc != 0)
- GOTO(out, rc);
-
- /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
- * been verified in lfsck_layout_verify_header() already. If some
- * new magic introduced in the future, then layout LFSCK needs to
- * be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
struct lu_fid *tfid = &info->lti_fid2;
struct ost_id *oi = &info->lti_oi;
- if (is_dummy_lov_ost_data(objs))
+ if (lovea_slot_is_dummy(objs))
continue;
ostid_le_to_cpu(&objs->l_ost_oi, oi);
switch (type) {
case LLIT_DANGLING:
- memset(cla, 0, sizeof(*cla));
- cla->la_uid = pla->la_uid;
- cla->la_gid = pla->la_gid;
- cla->la_mode = S_IFREG | 0666;
- cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
- LA_ATIME | LA_MTIME | LA_CTIME;
- rc = lfsck_layout_recreate_ostobj(env, com, llr, cla);
+ rc = lfsck_layout_repair_dangling(env, com, llr, pla);
break;
case LLIT_UNMATCHED_PAIR:
rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
out:
down_write(&com->lc_sem);
if (rc < 0) {
- /* If cannot touch the target server,
- * mark the LFSCK as INCOMPLETE. */
- if (rc == -ENOTCONN || rc == -ESHUTDOWN || rc == -ETIMEDOUT ||
- rc == -EHOSTDOWN || rc == -EHOSTUNREACH) {
+ struct lfsck_layout_master_data *llmd = com->lc_data;
+
+ if (unlikely(llmd->llmd_exit)) {
+ rc = 0;
+ } else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
+ rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
+ rc == -EHOSTUNREACH) {
+ /* If cannot touch the target server,
+ * mark the LFSCK as INCOMPLETE. */
CERROR("%s: Fail to talk with OST %x: rc = %d.\n",
lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
lo->ll_flags |= LF_INCOMPLETE;
lo->ll_objs_skipped++;
rc = 0;
} else {
- lo->ll_objs_failed_phase1++;
+ lfsck_layout_record_failure(env, lfsck, lo);
}
} else if (rc > 0) {
LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
"unknown type = %d\n", type);
lo->ll_objs_repaired[type - 1]++;
+ if (bk->lb_param & LPF_DRYRUN &&
+ unlikely(lo->ll_pos_first_inconsistent == 0))
+ lo->ll_pos_first_inconsistent =
+ lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
+ lfsck->li_di_oit);
}
up_write(&com->lc_sem);
memset(lr, 0, sizeof(*lr));
lr->lr_event = LE_START;
lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
- LSV_ASYNC_WINDOWS;
+ LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
lr->lr_speed = bk->lb_speed_limit;
lr->lr_version = bk->lb_version;
lr->lr_param = bk->lb_param;
while (!list_empty(&llmd->llmd_req_list)) {
bool wakeup = false;
- if (unlikely(llmd->llmd_exit))
+ if (unlikely(llmd->llmd_exit ||
+ !thread_is_running(mthread)))
GOTO(cleanup1, rc = llmd->llmd_post_result);
llr = list_entry(llmd->llmd_req_list.next,
com->lc_time_last_checkpoint +
cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+	/* Flush all async updates before handling orphans. */
+ dt_sync(env, lfsck->li_next);
+
while (llmd->llmd_in_double_scan) {
struct lfsck_tgt_descs *ltds =
&lfsck->li_ost_descs;
/* Under force exit case, some requests may be just freed without
* verification, those objects should be re-handled when next run.
* So not update the on-disk tracing file under such case. */
- if (!llmd->llmd_exit)
+ if (llmd->llmd_in_double_scan && !llmd->llmd_exit)
rc1 = lfsck_layout_double_scan_result(env, com, rc);
fini:
* be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
down_write(&com->lc_sem);
if (new_checked)
com->lc_new_checked++;
- lo->ll_objs_failed_phase1++;
- if (lo->ll_pos_first_inconsistent == 0) {
- struct lfsck_instance *lfsck = com->lc_lfsck;
-
- lo->ll_pos_first_inconsistent =
- lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
- lfsck->li_di_oit);
- }
+ lfsck_layout_record_failure(env, com->lc_lfsck, lo);
up_write(&com->lc_sem);
}
int rc;
rc = lfsck_layout_reset(env, com, false);
+ if (rc == 0)
+ rc = lfsck_set_param(env, lfsck, start, true);
+
if (rc != 0)
return rc;
}
struct lfsck_start_param *lsp)
{
struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout *lo = com->lc_file_ram;
struct lfsck_start *start = lsp->lsp_start;
int rc;
rc = lfsck_layout_prep(env, com, start);
- if (rc != 0 || !lsp->lsp_index_valid)
+ if (rc != 0)
return rc;
+ if (lo->ll_flags & LF_CRASHED_LASTID &&
+ list_empty(&llsd->llsd_master_list)) {
+ LASSERT(lfsck->li_out_notify != NULL);
+
+ lfsck->li_out_notify(env, lfsck->li_out_notify_data,
+ LE_LASTID_REBUILDING);
+ }
+
+ if (!lsp->lsp_index_valid)
+ return 0;
+
rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
LASSERT(!llsd->llsd_rbtree_valid);
struct ptlrpc_thread *mthread = &lfsck->li_thread;
struct ptlrpc_thread *athread = &llmd->llmd_thread;
struct lfsck_thread_args *lta;
- long rc;
+ struct task_struct *task;
+ int rc;
ENTRY;
rc = lfsck_layout_prep(env, com, lsp->lsp_start);
if (IS_ERR(lta))
RETURN(PTR_ERR(lta));
- rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
- if (IS_ERR_VALUE(rc)) {
+ task = kthread_run(lfsck_layout_assistant, lta, "lfsck_layout");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
CERROR("%s: Cannot start LFSCK layout assistant thread: "
- "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
+ "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
lfsck_thread_args_fini(lta);
} else {
struct l_wait_info lwi = { 0 };
* be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
le32_to_cpu(objs->l_ost_idx);
bool wakeup = false;
- if (is_dummy_lov_ost_data(objs))
+ if (unlikely(lovea_slot_is_dummy(objs)))
continue;
l_wait_event(mthread->t_ctl_waitq,
down_write(&com->lc_sem);
com->lc_new_checked++;
if (rc < 0)
- lo->ll_objs_failed_phase1++;
+ lfsck_layout_record_failure(env, lfsck, lo);
up_write(&com->lc_sem);
if (cobj != NULL && !IS_ERR(cobj))
buf->lb_len = rc;
lmm = buf->lb_buf;
rc = lfsck_layout_verify_header(lmm);
+	/* If the LOV EA crashed, then it is possible to rebuild it later
+	 * when handling orphan OST-objects. */
if (rc != 0)
GOTO(out, rc);
down_write(&com->lc_sem);
com->lc_new_checked++;
if (rc < 0)
- lo->ll_objs_failed_phase1++;
+ lfsck_layout_record_failure(env, lfsck, lo);
up_write(&com->lc_sem);
}
buf->lb_len = buflen;
LASSERT(llsd != NULL);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
+ cfs_fail_val == lfsck_dev_idx(lfsck->li_bottom)) {
+ struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
+ NULL, NULL);
+ struct ptlrpc_thread *thread = &lfsck->li_thread;
+
+ l_wait_event(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ &lwi);
+ }
+
lfsck_rbtree_update_bitmap(env, com, fid, false);
down_write(&com->lc_sem);
const struct dt_it_ops *iops;
cfs_duration_t duration = cfs_time_current() -
lfsck->li_time_last_checkpoint;
- __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
+ __u64 checked = lo->ll_objs_checked_phase1 +
+ com->lc_new_checked;
__u64 speed = checked;
__u64 new_checked = com->lc_new_checked * HZ;
__u32 rtime = lo->ll_run_time_phase1 +
} else if (lo->ll_status == LS_SCANNING_PHASE2) {
cfs_duration_t duration = cfs_time_current() -
lfsck->li_time_last_checkpoint;
- __u64 checked = lo->ll_objs_checked_phase1 + com->lc_new_checked;
- __u64 speed = checked;
+ __u64 checked = lo->ll_objs_checked_phase2 +
+ com->lc_new_checked;
+ __u64 speed1 = lo->ll_objs_checked_phase1;
+ __u64 speed2 = checked;
__u64 new_checked = com->lc_new_checked * HZ;
- __u32 rtime = lo->ll_run_time_phase1 +
+ __u32 rtime = lo->ll_run_time_phase2 +
cfs_duration_sec(duration + HALF_SEC);
if (duration != 0)
do_div(new_checked, duration);
+ if (lo->ll_run_time_phase1 != 0)
+ do_div(speed1, lo->ll_run_time_phase1);
if (rtime != 0)
- do_div(speed, rtime);
+ do_div(speed2, rtime);
rc = snprintf(buf, len,
"checked_phase1: "LPU64"\n"
"checked_phase2: "LPU64"\n"
"run_time_phase1: %u seconds\n"
"run_time_phase2: %u seconds\n"
"average_speed_phase1: "LPU64" items/sec\n"
- "average_speed_phase2: N/A\n"
- "real-time_speed_phase1: "LPU64" items/sec\n"
- "real-time_speed_phase2: N/A\n"
+ "average_speed_phase2: "LPU64" items/sec\n"
+ "real-time_speed_phase1: N/A\n"
+ "real-time_speed_phase2: "LPU64" items/sec\n"
"current_position: "DFID"\n",
+ lo->ll_objs_checked_phase1,
checked,
- lo->ll_objs_checked_phase2,
+ lo->ll_run_time_phase1,
rtime,
- lo->ll_run_time_phase2,
- speed,
+ speed1,
+ speed2,
new_checked,
PFID(&com->lc_fid_latest_scanned_phase2));
if (rc <= 0)
if (ltd == NULL) {
spin_unlock(<ds->ltd_lock);
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
}
list_del_init(<d->ltd_layout_phase_list);
llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
if (llst == NULL)
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
lfsck_layout_llst_put(llst);
if (list_empty(&llsd->llsd_master_list))
lfsck = lfsck_instance_find(dev, true, false);
if (unlikely(lfsck == NULL))
- RETURN(ERR_PTR(-ENODEV));
+ RETURN(ERR_PTR(-ENXIO));
com = lfsck_component_find(lfsck, LT_LAYOUT);
if (unlikely(com == NULL))
it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
if (it->loi_llst == NULL)
- GOTO(out, rc = -ENODEV);
+ GOTO(out, rc = -ENXIO);
if (dev->dd_record_fid_accessed) {
/* The first iteration against the rbtree, scan the whole rbtree