static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm)
{
__u32 magic;
- __u32 patten;
+ __u32 pattern;
magic = le32_to_cpu(lmm->lmm_magic);
/* If magic crashed, keep it there. Sometime later, during OST-object
* orphan handling, if some OST-object(s) back-point to it, it can be
* verified and repaired. */
- if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
- return -EINVAL;
+ if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3) {
+ struct ost_id oi;
+ int rc;
+
+ lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
+ if ((magic & LOV_MAGIC_MASK) == LOV_MAGIC_MAGIC)
+ rc = -EOPNOTSUPP;
+ else
+ rc = -EINVAL;
+
+ CDEBUG(D_LFSCK, "%s LOV EA magic %u on "DOSTID"\n",
+ rc == -EINVAL ? "Unknown" : "Unsupported",
+ magic, POSTID(&oi));
- patten = le32_to_cpu(lmm->lmm_pattern);
+ return rc;
+ }
+
+ pattern = le32_to_cpu(lmm->lmm_pattern);
/* XXX: currently, we only support LOV_PATTERN_RAID0. */
- if (patten != LOV_PATTERN_RAID0)
+ if (lov_pattern(pattern) != LOV_PATTERN_RAID0) {
+ struct ost_id oi;
+
+ lmm_oi_cpu_to_le(&oi, &lmm->lmm_oi);
+ CDEBUG(D_LFSCK, "Unsupported LOV EA pattern %u on "DOSTID"\n",
+ pattern, POSTID(&oi));
+
return -EOPNOTSUPP;
+ }
return 0;
}
}
}
-static inline bool is_dummy_lov_ost_data(struct lov_ost_data_v1 *obj)
-{
- if (fid_is_zero(&obj->l_ost_oi.oi_fid) &&
- obj->l_ost_gen == 0 && obj->l_ost_idx == 0)
- return true;
-
- return false;
-}
-
static void lfsck_layout_le_to_cpu(struct lfsck_layout *des,
const struct lfsck_layout *src)
{
return rc;
}
+static void lfsck_layout_record_failure(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ struct lfsck_layout *lo)
+{
+ lo->ll_objs_failed_phase1++;
+ if (unlikely(lo->ll_pos_first_inconsistent == 0))
+ lo->ll_pos_first_inconsistent =
+ lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
+ lfsck->li_di_oit);
+}
+
static int lfsck_layout_master_async_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
void *args, int rc)
}
/**
+ * Get the system default stripe size.
+ *
+ * \param[in] env pointer to the thread context
+ * \param[in] lfsck pointer to the lfsck instance
+ * \param[out] size pointer to the default stripe size
+ *
+ * \retval 0 for success
+ * \retval negative error number on failure
+ */
+static int lfsck_layout_get_def_stripesize(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ __u32 *size)
+{
+ struct lov_user_md *lum = &lfsck_env_info(env)->lti_lum;
+ struct dt_object *root;
+ int rc;
+
+ root = dt_locate(env, lfsck->li_next, &lfsck->li_local_root_fid);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ /* Get the default stripe size via xattr_get on the backend root. */
+ rc = dt_xattr_get(env, root, lfsck_buf_get(env, lum, sizeof(*lum)),
+ XATTR_NAME_LOV, BYPASS_CAPA);
+ if (rc > 0) {
+ /* The lum->lmm_stripe_size is LE mode. The *size also
+ * should be LE mode. So it is unnecessary to convert. */
+ *size = lum->lmm_stripe_size;
+ rc = 0;
+ } else if (unlikely(rc == 0)) {
+ rc = -EINVAL;
+ }
+
+ lfsck_object_put(env, root);
+
+ return rc;
+}
+
+/**
* \retval +1: repaired
* \retval 0: did nothing
* \retval -ve: on error
struct lov_ost_data_v1 *slot,
int fl, __u32 ost_idx)
{
- struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
- int rc;
+ struct ost_id *oi = &lfsck_env_info(env)->lti_oi;
+ struct lov_mds_md_v1 *lmm = buf->lb_buf;
+ int rc;
fid_to_ostid(cfid, oi);
ostid_cpu_to_le(oi, &slot->l_ost_oi);
slot->l_ost_gen = cpu_to_le32(0);
slot->l_ost_idx = cpu_to_le32(ost_idx);
+
+ if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE) {
+ struct lov_ost_data_v1 *objs;
+ int i;
+ __u16 count;
+
+ count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
+ objs = &lmm->lmm_objects[0];
+ else
+ objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
+ for (i = 0; i < count; i++, objs++) {
+ if (objs != slot && lovea_slot_is_dummy(objs))
+ break;
+ }
+
+ /* If the @slot is the last dummy slot to be refilled,
+ * then drop LOV_PATTERN_F_HOLE from lmm::lmm_pattern. */
+ if (i == count)
+ lmm->lmm_pattern &= ~cpu_to_le32(LOV_PATTERN_F_HOLE);
+ }
+
rc = dt_xattr_set(env, parent, buf, XATTR_NAME_LOV, fl, handle,
BYPASS_CAPA);
if (rc == 0)
* \retval -ve: on error
*/
static int lfsck_layout_extend_lovea(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
struct thandle *handle,
struct dt_object *parent,
struct lu_fid *cfid,
struct lu_buf *buf, int fl,
- __u32 ost_idx, __u32 ea_off)
+ __u32 ost_idx, __u32 ea_off, bool reset)
{
struct lov_mds_md_v1 *lmm = buf->lb_buf;
struct lov_ost_data_v1 *objs;
int rc;
+ __u16 count;
ENTRY;
- if (fl == LU_XATTR_CREATE) {
- LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1,
- LOV_MAGIC_V1));
+ if (fl == LU_XATTR_CREATE || reset) {
+ __u32 pattern = LOV_PATTERN_RAID0;
+
+ count = ea_off + 1;
+ LASSERT(buf->lb_len == lov_mds_md_size(count, LOV_MAGIC_V1));
+
+ if (ea_off != 0 || reset)
+ pattern |= LOV_PATTERN_F_HOLE;
memset(lmm, 0, buf->lb_len);
lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
- /* XXX: currently, we only support LOV_PATTERN_RAID0. */
- lmm->lmm_pattern = cpu_to_le32(LOV_PATTERN_RAID0);
+ lmm->lmm_pattern = cpu_to_le32(pattern);
fid_to_lmm_oi(lfsck_dto2fid(parent), &lmm->lmm_oi);
lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
- /* XXX: We cannot know the stripe size,
- * then use the default value (1 MB). */
- lmm->lmm_stripe_size =
- cpu_to_le32(LOV_DESC_STRIPE_SIZE_DEFAULT);
- objs = &(lmm->lmm_objects[ea_off]);
+
+ rc = lfsck_layout_get_def_stripesize(env, lfsck,
+ &lmm->lmm_stripe_size);
+ if (rc != 0)
+ RETURN(rc);
+
+ objs = &lmm->lmm_objects[ea_off];
} else {
- __u16 count = le16_to_cpu(lmm->lmm_stripe_count);
- int gap = ea_off - count;
__u32 magic = le32_to_cpu(lmm->lmm_magic);
+ int gap;
- /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3
- * which has been verified in lfsck_layout_verify_header()
- * already. If some new magic introduced in the future,
- * then layout LFSCK needs to be updated also. */
- if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[count]);
- } else {
- LASSERT(magic == LOV_MAGIC_V3);
+ count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (magic == LOV_MAGIC_V1)
+ objs = &lmm->lmm_objects[count];
+ else
objs = &((struct lov_mds_md_v3 *)lmm)->
lmm_objects[count];
- }
- if (gap > 0)
+ gap = ea_off - count;
+ if (gap >= 0)
+ count = ea_off + 1;
+ LASSERT(buf->lb_len == lov_mds_md_size(count, magic));
+
+ if (gap > 0) {
memset(objs, 0, gap * sizeof(*objs));
+ lmm->lmm_pattern |= cpu_to_le32(LOV_PATTERN_F_HOLE);
+ }
+
lmm->lmm_layout_gen =
cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
objs += gap;
-
- LASSERT(buf->lb_len == lov_mds_md_size(ea_off + 1, magic));
}
- lmm->lmm_stripe_count = cpu_to_le16(ea_off + 1);
+ lmm->lmm_stripe_count = cpu_to_le16(count);
rc = lfsck_layout_refill_lovea(env, handle, parent, cfid, buf, objs,
fl, ost_idx);
memset(la, 0, sizeof(*la));
la->la_uid = rec->lor_uid;
la->la_gid = rec->lor_gid;
- la->la_mode = S_IFREG | S_IRUSR | S_IWUSR;
+ la->la_mode = S_IFREG | S_IRUSR;
la->la_valid = LA_MODE | LA_UID | LA_GID;
memset(dof, 0, sizeof(*dof));
rc = dt_create(env, pobj, la, NULL, dof, th);
if (rc == 0)
/* 3b. Add layout EA for the MDT-object. */
- rc = lfsck_layout_extend_lovea(env, th, pobj, cfid, ea_buf,
- LU_XATTR_CREATE, ltd->ltd_index,
- ea_off);
+ rc = lfsck_layout_extend_lovea(env, lfsck, th, pobj, cfid,
+ ea_buf, LU_XATTR_CREATE,
+ ltd->ltd_index, ea_off, false);
dt_write_unlock(env, pobj);
if (rc < 0)
GOTO(stop, rc);
LASSERT(buf->lb_len >= rc);
buf->lb_len = rc;
- rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
- fl, ost_idx, ea_off);
+ rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
+ buf, fl, ost_idx, ea_off, false);
GOTO(unlock_parent, rc);
}
lmm = buf->lb_buf;
rc1 = lfsck_layout_verify_header(lmm);
+
+	/* If the LOV EA crashed, then rebuild it. */
+ if (rc1 == -EINVAL) {
+ if (bk->lb_param & LPF_DRYRUN)
+ GOTO(unlock_parent, rc = 1);
+
+ LASSERT(buf->lb_len >= rc);
+
+ buf->lb_len = rc;
+ memset(lmm, 0, buf->lb_len);
+ rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
+ buf, fl, ost_idx, ea_off, true);
+
+ GOTO(unlock_parent, rc);
+ }
+
+ /* For other unknown magic/pattern, keep the current LOV EA. */
if (rc1 != 0)
GOTO(unlock_parent, rc = rc1);
* be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
goto again;
buf->lb_len = rc;
- rc = lfsck_layout_extend_lovea(env, handle, parent, cfid, buf,
- fl, ost_idx, ea_off);
+ rc = lfsck_layout_extend_lovea(env, lfsck, handle, parent, cfid,
+ buf, fl, ost_idx, ea_off, false);
+
GOTO(unlock_parent, rc);
}
for (i = 0; i < count; i++, objs++) {
/* The MDT-object was created via lfsck_layout_recover_create()
* by others before, and we fill the dummy layout EA. */
- if (is_dummy_lov_ost_data(objs)) {
+ if (lovea_slot_is_dummy(objs)) {
if (i != ea_off)
continue;
dt_trans_stop(env, dt, handle);
lfsck_layout_unlock(&lh);
if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1)
- objs = &(lmm->lmm_objects[ea_off]);
+ objs = &lmm->lmm_objects[ea_off];
else
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[ea_off];
rc = lfsck_layout_conflict_create(env, com, ltd, rec, parent, cfid,
return rc > 0 ? 0 : rc;
}
-/* For the MDT-object with dangling reference, we need to re-create
- * the missed OST-object with the known FID/owner information. */
-static int lfsck_layout_recreate_ostobj(const struct lu_env *env,
+/* For the MDT-object with dangling reference, we need to repair the
+ * inconsistency according to the LFSCK sponsor's requirement:
+ *
+ * 1) Keep the inconsistency there and report the inconsistency case,
+ * then give the chance to the application to find related issues,
+ * and the users can make the decision about how to handle it with
+ *    more human knowledge. (by default)
+ *
+ * 2) Re-create the missed OST-object with the FID/owner information. */
+static int lfsck_layout_repair_dangling(const struct lu_env *env,
struct lfsck_component *com,
struct lfsck_layout_req *llr,
- struct lu_attr *la)
+ const struct lu_attr *pla)
{
struct lfsck_thread_info *info = lfsck_env_info(env);
struct filter_fid *pfid = &info->lti_new_pfid;
struct dt_allocation_hint *hint = &info->lti_hint;
+ struct lu_attr *cla = &info->lti_la2;
struct dt_object *parent = llr->llr_parent->llo_obj;
struct dt_object *child = llr->llr_child;
struct dt_device *dev = lfsck_obj2dt_dev(child);
struct lu_buf *buf;
struct lustre_handle lh = { 0 };
int rc;
+ bool create;
ENTRY;
- CDEBUG(D_LFSCK, "Repair dangling reference for: parent "DFID
- ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u\n",
+ if (com->lc_lfsck->li_bookmark_ram.lb_param & LPF_CREATE_OSTOBJ)
+ create = true;
+ else
+ create = false;
+
+ CDEBUG(D_LFSCK, "Found dangling reference for: parent "DFID
+ ", child "DFID", OST-index %u, stripe-index %u, owner %u:%u. %s",
PFID(lfsck_dto2fid(parent)), PFID(lfsck_dto2fid(child)),
- llr->llr_ost_idx, llr->llr_lov_idx, la->la_uid, la->la_gid);
+ llr->llr_ost_idx, llr->llr_lov_idx, pla->la_uid, pla->la_gid,
+ create ? "Create the lost OST-object as required.\n" :
+ "Keep the MDT-object there by default.\n");
+
+ if (!create)
+ RETURN(1);
+
+ memset(cla, 0, sizeof(*cla));
+ cla->la_uid = pla->la_uid;
+ cla->la_gid = pla->la_gid;
+ cla->la_mode = S_IFREG | 0666;
+ cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
+ LA_ATIME | LA_MTIME | LA_CTIME;
rc = lfsck_layout_lock(env, com, parent, &lh,
MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR);
pfid->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
buf = lfsck_buf_get(env, pfid, sizeof(struct filter_fid));
- rc = dt_declare_create(env, child, la, hint, NULL, handle);
+ rc = dt_declare_create(env, child, cla, hint, NULL, handle);
if (rc != 0)
GOTO(stop, rc);
if (unlikely(lu_object_is_dying(parent->do_lu.lo_header)))
GOTO(unlock2, rc = 1);
- rc = dt_create(env, child, la, hint, NULL, handle);
+ rc = dt_create(env, child, cla, hint, NULL, handle);
if (rc != 0)
GOTO(unlock2, rc);
GOTO(unlock2, rc = 0);
lmm = buf->lb_buf;
- rc = lfsck_layout_verify_header(lmm);
- if (rc != 0)
- GOTO(unlock2, rc);
-
/* Someone change layout during the LFSCK, no need to repair then. */
if (le16_to_cpu(lmm->lmm_layout_gen) != llr->llr_parent->llo_gen)
GOTO(unlock2, rc = 0);
* be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
GOTO(out, rc);
lmm = buf->lb_buf;
- rc = lfsck_layout_verify_header(lmm);
- if (rc != 0)
- GOTO(out, rc);
-
- /* Currently, we only support LOV_MAGIC_V1/LOV_MAGIC_V3 which has
- * been verified in lfsck_layout_verify_header() already. If some
- * new magic introduced in the future, then layout LFSCK needs to
- * be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
struct lu_fid *tfid = &info->lti_fid2;
struct ost_id *oi = &info->lti_oi;
- if (is_dummy_lov_ost_data(objs))
+ if (lovea_slot_is_dummy(objs))
continue;
ostid_le_to_cpu(&objs->l_ost_oi, oi);
switch (type) {
case LLIT_DANGLING:
- memset(cla, 0, sizeof(*cla));
- cla->la_uid = pla->la_uid;
- cla->la_gid = pla->la_gid;
- cla->la_mode = S_IFREG | 0666;
- cla->la_valid = LA_TYPE | LA_MODE | LA_UID | LA_GID |
- LA_ATIME | LA_MTIME | LA_CTIME;
- rc = lfsck_layout_recreate_ostobj(env, com, llr, cla);
+ rc = lfsck_layout_repair_dangling(env, com, llr, pla);
break;
case LLIT_UNMATCHED_PAIR:
rc = lfsck_layout_repair_unmatched_pair(env, com, llr, pla);
lo->ll_objs_skipped++;
rc = 0;
} else {
- lo->ll_objs_failed_phase1++;
+ lfsck_layout_record_failure(env, lfsck, lo);
}
} else if (rc > 0) {
LASSERTF(type > LLIT_NONE && type <= LLIT_MAX,
"unknown type = %d\n", type);
lo->ll_objs_repaired[type - 1]++;
+ if (bk->lb_param & LPF_DRYRUN &&
+ unlikely(lo->ll_pos_first_inconsistent == 0))
+ lo->ll_pos_first_inconsistent =
+ lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
+ lfsck->li_di_oit);
}
up_write(&com->lc_sem);
memset(lr, 0, sizeof(*lr));
lr->lr_event = LE_START;
lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
- LSV_ASYNC_WINDOWS;
+ LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
lr->lr_speed = bk->lb_speed_limit;
lr->lr_version = bk->lb_version;
lr->lr_param = bk->lb_param;
com->lc_time_last_checkpoint +
cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ /* flush all async updating before handling orphan. */
+ dt_sync(env, lfsck->li_next);
+
while (llmd->llmd_in_double_scan) {
struct lfsck_tgt_descs *ltds =
&lfsck->li_ost_descs;
* be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
down_write(&com->lc_sem);
if (new_checked)
com->lc_new_checked++;
- lo->ll_objs_failed_phase1++;
- if (lo->ll_pos_first_inconsistent == 0) {
- struct lfsck_instance *lfsck = com->lc_lfsck;
-
- lo->ll_pos_first_inconsistent =
- lfsck->li_obj_oit->do_index_ops->dio_it.store(env,
- lfsck->li_di_oit);
- }
+ lfsck_layout_record_failure(env, com->lc_lfsck, lo);
up_write(&com->lc_sem);
}
int rc;
rc = lfsck_layout_reset(env, com, false);
+ if (rc == 0)
+ rc = lfsck_set_param(env, lfsck, start, true);
+
if (rc != 0)
return rc;
}
struct lfsck_start_param *lsp)
{
struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct lfsck_instance *lfsck = com->lc_lfsck;
+ struct lfsck_layout *lo = com->lc_file_ram;
struct lfsck_start *start = lsp->lsp_start;
int rc;
rc = lfsck_layout_prep(env, com, start);
- if (rc != 0 || !lsp->lsp_index_valid)
+ if (rc != 0)
return rc;
+ if (lo->ll_flags & LF_CRASHED_LASTID &&
+ list_empty(&llsd->llsd_master_list)) {
+ LASSERT(lfsck->li_out_notify != NULL);
+
+ lfsck->li_out_notify(env, lfsck->li_out_notify_data,
+ LE_LASTID_REBUILDING);
+ }
+
+ if (!lsp->lsp_index_valid)
+ return 0;
+
rc = lfsck_layout_llst_add(llsd, lsp->lsp_index);
if (rc == 0 && start != NULL && start->ls_flags & LPF_ORPHAN) {
LASSERT(!llsd->llsd_rbtree_valid);
struct ptlrpc_thread *mthread = &lfsck->li_thread;
struct ptlrpc_thread *athread = &llmd->llmd_thread;
struct lfsck_thread_args *lta;
- long rc;
+ struct task_struct *task;
+ int rc;
ENTRY;
rc = lfsck_layout_prep(env, com, lsp->lsp_start);
if (IS_ERR(lta))
RETURN(PTR_ERR(lta));
- rc = PTR_ERR(kthread_run(lfsck_layout_assistant, lta, "lfsck_layout"));
- if (IS_ERR_VALUE(rc)) {
+ task = kthread_run(lfsck_layout_assistant, lta, "lfsck_layout");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
CERROR("%s: Cannot start LFSCK layout assistant thread: "
- "rc = %ld\n", lfsck_lfsck2name(lfsck), rc);
+ "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
lfsck_thread_args_fini(lta);
} else {
struct l_wait_info lwi = { 0 };
* be updated also. */
magic = le32_to_cpu(lmm->lmm_magic);
if (magic == LOV_MAGIC_V1) {
- objs = &(lmm->lmm_objects[0]);
+ objs = &lmm->lmm_objects[0];
} else {
LASSERT(magic == LOV_MAGIC_V3);
objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[0];
le32_to_cpu(objs->l_ost_idx);
bool wakeup = false;
- if (is_dummy_lov_ost_data(objs))
+ if (unlikely(lovea_slot_is_dummy(objs)))
continue;
l_wait_event(mthread->t_ctl_waitq,
down_write(&com->lc_sem);
com->lc_new_checked++;
if (rc < 0)
- lo->ll_objs_failed_phase1++;
+ lfsck_layout_record_failure(env, lfsck, lo);
up_write(&com->lc_sem);
if (cobj != NULL && !IS_ERR(cobj))
buf->lb_len = rc;
lmm = buf->lb_buf;
rc = lfsck_layout_verify_header(lmm);
+ /* If the LOV EA crashed, then it is possible to be rebuilt later
+ * when handling orphan OST-objects. */
if (rc != 0)
GOTO(out, rc);
down_write(&com->lc_sem);
com->lc_new_checked++;
if (rc < 0)
- lo->ll_objs_failed_phase1++;
+ lfsck_layout_record_failure(env, lfsck, lo);
up_write(&com->lc_sem);
}
buf->lb_len = buflen;
LASSERT(llsd != NULL);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
+ cfs_fail_val == lfsck_dev_idx(lfsck->li_bottom)) {
+ struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
+ NULL, NULL);
+ struct ptlrpc_thread *thread = &lfsck->li_thread;
+
+ l_wait_event(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ &lwi);
+ }
+
lfsck_rbtree_update_bitmap(env, com, fid, false);
down_write(&com->lc_sem);