lfsck_object_put(env, dir_obj);
}
+/*
+ * lfsck_update_lma - rewrite an OST-object's LMA xattr so that it holds
+ * the normal FID (with the LMAC_FID_ON_OST compat flag) instead of the
+ * old IDIF-based identification.
+ *
+ * \param env   execution environment
+ * \param lfsck LFSCK instance; supplies the bookmark and bottom device
+ * \param obj   the object whose LMA is to be created or replaced
+ *
+ * \retval 0 on success (or immediately when in dry-run mode)
+ * \retval negative errno on failure
+ */
+static int lfsck_update_lma(const struct lu_env *env,
+ struct lfsck_instance *lfsck, struct dt_object *obj)
+{
+ struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
+ struct dt_device *dt = lfsck->li_bottom;
+ struct lustre_mdt_attrs *lma = &info->lti_lma;
+ struct lu_buf *buf;
+ struct thandle *th;
+ int fl;
+ int rc;
+ ENTRY;
+
+ /* Dry-run mode must not modify anything on disk. */
+ if (bk->lb_param & LPF_DRYRUN)
+ RETURN(0);
+
+ /* NOTE(review): the xattr is read into info->lti_lma_old but is then
+  * swabbed/re-initialized through info->lti_lma. This is only correct
+  * if lti_lma aliases (overlays the head of) lti_lma_old in
+  * lfsck_thread_info -- confirm against lfsck_internal.h. */
+ buf = lfsck_buf_get(env, info->lti_lma_old, LMA_OLD_SIZE);
+ rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LMA, BYPASS_CAPA);
+ if (rc < 0) {
+ if (rc != -ENODATA)
+ RETURN(rc);
+
+ /* No LMA present yet: create a fresh one for the normal FID. */
+ fl = LU_XATTR_CREATE;
+ lustre_lma_init(lma, lfsck_dto2fid(obj), LMAC_FID_ON_OST, 0);
+ } else {
+ /* Only the two known on-disk LMA sizes are acceptable. */
+ if (rc != LMA_OLD_SIZE && rc != sizeof(struct lustre_mdt_attrs))
+ RETURN(-EINVAL);
+
+ /* Existing LMA: convert to CPU order, then re-initialize it
+  * with the normal FID while preserving the old compat flags
+  * (plus LMAC_FID_ON_OST) and incompat flags. */
+ fl = LU_XATTR_REPLACE;
+ lustre_lma_swab(lma);
+ lustre_lma_init(lma, lfsck_dto2fid(obj),
+ lma->lma_compat | LMAC_FID_ON_OST,
+ lma->lma_incompat);
+ }
+ /* Convert back to on-disk byte order before writing. */
+ lustre_lma_swab(lma);
+
+ th = dt_trans_create(env, dt);
+ if (IS_ERR(th))
+ RETURN(PTR_ERR(th));
+
+ /* Declare-then-start transaction, per the dt_device protocol. */
+ buf = lfsck_buf_get(env, lma, sizeof(*lma));
+ rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = dt_trans_start(env, dt, th);
+ if (rc != 0)
+ GOTO(stop, rc);
+
+ rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th, BYPASS_CAPA);
+
+ /* GOTO (rather than plain goto) records rc for Lustre debug logs. */
+ GOTO(stop, rc);
+
+stop:
+ dt_trans_stop(env, dt, th);
+ return rc;
+}
+
static int lfsck_master_dir_engine(const struct lu_env *env,
 struct lfsck_instance *lfsck)
{
 struct lu_fid *fid = &info->lti_fid;
 struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
 struct ptlrpc_thread *thread = &lfsck->li_thread;
+ /* Index of the local (bottom) device; compared below against the
+  * OST index embedded in any IDIF FID the iterator returns. */
+ __u32 idx =
+ lfsck_dev_idx(lfsck->li_bottom);
 int rc;
 ENTRY;
 do {
 struct dt_object *target;
+ /* Set when the iterated FID was an old IDIF with a stale OST
+  * index, meaning the object's LMA must be rewritten. */
+ bool update_lma = false;
 if (lfsck->li_di_dir != NULL) {
 rc = lfsck_master_dir_engine(env, lfsck);
 rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
 if (rc != 0) {
 lfsck_fail(env, lfsck, true);
- if (bk->lb_param & LPF_FAILOUT)
+ if (rc < 0 && bk->lb_param & LPF_FAILOUT)
 RETURN(rc);
 else
 goto checkpoint;
 }
+ /* IDIF FIDs are only expected on OST-side (slave) scans,
+  * hence the LASSERT below. */
+ if (fid_is_idif(fid)) {
+ __u32 idx1 = fid_idif_ost_idx(fid);
+
+ LASSERT(!lfsck->li_master);
+
+ /* It is an old format device, update the LMA. */
+ if (idx != idx1) {
+ struct ost_id *oi = &info->lti_oi;
+
+ /* Rebuild the FID with the local device index. */
+ fid_to_ostid(fid, oi);
+ ostid_to_fid(fid, oi, idx);
+ update_lma = true;
+ }
+ }
+
 target = lfsck_object_find(env, lfsck, fid);
 if (target == NULL) {
 goto checkpoint;
 /* XXX: Currently, skip remote object, the consistency for
 * remote object will be processed in LFSCK phase III. */
- if (dt_object_exists(target) && !dt_object_remote(target))
- rc = lfsck_exec_oit(env, lfsck, target);
+ if (dt_object_exists(target) && !dt_object_remote(target)) {
+ /* Repair the stale LMA first; only run the OIT
+  * handler when the repair (if any) succeeded. */
+ if (update_lma)
+ rc = lfsck_update_lma(env, lfsck, target);
+ if (rc == 0)
+ rc = lfsck_exec_oit(env, lfsck, target);
+ }
 lfsck_object_put(env, target);
 if (rc != 0 && bk->lb_param & LPF_FAILOUT)
 RETURN(rc);
 lfsck->li_pos_current.lp_oit_cookie,
 lfsck->li_pos_current.lp_dir_cookie,
 PFID(&lfsck->li_pos_current.lp_dir_parent),
- cfs_curproc_pid());
+ current_pid());
 spin_lock(&lfsck->li_lock);
 thread_set_flags(thread, SVC_RUNNING);
 spin_unlock(&lfsck->li_lock);
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
 if (!cfs_list_empty(&lfsck->li_list_scan) ||
 cfs_list_empty(&lfsck->li_list_double_scan))
 lfsck->li_pos_current.lp_oit_cookie,
 lfsck->li_pos_current.lp_dir_cookie,
 PFID(&lfsck->li_pos_current.lp_dir_parent),
- cfs_curproc_pid(), rc);
+ current_pid(), rc);
 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
 rc = lfsck_post(&env, lfsck, rc);
noenv:
 spin_lock(&lfsck->li_lock);
 thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_broadcast(&thread->t_ctl_waitq);
+ wake_up_all(&thread->t_ctl_waitq);
 spin_unlock(&lfsck->li_lock);
 return rc;
}