* GPL HEADER END
*/
/*
- * Copyright (c) 2013, 2014, Intel Corporation.
+ * Copyright (c) 2013, 2015, Intel Corporation.
*/
/*
* lustre/lfsck/lfsck_engine.c
iops->put(env, di);
}
-static int lfsck_update_lma(const struct lu_env *env,
- struct lfsck_instance *lfsck, struct dt_object *obj)
-{
- struct lfsck_thread_info *info = lfsck_env_info(env);
- struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
- struct dt_device *dt = lfsck->li_bottom;
- struct lustre_mdt_attrs *lma = &info->lti_lma;
- struct lu_buf *buf;
- struct thandle *th;
- int fl;
- int rc;
- ENTRY;
-
- if (bk->lb_param & LPF_DRYRUN)
- RETURN(0);
-
- buf = lfsck_buf_get(env, info->lti_lma_old, LMA_OLD_SIZE);
- rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LMA, BYPASS_CAPA);
- if (rc < 0) {
- if (rc != -ENODATA)
- RETURN(rc);
-
- fl = LU_XATTR_CREATE;
- lustre_lma_init(lma, lfsck_dto2fid(obj), LMAC_FID_ON_OST, 0);
- } else {
- if (rc != LMA_OLD_SIZE && rc != sizeof(struct lustre_mdt_attrs))
- RETURN(-EINVAL);
-
- fl = LU_XATTR_REPLACE;
- lustre_lma_swab(lma);
- lustre_lma_init(lma, lfsck_dto2fid(obj),
- lma->lma_compat | LMAC_FID_ON_OST,
- lma->lma_incompat);
- }
- lustre_lma_swab(lma);
-
- th = dt_trans_create(env, dt);
- if (IS_ERR(th))
- RETURN(PTR_ERR(th));
-
- buf = lfsck_buf_get(env, lma, sizeof(*lma));
- rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th);
- if (rc != 0)
- GOTO(stop, rc);
-
- rc = dt_trans_start(env, dt, th);
- if (rc != 0)
- GOTO(stop, rc);
-
- rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th, BYPASS_CAPA);
-
- GOTO(stop, rc);
-
-stop:
- dt_trans_stop(env, dt, th);
- return rc;
-}
-
static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
struct lu_fid *fid)
{
return -ENOTDIR;
return dt_lookup(env, obj, (struct dt_rec *)fid,
- (const struct dt_key *)"..", BYPASS_CAPA);
+ (const struct dt_key *)"..");
}
/**
struct lfsck_thread_info *info = lfsck_env_info(env);
struct lu_fid *fid = &info->lti_fid;
struct lu_seq_range *range = &info->lti_range;
- struct dt_device *dev = lfsck->li_bottom;
- struct seq_server_site *ss = lu_site2seq(dev->dd_lu_dev.ld_site);
- __u32 idx = lfsck_dev_idx(dev);
+ struct seq_server_site *ss = lfsck_dev_site(lfsck);
+ __u32 idx = lfsck_dev_idx(lfsck);
int depth = 0;
int rc = 0;
return 1;
if (obj == NULL) {
- obj = lfsck_object_find(env, lfsck, fid);
+ obj = lfsck_object_find_bottom(env, lfsck, fid);
if (IS_ERR(obj))
return PTR_ERR(obj);
}
rc = dt_xattr_get(env, obj,
- lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK,
- BYPASS_CAPA);
+ lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK);
dt_read_unlock(env, obj);
if (rc >= 0)
GOTO(out, rc = 1);
RETURN(-ENOMEM);
}
- /* Find the object against the bottom device. */
- obj = lfsck_object_find_by_dev(env, lfsck->li_bottom,
- lfsck_dto2fid(obj));
- if (IS_ERR(obj)) {
- OBD_FREE_LARGE(lslr, sizeof(*lslr) * stripes);
- OBD_FREE_PTR(llmv);
-
- RETURN(PTR_ERR(obj));
- }
-
llmv->ll_stripes_allocated = stripes;
llmv->ll_hash_type = LMV_HASH_TYPE_UNKNOWN;
llmv->ll_lslr = lslr;
- lfsck->li_obj_dir = obj;
} else {
llmv->ll_lmv_slave = 1;
- lfsck->li_obj_dir = lfsck_object_get(obj);
}
+ lfsck->li_obj_dir = lfsck_object_get(obj);
llmv->ll_lmv = *lmv;
atomic_set(&llmv->ll_ref, 1);
lfsck->li_lmv = llmv;
}
iops = &obj->do_index_ops->dio_it;
- di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
+ di = iops->init(env, obj, lfsck->li_args_dir);
if (IS_ERR(di))
GOTO(out, rc = PTR_ERR(di));
GOTO(out, rc = 0);
/* Find the directory for namespace-based traverse. */
- obj = lfsck_object_find(env, lfsck, &pos->lp_dir_parent);
+ obj = lfsck_object_find_bottom(env, lfsck, &pos->lp_dir_parent);
if (IS_ERR(obj))
RETURN(PTR_ERR(obj));
static int lfsck_exec_dir(const struct lu_env *env,
struct lfsck_instance *lfsck,
+ struct lfsck_assistant_object *lso,
struct lu_dirent *ent, __u16 type)
{
struct lfsck_component *com;
int rc;
list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
- rc = com->lc_ops->lfsck_exec_dir(env, com, ent, type);
+ rc = com->lc_ops->lfsck_exec_dir(env, com, lso, ent, type);
if (rc != 0)
return rc;
}
(struct lu_dirent *)info->lti_key;
struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
struct ptlrpc_thread *thread = &lfsck->li_thread;
+ struct lfsck_assistant_object *lso = NULL;
int rc;
__u16 type;
ENTRY;
lfsck_lfsck2name(lfsck),
PFID(lfsck_dto2fid(dir)), lfsck->li_cookie_dir);
- RETURN(0);
+ GOTO(out, rc = 0);
}
lfsck->li_new_scanned++;
lfsck->li_cookie_dir, rc);
lfsck_fail(env, lfsck, true);
if (bk->lb_param & LPF_FAILOUT)
- RETURN(rc);
+ GOTO(out, rc);
else
goto checkpoint;
}
- if (ent->lde_attrs & LUDA_IGNORE &&
- strcmp(ent->lde_name, dotdot) != 0)
+ if (ent->lde_attrs & LUDA_IGNORE)
goto checkpoint;
+ /* skip dot entry. */
+ if (ent->lde_namelen == 1 && ent->lde_name[0] == '.')
+ goto checkpoint;
+
+ if (lso == NULL) {
+ lso = lfsck_assistant_object_init(env,
+ lfsck_dto2fid(dir), NULL,
+ lfsck->li_pos_current.lp_oit_cookie, true);
+ if (IS_ERR(lso)) {
+ if (bk->lb_param & LPF_FAILOUT)
+ RETURN(PTR_ERR(lso));
+
+ lso = NULL;
+ goto checkpoint;
+ }
+ }
+
/* The type in the @ent structure may has been overwritten,
* so we need to pass the @type parameter independently. */
- rc = lfsck_exec_dir(env, lfsck, ent, type);
+ rc = lfsck_exec_dir(env, lfsck, lso, ent, type);
if (rc != 0 && bk->lb_param & LPF_FAILOUT)
- RETURN(rc);
+ GOTO(out, rc);
checkpoint:
rc = lfsck_checkpoint(env, lfsck);
if (rc != 0 && bk->lb_param & LPF_FAILOUT)
- RETURN(rc);
+ GOTO(out, rc);
/* Rate control. */
lfsck_control_speed(lfsck);
lfsck_lfsck2name(lfsck),
PFID(lfsck_dto2fid(dir)),
lfsck->li_cookie_dir);
- RETURN(0);
+ GOTO(out, rc = 0);
}
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL2)) {
spin_lock(&lfsck->li_lock);
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&lfsck->li_lock);
- RETURN(-EINVAL);
+ GOTO(out, rc = -EINVAL);
}
rc = iops->next(env, di);
if (rc > 0 && !lfsck->li_oit_over)
lfsck_close_dir(env, lfsck, rc);
- RETURN(rc);
+ GOTO(out, rc);
+
+out:
+ if (lso != NULL)
+ lfsck_assistant_object_put(env, lso);
+
+ return rc;
}
/**
struct lu_fid *fid = &info->lti_fid;
struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
struct ptlrpc_thread *thread = &lfsck->li_thread;
- struct dt_device *dev = lfsck->li_bottom;
- struct seq_server_site *ss = lu_site2seq(dev->dd_lu_dev.ld_site);
- __u32 idx = lfsck_dev_idx(dev);
+ struct seq_server_site *ss = lfsck_dev_site(lfsck);
+ __u32 idx = lfsck_dev_idx(lfsck);
int rc;
ENTRY;
do {
struct dt_object *target;
- bool update_lma = false;
if (lfsck->li_di_dir != NULL) {
rc = lfsck_master_dir_engine(env, lfsck);
goto checkpoint;
}
+ if (unlikely(!fid_is_sane(fid))) {
+ CDEBUG(D_LFSCK, "%s: OIT scan find invalid FID "DFID
+ ", skip it\n",
+ lfsck_lfsck2name(lfsck), PFID(fid));
+ goto checkpoint;
+ }
+
if (fid_is_idif(fid)) {
__u32 idx1 = fid_idif_ost_idx(fid);
LASSERT(!lfsck->li_master);
- /* It is an old format device, update the LMA. */
if (idx != idx1) {
struct ost_id *oi = &info->lti_oi;
+ if (unlikely(idx1 != 0)) {
+ CDEBUG(D_LFSCK, "%s: invalid IDIF "DFID
+ ", not match device index %u\n",
+ lfsck_lfsck2name(lfsck),
+ PFID(fid), idx);
+
+ goto checkpoint;
+ }
+
+ /* rebuild the IDIF with index to
+ * avoid double instances for the
+ * same object. */
fid_to_ostid(fid, oi);
ostid_to_fid(fid, oi, idx);
- update_lma = true;
}
} else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
!fid_is_last_id(fid) &&
}
}
- target = lfsck_object_find(env, lfsck, fid);
+ target = lfsck_object_find_bottom(env, lfsck, fid);
if (IS_ERR(target)) {
CDEBUG(D_LFSCK, "%s: OIT scan failed at find target "
DFID", cookie "LPU64": rc = %d\n",
goto checkpoint;
}
- if (dt_object_exists(target)) {
- if (update_lma) {
- rc = lfsck_update_lma(env, lfsck, target);
- if (rc != 0)
- CDEBUG(D_LFSCK, "%s: fail to update "
- "LMA for "DFID": rc = %d\n",
- lfsck_lfsck2name(lfsck),
- PFID(lfsck_dto2fid(target)), rc);
- }
- if (rc == 0)
- rc = lfsck_exec_oit(env, lfsck, target);
- }
+ if (dt_object_exists(target))
+ rc = lfsck_exec_oit(env, lfsck, target);
+
lfsck_object_put(env, target);
if (rc != 0 && bk->lb_param & LPF_FAILOUT)
RETURN(rc);
int rc;
ENTRY;
+	/* Some objects will be verified during the LFSCK start, such as
+	 * by the subsequent lfsck_verify_lpf(). Trigger the low layer OI
+	 * scrub before that to handle any potential inconsistency. */
+ oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit);
+ if (IS_ERR(oit_di)) {
+ rc = PTR_ERR(oit_di);
+ CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
+ "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
+
+ GOTO(fini_args, rc);
+ }
+
if (lfsck->li_master &&
(!list_empty(&lfsck->li_list_scan) ||
!list_empty(&lfsck->li_list_double_scan))) {
lfsck_lfsck2name(lfsck), rc);
}
- oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit, BYPASS_CAPA);
- if (IS_ERR(oit_di)) {
- rc = PTR_ERR(oit_di);
- CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
- "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
-
- GOTO(fini_args, rc);
- }
-
spin_lock(&lfsck->li_lock);
lfsck->li_di_oit = oit_di;
spin_unlock(&lfsck->li_lock);
lad->lad_touch_gen++;
memset(lr, 0, sizeof(*lr));
- lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
lr->lr_event = LE_QUERY;
lr->lr_active = com->lc_type;
laia->laia_com = com;
if (set == NULL)
RETURN(-ENOMEM);
- lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+ lr->lr_index = lfsck_dev_idx(lfsck);
lr->lr_active = com->lc_type;
laia->laia_com = com;
laia->laia_lr = lr;
spin_lock(<ds->ltd_lock);
if (com->lc_type == LFSCK_TYPE_LAYOUT) {
cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
- ltd = LTD_TGT(ltds, idx);
+ ltd = lfsck_ltd2tgt(ltds, idx);
LASSERT(ltd != NULL);
if (!list_empty(<d->ltd_layout_list))
}
} else {
cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
- ltd = LTD_TGT(ltds, idx);
+ ltd = lfsck_ltd2tgt(ltds, idx);
LASSERT(ltd != NULL);
if (!list_empty(<d->ltd_namespace_list))
}
spin_lock(&lad->lad_lock);
+ lad->lad_task = current;
thread_set_flags(athread, SVC_RUNNING);
spin_unlock(&lad->lad_lock);
wake_up_all(&mthread->t_ctl_waitq);
/* Wake up the main engine thread only when the list
* is empty or half of the prefetched items have been
* handled to avoid too frequent thread schedule. */
- if (lad->lad_prefetched == 0 ||
- (bk->lb_async_windows != 0 &&
- bk->lb_async_windows / 2 ==
- lad->lad_prefetched))
+ if (lad->lad_prefetched <= (bk->lb_async_windows / 2))
wakeup = true;
spin_unlock(&lad->lad_lock);
if (wakeup)
lad->lad_to_post = 0;
LASSERT(lad->lad_post_result > 0);
+ /* Wakeup the master engine to go ahead. */
+ wake_up_all(&mthread->t_ctl_waitq);
+
memset(lr, 0, sizeof(*lr));
lr->lr_event = LE_PHASE1_DONE;
lr->lr_status = lad->lad_post_result;
rc = lfsck_assistant_notify_others(env, com, lr);
- if (rc != 0)
- CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to "
- "notify others for %s post: rc = %d\n",
- lfsck_lfsck2name(lfsck),
- lad->lad_name, rc);
- /* Wakeup the master engine to go ahead. */
- wake_up_all(&mthread->t_ctl_waitq);
+ CDEBUG(D_LFSCK, "%s: LFSCK assistant notified "
+ "others for %s post: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ lad->lad_name, rc);
}
if (lad->lad_to_double_scan) {
lad->lad_assistant_status = (rc1 != 0 ? rc1 : rc);
thread_set_flags(athread, SVC_STOPPED);
wake_up_all(&mthread->t_ctl_waitq);
+ lad->lad_task = NULL;
spin_unlock(&lad->lad_lock);
CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread exit: rc = %d\n",