* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2013, 2016, Intel Corporation.
*/
/*
* lustre/lfsck/lfsck_engine.c
#include "lfsck_internal.h"
-static int lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie, __u16 *type)
+int lfsck_unpack_ent(struct lu_dirent *ent, __u64 *cookie, __u16 *type)
{
struct luda_type *lt;
int align = sizeof(*lt) - 1;
iops->put(env, di);
}
-static void lfsck_close_dir(const struct lu_env *env,
- struct lfsck_instance *lfsck)
-{
- struct dt_object *dir_obj = lfsck->li_obj_dir;
- const struct dt_it_ops *dir_iops = &dir_obj->do_index_ops->dio_it;
- struct dt_it *dir_di = lfsck->li_di_dir;
-
- lfsck_di_dir_put(env, lfsck);
- dir_iops->fini(env, dir_di);
- lfsck->li_obj_dir = NULL;
- lfsck_object_put(env, dir_obj);
-}
-
-static int lfsck_update_lma(const struct lu_env *env,
- struct lfsck_instance *lfsck, struct dt_object *obj)
-{
- struct lfsck_thread_info *info = lfsck_env_info(env);
- struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
- struct dt_device *dt = lfsck->li_bottom;
- struct lustre_mdt_attrs *lma = &info->lti_lma;
- struct lu_buf *buf;
- struct thandle *th;
- int fl;
- int rc;
- ENTRY;
-
- if (bk->lb_param & LPF_DRYRUN)
- RETURN(0);
-
- buf = lfsck_buf_get(env, info->lti_lma_old, LMA_OLD_SIZE);
- rc = dt_xattr_get(env, obj, buf, XATTR_NAME_LMA, BYPASS_CAPA);
- if (rc < 0) {
- if (rc != -ENODATA)
- RETURN(rc);
-
- fl = LU_XATTR_CREATE;
- lustre_lma_init(lma, lfsck_dto2fid(obj), LMAC_FID_ON_OST, 0);
- } else {
- if (rc != LMA_OLD_SIZE && rc != sizeof(struct lustre_mdt_attrs))
- RETURN(-EINVAL);
-
- fl = LU_XATTR_REPLACE;
- lustre_lma_swab(lma);
- lustre_lma_init(lma, lfsck_dto2fid(obj),
- lma->lma_compat | LMAC_FID_ON_OST,
- lma->lma_incompat);
- }
- lustre_lma_swab(lma);
-
- th = dt_trans_create(env, dt);
- if (IS_ERR(th))
- RETURN(PTR_ERR(th));
-
- buf = lfsck_buf_get(env, lma, sizeof(*lma));
- rc = dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th);
- if (rc != 0)
- GOTO(stop, rc);
-
- rc = dt_trans_start(env, dt, th);
- if (rc != 0)
- GOTO(stop, rc);
-
- rc = dt_xattr_set(env, obj, buf, XATTR_NAME_LMA, fl, th, BYPASS_CAPA);
-
- GOTO(stop, rc);
-
-stop:
- dt_trans_stop(env, dt, th);
- return rc;
-}
-
static int lfsck_parent_fid(const struct lu_env *env, struct dt_object *obj,
struct lu_fid *fid)
{
return -ENOTDIR;
return dt_lookup(env, obj, (struct dt_rec *)fid,
- (const struct dt_key *)"..", BYPASS_CAPA);
+ (const struct dt_key *)"..");
}
/**
struct lfsck_thread_info *info = lfsck_env_info(env);
struct lu_fid *fid = &info->lti_fid;
struct lu_seq_range *range = &info->lti_range;
- struct dt_device *dev = lfsck->li_bottom;
- struct seq_server_site *ss = lu_site2seq(dev->dd_lu_dev.ld_site);
- __u32 idx = lfsck_dev_idx(dev);
+ struct seq_server_site *ss = lfsck_dev_site(lfsck);
+ __u32 idx = lfsck_dev_idx(lfsck);
int depth = 0;
int rc = 0;
fld_range_set_mdt(range);
rc = fld_local_lookup(env, ss->ss_server_fld,
fid_seq(fid), range);
- if (rc != 0 || range->lsr_index != idx) {
+ if (rc != 0 || range->lsr_index != idx)
/* Current FID should NOT be for the input parameter
* @obj, because the lfsck_master_oit_engine() has
* filtered out agent object. So current FID is for
* So the ancestor is a remote directory. The input
* parameter @obj is local directory, and should be
* scanned under such case. */
- LASSERT(depth > 0);
-
return 1;
- }
/* normal FID on this target (locally) must be for the
* client-side visible object. */
return 1;
if (obj == NULL) {
- obj = lfsck_object_find(env, lfsck, fid);
+ obj = lfsck_object_find_bottom(env, lfsck, fid);
if (IS_ERR(obj))
return PTR_ERR(obj);
}
rc = dt_xattr_get(env, obj,
- lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK,
- BYPASS_CAPA);
+ lfsck_buf_get(env, NULL, 0), XATTR_NAME_LINK);
dt_read_unlock(env, obj);
if (rc >= 0)
GOTO(out, rc = 1);
return rc;
}
+/**
+ * Load the stripe LMV EA from the given directory object.
+ *
+ * If the object has no LMV EA (-ENODATA), it is a plain directory: just
+ * take a reference on it as lfsck->li_obj_dir and return. Otherwise
+ * allocate an lfsck_lmv, mark it as master (LMV_MAGIC, pre-allocating a
+ * slave record array whose size is clamped to the range
+ * [LFSCK_LMV_DEF_STRIPES, LFSCK_LMV_MAX_STRIPES]) or as slave, and
+ * attach it to the lfsck instance.
+ *
+ * \param[in] env	pointer to the thread context
+ * \param[in] lfsck	pointer to the lfsck instance
+ * \param[in] obj	the directory object to load the LMV EA from
+ *
+ * \retval		0 for success
+ * \retval		negative error number on failure
+ */
+static int lfsck_load_stripe_lmv(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ struct dt_object *obj)
+{
+ struct lmv_mds_md_v1 *lmv = &lfsck_env_info(env)->lti_lmv;
+ struct lfsck_lmv *llmv;
+ int rc;
+ ENTRY;
+
+ LASSERT(lfsck->li_obj_dir == NULL);
+ LASSERT(lfsck->li_lmv == NULL);
+
+ rc = lfsck_read_stripe_lmv(env, obj, lmv);
+ if (rc == -ENODATA) {
+ /* No LMV EA: a normal (non-striped) directory. */
+ lfsck->li_obj_dir = lfsck_object_get(obj);
+
+ RETURN(0);
+ }
+
+ if (rc < 0)
+ RETURN(rc);
+
+ OBD_ALLOC_PTR(llmv);
+ if (llmv == NULL)
+ RETURN(-ENOMEM);
+
+ if (lmv->lmv_magic == LMV_MAGIC) {
+ struct lfsck_slave_lmv_rec *lslr;
+ __u32 stripes;
+
+ llmv->ll_lmv_master = 1;
+ /* Clamp the slave record array size to a sane range. */
+ if (lmv->lmv_stripe_count < 1)
+ stripes = LFSCK_LMV_DEF_STRIPES;
+ else if (lmv->lmv_stripe_count > LFSCK_LMV_MAX_STRIPES)
+ stripes = LFSCK_LMV_MAX_STRIPES;
+ else
+ stripes = lmv->lmv_stripe_count;
+
+ OBD_ALLOC_LARGE(lslr, sizeof(*lslr) * stripes);
+ if (lslr == NULL) {
+ OBD_FREE_PTR(llmv);
+
+ RETURN(-ENOMEM);
+ }
+
+ llmv->ll_stripes_allocated = stripes;
+ llmv->ll_hash_type = LMV_HASH_TYPE_UNKNOWN;
+ llmv->ll_lslr = lslr;
+ } else {
+ llmv->ll_lmv_slave = 1;
+ }
+
+ lfsck->li_obj_dir = lfsck_object_get(obj);
+ llmv->ll_lmv = *lmv;
+ atomic_set(&llmv->ll_ref, 1);
+ lfsck->li_lmv = llmv;
+
+ RETURN(0);
+}
+
/* LFSCK wrap functions */
static void lfsck_fail(const struct lu_env *env, struct lfsck_instance *lfsck,
}
}
+/**
+ * Stop the current directory scan.
+ *
+ * Record the given result in the attached lfsck_lmv (if any), notify each
+ * component registered on the li_list_dir list via its lfsck_close_dir
+ * method, release the lfsck_lmv reference, then finalize the directory
+ * iterator and drop the reference on the directory object.
+ *
+ * \param[in] env	pointer to the thread context
+ * \param[in] lfsck	pointer to the lfsck instance
+ * \param[in] result	exit value stored in ll_exit_value when an
+ *			lfsck_lmv is attached
+ */
+void lfsck_close_dir(const struct lu_env *env,
+ struct lfsck_instance *lfsck, int result)
+{
+ struct lfsck_component *com;
+ ENTRY;
+
+ if (lfsck->li_lmv != NULL) {
+ lfsck->li_lmv->ll_exit_value = result;
+ if (lfsck->li_obj_dir != NULL) {
+ list_for_each_entry(com, &lfsck->li_list_dir,
+ lc_link_dir) {
+ com->lc_ops->lfsck_close_dir(env, com);
+ }
+ }
+
+ lfsck_lmv_put(env, lfsck->li_lmv);
+ lfsck->li_lmv = NULL;
+ }
+
+ if (lfsck->li_di_dir != NULL) {
+ const struct dt_it_ops *dir_iops;
+ struct dt_it *dir_di = lfsck->li_di_dir;
+
+ /* A live directory iterator implies a directory object. */
+ LASSERT(lfsck->li_obj_dir != NULL);
+
+ dir_iops = &lfsck->li_obj_dir->do_index_ops->dio_it;
+ /* Drop li_di_dir first, then finalize the iterator. */
+ lfsck_di_dir_put(env, lfsck);
+ dir_iops->fini(env, dir_di);
+ }
+
+ if (lfsck->li_obj_dir != NULL) {
+ struct dt_object *dir_obj = lfsck->li_obj_dir;
+
+ lfsck->li_obj_dir = NULL;
+ lfsck_object_put(env, dir_obj);
+ }
+
+ EXIT;
+}
+
+/**
+ * Start scanning the directory lfsck->li_obj_dir.
+ *
+ * Notify the components on the li_list_dir list via their lfsck_open_dir
+ * methods, initialize a directory iterator and position it at the given
+ * cookie. On success the iterator is published as lfsck->li_di_dir under
+ * li_lock and the resume cookie is stored in li_cookie_dir. On any
+ * non-zero result the partially opened state is torn down through
+ * lfsck_close_dir().
+ *
+ * \param[in] env	pointer to the thread context
+ * \param[in] lfsck	pointer to the lfsck instance
+ * \param[in] cookie	the iteration position to resume from
+ *
+ * \retval		positive number if the scan of this directory
+ *			is already finished
+ * \retval		0 for success
+ * \retval		negative error number on failure
+ */
+int lfsck_open_dir(const struct lu_env *env,
+ struct lfsck_instance *lfsck, __u64 cookie)
+{
+ struct dt_object *obj = lfsck->li_obj_dir;
+ struct dt_it *di = lfsck->li_di_dir;
+ struct lfsck_component *com;
+ const struct dt_it_ops *iops;
+ int rc = 0;
+ ENTRY;
+
+ LASSERT(obj != NULL);
+ LASSERT(di == NULL);
+
+ if (unlikely(!dt_try_as_dir(env, obj)))
+ GOTO(out, rc = -ENOTDIR);
+
+ list_for_each_entry(com, &lfsck->li_list_dir, lc_link_dir) {
+ rc = com->lc_ops->lfsck_open_dir(env, com);
+ if (rc != 0)
+ GOTO(out, rc);
+ }
+
+ iops = &obj->do_index_ops->dio_it;
+ di = iops->init(env, obj, lfsck->li_args_dir);
+ if (IS_ERR(di))
+ GOTO(out, rc = PTR_ERR(di));
+
+ rc = iops->load(env, di, cookie);
+ if (rc == -ENODATA)
+ /* No entry at the cookie: report the scan as done. */
+ rc = 1;
+ else if (rc == 0 || (rc > 0 && cookie > 0))
+ /* NOTE(review): load() > 0 presumably means positioned
+ * exactly on the cookie entry — skip past an already
+ * processed cookie; load() == 0 needs next() to reach
+ * the first valid entry. TODO confirm against the
+ * dt_it_ops contract. */
+ rc = iops->next(env, di);
+ else if (rc > 0)
+ rc = 0;
+
+ if (rc != 0) {
+ iops->put(env, di);
+ iops->fini(env, di);
+ } else {
+ /* Remember the resume position, then publish the
+ * iterator under li_lock. */
+ lfsck->li_cookie_dir = iops->store(env, di);
+ spin_lock(&lfsck->li_lock);
+ lfsck->li_di_dir = di;
+ spin_unlock(&lfsck->li_lock);
+ }
+
+ GOTO(out, rc);
+
+out:
+ if (rc != 0)
+ lfsck_close_dir(env, lfsck, rc);
+
+ return rc;
+}
+
static int lfsck_checkpoint(const struct lu_env *env,
struct lfsck_instance *lfsck)
{
int rc = 0;
int rc1 = 0;
- if (likely(cfs_time_beforeq(cfs_time_current(),
- lfsck->li_time_next_checkpoint)))
+ if (likely(ktime_get_seconds() <= lfsck->li_time_next_checkpoint))
return 0;
lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
rc1 = rc;
}
- lfsck->li_time_last_checkpoint = cfs_time_current();
+ lfsck->li_time_last_checkpoint = ktime_get_seconds();
lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
- cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ LFSCK_CHECKPOINT_INTERVAL;
return rc1 != 0 ? rc1 : rc;
}
struct lfsck_position *pos = NULL;
const struct dt_it_ops *iops =
&lfsck->li_obj_oit->do_index_ops->dio_it;
- struct dt_it *di;
int rc;
ENTRY;
/* Init otable-based iterator. */
if (pos == NULL) {
rc = iops->load(env, lfsck->li_di_oit, 0);
- if (rc > 0) {
+ if (rc > 0 || unlikely(rc == -ENODATA)) {
lfsck->li_oit_over = 1;
rc = 0;
}
}
rc = iops->load(env, lfsck->li_di_oit, pos->lp_oit_cookie);
- if (rc < 0)
- GOTO(out, rc);
- else if (rc > 0)
+ if (rc > 0 || unlikely(rc == -ENODATA))
lfsck->li_oit_over = 1;
+ else if (rc < 0)
+ GOTO(out, rc);
if (!lfsck->li_master || fid_is_zero(&pos->lp_dir_parent))
GOTO(out, rc = 0);
/* Find the directory for namespace-based traverse. */
- obj = lfsck_object_find(env, lfsck, &pos->lp_dir_parent);
+ obj = lfsck_object_find_bottom(env, lfsck, &pos->lp_dir_parent);
if (IS_ERR(obj))
RETURN(PTR_ERR(obj));
unlikely(!S_ISDIR(lfsck_object_type(obj))))
GOTO(out, rc = 0);
- if (unlikely(!dt_try_as_dir(env, obj)))
- GOTO(out, rc = -ENOTDIR);
-
- /* Init the namespace-based directory traverse. */
- iops = &obj->do_index_ops->dio_it;
- di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
- if (IS_ERR(di))
- GOTO(out, rc = PTR_ERR(di));
+ rc = lfsck_load_stripe_lmv(env, lfsck, obj);
+ if (rc == 0) {
+ /* For the master MDT-object of a striped directory,
+ * reset the iteration from the directory beginning. */
+ if (lfsck->li_lmv != NULL && lfsck->li_lmv->ll_lmv_master)
+ pos->lp_dir_cookie = 0;
- LASSERT(pos->lp_dir_cookie < MDS_DIR_END_OFF);
-
- rc = iops->load(env, di, pos->lp_dir_cookie);
- if ((rc == 0) || (rc > 0 && pos->lp_dir_cookie > 0))
- rc = iops->next(env, di);
- else if (rc > 0)
- rc = 0;
-
- if (rc != 0) {
- iops->put(env, di);
- iops->fini(env, di);
- GOTO(out, rc);
+ rc = lfsck_open_dir(env, lfsck, pos->lp_dir_cookie);
+ if (rc > 0)
+ /* The end of the directory. */
+ rc = 0;
}
- lfsck->li_obj_dir = lfsck_object_get(obj);
- lfsck->li_cookie_dir = iops->store(env, di);
- spin_lock(&lfsck->li_lock);
- lfsck->li_di_dir = di;
- spin_unlock(&lfsck->li_lock);
-
- GOTO(out, rc = 0);
+ GOTO(out, rc);
out:
if (obj != NULL)
lfsck_object_put(env, obj);
- if (rc < 0) {
+ if (rc != 0) {
+ lfsck_close_dir(env, lfsck, rc);
list_for_each_entry_safe(com, next, &lfsck->li_list_scan,
- lc_link)
+ lc_link) {
com->lc_ops->lfsck_post(env, com, rc, true);
+ }
return rc;
}
break;
}
- lfsck->li_time_last_checkpoint = cfs_time_current();
+ lfsck->li_time_last_checkpoint = ktime_get_seconds();
lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
- cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ LFSCK_CHECKPOINT_INTERVAL;
return rc;
}
struct lfsck_instance *lfsck, struct dt_object *obj)
{
struct lfsck_component *com;
- const struct dt_it_ops *iops;
- struct dt_it *di;
int rc;
ENTRY;
if (rc <= 0)
GOTO(out, rc);
- if (unlikely(!dt_try_as_dir(env, obj)))
- GOTO(out, rc = -ENOTDIR);
-
- iops = &obj->do_index_ops->dio_it;
- di = iops->init(env, obj, lfsck->li_args_dir, BYPASS_CAPA);
- if (IS_ERR(di))
- GOTO(out, rc = PTR_ERR(di));
-
- rc = iops->load(env, di, 0);
+ rc = lfsck_load_stripe_lmv(env, lfsck, obj);
if (rc == 0)
- rc = iops->next(env, di);
- else if (rc > 0)
- rc = 0;
-
- if (rc != 0) {
- iops->put(env, di);
- iops->fini(env, di);
- GOTO(out, rc);
- }
-
- lfsck->li_obj_dir = lfsck_object_get(obj);
- lfsck->li_cookie_dir = iops->store(env, di);
- spin_lock(&lfsck->li_lock);
- lfsck->li_di_dir = di;
- spin_unlock(&lfsck->li_lock);
+ rc = lfsck_open_dir(env, lfsck, 0);
- GOTO(out, rc = 0);
+ GOTO(out, rc);
out:
if (rc < 0)
lfsck_fail(env, lfsck, false);
- return (rc > 0 ? 0 : rc);
+
+ if (rc != 0)
+ lfsck_close_dir(env, lfsck, rc);
+
+ return rc > 0 ? 0 : rc;
}
static int lfsck_exec_dir(const struct lu_env *env,
struct lfsck_instance *lfsck,
+ struct lfsck_assistant_object *lso,
struct lu_dirent *ent, __u16 type)
{
struct lfsck_component *com;
int rc;
list_for_each_entry(com, &lfsck->li_list_scan, lc_link) {
- rc = com->lc_ops->lfsck_exec_dir(env, com, ent, type);
+ rc = com->lc_ops->lfsck_exec_dir(env, com, lso, ent, type);
if (rc != 0)
return rc;
}
return 0;
}
+static int lfsck_master_dir_engine(const struct lu_env *env,
+ struct lfsck_instance *lfsck);
+
static int lfsck_post(const struct lu_env *env, struct lfsck_instance *lfsck,
int result)
{
struct lfsck_component *com;
struct lfsck_component *next;
- int rc = 0;
- int rc1 = 0;
+ int rc = result;
lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
+ lfsck_close_dir(env, lfsck, result);
+
+ while (thread_is_running(&lfsck->li_thread) && rc > 0 &&
+ !list_empty(&lfsck->li_list_lmv)) {
+ struct lfsck_lmv_unit *llu;
+
+ spin_lock(&lfsck->li_lock);
+ llu = list_entry(lfsck->li_list_lmv.next,
+ struct lfsck_lmv_unit, llu_link);
+ list_del_init(&llu->llu_link);
+ spin_unlock(&lfsck->li_lock);
+
+ lfsck->li_lmv = &llu->llu_lmv;
+ lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
+ rc = lfsck_open_dir(env, lfsck, 0);
+ if (rc == 0) {
+ rc = lfsck_master_dir_engine(env, lfsck);
+ lfsck_close_dir(env, lfsck, result);
+ }
+ }
+
+ result = rc;
+
list_for_each_entry_safe(com, next, &lfsck->li_list_scan, lc_link) {
rc = com->lc_ops->lfsck_post(env, com, result, false);
if (rc != 0)
- rc1 = rc;
+ CDEBUG(D_LFSCK, "%s: lfsck_post at the component %u: "
+ "rc = %d\n", lfsck_lfsck2name(lfsck),
+ (__u32)com->lc_type, rc);
}
- lfsck->li_time_last_checkpoint = cfs_time_current();
+ lfsck->li_time_last_checkpoint = ktime_get_seconds();
lfsck->li_time_next_checkpoint = lfsck->li_time_last_checkpoint +
- cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ LFSCK_CHECKPOINT_INTERVAL;
/* Ignore some component post failure to make other can go ahead. */
return result;
(struct lu_dirent *)info->lti_key;
struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
struct ptlrpc_thread *thread = &lfsck->li_thread;
+ struct lfsck_assistant_object *lso = NULL;
int rc;
__u16 type;
ENTRY;
do {
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY2) &&
- cfs_fail_val > 0) {
- struct l_wait_info lwi;
-
- lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
- NULL, NULL);
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
-
- if (unlikely(!thread_is_running(thread))) {
- CDEBUG(D_LFSCK, "%s: scan dir exit for engine "
- "stop, parent "DFID", cookie "LPX64"\n",
- lfsck_lfsck2name(lfsck),
- PFID(lfsck_dto2fid(dir)),
- lfsck->li_cookie_dir);
- RETURN(0);
- }
+ if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY2, cfs_fail_val) &&
+ unlikely(!thread_is_running(thread))) {
+ CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
+ "parent "DFID", cookie %#llx\n",
+ lfsck_lfsck2name(lfsck),
+ PFID(lfsck_dto2fid(dir)), lfsck->li_cookie_dir);
+
+ GOTO(out, rc = 0);
}
lfsck->li_new_scanned++;
if (rc != 0) {
CDEBUG(D_LFSCK, "%s: scan dir failed at rec(), "
- "parent "DFID", cookie "LPX64": rc = %d\n",
+ "parent "DFID", cookie %#llx: rc = %d\n",
lfsck_lfsck2name(lfsck),
PFID(lfsck_dto2fid(dir)),
lfsck->li_cookie_dir, rc);
lfsck_fail(env, lfsck, true);
if (bk->lb_param & LPF_FAILOUT)
- RETURN(rc);
+ GOTO(out, rc);
else
goto checkpoint;
}
- if (ent->lde_attrs & LUDA_IGNORE &&
- strcmp(ent->lde_name, dotdot) != 0)
+ if (ent->lde_attrs & LUDA_IGNORE)
+ goto checkpoint;
+
+ /* skip dot entry. */
+ if (ent->lde_namelen == 1 && ent->lde_name[0] == '.')
goto checkpoint;
+ if (lso == NULL) {
+ lso = lfsck_assistant_object_init(env,
+ lfsck_dto2fid(dir), NULL,
+ lfsck->li_pos_current.lp_oit_cookie, true);
+ if (IS_ERR(lso)) {
+ if (bk->lb_param & LPF_FAILOUT)
+ RETURN(PTR_ERR(lso));
+
+ lso = NULL;
+ goto checkpoint;
+ }
+ }
+
/* The type in the @ent structure may have been overwritten,
* so we need to pass the @type parameter independently. */
- rc = lfsck_exec_dir(env, lfsck, ent, type);
+ rc = lfsck_exec_dir(env, lfsck, lso, ent, type);
if (rc != 0 && bk->lb_param & LPF_FAILOUT)
- RETURN(rc);
+ GOTO(out, rc);
checkpoint:
rc = lfsck_checkpoint(env, lfsck);
if (rc != 0 && bk->lb_param & LPF_FAILOUT)
- RETURN(rc);
+ GOTO(out, rc);
/* Rate control. */
lfsck_control_speed(lfsck);
if (unlikely(!thread_is_running(thread))) {
CDEBUG(D_LFSCK, "%s: scan dir exit for engine stop, "
- "parent "DFID", cookie "LPX64"\n",
+ "parent "DFID", cookie %#llx\n",
lfsck_lfsck2name(lfsck),
PFID(lfsck_dto2fid(dir)),
lfsck->li_cookie_dir);
- RETURN(0);
+ GOTO(out, rc = 0);
}
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_FATAL2)) {
spin_lock(&lfsck->li_lock);
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&lfsck->li_lock);
- RETURN(-EINVAL);
+ GOTO(out, rc = -EINVAL);
}
rc = iops->next(env, di);
+ if (rc < 0)
+ CDEBUG(D_LFSCK, "%s dir engine fail to locate next "
+ "for the directory "DFID": rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ PFID(&lfsck->li_pos_current.lp_dir_parent), rc);
} while (rc == 0);
if (rc > 0 && !lfsck->li_oit_over)
- lfsck_close_dir(env, lfsck);
+ lfsck_close_dir(env, lfsck, rc);
- RETURN(rc);
+ GOTO(out, rc);
+
+out:
+ if (lso != NULL)
+ lfsck_assistant_object_put(env, lso);
+
+ return rc;
}
/**
* registered LFSCK component(s)' API to perform related consistency
* verification.
*
- * It flushes related LFSCK tracing files to disk via making checkpoint
+ * It flushes related LFSCK trace files to disk via making checkpoint
* periodically. Then if the server crashed or the LFSCK is paused, the
* LFSCK can resume from the latest checkpoint.
*
struct lu_fid *fid = &info->lti_fid;
struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
struct ptlrpc_thread *thread = &lfsck->li_thread;
- struct dt_device *dev = lfsck->li_bottom;
- struct seq_server_site *ss = lu_site2seq(dev->dd_lu_dev.ld_site);
- __u32 idx = lfsck_dev_idx(dev);
+ struct seq_server_site *ss = lfsck_dev_site(lfsck);
+ __u32 idx = lfsck_dev_idx(lfsck);
int rc;
ENTRY;
do {
struct dt_object *target;
- bool update_lma = false;
if (lfsck->li_di_dir != NULL) {
rc = lfsck_master_dir_engine(env, lfsck);
if (unlikely(lfsck->li_oit_over))
RETURN(1);
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY1) &&
- cfs_fail_val > 0) {
- struct l_wait_info lwi;
-
- lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val),
- NULL, NULL);
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY1, cfs_fail_val) &&
+ unlikely(!thread_is_running(thread))) {
+ CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
+ "cookie %llu\n",
+ lfsck_lfsck2name(lfsck), iops->store(env, di));
- if (unlikely(!thread_is_running(thread))) {
- CDEBUG(D_LFSCK, "%s: OIT scan exit for engine "
- "stop, cookie "LPU64"\n",
- lfsck_lfsck2name(lfsck),
- iops->store(env, di));
- RETURN(0);
- }
+ RETURN(0);
}
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
RETURN(0);
lfsck->li_current_oit_processed = 1;
+
+ if (!list_empty(&lfsck->li_list_lmv)) {
+ struct lfsck_lmv_unit *llu;
+
+ spin_lock(&lfsck->li_lock);
+ llu = list_entry(lfsck->li_list_lmv.next,
+ struct lfsck_lmv_unit, llu_link);
+ list_del_init(&llu->llu_link);
+ spin_unlock(&lfsck->li_lock);
+
+ lfsck->li_lmv = &llu->llu_lmv;
+ lfsck->li_obj_dir = lfsck_object_get(llu->llu_obj);
+ rc = lfsck_open_dir(env, lfsck, 0);
+ if (rc == 0)
+ rc = lfsck_master_dir_engine(env, lfsck);
+
+ if (rc <= 0)
+ RETURN(rc);
+ }
+
lfsck->li_new_scanned++;
lfsck->li_pos_current.lp_oit_cookie = iops->store(env, di);
rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
goto checkpoint;
}
+ if (unlikely(!fid_is_sane(fid))) {
+ CDEBUG(D_LFSCK, "%s: OIT scan find invalid FID "DFID
+ ", skip it\n",
+ lfsck_lfsck2name(lfsck), PFID(fid));
+ goto checkpoint;
+ }
+
if (fid_is_idif(fid)) {
__u32 idx1 = fid_idif_ost_idx(fid);
LASSERT(!lfsck->li_master);
- /* It is an old format device, update the LMA. */
if (idx != idx1) {
struct ost_id *oi = &info->lti_oi;
+ if (unlikely(idx1 != 0)) {
+ CDEBUG(D_LFSCK, "%s: invalid IDIF "DFID
+ ", not match device index %u\n",
+ lfsck_lfsck2name(lfsck),
+ PFID(fid), idx);
+
+ goto checkpoint;
+ }
+
+ /* rebuild the IDIF with index to
+ * avoid double instances for the
+ * same object. */
fid_to_ostid(fid, oi);
ostid_to_fid(fid, oi, idx);
- update_lma = true;
}
} else if (!fid_is_norm(fid) && !fid_is_igif(fid) &&
!fid_is_last_id(fid) &&
}
}
- target = lfsck_object_find(env, lfsck, fid);
+ target = lfsck_object_find_bottom(env, lfsck, fid);
if (IS_ERR(target)) {
CDEBUG(D_LFSCK, "%s: OIT scan failed at find target "
- DFID", cookie "LPU64": rc = %d\n",
+ DFID", cookie %llu: rc = %d\n",
lfsck_lfsck2name(lfsck), PFID(fid),
iops->store(env, di), rc);
lfsck_fail(env, lfsck, true);
goto checkpoint;
}
- if (dt_object_exists(target)) {
- if (update_lma) {
- rc = lfsck_update_lma(env, lfsck, target);
- if (rc != 0)
- CDEBUG(D_LFSCK, "%s: fail to update "
- "LMA for "DFID": rc = %d\n",
- lfsck_lfsck2name(lfsck),
- PFID(lfsck_dto2fid(target)), rc);
- }
- if (rc == 0)
- rc = lfsck_exec_oit(env, lfsck, target);
- }
+ if (dt_object_exists(target))
+ rc = lfsck_exec_oit(env, lfsck, target);
+
lfsck_object_put(env, target);
if (rc != 0 && bk->lb_param & LPF_FAILOUT)
RETURN(rc);
lfsck->li_oit_over = 1;
else if (likely(rc == 0))
lfsck->li_current_oit_processed = 0;
+ else
+ CDEBUG(D_LFSCK, "%s oit engine fail to locate next at "
+ "%llu: rc = %d\n", lfsck_lfsck2name(lfsck),
+ iops->store(env, di), rc);
if (unlikely(!thread_is_running(thread))) {
CDEBUG(D_LFSCK, "%s: OIT scan exit for engine stop, "
- "cookie "LPU64"\n", lfsck_lfsck2name(lfsck),
+ "cookie %llu\n", lfsck_lfsck2name(lfsck),
iops->store(env, di));
RETURN(0);
}
int rc;
ENTRY;
+ spin_lock(&lfsck->li_lock);
+ lfsck->li_task = current;
+ spin_unlock(&lfsck->li_lock);
+
+ /* There will be some object verification during the LFSCK start,
+ * such as the subsequent lfsck_verify_lpf(). Trigger low layer
+ * OI scrub before that to handle the potential inconsistency. */
+ oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit);
+ if (IS_ERR(oit_di)) {
+ rc = PTR_ERR(oit_di);
+ CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
+ "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
+
+ GOTO(fini_args, rc);
+ }
+
if (lfsck->li_master &&
(!list_empty(&lfsck->li_list_scan) ||
!list_empty(&lfsck->li_list_double_scan))) {
lfsck_lfsck2name(lfsck), rc);
}
- oit_di = oit_iops->init(env, oit_obj, lfsck->li_args_oit, BYPASS_CAPA);
- if (IS_ERR(oit_di)) {
- rc = PTR_ERR(oit_di);
- CDEBUG(D_LFSCK, "%s: master engine fail to init iteration: "
- "rc = %d\n", lfsck_lfsck2name(lfsck), rc);
-
- GOTO(fini_args, rc);
- }
-
spin_lock(&lfsck->li_lock);
lfsck->li_di_oit = oit_di;
spin_unlock(&lfsck->li_lock);
GOTO(fini_oit, rc);
CDEBUG(D_LFSCK, "LFSCK entry: oit_flags = %#x, dir_flags = %#x, "
- "oit_cookie = "LPU64", dir_cookie = "LPX64", parent = "DFID
+ "oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID
", pid = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
lfsck->li_pos_checkpoint.lp_oit_cookie,
lfsck->li_pos_checkpoint.lp_dir_cookie,
current_pid());
spin_lock(&lfsck->li_lock);
+ if (unlikely(!thread_is_starting(thread))) {
+ spin_unlock(&lfsck->li_lock);
+ GOTO(fini_oit, rc = 0);
+ }
+
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&lfsck->li_lock);
wake_up_all(&thread->t_ctl_waitq);
else
rc = 1;
+ lfsck_pos_fill(env, lfsck, &lfsck->li_pos_checkpoint, false);
CDEBUG(D_LFSCK, "LFSCK exit: oit_flags = %#x, dir_flags = %#x, "
- "oit_cookie = "LPU64", dir_cookie = "LPX64", parent = "DFID
+ "oit_cookie = %llu, dir_cookie = %#llx, parent = "DFID
", pid = %d, rc = %d\n", lfsck->li_args_oit, lfsck->li_args_dir,
lfsck->li_pos_checkpoint.lp_oit_cookie,
lfsck->li_pos_checkpoint.lp_dir_cookie,
if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
rc = lfsck_post(env, lfsck, rc);
-
- if (lfsck->li_di_dir != NULL)
- lfsck_close_dir(env, lfsck);
+ else
+ lfsck_close_dir(env, lfsck, rc);
fini_oit:
lfsck_di_oit_put(env, lfsck);
fini_args:
spin_lock(&lfsck->li_lock);
thread_set_flags(thread, SVC_STOPPED);
+ lfsck->li_task = NULL;
spin_unlock(&lfsck->li_lock);
wake_up_all(&thread->t_ctl_waitq);
lfsck_thread_args_fini(lta);
lad->lad_touch_gen++;
memset(lr, 0, sizeof(*lr));
- lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
lr->lr_event = LE_QUERY;
lr->lr_active = com->lc_type;
+
+ memset(laia, 0, sizeof(*laia));
laia->laia_com = com;
laia->laia_lr = lr;
- laia->laia_shared = 0;
if (!list_empty(&lad->lad_mdt_phase1_list)) {
ltds = &lfsck->li_mdt_descs;
}
spin_unlock(<ds->ltd_lock);
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
if (rc < 0) {
ptlrpc_set_destroy(set);
RETURN(rc);
}
/**
- * Notify the LFSCK event to the instatnces on remote servers.
+ * Notify the LFSCK event to the instances on remote servers.
*
* The LFSCK assistant thread notifies the LFSCK instances on other
* servers (MDT/OST) about some events, such as start new scanning,
if (set == NULL)
RETURN(-ENOMEM);
- lr->lr_index = lfsck_dev_idx(lfsck->li_bottom);
+ lr->lr_index = lfsck_dev_idx(lfsck);
lr->lr_active = com->lc_type;
+
+ memset(laia, 0, sizeof(*laia));
laia->laia_com = com;
laia->laia_lr = lr;
- laia->laia_shared = 0;
switch (lr->lr_event) {
case LE_START:
if (com->lc_type != LFSCK_TYPE_LAYOUT)
goto next;
- lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN |
- LSV_ASYNC_WINDOWS | LSV_CREATE_OSTOBJ;
+ lr->lr_valid = LSV_SPEED_LIMIT | LSV_ERROR_HANDLE | LSV_DRYRUN;
lr->lr_speed = bk->lb_speed_limit;
lr->lr_version = bk->lb_version;
lr->lr_param |= bk->lb_param;
LASSERT(ltd != NULL);
laia->laia_ltd = ltd;
- ltd->ltd_layout_done = 0;
rc = lfsck_async_request(env, ltd->ltd_exp, lr, set,
lfsck_async_interpret_common,
laia, LFSCK_NOTIFY);
up_read(<ds->ltd_rw_sem);
/* Sync up */
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
if (rc < 0) {
ptlrpc_set_destroy(set);
RETURN(rc);
spin_lock(<ds->ltd_lock);
if (com->lc_type == LFSCK_TYPE_LAYOUT) {
cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
- ltd = LTD_TGT(ltds, idx);
+ ltd = lfsck_ltd2tgt(ltds, idx);
LASSERT(ltd != NULL);
if (!list_empty(<d->ltd_layout_list))
}
} else {
cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
- ltd = LTD_TGT(ltds, idx);
+ ltd = lfsck_ltd2tgt(ltds, idx);
LASSERT(ltd != NULL);
if (!list_empty(<d->ltd_namespace_list))
}
spin_unlock(<ds->ltd_lock);
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
if (rc < 0) {
ptlrpc_set_destroy(set);
RETURN(rc);
list = <d->ltd_layout_list;
gen = <d->ltd_layout_gen;
} else {
+ struct lfsck_namespace *ns = com->lc_file_ram;
+
ltd = list_entry(lad->lad_mdt_list.next,
struct lfsck_tgt_desc,
ltd_namespace_list);
list = <d->ltd_namespace_list;
gen = <d->ltd_namespace_gen;
+ lr->lr_flags2 = ns->ln_flags & ~LF_INCOMPLETE;
}
if (*gen == lad->lad_touch_gen)
*gen = lad->lad_touch_gen;
list_move_tail(list, &lad->lad_mdt_list);
+ if (ltd->ltd_synced_failures)
+ continue;
+
atomic_inc(<d->ltd_ref);
laia->laia_ltd = ltd;
spin_unlock(<ds->ltd_lock);
break;
}
- rc1 = ptlrpc_set_wait(set);
+ rc1 = ptlrpc_set_wait(env, set);
ptlrpc_set_destroy(set);
RETURN(rc != 0 ? rc : rc1);
struct l_wait_info lwi = { 0 };
int rc = 0;
int rc1 = 0;
+ int rc2;
ENTRY;
CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread start\n",
}
spin_lock(&lad->lad_lock);
+ lad->lad_task = current;
thread_set_flags(athread, SVC_RUNNING);
spin_unlock(&lad->lad_lock);
wake_up_all(&mthread->t_ctl_waitq);
if (unlikely(lad->lad_exit ||
!thread_is_running(mthread)))
- GOTO(cleanup1, rc = lad->lad_post_result);
+ GOTO(cleanup, rc = lad->lad_post_result);
lar = list_entry(lad->lad_req_list.next,
struct lfsck_assistant_req,
/* Wake up the main engine thread only when the list
* is empty or half of the prefetched items have been
* handled to avoid too frequent thread schedule. */
- if (lad->lad_prefetched == 0 ||
- (bk->lb_async_windows != 0 &&
- bk->lb_async_windows / 2 ==
- lad->lad_prefetched))
+ if (lad->lad_prefetched <= (bk->lb_async_windows / 2))
wakeup = true;
spin_unlock(&lad->lad_lock);
if (wakeup)
lao->la_req_fini(env, lar);
if (rc < 0 && bk->lb_param & LPF_FAILOUT)
- GOTO(cleanup1, rc);
+ GOTO(cleanup, rc);
}
l_wait_event(athread->t_ctl_waitq,
&lwi);
if (unlikely(lad->lad_exit))
- GOTO(cleanup1, rc = lad->lad_post_result);
+ GOTO(cleanup, rc = lad->lad_post_result);
if (!list_empty(&lad->lad_req_list))
continue;
lfsck_lfsck2name(lfsck), lad->lad_name);
if (unlikely(lad->lad_exit))
- GOTO(cleanup1, rc = lad->lad_post_result);
+ GOTO(cleanup, rc = lad->lad_post_result);
lad->lad_to_post = 0;
LASSERT(lad->lad_post_result > 0);
+ /* Wakeup the master engine to go ahead. */
+ wake_up_all(&mthread->t_ctl_waitq);
+
memset(lr, 0, sizeof(*lr));
lr->lr_event = LE_PHASE1_DONE;
lr->lr_status = lad->lad_post_result;
rc = lfsck_assistant_notify_others(env, com, lr);
- if (rc != 0)
- CDEBUG(D_LFSCK, "%s: LFSCK assistant failed to "
- "notify others for %s post: rc = %d\n",
- lfsck_lfsck2name(lfsck),
- lad->lad_name, rc);
- /* Wakeup the master engine to go ahead. */
- wake_up_all(&mthread->t_ctl_waitq);
+ CDEBUG(D_LFSCK, "%s: LFSCK assistant notified "
+ "others for %s post: rc = %d\n",
+ lfsck_lfsck2name(lfsck),
+ lad->lad_name, rc);
}
if (lad->lad_to_double_scan) {
com->lc_new_checked = 0;
com->lc_new_scanned = 0;
- com->lc_time_last_checkpoint = cfs_time_current();
+ com->lc_time_last_checkpoint = ktime_get_seconds();
com->lc_time_next_checkpoint =
com->lc_time_last_checkpoint +
- cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ LFSCK_CHECKPOINT_INTERVAL;
+
+ CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before "
+ "the second-stage scaning\n",
+ lfsck_lfsck2name(lfsck));
/* Flush async updates before handling orphan. */
- dt_sync(env, lfsck->li_next);
+ rc2 = dt_sync(env, lfsck->li_next);
CDEBUG(D_LFSCK, "%s: LFSCK assistant phase2 "
- "scan start\n", lfsck_lfsck2name(lfsck));
+ "scan start, synced: rc = %d\n",
+ lfsck_lfsck2name(lfsck), rc2);
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_DOUBLESCAN))
- GOTO(cleanup2, rc = 0);
+ GOTO(cleanup, rc = 0);
while (lad->lad_in_double_scan) {
rc = lfsck_assistant_query_others(env, com);
goto p2_next;
if (rc < 0)
- GOTO(cleanup2, rc);
+ GOTO(cleanup, rc);
/* Pull LFSCK status on related targets once
* per 30 seconds if we are not notified. */
if (unlikely(lad->lad_exit ||
!thread_is_running(mthread)))
- GOTO(cleanup2, rc = 0);
+ GOTO(cleanup, rc = 0);
if (rc == -ETIMEDOUT)
continue;
if (rc < 0)
- GOTO(cleanup2, rc);
+ GOTO(cleanup, rc);
p2_next:
rc = lao->la_handler_p2(env, com);
if (rc != 0)
- GOTO(cleanup2, rc);
+ GOTO(cleanup, rc);
if (unlikely(lad->lad_exit ||
!thread_is_running(mthread)))
- GOTO(cleanup2, rc = 0);
+ GOTO(cleanup, rc = 0);
}
}
}
-cleanup1:
+cleanup:
/* Cleanup the unfinished requests. */
spin_lock(&lad->lad_lock);
if (rc < 0)
if (lad->lad_exit && lad->lad_post_result <= 0)
lao->la_fill_pos(env, com, &lfsck->li_pos_checkpoint);
+ thread_set_flags(athread, SVC_STOPPING);
while (!list_empty(&lad->lad_req_list)) {
lar = list_entry(lad->lad_req_list.next,
struct lfsck_assistant_req,
}
spin_unlock(&lad->lad_lock);
- LASSERTF(lad->lad_prefetched == 0, "unmatched prefeteched objs %d\n",
- lad->lad_prefetched);
-
-cleanup2:
memset(lr, 0, sizeof(*lr));
if (rc > 0) {
lr->lr_event = LE_PHASE2_DONE;
rc = rc1;
}
+ CDEBUG(D_LFSCK, "%s: LFSCK assistant sync before exit\n",
+ lfsck_lfsck2name(lfsck));
+
/* Flush async updates before exit. */
- dt_sync(env, lfsck->li_next);
+ rc2 = dt_sync(env, lfsck->li_next);
+
+ CDEBUG(D_LFSCK, "%s: LFSCK assistant synced before exit: rc = %d\n",
+ lfsck_lfsck2name(lfsck), rc2);
/* Under force exit case, some requests may be just freed without
* verification, those objects should be re-handled when next run.
- * So not update the on-disk tracing file under such case. */
+ * So do not update the on-disk trace file in that case. */
if (lad->lad_in_double_scan) {
if (!lad->lad_exit)
rc1 = lao->la_double_scan_result(env, com, rc);
spin_lock(&lad->lad_lock);
lad->lad_assistant_status = (rc1 != 0 ? rc1 : rc);
thread_set_flags(athread, SVC_STOPPED);
- wake_up_all(&mthread->t_ctl_waitq);
+ lad->lad_task = NULL;
spin_unlock(&lad->lad_lock);
CDEBUG(D_LFSCK, "%s: %s LFSCK assistant thread exit: rc = %d\n",
lad->lad_assistant_status);
lfsck_thread_args_fini(lta);
+ wake_up_all(&mthread->t_ctl_waitq);
return rc;
}