+
+ lfsck->ml_time_last_checkpoint = cfs_time_current();
+ lfsck->ml_time_next_checkpoint = lfsck->ml_time_last_checkpoint +
+ cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ return 0;
+}
+
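+ /*
+  * Prepare the LFSCK instance for a (re)start: reset and prep every component
+  * on the scan list, position the otable-based iterator at the recorded start
+  * position, and, if the previous run stopped inside a directory, reopen that
+  * directory and resume its namespace-based iteration from the saved cookie.
+  */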
+static int mdd_lfsck_prep(struct lu_env *env, struct md_lfsck *lfsck)
+{
+ struct mdd_device *mdd = mdd_lfsck2mdd(lfsck);
+ struct mdd_object *obj = NULL;
+ struct dt_object *dt_obj;
+ struct lfsck_component *com;
+ struct lfsck_component *next;
+ struct lfsck_position *pos = NULL;
+ const struct dt_it_ops *iops =
+ &lfsck->ml_obj_oit->do_index_ops->dio_it;
+ struct dt_it *di;
+ int rc;
+ ENTRY;
+
+ LASSERT(lfsck->ml_obj_dir == NULL);
+ LASSERT(lfsck->ml_di_dir == NULL);
+
+ cfs_list_for_each_entry_safe(com, next, &lfsck->ml_list_scan, lc_link) {
+ com->lc_new_checked = 0;
+ if (lfsck->ml_bookmark_ram.lb_param & LPF_DRYRUN)
+ com->lc_journal = 0;
+
+ rc = com->lc_ops->lfsck_prep(env, com);
+ if (rc != 0)
+ RETURN(rc);
+
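+ /* Track the lowest non-zero start position among the components so
+  * the otable-based iteration below can resume from that point. */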
+ if ((pos == NULL) ||
+ (!mdd_lfsck_pos_is_zero(&com->lc_pos_start) &&
+ mdd_lfsck_pos_is_eq(pos, &com->lc_pos_start) > 0))
+ pos = &com->lc_pos_start;
+ }
+
+ /* Init otable-based iterator. */
+ if (pos == NULL) {
+ rc = iops->load(env, lfsck->ml_di_oit, 0);
+ GOTO(out, rc = (rc >= 0 ? 0 : rc));
+ }
+
+ rc = iops->load(env, lfsck->ml_di_oit, pos->lp_oit_cookie);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (fid_is_zero(&pos->lp_dir_parent))
+ GOTO(out, rc = 0);
+
+ /* Find the directory for the namespace-based traversal. */
+ obj = mdd_object_find(env, mdd, &pos->lp_dir_parent);
+ if (obj == NULL)
+ GOTO(out, rc = 0);
+ else if (IS_ERR(obj))
+ RETURN(PTR_ERR(obj));
+
+ /* XXX: remote objects will need more processing in the future. */
+ if (!mdd_object_exists(obj) || mdd_object_remote(obj) ||
+ unlikely(!S_ISDIR(mdd_object_type(obj))))
+ GOTO(out, rc = 0);
+
+ if (unlikely(mdd_is_dead_obj(obj)))
+ GOTO(out, rc = 0);
+
+ dt_obj = mdd_object_child(obj);
+ if (unlikely(!dt_try_as_dir(env, dt_obj)))
+ GOTO(out, rc = -ENOTDIR);
+
+ /* Init the namespace-based directory traversal. */
+ iops = &dt_obj->do_index_ops->dio_it;
+ di = iops->init(env, dt_obj, lfsck->ml_args_dir, BYPASS_CAPA);
+ if (IS_ERR(di))
+ GOTO(out, rc = PTR_ERR(di));
+
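+ /* Reposition at the saved directory cookie. If load() returns zero, step
+  * forward with next() to reach the first entry to process; a positive
+  * return is treated as an already-valid position. */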
+ rc = iops->load(env, di, pos->lp_dir_cookie);
+ if (rc == 0)
+ rc = iops->next(env, di);
+ else if (rc > 0)
+ rc = 0;
+
+ if (rc != 0) {
+ iops->put(env, di);
+ iops->fini(env, di);
+ GOTO(out, rc);
+ }
+
+ lfsck->ml_obj_dir = dt_obj;
+ spin_lock(&lfsck->ml_lock);
+ lfsck->ml_di_dir = di;
+ spin_unlock(&lfsck->ml_lock);
+ obj = NULL;
+
+ GOTO(out, rc = 0);
+
+out:
+ if (obj != NULL)
+ mdd_object_put(env, obj);
+
+ if (rc != 0)
+ return (rc > 0 ? 0 : rc);
+
+ mdd_lfsck_pos_fill(env, lfsck, &lfsck->ml_pos_current, false, false);
+ cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
+ rc = com->lc_ops->lfsck_checkpoint(env, com, true);
+ if (rc != 0)
+ break;
+ }
+
+ lfsck->ml_time_last_checkpoint = cfs_time_current();
+ lfsck->ml_time_next_checkpoint = lfsck->ml_time_last_checkpoint +
+ cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ return rc;
+}
+
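+ /*
+  * Called for each object returned by the otable-based iteration: run every
+  * scanning component's exec_oit callback and, if the object is a client
+  * visible directory and some component registered for directory scanning,
+  * open a directory iterator on it for the namespace-based pass.
+  */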
+static int mdd_lfsck_exec_oit(const struct lu_env *env, struct md_lfsck *lfsck,
+ struct mdd_object *obj)
+{
+ struct lfsck_component *com;
+ struct dt_object *dt_obj;
+ const struct dt_it_ops *iops;
+ struct dt_it *di;
+ int rc;
+ ENTRY;
+
+ LASSERT(lfsck->ml_obj_dir == NULL);
+
+ cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
+ rc = com->lc_ops->lfsck_exec_oit(env, com, obj);
+ if (rc != 0)
+ RETURN(rc);
+ }
+
+ if (!S_ISDIR(mdd_object_type(obj)) ||
+ cfs_list_empty(&lfsck->ml_list_dir))
+ RETURN(0);
+
+ rc = object_is_client_visible(env, mdd_lfsck2mdd(lfsck), obj);
+ if (rc <= 0)
+ GOTO(out, rc);
+
+ if (unlikely(mdd_is_dead_obj(obj)))
+ GOTO(out, rc = 0);
+
+ dt_obj = mdd_object_child(obj);
+ if (unlikely(!dt_try_as_dir(env, dt_obj)))
+ GOTO(out, rc = -ENOTDIR);
+
+ iops = &dt_obj->do_index_ops->dio_it;
+ di = iops->init(env, dt_obj, lfsck->ml_args_dir, BYPASS_CAPA);
+ if (IS_ERR(di))
+ GOTO(out, rc = PTR_ERR(di));
+
+ rc = iops->load(env, di, 0);
+ if (rc == 0)
+ rc = iops->next(env, di);
+ else if (rc > 0)
+ rc = 0;
+
+ if (rc != 0) {
+ iops->put(env, di);
+ iops->fini(env, di);
+ GOTO(out, rc);
+ }
+
+ mdd_object_get(obj);
+ lfsck->ml_obj_dir = dt_obj;
+ spin_lock(&lfsck->ml_lock);
+ lfsck->ml_di_dir = di;
+ spin_unlock(&lfsck->ml_lock);
+
+ GOTO(out, rc = 0);
+
+out:
+ if (rc < 0)
+ mdd_lfsck_fail(env, lfsck, false, false);
+ return (rc > 0 ? 0 : rc);
+}
+
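+ /*
+  * Called for each name entry found during the namespace-based directory
+  * scan: pass the entry to every scanning component in turn.
+  */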
+static int mdd_lfsck_exec_dir(const struct lu_env *env, struct md_lfsck *lfsck,
+ struct mdd_object *obj, struct lu_dirent *ent)
+{
+ struct lfsck_component *com;
+ int rc;
+
+ cfs_list_for_each_entry(com, &lfsck->ml_list_scan, lc_link) {
+ rc = com->lc_ops->lfsck_exec_dir(env, com, obj, ent);
+ if (rc != 0)
+ return rc;
+ }
+ return 0;
+}
+
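+ /*
+  * Finish the first-stage scan: record the final position, let every
+  * component run its post handling, and refresh the checkpoint timestamps.
+  */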
+static int mdd_lfsck_post(const struct lu_env *env, struct md_lfsck *lfsck,
+ int result)
+{
+ struct lfsck_component *com;
+ struct lfsck_component *next;
+ int rc;
+
+ mdd_lfsck_pos_fill(env, lfsck, &lfsck->ml_pos_current, true, true);
+ cfs_list_for_each_entry_safe(com, next, &lfsck->ml_list_scan, lc_link) {
+ rc = com->lc_ops->lfsck_post(env, com, result);
+ if (rc != 0)
+ return rc;
+ }
+
+ lfsck->ml_time_last_checkpoint = cfs_time_current();
+ lfsck->ml_time_next_checkpoint = lfsck->ml_time_last_checkpoint +
+ cfs_time_seconds(LFSCK_CHECKPOINT_INTERVAL);
+ return result;
+}
+
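+ /*
+  * Second-stage scan: run the double-scan handling of every component on the
+  * double-scan list (lc_journal is cleared under dryrun mode).
+  */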
+static int mdd_lfsck_double_scan(const struct lu_env *env,
+ struct md_lfsck *lfsck)
+{
+ struct lfsck_component *com;
+ struct lfsck_component *next;
+ int rc;
+
+ cfs_list_for_each_entry_safe(com, next, &lfsck->ml_list_double_scan,
+ lc_link) {
+ if (lfsck->ml_bookmark_ram.lb_param & LPF_DRYRUN)
+ com->lc_journal = 0;
+
+ rc = com->lc_ops->lfsck_double_scan(env, com);
+ if (rc != 0)
+ return rc;
+ }
+ return 0;
+}
+
+/* LFSCK engines */
+
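+ /*
+  * Namespace-based engine: walk the entries of the currently opened
+  * directory, look up each child by FID and feed it to the components via
+  * mdd_lfsck_exec_dir(), checkpointing and throttling along the way. When
+  * the iterator reports the directory is exhausted, it is closed unless the
+  * otable-based scan is already over.
+  */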
+static int mdd_lfsck_dir_engine(const struct lu_env *env,
+ struct md_lfsck *lfsck)
+{
+ struct mdd_thread_info *info = mdd_env_info(env);
+ struct mdd_device *mdd = mdd_lfsck2mdd(lfsck);
+ const struct dt_it_ops *iops =
+ &lfsck->ml_obj_dir->do_index_ops->dio_it;
+ struct dt_it *di = lfsck->ml_di_dir;
+ struct lu_dirent *ent = &info->mti_ent;
+ struct lu_fid *fid = &info->mti_fid;
+ struct lfsck_bookmark *bk = &lfsck->ml_bookmark_ram;
+ struct ptlrpc_thread *thread = &lfsck->ml_thread;
+ int rc;
+ ENTRY;
+
+ do {
+ struct mdd_object *child;
+
+ lfsck->ml_new_scanned++;
+ rc = iops->rec(env, di, (struct dt_rec *)ent,
+ lfsck->ml_args_dir);
+ if (rc != 0) {
+ mdd_lfsck_fail(env, lfsck, false, true);
+ if (bk->lb_param & LPF_FAILOUT)
+ RETURN(rc);
+ else
+ goto checkpoint;
+ }
+
+ mdd_lfsck_unpack_ent(ent);
+ if (ent->lde_attrs & LUDA_IGNORE)
+ goto checkpoint;
+
+ *fid = ent->lde_fid;
+ child = mdd_object_find(env, mdd, fid);
+ if (child == NULL) {
+ goto checkpoint;
+ } else if (IS_ERR(child)) {
+ mdd_lfsck_fail(env, lfsck, false, true);
+ if (bk->lb_param & LPF_FAILOUT)
+ RETURN(PTR_ERR(child));
+ else
+ goto checkpoint;
+ }
+
+ /* XXX: remote objects will need more processing in the future. */
+ if (mdd_object_exists(child) && !mdd_object_remote(child))
+ rc = mdd_lfsck_exec_dir(env, lfsck, child, ent);
+ mdd_object_put(env, child);
+ if (rc != 0 && bk->lb_param & LPF_FAILOUT)
+ RETURN(rc);
+
+checkpoint:
+ rc = mdd_lfsck_checkpoint(env, lfsck, false);
+ if (rc != 0 && bk->lb_param & LPF_FAILOUT)
+ RETURN(rc);
+
+ /* Rate control. */
+ mdd_lfsck_control_speed(lfsck);
+ if (unlikely(!thread_is_running(thread)))
+ RETURN(0);
+
+ rc = iops->next(env, di);
+ } while (rc == 0);
+
+ if (rc > 0 && !lfsck->ml_oit_over)
+ mdd_lfsck_close_dir(env, lfsck);
+
+ RETURN(rc);
+}
+
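+ /*
+  * Otable-based engine: the main first-stage loop. It first drains any open
+  * directory iterator, then advances the object-table iterator one object at
+  * a time, handing each existing local object to mdd_lfsck_exec_oit(), with
+  * periodic checkpoints and rate control. A positive return from the
+  * iterator marks the object table as fully scanned.
+  */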
+static int mdd_lfsck_oit_engine(const struct lu_env *env,
+ struct md_lfsck *lfsck)
+{
+ struct mdd_thread_info *info = mdd_env_info(env);
+ struct mdd_device *mdd = mdd_lfsck2mdd(lfsck);
+ const struct dt_it_ops *iops =
+ &lfsck->ml_obj_oit->do_index_ops->dio_it;
+ struct dt_it *di = lfsck->ml_di_oit;
+ struct lu_fid *fid = &info->mti_fid;
+ struct lfsck_bookmark *bk = &lfsck->ml_bookmark_ram;
+ struct ptlrpc_thread *thread = &lfsck->ml_thread;
+ int rc;
+ ENTRY;
+
+ do {
+ struct mdd_object *target;
+
+ if (lfsck->ml_di_dir != NULL) {
+ rc = mdd_lfsck_dir_engine(env, lfsck);
+ if (rc <= 0)
+ RETURN(rc);
+ }
+
+ if (unlikely(lfsck->ml_oit_over))
+ RETURN(1);
+
+ lfsck->ml_new_scanned++;
+ rc = iops->rec(env, di, (struct dt_rec *)fid, 0);
+ if (rc != 0) {
+ mdd_lfsck_fail(env, lfsck, true, true);
+ if (bk->lb_param & LPF_FAILOUT)
+ RETURN(rc);
+ else
+ goto checkpoint;
+ }
+
+ target = mdd_object_find(env, mdd, fid);
+ if (target == NULL) {
+ goto checkpoint;
+ } else if (IS_ERR(target)) {
+ mdd_lfsck_fail(env, lfsck, true, true);
+ if (bk->lb_param & LPF_FAILOUT)
+ RETURN(PTR_ERR(target));
+ else
+ goto checkpoint;
+ }
+
+ /* XXX: In fact, the low-layer otable-based iteration should not
+  *	return agent objects. But until LU-2646 is resolved, agent
+  *	objects need more processing here. */
+ if (mdd_object_exists(target) && !mdd_object_remote(target))
+ rc = mdd_lfsck_exec_oit(env, lfsck, target);
+ mdd_object_put(env, target);
+ if (rc != 0 && bk->lb_param & LPF_FAILOUT)
+ RETURN(rc);
+
+checkpoint:
+ rc = mdd_lfsck_checkpoint(env, lfsck, true);
+ if (rc != 0 && bk->lb_param & LPF_FAILOUT)
+ RETURN(rc);
+
+ /* Rate control. */
+ mdd_lfsck_control_speed(lfsck);
+
+ rc = iops->next(env, di);
+ if (rc > 0)
+ lfsck->ml_oit_over = 1;
+
+ if (unlikely(!thread_is_running(thread)))
+ RETURN(0);
+ } while (rc == 0 || lfsck->ml_di_dir != NULL);
+
+ RETURN(rc);