struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
down_write(&com->lc_sem);
-
lo->ll_run_time_phase2 += cfs_duration_sec(cfs_time_current() +
HALF_SEC - lfsck->li_time_last_checkpoint);
lo->ll_time_last_checkpoint = cfs_time_current_sec();
lo->ll_status = LS_FAILED;
}
- if (lo->ll_status != LS_PAUSED) {
- spin_lock(&lfsck->li_lock);
- list_del_init(&com->lc_link);
- list_add_tail(&com->lc_link, &lfsck->li_list_idle);
- spin_unlock(&lfsck->li_lock);
- }
-
rc = lfsck_layout_store(env, com);
-
up_write(&com->lc_sem);
return rc;
ltd = lfsck_tgt_get(&lfsck->li_ost_descs, index);
if (unlikely(ltd == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
exp = ltd->ltd_exp;
if (!(exp_connect_flags(exp) & OBD_CONNECT_LFSCK))
out:
down_write(&com->lc_sem);
if (rc < 0) {
- /* If cannot touch the target server,
- * mark the LFSCK as INCOMPLETE. */
- if (rc == -ENOTCONN || rc == -ESHUTDOWN || rc == -ETIMEDOUT ||
- rc == -EHOSTDOWN || rc == -EHOSTUNREACH) {
+ struct lfsck_layout_master_data *llmd = com->lc_data;
+
+ if (unlikely(llmd->llmd_exit)) {
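+ /* The LFSCK assistant is exiting; ignore the failure. */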
+ rc = 0;
+ } else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
+ rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
+ rc == -EHOSTUNREACH) {
+ /* If we cannot contact the target server,
+ * mark the LFSCK as INCOMPLETE. */
CERROR("%s: Fail to talk with OST %x: rc = %d.\n",
lfsck_lfsck2name(lfsck), llr->llr_ost_idx, rc);
lo->ll_flags |= LF_INCOMPLETE;
while (!list_empty(&llmd->llmd_req_list)) {
bool wakeup = false;
- if (unlikely(llmd->llmd_exit))
+ if (unlikely(llmd->llmd_exit ||
+ !thread_is_running(mthread)))
GOTO(cleanup1, rc = llmd->llmd_post_result);
llr = list_entry(llmd->llmd_req_list.next,
/* Under force exit case, some requests may be just freed without
* verification, those objects should be re-handled when next run.
* So not update the on-disk tracing file under such case. */
- if (!llmd->llmd_exit)
+ if (llmd->llmd_in_double_scan && !llmd->llmd_exit)
rc1 = lfsck_layout_double_scan_result(env, com, rc);
fini:
if (ltd == NULL) {
spin_unlock(&ltds->ltd_lock);
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
}
list_del_init(&ltd->ltd_layout_phase_list);
llst = lfsck_layout_llst_find_and_del(llsd, lr->lr_index, true);
if (llst == NULL)
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
lfsck_layout_llst_put(llst);
if (list_empty(&llsd->llsd_master_list))
lfsck = lfsck_instance_find(dev, true, false);
if (unlikely(lfsck == NULL))
- RETURN(ERR_PTR(-ENODEV));
+ RETURN(ERR_PTR(-ENXIO));
com = lfsck_component_find(lfsck, LT_LAYOUT);
if (unlikely(com == NULL))
it->loi_llst = lfsck_layout_llst_find_and_del(llsd, attr, false);
if (it->loi_llst == NULL)
- GOTO(out, rc = -ENODEV);
+ GOTO(out, rc = -ENXIO);
if (dev->dd_record_fid_accessed) {
/* The first iteration against the rbtree, scan the whole rbtree
ltd = lfsck_tgt_get(&lfsck->li_mdt_descs, 0);
if (unlikely(ltd == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
parent = lfsck_object_find_by_dev(env, ltd->ltd_tgt,
&LU_LPF_FID);
ss = lu_site2seq(lfsck->li_bottom->dd_lu_dev.ld_site);
if (unlikely(ss == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
OBD_ALLOC_PTR(lfsck->li_seq);
if (lfsck->li_seq == NULL)
struct lfsck_async_interpret_args *laia = args;
struct lfsck_component *com;
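+ /* In the shared (broadcast) case the args carry no single component. */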
+ LASSERT(laia->laia_com == NULL);
LASSERT(laia->laia_shared);
spin_lock(&lfsck->li_lock);
int rc = 0;
int rc1 = 0;
- cfs_list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
- lc_link) {
+ list_for_each_entry(com, &lfsck->li_list_double_scan, lc_link) {
if (lfsck->li_bookmark_ram.lb_param & LPF_DRYRUN)
com->lc_journal = 0;
atomic_read(&lfsck->li_double_scan_count) == 0,
&lwi);
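+ /* Unless the LFSCK was paused (or co-paused), move the components
+ * on the double scan list back to the idle list. */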
+ if (lfsck->li_status != LS_PAUSED &&
+ lfsck->li_status != LS_CO_PAUSED) {
+ list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
+ lc_link) {
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
+ }
+ }
+
return rc1 != 0 ? rc1 : rc;
}
lc_link) {
if (com->lc_ops->lfsck_quit != NULL)
com->lc_ops->lfsck_quit(env, com);
+
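+ /* Detach the component from the scan lists and return it to the idle list. */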
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_del_init(&com->lc_link_dir);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
}
list_for_each_entry_safe(com, next, &lfsck->li_list_double_scan,
lc_link) {
if (com->lc_ops->lfsck_quit != NULL)
com->lc_ops->lfsck_quit(env, com);
+
+ spin_lock(&lfsck->li_lock);
+ list_del_init(&com->lc_link);
+ list_add_tail(&com->lc_link, &lfsck->li_list_idle);
+ spin_unlock(&lfsck->li_lock);
}
}
lfsck->li_bookmark_ram.lb_speed_limit);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
mutex_unlock(&lfsck->li_mutex);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck->li_bookmark_ram.lb_async_windows);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
}
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck_instance_put(&env, lfsck);
} else {
- rc = -ENODEV;
+ rc = -ENXIO;
}
lu_env_fini(&env);
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
/* System is not ready, try again later. */
if (unlikely(lfsck->li_namespace == NULL))
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
thread = &lfsck->li_thread;
/* release lfsck::li_mutex to avoid deadlock. */
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
com = lfsck_component_find(lfsck, lr->lr_active);
if (likely(com != NULL)) {
lfsck = lfsck_instance_find(key, true, false);
if (unlikely(lfsck == NULL))
- RETURN(-ENODEV);
+ RETURN(-ENXIO);
com = lfsck_component_find(lfsck, lr->lr_active);
if (likely(com != NULL)) {
struct ldlm_namespace *ns)
{
struct lfsck_instance *lfsck;
- int rc = -ENODEV;
+ int rc = -ENXIO;
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {