* GPL HEADER END
*/
/*
- * Copyright (c) 2013, 2016, Intel Corporation.
+ * Copyright (c) 2013, 2017, Intel Corporation.
*/
/*
* lustre/lfsck/lfsck_lib.c
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/list.h>
+#include <linux/delay.h>
#include <lu_object.h>
#include <dt_object.h>
#include <md_object.h>
#include <lustre_lib.h>
#include <lustre_net.h>
#include <lustre_lfsck.h>
-#include <lustre/lustre_lfsck_user.h>
#include "lfsck_internal.h"
LU_CONTEXT_KEY_DEFINE(lfsck, LCT_MD_THREAD | LCT_DT_THREAD);
LU_KEY_INIT_GENERIC(lfsck);
-static struct list_head lfsck_instance_list;
-static struct list_head lfsck_ost_orphan_list;
-static struct list_head lfsck_mdt_orphan_list;
+static LIST_HEAD(lfsck_instance_list);
+static LIST_HEAD(lfsck_ost_orphan_list);
+static LIST_HEAD(lfsck_mdt_orphan_list);
static DEFINE_SPINLOCK(lfsck_instance_lock);
const char *lfsck_flags_names[] = {
einfo->ei_res_id = resid;
rc = dt_object_lock(env, obj, lh, einfo, policy);
+ /* for regular checks LFSCK doesn't use LDLM locking,
+ * so the state isn't coherent. here we just took LDLM
+ * lock for coherency and it's time to invalidate
+ * previous state */
+ if (rc == ELDLM_OK)
+ dt_invalidate(env, obj);
} else {
- rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid,
+ rc = ldlm_cli_enqueue_local(env, lfsck->li_namespace, resid,
LDLM_IBITS, policy, mode,
&flags, ldlm_blocking_ast,
ldlm_completion_ast, NULL, NULL,
}
/**
+ * Request the remote LOOKUP lock for the given object.
+ *
+ * If \a pobj is remote, the LOOKUP lock of \a obj is on the MDT where
+ * \a pobj is, acquire LOOKUP lock there.
+ *
+ * \param[in] env pointer to the thread context
+ * \param[in] lfsck pointer to the lfsck instance
+ * \param[in] pobj pointer to parent dt_object
+ * \param[in] obj pointer to the dt_object to be locked
+ * \param[out] lh pointer to the lock handle
+ * \param[in] mode the mode for the ldlm lock to be acquired
+ *
+ * \retval 0 for success
+ * \retval negative error number on failure
+ */
+int lfsck_remote_lookup_lock(const struct lu_env *env,
+			     struct lfsck_instance *lfsck,
+			     struct dt_object *pobj, struct dt_object *obj,
+			     struct lustre_handle *lh, enum ldlm_mode mode)
+{
+	struct ldlm_res_id *resid = &lfsck_env_info(env)->lti_resid;
+
+	/* caller must pass an unused handle; we never stack locks here */
+	LASSERT(!lustre_handle_is_used(lh));
+
+	/* resource name comes from the child's FID, but the enqueue is
+	 * routed via \a pobj so the lock is taken on the parent's MDT */
+	fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
+	return __lfsck_ibits_lock(env, lfsck, pobj, resid, lh,
+				  MDS_INODELOCK_LOOKUP, mode);
+}
+
+/**
* Release the the specified ibits lock.
*
* If the lock has been acquired before, release it
/* 3b. insert dot into child dir */
rec->rec_fid = cfid;
rc = dt_insert(env, child, (const struct dt_rec *)rec,
- (const struct dt_key *)dot, th, 1);
+ (const struct dt_key *)dot, th);
if (rc != 0)
GOTO(unlock, rc);
/* 4b. insert dotdot into child dir */
rec->rec_fid = &LU_LPF_FID;
rc = dt_insert(env, child, (const struct dt_rec *)rec,
- (const struct dt_key *)dotdot, th, 1);
+ (const struct dt_key *)dotdot, th);
if (rc != 0)
GOTO(unlock, rc);
/* 6b. insert name into parent dir */
rec->rec_fid = cfid;
rc = dt_insert(env, parent, (const struct dt_rec *)rec,
- (const struct dt_key *)name, th, 1);
+ (const struct dt_key *)name, th);
if (rc != 0)
GOTO(stop, rc);
rec->rec_type = S_IFDIR;
rec->rec_fid = cfid;
rc = dt_insert(env, child, (const struct dt_rec *)rec,
- (const struct dt_key *)dot, th, 1);
+ (const struct dt_key *)dot, th);
if (rc != 0)
GOTO(unlock, rc);
/* 4b. insert dotdot into child dir */
rec->rec_fid = &LU_LPF_FID;
rc = dt_insert(env, child, (const struct dt_rec *)rec,
- (const struct dt_key *)dotdot, th, 1);
+ (const struct dt_key *)dotdot, th);
if (rc != 0)
GOTO(unlock, rc);
/* 5b. insert name into parent dir */
rc = dt_insert(env, parent, (const struct dt_rec *)rec,
- (const struct dt_key *)name, th, 1);
+ (const struct dt_key *)name, th);
if (rc != 0)
GOTO(stop, rc);
if (!lfsck->li_current_oit_processed && !init)
pos->lp_oit_cookie--;
- LASSERT(pos->lp_oit_cookie > 0);
+ if (unlikely(pos->lp_oit_cookie == 0))
+ pos->lp_oit_cookie = 1;
if (lfsck->li_di_dir != NULL) {
struct dt_object *dto = lfsck->li_obj_dir;
bool dirty = false;
if (limit != LFSCK_SPEED_NO_LIMIT) {
- if (limit > msecs_to_jiffies(MSEC_PER_SEC)) {
- lfsck->li_sleep_rate = limit /
- msecs_to_jiffies(MSEC_PER_SEC);
+ if (limit > cfs_time_seconds(1)) {
+ lfsck->li_sleep_rate = limit / cfs_time_seconds(1);
lfsck->li_sleep_jif = 1;
} else {
lfsck->li_sleep_rate = 1;
- lfsck->li_sleep_jif = msecs_to_jiffies(MSEC_PER_SEC) /
- limit;
+ lfsck->li_sleep_jif = cfs_time_seconds(1) / limit;
}
} else {
lfsck->li_sleep_jif = 0;
void lfsck_control_speed(struct lfsck_instance *lfsck)
{
struct ptlrpc_thread *thread = &lfsck->li_thread;
- struct l_wait_info lwi;
if (lfsck->li_sleep_jif > 0 &&
lfsck->li_new_scanned >= lfsck->li_sleep_rate) {
- lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
-
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle_timeout(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ lfsck->li_sleep_jif);
lfsck->li_new_scanned = 0;
}
}
{
struct lfsck_instance *lfsck = com->lc_lfsck;
struct ptlrpc_thread *thread = &lfsck->li_thread;
- struct l_wait_info lwi;
if (lfsck->li_sleep_jif > 0 &&
com->lc_new_scanned >= lfsck->li_sleep_rate) {
- lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
-
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle_timeout(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ lfsck->li_sleep_jif);
com->lc_new_scanned = 0;
}
}
switch (lr->lr_event) {
case LE_START:
+ if (unlikely(rc == -EINPROGRESS)) {
+ ltd->ltd_retry_start = 1;
+ break;
+ }
+
if (rc != 0) {
CDEBUG(D_LFSCK, "%s: fail to notify %s %x for %s "
"start: rc = %d\n",
ltd->ltd_index, lad->lad_name, rc);
lfsck_tgt_put(ltd);
} else {
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
}
ptlrpc_set_destroy(set);
*tmp = *lr;
ptlrpc_request_set_replen(req);
- laia = ptlrpc_req_async_args(req);
+ laia = ptlrpc_req_async_args(laia, req);
*laia = *(struct lfsck_async_interpret_args *)args;
if (laia->laia_com != NULL)
lfsck_component_get(laia->laia_com);
req->rq_interpret_reply = interpreter;
req->rq_allow_intr = 1;
+ req->rq_no_delay = 1;
ptlrpc_set_add_req(set, req);
return 0;
goto again;
}
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
ptlrpc_set_destroy(set);
RETURN(rc);
lad->lad_assistant_status = 0;
lad->lad_post_result = 0;
- lad->lad_to_post = 0;
- lad->lad_to_double_scan = 0;
- lad->lad_in_double_scan = 0;
- lad->lad_exit = 0;
+ lad->lad_flags = 0;
lad->lad_advance_lock = false;
thread_set_flags(athread, 0);
"rc = %d\n", lfsck_lfsck2name(lfsck), lad->lad_name, rc);
lfsck_thread_args_fini(lta);
} else {
- struct l_wait_info lwi = { 0 };
-
- l_wait_event(mthread->t_ctl_waitq,
- thread_is_running(athread) ||
- thread_is_stopped(athread),
- &lwi);
- if (unlikely(!thread_is_running(athread)))
+ wait_event_idle(mthread->t_ctl_waitq,
+ thread_is_running(athread) ||
+ thread_is_stopped(athread) ||
+ !thread_is_starting(mthread));
+ if (unlikely(!thread_is_starting(mthread)))
+ /* stopped by race */
+ rc = -ESRCH;
+ else if (unlikely(!thread_is_running(athread)))
rc = lad->lad_assistant_status;
else
rc = 0;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
- l_wait_event(mthread->t_ctl_waitq,
- list_empty(&lad->lad_req_list) ||
- !thread_is_running(mthread) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ list_empty(&lad->lad_req_list) ||
+ !thread_is_running(mthread) ||
+ thread_is_stopped(athread));
if (!thread_is_running(mthread) || thread_is_stopped(athread))
return LFSCK_CHECKPOINT_SKIP;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *athread = &lad->lad_thread;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
- struct l_wait_info lwi = { 0 };
lad->lad_post_result = *result;
if (*result <= 0)
- lad->lad_exit = 1;
- lad->lad_to_post = 1;
+ set_bit(LAD_EXIT, &lad->lad_flags);
+ set_bit(LAD_TO_POST, &lad->lad_flags);
CDEBUG(D_LFSCK, "%s: waiting for assistant to do %s post, rc = %d\n",
lfsck_lfsck2name(com->lc_lfsck), lad->lad_name, *result);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- (*result > 0 && list_empty(&lad->lad_req_list)) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ (*result > 0 && list_empty(&lad->lad_req_list)) ||
+ thread_is_stopped(athread));
if (lad->lad_assistant_status < 0)
*result = lad->lad_assistant_status;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
if (status != LS_SCANNING_PHASE2)
- lad->lad_exit = 1;
+ set_bit(LAD_EXIT, &lad->lad_flags);
else
- lad->lad_to_double_scan = 1;
+ set_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags);
CDEBUG(D_LFSCK, "%s: waiting for assistant to do %s double_scan, "
"status %d\n",
lfsck_lfsck2name(com->lc_lfsck), lad->lad_name, status);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- lad->lad_in_double_scan ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags) ||
+ thread_is_stopped(athread));
CDEBUG(D_LFSCK, "%s: the assistant has done %s double_scan, "
"status %d\n", lfsck_lfsck2name(com->lc_lfsck), lad->lad_name,
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
- lad->lad_exit = 1;
+ set_bit(LAD_EXIT, &lad->lad_flags);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- thread_is_init(athread) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ thread_is_init(athread) ||
+ thread_is_stopped(athread));
}
int lfsck_load_one_trace_file(const struct lu_env *env,
RETURN(PTR_ERR(obj));
rc = obj->do_ops->do_index_try(env, obj, ft);
- if (rc)
+ if (rc) {
lfsck_object_put(env, obj);
- else
+ CDEBUG(D_LFSCK, "%s: LFSCK fail to load "
+ "sub trace file %s: rc = %d\n",
+ lfsck_lfsck2name(com->lc_lfsck), name, rc);
+ } else {
*child = obj;
+ }
RETURN(rc);
}
}
/* external interfaces */
-
-int lfsck_get_speed(struct seq_file *m, struct dt_device *key)
+int lfsck_get_speed(char *buf, struct dt_device *key)
{
struct lu_env env;
struct lfsck_instance *lfsck;
RETURN(rc);
lfsck = lfsck_instance_find(key, true, false);
- if (likely(lfsck != NULL)) {
- seq_printf(m, "%u\n", lfsck->li_bookmark_ram.lb_speed_limit);
+ if (lfsck && buf) {
+ rc = sprintf(buf, "%u\n",
+ lfsck->li_bookmark_ram.lb_speed_limit);
lfsck_instance_put(&env, lfsck);
} else {
rc = -ENXIO;
}
EXPORT_SYMBOL(lfsck_set_speed);
-int lfsck_get_windows(struct seq_file *m, struct dt_device *key)
+int lfsck_get_windows(char *buf, struct dt_device *key)
{
struct lu_env env;
struct lfsck_instance *lfsck;
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
- seq_printf(m, "%u\n", lfsck->li_bookmark_ram.lb_async_windows);
+ rc = sprintf(buf, "%u\n",
+ lfsck->li_bookmark_ram.lb_async_windows);
lfsck_instance_put(&env, lfsck);
} else {
rc = -ENXIO;
}
EXPORT_SYMBOL(lfsck_get_windows);
-int lfsck_set_windows(struct dt_device *key, int val)
+int lfsck_set_windows(struct dt_device *key, unsigned int val)
{
struct lu_env env;
struct lfsck_instance *lfsck;
}
up_read(<ds->ltd_rw_sem);
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
ptlrpc_set_destroy(set);
if (rc == 0)
struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
__u32 idx;
int rc = 0;
+ bool retry = false;
ENTRY;
LASSERT(start->ls_flags & LPF_BROADCAST);
- set = ptlrpc_prep_set();
- if (unlikely(set == NULL))
- RETURN(-ENOMEM);
-
memset(lr, 0, sizeof(*lr));
lr->lr_event = LE_START;
lr->lr_index = lfsck_dev_idx(lfsck);
laia->laia_lr = lr;
laia->laia_shared = 1;
+again:
+ set = ptlrpc_prep_set();
+ if (unlikely(!set))
+ RETURN(-ENOMEM);
+
down_read(<ds->ltd_rw_sem);
cfs_foreach_bit(ltds->ltd_tgts_bitmap, idx) {
ltd = lfsck_tgt_get(ltds, idx);
LASSERT(ltd != NULL);
+ if (retry && !ltd->ltd_retry_start) {
+ lfsck_tgt_put(ltd);
+ continue;
+ }
+
laia->laia_ltd = ltd;
+ ltd->ltd_retry_start = 0;
ltd->ltd_layout_done = 0;
ltd->ltd_namespace_done = 0;
ltd->ltd_synced_failures = 0;
RETURN(rc);
}
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
ptlrpc_set_destroy(set);
if (rc == 0)
rc = laia->laia_result;
+ if (unlikely(rc == -EINPROGRESS)) {
+ retry = true;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ set_current_state(TASK_RUNNING);
+ if (!signal_pending(current) &&
+ thread_is_running(&lfsck->li_thread))
+ goto again;
+
+ rc = -EINTR;
+ }
+
if (rc != 0) {
struct lfsck_stop *stop = &info->lti_stop;
struct lfsck_bookmark *bk;
struct ptlrpc_thread *thread;
struct lfsck_component *com;
- struct l_wait_info lwi = { 0 };
struct lfsck_thread_args *lta;
struct task_struct *task;
struct lfsck_tgt_descs *ltds;
if (unlikely(lfsck == NULL))
RETURN(-ENXIO);
+ if (unlikely(lfsck->li_stopping))
+ GOTO(put, rc = -ENXIO);
+
/* System is not ready, try again later. */
- if (unlikely(lfsck->li_namespace == NULL))
- GOTO(put, rc = -EAGAIN);
+ if (unlikely(lfsck->li_namespace == NULL ||
+ lfsck_dev_site(lfsck)->ss_server_fld == NULL))
+ GOTO(put, rc = -EINPROGRESS);
/* start == NULL means auto trigger paused LFSCK. */
- if ((start == NULL) &&
- (list_empty(&lfsck->li_list_scan) ||
- OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_AUTO)))
- GOTO(put, rc = 0);
+ if (!start) {
+ if (list_empty(&lfsck->li_list_scan) ||
+ OBD_FAIL_CHECK(OBD_FAIL_LFSCK_NO_AUTO))
+ GOTO(put, rc = 0);
+ } else if (start->ls_flags & LPF_BROADCAST && !lfsck->li_master) {
+ CERROR("%s: only allow to specify '-A | -o' via MDS\n",
+ lfsck_lfsck2name(lfsck));
+
+ GOTO(put, rc = -EPERM);
+ }
bk = &lfsck->li_bookmark_ram;
thread = &lfsck->li_thread;
mutex_lock(&lfsck->li_mutex);
spin_lock(&lfsck->li_lock);
+ if (unlikely(thread_is_stopping(thread))) {
+ /* Someone is stopping the LFSCK. */
+ spin_unlock(&lfsck->li_lock);
+ GOTO(out, rc = -EBUSY);
+ }
+
if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
rc = -EALREADY;
if (unlikely(start == NULL)) {
if (start == NULL)
goto trigger;
- if (start->ls_flags & LPF_BROADCAST && !lfsck->li_master) {
- CERROR("%s: only allow to specify '-A | -o' via MDS\n",
- lfsck_lfsck2name(lfsck));
-
- GOTO(out, rc = -EPERM);
- }
-
start->ls_version = bk->lb_version;
if (start->ls_active != 0) {
flags |= DOIF_OUTUSED;
lfsck->li_args_oit = (flags << DT_OTABLE_IT_FLAGS_SHIFT) | valid;
- thread_set_flags(thread, 0);
lta = lfsck_thread_args_init(lfsck, NULL, lsp);
if (IS_ERR(lta))
GOTO(out, rc = PTR_ERR(lta));
__lfsck_set_speed(lfsck, bk->lb_speed_limit);
+ spin_lock(&lfsck->li_lock);
+ thread_set_flags(thread, SVC_STARTING);
+ spin_unlock(&lfsck->li_lock);
task = kthread_run(lfsck_master_engine, lta, "lfsck");
if (IS_ERR(task)) {
rc = PTR_ERR(task);
GOTO(out, rc);
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) ||
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) ||
+ thread_is_stopped(thread));
if (start == NULL || !(start->ls_flags & LPF_BROADCAST)) {
lfsck->li_start_unplug = 1;
wake_up_all(&thread->t_ctl_waitq);
lfsck->li_start_unplug = 1;
wake_up_all(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_stopped(thread));
}
} else {
lfsck->li_start_unplug = 1;
{
struct lfsck_instance *lfsck;
struct ptlrpc_thread *thread;
- struct l_wait_info lwi = { 0 };
int rc = 0;
int rc1 = 0;
ENTRY;
RETURN(-ENXIO);
thread = &lfsck->li_thread;
- /* release lfsck::li_mutex to avoid deadlock. */
- if (stop != NULL && stop->ls_flags & LPF_BROADCAST) {
- if (!lfsck->li_master) {
- CERROR("%s: only allow to specify '-A' via MDS\n",
- lfsck_lfsck2name(lfsck));
-
- GOTO(out, rc = -EPERM);
- }
-
- rc1 = lfsck_stop_all(env, lfsck, stop);
+ if (stop && stop->ls_flags & LPF_BROADCAST && !lfsck->li_master) {
+ CERROR("%s: only allow to specify '-A' via MDS\n",
+ lfsck_lfsck2name(lfsck));
+ GOTO(put, rc = -EPERM);
}
- mutex_lock(&lfsck->li_mutex);
spin_lock(&lfsck->li_lock);
- /* no error if LFSCK is already stopped, or was never started */
- if (thread_is_init(thread) || thread_is_stopped(thread)) {
- spin_unlock(&lfsck->li_lock);
- GOTO(out, rc = 0);
- }
+ /* The target is umounted */
+ if (stop && stop->ls_status == LS_PAUSED)
+ lfsck->li_stopping = 1;
+
+ if (thread_is_init(thread) || thread_is_stopped(thread))
+ /* no error if LFSCK stopped already, or not started */
+ GOTO(unlock, rc = 0);
+
+ if (thread_is_stopping(thread))
+ /* Someone is stopping LFSCK. */
+ GOTO(unlock, rc = -EINPROGRESS);
- if (stop != NULL) {
+ if (stop) {
lfsck->li_status = stop->ls_status;
lfsck->li_flags = stop->ls_flags;
} else {
thread_set_flags(thread, SVC_STOPPING);
+ LASSERT(lfsck->li_task != NULL);
+ cfs_force_sig(SIGINT, lfsck->li_task);
+
if (lfsck->li_master) {
struct lfsck_component *com;
struct lfsck_assistant_data *lad;
lad = com->lc_data;
spin_lock(&lad->lad_lock);
if (lad->lad_task != NULL)
- force_sig(SIGINT, lad->lad_task);
+ cfs_force_sig(SIGINT, lad->lad_task);
spin_unlock(&lad->lad_lock);
}
lad = com->lc_data;
spin_lock(&lad->lad_lock);
if (lad->lad_task != NULL)
- force_sig(SIGINT, lad->lad_task);
+ cfs_force_sig(SIGINT, lad->lad_task);
spin_unlock(&lad->lad_lock);
}
}
+ wake_up_all(&thread->t_ctl_waitq);
spin_unlock(&lfsck->li_lock);
+ if (stop && stop->ls_flags & LPF_BROADCAST)
+ rc1 = lfsck_stop_all(env, lfsck, stop);
- wake_up_all(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
+ /* It was me set the status as 'stopping' just now, if it is not
+ * 'stopping' now, then either stopped, or re-started by race. */
+ wait_event_idle(thread->t_ctl_waitq,
+ !thread_is_stopping(thread));
- GOTO(out, rc = 0);
+ GOTO(put, rc = 0);
-out:
- mutex_unlock(&lfsck->li_mutex);
+unlock:
+ spin_unlock(&lfsck->li_lock);
+put:
lfsck_instance_put(env, lfsck);
return rc != 0 ? rc : rc1;
que->lu_mdts_count[i][LS_SCANNING_PHASE2] != 0 ||
que->lu_osts_count[i][LS_SCANNING_PHASE1] != 0 ||
que->lu_osts_count[i][LS_SCANNING_PHASE2] != 0) {
- struct l_wait_info lwi;
-
/* If it is required to wait, then sleep
- * 3 seconds and try to query again. */
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(3),
- NULL,
- LWI_ON_SIGNAL_NOOP,
- NULL);
- rc = l_wait_event(lfsck->li_thread.t_ctl_waitq,
- 0, &lwi);
- if (rc == -ETIMEDOUT)
+ * 3 seconds and try to query again.
+ */
+ unsigned long timeout =
+ msecs_to_jiffies(3000) + 1;
+ while (timeout &&
+ !fatal_signal_pending(current))
+ timeout = schedule_timeout_killable(
+ timeout);
+ if (timeout == 0)
goto again;
}
}
{
int rc;
- INIT_LIST_HEAD(&lfsck_instance_list);
- INIT_LIST_HEAD(&lfsck_ost_orphan_list);
- INIT_LIST_HEAD(&lfsck_mdt_orphan_list);
lfsck_key_init_generic(&lfsck_thread_key, NULL);
rc = lu_context_key_register(&lfsck_thread_key);
if (!rc) {