#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/list.h>
+#include <linux/delay.h>
#include <lu_object.h>
#include <dt_object.h>
#include <md_object.h>
LU_CONTEXT_KEY_DEFINE(lfsck, LCT_MD_THREAD | LCT_DT_THREAD);
LU_KEY_INIT_GENERIC(lfsck);
-static struct list_head lfsck_instance_list;
-static struct list_head lfsck_ost_orphan_list;
-static struct list_head lfsck_mdt_orphan_list;
+static LIST_HEAD(lfsck_instance_list);
+static LIST_HEAD(lfsck_ost_orphan_list);
+static LIST_HEAD(lfsck_mdt_orphan_list);
static DEFINE_SPINLOCK(lfsck_instance_lock);
const char *lfsck_flags_names[] = {
einfo->ei_res_id = resid;
rc = dt_object_lock(env, obj, lh, einfo, policy);
+	/* For regular checks LFSCK does not use LDLM locking, so its
+	 * cached state may be incoherent.  We have just taken an LDLM
+	 * lock for coherency, so invalidate the stale previous state. */
+ if (rc == ELDLM_OK)
+ dt_invalidate(env, obj);
} else {
rc = ldlm_cli_enqueue_local(env, lfsck->li_namespace, resid,
LDLM_IBITS, policy, mode,
}
/**
+ * Request the remote LOOKUP lock for the given object.
+ *
+ * If \a pobj resides on a remote MDT, then the LOOKUP lock of \a obj
+ * is held on that MDT; acquire the LOOKUP lock there.
+ *
+ * \param[in] env pointer to the thread context
+ * \param[in] lfsck pointer to the lfsck instance
+ * \param[in] pobj pointer to parent dt_object
+ * \param[in] obj pointer to the dt_object to be locked
+ * \param[out] lh pointer to the lock handle
+ * \param[in] mode the mode for the ldlm lock to be acquired
+ *
+ * \retval 0 for success
+ * \retval negative error number on failure
+ */
+int lfsck_remote_lookup_lock(const struct lu_env *env,
+			     struct lfsck_instance *lfsck,
+			     struct dt_object *pobj, struct dt_object *obj,
+			     struct lustre_handle *lh, enum ldlm_mode mode)
+{
+	struct ldlm_res_id *resid = &lfsck_env_info(env)->lti_resid;
+
+	/* The caller must pass in an unused lock handle. */
+	LASSERT(!lustre_handle_is_used(lh));
+
+	/* Name the resource by the child object's FID, then enqueue the
+	 * LOOKUP ibits lock through the common helper (which, per the
+	 * comment above, sends it to the MDT hosting \a pobj). */
+	fid_build_reg_res_name(lfsck_dto2fid(obj), resid);
+	return __lfsck_ibits_lock(env, lfsck, pobj, resid, lh,
+				  MDS_INODELOCK_LOOKUP, mode);
+}
+
+/**
 * Release the specified ibits lock.
*
* If the lock has been acquired before, release it
bool dirty = false;
if (limit != LFSCK_SPEED_NO_LIMIT) {
- if (limit > msecs_to_jiffies(MSEC_PER_SEC)) {
- lfsck->li_sleep_rate = limit /
- msecs_to_jiffies(MSEC_PER_SEC);
+ if (limit > cfs_time_seconds(1)) {
+ lfsck->li_sleep_rate = limit / cfs_time_seconds(1);
lfsck->li_sleep_jif = 1;
} else {
lfsck->li_sleep_rate = 1;
- lfsck->li_sleep_jif = msecs_to_jiffies(MSEC_PER_SEC) /
- limit;
+ lfsck->li_sleep_jif = cfs_time_seconds(1) / limit;
}
} else {
lfsck->li_sleep_jif = 0;
void lfsck_control_speed(struct lfsck_instance *lfsck)
{
	struct ptlrpc_thread *thread = &lfsck->li_thread;
-	struct l_wait_info lwi;
	if (lfsck->li_sleep_jif > 0 &&
	    lfsck->li_new_scanned >= lfsck->li_sleep_rate) {
-		lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
-				       LWI_ON_SIGNAL_NOOP, NULL);
-
-		l_wait_event(thread->t_ctl_waitq,
-			     !thread_is_running(thread),
-			     &lwi);
+		/* Rate limiting: once li_sleep_rate objects have been
+		 * scanned, nap for li_sleep_jif jiffies (waking early if
+		 * the engine stops running), then restart the count. */
+		wait_event_idle_timeout(thread->t_ctl_waitq,
+					!thread_is_running(thread),
+					lfsck->li_sleep_jif);
		lfsck->li_new_scanned = 0;
	}
}
{
struct lfsck_instance *lfsck = com->lc_lfsck;
struct ptlrpc_thread *thread = &lfsck->li_thread;
- struct l_wait_info lwi;
if (lfsck->li_sleep_jif > 0 &&
com->lc_new_scanned >= lfsck->li_sleep_rate) {
- lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
-
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle_timeout(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ lfsck->li_sleep_jif);
com->lc_new_scanned = 0;
}
}
*tmp = *lr;
ptlrpc_request_set_replen(req);
- laia = ptlrpc_req_async_args(req);
+ laia = ptlrpc_req_async_args(laia, req);
*laia = *(struct lfsck_async_interpret_args *)args;
if (laia->laia_com != NULL)
lfsck_component_get(laia->laia_com);
lad->lad_assistant_status = 0;
lad->lad_post_result = 0;
- lad->lad_to_post = 0;
- lad->lad_to_double_scan = 0;
- lad->lad_in_double_scan = 0;
- lad->lad_exit = 0;
+ lad->lad_flags = 0;
lad->lad_advance_lock = false;
thread_set_flags(athread, 0);
"rc = %d\n", lfsck_lfsck2name(lfsck), lad->lad_name, rc);
lfsck_thread_args_fini(lta);
} else {
- struct l_wait_info lwi = { 0 };
-
- l_wait_event(mthread->t_ctl_waitq,
- thread_is_running(athread) ||
- thread_is_stopped(athread) ||
- !thread_is_starting(mthread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ thread_is_running(athread) ||
+ thread_is_stopped(athread) ||
+ !thread_is_starting(mthread));
if (unlikely(!thread_is_starting(mthread)))
/* stopped by race */
rc = -ESRCH;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
- l_wait_event(mthread->t_ctl_waitq,
- list_empty(&lad->lad_req_list) ||
- !thread_is_running(mthread) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ list_empty(&lad->lad_req_list) ||
+ !thread_is_running(mthread) ||
+ thread_is_stopped(athread));
if (!thread_is_running(mthread) || thread_is_stopped(athread))
return LFSCK_CHECKPOINT_SKIP;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *athread = &lad->lad_thread;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
- struct l_wait_info lwi = { 0 };
lad->lad_post_result = *result;
if (*result <= 0)
- lad->lad_exit = 1;
- lad->lad_to_post = 1;
+ set_bit(LAD_EXIT, &lad->lad_flags);
+ set_bit(LAD_TO_POST, &lad->lad_flags);
CDEBUG(D_LFSCK, "%s: waiting for assistant to do %s post, rc = %d\n",
lfsck_lfsck2name(com->lc_lfsck), lad->lad_name, *result);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- (*result > 0 && list_empty(&lad->lad_req_list)) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ (*result > 0 && list_empty(&lad->lad_req_list)) ||
+ thread_is_stopped(athread));
if (lad->lad_assistant_status < 0)
*result = lad->lad_assistant_status;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
if (status != LS_SCANNING_PHASE2)
- lad->lad_exit = 1;
+ set_bit(LAD_EXIT, &lad->lad_flags);
else
- lad->lad_to_double_scan = 1;
+ set_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags);
CDEBUG(D_LFSCK, "%s: waiting for assistant to do %s double_scan, "
"status %d\n",
lfsck_lfsck2name(com->lc_lfsck), lad->lad_name, status);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- lad->lad_in_double_scan ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags) ||
+ thread_is_stopped(athread));
CDEBUG(D_LFSCK, "%s: the assistant has done %s double_scan, "
"status %d\n", lfsck_lfsck2name(com->lc_lfsck), lad->lad_name,
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
- lad->lad_exit = 1;
+ set_bit(LAD_EXIT, &lad->lad_flags);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- thread_is_init(athread) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ thread_is_init(athread) ||
+ thread_is_stopped(athread));
}
int lfsck_load_one_trace_file(const struct lu_env *env,
}
/* external interfaces */
-int lfsck_get_speed(struct seq_file *m, char *buf, struct dt_device *key)
+int lfsck_get_speed(char *buf, struct dt_device *key)
{
struct lu_env env;
struct lfsck_instance *lfsck;
RETURN(rc);
lfsck = lfsck_instance_find(key, true, false);
- if (likely(lfsck != NULL)) {
- if (m) {
- seq_printf(m, "%u\n",
- lfsck->li_bookmark_ram.lb_speed_limit);
- } else if (buf) {
- rc = sprintf(buf, "%u\n",
- lfsck->li_bookmark_ram.lb_speed_limit);
- }
+ if (lfsck && buf) {
+ rc = sprintf(buf, "%u\n",
+ lfsck->li_bookmark_ram.lb_speed_limit);
lfsck_instance_put(&env, lfsck);
} else {
rc = -ENXIO;
}
EXPORT_SYMBOL(lfsck_set_speed);
-int lfsck_get_windows(struct seq_file *m, struct dt_device *key)
+int lfsck_get_windows(char *buf, struct dt_device *key)
{
struct lu_env env;
struct lfsck_instance *lfsck;
lfsck = lfsck_instance_find(key, true, false);
if (likely(lfsck != NULL)) {
- seq_printf(m, "%u\n", lfsck->li_bookmark_ram.lb_async_windows);
+ rc = sprintf(buf, "%u\n",
+ lfsck->li_bookmark_ram.lb_async_windows);
lfsck_instance_put(&env, lfsck);
} else {
rc = -ENXIO;
if (unlikely(rc == -EINPROGRESS)) {
retry = true;
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
+ schedule_timeout(cfs_time_seconds(1));
set_current_state(TASK_RUNNING);
if (!signal_pending(current) &&
thread_is_running(&lfsck->li_thread))
struct lfsck_bookmark *bk;
struct ptlrpc_thread *thread;
struct lfsck_component *com;
- struct l_wait_info lwi = { 0 };
struct lfsck_thread_args *lta;
struct task_struct *task;
struct lfsck_tgt_descs *ltds;
GOTO(out, rc);
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) ||
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) ||
+ thread_is_stopped(thread));
if (start == NULL || !(start->ls_flags & LPF_BROADCAST)) {
lfsck->li_start_unplug = 1;
wake_up_all(&thread->t_ctl_waitq);
lfsck->li_start_unplug = 1;
wake_up_all(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_stopped(thread));
}
} else {
lfsck->li_start_unplug = 1;
{
struct lfsck_instance *lfsck;
struct ptlrpc_thread *thread;
- struct l_wait_info lwi = { 0 };
int rc = 0;
int rc1 = 0;
ENTRY;
thread_set_flags(thread, SVC_STOPPING);
LASSERT(lfsck->li_task != NULL);
- force_sig(SIGINT, lfsck->li_task);
+ cfs_force_sig(SIGINT, lfsck->li_task);
if (lfsck->li_master) {
struct lfsck_component *com;
lad = com->lc_data;
spin_lock(&lad->lad_lock);
if (lad->lad_task != NULL)
- force_sig(SIGINT, lad->lad_task);
+ cfs_force_sig(SIGINT, lad->lad_task);
spin_unlock(&lad->lad_lock);
}
lad = com->lc_data;
spin_lock(&lad->lad_lock);
if (lad->lad_task != NULL)
- force_sig(SIGINT, lad->lad_task);
+ cfs_force_sig(SIGINT, lad->lad_task);
spin_unlock(&lad->lad_lock);
}
}
/* It was me set the status as 'stopping' just now, if it is not
* 'stopping' now, then either stopped, or re-started by race. */
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_stopping(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ !thread_is_stopping(thread));
GOTO(put, rc = 0);
que->lu_mdts_count[i][LS_SCANNING_PHASE2] != 0 ||
que->lu_osts_count[i][LS_SCANNING_PHASE1] != 0 ||
que->lu_osts_count[i][LS_SCANNING_PHASE2] != 0) {
- struct l_wait_info lwi;
-
/* If it is required to wait, then sleep
- * 3 seconds and try to query again. */
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(3),
- NULL,
- LWI_ON_SIGNAL_NOOP,
- NULL);
- rc = l_wait_event(lfsck->li_thread.t_ctl_waitq,
- 0, &lwi);
- if (rc == -ETIMEDOUT)
+ * 3 seconds and try to query again.
+ */
+ unsigned long timeout =
+ msecs_to_jiffies(3000) + 1;
+ while (timeout &&
+ !fatal_signal_pending(current))
+ timeout = schedule_timeout_killable(
+ timeout);
+ if (timeout == 0)
goto again;
}
}
{
int rc;
- INIT_LIST_HEAD(&lfsck_instance_list);
- INIT_LIST_HEAD(&lfsck_ost_orphan_list);
- INIT_LIST_HEAD(&lfsck_mdt_orphan_list);
lfsck_key_init_generic(&lfsck_thread_key, NULL);
rc = lu_context_key_register(&lfsck_thread_key);
if (!rc) {