The 'scrub' thread has nothing to do with ptlrpc, so
using ptlrpc_thread is strange - and not necessary. This patch
switches to native kthread interfaces.
A particular advantage of the switch is seen in the conversion of
thread_is_running().
It is used in two completely different ways.
1/ The thread itself needs to know if it has been asked to stop.
It currently uses thread_is_running(). After the patch it
uses kthread_should_stop().
2/ Other code needs to know if the thread is still active or not. It
previously used thread_is_running(), so it looked just like the
first case. Now it checks a new flag ->os_running, which is
set precisely when SVC_RUNNING was set, and cleared when any other
status was set.
As the thread can stop itself (e.g if osd_scrub_prep fails) or can be
asked to stop (scrub_stop()), we need to avoid confusion between the
two. This is achieved by calling 'xchg(&scrub->os_task,NULL)'.
If the thread finds that to be non-NULL, it has stopped itself
and can just exit. If scrub_stop() finds it to be non-NULL,
it calls kthread_stop() to stop the thread.
Instead of using the waitqueue in the 'struct ptlrpc_thread', we use
wake_up_var() and wait_var_event() on the 'scrub' pointer. This is
used both to tell the thread there is work to do, and for callers to
wait for the task to make progress, or to exit.
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: Ib2f1845151734687e89a1b0d6a5135a5a4ba6e5c
Reviewed-on: https://review.whamcloud.com/38824
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Alex Zhuravlev <bzzz@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
__ret = __wait_var_event_timeout(var, condition, timeout); \
__ret; \
})
__ret = __wait_var_event_timeout(var, condition, timeout); \
__ret; \
})
+#else /* !HAVE_WAIT_VAR_EVENT */
+/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
+ * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
+ */
+# ifndef __wait_cond_timeout
+# define ___wait_cond_timeout(condition) \
+({ \
+ bool __cond = (condition); \
+ if (__cond && !__ret) \
+ __ret = 1; \
+ __cond || !__ret; \
+})
+# endif /* __wait_cond_timeout */
+
#endif /* ! HAVE_WAIT_VAR_EVENT */
/*
#endif /* ! HAVE_WAIT_VAR_EVENT */
/*
/* Object for the scrub file. */
struct dt_object *os_obj;
/* Object for the scrub file. */
struct dt_object *os_obj;
- struct ptlrpc_thread os_thread;
+ struct task_struct *os_task;
struct list_head os_inconsistent_items;
/* write lock for scrub prep/update/post/checkpoint,
struct list_head os_inconsistent_items;
/* write lock for scrub prep/update/post/checkpoint,
os_convert_igif:1,
os_partial_scan:1,
os_in_join:1,
os_convert_igif:1,
os_partial_scan:1,
os_in_join:1,
+ os_running:1, /* scrub thread is running */
int scrub_start(int (*threadfn)(void *data), struct lustre_scrub *scrub,
void *data, __u32 flags)
{
int scrub_start(int (*threadfn)(void *data), struct lustre_scrub *scrub,
void *data, __u32 flags)
{
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct task_struct *task;
int rc;
ENTRY;
struct task_struct *task;
int rc;
ENTRY;
-again:
- /* os_lock: sync status between stop and scrub thread */
- spin_lock(&scrub->os_lock);
- if (thread_is_running(thread)) {
- spin_unlock(&scrub->os_lock);
- }
-
- if (unlikely(thread_is_stopping(thread))) {
- spin_unlock(&scrub->os_lock);
- wait_event_idle(thread->t_ctl_waitq,
- thread_is_stopped(thread));
- goto again;
- }
- spin_unlock(&scrub->os_lock);
if (scrub->os_file.sf_status == SS_COMPLETED) {
if (!(flags & SS_SET_FAILOUT))
if (scrub->os_file.sf_status == SS_COMPLETED) {
if (!(flags & SS_SET_FAILOUT))
- scrub->os_start_flags = flags;
- thread_set_flags(thread, 0);
- task = kthread_run(threadfn, data, "OI_scrub");
+ task = kthread_create(threadfn, data, "OI_scrub");
if (IS_ERR(task)) {
rc = PTR_ERR(task);
CERROR("%s: cannot start iteration thread: rc = %d\n",
scrub->os_name, rc);
RETURN(rc);
}
if (IS_ERR(task)) {
rc = PTR_ERR(task);
CERROR("%s: cannot start iteration thread: rc = %d\n",
scrub->os_name, rc);
RETURN(rc);
}
-
- wait_event_idle(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread));
+ spin_lock(&scrub->os_lock);
+ if (scrub->os_task) {
+ /* Lost a race */
+ spin_unlock(&scrub->os_lock);
+ kthread_stop(task);
+ RETURN(-EALREADY);
+ }
+ scrub->os_start_flags = flags;
+ scrub->os_task = task;
+ wake_up_process(task);
+ spin_unlock(&scrub->os_lock);
+ wait_var_event(scrub, scrub->os_running || !scrub->os_task);
void scrub_stop(struct lustre_scrub *scrub)
{
void scrub_stop(struct lustre_scrub *scrub)
{
- struct ptlrpc_thread *thread = &scrub->os_thread;
+ struct task_struct *task;
- /* os_lock: sync status between stop and scrub thread */
spin_lock(&scrub->os_lock);
spin_lock(&scrub->os_lock);
- if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
- thread_set_flags(thread, SVC_STOPPING);
- spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
- wait_event_idle(thread->t_ctl_waitq,
- thread_is_stopped(thread));
- /* Do not skip the last lock/unlock, which can guarantee that
- * the caller cannot return until the OI scrub thread exit. */
- spin_lock(&scrub->os_lock);
- }
spin_unlock(&scrub->os_lock);
spin_unlock(&scrub->os_lock);
+ task = xchg(&scrub->os_task, NULL);
+ if (task)
+ kthread_stop(task);
}
EXPORT_SYMBOL(scrub_stop);
}
EXPORT_SYMBOL(scrub_stop);
sf->sf_items_igif, sf->sf_success_count);
speed = checked;
sf->sf_items_igif, sf->sf_success_count);
speed = checked;
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
s64 new_checked = scrub->os_new_checked;
time64_t duration;
time64_t rtime;
s64 new_checked = scrub->os_new_checked;
time64_t duration;
time64_t rtime;
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (scrub->os_partial_scan && !scrub->os_in_join)
goto join;
if (scrub->os_partial_scan && !scrub->os_in_join)
goto join;
if (!fid_is_norm(fid) && !fid_is_igif(fid))
RETURN(0);
if (!fid_is_norm(fid) && !fid_is_igif(fid))
RETURN(0);
- if (thread_is_running(&scrub->os_thread) &&
- scrub->os_pos_current > id->oii_ino)
+ if (scrub->os_running && scrub->os_pos_current > id->oii_ino)
RETURN(0);
if (dev->od_auto_scrub_interval == AS_NEVER ||
RETURN(0);
if (dev->od_auto_scrub_interval == AS_NEVER ||
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (inode == NULL) {
inode = osd_iget(oti, dev, id);
/* The inode has been removed (by race maybe). */
if (inode == NULL) {
inode = osd_iget(oti, dev, id);
/* The inode has been removed (by race maybe). */
static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
__u32 flags = scrub->os_start_flags;
int rc;
struct scrub_file *sf = &scrub->os_file;
__u32 flags = scrub->os_start_flags;
int rc;
rc = scrub_file_store(env, scrub);
if (rc == 0) {
spin_lock(&scrub->os_lock);
rc = scrub_file_store(env, scrub);
if (rc == 0) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&scrub->os_lock);
spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
}
up_write(&scrub->os_rwsem);
}
up_write(&scrub->os_rwsem);
down_write(&scrub->os_rwsem);
spin_lock(&scrub->os_lock);
down_write(&scrub->os_rwsem);
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
spin_unlock(&scrub->os_lock);
if (scrub->os_new_checked > 0) {
sf->sf_items_checked += scrub->os_new_checked;
spin_unlock(&scrub->os_lock);
if (scrub->os_new_checked > 0) {
sf->sf_items_checked += scrub->os_new_checked;
struct osd_idmap_cache **oic, const bool noslot)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct osd_idmap_cache **oic, const bool noslot)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct lu_fid *fid;
struct osd_inode_id *lid;
int rc;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0)
struct lu_fid *fid;
struct osd_inode_id *lid;
int rc;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0)
- wait_event_idle_timeout(
- thread->t_ctl_waitq,
+ wait_var_event_timeout(
+ scrub,
!list_empty(&scrub->os_inconsistent_items) ||
!list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
cfs_time_seconds(cfs_fail_val));
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
cfs_time_seconds(cfs_fail_val));
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&scrub->os_lock);
return SCRUB_NEXT_CRASH;
}
spin_unlock(&scrub->os_lock);
return SCRUB_NEXT_CRASH;
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_FATAL))
return SCRUB_NEXT_FATAL;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_FATAL))
return SCRUB_NEXT_FATAL;
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
return SCRUB_NEXT_EXIT;
if (!list_empty(&scrub->os_inconsistent_items)) {
return SCRUB_NEXT_EXIT;
if (!list_empty(&scrub->os_inconsistent_items)) {
{
struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
{
struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
- if (thread_is_running(thread) &&
+ if (scrub->os_running &&
ooc->ooc_pos_preload >= scrub->os_pos_current)
return SCRUB_NEXT_EXIT;
ooc->ooc_pos_preload >= scrub->os_pos_current)
return SCRUB_NEXT_EXIT;
spin_lock(&scrub->os_lock);
if (osd_scrub_has_window(scrub, &it->ooi_cache) ||
!list_empty(&scrub->os_inconsistent_items) ||
spin_lock(&scrub->os_lock);
if (osd_scrub_has_window(scrub, &it->ooi_cache) ||
!list_empty(&scrub->os_inconsistent_items) ||
- it->ooi_waiting || !thread_is_running(&scrub->os_thread))
+ it->ooi_waiting || kthread_should_stop())
scrub->os_waiting = 0;
else
scrub->os_waiting = 1;
scrub->os_waiting = 0;
else
scrub->os_waiting = 1;
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct scrub_file *sf = &scrub->os_file;
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct scrub_file *sf = &scrub->os_file;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = it ? &it->ooi_cache : NULL;
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = it ? &it->ooi_cache : NULL;
ooc->ooc_pos_preload < scrub->os_pos_current) {
spin_lock(&scrub->os_lock);
it->ooi_waiting = 0;
ooc->ooc_pos_preload < scrub->os_pos_current) {
spin_lock(&scrub->os_lock);
it->ooi_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
spin_unlock(&scrub->os_lock);
}
spin_unlock(&scrub->os_lock);
}
- if (it != NULL)
- wait_event_idle(thread->t_ctl_waitq,
- osd_scrub_wakeup(scrub, it));
+ if (it)
+ wait_var_event(scrub, osd_scrub_wakeup(scrub, it));
if (!ooc || osd_scrub_has_window(scrub, ooc))
*noslot = false;
if (!ooc || osd_scrub_has_window(scrub, ooc))
*noslot = false;
__u32 flags, bool inconsistent)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
__u32 flags, bool inconsistent)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
int rc;
ENTRY;
struct scrub_file *sf = &scrub->os_file;
int rc;
ENTRY;
scrub->os_in_join = 0;
scrub->os_full_scrub = 0;
spin_unlock(&scrub->os_lock);
scrub->os_in_join = 0;
scrub->os_full_scrub = 0;
spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
up_write(&scrub->os_rwsem);
CDEBUG(D_LFSCK, "%s: joined in the OI scrub with flag %u: rc = %d\n",
up_write(&scrub->os_rwsem);
CDEBUG(D_LFSCK, "%s: joined in the OI scrub with flag %u: rc = %d\n",
struct osd_device *dev, __u32 max, bool preload)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct osd_device *dev, __u32 max, bool preload)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
osd_iit_next_policy next;
osd_iit_exec_policy exec;
struct scrub_file *sf = &scrub->os_file;
osd_iit_next_policy next;
osd_iit_exec_policy exec;
sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
SF_UPGRADE | SF_AUTO);
sf->sf_status = SS_COMPLETED;
sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
SF_UPGRADE | SF_AUTO);
sf->sf_status = SS_COMPLETED;
- wait_event_idle(
- thread->t_ctl_waitq,
- !thread_is_running(thread) ||
+ wait_var_event(
+ scrub,
+ kthread_should_stop() ||
!scrub->os_partial_scan ||
scrub->os_in_join ||
!list_empty(&scrub->os_inconsistent_items));
sf->sf_flags = saved_flags;
sf->sf_status = SS_SCANNING;
!scrub->os_partial_scan ||
scrub->os_in_join ||
!list_empty(&scrub->os_inconsistent_items));
sf->sf_flags = saved_flags;
sf->sf_status = SS_SCANNING;
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
RETURN(0);
if (!scrub->os_partial_scan || scrub->os_in_join)
RETURN(0);
if (!scrub->os_partial_scan || scrub->os_in_join)
- wait_event_idle(thread->t_ctl_waitq,
- !thread_is_running(thread) ||
- !scrub->os_in_join);
+ wait_var_event(scrub,
+ kthread_should_stop() ||
+ !scrub->os_in_join);
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
spin_unlock(&scrub->os_lock);
}
spin_unlock(&scrub->os_lock);
}
struct lu_env env;
struct osd_device *dev = (struct osd_device *)args;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct lu_env env;
struct osd_device *dev = (struct osd_device *)args;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = &it->ooi_cache;
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = &it->ooi_cache;
- wait_event_idle(thread->t_ctl_waitq,
- it->ooi_user_ready ||
- !thread_is_running(thread));
- if (unlikely(!thread_is_running(thread)))
+ wait_var_event(scrub,
+ it->ooi_user_ready || kthread_should_stop());
+ if (kthread_should_stop())
GOTO(post, rc = 0);
scrub->os_pos_current = ooc->ooc_pos_preload;
GOTO(post, rc = 0);
scrub->os_pos_current = ooc->ooc_pos_preload;
rc = osd_inode_iteration(osd_oti_get(&env), dev, ~0U, false);
if (unlikely(rc == SCRUB_IT_CRASH)) {
spin_lock(&scrub->os_lock);
rc = osd_inode_iteration(osd_oti_get(&env), dev, ~0U, false);
if (unlikely(rc == SCRUB_IT_CRASH)) {
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
spin_unlock(&scrub->os_lock);
GOTO(out, rc = -EINVAL);
}
spin_unlock(&scrub->os_lock);
GOTO(out, rc = -EINVAL);
}
noenv:
spin_lock(&scrub->os_lock);
noenv:
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPED);
- wake_up_all(&thread->t_ctl_waitq);
spin_unlock(&scrub->os_lock);
spin_unlock(&scrub->os_lock);
+ if (xchg(&scrub->os_task, NULL) == NULL)
+ /* scrub_stop() is waiting, we need to synchronize */
+ wait_var_event(scrub, kthread_should_stop());
+ wake_up_var(scrub);
ctxt->pwd = dev->od_mnt->mnt_root;
ctxt->fs = KERNEL_DS;
ctxt->pwd = dev->od_mnt->mnt_root;
ctxt->fs = KERNEL_DS;
- init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
INIT_LIST_HEAD(&scrub->os_inconsistent_items);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
INIT_LIST_HEAD(&scrub->os_inconsistent_items);
{
spin_lock(&scrub->os_lock);
if (it->ooi_cache.ooc_pos_preload < scrub->os_pos_current ||
{
spin_lock(&scrub->os_lock);
if (it->ooi_cache.ooc_pos_preload < scrub->os_pos_current ||
- scrub->os_waiting ||
- !thread_is_running(&scrub->os_thread))
+ scrub->os_waiting || !scrub->os_running)
it->ooi_waiting = 0;
else
it->ooi_waiting = 1;
it->ooi_waiting = 0;
else
it->ooi_waiting = 1;
static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
{
static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
{
- struct osd_otable_it *it = (struct osd_otable_it *)di;
- struct osd_device *dev = it->ooi_dev;
+ struct osd_otable_it *it = (struct osd_otable_it *)di;
+ struct osd_device *dev = it->ooi_dev;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct osd_otable_cache *ooc = &it->ooi_cache;
- struct ptlrpc_thread *thread = &scrub->os_thread;
- int rc;
+ struct osd_otable_cache *ooc = &it->ooi_cache;
+ int rc;
ENTRY;
LASSERT(it->ooi_user_ready);
again:
ENTRY;
LASSERT(it->ooi_user_ready);
again:
- if (!thread_is_running(thread) && !it->ooi_used_outside)
+ if (!scrub->os_running && !it->ooi_used_outside)
RETURN(1);
if (ooc->ooc_cached_items > 0) {
RETURN(1);
if (ooc->ooc_cached_items > 0) {
}
if (it->ooi_all_cached) {
}
if (it->ooi_all_cached) {
- wait_event_idle(thread->t_ctl_waitq,
- !thread_is_running(thread));
+ wait_var_event(scrub, !scrub->os_running);
RETURN(1);
}
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
RETURN(1);
}
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
spin_unlock(&scrub->os_lock);
}
if (it->ooi_cache.ooc_pos_preload >= scrub->os_pos_current)
spin_unlock(&scrub->os_lock);
}
if (it->ooi_cache.ooc_pos_preload >= scrub->os_pos_current)
- wait_event_idle(thread->t_ctl_waitq,
- osd_otable_it_wakeup(scrub, it));
+ wait_var_event(scrub, osd_otable_it_wakeup(scrub, it));
- if (!thread_is_running(thread) && !it->ooi_used_outside)
+ if (!scrub->os_running && !it->ooi_used_outside)
RETURN(1);
rc = osd_otable_it_preload(env, it);
RETURN(1);
rc = osd_otable_it_preload(env, it);
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
memset(param, 0, sizeof(*param));
param->sb = osd_sb(dev);
memset(param, 0, sizeof(*param));
param->sb = osd_sb(dev);
struct osd_inconsistent_item *oii;
struct osd_scrub *oscrub = &dev->od_scrub;
struct lustre_scrub *lscrub = &oscrub->os_scrub;
struct osd_inconsistent_item *oii;
struct osd_scrub *oscrub = &dev->od_scrub;
struct lustre_scrub *lscrub = &oscrub->os_scrub;
- struct ptlrpc_thread *thread = &lscrub->os_thread;
lscrub->os_full_scrub = 1;
}
lscrub->os_full_scrub = 1;
}
- if (unlikely(!thread_is_running(thread))) {
+ if (!lscrub->os_running) {
spin_unlock(&lscrub->os_lock);
OBD_FREE_PTR(oii);
RETURN(-EAGAIN);
spin_unlock(&lscrub->os_lock);
OBD_FREE_PTR(oii);
RETURN(-EAGAIN);
list_add_tail(&oii->oii_list, &lscrub->os_inconsistent_items);
spin_unlock(&lscrub->os_lock);
list_add_tail(&oii->oii_list, &lscrub->os_inconsistent_items);
spin_unlock(&lscrub->os_lock);
- if (wakeup != 0)
- wake_up_all(&thread->t_ctl_waitq);
+ if (wakeup)
+ wake_up_var(lscrub);
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (scrub->os_pos_current > oid)
RETURN(0);
} else if (osd->od_auto_scrub_interval == AS_NEVER) {
if (scrub->os_pos_current > oid)
RETURN(0);
} else if (osd->od_auto_scrub_interval == AS_NEVER) {
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (!dn) {
rc = __osd_obj2dnode(osd->od_os, oid, &dn);
/* The object has been removed (by race maybe). */
if (!dn) {
rc = __osd_obj2dnode(osd->od_os, oid, &dn);
/* The object has been removed (by race maybe). */
}
/* The case someone triggered the OI scrub already. */
}
/* The case someone triggered the OI scrub already. */
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (!rc) {
LASSERT(remote);
if (!rc) {
LASSERT(remote);
static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
{
struct lustre_scrub *scrub = &dev->od_scrub;
static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
{
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
__u32 flags = scrub->os_start_flags;
int rc;
struct scrub_file *sf = &scrub->os_file;
__u32 flags = scrub->os_start_flags;
int rc;
rc = scrub_file_store(env, scrub);
if (!rc) {
spin_lock(&scrub->os_lock);
rc = scrub_file_store(env, scrub);
if (!rc) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&scrub->os_lock);
spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
}
up_write(&scrub->os_rwsem);
}
up_write(&scrub->os_rwsem);
down_write(&scrub->os_rwsem);
spin_lock(&scrub->os_lock);
down_write(&scrub->os_rwsem);
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
spin_unlock(&scrub->os_lock);
if (scrub->os_new_checked > 0) {
sf->sf_items_checked += scrub->os_new_checked;
spin_unlock(&scrub->os_lock);
if (scrub->os_new_checked > 0) {
sf->sf_items_checked += scrub->os_new_checked;
spin_lock(&scrub->os_lock);
if (osd_scrub_has_window(it) ||
!list_empty(&scrub->os_inconsistent_items) ||
spin_lock(&scrub->os_lock);
if (osd_scrub_has_window(it) ||
!list_empty(&scrub->os_inconsistent_items) ||
- it->ooi_waiting || !thread_is_running(&scrub->os_thread))
+ it->ooi_waiting || kthread_should_stop())
scrub->os_waiting = 0;
else
scrub->os_waiting = 1;
scrub->os_waiting = 0;
else
scrub->os_waiting = 1;
struct lu_fid *fid, uint64_t *oid)
{
struct lustre_scrub *scrub = &dev->od_scrub;
struct lu_fid *fid, uint64_t *oid)
{
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
struct lustre_mdt_attrs *lma = NULL;
nvlist_t *nvbuf = NULL;
struct osd_otable_it *it = dev->od_otable_it;
struct lustre_mdt_attrs *lma = NULL;
nvlist_t *nvbuf = NULL;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
- wait_event_idle_timeout(
- thread->t_ctl_waitq,
+ wait_var_event_timeout(
+ scrub,
!list_empty(&scrub->os_inconsistent_items) ||
!list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
cfs_time_seconds(cfs_fail_val));
cfs_time_seconds(cfs_fail_val));
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
RETURN(SCRUB_NEXT_EXIT);
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
RETURN(SCRUB_NEXT_EXIT);
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&scrub->os_lock);
RETURN(SCRUB_NEXT_CRASH);
}
spin_unlock(&scrub->os_lock);
RETURN(SCRUB_NEXT_CRASH);
}
}
if (!scrub->os_full_speed && !osd_scrub_has_window(it))
}
if (!scrub->os_full_speed && !osd_scrub_has_window(it))
- wait_event_idle(thread->t_ctl_waitq,
- osd_scrub_wakeup(scrub, it));
+ wait_var_event(scrub, osd_scrub_wakeup(scrub, it));
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
GOTO(out, rc = SCRUB_NEXT_EXIT);
rc = -dmu_object_next(dev->od_os, &scrub->os_pos_current, B_FALSE, 0);
GOTO(out, rc = SCRUB_NEXT_EXIT);
rc = -dmu_object_next(dev->od_os, &scrub->os_pos_current, B_FALSE, 0);
it->ooi_prefetched++;
if (it->ooi_waiting) {
it->ooi_waiting = 0;
it->ooi_prefetched++;
if (it->ooi_waiting) {
it->ooi_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
}
spin_unlock(&scrub->os_lock);
}
}
spin_unlock(&scrub->os_lock);
}
const struct lu_fid *fid, uint64_t oid, int rc)
{
struct lustre_scrub *scrub = &dev->od_scrub;
const struct lu_fid *fid, uint64_t oid, int rc)
{
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
rc = osd_scrub_check_update(env, dev, fid, oid, rc);
struct osd_otable_it *it = dev->od_otable_it;
rc = osd_scrub_check_update(env, dev, fid, oid, rc);
it->ooi_prefetched++;
if (it->ooi_waiting) {
it->ooi_waiting = 0;
it->ooi_prefetched++;
if (it->ooi_waiting) {
it->ooi_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
}
spin_unlock(&scrub->os_lock);
}
}
spin_unlock(&scrub->os_lock);
}
struct lu_env env;
struct osd_device *dev = (struct osd_device *)args;
struct lustre_scrub *scrub = &dev->od_scrub;
struct lu_env env;
struct osd_device *dev = (struct osd_device *)args;
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct lu_fid *fid;
uint64_t oid;
int rc = 0;
struct lu_fid *fid;
uint64_t oid;
int rc = 0;
if (!scrub->os_full_speed) {
struct osd_otable_it *it = dev->od_otable_it;
if (!scrub->os_full_speed) {
struct osd_otable_it *it = dev->od_otable_it;
- wait_event_idle(thread->t_ctl_waitq,
- it->ooi_user_ready ||
- !thread_is_running(thread));
+ wait_var_event(scrub,
+ it->ooi_user_ready ||
+ kthread_should_stop());
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
GOTO(post, rc = 0);
scrub->os_pos_current = it->ooi_pos;
GOTO(post, rc = 0);
scrub->os_pos_current = it->ooi_pos;
scrub->os_pos_current);
fid = &osd_oti_get(&env)->oti_fid;
scrub->os_pos_current);
fid = &osd_oti_get(&env)->oti_fid;
- while (!rc && thread_is_running(thread)) {
+ while (!rc && !kthread_should_stop()) {
rc = osd_scrub_next(&env, dev, fid, &oid);
switch (rc) {
case SCRUB_NEXT_EXIT:
GOTO(post, rc = 0);
case SCRUB_NEXT_CRASH:
spin_lock(&scrub->os_lock);
rc = osd_scrub_next(&env, dev, fid, &oid);
switch (rc) {
case SCRUB_NEXT_EXIT:
GOTO(post, rc = 0);
case SCRUB_NEXT_CRASH:
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
spin_unlock(&scrub->os_lock);
GOTO(out, rc = -EINVAL);
case SCRUB_NEXT_FATAL:
spin_unlock(&scrub->os_lock);
GOTO(out, rc = -EINVAL);
case SCRUB_NEXT_FATAL:
noenv:
spin_lock(&scrub->os_lock);
noenv:
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPED);
- wake_up_all(&thread->t_ctl_waitq);
spin_unlock(&scrub->os_lock);
spin_unlock(&scrub->os_lock);
+ if (xchg(&scrub->os_task, NULL) == NULL)
+ /* scrub_stop is waiting, we need to synchronize */
+ wait_var_event(scrub, kthread_should_stop());
+ wake_up_var(scrub);
&dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid,
sizeof(dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid));
memset(&dev->od_scrub, 0, sizeof(struct lustre_scrub));
&dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid,
sizeof(dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid));
memset(&dev->od_scrub, 0, sizeof(struct lustre_scrub));
- init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
INIT_LIST_HEAD(&scrub->os_inconsistent_items);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
INIT_LIST_HEAD(&scrub->os_inconsistent_items);
{
spin_lock(&scrub->os_lock);
if (it->ooi_pos < scrub->os_pos_current || scrub->os_waiting ||
{
spin_lock(&scrub->os_lock);
if (it->ooi_pos < scrub->os_pos_current || scrub->os_waiting ||
- !thread_is_running(&scrub->os_thread))
it->ooi_waiting = 0;
else
it->ooi_waiting = 1;
it->ooi_waiting = 0;
else
it->ooi_waiting = 1;
struct osd_otable_it *it = (struct osd_otable_it *)di;
struct osd_device *dev = it->ooi_dev;
struct lustre_scrub *scrub = &dev->od_scrub;
struct osd_otable_it *it = (struct osd_otable_it *)di;
struct osd_device *dev = it->ooi_dev;
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct lustre_mdt_attrs *lma = NULL;
nvlist_t *nvbuf = NULL;
int rc, size = 0;
struct lustre_mdt_attrs *lma = NULL;
nvlist_t *nvbuf = NULL;
int rc, size = 0;
}
if (it->ooi_pos >= scrub->os_pos_current)
}
if (it->ooi_pos >= scrub->os_pos_current)
- wait_event_idle(thread->t_ctl_waitq,
- osd_otable_it_wakeup(scrub, it));
+ wait_var_event(scrub,
+ osd_otable_it_wakeup(scrub, it));
- if (!thread_is_running(thread) && !it->ooi_used_outside)
+ if (!scrub->os_running && !it->ooi_used_outside)
GOTO(out, rc = 1);
rc = -dmu_object_next(dev->od_os, &it->ooi_pos, B_FALSE, 0);
GOTO(out, rc = 1);
rc = -dmu_object_next(dev->od_os, &it->ooi_pos, B_FALSE, 0);
if (!scrub->os_full_speed) {
if (scrub->os_waiting) {
scrub->os_waiting = 0;
if (!scrub->os_full_speed) {
if (scrub->os_waiting) {
scrub->os_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
it->ooi_prefetched_dnode = 0;
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
it->ooi_prefetched_dnode = 0;
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
/* Unplug OSD layer iteration by the first next() call. */
rc = osd_otable_it_next(env, (struct dt_it *)it);
/* Unplug OSD layer iteration by the first next() call. */
rc = osd_otable_it_next(env, (struct dt_it *)it);
const struct lu_fid *fid, uint64_t oid, bool insert)
{
struct lustre_scrub *scrub = &dev->od_scrub;
const struct lu_fid *fid, uint64_t oid, bool insert)
{
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_inconsistent_item *oii;
bool wakeup = false;
ENTRY;
struct osd_inconsistent_item *oii;
bool wakeup = false;
ENTRY;
oii->oii_insert = insert;
spin_lock(&scrub->os_lock);
oii->oii_insert = insert;
spin_lock(&scrub->os_lock);
- if (unlikely(!thread_is_running(thread))) {
+ if (!scrub->os_running) {
spin_unlock(&scrub->os_lock);
OBD_FREE_PTR(oii);
RETURN(-EAGAIN);
spin_unlock(&scrub->os_lock);
OBD_FREE_PTR(oii);
RETURN(-EAGAIN);
spin_unlock(&scrub->os_lock);
if (wakeup)
spin_unlock(&scrub->os_lock);
if (wakeup)
- wake_up_all(&thread->t_ctl_waitq);