__ret = __wait_var_event_timeout(var, condition, timeout); \
__ret; \
})
+#else /* !HAVE_WAIT_VAR_EVENT */
+/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
+ * ___wait_cond_timeout(), but doesn't define ___wait_cond_timeout !!!
+ * Supply the upstream kernel definition when it is missing.
+ */
+# ifndef ___wait_cond_timeout
+# define ___wait_cond_timeout(condition) \
+({ \
+ bool __cond = (condition); \
+ if (__cond && !__ret) \
+ __ret = 1; \
+ __cond || !__ret; \
+})
+# endif /* ___wait_cond_timeout */
+
#endif /* ! HAVE_WAIT_VAR_EVENT */
/*
/* Object for the scrub file. */
struct dt_object *os_obj;
- struct ptlrpc_thread os_thread;
+ struct task_struct *os_task;
struct list_head os_inconsistent_items;
/* write lock for scrub prep/update/post/checkpoint,
os_convert_igif:1,
os_partial_scan:1,
os_in_join:1,
+ os_running:1, /* scrub thread is running */
os_full_scrub:1;
};
int scrub_start(int (*threadfn)(void *data), struct lustre_scrub *scrub,
void *data, __u32 flags)
{
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct task_struct *task;
int rc;
ENTRY;
-again:
- /* os_lock: sync status between stop and scrub thread */
- spin_lock(&scrub->os_lock);
- if (thread_is_running(thread)) {
- spin_unlock(&scrub->os_lock);
+ /* unlocked fast-path check; the race is re-checked under os_lock below */
+ if (scrub->os_task)
RETURN(-EALREADY);
- }
-
- if (unlikely(thread_is_stopping(thread))) {
- spin_unlock(&scrub->os_lock);
- wait_event_idle(thread->t_ctl_waitq,
- thread_is_stopped(thread));
- goto again;
- }
- spin_unlock(&scrub->os_lock);
if (scrub->os_file.sf_status == SS_COMPLETED) {
if (!(flags & SS_SET_FAILOUT))
flags |= SS_RESET;
}
- scrub->os_start_flags = flags;
- thread_set_flags(thread, 0);
- task = kthread_run(threadfn, data, "OI_scrub");
+ /* create without running so os_task can be published before wakeup */
+ task = kthread_create(threadfn, data, "OI_scrub");
if (IS_ERR(task)) {
rc = PTR_ERR(task);
CERROR("%s: cannot start iteration thread: rc = %d\n",
scrub->os_name, rc);
RETURN(rc);
}
-
- wait_event_idle(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread));
+ /* publish the new task under os_lock; a concurrent scrub_start()
+ * that beat us to setting os_task wins, and we discard our thread
+ */
+ spin_lock(&scrub->os_lock);
+ if (scrub->os_task) {
+ /* Lost a race */
+ spin_unlock(&scrub->os_lock);
+ kthread_stop(task);
+ RETURN(-EALREADY);
+ }
+ scrub->os_start_flags = flags;
+ scrub->os_task = task;
+ wake_up_process(task);
+ spin_unlock(&scrub->os_lock);
+ /* don't return until the thread has started (os_running set) or
+ * has already exited (os_task cleared by the thread itself)
+ */
+ wait_var_event(scrub, scrub->os_running || !scrub->os_task);
RETURN(0);
}
void scrub_stop(struct lustre_scrub *scrub)
{
- struct ptlrpc_thread *thread = &scrub->os_thread;
+ struct task_struct *task;
- /* os_lock: sync status between stop and scrub thread */
spin_lock(&scrub->os_lock);
- if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
- thread_set_flags(thread, SVC_STOPPING);
- spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
- wait_event_idle(thread->t_ctl_waitq,
- thread_is_stopped(thread));
- /* Do not skip the last lock/unlock, which can guarantee that
- * the caller cannot return until the OI scrub thread exit. */
- spin_lock(&scrub->os_lock);
- }
+ /* clear os_running under os_lock so readers see a consistent state */
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
+ /* xchg() claims ownership of the task: the scrub thread clears
+ * os_task itself on exit, so exactly one side calls kthread_stop().
+ * kthread_stop() does not return until the thread has exited.
+ */
+ task = xchg(&scrub->os_task, NULL);
+ if (task)
+ kthread_stop(task);
}
EXPORT_SYMBOL(scrub_stop);
sf->sf_items_igif, sf->sf_success_count);
speed = checked;
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
s64 new_checked = scrub->os_new_checked;
time64_t duration;
time64_t rtime;
}
}
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (scrub->os_partial_scan && !scrub->os_in_join)
goto join;
if (!fid_is_norm(fid) && !fid_is_igif(fid))
RETURN(0);
- if (thread_is_running(&scrub->os_thread) &&
- scrub->os_pos_current > id->oii_ino)
+ if (scrub->os_running && scrub->os_pos_current > id->oii_ino)
RETURN(0);
if (dev->od_auto_scrub_interval == AS_NEVER ||
insert = false;
trigger:
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (inode == NULL) {
inode = osd_iget(oti, dev, id);
/* The inode has been removed (by race maybe). */
static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
__u32 flags = scrub->os_start_flags;
int rc;
rc = scrub_file_store(env, scrub);
if (rc == 0) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_RUNNING);
+ scrub->os_running = 1;
spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
}
up_write(&scrub->os_rwsem);
down_write(&scrub->os_rwsem);
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
if (scrub->os_new_checked > 0) {
sf->sf_items_checked += scrub->os_new_checked;
struct osd_idmap_cache **oic, const bool noslot)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct lu_fid *fid;
struct osd_inode_id *lid;
int rc;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0)
- wait_event_idle_timeout(
- thread->t_ctl_waitq,
+ wait_var_event_timeout(
+ scrub,
!list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
+ kthread_should_stop(),
cfs_time_seconds(cfs_fail_val));
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
return SCRUB_NEXT_CRASH;
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_FATAL))
return SCRUB_NEXT_FATAL;
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
return SCRUB_NEXT_EXIT;
if (!list_empty(&scrub->os_inconsistent_items)) {
{
struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
int rc;
- if (thread_is_running(thread) &&
+ if (scrub->os_running &&
ooc->ooc_pos_preload >= scrub->os_pos_current)
return SCRUB_NEXT_EXIT;
spin_lock(&scrub->os_lock);
if (osd_scrub_has_window(scrub, &it->ooi_cache) ||
!list_empty(&scrub->os_inconsistent_items) ||
- it->ooi_waiting || !thread_is_running(&scrub->os_thread))
+ it->ooi_waiting || kthread_should_stop())
scrub->os_waiting = 0;
else
scrub->os_waiting = 1;
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct scrub_file *sf = &scrub->os_file;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = it ? &it->ooi_cache : NULL;
ooc->ooc_pos_preload < scrub->os_pos_current) {
spin_lock(&scrub->os_lock);
it->ooi_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
spin_unlock(&scrub->os_lock);
}
return 0;
}
- if (it != NULL)
- wait_event_idle(thread->t_ctl_waitq,
- osd_scrub_wakeup(scrub, it));
+ if (it)
+ wait_var_event(scrub, osd_scrub_wakeup(scrub, it));
if (!ooc || osd_scrub_has_window(scrub, ooc))
*noslot = false;
__u32 flags, bool inconsistent)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
int rc;
ENTRY;
scrub->os_in_join = 0;
scrub->os_full_scrub = 0;
spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
up_write(&scrub->os_rwsem);
CDEBUG(D_LFSCK, "%s: joined in the OI scrub with flag %u: rc = %d\n",
struct osd_device *dev, __u32 max, bool preload)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
osd_iit_next_policy next;
osd_iit_exec_policy exec;
sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
SF_UPGRADE | SF_AUTO);
sf->sf_status = SS_COMPLETED;
- wait_event_idle(
- thread->t_ctl_waitq,
- !thread_is_running(thread) ||
+ wait_var_event(
+ scrub,
+ kthread_should_stop() ||
!scrub->os_partial_scan ||
scrub->os_in_join ||
!list_empty(&scrub->os_inconsistent_items));
sf->sf_flags = saved_flags;
sf->sf_status = SS_SCANNING;
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
RETURN(0);
if (!scrub->os_partial_scan || scrub->os_in_join)
full:
if (!preload) {
- wait_event_idle(thread->t_ctl_waitq,
- !thread_is_running(thread) ||
- !scrub->os_in_join);
+ wait_var_event(scrub,
+ kthread_should_stop() ||
+ !scrub->os_in_join);
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
RETURN(0);
}
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
+ wake_up_var(scrub);
spin_unlock(&scrub->os_lock);
}
struct lu_env env;
struct osd_device *dev = (struct osd_device *)args;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
int rc;
ENTRY;
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = &it->ooi_cache;
- wait_event_idle(thread->t_ctl_waitq,
- it->ooi_user_ready ||
- !thread_is_running(thread));
- if (unlikely(!thread_is_running(thread)))
+ wait_var_event(scrub,
+ it->ooi_user_ready || kthread_should_stop());
+ if (kthread_should_stop())
GOTO(post, rc = 0);
scrub->os_pos_current = ooc->ooc_pos_preload;
rc = osd_inode_iteration(osd_oti_get(&env), dev, ~0U, false);
if (unlikely(rc == SCRUB_IT_CRASH)) {
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
GOTO(out, rc = -EINVAL);
}
noenv:
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPED);
- wake_up_all(&thread->t_ctl_waitq);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
+ if (xchg(&scrub->os_task, NULL) == NULL)
+ /* scrub_stop() is waiting, we need to synchronize */
+ wait_var_event(scrub, kthread_should_stop());
+ wake_up_var(scrub);
return rc;
}
ctxt->pwd = dev->od_mnt->mnt_root;
ctxt->fs = KERNEL_DS;
- init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
INIT_LIST_HEAD(&scrub->os_inconsistent_items);
{
spin_lock(&scrub->os_lock);
if (it->ooi_cache.ooc_pos_preload < scrub->os_pos_current ||
- scrub->os_waiting ||
- !thread_is_running(&scrub->os_thread))
+ scrub->os_waiting || !scrub->os_running)
it->ooi_waiting = 0;
else
it->ooi_waiting = 1;
static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
{
- struct osd_otable_it *it = (struct osd_otable_it *)di;
- struct osd_device *dev = it->ooi_dev;
+ struct osd_otable_it *it = (struct osd_otable_it *)di;
+ struct osd_device *dev = it->ooi_dev;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct osd_otable_cache *ooc = &it->ooi_cache;
- struct ptlrpc_thread *thread = &scrub->os_thread;
- int rc;
+ struct osd_otable_cache *ooc = &it->ooi_cache;
+ int rc;
ENTRY;
LASSERT(it->ooi_user_ready);
again:
- if (!thread_is_running(thread) && !it->ooi_used_outside)
+ if (!scrub->os_running && !it->ooi_used_outside)
RETURN(1);
if (ooc->ooc_cached_items > 0) {
}
if (it->ooi_all_cached) {
- wait_event_idle(thread->t_ctl_waitq,
- !thread_is_running(thread));
+ wait_var_event(scrub, !scrub->os_running);
RETURN(1);
}
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
+ wake_up_var(scrub);
spin_unlock(&scrub->os_lock);
}
if (it->ooi_cache.ooc_pos_preload >= scrub->os_pos_current)
- wait_event_idle(thread->t_ctl_waitq,
- osd_otable_it_wakeup(scrub, it));
+ wait_var_event(scrub, osd_otable_it_wakeup(scrub, it));
- if (!thread_is_running(thread) && !it->ooi_used_outside)
+ if (!scrub->os_running && !it->ooi_used_outside)
RETURN(1);
rc = osd_otable_it_preload(env, it);
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
+ wake_up_var(scrub);
memset(param, 0, sizeof(*param));
param->sb = osd_sb(dev);
struct osd_inconsistent_item *oii;
struct osd_scrub *oscrub = &dev->od_scrub;
struct lustre_scrub *lscrub = &oscrub->os_scrub;
- struct ptlrpc_thread *thread = &lscrub->os_thread;
int wakeup = 0;
ENTRY;
lscrub->os_full_scrub = 1;
}
- if (unlikely(!thread_is_running(thread))) {
+ if (!lscrub->os_running) {
spin_unlock(&lscrub->os_lock);
OBD_FREE_PTR(oii);
RETURN(-EAGAIN);
list_add_tail(&oii->oii_list, &lscrub->os_inconsistent_items);
spin_unlock(&lscrub->os_lock);
- if (wakeup != 0)
- wake_up_all(&thread->t_ctl_waitq);
+ if (wakeup)
+ wake_up_var(lscrub);
RETURN(0);
}
RETURN(rc);
}
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (scrub->os_pos_current > oid)
RETURN(0);
} else if (osd->od_auto_scrub_interval == AS_NEVER) {
insert = false;
trigger:
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (!dn) {
rc = __osd_obj2dnode(osd->od_os, oid, &dn);
/* The object has been removed (by race maybe). */
}
/* The case someone triggered the OI scrub already. */
- if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_running) {
if (!rc) {
LASSERT(remote);
static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
{
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
__u32 flags = scrub->os_start_flags;
int rc;
rc = scrub_file_store(env, scrub);
if (!rc) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_RUNNING);
+ scrub->os_running = 1;
spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
}
up_write(&scrub->os_rwsem);
down_write(&scrub->os_rwsem);
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
if (scrub->os_new_checked > 0) {
sf->sf_items_checked += scrub->os_new_checked;
spin_lock(&scrub->os_lock);
if (osd_scrub_has_window(it) ||
!list_empty(&scrub->os_inconsistent_items) ||
- it->ooi_waiting || !thread_is_running(&scrub->os_thread))
+ it->ooi_waiting || kthread_should_stop())
scrub->os_waiting = 0;
else
scrub->os_waiting = 1;
struct lu_fid *fid, uint64_t *oid)
{
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
struct lustre_mdt_attrs *lma = NULL;
nvlist_t *nvbuf = NULL;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
- wait_event_idle_timeout(
- thread->t_ctl_waitq,
+ wait_var_event_timeout(
+ scrub,
!list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
+ kthread_should_stop(),
cfs_time_seconds(cfs_fail_val));
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
RETURN(SCRUB_NEXT_EXIT);
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
RETURN(SCRUB_NEXT_CRASH);
}
}
if (!scrub->os_full_speed && !osd_scrub_has_window(it))
- wait_event_idle(thread->t_ctl_waitq,
- osd_scrub_wakeup(scrub, it));
+ wait_var_event(scrub, osd_scrub_wakeup(scrub, it));
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
GOTO(out, rc = SCRUB_NEXT_EXIT);
rc = -dmu_object_next(dev->od_os, &scrub->os_pos_current, B_FALSE, 0);
it->ooi_prefetched++;
if (it->ooi_waiting) {
it->ooi_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
}
spin_unlock(&scrub->os_lock);
}
const struct lu_fid *fid, uint64_t oid, int rc)
{
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
rc = osd_scrub_check_update(env, dev, fid, oid, rc);
it->ooi_prefetched++;
if (it->ooi_waiting) {
it->ooi_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
}
spin_unlock(&scrub->os_lock);
}
struct lu_env env;
struct osd_device *dev = (struct osd_device *)args;
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct lu_fid *fid;
uint64_t oid;
int rc = 0;
if (!scrub->os_full_speed) {
struct osd_otable_it *it = dev->od_otable_it;
- wait_event_idle(thread->t_ctl_waitq,
- it->ooi_user_ready ||
- !thread_is_running(thread));
+ wait_var_event(scrub,
+ it->ooi_user_ready ||
+ kthread_should_stop());
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
GOTO(post, rc = 0);
scrub->os_pos_current = it->ooi_pos;
scrub->os_pos_current);
fid = &osd_oti_get(&env)->oti_fid;
- while (!rc && thread_is_running(thread)) {
+ while (!rc && !kthread_should_stop()) {
rc = osd_scrub_next(&env, dev, fid, &oid);
switch (rc) {
case SCRUB_NEXT_EXIT:
GOTO(post, rc = 0);
case SCRUB_NEXT_CRASH:
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
GOTO(out, rc = -EINVAL);
case SCRUB_NEXT_FATAL:
noenv:
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPED);
- wake_up_all(&thread->t_ctl_waitq);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
+ if (xchg(&scrub->os_task, NULL) == NULL)
+ /* scrub_stop is waiting, we need to synchronize */
+ wait_var_event(scrub, kthread_should_stop());
+ wake_up_var(scrub);
return rc;
}
&dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid,
sizeof(dsl_dataset_phys(dev->od_os->os_dsl_dataset)->ds_guid));
memset(&dev->od_scrub, 0, sizeof(struct lustre_scrub));
- init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
INIT_LIST_HEAD(&scrub->os_inconsistent_items);
{
spin_lock(&scrub->os_lock);
if (it->ooi_pos < scrub->os_pos_current || scrub->os_waiting ||
- !thread_is_running(&scrub->os_thread))
+ !scrub->os_running)
it->ooi_waiting = 0;
else
it->ooi_waiting = 1;
struct osd_otable_it *it = (struct osd_otable_it *)di;
struct osd_device *dev = it->ooi_dev;
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct lustre_mdt_attrs *lma = NULL;
nvlist_t *nvbuf = NULL;
int rc, size = 0;
}
if (it->ooi_pos >= scrub->os_pos_current)
- wait_event_idle(thread->t_ctl_waitq,
- osd_otable_it_wakeup(scrub, it));
+ wait_var_event(scrub,
+ osd_otable_it_wakeup(scrub, it));
- if (!thread_is_running(thread) && !it->ooi_used_outside)
+ if (!scrub->os_running && !it->ooi_used_outside)
GOTO(out, rc = 1);
rc = -dmu_object_next(dev->od_os, &it->ooi_pos, B_FALSE, 0);
if (!scrub->os_full_speed) {
if (scrub->os_waiting) {
scrub->os_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
}
}
if (locked)
it->ooi_prefetched_dnode = 0;
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
+ wake_up_var(scrub);
/* Unplug OSD layer iteration by the first next() call. */
rc = osd_otable_it_next(env, (struct dt_it *)it);
const struct lu_fid *fid, uint64_t oid, bool insert)
{
struct lustre_scrub *scrub = &dev->od_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_inconsistent_item *oii;
bool wakeup = false;
ENTRY;
oii->oii_insert = insert;
spin_lock(&scrub->os_lock);
- if (unlikely(!thread_is_running(thread))) {
+ if (!scrub->os_running) {
spin_unlock(&scrub->os_lock);
OBD_FREE_PTR(oii);
RETURN(-EAGAIN);
spin_unlock(&scrub->os_lock);
if (wakeup)
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
RETURN(0);
}