* yet, let's wait those threads stopped
*/
if (next_update_transno == 0) {
- struct l_wait_info lwi = { 0 };
-
- l_wait_event(tdtd->tdtd_recovery_threads_waitq,
- atomic_read(
- &tdtd->tdtd_recovery_threads_count) == 0,
- &lwi);
+ wait_event_idle(
+ tdtd->tdtd_recovery_threads_waitq,
+ atomic_read(&tdtd->tdtd_recovery_threads_count)
+ == 0);
next_update_transno =
distribute_txn_get_next_transno(
if (obd->obd_abort_recovery) {
CWARN("recovery is aborted, evict exports in recovery\n");
if (lut->lut_tdtd != NULL) {
- struct l_wait_info lwi = { 0 };
-
tdtd = lut->lut_tdtd;
/*
* Let's wait all of the update log recovery thread
* finished
*/
- l_wait_event(tdtd->tdtd_recovery_threads_waitq,
- atomic_read(&tdtd->tdtd_recovery_threads_count) == 0,
- &lwi);
+ wait_event_idle(
+ tdtd->tdtd_recovery_threads_waitq,
+ atomic_read(&tdtd->tdtd_recovery_threads_count)
+ == 0);
/* Then abort the update recovery list */
dtrq_list_destroy(lut->lut_tdtd);
}
ENTRY;
/* If there is eviction in progress, wait for it to finish. */
- if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
- lwi = LWI_INTR(NULL, NULL);
- rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
- !atomic_read(&exp->exp_obd->obd_evict_inprogress),
- &lwi);
- }
+ wait_event_idle(
+ exp->exp_obd->obd_evict_inprogress_waitq,
+ !atomic_read(&exp->exp_obd->obd_evict_inprogress));
/* Check if client was evicted or reconnected already. */
if (exp->exp_failed ||
ldlm_set_bl_done(lock);
wake_up_all(&lock->l_waitq);
} else if (!ldlm_is_bl_done(lock)) {
- struct l_wait_info lwi = { 0 };
-
/* The lock is guaranteed to have been canceled once
* returning from this function. */
unlock_res_and_lock(lock);
- l_wait_event(lock->l_waitq, is_bl_done(lock), &lwi);
+ wait_event_idle(lock->l_waitq, is_bl_done(lock));
lock_res_and_lock(lock);
}
}
static int expired_lock_main(void *arg)
{
struct list_head *expired = &expired_lock_list;
- struct l_wait_info lwi = { 0 };
int do_dump;
ENTRY;
wake_up(&expired_lock_wait_queue);
while (1) {
- l_wait_event(expired_lock_wait_queue,
- have_expired_locks() ||
- expired_lock_thread_state == ELT_TERMINATE,
- &lwi);
+ wait_event_idle(expired_lock_wait_queue,
+ have_expired_locks() ||
+ expired_lock_thread_state == ELT_TERMINATE);
spin_lock_bh(&waiting_locks_spinlock);
if (expired_lock_dump) {
/* cannot use bltd after this, it is only on caller's stack */
while (1) {
- struct l_wait_info lwi = { 0 };
struct ldlm_bl_work_item *blwi = NULL;
struct obd_export *exp = NULL;
int rc;
rc = ldlm_bl_get_work(blp, &blwi, &exp);
if (rc == 0)
- l_wait_event_exclusive(blp->blp_waitq,
- ldlm_bl_get_work(blp, &blwi,
- &exp),
- &lwi);
+ wait_event_idle_exclusive(blp->blp_waitq,
+ ldlm_bl_get_work(blp, &blwi,
+ &exp));
atomic_inc(&blp->blp_busy_threads);
if (ldlm_bl_thread_need_create(blp, blwi))
if (cancel_flags & LCF_ASYNC) {
unlock_res_and_lock(lock);
} else {
- struct l_wait_info lwi = { 0 };
-
unlock_res_and_lock(lock);
- l_wait_event(lock->l_waitq, is_bl_done(lock), &lwi);
+ wait_event_idle(lock->l_waitq, is_bl_done(lock));
}
LDLM_LOCK_RELEASE(lock);
RETURN(0);
{
struct lfsck_component *com;
struct lfsck_component *next;
- struct l_wait_info lwi = { 0 };
int rc = 0;
int rc1 = 0;
rc1 = rc;
}
- l_wait_event(lfsck->li_thread.t_ctl_waitq,
- atomic_read(&lfsck->li_double_scan_count) == 0,
- &lwi);
+ wait_event_idle(lfsck->li_thread.t_ctl_waitq,
+ atomic_read(&lfsck->li_double_scan_count) == 0);
if (lfsck->li_status != LS_PAUSED &&
lfsck->li_status != LS_CO_PAUSED) {
struct dt_object *oit_obj = lfsck->li_obj_oit;
const struct dt_it_ops *oit_iops = &oit_obj->do_index_ops->dio_it;
struct dt_it *oit_di;
- struct l_wait_info lwi = { 0 };
int rc;
ENTRY;
spin_unlock(&lfsck->li_lock);
wake_up_all(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq,
- lfsck->li_start_unplug ||
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ lfsck->li_start_unplug ||
+ !thread_is_running(thread));
if (!thread_is_running(thread))
GOTO(fini_oit, rc = 0);
struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
struct ptlrpc_thread *mthread = &lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
struct lu_buf buf;
int rc = 0;
int i;
if (unlikely(lovea_slot_is_dummy(objs)))
continue;
- l_wait_event(mthread->t_ctl_waitq,
- lad->lad_prefetched < bk->lb_async_windows ||
- !thread_is_running(mthread) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ lad->lad_prefetched < bk->lb_async_windows ||
+ !thread_is_running(mthread) ||
+ thread_is_stopped(athread));
if (unlikely(!thread_is_running(mthread)) ||
thread_is_stopped(athread))
"rc = %d\n", lfsck_lfsck2name(lfsck), lad->lad_name, rc);
lfsck_thread_args_fini(lta);
} else {
- struct l_wait_info lwi = { 0 };
-
- l_wait_event(mthread->t_ctl_waitq,
- thread_is_running(athread) ||
- thread_is_stopped(athread) ||
- !thread_is_starting(mthread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ thread_is_running(athread) ||
+ thread_is_stopped(athread) ||
+ !thread_is_starting(mthread));
if (unlikely(!thread_is_starting(mthread)))
/* stopped by race */
rc = -ESRCH;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
- l_wait_event(mthread->t_ctl_waitq,
- list_empty(&lad->lad_req_list) ||
- !thread_is_running(mthread) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ list_empty(&lad->lad_req_list) ||
+ !thread_is_running(mthread) ||
+ thread_is_stopped(athread));
if (!thread_is_running(mthread) || thread_is_stopped(athread))
return LFSCK_CHECKPOINT_SKIP;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *athread = &lad->lad_thread;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
- struct l_wait_info lwi = { 0 };
lad->lad_post_result = *result;
if (*result <= 0)
lfsck_lfsck2name(com->lc_lfsck), lad->lad_name, *result);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- (*result > 0 && list_empty(&lad->lad_req_list)) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ (*result > 0 && list_empty(&lad->lad_req_list)) ||
+ thread_is_stopped(athread));
if (lad->lad_assistant_status < 0)
*result = lad->lad_assistant_status;
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
if (status != LS_SCANNING_PHASE2)
set_bit(LAD_EXIT, &lad->lad_flags);
lfsck_lfsck2name(com->lc_lfsck), lad->lad_name, status);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags) ||
+ thread_is_stopped(athread));
CDEBUG(D_LFSCK, "%s: the assistant has done %s double_scan, "
"status %d\n", lfsck_lfsck2name(com->lc_lfsck), lad->lad_name,
struct lfsck_assistant_data *lad = com->lc_data;
struct ptlrpc_thread *mthread = &com->lc_lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
set_bit(LAD_EXIT, &lad->lad_flags);
wake_up_all(&athread->t_ctl_waitq);
- l_wait_event(mthread->t_ctl_waitq,
- thread_is_init(athread) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ thread_is_init(athread) ||
+ thread_is_stopped(athread));
}
int lfsck_load_one_trace_file(const struct lu_env *env,
struct lfsck_bookmark *bk;
struct ptlrpc_thread *thread;
struct lfsck_component *com;
- struct l_wait_info lwi = { 0 };
struct lfsck_thread_args *lta;
struct task_struct *task;
struct lfsck_tgt_descs *ltds;
GOTO(out, rc);
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) ||
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) ||
+ thread_is_stopped(thread));
if (start == NULL || !(start->ls_flags & LPF_BROADCAST)) {
lfsck->li_start_unplug = 1;
wake_up_all(&thread->t_ctl_waitq);
lfsck->li_start_unplug = 1;
wake_up_all(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_stopped(thread));
}
} else {
lfsck->li_start_unplug = 1;
{
struct lfsck_instance *lfsck;
struct ptlrpc_thread *thread;
- struct l_wait_info lwi = { 0 };
int rc = 0;
int rc1 = 0;
ENTRY;
/* It was me set the status as 'stopping' just now, if it is not
* 'stopping' now, then either stopped, or re-started by race. */
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_stopping(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ !thread_is_stopping(thread));
GOTO(put, rc = 0);
struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
struct ptlrpc_thread *mthread = &lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
bool wakeup = false;
- l_wait_event(mthread->t_ctl_waitq,
- lad->lad_prefetched < bk->lb_async_windows ||
- !thread_is_running(mthread) ||
- !thread_is_running(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ lad->lad_prefetched < bk->lb_async_windows ||
+ !thread_is_running(mthread) ||
+ !thread_is_running(athread));
if (unlikely(!thread_is_running(mthread) ||
!thread_is_running(athread)))
/*
 * Wait for all in-flight PCC I/O against @pcci to drain.
 *
 * Sleeps on pcci_waitq until pcci_active_ios drops to zero, using
 * wait_event_idle() so the sleeper does not contribute to load average
 * and cannot be interrupted by signals.  The outer while loop re-checks
 * the counter after each wakeup, so a spurious wakeup (or a new I/O
 * racing in before we observe zero) simply leads to another wait.
 */
static void pcc_layout_wait(struct pcc_inode *pcci)
{
	while (atomic_read(&pcci->pcci_active_ios) > 0) {
		CDEBUG(D_CACHE, "Waiting for IO completion: %d\n",
		       atomic_read(&pcci->pcci_active_ios));
		wait_event_idle(pcci->pcci_waitq,
				atomic_read(&pcci->pcci_active_ios) == 0);
	}
}
struct ll_sb_info *sbi = ll_i2sbi(dir);
struct ll_statahead_info *sai;
struct ptlrpc_thread *thread;
- struct l_wait_info lwi = { 0 };
ENTRY;
sai = ll_sai_get(dir);
wake_up(&thread->t_ctl_waitq);
while (1) {
- l_wait_event(thread->t_ctl_waitq,
- !agl_list_empty(sai) ||
- !thread_is_running(thread),
- &lwi);
- if (!thread_is_running(thread))
+ wait_event_idle(thread->t_ctl_waitq,
+ !agl_list_empty(sai) ||
+ !thread_is_running(thread));
+ if (!thread_is_running(thread))
break;
spin_lock(&plli->lli_agl_lock);
static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
{
struct ptlrpc_thread *thread = &sai->sai_agl_thread;
- struct l_wait_info lwi = { 0 };
struct ll_inode_info *plli;
struct task_struct *task;
ENTRY;
RETURN_EXIT;
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
EXIT;
}
/* wait for spare statahead window */
do {
- l_wait_event(sa_thread->t_ctl_waitq,
- !sa_sent_full(sai) ||
- sa_has_callback(sai) ||
- !agl_list_empty(sai) ||
- !thread_is_running(sa_thread),
- &lwi);
+ wait_event_idle(sa_thread->t_ctl_waitq,
+ !sa_sent_full(sai) ||
+ sa_has_callback(sai) ||
+ !agl_list_empty(sai) ||
+ !thread_is_running(sa_thread));
sa_handle_callback(sai);
/* statahead is finished, but statahead entries need to be cached, wait
* for file release to stop me. */
while (thread_is_running(sa_thread)) {
- l_wait_event(sa_thread->t_ctl_waitq,
- sa_has_callback(sai) ||
- !thread_is_running(sa_thread),
- &lwi);
+ wait_event_idle(sa_thread->t_ctl_waitq,
+ sa_has_callback(sai) ||
+ !thread_is_running(sa_thread));
sa_handle_callback(sai);
}
CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
sai, (unsigned int)agl_thread->t_pid);
- l_wait_event(agl_thread->t_ctl_waitq,
- thread_is_stopped(agl_thread),
- &lwi);
+ wait_event_idle(agl_thread->t_ctl_waitq,
+ thread_is_stopped(agl_thread));
} else {
/* Set agl_thread flags anyway. */
thread_set_flags(agl_thread, SVC_STOPPED);
struct ll_statahead_info *sai = NULL;
struct dentry *parent = dentry->d_parent;
struct ptlrpc_thread *thread;
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
struct ll_sb_info *sbi = ll_i2sbi(parent->d_inode);
int first = LS_FIRST_DE;
GOTO(out, rc);
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
ll_sai_put(sai);
/*
struct lod_recovery_data *lrd = NULL;
struct ptlrpc_thread *thread;
struct task_struct *task;
- struct l_wait_info lwi = { 0 };
struct lod_tgt_desc *subtgt = NULL;
u32 index;
u32 master_index;
GOTO(out_llog, rc);
}
- l_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING ||
- thread->t_flags & SVC_STOPPED, &lwi);
+ wait_event_idle(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING ||
+ thread->t_flags & SVC_STOPPED);
RETURN(0);
out_llog:
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
{
- struct l_wait_info lwi = { 0 };
ENTRY;
while (atomic_read(&lov->lo_active_ios) > 0) {
PFID(lu_object_fid(lov2lu(lov))),
atomic_read(&lov->lo_active_ios));
- l_wait_event(lov->lo_waitq,
- atomic_read(&lov->lo_active_ios) == 0, &lwi);
+ wait_event_idle(lov->lo_waitq,
+ atomic_read(&lov->lo_active_ios) == 0);
}
RETURN(0);
}
config_log_put(cld_prev);
/* Wait a bit to see if anyone else needs a requeue */
- lwi = (struct l_wait_info) { 0 };
- l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
- &lwi);
+ wait_event_idle(rq_waitq, rq_state & (RQ_NOW | RQ_STOP));
spin_lock(&config_list_lock);
}
set_user_nice(current, -2);
mgc_fsname2resid(fsdb->fsdb_name, &resid, CONFIG_T_RECOVER);
while (1) {
- struct l_wait_info lwi = { 0 };
+ wait_event_idle(fsdb->fsdb_notify_waitq,
+ fsdb->fsdb_notify_stop ||
+ atomic_read(&fsdb->fsdb_notify_phase));
- l_wait_event(fsdb->fsdb_notify_waitq,
- fsdb->fsdb_notify_stop ||
- atomic_read(&fsdb->fsdb_notify_phase),
- &lwi);
if (fsdb->fsdb_notify_stop)
break;
CERROR("IO failed: %d, still wait for %d remaining entries\n",
rc, atomic_read(&anchor->csi_sync_nr));
- lwi = (struct l_wait_info) { 0 };
- (void)l_wait_event(anchor->csi_waitq,
- atomic_read(&anchor->csi_sync_nr) == 0,
- &lwi);
+ wait_event_idle(anchor->csi_waitq,
+ atomic_read(&anchor->csi_sync_nr) == 0);
} else {
rc = anchor->csi_sync_rc;
}
__u16 obd_get_mod_rpc_slot(struct client_obd *cli, __u32 opc,
struct lookup_intent *it)
{
- struct l_wait_info lwi = LWI_INTR(NULL, NULL);
bool close_req = false;
__u16 i, max;
"opc %u, max %hu\n",
cli->cl_import->imp_obd->obd_name, opc, max);
- l_wait_event_exclusive(cli->cl_mod_rpcs_waitq,
- obd_mod_rpc_slot_avail(cli, close_req),
- &lwi);
+ wait_event_idle_exclusive(cli->cl_mod_rpcs_waitq,
+ obd_mod_rpc_slot_avail(cli,
+ close_req));
} while (true);
}
EXPORT_SYMBOL(obd_get_mod_rpc_slot);
struct cfs_hash *hs;
struct cfs_hash_bd bd;
struct lu_site_bkt_data *bkt;
- struct l_wait_info lwi = { 0 };
__u64 version = 0;
int rc;
if (likely(lu_object_is_inited(o->lo_header)))
RETURN(o);
- l_wait_event(bkt->lsb_waitq,
- lu_object_is_inited(o->lo_header) ||
- lu_object_is_dying(o->lo_header), &lwi);
+ wait_event_idle(bkt->lsb_waitq,
+ lu_object_is_inited(o->lo_header) ||
+ lu_object_is_dying(o->lo_header));
if (lu_object_is_dying(o->lo_header)) {
lu_object_put(env, o);
if (!(conf && conf->loc_flags & LOC_F_NEW) &&
!lu_object_is_inited(shadow->lo_header)) {
- l_wait_event(bkt->lsb_waitq,
- lu_object_is_inited(shadow->lo_header) ||
- lu_object_is_dying(shadow->lo_header), &lwi);
+ wait_event_idle(bkt->lsb_waitq,
+ lu_object_is_inited(shadow->lo_header) ||
+ lu_object_is_dying(shadow->lo_header));
if (lu_object_is_dying(shadow->lo_header)) {
lu_object_put(env, shadow);
void *data, __u32 flags)
{
struct ptlrpc_thread *thread = &scrub->os_thread;
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
int rc;
ENTRY;
if (unlikely(thread_is_stopping(thread))) {
spin_unlock(&scrub->os_lock);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_stopped(thread));
goto again;
}
spin_unlock(&scrub->os_lock);
RETURN(rc);
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
RETURN(0);
}
void scrub_stop(struct lustre_scrub *scrub)
{
struct ptlrpc_thread *thread = &scrub->os_thread;
- struct l_wait_info lwi = { 0 };
/* os_lock: sync status between stop and scrub thread */
spin_lock(&scrub->os_lock);
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&scrub->os_lock);
wake_up_all(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_stopped(thread));
/* Do not skip the last lock/unlock, which can guarantee that
* the caller cannot return until the OI scrub thread exit. */
spin_lock(&scrub->os_lock);
struct ptlrpc_thread *thread = &ofd->ofd_inconsistency_thread;
struct ofd_inconsistency_item *oii;
struct lfsck_req_local *lrl = NULL;
- struct l_wait_info lwi = { 0 };
int rc;
ENTRY;
}
spin_unlock(&ofd->ofd_inconsistency_lock);
- l_wait_event(thread->t_ctl_waitq,
- !list_empty(&ofd->ofd_inconsistency_list) ||
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ !list_empty(&ofd->ofd_inconsistency_list) ||
+ !thread_is_running(thread));
spin_lock(&ofd->ofd_inconsistency_lock);
}
int ofd_start_inconsistency_verification_thread(struct ofd_device *ofd)
{
struct ptlrpc_thread *thread = &ofd->ofd_inconsistency_thread;
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
int rc;
ofd_name(ofd), rc);
} else {
rc = 0;
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) ||
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) ||
+ thread_is_stopped(thread));
}
return rc;
int ofd_stop_inconsistency_verification_thread(struct ofd_device *ofd)
{
struct ptlrpc_thread *thread = &ofd->ofd_inconsistency_thread;
- struct l_wait_info lwi = { 0 };
spin_lock(&ofd->ofd_inconsistency_lock);
if (thread_is_init(thread) || thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
spin_unlock(&ofd->ofd_inconsistency_lock);
wake_up_all(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_stopped(thread));
return 0;
}
"%s: wait ext to %u timedout, recovery in progress?\n",
cli_name(osc_cli(obj)), state);
- lwi = LWI_INTR(NULL, NULL);
- rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
- &lwi);
+ wait_event_idle(ext->oe_waitq, extent_wait_cb(ext, state));
+ rc = 0;
}
if (rc == 0 && ext->oe_rc < 0)
rc = ext->oe_rc;
int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc)
{
- struct l_wait_info lwi = { 0 };
ENTRY;
CDEBUG(D_INODE, "Invalidate osc object: %p, # of active IOs: %d\n",
osc, atomic_read(&osc->oo_nr_ios));
- l_wait_event(osc->oo_io_waitq, atomic_read(&osc->oo_nr_ios) == 0, &lwi);
+ wait_event_idle(osc->oo_io_waitq, atomic_read(&osc->oo_nr_ios) == 0);
/* Discard all dirty pages of this object. */
osc_cache_truncate_start(env, osc, 0, NULL);
struct osd_iit_param *param,
struct osd_idmap_cache *oic, bool *noslot, int rc)
{
- struct l_wait_info lwi = { 0 };
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct scrub_file *sf = &scrub->os_file;
struct ptlrpc_thread *thread = &scrub->os_thread;
}
if (it != NULL)
- l_wait_event(thread->t_ctl_waitq, osd_scrub_wakeup(scrub, it),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ osd_scrub_wakeup(scrub, it));
if (!ooc || osd_scrub_has_window(scrub, ooc))
*noslot = false;
__u64 *pos;
__u64 *count;
struct osd_iit_param *param;
- struct l_wait_info lwi = { 0 };
__u32 limit;
int rc;
bool noslot = true;
sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
SF_UPGRADE | SF_AUTO);
sf->sf_status = SS_COMPLETED;
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread) ||
- !scrub->os_partial_scan ||
- scrub->os_in_join ||
- !list_empty(&scrub->os_inconsistent_items),
- &lwi);
+ wait_event_idle(
+ thread->t_ctl_waitq,
+ !thread_is_running(thread) ||
+ !scrub->os_partial_scan ||
+ scrub->os_in_join ||
+ !list_empty(&scrub->os_inconsistent_items));
sf->sf_flags = saved_flags;
sf->sf_status = SS_SCANNING;
full:
if (!preload) {
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread) || !scrub->os_in_join,
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ !thread_is_running(thread) ||
+ !scrub->os_in_join);
if (unlikely(!thread_is_running(thread)))
RETURN(0);
}
if (!scrub->os_full_speed && !scrub->os_partial_scan) {
- struct l_wait_info lwi = { 0 };
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = &it->ooi_cache;
- l_wait_event(thread->t_ctl_waitq,
- it->ooi_user_ready || !thread_is_running(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ it->ooi_user_ready ||
+ !thread_is_running(thread));
if (unlikely(!thread_is_running(thread)))
GOTO(post, rc = 0);
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct osd_otable_cache *ooc = &it->ooi_cache;
struct ptlrpc_thread *thread = &scrub->os_thread;
- struct l_wait_info lwi = { 0 };
int rc;
ENTRY;
}
if (it->ooi_all_cached) {
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ !thread_is_running(thread));
RETURN(1);
}
}
if (it->ooi_cache.ooc_pos_preload >= scrub->os_pos_current)
- l_wait_event(thread->t_ctl_waitq,
- osd_otable_it_wakeup(scrub, it),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ osd_otable_it_wakeup(scrub, it));
if (!thread_is_running(thread) && !it->ooi_used_outside)
RETURN(1);
spin_unlock(&scrub->os_lock);
}
- if (!scrub->os_full_speed && !osd_scrub_has_window(it)) {
- memset(&lwi, 0, sizeof(lwi));
- l_wait_event(thread->t_ctl_waitq,
- osd_scrub_wakeup(scrub, it),
- &lwi);
- }
+ if (!scrub->os_full_speed && !osd_scrub_has_window(it))
+ wait_event_idle(thread->t_ctl_waitq,
+ osd_scrub_wakeup(scrub, it));
if (unlikely(!thread_is_running(thread)))
GOTO(out, rc = SCRUB_NEXT_EXIT);
}
if (!scrub->os_full_speed) {
- struct l_wait_info lwi = { 0 };
struct osd_otable_it *it = dev->od_otable_it;
- l_wait_event(thread->t_ctl_waitq,
- it->ooi_user_ready || !thread_is_running(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ it->ooi_user_ready ||
+ !thread_is_running(thread));
+
if (unlikely(!thread_is_running(thread)))
GOTO(post, rc = 0);
struct osd_device *dev = it->ooi_dev;
struct lustre_scrub *scrub = &dev->od_scrub;
struct ptlrpc_thread *thread = &scrub->os_thread;
- struct l_wait_info lwi = { 0 };
struct lustre_mdt_attrs *lma = NULL;
nvlist_t *nvbuf = NULL;
int size = 0;
}
if (it->ooi_pos >= scrub->os_pos_current)
- l_wait_event(thread->t_ctl_waitq,
- osd_otable_it_wakeup(scrub, it),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ osd_otable_it_wakeup(scrub, it));
if (!thread_is_running(thread) && !it->ooi_used_outside)
GOTO(out, rc = 1);
{
struct lwp_device *m = lu2lwp_dev(ludev);
struct ptlrpc_thread *thread = &m->lpd_notify_thread;
- struct l_wait_info lwi = { 0 };
int rc;
ENTRY;
- if (!thread_is_stopped(thread))
- l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq, thread_is_stopped(thread));
if (m->lpd_exp != NULL)
class_disconnect(m->lpd_exp);
struct lwp_device *lwp;
struct ptlrpc_thread *thread;
struct task_struct *task;
- struct l_wait_info lwi = { 0 };
char name[MTI_NAME_MAXLEN];
LASSERT(exp != NULL);
name, PTR_ERR(task));
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
}
/**
*/
static int osp_update_init(struct osp_device *osp)
{
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
ENTRY;
RETURN(rc);
}
- l_wait_event(osp->opd_update_thread.t_ctl_waitq,
- osp_send_update_thread_running(osp) ||
- osp_send_update_thread_stopped(osp), &lwi);
+ wait_event_idle(osp->opd_update_thread.t_ctl_waitq,
+ osp_send_update_thread_running(osp) ||
+ osp_send_update_thread_stopped(osp));
RETURN(0);
}
struct ptlrpc_request *req = NULL;
struct obd_import *imp;
struct ost_body *body;
- struct l_wait_info lwi = { 0 };
int update_status = 0;
int rc;
int diff;
* catch all osp_precreate_reserve() calls who find
* "!opd_pre_recovering".
*/
- l_wait_event(d->opd_pre_waitq,
- (!d->opd_pre_reserved && d->opd_recovery_completed) ||
- !osp_precreate_running(d) || d->opd_got_disconnected,
- &lwi);
+ wait_event_idle(d->opd_pre_waitq,
+ (!d->opd_pre_reserved && d->opd_recovery_completed) ||
+ !osp_precreate_running(d) || d->opd_got_disconnected);
if (!osp_precreate_running(d) || d->opd_got_disconnected)
GOTO(out, rc = -EAGAIN);
{
struct osp_device *d = _arg;
struct ptlrpc_thread *thread = &d->opd_pre_thread;
- struct l_wait_info lwi = { 0 };
struct l_wait_info lwi2 = LWI_TIMEOUT(cfs_time_seconds(5),
back_to_sleep, NULL);
struct lu_env env;
d->opd_imp_connected &&
!d->opd_got_disconnected)
break;
- l_wait_event(d->opd_pre_waitq,
- !osp_precreate_running(d) ||
- d->opd_new_connection,
- &lwi);
+ wait_event_idle(d->opd_pre_waitq,
+ !osp_precreate_running(d) ||
+ d->opd_new_connection);
if (!d->opd_new_connection)
continue;
* connected, can handle precreates now
*/
while (osp_precreate_running(d)) {
- l_wait_event(d->opd_pre_waitq,
- !osp_precreate_running(d) ||
- osp_precreate_near_empty(&env, d) ||
- osp_statfs_need_update(d) ||
- d->opd_got_disconnected, &lwi);
+ wait_event_idle(d->opd_pre_waitq,
+ !osp_precreate_running(d) ||
+ osp_precreate_near_empty(&env, d) ||
+ osp_statfs_need_update(d) ||
+ d->opd_got_disconnected);
if (!osp_precreate_running(d))
break;
int osp_init_statfs(struct osp_device *d)
{
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
ENTRY;
RETURN(PTR_ERR(task));
}
- l_wait_event(d->opd_pre_thread.t_ctl_waitq,
- osp_precreate_running(d) || osp_precreate_stopped(d),
- &lwi);
+ wait_event_idle(d->opd_pre_thread.t_ctl_waitq,
+ osp_precreate_running(d) || osp_precreate_stopped(d));
+
RETURN(0);
}
struct osp_device *d = data;
do {
- struct l_wait_info lwi = { 0 };
-
if (!osp_sync_running(d)) {
CDEBUG(D_HA, "stop llog processing\n");
return LLOG_PROC_BREAK;
rec = NULL;
}
- l_wait_event(d->opd_sync_waitq,
- !osp_sync_running(d) ||
- osp_sync_can_process_new(d, rec) ||
- !list_empty(&d->opd_sync_committed_there),
- &lwi);
+ wait_event_idle(d->opd_sync_waitq,
+ !osp_sync_running(d) ||
+ osp_sync_can_process_new(d, rec) ||
+ !list_empty(&d->opd_sync_committed_there));
} while (1);
}
*/
int osp_sync_init(const struct lu_env *env, struct osp_device *d)
{
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
int rc;
GOTO(err_llog, rc);
}
- l_wait_event(d->opd_sync_thread.t_ctl_waitq,
- osp_sync_running(d) || osp_sync_stopped(d), &lwi);
+ wait_event_idle(d->opd_sync_thread.t_ctl_waitq,
+ osp_sync_running(d) || osp_sync_stopped(d));
RETURN(0);
err_llog:
{
struct lu_env env;
struct osp_device *osp = arg;
- struct l_wait_info lwi = { 0 };
struct osp_updates *ou = osp->opd_update;
struct ptlrpc_thread *thread = &osp->opd_update_thread;
struct osp_update_request *our = NULL;
wake_up(&thread->t_ctl_waitq);
while (1) {
our = NULL;
- l_wait_event(ou->ou_waitq,
- !osp_send_update_thread_running(osp) ||
- osp_get_next_request(ou, &our), &lwi);
+ wait_event_idle(ou->ou_waitq,
+ !osp_send_update_thread_running(osp) ||
+ osp_get_next_request(ou, &our));
if (!osp_send_update_thread_running(osp)) {
if (our != NULL) {
{
struct obd_device *obd;
struct obd_export *exp;
- struct l_wait_info lwi = { 0 };
time64_t expire_time;
ENTRY;
CDEBUG(D_HA, "Starting Ping Evictor\n");
pet_state = PET_READY;
while (1) {
- l_wait_event(pet_waitq, (!list_empty(&pet_list)) ||
- (pet_state == PET_TERMINATE), &lwi);
+ wait_event_idle(pet_waitq,
+ (!list_empty(&pet_list)) ||
+ (pet_state == PET_TERMINATE));
/* loop until all obd's will be removed */
if ((pet_state == PET_TERMINATE) && list_empty(&pet_list))
static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
{
- struct l_wait_info lwi = { 0 };
struct ptlrpc_thread *thread;
struct list_head zombie;
CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
svcpt->scp_service->srv_thread_name, thread->t_id);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread), &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_stopped(thread));
spin_lock(&svcpt->scp_lock);
}
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
{
- struct l_wait_info lwi = { 0 };
struct ptlrpc_thread *thread;
struct ptlrpc_service *svc;
struct task_struct *task;
if (!wait)
RETURN(0);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
rc = thread_is_stopped(thread) ? thread->t_id : 0;
RETURN(rc);
{
struct qmt_device *qmt = (struct qmt_device *)arg;
struct ptlrpc_thread *thread = &qmt->qmt_reba_thread;
- struct l_wait_info lwi = { 0 };
struct lu_env *env;
struct lquota_entry *lqe, *tmp;
int rc;
wake_up(&thread->t_ctl_waitq);
while (1) {
- l_wait_event(thread->t_ctl_waitq,
- !list_empty(&qmt->qmt_reba_list) ||
- !thread_is_running(thread), &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ !list_empty(&qmt->qmt_reba_list) ||
+ !thread_is_running(thread));
spin_lock(&qmt->qmt_reba_lock);
list_for_each_entry_safe(lqe, tmp, &qmt->qmt_reba_list,
int qmt_start_reba_thread(struct qmt_device *qmt)
{
struct ptlrpc_thread *thread = &qmt->qmt_reba_thread;
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
ENTRY;
RETURN(PTR_ERR(task));
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
RETURN(0);
}
struct ptlrpc_thread *thread = &qmt->qmt_reba_thread;
if (!thread_is_stopped(thread)) {
- struct l_wait_info lwi = { 0 };
thread_set_flags(thread, SVC_STOPPING);
wake_up(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq, thread_is_stopped(thread));
}
LASSERT(list_empty(&qmt->qmt_reba_list));
}
struct qsd_qtype_info *qqi = (struct qsd_qtype_info *)args;
struct qsd_instance *qsd = qqi->qqi_qsd;
struct ptlrpc_thread *thread = &qqi->qqi_reint_thread;
- struct l_wait_info lwi = { 0 };
int rc;
ENTRY;
qti = qsd_info(env);
/* wait for the connection to master established */
- l_wait_event(thread->t_ctl_waitq,
- qsd_connected(qsd) || !thread_is_running(thread), &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ qsd_connected(qsd) || !thread_is_running(thread));
/* Step 1: enqueue global index lock */
if (!thread_is_running(thread))
}
/* wait for the qsd instance started (target recovery done) */
- l_wait_event(thread->t_ctl_waitq,
- qsd_started(qsd) || !thread_is_running(thread), &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ qsd_started(qsd) || !thread_is_running(thread));
if (!thread_is_running(thread))
GOTO(out_lock, rc = 0);
void qsd_stop_reint_thread(struct qsd_qtype_info *qqi)
{
struct ptlrpc_thread *thread = &qqi->qqi_reint_thread;
- struct l_wait_info lwi = { 0 };
if (!thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
wake_up(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread), &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_stopped(thread));
}
}
{
struct ptlrpc_thread *thread = &qqi->qqi_reint_thread;
struct qsd_instance *qsd = qqi->qqi_qsd;
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
int rc;
char *name;
RETURN(rc);
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
RETURN(0);
}
int qsd_start_upd_thread(struct qsd_instance *qsd)
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
ENTRY;
RETURN(PTR_ERR(task));
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
RETURN(0);
}
void qsd_stop_upd_thread(struct qsd_instance *qsd)
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi = { 0 };
if (!thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
wake_up(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq, thread_is_stopped(thread));
}
qsd_cleanup_deferred(qsd);
qsd_cleanup_adjust(qsd);
if (cfs_fail_val == 0 &&
lustre_msg_get_opc(msg) != OBD_PING &&
lustre_msg_get_flags(msg) & MSG_REQ_REPLAY_DONE) {
- struct l_wait_info lwi = { 0 };
-
cfs_fail_val = 1;
cfs_race_state = 0;
- l_wait_event(cfs_race_waitq, (cfs_race_state == 1),
- &lwi);
+ wait_event_idle(cfs_race_waitq, (cfs_race_state == 1));
}
}
struct niobuf_local *local_nb;
struct obd_ioobj *ioo;
struct ost_body *body, *repbody;
- struct l_wait_info lwi;
struct lustre_handle lockh = { 0 };
int npages, nob = 0, rc, i, no_reply = 0,
npages_read;
* finish */
if (unlikely(atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
/* We do not care how long it takes */
- lwi = LWI_INTR(NULL, NULL);
- rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
- !atomic_read(&exp->exp_obd->obd_evict_inprogress),
- &lwi);
+ wait_event_idle(
+ exp->exp_obd->obd_evict_inprogress_waitq,
+ !atomic_read(&exp->exp_obd->obd_evict_inprogress));
}
/* There must be big cache in current thread to process this request
*/
/*
 * Block until the top transaction has fully stopped, then return its
 * final result.
 *
 * Waits (idle, uninterruptible) on the multiple-thandle stop waitqueue
 * until top_trans_is_stopped() reports the transaction stopped, then
 * returns the result recorded in the super thandle (th_result).
 *
 * \param[in] top_th	top transaction handle to wait on
 *
 * \retval		th_result of the completed transaction
 */
static int top_trans_wait_result(struct top_thandle *top_th)
{
	wait_event_idle(top_th->tt_multiple_thandle->tmt_stop_waitq,
			top_trans_is_stopped(top_th));

	RETURN(top_th->tt_super.th_result);
}
struct target_distribute_txn_data *tdtd = _arg;
struct lu_target *lut = tdtd->tdtd_lut;
struct ptlrpc_thread *thread = &lut->lut_tdtd_commit_thread;
- struct l_wait_info lwi = { 0 };
struct lu_env env;
struct list_head list;
int rc;
top_multiple_thandle_put(tmt);
}
- l_wait_event(tdtd->tdtd_commit_thread_waitq,
- !distribute_txn_commit_thread_running(lut) ||
- committed < tdtd->tdtd_committed_batchid ||
- tdtd_ready_for_cancel_log(tdtd), &lwi);
+ wait_event_idle(tdtd->tdtd_commit_thread_waitq,
+ !distribute_txn_commit_thread_running(lut) ||
+ committed < tdtd->tdtd_committed_batchid ||
+ tdtd_ready_for_cancel_log(tdtd));
};
- l_wait_event(tdtd->tdtd_commit_thread_waitq,
- atomic_read(&tdtd->tdtd_refcount) == 0, &lwi);
+ wait_event_idle(tdtd->tdtd_commit_thread_waitq,
+ atomic_read(&tdtd->tdtd_refcount) == 0);
spin_lock(&tdtd->tdtd_batchid_lock);
list_for_each_entry_safe(tmt, tmp, &tdtd->tdtd_list,
__u32 index)
{
struct task_struct *task;
- struct l_wait_info lwi = { 0 };
int rc;
ENTRY;
if (IS_ERR(task))
RETURN(PTR_ERR(task));
- l_wait_event(lut->lut_tdtd_commit_thread.t_ctl_waitq,
- distribute_txn_commit_thread_running(lut) ||
- distribute_txn_commit_thread_stopped(lut), &lwi);
+ wait_event_idle(lut->lut_tdtd_commit_thread.t_ctl_waitq,
+ distribute_txn_commit_thread_running(lut) ||
+ distribute_txn_commit_thread_stopped(lut));
RETURN(0);
}
EXPORT_SYMBOL(distribute_txn_init);