If l_wait_event() is passed an lwi initialised with
one of
LWI_TIMEOUT_INTR( time, NULL, NULL, NULL)
LWI_TIMEOUT_INTR( time, NULL, LWI_ON_SIGNAL_NOOP, NULL)
LWI_TIMEOUT( time, NULL, NULL)
where time != 0, then it behaves much like
wait_event_idle_timeout().
All signals are blocked, and it waits either for the
condition to be true, or for the timeout (in jiffies).
Note that LWI_ON_SIGNAL_NOOP has no effect here.
l_wait_event() returns 0 when the condition is true, or -ETIMEDOUT
when the timeout occurs. wait_event_idle_timeout() instead returns a
positive number when the condition is true, and 0 when the timeout
occurs. So in the cases where the return value is used, handling needs to
be adjusted accordingly.
Note that in some cases where cfs_fail_val gives the time to wait for,
the current code re-tests the wait time against zero as cfs_fail_val
can change asynchronously. This is because l_wait_event() behaves
quite differently if the timeout is zero.
The new code doesn't need to do that as wait_event_idle_timeout()
treats 0 just as a very short wait, which is exactly the correct
behavior here.
This patch also removes a comment which is no longer meaningful
(CAN_MATCH) and corrects a debug message which reported the wait time
as "seconds" rather than the correct "jiffies".
This patch doesn't change the timed wait in cl_sync_io_wait().
That is a bit more complicated, so it is left to a separate patch.
Change-Id: I632afc290935e321926f45b144d5367799a01381
Signed-off-by: Mr NeilBrown <neilb@suse.com>
Reviewed-on: https://review.whamcloud.com/35977
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Shaun Tancheff <stancheff@cray.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Petros Koutoupis <pkoutoupis@cray.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
16 files changed:
(!ldlm_is_lvb_ready(lock))) {
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
(!ldlm_is_lvb_ready(lock))) {
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
- struct l_wait_info lwi;
if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock,
if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock,
GOTO(out_fail_match, matched = 0);
}
GOTO(out_fail_match, matched = 0);
}
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
- NULL, LWI_ON_SIGNAL_NOOP, NULL);
+ wait_event_idle_timeout(
+ lock->l_waitq,
+ lock->l_flags & wait_flags,
+ cfs_time_seconds(obd_timeout));
- /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
- l_wait_event(lock->l_waitq, lock->l_flags & wait_flags,
- &lwi);
if (!ldlm_is_lvb_ready(lock))
GOTO(out_fail_match, matched = 0);
}
if (!ldlm_is_lvb_ready(lock))
GOTO(out_fail_match, matched = 0);
}
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
cfs_fail_val > 0) {
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
cfs_fail_val > 0) {
- struct l_wait_info lwi = LWI_TIMEOUT(
- cfs_time_seconds(cfs_fail_val),
- NULL, NULL);
-
- /* Some others may changed the cfs_fail_val
- * as zero after above check, re-check it for
- * sure to avoid falling into wait for ever. */
- if (likely(lwi.lwi_timeout > 0)) {
- struct ptlrpc_thread *thread =
- &lfsck->li_thread;
-
- up_write(&com->lc_sem);
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
- down_write(&com->lc_sem);
- }
+ struct ptlrpc_thread *thread =
+ &lfsck->li_thread;
+
+ up_write(&com->lc_sem);
+ wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ cfs_time_seconds(cfs_fail_val));
+ down_write(&com->lc_sem);
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
cfs_fail_val == lfsck_dev_idx(lfsck)) {
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
cfs_fail_val == lfsck_dev_idx(lfsck)) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
- NULL, NULL);
struct ptlrpc_thread *thread = &lfsck->li_thread;
struct ptlrpc_thread *thread = &lfsck->li_thread;
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle_timeout(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ cfs_time_seconds(1));
}
lfsck_rbtree_update_bitmap(env, com, fid, false);
}
lfsck_rbtree_update_bitmap(env, com, fid, false);
LFSCK_CHECKPOINT_INTERVAL;
while (1) {
LFSCK_CHECKPOINT_INTERVAL;
while (1) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
- NULL, NULL);
-
rc = lfsck_layout_slave_query_master(env, com);
if (list_empty(&llsd->llsd_master_list)) {
if (unlikely(!thread_is_running(thread)))
rc = lfsck_layout_slave_query_master(env, com);
if (list_empty(&llsd->llsd_master_list)) {
if (unlikely(!thread_is_running(thread)))
if (rc < 0)
GOTO(done, rc);
if (rc < 0)
GOTO(done, rc);
- rc = l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread) ||
- lo->ll_flags & LF_INCOMPLETE ||
- list_empty(&llsd->llsd_master_list),
- &lwi);
+ rc = wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ !thread_is_running(thread) ||
+ lo->ll_flags & LF_INCOMPLETE ||
+ list_empty(&llsd->llsd_master_list),
+ cfs_time_seconds(30));
if (unlikely(!thread_is_running(thread)))
GOTO(done, rc = 0);
if (lo->ll_flags & LF_INCOMPLETE)
GOTO(done, rc = 1);
if (unlikely(!thread_is_running(thread)))
GOTO(done, rc = 0);
if (lo->ll_flags & LF_INCOMPLETE)
GOTO(done, rc = 1);
- GOTO(done, rc = (rc < 0 ? rc : 1));
void lfsck_control_speed(struct lfsck_instance *lfsck)
{
struct ptlrpc_thread *thread = &lfsck->li_thread;
void lfsck_control_speed(struct lfsck_instance *lfsck)
{
struct ptlrpc_thread *thread = &lfsck->li_thread;
- struct l_wait_info lwi;
if (lfsck->li_sleep_jif > 0 &&
lfsck->li_new_scanned >= lfsck->li_sleep_rate) {
if (lfsck->li_sleep_jif > 0 &&
lfsck->li_new_scanned >= lfsck->li_sleep_rate) {
- lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
-
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle_timeout(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ lfsck->li_sleep_jif);
lfsck->li_new_scanned = 0;
}
}
lfsck->li_new_scanned = 0;
}
}
{
struct lfsck_instance *lfsck = com->lc_lfsck;
struct ptlrpc_thread *thread = &lfsck->li_thread;
{
struct lfsck_instance *lfsck = com->lc_lfsck;
struct ptlrpc_thread *thread = &lfsck->li_thread;
- struct l_wait_info lwi;
if (lfsck->li_sleep_jif > 0 &&
com->lc_new_scanned >= lfsck->li_sleep_rate) {
if (lfsck->li_sleep_jif > 0 &&
com->lc_new_scanned >= lfsck->li_sleep_rate) {
- lwi = LWI_TIMEOUT_INTR(lfsck->li_sleep_jif, NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
-
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle_timeout(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ lfsck->li_sleep_jif);
com->lc_new_scanned = 0;
}
}
com->lc_new_scanned = 0;
}
}
int first = 0;
struct md_op_data *op_data;
struct ll_dir_chain chain;
int first = 0;
struct md_op_data *op_data;
struct ll_dir_chain chain;
- struct l_wait_info lwi = { 0 };
struct page *page = NULL;
__u64 pos = 0;
int rc = 0;
struct page *page = NULL;
__u64 pos = 0;
int rc = 0;
* safely because statahead RPC will access sai data */
while (sai->sai_sent != sai->sai_replied) {
/* in case we're not woken up, timeout wait */
* safely because statahead RPC will access sai data */
while (sai->sai_sent != sai->sai_replied) {
/* in case we're not woken up, timeout wait */
- lwi = LWI_TIMEOUT(cfs_time_seconds(1) >> 3, NULL, NULL);
- l_wait_event(sa_thread->t_ctl_waitq,
- sai->sai_sent == sai->sai_replied, &lwi);
+ wait_event_idle_timeout(sa_thread->t_ctl_waitq,
+ sai->sai_sent == sai->sai_replied,
+ cfs_time_seconds(1) >> 3);
}
/* release resources held by statahead RPCs */
}
/* release resources held by statahead RPCs */
bool unplug)
{
struct sa_entry *entry = NULL;
bool unplug)
{
struct sa_entry *entry = NULL;
- struct l_wait_info lwi = { 0 };
struct ll_dentry_data *ldd;
struct ll_inode_info *lli = ll_i2info(dir);
int rc = 0;
struct ll_dentry_data *ldd;
struct ll_inode_info *lli = ll_i2info(dir);
int rc = 0;
spin_lock(&lli->lli_sa_lock);
sai->sai_index_wait = entry->se_index;
spin_unlock(&lli->lli_sa_lock);
spin_lock(&lli->lli_sa_lock);
sai->sai_index_wait = entry->se_index;
spin_unlock(&lli->lli_sa_lock);
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
- if (rc < 0) {
+ rc = wait_event_idle_timeout(sai->sai_waitq, sa_ready(entry),
+ cfs_time_seconds(30));
+ if (rc == 0) {
/*
* entry may not be ready, so it may be used by inflight
* statahead RPC, don't free it.
/*
* entry may not be ready, so it may be used by inflight
* statahead RPC, don't free it.
spin_lock(&config_list_lock);
rq_state |= RQ_RUNNING;
while (!(rq_state & RQ_STOP)) {
spin_lock(&config_list_lock);
rq_state |= RQ_RUNNING;
while (!(rq_state & RQ_STOP)) {
- struct l_wait_info lwi;
struct config_llog_data *cld, *cld_prev;
int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC);
int to;
struct config_llog_data *cld, *cld_prev;
int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC);
int to;
}
/* Always wait a few seconds to allow the server who
}
/* Always wait a few seconds to allow the server who
- caused the lock revocation to finish its setup, plus some
- random so everyone doesn't try to reconnect at once. */
+ * caused the lock revocation to finish its setup, plus some
+ * random so everyone doesn't try to reconnect at once.
+ */
to = cfs_time_seconds(MGC_TIMEOUT_MIN_SECONDS * 100 + rand);
/* rand is centi-seconds */
to = cfs_time_seconds(MGC_TIMEOUT_MIN_SECONDS * 100 + rand);
/* rand is centi-seconds */
- lwi = LWI_TIMEOUT(to / 100, NULL, NULL);
- l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
- &lwi);
+ wait_event_idle_timeout(rq_waitq,
+ rq_state & (RQ_STOP | RQ_PRECLEANUP),
+ to/100);
/*
* iterate & processing through the list. for each cld, process
/*
* iterate & processing through the list. for each cld, process
if (rcl == -ESHUTDOWN &&
atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
struct obd_import *imp;
if (rcl == -ESHUTDOWN &&
atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
struct obd_import *imp;
- struct l_wait_info lwi;
long timeout = cfs_time_seconds(obd_timeout);
mutex_unlock(&cld->cld_lock);
long timeout = cfs_time_seconds(obd_timeout);
mutex_unlock(&cld->cld_lock);
* FULL or closed */
ptlrpc_pinger_force(imp);
* FULL or closed */
ptlrpc_pinger_force(imp);
- lwi = LWI_TIMEOUT(timeout, NULL, NULL);
- l_wait_event(imp->imp_recovery_waitq,
- !mgc_import_in_recovery(imp), &lwi);
+ wait_event_idle_timeout(imp->imp_recovery_waitq,
+ !mgc_import_in_recovery(imp),
+ timeout);
if (imp->imp_state == LUSTRE_IMP_FULL) {
retry = true;
if (imp->imp_state == LUSTRE_IMP_FULL) {
retry = true;
enum osc_extent_state state)
{
struct osc_object *obj = ext->oe_obj;
enum osc_extent_state state)
{
struct osc_object *obj = ext->oe_obj;
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
osc_extent_release(env, ext);
/* wait for the extent until its state becomes @state */
osc_extent_release(env, ext);
/* wait for the extent until its state becomes @state */
- rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
- if (rc == -ETIMEDOUT) {
+ rc = wait_event_idle_timeout(ext->oe_waitq, extent_wait_cb(ext, state),
+ cfs_time_seconds(600));
+ if (rc == 0) {
OSC_EXTENT_DUMP(D_ERROR, ext,
"%s: wait ext to %u timedout, recovery in progress?\n",
cli_name(osc_cli(obj)), state);
wait_event_idle(ext->oe_waitq, extent_wait_cb(ext, state));
OSC_EXTENT_DUMP(D_ERROR, ext,
"%s: wait ext to %u timedout, recovery in progress?\n",
cli_name(osc_cli(obj)), state);
wait_event_idle(ext->oe_waitq, extent_wait_cb(ext, state));
- if (rc == 0 && ext->oe_rc < 0)
struct osc_object *osc = oap->oap_obj;
struct lov_oinfo *loi = osc->oo_oinfo;
struct osc_cache_waiter ocw;
struct osc_object *osc = oap->oap_obj;
struct lov_oinfo *loi = osc->oo_oinfo;
struct osc_cache_waiter ocw;
- struct l_wait_info lwi;
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(AT_OFF ? obd_timeout : at_max),
- NULL, LWI_ON_SIGNAL_NOOP, NULL);
-
OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
spin_lock(&cli->cl_loi_list_lock);
OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
spin_lock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
cli_name(cli), &ocw, oap);
CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
cli_name(cli), &ocw, oap);
- rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
+ rc = wait_event_idle_timeout(ocw.ocw_waitq,
+ ocw_granted(cli, &ocw),
+ cfs_time_seconds(AT_OFF ?
+ obd_timeout :
+ at_max));
spin_lock(&cli->cl_loi_list_lock);
spin_lock(&cli->cl_loi_list_lock);
/* l_wait_event is interrupted by signal or timed out */
list_del_init(&ocw.ocw_entry);
/* l_wait_event is interrupted by signal or timed out */
list_del_init(&ocw.ocw_entry);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
break;
}
LASSERT(list_empty(&ocw.ocw_entry));
break;
}
LASSERT(list_empty(&ocw.ocw_entry));
struct osd_inode_id *lid;
int rc;
struct osd_inode_id *lid;
int rc;
- if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
- struct l_wait_info lwi;
-
- lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val), NULL, NULL);
- if (likely(lwi.lwi_timeout > 0))
- l_wait_event(thread->t_ctl_waitq,
- !list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
- &lwi);
- }
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0)
+ wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ !list_empty(&scrub->os_inconsistent_items) ||
+ !thread_is_running(thread),
+ cfs_time_seconds(cfs_fail_val));
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
static int osd_scrub_next(const struct lu_env *env, struct osd_device *dev,
struct lu_fid *fid, uint64_t *oid)
{
static int osd_scrub_next(const struct lu_env *env, struct osd_device *dev,
struct lu_fid *fid, uint64_t *oid)
{
- struct l_wait_info lwi = { 0 };
struct lustre_scrub *scrub = &dev->od_scrub;
struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
struct lustre_scrub *scrub = &dev->od_scrub;
struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0) {
- lwi = LWI_TIMEOUT(cfs_time_seconds(cfs_fail_val), NULL, NULL);
- if (likely(lwi.lwi_timeout > 0)) {
- l_wait_event(thread->t_ctl_waitq,
- !list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
- &lwi);
- if (unlikely(!thread_is_running(thread)))
- RETURN(SCRUB_NEXT_EXIT);
- }
+ wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ !list_empty(&scrub->os_inconsistent_items) ||
+ !thread_is_running(thread),
+ cfs_time_seconds(cfs_fail_val));
+
+ if (unlikely(!thread_is_running(thread)))
+ RETURN(SCRUB_NEXT_EXIT);
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
{
struct osp_device *d = _arg;
struct ptlrpc_thread *thread = &d->opd_sync_thread;
{
struct osp_device *d = _arg;
struct ptlrpc_thread *thread = &d->opd_sync_thread;
- struct l_wait_info lwi = { 0 };
struct llog_ctxt *ctxt;
struct obd_device *obd = d->opd_obd;
struct llog_handle *llh;
struct llog_ctxt *ctxt;
struct obd_device *obd = d->opd_obd;
struct llog_handle *llh;
while (atomic_read(&d->opd_sync_rpcs_in_progress) > 0) {
osp_sync_process_committed(&env, d);
while (atomic_read(&d->opd_sync_rpcs_in_progress) > 0) {
osp_sync_process_committed(&env, d);
- lwi = LWI_TIMEOUT(cfs_time_seconds(5), NULL, NULL);
- rc = l_wait_event(d->opd_sync_waitq,
- atomic_read(&d->opd_sync_rpcs_in_progress) == 0,
- &lwi);
- if (rc == -ETIMEDOUT)
+ rc = wait_event_idle_timeout(
+ d->opd_sync_waitq,
+ atomic_read(&d->opd_sync_rpcs_in_progress) == 0,
+ cfs_time_seconds(5));
+ if (rc == 0)
count++;
LASSERTF(count < 10, "%s: %d %d %sempty\n",
d->opd_obd->obd_name,
count++;
LASSERTF(count < 10, "%s: %d %d %sempty\n",
d->opd_obd->obd_name,
{
#ifdef ENABLE_PINGER
long timeout_jiffies = cfs_time_seconds(obd_timeout);
{
#ifdef ENABLE_PINGER
long timeout_jiffies = cfs_time_seconds(obd_timeout);
- struct l_wait_info lwi;
int rc;
ptlrpc_pinger_force(imp);
int rc;
ptlrpc_pinger_force(imp);
CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
obd2cli_tgt(imp->imp_obd), obd_timeout);
CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
obd2cli_tgt(imp->imp_obd), obd_timeout);
- lwi = LWI_TIMEOUT(timeout_jiffies, NULL, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp), &lwi);
+ rc = wait_event_idle_timeout(imp->imp_recovery_waitq,
+ !ptlrpc_import_in_recovery(imp),
+ timeout_jiffies);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ else
+ rc = 0;
CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(imp->imp_state));
return rc;
CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(imp->imp_state));
return rc;
/* See if we have anything in a pool, and wait if nothing */
while (list_empty(&svcpt->scp_rep_idle)) {
/* See if we have anything in a pool, and wait if nothing */
while (list_empty(&svcpt->scp_rep_idle)) {
- struct l_wait_info lwi;
int rc;
spin_unlock(&svcpt->scp_rep_lock);
/* If we cannot get anything for some long time, we better
* bail out instead of waiting infinitely */
int rc;
spin_unlock(&svcpt->scp_rep_lock);
/* If we cannot get anything for some long time, we better
* bail out instead of waiting infinitely */
- lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
- rc = l_wait_event(svcpt->scp_rep_waitq,
- !list_empty(&svcpt->scp_rep_idle), &lwi);
- if (rc != 0)
+ rc = wait_event_idle_timeout(svcpt->scp_rep_waitq,
+ !list_empty(&svcpt->scp_rep_idle),
+ cfs_time_seconds(10));
+ if (rc <= 0)
goto out;
spin_lock(&svcpt->scp_rep_lock);
}
goto out;
spin_lock(&svcpt->scp_rep_lock);
}
GOTO(out, rc);
if (!async) {
GOTO(out, rc);
if (!async) {
- struct l_wait_info lwi;
long timeout = cfs_time_seconds(obd_timeout);
long timeout = cfs_time_seconds(obd_timeout);
- CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
+ CDEBUG(D_HA, "%s: recovery started, waiting %u jiffies\n",
obd2cli_tgt(imp->imp_obd), obd_timeout);
obd2cli_tgt(imp->imp_obd), obd_timeout);
- lwi = LWI_TIMEOUT(timeout, NULL, NULL);
- rc = l_wait_event(imp->imp_recovery_waitq,
- !ptlrpc_import_in_recovery(imp), &lwi);
+ rc = wait_event_idle_timeout(imp->imp_recovery_waitq,
+ !ptlrpc_import_in_recovery(imp),
+ timeout);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ else
+ rc = 0;
CDEBUG(D_HA, "%s: recovery finished\n",
obd2cli_tgt(imp->imp_obd));
}
CDEBUG(D_HA, "%s: recovery finished\n",
obd2cli_tgt(imp->imp_obd));
}
static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
{
while (1) {
static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
{
while (1) {
- int rc;
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
- NULL, NULL);
-
- rc = l_wait_event(svcpt->scp_waitq,
- atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
- if (rc == 0)
+ if (wait_event_idle_timeout(
+ svcpt->scp_waitq,
+ atomic_read(&svcpt->scp_nreps_difficult) == 0,
+ cfs_time_seconds(10)) > 0)
break;
CWARN("Unexpectedly long timeout %s %p\n",
svcpt->scp_service->srv_name, svcpt->scp_service);
break;
CWARN("Unexpectedly long timeout %s %p\n",
svcpt->scp_service->srv_name, svcpt->scp_service);
enum osd_quota_local_flags *local_flags)
{
struct lquota_entry *lqe;
enum osd_quota_local_flags *local_flags)
{
struct lquota_entry *lqe;
- struct l_wait_info lwi;
enum osd_quota_local_flags qtype_flag = 0;
int rc, ret = -EINPROGRESS;
ENTRY;
enum osd_quota_local_flags qtype_flag = 0;
int rc, ret = -EINPROGRESS;
ENTRY;
/* acquire quota space for the operation, cap overall wait time to
* prevent a service thread from being stuck for too long */
/* acquire quota space for the operation, cap overall wait time to
* prevent a service thread from being stuck for too long */
- lwi = LWI_TIMEOUT(cfs_time_seconds(qsd_wait_timeout(qqi->qqi_qsd)),
- NULL, NULL);
- rc = l_wait_event(lqe->lqe_waiters, qsd_acquire(env, lqe, space, &ret),
- &lwi);
+ rc = wait_event_idle_timeout(
+ lqe->lqe_waiters, qsd_acquire(env, lqe, space, &ret),
+ cfs_time_seconds(qsd_wait_timeout(qqi->qqi_qsd)));
- if (rc == 0 && ret == 0) {
+ if (rc > 0 && ret == 0) {
+ else if (rc == 0)
+ rc = -ETIMEDOUT;
LQUOTA_DEBUG(lqe, "acquire quota failed:%d", rc);
LQUOTA_DEBUG(lqe, "acquire quota failed:%d", rc);
{
struct qsd_instance *qsd = (struct qsd_instance *)arg;
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
{
struct qsd_instance *qsd = (struct qsd_instance *)arg;
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi;
struct list_head queue;
struct qsd_upd_rec *upd, *n;
struct lu_env *env;
struct list_head queue;
struct qsd_upd_rec *upd, *n;
struct lu_env *env;
wake_up(&thread->t_ctl_waitq);
INIT_LIST_HEAD(&queue);
wake_up(&thread->t_ctl_waitq);
INIT_LIST_HEAD(&queue);
- lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
- l_wait_event(thread->t_ctl_waitq,
- qsd_job_pending(qsd, &queue, &uptodate) ||
- !thread_is_running(thread), &lwi);
+ wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ qsd_job_pending(qsd, &queue, &uptodate) ||
+ !thread_is_running(thread),
+ cfs_time_seconds(QSD_WB_INTERVAL));
list_for_each_entry_safe(upd, n, &queue, qur_link) {
list_del_init(&upd->qur_link);
list_for_each_entry_safe(upd, n, &queue, qur_link) {
list_del_init(&upd->qur_link);
RETURN(1);
if (phase1 && inflight != 0) {
RETURN(1);
if (phase1 && inflight != 0) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(left),
- NULL, NULL);
-
- rc = l_wait_event(barrier->bi_waitq,
- percpu_counter_sum(&barrier->bi_writers) == 0,
- &lwi);
- if (rc)
+ rc = wait_event_idle_timeout(
+ barrier->bi_waitq,
+ percpu_counter_sum(&barrier->bi_writers) == 0,
+ cfs_time_seconds(left));
+ if (rc <= 0)
RETURN(1);
/* sync again after all inflight modifications done. */
RETURN(1);
/* sync again after all inflight modifications done. */