/* Wait for unstable pages to be committed to stable storage */
if (force == 0) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
- atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0,
- &lwi);
+ rc = l_wait_event_abortable(
+ sbi->ll_cache->ccc_unstable_waitq,
+ atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
}
ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
- if (force == 0 && rc != -EINTR)
+ if (force == 0 && rc != -ERESTARTSYS)
LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
/* We need to set force before the lov_disconnect in
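For context on the return-code change above: l_wait_event_abortable() appears to be a thin wrapper that masks everything except Lustre's fatal signals around a plain wait_event_interruptible() sleep, so an aborted wait now surfaces as -ERESTARTSYS instead of the -EINTR the old LWI_INTR path produced. A rough sketch of the idea (the helper name is real, but the mask and body below are illustrative assumptions, not copied from the tree):

	#include <linux/wait.h>
	#include <linux/signal.h>

	/* Signals that should still abort the sleep; illustrative mask only. */
	#define DEMO_FATAL_SIGS \
		(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
		 sigmask(SIGQUIT) | sigmask(SIGALRM))

	/* Sketch: block non-fatal signals, sleep interruptibly on @wq until
	 * @cond is true, then restore the old mask.  An interrupted sleep
	 * propagates wait_event_interruptible()'s -ERESTARTSYS, hence the
	 * "rc != -ERESTARTSYS" test in the hunk above. */
	#define demo_wait_event_abortable(wq, cond)			\
	({								\
		sigset_t __blocked, __old;				\
		int __ret;						\
		siginitsetinv(&__blocked, DEMO_FATAL_SIGS);		\
		sigprocmask(SIG_BLOCK, &__blocked, &__old);		\
		__ret = wait_event_interruptible(wq, cond);		\
		sigprocmask(SIG_SETMASK, &__old, NULL);			\
		__ret;							\
	})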
int obd_get_request_slot(struct client_obd *cli)
{
struct obd_request_slot_waiter orsw;
- struct l_wait_info lwi;
int rc;
spin_lock(&cli->cl_loi_list_lock);
orsw.orsw_signaled = false;
spin_unlock(&cli->cl_loi_list_lock);
- lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(orsw.orsw_waitq,
- obd_request_slot_avail(cli, &orsw) ||
- orsw.orsw_signaled,
- &lwi);
+ rc = l_wait_event_abortable(orsw.orsw_waitq,
+ obd_request_slot_avail(cli, &orsw) ||
+ orsw.orsw_signaled);
/* Here, we must take the lock to avoid the on-stack 'orsw' to be
* freed but other (such as obd_put_request_slot) is using it. */
else
list_del(&orsw.orsw_entry);
}
+ rc = -EINTR;
}
if (orsw.orsw_signaled) {
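One consequence visible in this hunk: the abortable wait reports interruption as -ERESTARTSYS, while callers of obd_get_request_slot() were written against -EINTR, so the error is normalised (the added rc = -EINTR) after the on-stack waiter has been unlinked under cl_loi_list_lock. The general shape of that pattern, as a simplified standalone sketch (types and names invented for illustration, not the tree's code):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	/* Simplified stand-in for the on-stack request-slot waiter. */
	struct demo_slot_waiter {
		struct list_head	sw_entry;
		wait_queue_head_t	sw_waitq;
		bool			sw_granted;
	};

	/* Sketch: if the interruptible sleep is aborted, unlink the
	 * stack-allocated waiter under the list lock before it goes out of
	 * scope, and report -EINTR to keep the existing error contract. */
	static int demo_slot_wait(struct demo_slot_waiter *w, spinlock_t *lock)
	{
		int rc = wait_event_interruptible(w->sw_waitq, w->sw_granted);

		spin_lock(lock);
		if (rc != 0) {			/* -ERESTARTSYS from a signal */
			if (!list_empty(&w->sw_entry))
				list_del(&w->sw_entry);
			rc = -EINTR;		/* what callers expect */
		}
		spin_unlock(lock);
		return rc;
	}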
int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
{
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct obd_llog_group *olg;
int rc, idx;
CERROR("Error %d while cleaning up ctxt %p\n",
rc, ctxt);
- l_wait_event(olg->olg_waitq,
- llog_group_ctxt_null(olg, idx), &lwi);
+ l_wait_event_abortable(olg->olg_waitq,
+ llog_group_ctxt_null(olg, idx));
RETURN(rc);
}
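Nothing in this hunk changes the wake-up side, but the waiter above depends on it: llog_group_ctxt_null() must become true and olg_waitq must be woken by whoever drops the last context reference, otherwise the abortable wait only returns on a fatal signal. A minimal illustration of that pairing (hypothetical helper, not tree code):

	#include <linux/wait.h>

	/* Sketch: clear the context slot first so the waiter's condition is
	 * true, then wake the queue the waiter sleeps on. */
	static void demo_ctxt_slot_release(void **slot, wait_queue_head_t *waitq)
	{
		*slot = NULL;
		wake_up(waitq);
	}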
static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
struct osc_page *opg)
{
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct osc_io *oio = osc_env_io(env);
int rc = 0;
continue;
cond_resched();
- rc = l_wait_event(osc_lru_waitq,
- atomic_long_read(cli->cl_lru_left) > 0,
- &lwi);
- if (rc < 0)
+ rc = l_wait_event_abortable(
+ osc_lru_waitq,
+ atomic_long_read(cli->cl_lru_left) > 0);
+ if (rc < 0) {
+ rc = -EINTR;
break;
+ }
}
out:
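The osc_lru_alloc() hunk keeps the existing reclaim loop and only swaps the sleep primitive. The shape of the loop is: try to take an LRU slot, and when the budget is exhausted, yield with cond_resched() and sleep interruptibly until cl_lru_left goes positive, folding a signal into -EINTR. A condensed, self-contained sketch of that shape (names invented for illustration):

	#include <linux/atomic.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	/* Sketch: reserve one slot from a shared budget, sleeping until the
	 * budget recovers; a signal aborts the loop as -EINTR. */
	static int demo_lru_reserve(atomic_long_t *left, wait_queue_head_t *waitq)
	{
		int rc = 0;

		while (atomic_long_dec_return(left) < 0) {
			atomic_long_inc(left);	/* undo the failed reservation */
			cond_resched();
			rc = wait_event_interruptible(*waitq,
						      atomic_long_read(left) > 0);
			if (rc < 0) {
				rc = -EINTR;
				break;
			}
		}
		return rc;
	}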
req->rq_interpret_reply = osc_destroy_interpret;
if (!osc_can_send_destroy(cli)) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
/*
* Wait until the number of on-going destroy RPCs drops
* under max_rpc_in_flight
*/
- rc = l_wait_event_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli), &lwi);
+ rc = l_wait_event_abortable_exclusive(
+ cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli));
if (rc) {
ptlrpc_req_finished(req);
- RETURN(rc);
+ RETURN(-EINTR);
}
}
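l_wait_event_abortable_exclusive() is presumably the exclusive-waiter flavour of the same helper: the task is queued with the exclusive flag so that a single wake_up() on cl_destroy_waitq releases one throttled destroy sender rather than all of them, and as before the caller translates an aborted sleep into -EINTR. A sketch of the underlying idea, assuming it is built on wait_event_interruptible_exclusive() (illustrative, not the tree's definition):

	#include <linux/wait.h>

	/* Sketch: exclusive interruptible wait; one wake_up() releases one
	 * queued waiter.  An aborted sleep is reported as -EINTR, mirroring
	 * the RETURN(-EINTR) in the hunk above. */
	static int demo_throttle_wait(wait_queue_head_t *waitq,
				      bool (*can_send)(void *), void *arg)
	{
		int rc = wait_event_interruptible_exclusive(*waitq, can_send(arg));

		return rc ? -EINTR : 0;
	}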
ptlrpc_disconnect_import(imp, 1);
/* Wait for all invalidate calls to finish */
if (atomic_read(&imp->imp_inval_count) > 0) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
int rc;
- rc = l_wait_event(imp->imp_recovery_waitq,
- (atomic_read(&imp->imp_inval_count) == 0),
- &lwi);
+ rc = l_wait_event_abortable(
+ imp->imp_recovery_waitq,
+ (atomic_read(&imp->imp_inval_count) == 0));
if (rc)
CERROR("Interrupted, inval=%d\n",
atomic_read(&imp->imp_inval_count));
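Here the return value is only reported via CERROR(), so the wait is best-effort. The condition pairs with the invalidate path, which must decrement imp_inval_count and then wake imp_recovery_waitq so the sleeper re-checks it; a minimal sketch of that completion side (hypothetical helper, not tree code):

	#include <linux/atomic.h>
	#include <linux/wait.h>

	/* Sketch: drop one in-flight invalidation and wake any task waiting
	 * for the count to reach zero. */
	static void demo_inval_done(atomic_t *count, wait_queue_head_t *waitq)
	{
		if (atomic_dec_and_test(count))
			wake_up_all(waitq);
	}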