#include <linux/portals_lib.h>
+/*
+ * True iff @task has one of the fatal signals SIGKILL, SIGINT or SIGTERM
+ * queued in its pending set.
+ *
+ * XXX this should be a single mask check (one sigandsets-style test)
+ * instead of three sigismember() calls.
+ */
+#define l_killable_pending(task) \
+(sigismember(&((task)->pending.signal), SIGKILL) || \
+ sigismember(&((task)->pending.signal), SIGINT) || \
+ sigismember(&((task)->pending.signal), SIGTERM))
+
+/*
+ * Like wait_event_interruptible(), but the sleep may only be interrupted
+ * by the "killable" signals SIGKILL, SIGINT or SIGTERM (see
+ * l_killable_pending() above).
+ *
+ * Sleeps in TASK_INTERRUPTIBLE until 'condition' is true.  If one of the
+ * killable signals becomes pending first, stores -ERESTARTSYS in 'ret'
+ * and stops waiting; 'ret' is left untouched on a normal completion.
+ * The signal_pending() test is just a cheap fast path in front of the
+ * authoritative l_killable_pending() check.
+ *
+ * NOTE(review): if a NON-killable signal is pending, signal_pending()
+ * stays true and schedule() in TASK_INTERRUPTIBLE may return immediately,
+ * so this loop can busy-spin until 'condition' comes true -- TODO confirm
+ * whether callers can see such signals, or block them around the wait.
+ */
+#define __l_wait_event_killable(wq, condition, ret) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (!signal_pending(current) || \
+ !l_killable_pending(current)) { \
+ schedule(); \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while(0)
+
+/*
+ * Sleep on @wq until @condition is true; only SIGKILL, SIGINT and SIGTERM
+ * can interrupt the wait.  Evaluates to 0 when the condition was met, or
+ * -ERESTARTSYS when the wait was interrupted by one of those signals.
+ * The condition is tested once up front so an already-true condition
+ * never touches the wait queue.
+ */
+#define l_wait_event_killable(wq, condition) \
+({ \
+ int __wait_rc = 0; \
+ if (!(condition)) \
+ __l_wait_event_killable(wq, condition, __wait_rc); \
+ __wait_rc; \
+})
+
#endif /* _LUSTRE_LIB_H */
if (lock) {
ldlm_lock2handle(lock, lockh);
- wait_event_interruptible(lock->l_waitq, lock->l_req_mode ==
- lock->l_granted_mode);
+ wait_event(lock->l_waitq,
+ lock->l_req_mode == lock->l_granted_mode);
}
if (rc)
LDLM_DEBUG(lock, "matched");
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock,"
" sleeping");
ldlm_lock_dump(lock);
- wait_event_interruptible(lock->l_waitq, lock->l_req_mode ==
- lock->l_granted_mode);
+ wait_event(lock->l_waitq,
+ lock->l_req_mode == lock->l_granted_mode);
LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
}
LDLM_DEBUG(lock, "client-side enqueue END");
/* FIXME: or cancelled. */
CDEBUG(D_NET, "convert returned a blocked lock, "
"going to sleep.\n");
- wait_event_interruptible(lock->l_waitq, lock->l_req_mode ==
- lock->l_granted_mode);
+ wait_event(lock->l_waitq,
+ lock->l_req_mode == lock->l_granted_mode);
CDEBUG(D_NET, "waking up, the lock must be granted.\n");
}
ldlm_lock_put(lock);
spin_unlock(&ctl_lock);
wake_up(&thread->t_ctl_waitq);
- wait_event_interruptible(thread->t_ctl_waitq,
- thread->t_flags & SVC_STOPPED);
+ wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
spin_lock(&ctl_lock);
list_del(&thread->t_link);
/* And now, loop forever on requests */
while (1) {
- wait_event_interruptible
- (sbi->ll_commitcbd_waitq,
- ll_commitcbd_check_event(sbi));
+ wait_event(sbi->ll_commitcbd_waitq,
+ ll_commitcbd_check_event(sbi));
spin_lock(&sbi->ll_commitcbd_lock);
if (sbi->ll_commitcbd_flags & LL_COMMITCBD_STOPPING) {
sbi->ll_commitcbd_flags = LL_COMMITCBD_STOPPING;
wake_up(&sbi->ll_commitcbd_waitq);
- wait_event_interruptible
- (sbi->ll_commitcbd_ctl_waitq,
- sbi->ll_commitcbd_flags & LL_COMMITCBD_STOPPED);
+ wait_event(sbi->ll_commitcbd_ctl_waitq,
+ sbi->ll_commitcbd_flags & LL_COMMITCBD_STOPPED);
RETURN(0);
}
buf += size;
}
- wait_event_interruptible(&cb_data->waitq,
- lov_read_check_status(cb_data));
+ wait_event(&cb_data->waitq, lov_read_check_status(cb_data));
if (cb_data->flags & PTL_RPC_FL_INTR)
rc = -EINTR;
GOTO(cleanup_buf, rc);
}
- wait_event_interruptible(desc->b_waitq, ptlrpc_check_bulk_sent(desc));
+ wait_event(desc->b_waitq, ptlrpc_check_bulk_sent(desc));
if (desc->b_flags & PTL_RPC_FL_INTR)
GOTO(cleanup_buf, rc = -EINTR);
static void pupdate_wakeup(unsigned long l)
{
- wake_up_interruptible(&pupdated.waitq);
+ wake_up(&pupdated.waitq);
}
CDEBUG(D_CACHE, "pupdated stopped...\n");
pupdated.active = -1;
- wake_up_interruptible (&pupdated.waitq);
+ wake_up(&pupdated.waitq);
return 0;
}
if (pupdated.active > 0) {
CDEBUG(D_CACHE, "inform pupdated\n");
pupdated.active = 0;
- wake_up_interruptible(&pupdated.waitq);
+ wake_up(&pupdated.waitq);
CDEBUG(D_CACHE, "wait for pupdated\n");
while (pupdated.active == 0) {
GOTO(out, rc);
/* If there's no callback function, sleep here until complete. */
- wait_event_interruptible(desc->b_waitq, ptlrpc_check_bulk_sent(desc));
+ l_wait_event_killable(desc->b_waitq, ptlrpc_check_bulk_sent(desc));
if (desc->b_flags & PTL_RPC_FL_INTR)
rc = -EINTR;
if (rc)
GOTO(out_bulk, rc);
- wait_event_interruptible(desc->b_waitq, ptlrpc_check_bulk_sent(desc));
+#warning OST must time out here.
+ wait_event(desc->b_waitq, ptlrpc_check_bulk_sent(desc));
if (desc->b_flags & PTL_RPC_FL_INTR)
rc = -EINTR;
reply_sent = 1;
ptlrpc_reply(req->rq_svc, req);
- wait_event_interruptible(desc->b_waitq,
- desc->b_flags & PTL_BULK_FL_RCVD);
+#warning OST must time out here.
+ wait_event(desc->b_waitq, desc->b_flags & PTL_BULK_FL_RCVD);
rc = obd_commitrw(cmd, conn, objcount, tmp1, niocount, local_nb,
desc->b_desc_private);
ENTRY;
CDEBUG(D_INODE, "continue delayed request %Ld opc %d\n",
req->rq_xid, req->rq_reqmsg->opc);
- wake_up_interruptible(&req->rq_wait_for_rep);
+ wake_up(&req->rq_wait_for_rep);
EXIT;
}
req->rq_level = LUSTRE_CONN_RECOVD;
req->rq_flags |= PTL_RPC_FL_RESEND;
req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
- wake_up_interruptible(&req->rq_wait_for_rep);
+ wake_up(&req->rq_wait_for_rep);
EXIT;
}
req->rq_status = -ERESTARTSYS;
req->rq_flags |= PTL_RPC_FL_RECOVERY;
req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
- wake_up_interruptible(&req->rq_wait_for_rep);
+ wake_up(&req->rq_wait_for_rep);
EXIT;
}
list_del_init(&req->rq_list);
list_add(&req->rq_list, cli->cli_delayed_head.prev);
spin_unlock(&cli->cli_lock);
- wait_event_interruptible
+ l_wait_event_killable
(req->rq_wait_for_rep,
req->rq_level <= req->rq_connection->c_level);
spin_lock(&cli->cli_lock);
spin_unlock(&cli->cli_lock);
CDEBUG(D_OTHER, "-- sleeping\n");
- wait_event_interruptible(req->rq_wait_for_rep,
- ptlrpc_check_reply(req));
+ l_wait_event_killable(req->rq_wait_for_rep, ptlrpc_check_reply(req));
CDEBUG(D_OTHER, "-- done\n");
if (req->rq_flags & PTL_RPC_FL_RESEND) {
}
CDEBUG(D_OTHER, "-- sleeping\n");
- wait_event_interruptible(req->rq_wait_for_rep,
- ptlrpc_check_reply(req));
+ l_wait_event_killable(req->rq_wait_for_rep, ptlrpc_check_reply(req));
CDEBUG(D_OTHER, "-- done\n");
up(&cli->cli_rpc_sem);
if (ev->type == PTL_EVENT_PUT) {
req->rq_repmsg = ev->mem_desc.start + ev->offset;
barrier();
- wake_up_interruptible(&req->rq_wait_for_rep);
+ wake_up(&req->rq_wait_for_rep);
} else {
// XXX make sure we understand all events, including ACK's
CERROR("Unknown event %d\n", ev->type);
bulk->b_cb(bulk);
if (atomic_dec_and_test(&desc->b_pages_remaining)) {
desc->b_flags |= PTL_BULK_FL_SENT;
- wake_up_interruptible(&desc->b_waitq);
+ wake_up(&desc->b_waitq);
if (desc->b_cb != NULL)
desc->b_cb(desc, desc->b_cb_data);
}
bulk->b_cb(bulk);
if (atomic_dec_and_test(&desc->b_pages_remaining)) {
desc->b_flags |= PTL_BULK_FL_RCVD;
- wake_up_interruptible(&desc->b_waitq);
+ wake_up(&desc->b_waitq);
if (desc->b_cb != NULL)
desc->b_cb(desc, desc->b_cb_data);
}
/* And now, loop forever on requests */
while (1) {
- wait_event_interruptible(recovd->recovd_waitq,
- recovd_check_event(recovd));
+ wait_event(recovd->recovd_waitq, recovd_check_event(recovd));
spin_lock(&recovd->recovd_lock);
if (recovd->recovd_flags & RECOVD_STOPPING) {
wake_up(&recovd->recovd_waitq);
spin_unlock(&recovd->recovd_lock);
- wait_event_interruptible(recovd->recovd_ctl_waitq,
- (recovd->recovd_flags & RECOVD_STOPPED));
+ wait_event(recovd->recovd_ctl_waitq,
+ (recovd->recovd_flags & RECOVD_STOPPED));
RETURN(0);
}
ENTRY;
spin_lock(&svc->srv_lock);
- if (sigismember(&(current->pending.signal), SIGKILL) ||
- sigismember(&(current->pending.signal), SIGTERM) ||
- sigismember(&(current->pending.signal), SIGINT)) {
- thread->t_flags |= SVC_KILLED;
- GOTO(out, rc = 1);
- }
-
if (thread->t_flags & SVC_STOPPING)
GOTO(out, rc = 1);
ptlrpc_check_event(svc, thread, &event));
spin_lock(&svc->srv_lock);
- if (thread->t_flags & SVC_SIGNAL) {
- thread->t_flags &= ~SVC_SIGNAL;
- spin_unlock(&svc->srv_lock);
- EXIT;
- break;
- }
if (thread->t_flags & SVC_STOPPING) {
thread->t_flags &= ~SVC_STOPPING;
spin_unlock(&svc->srv_lock);
wake_up(&svc->srv_waitq);
- wait_event_interruptible(thread->t_ctl_waitq,
- (thread->t_flags & SVC_STOPPED));
+ wait_event(thread->t_ctl_waitq, (thread->t_flags & SVC_STOPPED));
}
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)