* Author: Eric Barton <eric@bartonsoftware.com>
*/
+#include <asm/page.h>
#include "ralnd.h"
void
spin_lock_irqsave(&dev->rad_lock, flags);
- if (!dev->rad_ready) {
- dev->rad_ready = 1;
- cfs_waitq_signal(&dev->rad_waitq);
- }
+ if (!dev->rad_ready) {
+ dev->rad_ready = 1;
+ wake_up(&dev->rad_waitq);
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);
return;
spin_lock_irqsave(&dev->rad_lock, flags);
- if (!conn->rac_scheduled) {
- kranal_conn_addref(conn); /* +1 ref for scheduler */
- conn->rac_scheduled = 1;
- cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
- cfs_waitq_signal(&dev->rad_waitq);
- }
+ if (!conn->rac_scheduled) {
+ kranal_conn_addref(conn); /* +1 ref for scheduler */
+ conn->rac_scheduled = 1;
+ cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_ready_conns);
+ wake_up(&dev->rad_waitq);
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);
}
tx->tx_nob = nob;
tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
- phys->Address = lnet_page2phys(kiov->kiov_page);
+ phys->Address = page_to_phys(kiov->kiov_page);
phys++;
resid = nob - (kiov->kiov_len - offset);
return -EMSGSIZE;
}
- phys->Address = lnet_page2phys(kiov->kiov_page);
+ phys->Address = page_to_phys(kiov->kiov_page);
phys++;
resid -= PAGE_SIZE;
void
kranal_tx_done (kra_tx_t *tx, int completion)
{
	/* Retire a transmit descriptor: unmap its buffer, detach its LNet
	 * messages, return the descriptor to the global idle-tx pool, and
	 * only then finalize the detached messages with 'completion' as
	 * their status.  The LASSERT below forbids interrupt context;
	 * presumably because lnet_finalize() may sleep -- TODO confirm. */
	lnet_msg_t *lnetmsg[2];
	unsigned long flags;
	int i;

	LASSERT (!in_interrupt());

	kranal_unmap_buffer(tx);

	/* Detach the (up to two) LNet messages now, before the descriptor
	 * is recycled: once 'tx' is back on the idle list another thread
	 * may reuse it, so nothing in it can be touched afterwards. */
	lnetmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
	lnetmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;

	/* Scrub descriptor state so a stale buffer/conn reference cannot
	 * leak into its next use. */
	tx->tx_buftype = RANAL_BUF_NONE;
	tx->tx_msg.ram_type = RANAL_MSG_NONE;
	tx->tx_conn = NULL;

	spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);

	cfs_list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);

	spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);

	/* finalize AFTER freeing lnet msgs */
	for (i = 0; i < 2; i++) {
		if (lnetmsg[i] == NULL)
			continue;

		lnet_finalize(kranal_data.kra_ni, lnetmsg[i], completion);
	}
}
kra_conn_t *
spin_lock(&kranal_data.kra_connd_lock);
- cfs_list_add_tail(&peer->rap_connd_list,
- &kranal_data.kra_connd_peers);
- cfs_waitq_signal(&kranal_data.kra_connd_waitq);
+ cfs_list_add_tail(&peer->rap_connd_list,
+ &kranal_data.kra_connd_peers);
+ wake_up(&kranal_data.kra_connd_waitq);
spin_unlock(&kranal_data.kra_connd_lock);
}
/* NB 'private' is different depending on what we're sending.... */
- CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
- nob, niov, libcfs_id2str(target));
+ CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
+ nob, niov, libcfs_id2str(target));
- LASSERT (nob == 0 || niov > 0);
- LASSERT (niov <= LNET_MAX_IOV);
+ LASSERT (nob == 0 || niov > 0);
+ LASSERT (niov <= LNET_MAX_IOV);
- LASSERT (!cfs_in_interrupt());
- /* payload is either all vaddrs or all pages */
- LASSERT (!(kiov != NULL && iov != NULL));
+ LASSERT (!in_interrupt());
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(kiov != NULL && iov != NULL));
- if (routing) {
- CERROR ("Can't route\n");
- return -EIO;
- }
+ if (routing) {
+ CERROR ("Can't route\n");
+ return -EIO;
+ }
switch(type) {
default:
struct iovec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- kra_conn_t *conn = private;
- kra_msg_t *rxmsg = conn->rac_rxmsg;
- kra_tx_t *tx;
- void *buffer;
- int rc;
+ kra_conn_t *conn = private;
+ kra_msg_t *rxmsg = conn->rac_rxmsg;
+ kra_tx_t *tx;
+ void *buffer;
+ int rc;
- LASSERT (mlen <= rlen);
- LASSERT (!cfs_in_interrupt());
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
+ LASSERT (mlen <= rlen);
+ LASSERT (!in_interrupt());
+ /* Either all pages or all vaddrs */
+ LASSERT (!(kiov != NULL && iov != NULL));
- CDEBUG(D_NET, "conn %p, rxmsg %p, lntmsg %p\n", conn, rxmsg, lntmsg);
+ CDEBUG(D_NET, "conn %p, rxmsg %p, lntmsg %p\n", conn, rxmsg, lntmsg);
switch(rxmsg->ram_type) {
default:
int
kranal_thread_start(int(*fn)(void *arg), void *arg, char *name)
{
	/* Spawn a named kernel thread running fn(arg) and account for it in
	 * kra_nthreads.
	 *
	 * Returns 0 on success or a negative errno if the thread could not
	 * be created.
	 *
	 * Fix: the previous version returned PTR_ERR(task) unconditionally.
	 * For a successfully created thread, PTR_ERR() of a valid pointer is
	 * just the pointer value cast to long -- non-zero (and negative on
	 * architectures where kernel pointers have the top bit set) -- so
	 * callers checking for a non-zero/negative return would treat a
	 * successful start as a failure.  Return 0 explicitly on success
	 * (equivalent to PTR_ERR_OR_ZERO(task)). */
	struct task_struct *task = cfs_thread_run(fn, arg, name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	atomic_inc(&kranal_data.kra_nthreads);
	return 0;
}
void
kranal_thread_fini (void)
{
	/* Called by each LND thread as it exits: drop this thread's count
	 * from kra_nthreads (incremented in kranal_thread_start), which
	 * shutdown presumably polls to wait for all threads -- TODO confirm
	 * against the module teardown path. */
	atomic_dec(&kranal_data.kra_nthreads);
}
int
conn->rac_state == RANAL_CONN_CLOSING);
if (!conn->rac_close_sent &&
- cfs_time_aftereq(now, conn->rac_last_tx + conn->rac_keepalive *
- HZ)) {
+ cfs_time_aftereq(now, conn->rac_last_tx +
+ msecs_to_jiffies(conn->rac_keepalive *
+ MSEC_PER_SEC))) {
/* not sent in a while; schedule conn so scheduler sends a keepalive */
CDEBUG(D_NET, "Scheduling keepalive %p->%s\n",
conn, libcfs_nid2str(conn->rac_peer->rap_nid));
kranal_schedule_conn(conn);
}
- timeout = conn->rac_timeout * HZ;
+ timeout = msecs_to_jiffies(conn->rac_timeout * MSEC_PER_SEC);
if (!conn->rac_close_recvd &&
cfs_time_aftereq(now, conn->rac_last_rx + timeout)) {
(conn->rac_state == RANAL_CONN_ESTABLISHED) ?
"Nothing" : "CLOSE not",
libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - conn->rac_last_rx)/HZ);
+ jiffies_to_msecs(now - conn->rac_last_rx)/MSEC_PER_SEC);
return -ETIMEDOUT;
}
spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on fmaq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/HZ);
+ jiffies_to_msecs(now-tx->tx_qtime)/MSEC_PER_SEC);
return -ETIMEDOUT;
}
}
spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on rdmaq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/HZ);
+ jiffies_to_msecs(now-tx->tx_qtime)/MSEC_PER_SEC);
return -ETIMEDOUT;
}
}
spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on replyq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
- (now - tx->tx_qtime)/HZ);
+ jiffies_to_msecs(now-tx->tx_qtime)/MSEC_PER_SEC);
return -ETIMEDOUT;
}
}
int
kranal_connd (void *arg)
{
	/* Connection daemon thread.  Under kra_connd_lock it services two
	 * work queues: freshly accepted sockets awaiting a passive
	 * handshake (kra_connd_acceptq) and peers queued for an active
	 * connection attempt (kra_connd_peers).  The lock is dropped around
	 * the (blocking) handshake/connect calls and re-taken afterwards.
	 * When both queues are empty it sleeps exclusively on
	 * kra_connd_waitq; it exits when kra_shutdown is set. */
	long id = (long)arg;		/* daemon instance number; not otherwise used here */
	wait_queue_t wait;
	unsigned long flags;
	kra_peer_t *peer;
	kra_acceptsock_t *ras;
	int did_something;

	cfs_block_allsigs();

	init_waitqueue_entry_current(&wait);

	spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);

	while (!kranal_data.kra_shutdown) {
		did_something = 0;

		/* Passive side: handshake one accepted socket.  The socket
		 * is removed from the queue before the lock is dropped so no
		 * other connd instance can pick it up. */
		if (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
			ras = cfs_list_entry(kranal_data.kra_connd_acceptq.next,
					     kra_acceptsock_t, ras_list);
			cfs_list_del(&ras->ras_list);

			spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
					       flags);

			CDEBUG(D_NET,"About to handshake someone\n");

			kranal_conn_handshake(ras->ras_sock, NULL);
			kranal_free_acceptsock(ras);

			CDEBUG(D_NET,"Finished handshaking someone\n");

			spin_lock_irqsave(&kranal_data.kra_connd_lock,
					  flags);
			did_something = 1;
		}

		/* Active side: connect to one queued peer.  The queue holds
		 * a peer ref (taken when the peer was enqueued) which is
		 * dropped after the connection attempt. */
		if (!cfs_list_empty(&kranal_data.kra_connd_peers)) {
			peer = cfs_list_entry(kranal_data.kra_connd_peers.next,
					      kra_peer_t, rap_connd_list);

			cfs_list_del_init(&peer->rap_connd_list);
			spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
					       flags);

			kranal_connect(peer);
			kranal_peer_decref(peer);

			spin_lock_irqsave(&kranal_data.kra_connd_lock,
					  flags);
			did_something = 1;
		}

		/* Re-poll both queues before sleeping: more work may have
		 * arrived while the lock was dropped. */
		if (did_something)
			continue;

		/* Exclusive wait so a single wake_up() rouses only one
		 * connd thread per work item. */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&kranal_data.kra_connd_waitq, &wait);

		spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

		waitq_wait(&wait, TASK_INTERRUPTIBLE);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&kranal_data.kra_connd_waitq, &wait);

		spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
	}

	spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

	kranal_thread_fini();
	return 0;
}
void
int
kranal_reaper (void *arg)
{
	/* Reaper thread: periodically sweeps the connection hash table,
	 * calling kranal_reaper_check() on a chunk of buckets each pass to
	 * detect timed-out connections.  It also tracks the global minimum
	 * keepalive/timeout interval across all connections (rescanning
	 * whenever kra_new_min_timeout is posted) so the sweep rate can be
	 * matched to the tightest deadline.  Exits when kra_shutdown is
	 * set. */
	wait_queue_t wait;
	unsigned long flags;
	long timeout;
	int i;
	int conn_entries = kranal_data.kra_conn_hash_size;
	int conn_index = 0;				/* next bucket to check */
	int base_index = conn_entries - 1;		/* bucket where the current min-timeout scan began */
	unsigned long next_check_time = jiffies;
	long next_min_timeout = MAX_SCHEDULE_TIMEOUT;	/* min seen in the scan in progress */
	long current_min_timeout = 1;			/* min from the last completed scan */

	cfs_block_allsigs();

	init_waitqueue_entry_current(&wait);

	spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);

	while (!kranal_data.kra_shutdown) {
		/* I wake up every 'p' seconds to check for timeouts on some
		 * more peers.  I try to check every connection 'n' times
		 * within the global minimum of all keepalive and timeout
		 * intervals, to ensure I attend to every connection within
		 * (n+1)/n times its timeout intervals. */
		const int p = 1;
		const int n = 3;
		unsigned long min_timeout;
		int chunk;

		/* careful with the jiffy wrap... */
		timeout = (long)(next_check_time - jiffies);
		if (timeout > 0) {
			/* Not yet due: sleep (interruptibly, on the reaper
			 * waitq so a new-min-timeout poster can wake us
			 * early) and re-evaluate from the top. */
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&kranal_data.kra_reaper_waitq, &wait);

			spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
					       flags);

			waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
					timeout);

			spin_lock_irqsave(&kranal_data.kra_reaper_lock,
					  flags);

			set_current_state(TASK_RUNNING);
			remove_wait_queue(&kranal_data.kra_reaper_waitq, &wait);
			continue;
		}

		if (kranal_data.kra_new_min_timeout !=
		    MAX_SCHEDULE_TIMEOUT) {
			/* new min timeout set: restart min timeout scan */
			next_min_timeout = MAX_SCHEDULE_TIMEOUT;
			base_index = conn_index - 1;
			if (base_index < 0)
				base_index = conn_entries - 1;

			if (kranal_data.kra_new_min_timeout <
			    current_min_timeout) {
				current_min_timeout =
					kranal_data.kra_new_min_timeout;
				CDEBUG(D_NET, "Set new min timeout %ld\n",
				       current_min_timeout);
			}

			/* Consume the posted value (MAX_SCHEDULE_TIMEOUT
			 * doubles as "nothing posted"). */
			kranal_data.kra_new_min_timeout =
				MAX_SCHEDULE_TIMEOUT;
		}
		min_timeout = current_min_timeout;

		spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);

		LASSERT (min_timeout > 0);

		/* Compute how many table entries to check now so I get round
		 * the whole table fast enough given that I do this at fixed
		 * intervals of 'p' seconds) */
		chunk = conn_entries;
		if (min_timeout > n * p)
			chunk = (chunk * n * p) / min_timeout;
		if (chunk == 0)
			chunk = 1;

		for (i = 0; i < chunk; i++) {
			kranal_reaper_check(conn_index,
					    &next_min_timeout);
			conn_index = (conn_index + 1) % conn_entries;
		}

		next_check_time += msecs_to_jiffies(p * MSEC_PER_SEC);

		spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);

		/* Did this pass step over base_index (i.e. complete a full
		 * lap of the table)?  The second disjunct handles the
		 * wrapped case where conn_index has gone past the end. */
		if (((conn_index - chunk <= base_index &&
		      base_index < conn_index) ||
		     (conn_index - conn_entries - chunk <= base_index &&
		      base_index < conn_index - conn_entries))) {

			/* Scanned all conns: set current_min_timeout... */
			if (current_min_timeout != next_min_timeout) {
				current_min_timeout = next_min_timeout;
				CDEBUG(D_NET, "Set new min timeout %ld\n",
				       current_min_timeout);
			}

			/* ...and restart min timeout scan */
			next_min_timeout = MAX_SCHEDULE_TIMEOUT;
			base_index = conn_index - 1;
			if (base_index < 0)
				base_index = conn_entries - 1;
		}
	}

	spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);

	kranal_thread_fini();
	return 0;
}
void
case RAP_NOT_DONE:
if (cfs_time_aftereq(jiffies,
- conn->rac_last_tx + conn->rac_keepalive *
- HZ))
+ conn->rac_last_tx +
+ msecs_to_jiffies(conn->rac_keepalive *
+ MSEC_PER_SEC)))
CWARN("EAGAIN sending %02x (idle %lu secs)\n",
msg->ram_type,
- (jiffies - conn->rac_last_tx)/HZ);
+ jiffies_to_msecs(jiffies - conn->rac_last_tx) /
+ MSEC_PER_SEC);
return -EAGAIN;
}
}
if (cfs_time_aftereq(jiffies,
conn->rac_last_tx +
- conn->rac_keepalive * HZ)) {
+ msecs_to_jiffies(conn->rac_keepalive *
+ MSEC_PER_SEC))) {
CDEBUG(D_NET, "sending NOOP (rdma in progress)\n");
kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
spin_unlock_irqrestore(&conn->rac_lock, flags);
if (cfs_time_aftereq(jiffies,
- conn->rac_last_tx + conn->rac_keepalive *
- HZ)) {
+ conn->rac_last_tx +
+ msecs_to_jiffies(conn->rac_keepalive *
+ MSEC_PER_SEC))) {
CDEBUG(D_NET, "sending NOOP -> %s (%p idle %lu(%ld))\n",
libcfs_nid2str(conn->rac_peer->rap_nid), conn,
- (jiffies - conn->rac_last_tx)/HZ,
+ jiffies_to_msecs(jiffies - conn->rac_last_tx) /
+ MSEC_PER_SEC,
conn->rac_keepalive);
kranal_init_msg(&conn->rac_msg, RANAL_MSG_NOOP);
kranal_sendmsg(conn, &conn->rac_msg, NULL, 0);
LASSERT (rrc == RAP_NOT_DONE);
if (!cfs_time_aftereq(jiffies, conn->rac_last_tx +
- conn->rac_timeout * HZ))
+ msecs_to_jiffies(conn->rac_timeout*MSEC_PER_SEC)))
return -EAGAIN;
/* Too late */
int
kranal_scheduler (void *arg)
{
- kra_device_t *dev = (kra_device_t *)arg;
- cfs_waitlink_t wait;
- kra_conn_t *conn;
+ kra_device_t *dev = (kra_device_t *)arg;
+ wait_queue_t wait;
+ kra_conn_t *conn;
unsigned long flags;
unsigned long deadline;
unsigned long soonest;
cfs_block_allsigs();
- dev->rad_scheduler = current;
- cfs_waitlink_init(&wait);
+ dev->rad_scheduler = current;
+ init_waitqueue_entry_current(&wait);
spin_lock_irqsave(&dev->rad_lock, flags);
if (busy_loops++ >= RANAL_RESCHED) {
spin_unlock_irqrestore(&dev->rad_lock, flags);
- cfs_cond_resched();
- busy_loops = 0;
+ cond_resched();
+ busy_loops = 0;
spin_lock_irqsave(&dev->rad_lock, flags);
}
/* retry with exponential backoff until HZ */
if (conn->rac_keepalive == 0)
conn->rac_keepalive = 1;
- else if (conn->rac_keepalive <= HZ)
+ else if (conn->rac_keepalive <=
+ msecs_to_jiffies(MSEC_PER_SEC))
conn->rac_keepalive *= 2;
else
- conn->rac_keepalive += HZ;
+ conn->rac_keepalive +=
+ msecs_to_jiffies(MSEC_PER_SEC);
deadline = conn->rac_last_tx + conn->rac_keepalive;
spin_lock_irqsave(&dev->rad_lock, flags);
if (dropped_lock) /* may sleep iff I didn't drop the lock */
continue;
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&dev->rad_waitq, &wait);
spin_unlock_irqrestore(&dev->rad_lock, flags);
- if (nsoonest == 0) {
- busy_loops = 0;
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
- } else {
- timeout = (long)(soonest - jiffies);
- if (timeout > 0) {
- busy_loops = 0;
- cfs_waitq_timedwait(&wait,
- CFS_TASK_INTERRUPTIBLE,
- timeout);
- }
- }
+ if (nsoonest == 0) {
+ busy_loops = 0;
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ } else {
+ timeout = (long)(soonest - jiffies);
+ if (timeout > 0) {
+ busy_loops = 0;
+ waitq_timedwait(&wait,
+ TASK_INTERRUPTIBLE,
+ timeout);
+ }
+ }
- cfs_waitq_del(&dev->rad_waitq, &wait);
- cfs_set_current_state(CFS_TASK_RUNNING);
+ remove_wait_queue(&dev->rad_waitq, &wait);
+ set_current_state(TASK_RUNNING);
spin_lock_irqsave(&dev->rad_lock, flags);
- }
+ }
spin_unlock_irqrestore(&dev->rad_lock, flags);