/* "normal" descriptor is free */
if (!list_empty(&kranal_data.kra_idle_txs)) {
tx = list_entry(kranal_data.kra_idle_txs.next,
- kra_tx_t, tx_list);
+ kra_tx_t, tx_list);
break;
}
}
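                        /* Dip into the reserved ("nblk") pool kept back for
                         * callers that cannot block waiting for a descriptor */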
                        tx = list_entry(kranal_data.kra_idle_nblk_txs.next,
                                        kra_tx_t, tx_list);
                        break;
                }
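                /* Nothing free and this caller may block: drop the lock and
                 * sleep until a "normal" descriptor is returned */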
                spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);

                wait_event(kranal_data.kra_idle_tx_waitq,
                           !list_empty(&kranal_data.kra_idle_txs));
        }
        if (tx != NULL) {

        spin_lock(&kranal_data.kra_connd_lock);

        list_add_tail(&peer->rap_connd_list,
                      &kranal_data.kra_connd_peers);
        wake_up(&kranal_data.kra_connd_waitq);

        spin_unlock(&kranal_data.kra_connd_lock);
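        /* NB the connd thread (kranal_connd below) dequeues this peer and
         * performs the actual connection attempt */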
                /* Incoming message consistent with immediate reply? */
                if (conn->rac_rxmsg->ram_type != RANAL_MSG_GET_REQ) {
                        CERROR("REPLY to "LPX64" bad msg type %x!!!\n",
                               nid, conn->rac_rxmsg->ram_type);
                        return PTL_FAIL;
                }
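/* Portals send entry point for payloads described by kernel iovecs: pass
 * iov through to the common path, with NULL for the page (kiov) fragments */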
ptl_err_t
kranal_send (lib_nal_t *nal, void *private, lib_msg_t *cookie,
             ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
             unsigned int niov, struct iovec *iov,
             size_t offset, size_t len)
{
        return kranal_do_send(nal, private, cookie,
                              hdr, type, nid, pid,
                              niov, iov, NULL,
                              offset, len);
}
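/* As above, but the payload is described by page fragments (kiov) rather
 * than iovecs */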
ptl_err_t
kranal_send_pages (lib_nal_t *nal, void *private, lib_msg_t *cookie,
                   ptl_hdr_t *hdr, int type, ptl_nid_t nid, ptl_pid_t pid,
                   unsigned int niov, ptl_kiov_t *kiov,
                   size_t offset, size_t mlen)
{
        return kranal_do_send(nal, private, cookie,
                              hdr, type, nid, pid,
                              niov, NULL, kiov,
                              offset, mlen);
}
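/* Common receive path: 'private' is the connection whose current rxmsg is
 * being delivered; exactly one of iov/kiov may be non-NULL */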
ptl_err_t
kranal_recvmsg (lib_nal_t *nal, void *private, lib_msg_t *libmsg,
                unsigned int niov, struct iovec *iov, ptl_kiov_t *kiov,
                size_t offset, size_t mlen, size_t rlen)
{
        kra_conn_t  *conn = private;
        kra_msg_t   *rxmsg = conn->rac_rxmsg;
ptl_err_t
kranal_recv (lib_nal_t *nal, void *private, lib_msg_t *msg,
             unsigned int niov, struct iovec *iov,
             size_t offset, size_t mlen, size_t rlen)
{
        return kranal_recvmsg(nal, private, msg, niov, iov, NULL,
                              offset, mlen, rlen);
}
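/* As above, but receive into page fragments (kiov) */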
ptl_err_t
kranal_recv_pages (lib_nal_t *nal, void *private, lib_msg_t *msg,
                   unsigned int niov, ptl_kiov_t *kiov,
                   size_t offset, size_t mlen, size_t rlen)
{
        return kranal_recvmsg(nal, private, msg, niov, NULL, kiov,
                              offset, mlen, rlen);
}
int
kranal_connd (void *arg)
{
        char               name[16];
        wait_queue_t       wait;
        unsigned long      flags;
        kra_peer_t        *peer;

        snprintf(name, sizeof(name), "kranal_connd_%02ld", (long)arg);
        kportal_daemonize(name);
        kportal_blockallsigs();
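        /* Service loop: pull peers off kra_connd_peers and connect to them,
         * sleeping (TASK_INTERRUPTIBLE) whenever the queue is empty */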
                if (!list_empty(&kranal_data.kra_connd_peers)) {
                        peer = list_entry(kranal_data.kra_connd_peers.next,
                                          kra_peer_t, rap_connd_list);
                        list_del_init(&peer->rap_connd_list);
                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);

                        kranal_peer_decref(peer);

                        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
                        continue;
                }

                set_current_state(TASK_INTERRUPTIBLE);
                /* Compute how many table entries to check now so I
                 * get round the whole table fast enough (NB I do
                 * this at fixed intervals of 'p' seconds) */
                chunk = conn_entries;
                if (min_timeout > n * p)
                        chunk = (chunk * n * p) / min_timeout;
                if (chunk == 0)
                        chunk = 1;
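                /* e.g. (illustrative numbers only): conn_entries == 1024,
                 * n == 4 checks per timeout, p == 1s, min_timeout == 60s
                 * gives chunk == 1024 * 4 * 1 / 60 == 68 entries per pass,
                 * so the whole table is covered every ~15s, i.e. each
                 * connection is checked ~4 times within its timeout */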
        while (!kranal_data.kra_shutdown) {
                /* Safe: kra_shutdown only set when quiescent */

                if (busy_loops++ >= RANAL_RESCHED) {
                        spin_unlock_irqrestore(&dev->rad_lock, flags);

                        our_cond_resched();
                        busy_loops = 0;

                        spin_lock_irqsave(&dev->rad_lock, flags);
                }
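                /* NB rad_lock is held across the service loop; yielding
                 * every RANAL_RESCHED iterations stops a busy scheduler
                 * starving other threads on this CPU */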
                if (dev->rad_ready) {
                        /* Device callback fired since I last checked it */

                        spin_lock_irqsave(&dev->rad_lock, flags);
                }

                if (!list_empty(&dev->rad_connq)) {
                        /* Connection needs attention */
                        conn = list_entry(dev->rad_connq.next,