/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
+ * Copyright (c) 2012, Intel Corporation.
+ *
* Author: Eric Barton <eric@bartonsoftware.com>
*
* This file is part of Portals, http://www.lustre.org
kqswnal_tx_t *
kqswnal_get_idle_tx (void)
{
- unsigned long flags;
- kqswnal_tx_t *ktx;
+ unsigned long flags;
+ kqswnal_tx_t *ktx;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
- if (kqswnal_data.kqn_shuttingdown ||
- cfs_list_empty (&kqswnal_data.kqn_idletxds)) {
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock,
- flags);
+ if (kqswnal_data.kqn_shuttingdown ||
+ cfs_list_empty(&kqswnal_data.kqn_idletxds)) {
+ spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
- return NULL;
- }
+ return NULL;
+ }
ktx = cfs_list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t,
ktx_list);
cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
ktx->ktx_launcher = current->pid;
- cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
+ atomic_inc(&kqswnal_data.kqn_pending_txs);
spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
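+ /* ktx has moved from the idle to the active list under
+ * kqn_idletxd_lock; kqn_pending_txs counts it until the matching
+ * atomic_dec() in the send path once this tx is resolved */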
void
kqswnal_tx_done_in_thread_context (kqswnal_tx_t *ktx)
{
- lnet_msg_t *lnetmsg0 = NULL;
- lnet_msg_t *lnetmsg1 = NULL;
- int status0 = 0;
- int status1 = 0;
- kqswnal_rx_t *krx;
+ lnet_msg_t *lnetmsg0 = NULL;
+ lnet_msg_t *lnetmsg1 = NULL;
+ int status0 = 0;
+ int status1 = 0;
+ kqswnal_rx_t *krx;
- LASSERT (!cfs_in_interrupt());
+ LASSERT (!in_interrupt());
- if (ktx->ktx_status == -EHOSTDOWN)
- kqswnal_notify_peer_down(ktx);
+ if (ktx->ktx_status == -EHOSTDOWN)
+ kqswnal_notify_peer_down(ktx);
switch (ktx->ktx_state) {
case KTX_RDMA_FETCH: /* optimized PUT/REPLY handled */
void
kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
{
- unsigned long flags;
+ unsigned long flags;
- ktx->ktx_status = status;
+ ktx->ktx_status = status;
- if (!cfs_in_interrupt()) {
- kqswnal_tx_done_in_thread_context(ktx);
- return;
- }
+ if (!in_interrupt()) {
+ kqswnal_tx_done_in_thread_context(ktx);
+ return;
+ }
- /* Complete the send in thread context */
+ /* Complete the send in thread context */
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
cfs_list_add_tail(&ktx->ktx_schedlist,
&kqswnal_data.kqn_donetxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
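+ /* When kqswnal_tx_done() runs in interrupt context the completion is
+ * deferred: the ktx is queued on kqn_donetxds and the scheduler thread
+ * is woken to finish it via kqswnal_tx_done_in_thread_context() */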
int
kqswnal_launch (kqswnal_tx_t *ktx)
{
- /* Don't block for transmit descriptor if we're in interrupt context */
- int attr = cfs_in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
- int dest = kqswnal_nid2elanid (ktx->ktx_nid);
- unsigned long flags;
- int rc;
+ /* Don't block for transmit descriptor if we're in interrupt context */
+ int attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
+ int dest = kqswnal_nid2elanid (ktx->ktx_nid);
+ unsigned long flags;
+ int rc;
- ktx->ktx_launchtime = cfs_time_current();
+ ktx->ktx_launchtime = cfs_time_current();
if (kqswnal_data.kqn_shuttingdown)
return (-ESHUTDOWN);
cfs_list_add_tail(&ktx->ktx_schedlist,
&kqswnal_data.kqn_delayedtxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
ktx->ktx_args[0] = krx;
ktx->ktx_args[1] = lntmsg;
- LASSERT (cfs_atomic_read(&krx->krx_refcount) > 0);
+ LASSERT (atomic_read(&krx->krx_refcount) > 0);
/* Take an extra ref for the completion callback */
- cfs_atomic_inc(&krx->krx_refcount);
+ atomic_inc(&krx->krx_refcount);
/* Map on the rail the RPC prefers */
ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx,
kqswnal_put_idle_tx (ktx);
}
- cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
+ atomic_dec(&kqswnal_data.kqn_pending_txs);
return (rc);
}
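+ /* the atomic_dec() above pairs with the atomic_inc() in
+ * kqswnal_get_idle_tx(): kqn_pending_txs tracks every tx taken from
+ * the idle pool until its send attempt has been resolved */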
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
- /* It must be OK to kmap() if required */
- LASSERT (payload_kiov == NULL || !cfs_in_interrupt ());
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ /* It must be OK to kmap() if required */
+ LASSERT (payload_kiov == NULL || !in_interrupt ());
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- if (kqswnal_nid2elanid (target.nid) < 0) {
- CERROR("%s not in my cluster\n", libcfs_nid2str(target.nid));
- return -EIO;
- }
+ if (kqswnal_nid2elanid (target.nid) < 0) {
+ CERROR("%s not in my cluster\n", libcfs_nid2str(target.nid));
+ return -EIO;
+ }
/* I may not block for a transmit descriptor if I might block the
* router, receiver, or an interrupt handler. */
}
- cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
+ atomic_dec(&kqswnal_data.kqn_pending_txs);
return (rc == 0 ? 0 : -EIO);
}
void
kqswnal_requeue_rx (kqswnal_rx_t *krx)
{
- LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+ LASSERT (atomic_read(&krx->krx_refcount) == 0);
LASSERT (!krx->krx_rpc_reply_needed);
krx->krx_state = KRX_POSTED;
void
kqswnal_rx_done (kqswnal_rx_t *krx)
{
- int rc;
+ int rc;
- LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+ LASSERT (atomic_read(&krx->krx_refcount) == 0);
- if (krx->krx_rpc_reply_needed) {
- /* We've not completed the peer's RPC yet... */
- krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
- krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
+ if (krx->krx_rpc_reply_needed) {
+ /* We've not completed the peer's RPC yet... */
+ krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
+ krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
- LASSERT (!cfs_in_interrupt());
+ LASSERT (!in_interrupt());
- rc = ep_complete_rpc(krx->krx_rxd,
- kqswnal_rpc_complete, krx,
- &krx->krx_rpc_reply.ep_statusblk,
- NULL, NULL, 0);
- if (rc == EP_SUCCESS)
- return;
+ rc = ep_complete_rpc(krx->krx_rxd,
+ kqswnal_rpc_complete, krx,
+ &krx->krx_rpc_reply.ep_statusblk,
+ NULL, NULL, 0);
+ if (rc == EP_SUCCESS)
+ return;
- CERROR("can't complete RPC: %d\n", rc);
- krx->krx_rpc_reply_needed = 0;
- }
+ CERROR("can't complete RPC: %d\n", rc);
+ krx->krx_rpc_reply_needed = 0;
+ }
- kqswnal_requeue_rx(krx);
+ kqswnal_requeue_rx(krx);
}
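+ /* on EP_SUCCESS, ep_complete_rpc() takes over the krx and
+ * kqswnal_rpc_complete() presumably requeues it from its completion
+ * path; on failure the reply is abandoned and the buffer is recycled
+ * immediately via kqswnal_requeue_rx() */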
void
int nob;
int rc;
- LASSERT (cfs_atomic_read(&krx->krx_refcount) == 1);
+ LASSERT (atomic_read(&krx->krx_refcount) == 1);
if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) {
CERROR("Short message %d received from %s\n",
/* Default to failure if an RPC reply is requested but not handled */
krx->krx_rpc_reply.msg.status = -EPROTO;
- cfs_atomic_set (&krx->krx_refcount, 1);
+ atomic_set(&krx->krx_refcount, 1);
if (status != EP_SUCCESS) {
/* receives complete with failure when receiver is removed */
else
CERROR("receive status failed with status %d nob %d\n",
ep_rxd_status(rxd), nob);
- kqswnal_rx_decref(krx);
- return;
- }
+ kqswnal_rx_decref(krx);
+ return;
+ }
- if (!cfs_in_interrupt()) {
- kqswnal_parse(krx);
- return;
- }
+ if (!in_interrupt()) {
+ kqswnal_parse(krx);
+ return;
+ }
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ wake_up(&kqswnal_data.kqn_sched_waitq);
spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
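+ /* mirroring the tx side: receives that complete in interrupt context
+ * are queued on kqn_readyrxds and the scheduler thread calls
+ * kqswnal_parse() in process context, where kmap() is safe */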
unsigned int mlen,
unsigned int rlen)
{
- kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
- lnet_nid_t fromnid;
- kqswnal_msg_t *msg;
- lnet_hdr_t *hdr;
- kqswnal_remotemd_t *rmd;
- int msg_offset;
- int rc;
-
- LASSERT (!cfs_in_interrupt ()); /* OK to map */
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
+ kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
+ lnet_nid_t fromnid;
+ kqswnal_msg_t *msg;
+ lnet_hdr_t *hdr;
+ kqswnal_remotemd_t *rmd;
+ int msg_offset;
+ int rc;
+
+ LASSERT (!in_interrupt ()); /* OK to map */
+ /* Either all pages or all vaddrs */
+ LASSERT (!(kiov != NULL && iov != NULL));
fromnid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ep_rxd_node(krx->krx_rxd));
msg = (kqswnal_msg_t *)page_address(krx->krx_kiov[0].kiov_page);
}
int
-kqswnal_thread_start (int (*fn)(void *arg), void *arg)
+kqswnal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid = cfs_create_thread (fn, arg, 0);
+ struct task_struct *task = kthread_run(fn, arg, name);
- if (pid < 0)
- return ((int)pid);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
- cfs_atomic_inc (&kqswnal_data.kqn_nthreads);
- return (0);
+ atomic_inc(&kqswnal_data.kqn_nthreads);
+ return 0;
}
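+ /* minimal usage sketch (assumed caller, not shown in this hunk):
+ *
+ * rc = kqswnal_thread_start(kqswnal_scheduler, NULL, "kqswnal_sched");
+ *
+ * kthread_run() names the task itself, which is why cfs_daemonize() is
+ * dropped from the scheduler below; this assumes <linux/kthread.h> is
+ * pulled in elsewhere in the patch */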
void
kqswnal_thread_fini (void)
{
- cfs_atomic_dec (&kqswnal_data.kqn_nthreads);
+ atomic_dec(&kqswnal_data.kqn_nthreads);
}
int
int counter = 0;
int did_something;
- cfs_daemonize ("kqswnal_sched");
cfs_block_allsigs ();
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
libcfs_nid2str(ktx->ktx_nid), rc);
kqswnal_tx_done (ktx, rc);
}
- cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
+ atomic_dec(&kqswnal_data.kqn_pending_txs);
did_something = 1;
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
* when there's nothing left to do */
break;
}
- cfs_wait_event_interruptible_exclusive (
- kqswnal_data.kqn_sched_waitq,
- kqswnal_data.kqn_shuttingdown == 2 ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_readyrxds) ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_donetxds) ||
- !cfs_list_empty(&kqswnal_data. \
- kqn_delayedtxds, rc));
- LASSERT (rc == 0);
- } else if (need_resched())
- cfs_schedule ();
+ rc = wait_event_interruptible_exclusive(
+ kqswnal_data.kqn_sched_waitq,
+ kqswnal_data.kqn_shuttingdown == 2 ||
+ !cfs_list_empty(&kqswnal_data.kqn_readyrxds) ||
+ !cfs_list_empty(&kqswnal_data.kqn_donetxds) ||
+ !cfs_list_empty(&kqswnal_data.kqn_delayedtxds));
+ /* signals are blocked via cfs_block_allsigs() above, so the
+ * wait cannot return -ERESTARTSYS */
+ LASSERT (rc == 0);
+ } else if (need_resched())
+ schedule ();
spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
- flags);
- }
- }
+ flags);
+ }
+ }
- kqswnal_thread_fini ();
- return (0);
+ kqswnal_thread_fini ();
+ return 0;
}
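+ /* each scheduler thread exits through kqswnal_thread_fini(), dropping
+ * kqn_nthreads; shutdown presumably waits for this count to reach zero
+ * before tearing down */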