X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fklnds%2Fqswlnd%2Fqswlnd_cb.c;h=99eb1cc134828e3300d2e8ccd1b13ddd8adbac45;hb=07bd49670282fcd75f1a937b621aa3c11db88407;hp=243918be5e00ea61e03bc7481644d412549b2242;hpb=2841be335687840cf98961e6c6cde6ee9312e4d7;p=fs%2Flustre-release.git

diff --git a/lnet/klnds/qswlnd/qswlnd_cb.c b/lnet/klnds/qswlnd/qswlnd_cb.c
index 243918b..99eb1cc 100644
--- a/lnet/klnds/qswlnd/qswlnd_cb.c
+++ b/lnet/klnds/qswlnd/qswlnd_cb.c
@@ -386,7 +386,7 @@ kqswnal_get_idle_tx (void)
         cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds);
         ktx->ktx_launcher = current->pid;
 
-        cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
+	atomic_inc(&kqswnal_data.kqn_pending_txs);
 
 	spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
 
@@ -398,16 +398,16 @@ kqswnal_get_idle_tx (void)
 void
 kqswnal_tx_done_in_thread_context (kqswnal_tx_t *ktx)
 {
-        lnet_msg_t *lnetmsg0 = NULL;
-        lnet_msg_t *lnetmsg1 = NULL;
-        int status0 = 0;
-        int status1 = 0;
-        kqswnal_rx_t *krx;
+	lnet_msg_t *lnetmsg0 = NULL;
+	lnet_msg_t *lnetmsg1 = NULL;
+	int status0 = 0;
+	int status1 = 0;
+	kqswnal_rx_t *krx;
 
-        LASSERT (!cfs_in_interrupt());
+	LASSERT (!in_interrupt());
 
-        if (ktx->ktx_status == -EHOSTDOWN)
-                kqswnal_notify_peer_down(ktx);
+	if (ktx->ktx_status == -EHOSTDOWN)
+		kqswnal_notify_peer_down(ktx);
 
         switch (ktx->ktx_state) {
         case KTX_RDMA_FETCH:            /* optimized PUT/REPLY handled */
@@ -504,21 +504,21 @@ kqswnal_tx_done_in_thread_context (kqswnal_tx_t *ktx)
 void
 kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
 {
-        unsigned long flags;
+	unsigned long flags;
 
-        ktx->ktx_status = status;
+	ktx->ktx_status = status;
 
-        if (!cfs_in_interrupt()) {
-                kqswnal_tx_done_in_thread_context(ktx);
-                return;
-        }
+	if (!in_interrupt()) {
+		kqswnal_tx_done_in_thread_context(ktx);
+		return;
+	}
 
-        /* Complete the send in thread context */
+	/* Complete the send in thread context */
 	spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
 
 	cfs_list_add_tail(&ktx->ktx_schedlist,
 			  &kqswnal_data.kqn_donetxds);
-	cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+	wake_up(&kqswnal_data.kqn_sched_waitq);
 
 	spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
@@ -602,13 +602,13 @@ kqswnal_txhandler(EP_TXD *txd, void *arg, int status)
 int
 kqswnal_launch (kqswnal_tx_t *ktx)
 {
-        /* Don't block for transmit descriptor if we're in interrupt context */
-        int attr = cfs_in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
-        int dest = kqswnal_nid2elanid (ktx->ktx_nid);
-        unsigned long flags;
-        int rc;
+	/* Don't block for transmit descriptor if we're in interrupt context */
+	int attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0;
+	int dest = kqswnal_nid2elanid (ktx->ktx_nid);
+	unsigned long flags;
+	int rc;
 
-        ktx->ktx_launchtime = cfs_time_current();
+	ktx->ktx_launchtime = cfs_time_current();
 
         if (kqswnal_data.kqn_shuttingdown)
                 return (-ESHUTDOWN);
@@ -669,7 +669,7 @@ kqswnal_launch (kqswnal_tx_t *ktx)
 
 	cfs_list_add_tail(&ktx->ktx_schedlist,
 			  &kqswnal_data.kqn_delayedtxds);
-	cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+	wake_up(&kqswnal_data.kqn_sched_waitq);
 
 	spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 
@@ -899,9 +899,9 @@ kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg,
         ktx->ktx_args[0] = krx;
         ktx->ktx_args[1] = lntmsg;
 
-        LASSERT (cfs_atomic_read(&krx->krx_refcount) > 0);
+	LASSERT (atomic_read(&krx->krx_refcount) > 0);
         /* Take an extra ref for the completion callback */
-        cfs_atomic_inc(&krx->krx_refcount);
+	atomic_inc(&krx->krx_refcount);
 
         /* Map on the rail the RPC prefers */
         ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx,
@@ -978,7 +978,7 @@ kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg,
                 kqswnal_put_idle_tx (ktx);
         }
 
-        cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
+	atomic_dec(&kqswnal_data.kqn_pending_txs);
         return (rc);
 }
 
@@ -1005,18 +1005,18 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
                payload_nob, payload_niov, libcfs_id2str(target));
 
-        LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= LNET_MAX_IOV);
+	LASSERT (payload_nob == 0 || payload_niov > 0);
+	LASSERT (payload_niov <= LNET_MAX_IOV);
 
-        /* It must be OK to kmap() if required */
-        LASSERT (payload_kiov == NULL || !cfs_in_interrupt ());
-        /* payload is either all vaddrs or all pages */
-        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+	/* It must be OK to kmap() if required */
+	LASSERT (payload_kiov == NULL || !in_interrupt ());
+	/* payload is either all vaddrs or all pages */
+	LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
 
-        if (kqswnal_nid2elanid (target.nid) < 0) {
-                CERROR("%s not in my cluster\n", libcfs_nid2str(target.nid));
-                return -EIO;
-        }
+	if (kqswnal_nid2elanid (target.nid) < 0) {
+		CERROR("%s not in my cluster\n", libcfs_nid2str(target.nid));
+		return -EIO;
+	}
 
         /* I may not block for a transmit descriptor if I might block the
          * router, receiver, or an interrupt handler. */
@@ -1254,14 +1254,14 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         }
 
-        cfs_atomic_dec(&kqswnal_data.kqn_pending_txs);
+	atomic_dec(&kqswnal_data.kqn_pending_txs);
         return (rc == 0 ? 0 : -EIO);
 }
 
 void
 kqswnal_requeue_rx (kqswnal_rx_t *krx)
 {
-        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+	LASSERT (atomic_read(&krx->krx_refcount) == 0);
         LASSERT (!krx->krx_rpc_reply_needed);
 
         krx->krx_state = KRX_POSTED;
 
@@ -1296,29 +1296,29 @@ kqswnal_rpc_complete (EP_RXD *rxd)
 void
 kqswnal_rx_done (kqswnal_rx_t *krx)
 {
-        int rc;
+	int rc;
 
-        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0);
+	LASSERT (atomic_read(&krx->krx_refcount) == 0);
 
-        if (krx->krx_rpc_reply_needed) {
-                /* We've not completed the peer's RPC yet... */
-                krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
-                krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
+	if (krx->krx_rpc_reply_needed) {
+		/* We've not completed the peer's RPC yet... */
+		krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC;
+		krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION;
 
-                LASSERT (!cfs_in_interrupt());
+		LASSERT (!in_interrupt());
 
-                rc = ep_complete_rpc(krx->krx_rxd,
-                                     kqswnal_rpc_complete, krx,
-                                     &krx->krx_rpc_reply.ep_statusblk,
-                                     NULL, NULL, 0);
-                if (rc == EP_SUCCESS)
-                        return;
+		rc = ep_complete_rpc(krx->krx_rxd,
+				     kqswnal_rpc_complete, krx,
+				     &krx->krx_rpc_reply.ep_statusblk,
+				     NULL, NULL, 0);
+		if (rc == EP_SUCCESS)
+			return;
 
-                CERROR("can't complete RPC: %d\n", rc);
-                krx->krx_rpc_reply_needed = 0;
-        }
+		CERROR("can't complete RPC: %d\n", rc);
+		krx->krx_rpc_reply_needed = 0;
+	}
 
-        kqswnal_requeue_rx(krx);
+	kqswnal_requeue_rx(krx);
 }
 
 void
@@ -1333,7 +1333,7 @@ kqswnal_parse (kqswnal_rx_t *krx)
         int nob;
         int rc;
 
-        LASSERT (cfs_atomic_read(&krx->krx_refcount) == 1);
+	LASSERT (atomic_read(&krx->krx_refcount) == 1);
 
         if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) {
                 CERROR("Short message %d received from %s\n",
@@ -1521,7 +1521,7 @@ kqswnal_rxhandler(EP_RXD *rxd)
 
         /* Default to failure if an RPC reply is requested but not handled */
         krx->krx_rpc_reply.msg.status = -EPROTO;
-        cfs_atomic_set (&krx->krx_refcount, 1);
+	atomic_set (&krx->krx_refcount, 1);
 
         if (status != EP_SUCCESS) {
                 /* receives complete with failure when receiver is removed */
@@ -1530,19 +1530,19 @@ kqswnal_rxhandler(EP_RXD *rxd)
                 else
                         CERROR("receive status failed with status %d nob %d\n",
                                ep_rxd_status(rxd), nob);
-                kqswnal_rx_decref(krx);
-                return;
-        }
+		kqswnal_rx_decref(krx);
+		return;
+	}
 
-        if (!cfs_in_interrupt()) {
-                kqswnal_parse(krx);
-                return;
-        }
+	if (!in_interrupt()) {
+		kqswnal_parse(krx);
+		return;
+	}
 
 	spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
 
 	cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
-	cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+	wake_up(&kqswnal_data.kqn_sched_waitq);
 
 	spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
@@ -1559,17 +1559,17 @@ kqswnal_recv (lnet_ni_t *ni,
               unsigned int mlen,
               unsigned int rlen)
 {
-        kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
-        lnet_nid_t fromnid;
-        kqswnal_msg_t *msg;
-        lnet_hdr_t *hdr;
-        kqswnal_remotemd_t *rmd;
-        int msg_offset;
-        int rc;
-
-        LASSERT (!cfs_in_interrupt ());         /* OK to map */
-        /* Either all pages or all vaddrs */
-        LASSERT (!(kiov != NULL && iov != NULL));
+	kqswnal_rx_t *krx = (kqswnal_rx_t *)private;
+	lnet_nid_t fromnid;
+	kqswnal_msg_t *msg;
+	lnet_hdr_t *hdr;
+	kqswnal_remotemd_t *rmd;
+	int msg_offset;
+	int rc;
+
+	LASSERT (!in_interrupt ());		/* OK to map */
+	/* Either all pages or all vaddrs */
+	LASSERT (!(kiov != NULL && iov != NULL));
 
         fromnid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ep_rxd_node(krx->krx_rxd));
         msg = (kqswnal_msg_t *)page_address(krx->krx_kiov[0].kiov_page);
@@ -1655,21 +1655,21 @@ kqswnal_recv (lnet_ni_t *ni,
 }
 
 int
-kqswnal_thread_start (int (*fn)(void *arg), void *arg)
+kqswnal_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-        long pid = cfs_create_thread (fn, arg, 0);
+	struct task_struct *task = cfs_thread_run(fn, arg, name);
 
-        if (pid < 0)
-                return ((int)pid);
+	if (IS_ERR(task))
+		return PTR_ERR(task);
 
-        cfs_atomic_inc (&kqswnal_data.kqn_nthreads);
-        return (0);
+	atomic_inc(&kqswnal_data.kqn_nthreads);
+	return 0;
 }
 
 void
 kqswnal_thread_fini (void)
 {
-        cfs_atomic_dec (&kqswnal_data.kqn_nthreads);
+	atomic_dec (&kqswnal_data.kqn_nthreads);
 }
 
 int
@@ -1682,7 +1682,6 @@ kqswnal_scheduler (void *arg)
         int counter = 0;
         int did_something;
 
-        cfs_daemonize ("kqswnal_sched");
         cfs_block_allsigs ();
 
 	spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
@@ -1736,7 +1735,7 @@
                                                libcfs_nid2str(ktx->ktx_nid), rc);
                                         kqswnal_tx_done (ktx, rc);
                                 }
-                                cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
+				atomic_dec (&kqswnal_data.kqn_pending_txs);
 
                                 did_something = 1;
 				spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
@@ -1756,24 +1755,24 @@
                                  * when there's nothing left to do */
                                 break;
                         }
-                        cfs_wait_event_interruptible_exclusive (
-                                kqswnal_data.kqn_sched_waitq,
-                                kqswnal_data.kqn_shuttingdown == 2 ||
-                                !cfs_list_empty(&kqswnal_data. \
-                                                kqn_readyrxds) ||
-                                !cfs_list_empty(&kqswnal_data. \
-                                                kqn_donetxds) ||
-                                !cfs_list_empty(&kqswnal_data. \
-                                                kqn_delayedtxds, rc));
-                        LASSERT (rc == 0);
-                } else if (need_resched())
-                        cfs_schedule ();
+			rc = wait_event_interruptible_exclusive (
+				kqswnal_data.kqn_sched_waitq,
+				kqswnal_data.kqn_shuttingdown == 2 ||
+				!cfs_list_empty(&kqswnal_data. \
+						kqn_readyrxds) ||
+				!cfs_list_empty(&kqswnal_data. \
+						kqn_donetxds) ||
+				!cfs_list_empty(&kqswnal_data. \
+						kqn_delayedtxds));
+			LASSERT (rc == 0);
+		} else if (need_resched())
+			schedule ();
 
 		spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
-                                  flags);
-        }
-        }
+				  flags);
+	}
+	}
 
-        kqswnal_thread_fini ();
-        return (0);
+	kqswnal_thread_fini ();
+	return 0;
 }
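The patch leaves qswlnd's completion model untouched: completions queued under kqn_sched_lock from (possibly) interrupt context, and a scheduler thread woken through the wait queue. A minimal self-contained sketch of that producer/consumer pattern using the native primitives the patch adopts (in_interrupt(), wake_up(), wait_event_interruptible_exclusive()) follows; the demo_* names are illustrative only and are not part of qswlnd:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/hardirq.h>
#include <linux/sched.h>

struct demo_tx {
	struct list_head tx_schedlist;	/* link on demo_donetxds */
	int              tx_status;
};

static LIST_HEAD(demo_donetxds);
static DEFINE_SPINLOCK(demo_sched_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_sched_waitq);
static int demo_shuttingdown;

/* May run in hard-IRQ context: defer real completion to the thread. */
static void demo_tx_done(struct demo_tx *tx, int status)
{
	unsigned long flags;

	tx->tx_status = status;
	if (!in_interrupt()) {
		/* process context: safe to complete synchronously here */
		return;
	}

	spin_lock_irqsave(&demo_sched_lock, flags);
	list_add_tail(&tx->tx_schedlist, &demo_donetxds);
	wake_up(&demo_sched_waitq);	/* wakes one exclusive waiter */
	spin_unlock_irqrestore(&demo_sched_lock, flags);
}

/* Scheduler thread body: sleep until work arrives or shutdown begins. */
static int demo_scheduler(void *arg)
{
	int rc;

	while (!demo_shuttingdown) {
		rc = wait_event_interruptible_exclusive(demo_sched_waitq,
					demo_shuttingdown ||
					!list_empty(&demo_donetxds));
		if (rc != 0)
			break;		/* interrupted by a signal */
		/* drain demo_donetxds under demo_sched_lock here */
	}
	return 0;
}

Note that wait_event_interruptible_exclusive() returns its result instead of writing it through a macro argument, which is why the patch rewrites the call as "rc = wait_event_interruptible_exclusive(...)" and drops the trailing rc parameter. The unlocked list_empty() check in the wait condition is safe for the same reason it is in the driver: a wakeup issued while the lock is held can at worst be spurious, and the condition is re-evaluated before the thread sleeps again.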
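Thread creation changes shape as well: cfs_create_thread() returned a pid (negative on failure) and required cfs_daemonize(), while the task_struct-based helper used above returns a pointer that must be checked with IS_ERR()/PTR_ERR(), and the daemonize step disappears because kernel threads no longer inherit a userspace context. Assuming cfs_thread_run() is a thin wrapper over the upstream kthread_run() (an assumption; only the wrapper name appears in this diff), the open-coded equivalent looks like this, with demo_* names again being illustrative:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/atomic.h>

static atomic_t demo_nthreads = ATOMIC_INIT(0);

/* Start a named kernel thread; returns 0 or a -ve errno, never a pid. */
static int demo_thread_start(int (*fn)(void *), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	if (IS_ERR(task))
		return PTR_ERR(task);	/* e.g. -ENOMEM */

	atomic_inc(&demo_nthreads);
	return 0;
}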