X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fqswlnd%2Fqswlnd_cb.c;h=c34ba6dc291fe19ab8b52c4082409a17020ee48c;hp=c509d10d603f17acec339361e8b33cb86a631f02;hb=b43a6b1800265608cfa18159d4d0d006a1c23015;hpb=cf814617a3151e53c30204fea07afad595b8eddc diff --git a/lnet/klnds/qswlnd/qswlnd_cb.c b/lnet/klnds/qswlnd/qswlnd_cb.c index c509d10..c34ba6d 100644 --- a/lnet/klnds/qswlnd/qswlnd_cb.c +++ b/lnet/klnds/qswlnd/qswlnd_cb.c @@ -1,8 +1,7 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: +/* + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * - * Copyright (C) 2002 Cluster File Systems, Inc. - * Author: Eric Barton + * Author: Eric Barton * * This file is part of Portals, http://www.lustre.org * @@ -18,7 +17,6 @@ * You should have received a copy of the GNU General Public License * along with Portals; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #include "qswlnd.h" @@ -357,12 +355,12 @@ kqswnal_put_idle_tx (kqswnal_tx_t *ktx) kqswnal_unmap_tx (ktx); /* release temporary mappings */ ktx->ktx_state = KTX_IDLE; - spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags); + cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags); - list_del (&ktx->ktx_list); /* take off active list */ - list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds); + cfs_list_del (&ktx->ktx_list); /* take off active list */ + cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds); - spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags); + cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags); } kqswnal_tx_t * @@ -371,23 +369,25 @@ kqswnal_get_idle_tx (void) unsigned long flags; kqswnal_tx_t *ktx; - spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags); + cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags); if (kqswnal_data.kqn_shuttingdown || - list_empty (&kqswnal_data.kqn_idletxds)) { - spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags); + cfs_list_empty (&kqswnal_data.kqn_idletxds)) { + cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, + flags); return NULL; } - ktx = list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t, ktx_list); - list_del (&ktx->ktx_list); + ktx = cfs_list_entry (kqswnal_data.kqn_idletxds.next, kqswnal_tx_t, + ktx_list); + cfs_list_del (&ktx->ktx_list); - list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds); + cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_activetxds); ktx->ktx_launcher = current->pid; - atomic_inc(&kqswnal_data.kqn_pending_txs); + cfs_atomic_inc(&kqswnal_data.kqn_pending_txs); - spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags); + cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags); /* Idle descs can't have any mapped (as opposed to pre-mapped) pages */ LASSERT (ktx->ktx_nmappedpages == 0); @@ -402,9 +402,9 @@ kqswnal_tx_done_in_thread_context (kqswnal_tx_t *ktx) int status0 = 0; int status1 = 0; kqswnal_rx_t *krx; - - LASSERT (!in_interrupt()); - + + LASSERT (!cfs_in_interrupt()); + if (ktx->ktx_status == -EHOSTDOWN) kqswnal_notify_peer_down(ktx); @@ -507,19 +507,19 @@ kqswnal_tx_done (kqswnal_tx_t *ktx, int status) ktx->ktx_status = status; - if (!in_interrupt()) { + if (!cfs_in_interrupt()) { kqswnal_tx_done_in_thread_context(ktx); return; } /* Complete the send in thread context */ - spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags); - - list_add_tail(&ktx->ktx_schedlist, - 
&kqswnal_data.kqn_donetxds); - wake_up(&kqswnal_data.kqn_sched_waitq); - - spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags); + + cfs_list_add_tail(&ktx->ktx_schedlist, + &kqswnal_data.kqn_donetxds); + cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq); + + cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags); } static void @@ -535,7 +535,7 @@ kqswnal_txhandler(EP_TXD *txd, void *arg, int status) if (status != EP_SUCCESS) { - CDEBUG (D_NETERROR, "Tx completion to %s failed: %d\n", + CNETERR("Tx completion to %s failed: %d\n", libcfs_nid2str(ktx->ktx_nid), status); status = -EHOSTDOWN; @@ -602,7 +602,7 @@ int kqswnal_launch (kqswnal_tx_t *ktx) { /* Don't block for transmit descriptor if we're in interrupt context */ - int attr = in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0; + int attr = cfs_in_interrupt() ? (EP_NO_SLEEP | EP_NO_ALLOC) : 0; int dest = kqswnal_nid2elanid (ktx->ktx_nid); unsigned long flags; int rc; @@ -620,8 +620,7 @@ kqswnal_launch (kqswnal_tx_t *ktx) switch (ktx->ktx_state) { case KTX_GETTING: case KTX_PUTTING: - if (the_lnet.ln_testprotocompat != 0 && - the_lnet.ln_ptlcompat == 0) { + if (the_lnet.ln_testprotocompat != 0) { kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer; /* single-shot proto test: @@ -653,7 +652,7 @@ kqswnal_launch (kqswnal_tx_t *ktx) kqswnal_txhandler, ktx, NULL, ktx->ktx_frags, ktx->ktx_nfrag); break; - + default: LBUG(); rc = -EINVAL; /* no compiler warning please */ @@ -665,16 +664,19 @@ kqswnal_launch (kqswnal_tx_t *ktx) return (0); case EP_ENOMEM: /* can't allocate ep txd => queue for later */ - spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); - list_add_tail (&ktx->ktx_schedlist, &kqswnal_data.kqn_delayedtxds); - wake_up (&kqswnal_data.kqn_sched_waitq); + cfs_list_add_tail (&ktx->ktx_schedlist, + &kqswnal_data.kqn_delayedtxds); + cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq); - spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, + flags); return (0); default: /* fatal error */ - CDEBUG (D_NETERROR, "Tx to %s failed: %d\n", libcfs_nid2str(ktx->ktx_nid), rc); + CNETERR ("Tx to %s failed: %d\n", + libcfs_nid2str(ktx->ktx_nid), rc); kqswnal_notify_peer_down(ktx); return (-EHOSTUNREACH); } @@ -896,9 +898,9 @@ kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg, ktx->ktx_args[0] = krx; ktx->ktx_args[1] = lntmsg; - LASSERT (atomic_read(&krx->krx_refcount) > 0); + LASSERT (cfs_atomic_read(&krx->krx_refcount) > 0); /* Take an extra ref for the completion callback */ - atomic_inc(&krx->krx_refcount); + cfs_atomic_inc(&krx->krx_refcount); /* Map on the rail the RPC prefers */ ktx->ktx_rail = ep_rcvr_prefrail(krx->krx_eprx, @@ -975,7 +977,7 @@ kqswnal_rdma (kqswnal_rx_t *krx, lnet_msg_t *lntmsg, kqswnal_put_idle_tx (ktx); } - atomic_dec(&kqswnal_data.kqn_pending_txs); + cfs_atomic_dec(&kqswnal_data.kqn_pending_txs); return (rc); } @@ -1006,7 +1008,7 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) LASSERT (payload_niov <= LNET_MAX_IOV); /* It must be OK to kmap() if required */ - LASSERT (payload_kiov == NULL || !in_interrupt ()); + LASSERT (payload_kiov == NULL || !cfs_in_interrupt ()); /* payload is either all vaddrs or all pages */ LASSERT (!(payload_kiov != NULL && payload_iov != NULL)); @@ -1056,20 +1058,13 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) * ktx_frags[1] and onward with the 
network addresses * of the buffer frags. */ - if (the_lnet.ln_ptlcompat == 2) { - /* Strong portals compatibility: send "raw" LNET - * header + rdma descriptor */ - mhdr = (lnet_hdr_t *)ktx->ktx_buffer; - rmd = (kqswnal_remotemd_t *)(mhdr + 1); - } else { - /* Send an RDMA message */ - msg->kqm_magic = LNET_PROTO_QSW_MAGIC; - msg->kqm_version = QSWLND_PROTO_VERSION; - msg->kqm_type = QSWLND_MSG_RDMA; + /* Send an RDMA message */ + msg->kqm_magic = LNET_PROTO_QSW_MAGIC; + msg->kqm_version = QSWLND_PROTO_VERSION; + msg->kqm_type = QSWLND_MSG_RDMA; - mhdr = &msg->kqm_u.rdma.kqrm_hdr; - rmd = &msg->kqm_u.rdma.kqrm_rmd; - } + mhdr = &msg->kqm_u.rdma.kqrm_hdr; + rmd = &msg->kqm_u.rdma.kqrm_rmd; *mhdr = *hdr; nob = (((char *)rmd) - ktx->ktx_buffer); @@ -1105,7 +1100,6 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) ep_nmd_subset(&ktx->ktx_frags[0], &ktx->ktx_ebuffer, 0, nob); #if KQSW_CKSUM - LASSERT (the_lnet.ln_ptlcompat != 2); msg->kqm_nob = nob + payload_nob; msg->kqm_cksum = 0; msg->kqm_cksum = kqswnal_csum(~0, (char *)msg, nob); @@ -1152,21 +1146,13 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) char *payload; kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer; - /* small message: single frag copied into the pre-mapped buffer */ - if (the_lnet.ln_ptlcompat == 2) { - /* Strong portals compatibility: send "raw" LNET header - * + payload */ - mhdr = (lnet_hdr_t *)ktx->ktx_buffer; - payload = (char *)(mhdr + 1); - } else { - /* Send an IMMEDIATE message */ - msg->kqm_magic = LNET_PROTO_QSW_MAGIC; - msg->kqm_version = QSWLND_PROTO_VERSION; - msg->kqm_type = QSWLND_MSG_IMMEDIATE; + /* single frag copied into the pre-mapped buffer */ + msg->kqm_magic = LNET_PROTO_QSW_MAGIC; + msg->kqm_version = QSWLND_PROTO_VERSION; + msg->kqm_type = QSWLND_MSG_IMMEDIATE; - mhdr = &msg->kqm_u.immediate.kqim_hdr; - payload = msg->kqm_u.immediate.kqim_payload; - } + mhdr = &msg->kqm_u.immediate.kqim_hdr; + payload = msg->kqm_u.immediate.kqim_payload; *mhdr = *hdr; nob = (payload - ktx->ktx_buffer) + payload_nob; @@ -1182,7 +1168,6 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) payload_niov, payload_iov, payload_offset, payload_nob); #if KQSW_CKSUM - LASSERT (the_lnet.ln_ptlcompat != 2); msg->kqm_nob = nob; msg->kqm_cksum = 0; msg->kqm_cksum = kqswnal_csum(~0, (char *)msg, nob); @@ -1195,22 +1180,13 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) lnet_hdr_t *mhdr; kqswnal_msg_t *msg = (kqswnal_msg_t *)ktx->ktx_buffer; - /* large message: multiple frags: first is hdr in pre-mapped buffer */ - if (the_lnet.ln_ptlcompat == 2) { - /* Strong portals compatibility: send "raw" LNET header - * + payload */ - mhdr = (lnet_hdr_t *)ktx->ktx_buffer; - nob = sizeof(lnet_hdr_t); - } else { - /* Send an IMMEDIATE message */ - msg->kqm_magic = LNET_PROTO_QSW_MAGIC; - msg->kqm_version = QSWLND_PROTO_VERSION; - msg->kqm_type = QSWLND_MSG_IMMEDIATE; + /* multiple frags: first is hdr in pre-mapped buffer */ + msg->kqm_magic = LNET_PROTO_QSW_MAGIC; + msg->kqm_version = QSWLND_PROTO_VERSION; + msg->kqm_type = QSWLND_MSG_IMMEDIATE; - mhdr = &msg->kqm_u.immediate.kqim_hdr; - nob = offsetof(kqswnal_msg_t, - kqm_u.immediate.kqim_payload); - } + mhdr = &msg->kqm_u.immediate.kqim_hdr; + nob = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload); *mhdr = *hdr; @@ -1245,23 +1221,23 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) #endif nob += payload_nob; } - + ktx->ktx_port = (nob <= KQSW_SMALLMSG) ? 
EP_MSG_SVC_PORTALS_SMALL : EP_MSG_SVC_PORTALS_LARGE; rc = kqswnal_launch (ktx); out: - CDEBUG(rc == 0 ? D_NET : D_NETERROR, "%s %d bytes to %s%s: rc %d\n", - routing ? (rc == 0 ? "Routed" : "Failed to route") : - (rc == 0 ? "Sent" : "Failed to send"), - nob, libcfs_nid2str(target.nid), - target_is_router ? "(router)" : "", rc); + CDEBUG_LIMIT(rc == 0? D_NET :D_NETERROR, "%s %d bytes to %s%s: rc %d\n", + routing ? (rc == 0 ? "Routed" : "Failed to route") : + (rc == 0 ? "Sent" : "Failed to send"), + nob, libcfs_nid2str(target.nid), + target_is_router ? "(router)" : "", rc); if (rc != 0) { lnet_msg_t *repmsg = (lnet_msg_t *)ktx->ktx_args[2]; int state = ktx->ktx_state; - + kqswnal_put_idle_tx (ktx); if (state == KTX_GETTING && repmsg != NULL) { @@ -1277,14 +1253,14 @@ kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) } - atomic_dec(&kqswnal_data.kqn_pending_txs); + cfs_atomic_dec(&kqswnal_data.kqn_pending_txs); return (rc == 0 ? 0 : -EIO); } void kqswnal_requeue_rx (kqswnal_rx_t *krx) { - LASSERT (atomic_read(&krx->krx_refcount) == 0); + LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0); LASSERT (!krx->krx_rpc_reply_needed); krx->krx_state = KRX_POSTED; @@ -1321,14 +1297,14 @@ kqswnal_rx_done (kqswnal_rx_t *krx) { int rc; - LASSERT (atomic_read(&krx->krx_refcount) == 0); + LASSERT (cfs_atomic_read(&krx->krx_refcount) == 0); if (krx->krx_rpc_reply_needed) { /* We've not completed the peer's RPC yet... */ krx->krx_rpc_reply.msg.magic = LNET_PROTO_QSW_MAGIC; krx->krx_rpc_reply.msg.version = QSWLND_PROTO_VERSION; - LASSERT (!in_interrupt()); + LASSERT (!cfs_in_interrupt()); rc = ep_complete_rpc(krx->krx_rxd, kqswnal_rpc_complete, krx, @@ -1356,12 +1332,8 @@ kqswnal_parse (kqswnal_rx_t *krx) int nob; int rc; - LASSERT (atomic_read(&krx->krx_refcount) == 1); + LASSERT (cfs_atomic_read(&krx->krx_refcount) == 1); - /* If ln_ptlcompat is set, peers may send me an "old" unencapsulated - * lnet hdr */ - LASSERT (offsetof(kqswnal_msg_t, kqm_u) <= sizeof(lnet_hdr_t)); - if (krx->krx_nob < offsetof(kqswnal_msg_t, kqm_u)) { CERROR("Short message %d received from %s\n", krx->krx_nob, libcfs_nid2str(fromnid)); @@ -1517,25 +1489,6 @@ kqswnal_parse (kqswnal_rx_t *krx) goto done; } - if (the_lnet.ln_ptlcompat != 0) { - /* Portals compatibility (strong or weak) - * This could be an unencapsulated LNET header. 
If it's big - * enough, let LNET's parser sort it out */ - - if (krx->krx_nob < sizeof(lnet_hdr_t)) { - CERROR("Short portals-compatible message from %s\n", - libcfs_nid2str(fromnid)); - goto done; - } - - krx->krx_raw_lnet_hdr = 1; - rc = lnet_parse(ni, (lnet_hdr_t *)msg, - fromnid, krx, krx->krx_rpc_reply_needed); - if (rc < 0) - goto done; - return; - } - CERROR("Unrecognised magic %08x from %s\n", msg->kqm_magic, libcfs_nid2str(fromnid)); done: @@ -1559,7 +1512,6 @@ kqswnal_rxhandler(EP_RXD *rxd) krx->krx_state = KRX_PARSE; krx->krx_rxd = rxd; krx->krx_nob = nob; - krx->krx_raw_lnet_hdr = 0; /* RPC reply iff rpc request received without error */ krx->krx_rpc_reply_needed = ep_rxd_isrpc(rxd) && @@ -1568,7 +1520,7 @@ kqswnal_rxhandler(EP_RXD *rxd) /* Default to failure if an RPC reply is requested but not handled */ krx->krx_rpc_reply.msg.status = -EPROTO; - atomic_set (&krx->krx_refcount, 1); + cfs_atomic_set (&krx->krx_refcount, 1); if (status != EP_SUCCESS) { /* receives complete with failure when receiver is removed */ @@ -1581,17 +1533,17 @@ kqswnal_rxhandler(EP_RXD *rxd) return; } - if (!in_interrupt()) { + if (!cfs_in_interrupt()) { kqswnal_parse(krx); return; } - spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); - list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds); - wake_up (&kqswnal_data.kqn_sched_waitq); + cfs_list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds); + cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq); - spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags); } int @@ -1614,7 +1566,7 @@ kqswnal_recv (lnet_ni_t *ni, int msg_offset; int rc; - LASSERT (!in_interrupt ()); /* OK to map */ + LASSERT (!cfs_in_interrupt ()); /* OK to map */ /* Either all pages or all vaddrs */ LASSERT (!(kiov != NULL && iov != NULL)); @@ -1624,17 +1576,9 @@ kqswnal_recv (lnet_ni_t *ni, if (krx->krx_rpc_reply_needed) { /* optimized (rdma) request sent as RPC */ - if (krx->krx_raw_lnet_hdr) { - LASSERT (the_lnet.ln_ptlcompat != 0); - hdr = (lnet_hdr_t *)msg; - rmd = kqswnal_get_portalscompat_rmd(krx); - if (rmd == NULL) - return (-EPROTO); - } else { - LASSERT (msg->kqm_type == QSWLND_MSG_RDMA); - hdr = &msg->kqm_u.rdma.kqrm_hdr; - rmd = &msg->kqm_u.rdma.kqrm_rmd; - } + LASSERT (msg->kqm_type == QSWLND_MSG_RDMA); + hdr = &msg->kqm_u.rdma.kqrm_hdr; + rmd = &msg->kqm_u.rdma.kqrm_rmd; /* NB header is still in wire byte order */ @@ -1684,13 +1628,8 @@ kqswnal_recv (lnet_ni_t *ni, return rc; } - if (krx->krx_raw_lnet_hdr) { - LASSERT (the_lnet.ln_ptlcompat != 0); - msg_offset = sizeof(lnet_hdr_t); - } else { - LASSERT (msg->kqm_type == QSWLND_MSG_IMMEDIATE); - msg_offset = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload); - } + LASSERT (msg->kqm_type == QSWLND_MSG_IMMEDIATE); + msg_offset = offsetof(kqswnal_msg_t, kqm_u.immediate.kqim_payload); if (krx->krx_nob < msg_offset + rlen) { CERROR("Bad message size from %s: have %d, need %d + %d\n", @@ -1717,19 +1656,19 @@ kqswnal_recv (lnet_ni_t *ni, int kqswnal_thread_start (int (*fn)(void *arg), void *arg) { - long pid = kernel_thread (fn, arg, 0); + long pid = cfs_create_thread (fn, arg, 0); if (pid < 0) return ((int)pid); - atomic_inc (&kqswnal_data.kqn_nthreads); + cfs_atomic_inc (&kqswnal_data.kqn_nthreads); return (0); } void kqswnal_thread_fini (void) { - atomic_dec (&kqswnal_data.kqn_nthreads); + cfs_atomic_dec (&kqswnal_data.kqn_nthreads); } int @@ -1744,49 +1683,51 @@ 
kqswnal_scheduler (void *arg) cfs_daemonize ("kqswnal_sched"); cfs_block_allsigs (); - - spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); + + cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); for (;;) { did_something = 0; - if (!list_empty (&kqswnal_data.kqn_readyrxds)) + if (!cfs_list_empty (&kqswnal_data.kqn_readyrxds)) { - krx = list_entry(kqswnal_data.kqn_readyrxds.next, - kqswnal_rx_t, krx_list); - list_del (&krx->krx_list); - spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, - flags); + krx = cfs_list_entry(kqswnal_data.kqn_readyrxds.next, + kqswnal_rx_t, krx_list); + cfs_list_del (&krx->krx_list); + cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, + flags); LASSERT (krx->krx_state == KRX_PARSE); kqswnal_parse (krx); did_something = 1; - spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, + flags); } - if (!list_empty (&kqswnal_data.kqn_donetxds)) + if (!cfs_list_empty (&kqswnal_data.kqn_donetxds)) { - ktx = list_entry(kqswnal_data.kqn_donetxds.next, - kqswnal_tx_t, ktx_schedlist); - list_del_init (&ktx->ktx_schedlist); - spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, - flags); + ktx = cfs_list_entry(kqswnal_data.kqn_donetxds.next, + kqswnal_tx_t, ktx_schedlist); + cfs_list_del_init (&ktx->ktx_schedlist); + cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, + flags); kqswnal_tx_done_in_thread_context(ktx); did_something = 1; - spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, + flags); } - if (!list_empty (&kqswnal_data.kqn_delayedtxds)) + if (!cfs_list_empty (&kqswnal_data.kqn_delayedtxds)) { - ktx = list_entry(kqswnal_data.kqn_delayedtxds.next, - kqswnal_tx_t, ktx_schedlist); - list_del_init (&ktx->ktx_schedlist); - spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, - flags); + ktx = cfs_list_entry(kqswnal_data.kqn_delayedtxds.next, + kqswnal_tx_t, ktx_schedlist); + cfs_list_del_init (&ktx->ktx_schedlist); + cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, + flags); rc = kqswnal_launch (ktx); if (rc != 0) { @@ -1794,36 +1735,41 @@ kqswnal_scheduler (void *arg) libcfs_nid2str(ktx->ktx_nid), rc); kqswnal_tx_done (ktx, rc); } - atomic_dec (&kqswnal_data.kqn_pending_txs); + cfs_atomic_dec (&kqswnal_data.kqn_pending_txs); did_something = 1; - spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, + flags); } /* nothing to do or hogging CPU */ if (!did_something || counter++ == KQSW_RESCHED) { - spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, - flags); + cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, + flags); counter = 0; if (!did_something) { if (kqswnal_data.kqn_shuttingdown == 2) { - /* We only exit in stage 2 of shutdown when - * there's nothing left to do */ + /* We only exit in stage 2 of shutdown + * when there's nothing left to do */ break; } - rc = wait_event_interruptible_exclusive ( + cfs_wait_event_interruptible_exclusive ( kqswnal_data.kqn_sched_waitq, kqswnal_data.kqn_shuttingdown == 2 || - !list_empty(&kqswnal_data.kqn_readyrxds) || - !list_empty(&kqswnal_data.kqn_donetxds) || - !list_empty(&kqswnal_data.kqn_delayedtxds)); + !cfs_list_empty(&kqswnal_data. \ + kqn_readyrxds) || + !cfs_list_empty(&kqswnal_data. \ + kqn_donetxds) || + !cfs_list_empty(&kqswnal_data. 
\ + kqn_delayedtxds, rc)); LASSERT (rc == 0); } else if (need_resched()) - schedule (); + cfs_schedule (); - spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags); + cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, + flags); } }
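
The bulk of this patch swaps bare Linux kernel primitives (spin_lock_irqsave(),
list_del(), atomic_inc(), in_interrupt(), wake_up(), kernel_thread()) for their
libcfs-prefixed counterparts (cfs_spin_lock_irqsave(), cfs_list_del(),
cfs_atomic_inc(), cfs_in_interrupt(), cfs_waitq_signal(), cfs_create_thread()),
replaces CDEBUG(D_NETERROR, ...) with CNETERR(), and removes the old
portals-compatibility (ln_ptlcompat) code paths so that only
LNET_PROTO_QSW_MAGIC messages are built and parsed. The sketch below
illustrates how such a portability shim is commonly laid out on Linux; it is a
minimal, assumption-laden illustration in the spirit of libcfs, not the actual
libcfs headers, and the real macro bodies and types may differ.

/*
 * Illustrative shim only: on Linux the cfs_* wrappers typically collapse
 * straight onto the native kernel API, while other platforms supply their
 * own implementations behind the same names.  Exact definitions here are
 * an assumption, not copied from libcfs.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/atomic.h>

#define cfs_spin_lock_irqsave(lock, flags) \
        spin_lock_irqsave(lock, flags)
#define cfs_spin_unlock_irqrestore(lock, flags) \
        spin_unlock_irqrestore(lock, flags)
#define cfs_list_del(entry)              list_del(entry)
#define cfs_list_del_init(entry)         list_del_init(entry)
#define cfs_list_add(entry, head)        list_add(entry, head)
#define cfs_list_add_tail(entry, head)   list_add_tail(entry, head)
#define cfs_list_empty(head)             list_empty(head)
#define cfs_atomic_inc(a)                atomic_inc(a)
#define cfs_atomic_dec(a)                atomic_dec(a)
#define cfs_atomic_read(a)               atomic_read(a)
#define cfs_atomic_set(a, v)             atomic_set(a, v)
#define cfs_in_interrupt()               in_interrupt()
#define cfs_waitq_signal(wq)             wake_up(wq)
#define cfs_schedule()                   schedule()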