LU-9679 modules: convert MIN/MAX to kernel style
diff --git a/lnet/klnds/gnilnd/gnilnd_cb.c b/lnet/klnds/gnilnd/gnilnd_cb.c
index 705a341..4cff9ed 100644
@@ -4,6 +4,7 @@
  * Copyright (C) 2009-2012 Cray, Inc.
  *
  *   Derived from work by Eric Barton <eric@bartonsoftware.com>
+ *   Author: James Shimek <jshimek@cray.com>
  *   Author: Nic Henke <nic@cray.com>
  *
  *   This file is part of Lustre, http://www.lustre.org.
@@ -25,6 +26,7 @@
 
 #include <asm/page.h>
 #include <linux/nmi.h>
+#include <linux/pagemap.h>
 #include "gnilnd.h"
 
 /* this is useful when debugging wire corruption. */
@@ -80,7 +82,6 @@ kgnilnd_schedule_device(kgn_device_t *dev)
        if (!already_live) {
                wake_up_all(&dev->gnd_waitq);
        }
-       return;
 }
 
 void kgnilnd_schedule_device_timer(unsigned long arg)
@@ -119,7 +120,7 @@ kgnilnd_device_callback(__u32 devid, __u64 arg)
  * == 0: reschedule if someone marked him WANTS_SCHED
  * > 0 : force a reschedule */
 /* Return code 0 means it did not schedule the conn, 1
- *  means it succesfully scheduled the conn.
+ * means it successfully scheduled the conn.
  */
 
 int
@@ -150,7 +151,7 @@ kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent)
  * as scheduled */
 
 int
-_kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld)
+_kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld, int lock_held)
 {
        kgn_device_t        *dev = conn->gnc_device;
        int                  sched;
@@ -183,10 +184,11 @@ _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refhe
                         conn, sched);
 
                CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
-
-               spin_lock(&dev->gnd_lock);
+               if (!lock_held)
+                       spin_lock(&dev->gnd_lock);
                list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
-               spin_unlock(&dev->gnd_lock);
+               if (!lock_held)
+                       spin_unlock(&dev->gnd_lock);
                set_mb(conn->gnc_last_sched_ask, jiffies);
                rc = 1;
        } else {
@@ -196,6 +198,23 @@ _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refhe
 
        /* make sure thread(s) are going to process conns - but let them make
         * a separate decision from conn scheduling */
+       if (!lock_held)
+               kgnilnd_schedule_device(dev);
+       return rc;
+}
+
+int
+_kgnilnd_schedule_delay_conn(kgn_conn_t *conn)
+{
+       kgn_device_t    *dev = conn->gnc_device;
+       int rc = 0;
+       spin_lock(&dev->gnd_lock);
+       if (list_empty(&conn->gnc_delaylist)) {
+               list_add_tail(&conn->gnc_delaylist, &dev->gnd_delay_conns);
+               rc = 1;
+       }
+       spin_unlock(&dev->gnd_lock);
+
        kgnilnd_schedule_device(dev);
        return rc;
 }
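
The lock_held flag lets call sites that already own dev->gnd_lock queue a
conn without a recursive acquire, while the new delay-list helper parks a
credit-starved conn on gnd_delay_conns. A minimal sketch of how the entry
points are presumably selected, via wrapper macros of this shape
(hypothetical stand-ins for what gnilnd.h would define):

        /* sketch only - hypothetical mirrors of the gnilnd.h wrappers */
        #define kgnilnd_schedule_conn(conn) \
                _kgnilnd_schedule_conn(conn, __func__, __LINE__, 0, 0)

        /* for callers already holding dev->gnd_lock, e.g. when draining
         * gnd_delay_conns under the lock later in this patch */
        #define kgnilnd_schedule_conn_nolock(conn) \
                _kgnilnd_schedule_conn(conn, __func__, __LINE__, 0, 1)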
@@ -236,7 +255,7 @@ kgnilnd_free_tx(kgn_tx_t *tx)
 
        /* Only free the buffer if we used it */
        if (tx->tx_buffer_copy != NULL) {
-               vfree(tx->tx_buffer_copy);
+               kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length);
                tx->tx_buffer_copy = NULL;
                CDEBUG(D_MALLOC, "vfreed buffer2\n");
        }
@@ -301,7 +320,7 @@ kgnilnd_cksum(void *ptr, size_t nob)
        return sum;
 }
 
-inline __u16
+__u16
 kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
                    unsigned int offset, unsigned int nob, int dump_blob)
 {
@@ -473,7 +492,7 @@ kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_ni
                LBUG();
        }
        /* only allow NAK on error and truncate to zero */
-       LASSERTF(error <= 0, "error %d conn 0x%p, cookie "LPU64"\n",
+       LASSERTF(error <= 0, "error %d conn 0x%p, cookie %llu\n",
                 error, conn, cookie);
 
        tx = kgnilnd_new_tx_msg(nak_type, source);
@@ -489,9 +508,9 @@ kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_ni
 }
 
 int
-kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *iov,
-                              lnet_kiov_t *kiov, unsigned int offset, unsigned int nob)
-
+kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
+                              struct kvec *iov, lnet_kiov_t *kiov,
+                              unsigned int offset, unsigned int nob)
 {
        kgn_msg_t       *msg = &tx->tx_msg;
        int              i;
@@ -504,8 +523,15 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
        if (nob == 0) {
                tx->tx_buffer = NULL;
        } else if (kiov != NULL) {
+
+               if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
+                       niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) /
+                               PAGE_SIZE);
+               }
+
                LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
-                        "bad niov %d\n", niov);
+                       "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n",
+                       niov, msg, kiov, iov, offset, nob);
 
                while (offset >= kiov->kiov_len) {
                        offset -= kiov->kiov_len;
@@ -598,7 +624,7 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
 
 int
 kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
-                         unsigned int niov, struct iovec *iov,
+                         unsigned int niov, struct kvec *iov,
                          unsigned int offset, unsigned int nob)
 
 {
@@ -738,7 +764,7 @@ error:
 
 static inline int
 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
-                         struct iovec *iov, lnet_kiov_t *kiov,
+                         struct kvec *iov, lnet_kiov_t *kiov,
                          unsigned int offset, unsigned int nob)
 {
        int     rc;
@@ -766,7 +792,7 @@ kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
  *           transfer.
  */
 static void
-kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov,
+kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov,
                        unsigned int *offset, unsigned int *nob,
                        lnet_kiov_t **kiov, int put_len)
 {
@@ -793,7 +819,7 @@ kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
 {
        unsigned int     niov, offset, nob;
        lnet_kiov_t     *kiov;
-       lnet_msg_t      *lntmsg = tx->tx_lntmsg[0];
+       struct lnet_msg      *lntmsg = tx->tx_lntmsg[0];
        int              dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
 
        GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
@@ -845,7 +871,7 @@ kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len)
        __u16            cksum;
        unsigned int     niov, offset, nob;
        lnet_kiov_t     *kiov;
-       lnet_msg_t      *lntmsg = tx->tx_lntmsg[0];
+       struct lnet_msg      *lntmsg = tx->tx_lntmsg[0];
        int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
 
        /* we can only match certain requests */
@@ -939,7 +965,7 @@ kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
        if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
            tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
                atomic64_add(bytes, &dev->gnd_rdmaq_bytes_out);
-               GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to "LPD64"",
+               GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %lld",
                          bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
        }
 
@@ -990,7 +1016,7 @@ kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
                atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
                LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
                         "bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
-               GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to "LPD64"",
+               GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld",
                          bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
        }
 
@@ -1049,7 +1075,7 @@ kgnilnd_map_buffer(kgn_tx_t *tx)
                 *  GART resource, etc starvation handling */
                if (rrc != GNI_RC_SUCCESS) {
                        GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
-                               "phys %u pp %u, virt %u nob "LPU64"",
+                               "phys %u pp %u, virt %u nob %llu",
                                tx->tx_phys_npages, dev->gnd_id,
                                dev->gnd_map_nphys, dev->gnd_map_physnop,
                                dev->gnd_map_nvirt, dev->gnd_map_virtnob);
@@ -1066,7 +1092,7 @@ kgnilnd_map_buffer(kgn_tx_t *tx)
                        NULL, flags, &tx->tx_map_key);
                if (rrc != GNI_RC_SUCCESS) {
                        GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
-                               "phys %u pp %u, virt %u nob "LPU64"",
+                               "phys %u pp %u, virt %u nob %llu",
                                tx->tx_nob, dev->gnd_id,
                                dev->gnd_map_nphys, dev->gnd_map_physnop,
                                dev->gnd_map_nvirt, dev->gnd_map_virtnob);
@@ -1124,8 +1150,10 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
        int               hold_timeout = 0;
 
        /* code below relies on +1 relationship ... */
-       CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1));
-       CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1));
+       BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
+                    (GNILND_BUF_PHYS_UNMAPPED + 1));
+       BUILD_BUG_ON(GNILND_BUF_VIRT_MAPPED !=
+                    (GNILND_BUF_VIRT_UNMAPPED + 1));
 
        switch (tx->tx_buftype) {
        default:
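
Note the inverted sense: CLASSERT(cond) asserted that cond holds, while
BUILD_BUG_ON(cond) breaks the build when cond is true, which is why each ==
becomes != in the conversion. A sketch of the +1 adjacency being pinned
down, with hypothetical stand-in values:

        /* the state code steps between paired values with +1, which is
         * only safe while each MAPPED value directly follows its UNMAPPED
         * twin - exactly what the BUILD_BUG_ON()s guarantee at build time */
        enum {
                EXAMPLE_BUF_UNMAPPED = 4,       /* hypothetical values */
                EXAMPLE_BUF_MAPPED   = 5,       /* must stay UNMAPPED + 1 */
        };

        static inline void example_adjacency_check(void)
        {
                BUILD_BUG_ON(EXAMPLE_BUF_MAPPED !=
                             (EXAMPLE_BUF_UNMAPPED + 1));
        }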
@@ -1157,6 +1185,7 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                 * verified peer notification  - the theory is that
                 * a TX error can be communicated in all other cases */
                if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
+                   error != -GNILND_NOPURG &&
                    kgnilnd_check_purgatory_conn(tx->tx_conn)) {
                        kgnilnd_add_purgatory_tx(tx);
 
@@ -1166,7 +1195,7 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                        hold_timeout = GNILND_TIMEOUT2DEADMAN;
 
                        GNIDBG_TX(D_NET, tx,
-                                "dev %p delaying MDD release for %dms key "LPX64"."LPX64"",
+                                "dev %p delaying MDD release for %dms key %#llx.%#llx",
                                 tx->tx_conn->gnc_device, hold_timeout,
                                 tx->tx_map_key.qword1, tx->tx_map_key.qword2);
                }
@@ -1189,9 +1218,9 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
 void
 kgnilnd_tx_done(kgn_tx_t *tx, int completion)
 {
-       lnet_msg_t      *lntmsg0, *lntmsg1;
+       struct lnet_msg      *lntmsg0, *lntmsg1;
        int             status0, status1;
-       lnet_ni_t       *ni = NULL;
+       struct lnet_ni       *ni = NULL;
        kgn_conn_t      *conn = tx->tx_conn;
 
        LASSERT(!in_interrupt());
@@ -1256,10 +1285,10 @@ kgnilnd_tx_done(kgn_tx_t *tx, int completion)
         * could free up lnet credits, resulting in a call chain back into
         * the LND via kgnilnd_send and friends */
 
-       lnet_finalize(ni, lntmsg0, status0);
+       lnet_finalize(lntmsg0, status0);
 
        if (lntmsg1 != NULL) {
-               lnet_finalize(ni, lntmsg1, status1);
+               lnet_finalize(lntmsg1, status1);
        }
 }
 
@@ -1326,7 +1355,7 @@ search_again:
         * if we are sending to the same node faster than 256000/sec.
         * To help guard against this, we OR in the tx_seq - that is 32 bits */
 
-       tx->tx_id.txe_chips = (__u32)(jiffies | conn->gnc_tx_seq);
+       tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
 
        GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
 
@@ -1334,70 +1363,35 @@ search_again:
        return 0;
 }
 
-static inline int
-kgnilnd_tx_should_retry(kgn_conn_t *conn, kgn_tx_t *tx)
+static inline void
+kgnilnd_tx_log_retrans(kgn_conn_t *conn, kgn_tx_t *tx)
 {
-       int             max_retrans = *kgnilnd_tunables.kgn_max_retransmits;
        int             log_retrans;
-       int             log_retrans_level;
 
-       /* I need kgni credits to send this.  Replace tx at the head of the
-        * fmaq and I'll get rescheduled when credits appear */
-       tx->tx_state = 0;
-       tx->tx_retrans++;
-       conn->gnc_tx_retrans++;
-       log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0) ||
-                       (tx->tx_retrans > (max_retrans / 2)));
-       log_retrans_level = tx->tx_retrans < (max_retrans / 2) ? D_NET : D_NETERROR;
-
-       /* Decision time - either error, warn or just retransmit */
+       log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0));
 
        /* we don't care about TX timeout - it could be that the network is slower
  * or throttled. We'll keep retransmitting - so if the network is so slow
         * that we fill up our mailbox, we'll keep trying to resend that msg
         * until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
  * that it hasn't sent us any traffic in return */
-
-       if (tx->tx_retrans > max_retrans) {
-               /* this means we are not backing off the retransmits
-                * in a healthy manner and are likely chewing up the
-                * CPU cycles quite badly */
-               GNIDBG_TOMSG(D_ERROR, &tx->tx_msg,
-                       "SOFTWARE BUG: too many retransmits (%d) for tx id %x "
-                       "conn 0x%p->%s\n",
-                       tx->tx_retrans, tx->tx_id, conn,
-                       libcfs_nid2str(conn->gnc_peer->gnp_nid));
-
-               /* yes - double errors to help debug this condition */
-               GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg, "connection dead. "
-                       "unable to send to %s for %lu secs (%d tries)",
-                       libcfs_nid2str(tx->tx_conn->gnc_peer->gnp_nid),
-                       cfs_duration_sec(jiffies - tx->tx_cred_wait),
-                       tx->tx_retrans);
-
-               kgnilnd_close_conn(conn, -ETIMEDOUT);
-
-               /* caller should terminate */
-               RETURN(0);
-       } else {
-               /* some reasonable throttling of the debug message */
-               if (log_retrans) {
-                       unsigned long now = jiffies;
-                       /* XXX Nic: Mystical TX debug here... */
-                       GNIDBG_SMSG_CREDS(log_retrans_level, conn);
-                       GNIDBG_TOMSG(log_retrans_level, &tx->tx_msg,
-                               "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
-                               " last_msg %uus/%uus last_cq %uus/%uus",
-                               conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
-                               tx->tx_id, tx->tx_retrans,
-                               jiffies_to_usecs(now - tx->tx_cred_wait),
-                               jiffies_to_usecs(now - conn->gnc_last_tx),
-                               jiffies_to_usecs(now - conn->gnc_last_rx),
-                               jiffies_to_usecs(now - conn->gnc_last_tx_cq),
-                               jiffies_to_usecs(now - conn->gnc_last_rx_cq));
-               }
-               /* caller should retry */
-               RETURN(1);
+
+       /* some reasonable throttling of the debug message */
+       if (log_retrans) {
+               unsigned long now = jiffies;
+               /* XXX Nic: Mystical TX debug here... */
+               /* We expect retransmissions so only log when D_NET is enabled */
+               GNIDBG_SMSG_CREDS(D_NET, conn);
+               GNIDBG_TOMSG(D_NET, &tx->tx_msg,
+                       "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
+                       " last_msg %uus/%uus last_cq %uus/%uus",
+                       conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
+                       tx->tx_id, tx->tx_retrans,
+                       jiffies_to_usecs(now - tx->tx_cred_wait),
+                       jiffies_to_usecs(now - conn->gnc_last_tx),
+                       jiffies_to_usecs(now - conn->gnc_last_rx),
+                       jiffies_to_usecs(now - conn->gnc_last_tx_cq),
+                       jiffies_to_usecs(now - conn->gnc_last_rx_cq));
        }
 }
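
With the retry/abort decision folded into the GNI_RC_NOT_DONE handler below,
this helper only throttles the debug output: the first 25 retransmits are
each logged, then only every 25th. The cadence implied by the predicate, as
a standalone sketch:

        /* log cadence:
         *   tx_retrans  1..24  -> logged  (tx_retrans < 25)
         *   tx_retrans  25, 50 -> logged  (tx_retrans % 25 == 0)
         *   tx_retrans  26..49 -> silent */
        static inline int log_this_retrans(int tx_retrans)
        {
                return (tx_retrans < 25) || ((tx_retrans % 25) == 0);
        }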
 
@@ -1410,7 +1404,6 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
 {
        kgn_conn_t      *conn = tx->tx_conn;
        kgn_msg_t       *msg = &tx->tx_msg;
-       int              retry_send;
        gni_return_t     rrc;
        unsigned long    newest_last_rx, timeout;
        unsigned long    now;
@@ -1428,7 +1421,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
         * close message.
         */
        if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                /* Return -ETIME, we are closing the connection already so we don't want to
                 * have this tx hit the wire. The tx will be killed by the calling function.
                 * Once the EP is marked dirty the close message will be the last
@@ -1446,11 +1440,13 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        }
 
        if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
-               GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Cant send to %s after timeout lapse of %lu; TO %lu",
+               GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn,
+                           "Cant send to %s after timeout lapse of %lu; TO %lu\n",
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                cfs_duration_sec(now - newest_last_rx),
                cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                return -ETIME;
        }
 
@@ -1461,7 +1457,7 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
         */
        msg->gnm_connstamp = conn->gnc_my_connstamp;
        msg->gnm_payload_len = immediatenob;
-       msg->gnm_seq = conn->gnc_tx_seq;
+       msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
 
        /* always init here - kgn_checksum is a /sys module tunable
         * and can be flipped at any point, even between msg init and sending */
@@ -1485,14 +1481,15 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
                rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
        } else {
-       rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
-                                   msg, sizeof(*msg), immediate, immediatenob,
-                           tx->tx_id.txe_smsg_id);
+               rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
+                                       msg, sizeof(*msg), immediate,
+                                       immediatenob,
+                                       tx->tx_id.txe_smsg_id);
        }
 
        switch (rrc) {
        case GNI_RC_SUCCESS:
-               conn->gnc_tx_seq++;
+               atomic_inc(&conn->gnc_tx_seq);
                conn->gnc_last_tx = jiffies;
                /* no locking here as LIVE isn't a list */
                kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
@@ -1506,7 +1503,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
 
                /* serialize with seeing CQ events for completion on this, as well as
                 * tx_seq */
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
 
                atomic_inc(&conn->gnc_device->gnd_short_ntx);
                atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
@@ -1515,11 +1513,13 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
                return 0;
 
        case GNI_RC_NOT_DONE:
-               /* XXX Nic: We need to figure out how to track this
-                * - there are bound to be good reasons for it,
-                * but we want to know when it happens */
-
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               /* Jshimek: We can get GNI_RC_NOT_DONE for 3 reasons currently
+                * 1: out of mbox credits
+                * 2: out of mbox payload credits
+                * 3: On Aries out of dla credits
+                */
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                /* We'll handle this error inline - makes the calling logic much more
                 * clean */
 
@@ -1528,35 +1528,41 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
                        return -EAGAIN;
                }
 
-               retry_send = kgnilnd_tx_should_retry(conn, tx);
-               if (retry_send) {
-                       /* add to head of list for the state and retries */
-                       spin_lock(state_lock);
-                       kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
-                       spin_unlock(state_lock);
-
-                       /* We only reschedule for a certain number of retries, then
-                        * we will wait for the CQ events indicating a release of SMSG
-                        * credits */
-                       if (tx->tx_retrans < (*kgnilnd_tunables.kgn_max_retransmits/4)) {
-                               kgnilnd_schedule_conn(conn);
-                               return 0;
-                       } else {
-                               /* CQ event coming in signifies either TX completed or
-                                * RX receive. Either of these *could* free up credits
-                                * in the SMSG mbox and we should try sending again */
-                               GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
-                                        tx->tx_conn->gnc_cqid);
-                               /* use +ve return code to let upper layers know they
-                                * should stop looping on sends */
-                               return EAGAIN;
-                       }
+               /* I need kgni credits to send this.  Replace tx at the head of the
+                * fmaq and I'll get rescheduled when credits appear. Reset the tx_state
+                * and bump retrans counts since we are requeueing the tx.
+                */
+               tx->tx_state = 0;
+               tx->tx_retrans++;
+               conn->gnc_tx_retrans++;
+
+               kgnilnd_tx_log_retrans(conn, tx);
+               /* add to head of list for the state and retries */
+               spin_lock(state_lock);
+               kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
+               spin_unlock(state_lock);
+
+               /* We only reschedule for a certain number of retries, then
+                * we will wait for the CQ events indicating a release of SMSG
+                * credits */
+               if (tx->tx_retrans < *kgnilnd_tunables.kgn_max_retransmits) {
+                       kgnilnd_schedule_conn(conn);
+                       return 0;
                } else {
-                       return -EAGAIN;
+                       /* CQ event coming in signifies either TX completed or
+                        * RX receive. Either of these *could* free up credits
+                        * in the SMSG mbox and we should try sending again */
+                       GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
+                                tx->tx_conn->gnc_cqid);
+                       kgnilnd_schedule_delay_conn(conn);
+                       /* use +ve return code to let upper layers know they
+                        * should stop looping on sends */
+                       return EAGAIN;
                }
        default:
                /* handle bad retcode gracefully */
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                return -EIO;
        }
 }
@@ -1571,7 +1577,8 @@ kgnilnd_sendmsg(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        int              rc;
 
        timestamp = jiffies;
-       mutex_lock(&dev->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
+       kgnilnd_conn_mutex_lock(&tx->tx_conn->gnc_smsg_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -1616,7 +1623,8 @@ kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob
                rc = 0;
        } else {
                atomic_inc(&conn->gnc_device->gnd_fast_try);
-               rc = mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
+               rc = kgnilnd_trylock(&conn->gnc_device->gnd_cq_mutex,
+                                    &conn->gnc_smsg_mutex);
        }
        if (!rc) {
                rc = -EAGAIN;
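
kgnilnd_trylock() must take the global CQ mutex and the per-conn SMSG mutex
together or not at all, in the same order kgnilnd_sendmsg() locks them. A
hedged sketch of such a helper (the real one is presumably in gnilnd.h and
may differ):

        /* sketch: acquire both mutexes or neither, global lock first so
         * the ordering matches the blocking kgnilnd_sendmsg() path */
        static inline int
        kgnilnd_trylock_sketch(struct mutex *cq_lock, struct mutex *conn_lock)
        {
                if (!mutex_trylock(cq_lock))
                        return 0;
                if (!mutex_trylock(conn_lock)) {
                        mutex_unlock(cq_lock);
                        return 0;
                }
                return 1;       /* caller now holds both */
        }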
@@ -1639,7 +1647,7 @@ kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob
 }
 
 /* lets us know if we can push this RDMA through now */
-inline int
+static int
 kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx)
 {
        long    bytes_left;
@@ -1753,7 +1761,7 @@ kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
 }
 
 void
-kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
+kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_process_id *target)
 {
        kgn_peer_t      *peer;
        kgn_peer_t      *new_peer = NULL;
@@ -1788,7 +1796,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
                }
 
                /* don't create a connection if the peer is marked down */
-               if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
+               if (peer->gnp_state != GNILND_PEER_UP) {
                        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                        rc = -ENETRESET;
                        GOTO(no_peer, rc);
@@ -1827,7 +1835,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
        kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
 
        /* don't create a connection if the peer is not up */
-       if (peer->gnp_down != GNILND_RCA_NODE_UP) {
+       if (peer->gnp_state != GNILND_PEER_UP) {
                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                rc = -ENETRESET;
                GOTO(no_peer, rc);
@@ -1855,7 +1863,7 @@ no_peer:
        RETURN_EXIT;
 }
 
-void
+int
 kgnilnd_rdma(kgn_tx_t *tx, int type,
            kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
 {
@@ -1907,11 +1915,11 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
 
                        tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3;
                        if (tx->tx_offset)
-                               kgnilnd_admin_addref(kgnilnd_data.kgn_rev_offset);
+                               atomic_inc(&kgnilnd_data.kgn_rev_offset);
 
                        if ((nob + tx->tx_offset) & 3) {
                                desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3)));
-                               kgnilnd_admin_addref(kgnilnd_data.kgn_rev_length);
+                               atomic_inc(&kgnilnd_data.kgn_rev_length);
                        } else {
                                desc_nob = (nob + tx->tx_offset);
                        }
@@ -1919,23 +1927,24 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
                        if (tx->tx_buffer_copy == NULL) {
                                /* Allocate the largest copy buffer we will need; this prevents us from overwriting data
                                 * and requires that we allocate at most a few extra bytes. */
-                               tx->tx_buffer_copy = vmalloc(desc_nob);
+                               tx->tx_buffer_copy = kgnilnd_vzalloc(desc_nob);
 
                                if (!tx->tx_buffer_copy) {
                                        /* allocation of buffer failed nak the rdma */
                                        kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
                                        kgnilnd_tx_done(tx, -EFAULT);
-                                       return;
+                                       return 0;
                                }
-                               kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
+                               atomic_inc(&kgnilnd_data.kgn_rev_copy_buff);
                                rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
                                if (rc != GNI_RC_SUCCESS) {
                                        /* Registration Failed nak rdma and kill the tx. */
-                                       vfree(tx->tx_buffer_copy);
+                                       kgnilnd_vfree(tx->tx_buffer_copy,
+                                                     desc_nob);
                                        tx->tx_buffer_copy = NULL;
                                        kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
                                        kgnilnd_tx_done(tx, -EFAULT);
-                                       return;
+                                       return 0;
                                }
                        }
                        desc_map_key = tx->tx_buffer_copy_map_key;
@@ -1953,8 +1962,10 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
        tx->tx_rdma_desc.remote_mem_hndl = sink->gnrd_key;
        tx->tx_rdma_desc.length = desc_nob;
        tx->tx_nob_rdma = nob;
-       if (*kgnilnd_tunables.kgn_bte_dlvr_mode)
-               tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_dlvr_mode;
+       if (post_type == GNI_POST_RDMA_PUT && *kgnilnd_tunables.kgn_bte_put_dlvr_mode)
+               tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_put_dlvr_mode;
+       if (post_type == GNI_POST_RDMA_GET && *kgnilnd_tunables.kgn_bte_get_dlvr_mode)
+               tx->tx_rdma_desc.dlvr_mode = *kgnilnd_tunables.kgn_bte_get_dlvr_mode;
        /* prep final completion message */
        kgnilnd_init_msg(&tx->tx_msg, type, tx->tx_msg.gnm_srcnid);
        tx->tx_msg.gnm_u.completion.gncm_cookie = cookie;
@@ -1965,21 +1976,23 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
 
        if (nob == 0) {
                kgnilnd_queue_tx(conn, tx);
-               return;
+               return 0;
        }
 
        /* Don't lie (CLOSE == RDMA idle) */
        LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
                 tx, conn, conn->gnc_close_sent);
 
-       GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x dlvr_mode 0x%x cookie:"LPX64,
-               type, tx->tx_rdma_desc.dlvr_mode, cookie);
+       GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
+               "0x%x cookie:%#llx",
+               type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
 
        /* set CQ dedicated for RDMA */
        tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -1987,17 +2000,35 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
 
        rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
 
+       if (rrc == GNI_RC_ERROR_RESOURCE) {
+               kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_unmap_buffer(tx, 0);
+
+               if (tx->tx_buffer_copy != NULL) {
+                       kgnilnd_vfree(tx->tx_buffer_copy, desc_nob);
+                       tx->tx_buffer_copy = NULL;
+               }
+
+               spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
+               kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn,
+                                           GNILND_TX_MAPQ, 0);
+               spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
+               kgnilnd_schedule_device(tx->tx_conn->gnc_device);
+               return -EAGAIN;
+       }
+
        spin_lock(&conn->gnc_list_lock);
        kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
        tx->tx_qtime = jiffies;
        spin_unlock(&conn->gnc_list_lock);
-
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
 
        /* XXX Nic: is this a place we should handle more errors for
         * robustness sake */
        LASSERT(rrc == GNI_RC_SUCCESS);
-
+       return 0;
 }
 
 kgn_rx_t *
@@ -2028,19 +2059,19 @@ kgnilnd_release_msg(kgn_conn_t *conn)
        CDEBUG(D_NET, "consuming %p\n", conn);
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
        conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
 
        rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
 
        LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
        GNIDBG_SMSG_CREDS(D_NET, conn);
 
-       return;
+       kgnilnd_schedule_conn(conn);
 }
 
 void
@@ -2064,20 +2095,18 @@ kgnilnd_consume_rx(kgn_rx_t *rx)
        kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
        CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
               sizeof(*rx), rx);
-
-       return;
 }
 
 int
-kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
 {
-       lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
+       struct lnet_hdr  *hdr = &lntmsg->msg_hdr;
        int               type = lntmsg->msg_type;
-       lnet_process_id_t target = lntmsg->msg_target;
+       struct lnet_process_id target = lntmsg->msg_target;
        int               target_is_router = lntmsg->msg_target_is_router;
        int               routing = lntmsg->msg_routing;
        unsigned int      niov = lntmsg->msg_niov;
-       struct iovec     *iov = lntmsg->msg_iov;
+       struct kvec      *iov = lntmsg->msg_iov;
        lnet_kiov_t      *kiov = lntmsg->msg_kiov;
        unsigned int      offset = lntmsg->msg_offset;
        unsigned int      nob = lntmsg->msg_len;
@@ -2241,12 +2270,12 @@ out:
 }
 
 void
-kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
+kgnilnd_setup_rdma(struct lnet_ni *ni, kgn_rx_t *rx, struct lnet_msg *lntmsg, int mlen)
 {
        kgn_conn_t    *conn = rx->grx_conn;
        kgn_msg_t     *rxmsg = rx->grx_msg;
        unsigned int   niov = lntmsg->msg_niov;
-       struct iovec  *iov = lntmsg->msg_iov;
+       struct kvec   *iov = lntmsg->msg_iov;
        lnet_kiov_t   *kiov = lntmsg->msg_kiov;
        unsigned int   offset = lntmsg->msg_offset;
        unsigned int   nob = lntmsg->msg_len;
@@ -2298,11 +2327,11 @@ kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
        kgnilnd_tx_done(tx, rc);
        kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
  failed_0:
-       lnet_finalize(ni, lntmsg, rc);
+       lnet_finalize(lntmsg, rc);
 }
 
 int
-kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+kgnilnd_eager_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
                   void **new_private)
 {
        kgn_rx_t        *rx = private;
@@ -2340,7 +2369,7 @@ kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                CERROR("Couldnt find matching peer %p or conn %p / %p\n",
                        peer, conn, found_conn);
                if (found_conn) {
-                       CERROR("Unexpected connstamp "LPX64"("LPX64" expected)"
+                       CERROR("Unexpected connstamp %#llx(%#llx expected)"
                                " from %s", rxmsg->gnm_connstamp,
                                found_conn->gnc_peer_connstamp,
                                libcfs_nid2str(peer->gnp_nid));
@@ -2393,9 +2422,9 @@ kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 }
 
 int
-kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
             int delayed, unsigned int niov,
-            struct iovec *iov, lnet_kiov_t *kiov,
+            struct kvec *iov, lnet_kiov_t *kiov,
             unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
        kgn_rx_t    *rx = private;
@@ -2424,7 +2453,7 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 
                /* someone closed the conn after we copied this out, nuke it */
                kgnilnd_consume_rx(rx);
-               lnet_finalize(ni, lntmsg, conn->gnc_error);
+               lnet_finalize(lntmsg, conn->gnc_error);
                RETURN(0);
        }
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
@@ -2498,14 +2527,14 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                                &rxmsg[1], 0, mlen);
 
                kgnilnd_consume_rx(rx);
-               lnet_finalize(ni, lntmsg, 0);
+               lnet_finalize(lntmsg, 0);
                RETURN(0);
 
        case GNILND_MSG_PUT_REQ:
                /* LNET wants to truncate or drop transaction, sending NAK */
                if (mlen == 0) {
                        kgnilnd_consume_rx(rx);
-                       lnet_finalize(ni, lntmsg, 0);
+                       lnet_finalize(lntmsg, 0);
 
                        /* only error if lntmsg == NULL, otherwise we are just
                         * short circuiting the rdma process of 0 bytes */
@@ -2564,7 +2593,7 @@ nak_put_req:
                /* LNET wants to truncate or drop transaction, sending NAK */
                if (mlen == 0) {
                        kgnilnd_consume_rx(rx);
-                       lnet_finalize(ni, lntmsg, 0);
+                       lnet_finalize(lntmsg, 0);
 
                        /* only error if lntmsg == NULL, otherwise we are just
                         * short circuiting the rdma process of 0 bytes */
@@ -2634,7 +2663,7 @@ nak_get_req_rev:
                /* LNET wants to truncate or drop transaction, sending NAK */
                if (mlen == 0) {
                        kgnilnd_consume_rx(rx);
-                       lnet_finalize(ni, lntmsg, 0);
+                       lnet_finalize(lntmsg, 0);
 
                        /* only error if lntmsg == NULL, otherwise we are just
                         * short circuiting the rdma process of 0 bytes */
@@ -2709,7 +2738,7 @@ kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn)
        if (time_after_eq(now, newest_last_rx + timeout)) {
                uint32_t level = D_CONSOLE|D_NETERROR;
 
-               if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_DOWN) {
+               if (conn->gnc_peer->gnp_state == GNILND_PEER_DOWN) {
                        level = D_NET;
                }
                        GNIDBG_CONN(level, conn,
@@ -2762,14 +2791,16 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
        int                     rc = 0;
        int                     count = 0;
        int                     reconnect;
+       int                     to_reconn;
        short                   releaseconn = 0;
        unsigned long           first_rx = 0;
+       int                     purgatory_conn_cnt = 0;
 
        CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
                peer, libcfs_nid2str(peer->gnp_nid),
                peer->gnp_reconnect_interval);
 
-       timeout = cfs_time_seconds(MAX(*kgnilnd_tunables.kgn_timeout,
+       timeout = cfs_time_seconds(max(*kgnilnd_tunables.kgn_timeout,
                                       GNILND_MIN_TIMEOUT));
 
        conn = kgnilnd_find_conn_locked(peer);
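
This hunk is the change the subject line names: the private MAX() macro
gives way to the kernel's max() from <linux/kernel.h>, which evaluates each
operand once and is type-checked. A small illustration (function name
hypothetical):

        /* classic macro: re-evaluates operands and mixes types silently */
        #define MAX(a, b) (((a) > (b)) ? (a) : (b))

        /* kernel style: max() insists both sides have the same type;
         * mismatches must be reconciled explicitly, e.g. with max_t() */
        static long example_timeout(int tunable, int floor)
        {
                return cfs_time_seconds(max(tunable, floor));
        }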
@@ -2783,6 +2814,14 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
                                conn->gnc_close_recvd = GNILND_CLOSE_INJECT1;
                                conn->gnc_peer_error = -ETIMEDOUT;
                        }
+
+                       if (*kgnilnd_tunables.kgn_to_reconn_disable &&
+                           rc == -ETIMEDOUT) {
+                               peer->gnp_state = GNILND_PEER_TIMED_OUT;
+                               CDEBUG(D_WARNING, "%s conn timed out, will "
+                                      "reconnect upon request from peer\n",
+                                      libcfs_nid2str(conn->gnc_peer->gnp_nid));
+                       }
                        /* Once we mark closed, any of the scheduler threads could
                         * get it and move through before we hit the fail loc code */
                        kgnilnd_close_conn_locked(conn, rc);
@@ -2826,21 +2865,28 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
        /* Don't reconnect if we are still trying to clear out old conns.
         * This prevents us sending traffic on the new mbox before ensuring we are done
         * with the old one */
-       reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
+       reconnect = (peer->gnp_state == GNILND_PEER_UP) &&
                    (atomic_read(&peer->gnp_dirty_eps) == 0);
 
+       /* fast reconnect after a timeout */
+       to_reconn = !conn &&
+                   (peer->gnp_last_errno == -ETIMEDOUT) &&
+                   *kgnilnd_tunables.kgn_fast_reconn;
+
        /* if we are not connected and there are tx on the gnp_tx_queue waiting
         * to be sent, we'll check the reconnect interval and fire up a new
         * connection request */
 
-       if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
+       if (reconnect &&
+           (peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
-            !list_empty(&peer->gnp_tx_queue) && reconnect) {
+           (!list_empty(&peer->gnp_tx_queue) || to_reconn)) {
 
                CDEBUG(D_NET, "starting connect to %s\n",
                        libcfs_nid2str(peer->gnp_nid));
-               LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, "Peer was idle and we"
-                       "have a write_lock, state issue %d\n", peer->gnp_connecting);
+               LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE,
+                        "Peer was idle and we have a write_lock, state issue %d\n",
+                        peer->gnp_connecting);
 
                peer->gnp_connecting = GNILND_PEER_CONNECT;
                kgnilnd_peer_addref(peer); /* extra ref for connd */
@@ -2903,11 +2949,33 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
                                        cfs_duration_sec(waiting));
 
                                kgnilnd_detach_purgatory_locked(conn, souls);
+                       } else {
+                               purgatory_conn_cnt++;
                        }
                }
        }
 
-       return;
+       /* If we have too many connections in purgatory we could run out of
+        * resources. Limit the number of connections to a tunable number,
+        * clean up to the minimum all in one fell swoop... there are
+        * situations where dvs will retry tx's and we can eat up several
+        * hundred connection requests at once.
+        */
+       if (purgatory_conn_cnt > *kgnilnd_tunables.kgn_max_purgatory) {
+               list_for_each_entry_safe(conn, connN, &peer->gnp_conns,
+                                        gnc_list) {
+                       if (conn->gnc_in_purgatory &&
+                           conn->gnc_state == GNILND_CONN_DONE) {
+                               CDEBUG(D_NET, "Dropping Held resource due to"
+                                             " resource limits being hit\n");
+                               kgnilnd_detach_purgatory_locked(conn, souls);
+
+                               if (purgatory_conn_cnt-- <
+                                   *kgnilnd_tunables.kgn_max_purgatory)
+                                       break;
+                       }
+               }
+       }
 }
 
 void
@@ -2915,11 +2983,8 @@ kgnilnd_reaper_check(int idx)
 {
        struct list_head  *peers = &kgnilnd_data.kgn_peers[idx];
        struct list_head  *ctmp, *ctmpN;
-       struct list_head   geriatrics;
-       struct list_head   souls;
-
-       INIT_LIST_HEAD(&geriatrics);
-       INIT_LIST_HEAD(&souls);
+       LIST_HEAD(geriatrics);
+       LIST_HEAD(souls);
 
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 
@@ -3004,7 +3069,7 @@ kgnilnd_reaper(void *arg)
                                    next_check_time);
                        mod_timer(&timer, (long) jiffies + timeout);
 
-                       /* check flag variables before comitting */
+                       /* check flag variables before committing */
                        if (!kgnilnd_data.kgn_shutdown &&
                            !kgnilnd_data.kgn_quiesce_trigger) {
                                CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
@@ -3060,7 +3125,7 @@ int
 kgnilnd_recv_bte_get(kgn_tx_t *tx) {
        unsigned niov, offset, nob;
        lnet_kiov_t     *kiov;
-       lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
+       struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
        kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
 
        if (kiov != NULL) {
@@ -3112,7 +3177,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                }
 
                if (rrc == GNI_RC_NOT_DONE) {
-                       mutex_unlock(&dev->gnd_cq_mutex);
+                       kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
                        CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
                               dev->gnd_id, num_processed);
                        return num_processed;
@@ -3124,12 +3189,12 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                        "this is bad, somehow our credits didn't protect us"
                        " from CQ overrun\n");
                LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_POST,
-                       "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
+                       "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
                        event_data, GNI_CQ_GET_TYPE(event_data));
 
                rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
                                            &desc);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                /* XXX Nic: Need better error handling here... */
                LASSERTF((rrc == GNI_RC_SUCCESS) ||
@@ -3173,9 +3238,16 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                }
 
                /* remove from rdmaq */
+               kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
                spin_lock(&conn->gnc_list_lock);
                kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                spin_unlock(&conn->gnc_list_lock);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+
+               if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
+                       event_data = 1LL << 48;
+                       rc = 1;
+               }
 
                if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
                        atomic_inc(&dev->gnd_rdma_ntx);
@@ -3187,6 +3259,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                        /* drop ref from kgnilnd_validate_tx_ev_id */
                        kgnilnd_admin_decref(conn->gnc_tx_in_use);
                        kgnilnd_conn_decref(conn);
+
                        continue;
                }
 
@@ -3230,7 +3303,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                                         -EFAULT,
                                         rcookie,
                                         tx->tx_msg.gnm_srcnid);
-                       kgnilnd_tx_done(tx, -EFAULT);
+                       kgnilnd_tx_done(tx, -GNILND_NOPURG);
                        kgnilnd_close_conn(conn, -ECOMM);
                }
 
@@ -3250,6 +3323,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
        kgn_conn_t            *conn = NULL;
        int                    queued_fma, saw_reply, rc;
        long                   num_processed = 0;
+       struct list_head      *ctmp, *ctmpN;
 
        for (;;) {
                /* make sure we don't keep looping if we need to reset */
@@ -3265,13 +3339,29 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                }
 
                rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                if (rrc == GNI_RC_NOT_DONE) {
                        CDEBUG(D_INFO,
-                              "SMSG send CQ %d not ready (data "LPX64") "
+                              "SMSG send CQ %d not ready (data %#llx) "
                               "processed %ld\n", dev->gnd_id, event_data,
                               num_processed);
+
+                       if (num_processed > 0) {
+                               spin_lock(&dev->gnd_lock);
+                               if (!list_empty(&dev->gnd_delay_conns)) {
+                                       list_for_each_safe(ctmp, ctmpN, &dev->gnd_delay_conns) {
+                                               conn = list_entry(ctmp, kgn_conn_t, gnc_delaylist);
+                                               list_del_init(&conn->gnc_delaylist);
+                                               CDEBUG(D_NET, "Moving Conn %p from delay queue to ready_queue\n", conn);
+                                               kgnilnd_schedule_conn_nolock(conn);
+                                       }
+                                       spin_unlock(&dev->gnd_lock);
+                                       kgnilnd_schedule_device(dev);
+                               } else {
+                                       spin_unlock(&dev->gnd_lock);
+                               }
+                       }
                        return num_processed;
                }
 
@@ -3282,7 +3372,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                        "this is bad, somehow our credits didn't "
                        "protect us from CQ overrun\n");
                LASSERTF(GNI_CQ_GET_TYPE(event_data) == GNI_CQ_EVENT_TYPE_SMSG,
-                       "rrc %d, GNI_CQ_GET_TYPE("LPX64") = "LPX64"\n", rrc,
+                       "rrc %d, GNI_CQ_GET_TYPE(%#llx) = %#llx\n", rrc,
                        event_data, GNI_CQ_GET_TYPE(event_data));
 
                /* if SMSG couldn't handle an error, time for conn to die */
@@ -3296,7 +3386,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                        if (conn == NULL) {
                                /* Conn was destroyed? */
                                CDEBUG(D_NET,
-                                       "SMSG CQID lookup "LPX64" failed\n",
+                                       "SMSG CQID lookup %#llx failed\n",
                                        GNI_CQ_GET_INST_ID(event_data));
                                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                                continue;
@@ -3331,6 +3421,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                }
 
                /* lock tx_list_state and tx_state */
+               kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
                spin_lock(&tx->tx_conn->gnc_list_lock);
 
                GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
@@ -3351,6 +3442,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
 
                spin_unlock(&tx->tx_conn->gnc_list_lock);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
 
                if (queued_fma) {
                        CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
@@ -3419,10 +3511,10 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
                        return 1;
                }
                rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                if (rrc == GNI_RC_NOT_DONE) {
-                       CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" "
+                       CDEBUG(D_INFO, "SMSG RX CQ %d empty data %#llx "
                                "processed %ld\n",
                                dev->gnd_id, event_data, num_processed);
                        return num_processed;
@@ -3439,14 +3531,13 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
                                /* set overrun too */
                                event_data |= (1UL << 63);
                                LASSERTF(GNI_CQ_OVERRUN(event_data),
-                                        "(1UL << 63) is no longer the bit to"
-                                        "set to indicate CQ_OVERRUN\n");
+                                        "(1UL << 63) is no longer the bit to set to indicate CQ_OVERRUN\n");
                        }
                }
                /* sender should get error event too and take care
                of failed transaction by re-transmitting */
                if (rrc == GNI_RC_TRANSACTION_ERROR) {
-                       CDEBUG(D_NET, "SMSG RX CQ error "LPX64"\n", event_data);
+                       CDEBUG(D_NET, "SMSG RX CQ error %#llx\n", event_data);
                        continue;
                }
 
@@ -3455,12 +3546,12 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
                        conn = kgnilnd_cqid2conn_locked(
                                                 GNI_CQ_GET_INST_ID(event_data));
                        if (conn == NULL) {
-                               CDEBUG(D_NET, "SMSG RX CQID lookup "LPU64" "
-                                       "failed, dropping event "LPX64"\n",
+                               CDEBUG(D_NET, "SMSG RX CQID lookup %llu "
+                                       "failed, dropping event %#llx\n",
                                        GNI_CQ_GET_INST_ID(event_data),
                                        event_data);
                        } else {
-                               CDEBUG(D_NET, "SMSG RX: CQID "LPU64" "
+                               CDEBUG(D_NET, "SMSG RX: CQID %llu "
                                       "conn %p->%s\n",
                                        GNI_CQ_GET_INST_ID(event_data),
                                        conn, conn->gnc_peer ?
@@ -3530,7 +3621,8 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                rc = kgnilnd_map_buffer(tx);
        }
 
-       /* rc should be 0 if we mapped succesfully here, if non-zero we are queueing */
+       /* rc should be 0 if we mapped successfully here; if non-zero
+        * we are queueing */
        if (rc != 0) {
                /* if try_map_if_full set, they handle requeuing */
                if (unlikely(try_map_if_full)) {
@@ -3554,7 +3646,12 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
         * remote node where the RDMA will be started
         * Special case -EAGAIN logic - this should just be queued as if the mapping couldn't
         * be satisfied. The rest of the errors are "hard" errors that require
-        * upper layers to handle themselves */
+        * upper layers to handle themselves.
+        * If kgnilnd_post_rdma returns a resource error, kgnilnd_rdma will put
+        * the tx back on the TX_MAPQ. When this tx is pulled back off the MAPQ,
+        * its gnm_type will now be GNILND_MSG_PUT_DONE or
+        * GNILND_MSG_GET_DONE_REV.
+        */
        case GNILND_MSG_GET_REQ:
                tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
                tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
@@ -3578,18 +3675,20 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                break;
 
        /* PUT_REQ and GET_DONE are where we do the actual RDMA */
+       case GNILND_MSG_PUT_DONE:
        case GNILND_MSG_PUT_REQ:
-               kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
                             &tx->tx_putinfo.gnpam_desc,
                             tx->tx_putinfo.gnpam_desc.gnrd_nob,
                             tx->tx_putinfo.gnpam_dst_cookie);
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_GET_DONE:
-               kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
                             &tx->tx_getinfo.gngm_desc,
                             tx->tx_lntmsg[0]->msg_len,
                             tx->tx_getinfo.gngm_cookie);
-
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_PUT_REQ_REV:
                tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
@@ -3603,10 +3702,11 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
                break;
        case GNILND_MSG_PUT_DONE_REV:
-               kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
                             &tx->tx_getinfo.gngm_desc,
                             tx->tx_nob,
                             tx->tx_getinfo.gngm_cookie);
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_GET_ACK_REV:
                tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
@@ -3621,12 +3721,13 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                /* redirect to FMAQ on failure, no need to infinite-loop here in MAPQ */
                rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
                break;
+       case GNILND_MSG_GET_DONE_REV:
        case GNILND_MSG_GET_REQ_REV:
-               kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
                                &tx->tx_putinfo.gnpam_desc,
                                tx->tx_putinfo.gnpam_desc.gnrd_nob,
                                tx->tx_putinfo.gnpam_dst_cookie);
-
+               RETURN(try_map_if_full ? rc : 0);
                break;
        }
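
/* Editorial sketch, not part of the patch: the error-propagation pattern
 * the PUT_DONE/GET_DONE cases above now share. kgnilnd_rdma() can fail
 * with a resource error and park the tx back on the MAPQ itself, so the
 * rc only matters when try_map_if_full is set and the caller does its
 * own requeueing. The helper name is hypothetical; the calls mirror the
 * surrounding code. */
static int
example_send_put_done(kgn_tx_t *tx, int try_map_if_full)
{
	int rc;

	rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
			  &tx->tx_putinfo.gnpam_desc,
			  tx->tx_putinfo.gnpam_desc.gnrd_nob,
			  tx->tx_putinfo.gnpam_dst_cookie);

	/* rc matters only to callers that handle requeueing themselves */
	return try_map_if_full ? rc : 0;
}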
 
@@ -3710,7 +3811,7 @@ kgnilnd_process_fmaq(kgn_conn_t *conn)
        GNITX_ASSERTF(tx, tx->tx_id.txe_smsg_id != 0,
                      "tx with zero id", NULL);
 
-       CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie "LPX64"\n",
+       CDEBUG(D_NET, "sending regular msg: %p, type %s(0x%02x), cookie %#llx\n",
               tx, kgnilnd_msgtype2str(tx->tx_msg.gnm_type),
               tx->tx_msg.gnm_type, tx->tx_id.txe_cookie);
 
@@ -3908,8 +4009,8 @@ _kgnilnd_match_reply(kgn_conn_t *conn, int type1, int type2, __u64 cookie)
                GNITX_ASSERTF(tx, ((tx->tx_id.txe_idx == ev_id.txe_idx) &&
                                  (tx->tx_id.txe_cookie == cookie)),
                              "conn 0x%p->%s tx_ref_table hosed: wanted "
-                             "txe_cookie "LPX64" txe_idx %d "
-                             "found tx %p cookie "LPX64" txe_idx %d\n",
+                             "txe_cookie %#llx txe_idx %d "
+                             "found tx %p cookie %#llx txe_idx %d\n",
                              conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                              cookie, ev_id.txe_idx,
                              tx, tx->tx_id.txe_cookie, tx->tx_id.txe_idx);
@@ -3923,7 +4024,7 @@ _kgnilnd_match_reply(kgn_conn_t *conn, int type1, int type2, __u64 cookie)
                        tx->tx_state, GNILND_TX_WAITING_REPLY,
                        libcfs_nid2str(conn->gnc_peer->gnp_nid));
        } else {
-               CWARN("Unmatched reply %02x, or %02x/"LPX64" from %s\n",
+               CWARN("Unmatched reply %02x, or %02x/%#llx from %s\n",
                      type1, type2, cookie, libcfs_nid2str(conn->gnc_peer->gnp_nid));
        }
        return tx;
@@ -3951,7 +4052,7 @@ kgnilnd_complete_tx(kgn_tx_t *tx, int rc)
        tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
 
        if (rc == -EFAULT) {
-               CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16"LPF64"u physnop %8d buffertype %#8x MemHandle "LPX64"."LPX64"x\n",
+               CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16llu physnop %8d buffertype %#8x MemHandle %#llx.%#llx\n",
                        rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2);
 
                if (*kgnilnd_tunables.kgn_efault_lbug) {
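
/* Editorial sketch, not part of the patch: the kgn_efault_lbug tunable
 * above gates whether the -EFAULT diagnostics escalate to LBUG(). The
 * helper name is hypothetical; the tunable and LBUG() come from the
 * surrounding code. */
static inline void
example_efault_escalate(int rc)
{
	if (rc == -EFAULT && *kgnilnd_tunables.kgn_efault_lbug)
		LBUG();	/* crash for a dump only when the admin opted in */
}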
@@ -4021,7 +4122,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                RETURN_EXIT;
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times.
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -4050,7 +4151,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                cfs_duration_sec(timestamp - newest_last_rx),
                cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                rc = -ETIME;
                kgnilnd_close_conn(conn, rc);
                RETURN_EXIT;
@@ -4059,8 +4160,8 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
        rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
 
        if (rrc == GNI_RC_NOT_DONE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
-               CDEBUG(D_INFO, "SMSG RX empty\n");
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
                RETURN_EXIT;
        }
 
@@ -4073,7 +4174,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
         */
 
        if (rrc == GNI_RC_INVALID_STATE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
                        "detected closing conn %p from peer %s\n", conn,
                        libcfs_nid2str(conn->gnc_peer->gnp_nid));
@@ -4090,18 +4191,18 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
 
        rx = kgnilnd_alloc_rx();
        if (rx == NULL) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                kgnilnd_release_msg(conn);
                GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
                           conn, libcfs_nid2str(peer->gnp_nid));
                RETURN_EXIT;
        }
 
-       GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p from %s",
-               conn, libcfs_nid2str(peer->gnp_nid));
+       GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
 
        timestamp = conn->gnc_last_rx;
-       last_seq = conn->gnc_rx_seq;
+       seq = last_seq = atomic_read(&conn->gnc_rx_seq);
+       atomic_inc(&conn->gnc_rx_seq);
 
        conn->gnc_last_rx = jiffies;
        /* stash first rx so we can clear out purgatory
@@ -4109,10 +4210,8 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
        if (conn->gnc_first_rx == 0)
                conn->gnc_first_rx = jiffies;
 
-       seq = conn->gnc_rx_seq++;
-
        /* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
        kgnilnd_peer_alive(conn->gnc_peer);
 
        rx->grx_msg = msg;
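
/* Editorial sketch, not part of the patch: gnc_rx_seq (like gnc_tx_seq,
 * which the debug output further down samples) is now an atomic_t so it
 * can be read safely outside gnd_cq_mutex. Under the mutex the pair
 * below is equivalent to the old "seq = conn->gnc_rx_seq++". */
static inline int
example_next_rx_seq(kgn_conn_t *conn)
{
	int seq = atomic_read(&conn->gnc_rx_seq);	/* seq for this rx */

	atomic_inc(&conn->gnc_rx_seq);			/* advance for next */
	return seq;
}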
@@ -4200,7 +4299,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
        }
 
        if (msg->gnm_connstamp != conn->gnc_peer_connstamp) {
-               GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp "LPX64"("LPX64
+               GNIDBG_MSG(D_NETERROR, msg, "Unexpected connstamp %#llx(%#llx"
                       " expected) from %s",
                       msg->gnm_connstamp, conn->gnc_peer_connstamp,
                       libcfs_nid2str(peer->gnp_nid));
@@ -4236,7 +4335,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                                       conn, last_seq,
                                       cfs_duration_sec(now - timestamp),
                                       cfs_duration_sec(now - conn->gnc_last_rx_cq),
-                                      conn->gnc_tx_seq,
+                                      atomic_read(&conn->gnc_tx_seq),
                                       cfs_duration_sec(now - conn->gnc_last_tx),
                                       cfs_duration_sec(now - conn->gnc_last_tx_cq),
                                       cfs_duration_sec(now - conn->gnc_last_noop_want),
@@ -4698,6 +4797,11 @@ kgnilnd_process_mapped_tx(kgn_device_t *dev)
                         * mapped so we can reset our timers */
                        dev->gnd_map_attempt = 0;
                        continue;
+               } else if (rc == -EAGAIN) {
+                       spin_lock(&dev->gnd_lock);
+                       mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
+                       spin_unlock(&dev->gnd_lock);
+                       GOTO(get_out_mapped, rc);
                } else if (rc != -ENOMEM) {
                        /* carp, failure we can't handle */
                        kgnilnd_tx_done(tx, rc);
@@ -4752,7 +4856,7 @@ kgnilnd_process_mapped_tx(kgn_device_t *dev)
                } else {
                       GNIDBG_TX(log_retrans_level, tx,
                                "transient map failure #%d %d pages/%d bytes phys %u@%u "
-                               "virt %u@"LPU64" "
+                               "virt %u@%llu "
                                "nq_map %d mdd# %d/%d GART %ld",
                                dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
                                dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
@@ -4797,6 +4901,12 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
 
                conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist);
                list_del_init(&conn->gnc_schedlist);
+               /* Since we are processing this conn now, we no longer
+                * need it on the delaylist. */
+               if (!list_empty(&conn->gnc_delaylist))
+                       list_del_init(&conn->gnc_delaylist);
                spin_unlock(&dev->gnd_lock);
 
                conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_PROCESS);
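
/* Editorial sketch, not part of the patch: the delaylist hunk above keeps
 * a simple invariant - once a conn is pulled off gnd_ready_conns for
 * processing it must also leave gnd_delay_conns, or the delay timer could
 * schedule it a second time. gnd_lock is assumed held, as at the call
 * site. */
static inline void
example_unpark_conn(kgn_conn_t *conn)
{
	/* list_del_init() leaves the entry self-linked, so the
	 * list_empty() check stays cheap and a later re-park is safe */
	if (!list_empty(&conn->gnc_delaylist))
		list_del_init(&conn->gnc_delaylist);
}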
@@ -4823,7 +4933,7 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
                                kgnilnd_conn_decref(conn);
                                up_write(&dev->gnd_conn_sem);
                        } else if (rc != 1) {
-                       kgnilnd_conn_decref(conn);
+                               kgnilnd_conn_decref(conn);
                        }
                        /* clear this so that scheduler thread doesn't spin */
                        found_work = 0;
@@ -4842,9 +4952,9 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
                                 * yet. Cycle this conn back through
                                 * the scheduler. */
                                kgnilnd_schedule_conn(conn);
-                       } else
-                       kgnilnd_complete_closed_conn(conn);
-
+                       } else {
+                               kgnilnd_complete_closed_conn(conn);
+                       }
                        up_write(&dev->gnd_conn_sem);
                } else if (unlikely(conn->gnc_state == GNILND_CONN_DESTROY_EP)) {
                        /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
@@ -4874,7 +4984,7 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
                        kgnilnd_conn_decref(conn);
                        up_write(&dev->gnd_conn_sem);
                } else if (rc != 1) {
-               kgnilnd_conn_decref(conn);
+                       kgnilnd_conn_decref(conn);
                }
 
                /* check list again with lock held */