LU-7850 gnilnd: Fix niov calculation with offset kiov
[fs/lustre-release.git] / lnet/klnds/gnilnd/gnilnd_cb.c
index 381aa64..d9839ce 100644
@@ -4,6 +4,7 @@
  * Copyright (C) 2009-2012 Cray, Inc.
  *
  *   Derived from work by Eric Barton <eric@bartonsoftware.com>
+ *   Author: James Shimek <jshimek@cray.com>
  *   Author: Nic Henke <nic@cray.com>
  *
  *   This file is part of Lustre, http://www.lustre.org.
@@ -23,6 +24,7 @@
  *
  */
 
+#include <asm/page.h>
 #include <linux/nmi.h>
 #include "gnilnd.h"
 
@@ -118,7 +120,7 @@ kgnilnd_device_callback(__u32 devid, __u64 arg)
  * == 0: reschedule if someone marked him WANTS_SCHED
  * > 0 : force a reschedule */
 /* Return code 0 means it did not schedule the conn, 1
- *  means it succesfully scheduled the conn.
+ * means it successfully scheduled the conn.
  */
 
 int
@@ -228,7 +230,7 @@ kgnilnd_free_tx(kgn_tx_t *tx)
 
        /* we only allocate this if we need to */
        if (tx->tx_phys != NULL) {
-               cfs_mem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
+               kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
                CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
                       LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
        }
@@ -242,9 +244,8 @@ kgnilnd_free_tx(kgn_tx_t *tx)
 #if 0
        KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
 #endif
-       cfs_mem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
-       CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n",
-              sizeof(*tx), tx);
+       CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
+       kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
 }
 
 kgn_tx_t *
@@ -255,7 +256,7 @@ kgnilnd_alloc_tx (void)
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
                return tx;
 
-       tx = cfs_mem_cache_alloc(kgnilnd_data.kgn_tx_cache, CFS_ALLOC_ATOMIC);
+       tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
        if (tx == NULL) {
                CERROR("failed to allocate tx\n");
                return NULL;
@@ -489,9 +490,9 @@ kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_ni
 }
 
 int
-kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *iov,
-                              lnet_kiov_t *kiov, unsigned int offset, unsigned int nob)
-
+kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
+                              struct kvec *iov, lnet_kiov_t *kiov,
+                              unsigned int offset, unsigned int nob)
 {
        kgn_msg_t       *msg = &tx->tx_msg;
        int              i;
@@ -500,13 +501,19 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
         * gni_smsg_send to send that as the payload */
 
        LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
-       LASSERT(nob >= 0);
 
        if (nob == 0) {
                tx->tx_buffer = NULL;
        } else if (kiov != NULL) {
+
+               if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
+                       niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) /
+                               PAGE_SIZE);
+               }
+
                LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
-                        "bad niov %d\n", niov);
+                       "bad niov %d msg %p kiov %p iov %p offset %d nob %d\n",
+                       niov, msg, kiov, iov, offset, nob);
 
                while (offset >= kiov->kiov_len) {
                        offset -= kiov->kiov_len;
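
The clamp added in the hunk above is the substance of LU-7850: when the caller-supplied niov looks too large for nob, it is recomputed from the payload length plus the message offset and the first fragment's kiov_offset, so a kiov that does not start on a page boundary is counted in whole pages actually spanned. A minimal standalone sketch of that arithmetic follows; it is illustrative only, not part of the patch, and the helper name is made up.

#include <asm/page.h>	/* PAGE_SIZE, which the patch now includes explicitly */

/* Upper bound on the pages an immediate payload of 'nob' bytes spans,
 * measured from the start of the first fragment's page, when the
 * payload begins 'offset' bytes into a kiov whose first fragment
 * starts 'kiov_offset' bytes into its page. */
static unsigned int
immediate_payload_pages(unsigned int nob, unsigned int offset,
			unsigned int kiov_offset)
{
	return (nob + offset + kiov_offset + PAGE_SIZE - 1) / PAGE_SIZE;
}

/* Example with 4 KiB pages: nob = 4096, offset = 0, kiov_offset = 2048
 * gives (4096 + 0 + 2048 + 4095) / 4096 = 2 pages, whereas nob/PAGE_SIZE
 * alone would claim 1 - which is why the check above only trusts the
 * caller's niov when it is no larger than nob/PAGE_SIZE. */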
@@ -522,7 +529,7 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
                         * than kiov_len, we will also have a hole at the end of that page
                         * which isn't allowed */
                        if ((kiov[i].kiov_offset != 0 && i > 0) ||
-                           (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1)) {
+                           (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
                                CNETERR("Can't make payload contiguous in I/O VM:"
                                       "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
                                       i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
@@ -599,7 +606,7 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
 
 int
 kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
-                         unsigned int niov, struct iovec *iov,
+                         unsigned int niov, struct kvec *iov,
                          unsigned int offset, unsigned int nob)
 
 {
@@ -640,8 +647,8 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
        LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
 
        /* only allocate this if we are going to use it */
-       tx->tx_phys = cfs_mem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
-                                             CFS_ALLOC_ATOMIC);
+       tx->tx_phys = kmem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
+                                             GFP_ATOMIC);
        if (tx->tx_phys == NULL) {
                CERROR("failed to allocate tx_phys\n");
                rc = -ENOMEM;
@@ -711,7 +718,7 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
                               "nkiov %u offset %u\n",
                      kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
 
-               phys->address = lnet_page2phys(kiov->kiov_page);
+               phys->address = page_to_phys(kiov->kiov_page);
                phys++;
                kiov++;
                nkiov--;
@@ -729,7 +736,7 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
 
 error:
        if (tx->tx_phys != NULL) {
-               cfs_mem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
+               kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
                CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
                       sizeof(*tx->tx_phys), tx->tx_phys);
                tx->tx_phys = NULL;
@@ -739,7 +746,7 @@ error:
 
 static inline int
 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
-                         struct iovec *iov, lnet_kiov_t *kiov,
+                         struct kvec *iov, lnet_kiov_t *kiov,
                          unsigned int offset, unsigned int nob)
 {
        int     rc;
@@ -1099,7 +1106,7 @@ kgnilnd_add_purgatory_tx(kgn_tx_t *tx)
        if (tx->tx_buffer_copy)
                gmp->gmp_map_key = tx->tx_buffer_copy_map_key;
        else
-       gmp->gmp_map_key = tx->tx_map_key;
+               gmp->gmp_map_key = tx->tx_map_key;
 
        atomic_inc(&conn->gnc_device->gnd_n_mdd_held);
 
@@ -1158,6 +1165,7 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                 * verified peer notification  - the theory is that
                 * a TX error can be communicated in all other cases */
                if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
+                   error != -GNILND_NOPURG &&
                    kgnilnd_check_purgatory_conn(tx->tx_conn)) {
                        kgnilnd_add_purgatory_tx(tx);
 
@@ -1177,8 +1185,8 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                        rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, 0);
                        LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
                } else {
-               rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
-               LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
+                       rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
+                       LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
                }
 
                tx->tx_buftype--;
@@ -1209,7 +1217,7 @@ kgnilnd_tx_done(kgn_tx_t *tx, int completion)
                       libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
                       tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
                       kgnilnd_tx_state2str(tx->tx_list_state),
-                      cfs_duration_sec((long)jiffies - tx->tx_qtime));
+                      cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
        }
 
        /* The error codes determine if we hold onto the MDD */
@@ -1327,7 +1335,7 @@ search_again:
         * if we are sending to the same node faster than 256000/sec.
         * To help guard against this, we OR in the tx_seq - that is 32 bits */
 
-       tx->tx_id.txe_chips = (__u32)(jiffies | conn->gnc_tx_seq);
+       tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
 
        GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
 
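
For reference, the line changed just above builds tx->tx_id.txe_chips by OR'ing the low 32 bits of jiffies with the per-connection send sequence, which this patch converts to an atomic_t so it can be read outside the old locking. A minimal sketch of that packing, with made-up names and not part of the patch:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

/* Illustrative only: mix the current jiffies value with a
 * per-connection send sequence into a 32-bit cookie, mirroring the
 * txe_chips assignment above so that sends issued to the same peer
 * within one jiffy still differ in the low bits. */
static u32
make_tx_cookie(atomic_t *tx_seq)
{
	return (u32)(jiffies | (u32)atomic_read(tx_seq));
}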
@@ -1429,7 +1437,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
         * close message.
         */
        if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                /* Return -ETIME, we are closing the connection already so we don't want to
                 * have this tx hit the wire. The tx will be killed by the calling function.
                 * Once the EP is marked dirty the close message will be the last
@@ -1447,11 +1456,13 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        }
 
        if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
-               GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Cant send to %s after timeout lapse of %lu; TO %lu",
+               GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn,
+                           "Cant send to %s after timeout lapse of %lu; TO %lu\n",
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                cfs_duration_sec(now - newest_last_rx),
                cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                return -ETIME;
        }
 
@@ -1462,7 +1473,7 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
         */
        msg->gnm_connstamp = conn->gnc_my_connstamp;
        msg->gnm_payload_len = immediatenob;
-       msg->gnm_seq = conn->gnc_tx_seq;
+       msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
 
        /* always init here - kgn_checksum is a /sys module tunable
         * and can be flipped at any point, even between msg init and sending */
@@ -1486,14 +1497,15 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
                rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
        } else {
-       rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
-                                   msg, sizeof(*msg), immediate, immediatenob,
-                           tx->tx_id.txe_smsg_id);
+               rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
+                                       msg, sizeof(*msg), immediate,
+                                       immediatenob,
+                                       tx->tx_id.txe_smsg_id);
        }
 
        switch (rrc) {
        case GNI_RC_SUCCESS:
-               conn->gnc_tx_seq++;
+               atomic_inc(&conn->gnc_tx_seq);
                conn->gnc_last_tx = jiffies;
                /* no locking here as LIVE isn't a list */
                kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
@@ -1507,7 +1519,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
 
                /* serialize with seeing CQ events for completion on this, as well as
                 * tx_seq */
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
 
                atomic_inc(&conn->gnc_device->gnd_short_ntx);
                atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
@@ -1519,8 +1532,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
                /* XXX Nic: We need to figure out how to track this
                 * - there are bound to be good reasons for it,
                 * but we want to know when it happens */
-
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                /* We'll handle this error inline - makes the calling logic much more
                 * clean */
 
@@ -1557,7 +1570,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
                }
        default:
                /* handle bad retcode gracefully */
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                return -EIO;
        }
 }
@@ -1572,7 +1586,8 @@ kgnilnd_sendmsg(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        int              rc;
 
        timestamp = jiffies;
-       mutex_lock(&dev->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
+       kgnilnd_conn_mutex_lock(&tx->tx_conn->gnc_smsg_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -1617,7 +1632,8 @@ kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob
                rc = 0;
        } else {
                atomic_inc(&conn->gnc_device->gnd_fast_try);
-               rc = mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
+               rc = kgnilnd_trylock(&conn->gnc_device->gnd_cq_mutex,
+                                    &conn->gnc_smsg_mutex);
        }
        if (!rc) {
                rc = -EAGAIN;
@@ -1697,7 +1713,7 @@ kgnilnd_queue_rdma(kgn_conn_t *conn, kgn_tx_t *tx)
 void
 kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
 {
-       int            rc;
+       int            rc = 0;
        int            add_tail = 1;
 
        /* set the tx_id here, we delay it until we have an actual conn
@@ -1760,6 +1776,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
        kgn_peer_t      *new_peer = NULL;
        kgn_conn_t      *conn = NULL;
        int              rc;
+       int              node_state;
 
        ENTRY;
 
@@ -1800,6 +1817,8 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
 
        CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
 
+       node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
+
        /* NB - this will not block during normal operations -
         * the only writer of this is in the startup/shutdown path. */
        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
@@ -1811,7 +1830,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
        /* ignore previous peer entirely - we cycled the lock, so we
         * will create new peer and at worst drop it if peer is still
         * in the tables */
-       rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net);
+       rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
        if (rc != 0) {
                up_read(&kgnilnd_data.kgn_net_rw_sem);
                GOTO(no_peer, rc);
@@ -1824,7 +1843,20 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
         * if we don't find it, add our new one to the list */
        kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
 
+       /* don't create a connection if the peer is not up */
+       if (peer->gnp_down != GNILND_RCA_NODE_UP) {
+               write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+               rc = -ENETRESET;
+               GOTO(no_peer, rc);
+       }
+
        conn = kgnilnd_find_or_create_conn_locked(peer);
+
+       if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DGRAM_DROP_TX)) {
+               write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+               GOTO(no_peer, rc);
+       }
+
        if (conn != NULL) {
                /* oh hey, found a conn now... magical */
                kgnilnd_queue_tx(conn, tx);
@@ -1840,7 +1872,7 @@ no_peer:
        RETURN_EXIT;
 }
 
-void
+int
 kgnilnd_rdma(kgn_tx_t *tx, int type,
            kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
 {
@@ -1910,7 +1942,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
                                        /* allocation of buffer failed nak the rdma */
                                        kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
                                        kgnilnd_tx_done(tx, -EFAULT);
-                                       return;
+                                       return 0;
                                }
                                kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
                                rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
@@ -1920,7 +1952,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
                                        tx->tx_buffer_copy = NULL;
                                        kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
                                        kgnilnd_tx_done(tx, -EFAULT);
-                                       return;
+                                       return 0;
                                }
                        }
                        desc_map_key = tx->tx_buffer_copy_map_key;
@@ -1950,21 +1982,23 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
 
        if (nob == 0) {
                kgnilnd_queue_tx(conn, tx);
-               return;
+               return 0;
        }
 
        /* Don't lie (CLOSE == RDMA idle) */
        LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
                 tx, conn, conn->gnc_close_sent);
 
-       GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x dlvr_mode 0x%x cookie:"LPX64,
-               type, tx->tx_rdma_desc.dlvr_mode, cookie);
+       GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
+               "0x%x cookie:"LPX64,
+               type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
 
        /* set CQ dedicated for RDMA */
        tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -1972,17 +2006,35 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
 
        rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
 
+       if (rrc == GNI_RC_ERROR_RESOURCE) {
+               kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_unmap_buffer(tx, 0);
+
+               if (tx->tx_buffer_copy != NULL) {
+                       vfree(tx->tx_buffer_copy);
+                       tx->tx_buffer_copy = NULL;
+               }
+
+               spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
+               kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn,
+                                           GNILND_TX_MAPQ, 0);
+               spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
+               kgnilnd_schedule_device(tx->tx_conn->gnc_device);
+               return -EAGAIN;
+       }
+
        spin_lock(&conn->gnc_list_lock);
        kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
        tx->tx_qtime = jiffies;
        spin_unlock(&conn->gnc_list_lock);
-
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
 
        /* XXX Nic: is this a place we should handle more errors for
         * robustness sake */
        LASSERT(rrc == GNI_RC_SUCCESS);
-
+       return 0;
 }
 
 kgn_rx_t *
@@ -1990,7 +2042,7 @@ kgnilnd_alloc_rx(void)
 {
        kgn_rx_t        *rx;
 
-       rx = cfs_mem_cache_alloc(kgnilnd_data.kgn_rx_cache, CFS_ALLOC_ATOMIC);
+       rx = kmem_cache_alloc(kgnilnd_data.kgn_rx_cache, GFP_ATOMIC);
        if (rx == NULL) {
                CERROR("failed to allocate rx\n");
                return NULL;
@@ -2013,14 +2065,14 @@ kgnilnd_release_msg(kgn_conn_t *conn)
        CDEBUG(D_NET, "consuming %p\n", conn);
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
        conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
 
        rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
 
        LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
        GNIDBG_SMSG_CREDS(D_NET, conn);
@@ -2037,6 +2089,7 @@ kgnilnd_consume_rx(kgn_rx_t *rx)
        /* if we are eager, free the cache alloc'd msg */
        if (unlikely(rx->grx_eager)) {
                LIBCFS_FREE(rxmsg, sizeof(*rxmsg) + *kgnilnd_tunables.kgn_max_immediate);
+               atomic_dec(&kgnilnd_data.kgn_neager_allocs);
 
                /* release ref from eager_recv */
                kgnilnd_conn_decref(conn);
@@ -2045,7 +2098,7 @@ kgnilnd_consume_rx(kgn_rx_t *rx)
                kgnilnd_release_msg(conn);
        }
 
-       cfs_mem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
+       kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
        CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
               sizeof(*rx), rx);
 
@@ -2061,7 +2114,7 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
        int               target_is_router = lntmsg->msg_target_is_router;
        int               routing = lntmsg->msg_routing;
        unsigned int      niov = lntmsg->msg_niov;
-       struct iovec     *iov = lntmsg->msg_iov;
+       struct kvec      *iov = lntmsg->msg_iov;
        lnet_kiov_t      *kiov = lntmsg->msg_kiov;
        unsigned int      offset = lntmsg->msg_offset;
        unsigned int      nob = lntmsg->msg_len;
@@ -2230,7 +2283,7 @@ kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
        kgn_conn_t    *conn = rx->grx_conn;
        kgn_msg_t     *rxmsg = rx->grx_msg;
        unsigned int   niov = lntmsg->msg_niov;
-       struct iovec  *iov = lntmsg->msg_iov;
+       struct kvec   *iov = lntmsg->msg_iov;
        lnet_kiov_t   *kiov = lntmsg->msg_kiov;
        unsigned int   offset = lntmsg->msg_offset;
        unsigned int   nob = lntmsg->msg_len;
@@ -2342,6 +2395,15 @@ kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 
        /* we have no credits or buffers for this message, so copy it
         * somewhere for a later kgnilnd_recv */
+       if (atomic_read(&kgnilnd_data.kgn_neager_allocs) >=
+                       *kgnilnd_tunables.kgn_eager_credits) {
+               CERROR("Out of eager credits to %s\n",
+                       libcfs_nid2str(conn->gnc_peer->gnp_nid));
+               return -ENOMEM;
+       }
+
+       atomic_inc(&kgnilnd_data.kgn_neager_allocs);
+
        LIBCFS_ALLOC(eagermsg, sizeof(*eagermsg) + *kgnilnd_tunables.kgn_max_immediate);
        if (eagermsg == NULL) {
                kgnilnd_conn_decref(conn);
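
The check added above pairs with the atomic_dec added to kgnilnd_consume_rx earlier in this patch: together they cap how many eager receive copies can be outstanding, with kgn_neager_allocs counted against the kgn_eager_credits tunable. A minimal sketch of that take/put pattern follows; it is illustrative only and the helper names are made up.

#include <linux/atomic.h>
#include <linux/errno.h>

/* Take a credit before allocating an eager copy; give it back when
 * the eager rx is consumed.  The check-then-increment mirrors the
 * patch: it is an advisory cap, not a hard atomic reservation. */
static int
take_eager_credit(atomic_t *nallocs, int limit)
{
	if (atomic_read(nallocs) >= limit)
		return -ENOMEM;		/* caller fails the eager recv */
	atomic_inc(nallocs);
	return 0;
}

static void
put_eager_credit(atomic_t *nallocs)
{
	atomic_dec(nallocs);
}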
@@ -2370,7 +2432,7 @@ kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 int
 kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
             int delayed, unsigned int niov,
-            struct iovec *iov, lnet_kiov_t *kiov,
+            struct kvec *iov, lnet_kiov_t *kiov,
             unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
        kgn_rx_t    *rx = private;
@@ -2515,7 +2577,7 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
 
                tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
-
+               tx->tx_qtime = jiffies;
                /* we only queue from kgnilnd_recv - we might get called from other contexts
                 * and we don't want to block the mutex in those cases */
 
@@ -2737,8 +2799,10 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
        int                     rc = 0;
        int                     count = 0;
        int                     reconnect;
+       int                     to_reconn;
        short                   releaseconn = 0;
        unsigned long           first_rx = 0;
+       int                     purgatory_conn_cnt = 0;
 
        CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
                peer, libcfs_nid2str(peer->gnp_nid),
@@ -2804,13 +2868,19 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
        reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
                    (atomic_read(&peer->gnp_dirty_eps) == 0);
 
+       /* fast reconnect after a timeout */
+       to_reconn = !conn &&
+                   (peer->gnp_last_errno == -ETIMEDOUT) &&
+                   *kgnilnd_tunables.kgn_fast_reconn;
+
        /* if we are not connected and there are tx on the gnp_tx_queue waiting
         * to be sent, we'll check the reconnect interval and fire up a new
         * connection request */
 
-       if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
+       if (reconnect &&
+           (peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
-            !list_empty(&peer->gnp_tx_queue) && reconnect) {
+           (!list_empty(&peer->gnp_tx_queue) || to_reconn)) {
 
                CDEBUG(D_NET, "starting connect to %s\n",
                        libcfs_nid2str(peer->gnp_nid));
@@ -2878,6 +2948,30 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
                                        cfs_duration_sec(waiting));
 
                                kgnilnd_detach_purgatory_locked(conn, souls);
+                       } else {
+                               purgatory_conn_cnt++;
+                       }
+               }
+       }
+
+       /* If we have too many connections in purgatory we could run out of
+        * resources. Limit the number of connections to a tunable number,
+        * clean up to the minimum all in one fell swoop... there are
+        * situations where dvs will retry tx's and we can eat up several
+        * hundred connection requests at once.
+        */
+       if (purgatory_conn_cnt > *kgnilnd_tunables.kgn_max_purgatory) {
+               list_for_each_entry_safe(conn, connN, &peer->gnp_conns,
+                                        gnc_list) {
+                       if (conn->gnc_in_purgatory &&
+                           conn->gnc_state == GNILND_CONN_DONE) {
+                               CDEBUG(D_NET, "Dropping Held resource due to"
+                                             " resource limits being hit\n");
+                               kgnilnd_detach_purgatory_locked(conn, souls);
+
+                               if (purgatory_conn_cnt-- <
+                                   *kgnilnd_tunables.kgn_max_purgatory)
+                                       break;
                        }
                }
        }
@@ -2946,7 +3040,6 @@ kgnilnd_reaper(void *arg)
        struct timer_list  timer;
        DEFINE_WAIT(wait);
 
-       cfs_daemonize("kgnilnd_rpr");
        cfs_block_allsigs();
 
        /* all gnilnd threads need to run fairly urgently */
@@ -2980,7 +3073,7 @@ kgnilnd_reaper(void *arg)
                                    next_check_time);
                        mod_timer(&timer, (long) jiffies + timeout);
 
-                       /* check flag variables before comitting */
+                       /* check flag variables before committing */
                        if (!kgnilnd_data.kgn_shutdown &&
                            !kgnilnd_data.kgn_quiesce_trigger) {
                                CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
@@ -3043,7 +3136,7 @@ kgnilnd_recv_bte_get(kgn_tx_t *tx) {
                lnet_copy_flat2kiov(
                        niov, kiov, offset,
                        nob,
-                       tx->tx_buffer_copy, tx->tx_offset, nob);
+                       tx->tx_buffer_copy + tx->tx_offset, 0, nob);
        } else {
                memcpy(tx->tx_buffer, tx->tx_buffer_copy + tx->tx_offset, nob);
        }
@@ -3063,6 +3156,9 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
        long                   num_processed = 0;
        kgn_conn_t            *conn = NULL;
        kgn_tx_t              *tx = NULL;
+       kgn_rdma_desc_t       *rdesc;
+       unsigned int           rnob;
+       __u64                  rcookie;
 
        for (;;) {
                /* make sure we don't keep looping if we need to reset */
@@ -3085,7 +3181,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                }
 
                if (rrc == GNI_RC_NOT_DONE) {
-                       mutex_unlock(&dev->gnd_cq_mutex);
+                       kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
                        CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
                               dev->gnd_id, num_processed);
                        return num_processed;
@@ -3102,7 +3198,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
 
                rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
                                            &desc);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                /* XXX Nic: Need better error handling here... */
                LASSERTF((rrc == GNI_RC_SUCCESS) ||
@@ -3146,9 +3242,16 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                }
 
                /* remove from rdmaq */
+               kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
                spin_lock(&conn->gnc_list_lock);
                kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                spin_unlock(&conn->gnc_list_lock);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+
+               if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
+                       event_data = 1LL << 48;
+                       rc = 1;
+               }
 
                if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
                        atomic_inc(&dev->gnd_rdma_ntx);
@@ -3160,6 +3263,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                        /* drop ref from kgnilnd_validate_tx_ev_id */
                        kgnilnd_admin_decref(conn->gnc_tx_in_use);
                        kgnilnd_conn_decref(conn);
+
                        continue;
                }
 
@@ -3183,31 +3287,28 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
 
                if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
                    tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
-                       if (should_retry) {
-                               kgnilnd_rdma(tx, tx->tx_msg.gnm_type,
-                                            &tx->tx_putinfo.gnpam_desc,
-                                            tx->tx_putinfo.gnpam_desc.gnrd_nob,
-                                            tx->tx_putinfo.gnpam_dst_cookie);
-                       } else {
-                               kgnilnd_nak_rdma(conn, tx->tx_msg.gnm_type,
-                                               -EFAULT,
-                                               tx->tx_putinfo.gnpam_dst_cookie,
-                                               tx->tx_msg.gnm_srcnid);
-                               kgnilnd_tx_done(tx, -EFAULT);
-                       }
+                       rdesc    = &tx->tx_putinfo.gnpam_desc;
+                       rnob     = tx->tx_putinfo.gnpam_desc.gnrd_nob;
+                       rcookie  = tx->tx_putinfo.gnpam_dst_cookie;
                } else {
-                       if (should_retry) {
-                               kgnilnd_rdma(tx, tx->tx_msg.gnm_type,
-                                            &tx->tx_getinfo.gngm_desc,
-                                            tx->tx_lntmsg[0]->msg_len,
-                                            tx->tx_getinfo.gngm_cookie);
-                       } else {
-                               kgnilnd_nak_rdma(conn, tx->tx_msg.gnm_type,
-                                               -EFAULT,
-                                               tx->tx_getinfo.gngm_cookie,
-                                               tx->tx_msg.gnm_srcnid);
-                               kgnilnd_tx_done(tx, -EFAULT);
-                       }
+                       rdesc    = &tx->tx_getinfo.gngm_desc;
+                       rnob     = tx->tx_lntmsg[0]->msg_len;
+                       rcookie  = tx->tx_getinfo.gngm_cookie;
+               }
+
+               if (should_retry) {
+                       kgnilnd_rdma(tx,
+                                    tx->tx_msg.gnm_type,
+                                    rdesc,
+                                    rnob, rcookie);
+               } else {
+                       kgnilnd_nak_rdma(conn,
+                                        tx->tx_msg.gnm_type,
+                                        -EFAULT,
+                                        rcookie,
+                                        tx->tx_msg.gnm_srcnid);
+                       kgnilnd_tx_done(tx, -GNILND_NOPURG);
+                       kgnilnd_close_conn(conn, -ECOMM);
                }
 
                /* drop ref from kgnilnd_validate_tx_ev_id */
@@ -3241,7 +3342,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                }
 
                rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                if (rrc == GNI_RC_NOT_DONE) {
                        CDEBUG(D_INFO,
@@ -3307,6 +3408,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                }
 
                /* lock tx_list_state and tx_state */
+               kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
                spin_lock(&tx->tx_conn->gnc_list_lock);
 
                GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
@@ -3327,6 +3429,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
 
                spin_unlock(&tx->tx_conn->gnc_list_lock);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
 
                if (queued_fma) {
                        CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
@@ -3395,7 +3498,7 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
                        return 1;
                }
                rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                if (rrc == GNI_RC_NOT_DONE) {
                        CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" "
@@ -3506,7 +3609,8 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                rc = kgnilnd_map_buffer(tx);
        }
 
-       /* rc should be 0 if we mapped succesfully here, if non-zero we are queueing */
+       /* rc should be 0 if we mapped successfully here, if non-zero
+        * we are queueing */
        if (rc != 0) {
                /* if try_map_if_full set, they handle requeuing */
                if (unlikely(try_map_if_full)) {
@@ -3530,7 +3634,12 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
         * remote node where the RDMA will be started
         * Special case -EAGAIN logic - this should just be queued as if the mapping couldn't
         * be satisfied. The rest of the errors are "hard" errors that require
-        * upper layers to handle themselves */
+        * upper layers to handle themselves.
+        * If kgnilnd_post_rdma returns a resource error, kgnilnd_rdma will put
+        * the tx back on the TX_MAPQ. When this tx is pulled back off the MAPQ,
+        * it's gnm_type will now be GNILND_MSG_PUT_DONE or
+        * GNILND_MSG_GET_DONE_REV.
+        */
        case GNILND_MSG_GET_REQ:
                tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
                tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
@@ -3554,18 +3663,20 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                break;
 
        /* PUT_REQ and GET_DONE are where we do the actual RDMA */
+       case GNILND_MSG_PUT_DONE:
        case GNILND_MSG_PUT_REQ:
-               kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
                             &tx->tx_putinfo.gnpam_desc,
                             tx->tx_putinfo.gnpam_desc.gnrd_nob,
                             tx->tx_putinfo.gnpam_dst_cookie);
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_GET_DONE:
-               kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
                             &tx->tx_getinfo.gngm_desc,
                             tx->tx_lntmsg[0]->msg_len,
                             tx->tx_getinfo.gngm_cookie);
-
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_PUT_REQ_REV:
                tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
@@ -3579,10 +3690,11 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
                break;
        case GNILND_MSG_PUT_DONE_REV:
-               kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
                             &tx->tx_getinfo.gngm_desc,
-                            tx->tx_lntmsg[0]->msg_len,
+                            tx->tx_nob,
                             tx->tx_getinfo.gngm_cookie);
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_GET_ACK_REV:
                tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
@@ -3597,12 +3709,13 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
                rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
                break;
+       case GNILND_MSG_GET_DONE_REV:
        case GNILND_MSG_GET_REQ_REV:
-               kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
                                &tx->tx_putinfo.gnpam_desc,
                                tx->tx_putinfo.gnpam_desc.gnrd_nob,
                                tx->tx_putinfo.gnpam_dst_cookie);
-
+               RETURN(try_map_if_full ? rc : 0);
                break;
        }
 
@@ -3910,6 +4023,13 @@ kgnilnd_complete_tx(kgn_tx_t *tx, int rc)
 {
        int             complete = 0;
        kgn_conn_t      *conn = tx->tx_conn;
+       __u64 nob = tx->tx_nob;
+       __u32 physnop = tx->tx_phys_npages;
+       int   id = tx->tx_id.txe_smsg_id;
+       int buftype = tx->tx_buftype;
+       gni_mem_handle_t hndl;
+       hndl.qword1 = tx->tx_map_key.qword1;
+       hndl.qword2 = tx->tx_map_key.qword2;
 
        spin_lock(&conn->gnc_list_lock);
 
@@ -3919,6 +4039,22 @@ kgnilnd_complete_tx(kgn_tx_t *tx, int rc)
        tx->tx_rc = rc;
        tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
 
+       if (rc == -EFAULT) {
+               CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16"LPF64"u physnop %8d buffertype %#8x MemHandle "LPX64"."LPX64"x\n",
+                       rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2);
+
+               if(*kgnilnd_tunables.kgn_efault_lbug) {
+                       GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
+                       "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
+                       rc, tx, conn ?
+                       libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
+                       tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
+                       kgnilnd_tx_state2str(tx->tx_list_state),
+                       cfs_duration_sec((unsigned long) jiffies - tx->tx_qtime));
+                       LBUG();
+               }
+       }
+
        if (!(tx->tx_state & GNILND_TX_WAITING_COMPLETION)) {
                kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                /* sample under lock as follow on steps require gnc_list_lock
@@ -3974,7 +4110,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                RETURN_EXIT;
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -4003,7 +4139,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                cfs_duration_sec(timestamp - newest_last_rx),
                cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                rc = -ETIME;
                kgnilnd_close_conn(conn, rc);
                RETURN_EXIT;
@@ -4012,8 +4148,8 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
        rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
 
        if (rrc == GNI_RC_NOT_DONE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
-               CDEBUG(D_INFO, "SMSG RX empty\n");
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
                RETURN_EXIT;
        }
 
@@ -4026,7 +4162,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
         */
 
        if (rrc == GNI_RC_INVALID_STATE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
                        "detected closing conn %p from peer %s\n", conn,
                        libcfs_nid2str(conn->gnc_peer->gnp_nid));
@@ -4043,18 +4179,18 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
 
        rx = kgnilnd_alloc_rx();
        if (rx == NULL) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                kgnilnd_release_msg(conn);
                GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
                           conn, libcfs_nid2str(peer->gnp_nid));
                RETURN_EXIT;
        }
 
-       GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p from %s",
-               conn, libcfs_nid2str(peer->gnp_nid));
+       GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
 
        timestamp = conn->gnc_last_rx;
-       last_seq = conn->gnc_rx_seq;
+       seq = last_seq = atomic_read(&conn->gnc_rx_seq);
+       atomic_inc(&conn->gnc_rx_seq);
 
        conn->gnc_last_rx = jiffies;
        /* stash first rx so we can clear out purgatory
@@ -4062,10 +4198,8 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
        if (conn->gnc_first_rx == 0)
                conn->gnc_first_rx = jiffies;
 
-       seq = conn->gnc_rx_seq++;
-
        /* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
        kgnilnd_peer_alive(conn->gnc_peer);
 
        rx->grx_msg = msg;
@@ -4189,7 +4323,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                                       conn, last_seq,
                                       cfs_duration_sec(now - timestamp),
                                       cfs_duration_sec(now - conn->gnc_last_rx_cq),
-                                      conn->gnc_tx_seq,
+                                      atomic_read(&conn->gnc_tx_seq),
                                       cfs_duration_sec(now - conn->gnc_last_tx),
                                       cfs_duration_sec(now - conn->gnc_last_tx_cq),
                                       cfs_duration_sec(now - conn->gnc_last_noop_want),
@@ -4651,6 +4785,11 @@ kgnilnd_process_mapped_tx(kgn_device_t *dev)
                         * mapped so we can reset our timers */
                        dev->gnd_map_attempt = 0;
                        continue;
+               } else if (rc == -EAGAIN) {
+                       spin_lock(&dev->gnd_lock);
+                       mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
+                       spin_unlock(&dev->gnd_lock);
+                       GOTO(get_out_mapped, rc);
                } else if (rc != -ENOMEM) {
                        /* carp, failure we can't handle */
                        kgnilnd_tx_done(tx, rc);
@@ -4795,9 +4934,9 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
                                 * yet. Cycle this conn back through
                                 * the scheduler. */
                                kgnilnd_schedule_conn(conn);
-                       } else
-                       kgnilnd_complete_closed_conn(conn);
-
+                       } else {
+                               kgnilnd_complete_closed_conn(conn);
+                       }
                        up_write(&dev->gnd_conn_sem);
                } else if (unlikely(conn->gnc_state == GNILND_CONN_DESTROY_EP)) {
                        /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */
@@ -4850,15 +4989,12 @@ kgnilnd_scheduler(void *arg)
 {
        int               threadno = (long)arg;
        kgn_device_t            *dev;
-       char                    name[16];
        int                     busy_loops = 0;
        unsigned long     deadline = 0;
        DEFINE_WAIT(wait);
 
        dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
 
-       snprintf(name, sizeof(name), "kgnilnd_sd_%02d", threadno);
-       cfs_daemonize(name);
        cfs_block_allsigs();
 
        /* all gnilnd threads need to run fairly urgently */