LU-6261 gnilnd: kgnilnd_check_rdma_cq race in error path.
diff --git a/lnet/klnds/gnilnd/gnilnd_cb.c b/lnet/klnds/gnilnd/gnilnd_cb.c
index 705a341..b8f56f5 100644
@@ -4,6 +4,7 @@
  * Copyright (C) 2009-2012 Cray, Inc.
  *
  *   Derived from work by Eric Barton <eric@bartonsoftware.com>
+ *   Author: James Shimek <jshimek@cray.com>
  *   Author: Nic Henke <nic@cray.com>
  *
  *   This file is part of Lustre, http://www.lustre.org.
@@ -119,7 +120,7 @@ kgnilnd_device_callback(__u32 devid, __u64 arg)
  * == 0: reschedule if someone marked him WANTS_SCHED
  * > 0 : force a reschedule */
 /* Return code 0 means it did not schedule the conn, 1
- *  means it succesfully scheduled the conn.
+ * means it successfully scheduled the conn.
  */
 
 int
@@ -504,8 +505,14 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
        if (nob == 0) {
                tx->tx_buffer = NULL;
        } else if (kiov != NULL) {
+
+               if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
+                       niov = ((nob + offset + PAGE_SIZE - 1) / PAGE_SIZE);
+               }
+
                LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
-                        "bad niov %d\n", niov);
+                       "bad niov %d msg %p kiov %p iov %p offset %d nob %d\n",
+                       niov, msg, kiov, iov, offset, nob);
 
                while (offset >= kiov->kiov_len) {
                        offset -= kiov->kiov_len;
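
The guard added above clamps niov when a caller passes more kiov entries than the payload can actually span. A minimal sketch of the page arithmetic, assuming the kernel's PAGE_SIZE (the helper name is hypothetical):

	/* pages needed to cover nob bytes that start at offset within
	 * the first page: ceil((nob + offset) / PAGE_SIZE) */
	static inline unsigned int
	kgn_pages_spanned(unsigned int offset, unsigned int nob)
	{
		return (nob + offset + PAGE_SIZE - 1) / PAGE_SIZE;
	}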
@@ -1157,6 +1164,7 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                 * verified peer notification  - the theory is that
                 * a TX error can be communicated in all other cases */
                if (tx->tx_conn->gnc_state != GNILND_CONN_ESTABLISHED &&
+                   error != -GNILND_NOPURG &&
                    kgnilnd_check_purgatory_conn(tx->tx_conn)) {
                        kgnilnd_add_purgatory_tx(tx);
 
@@ -1326,7 +1334,7 @@ search_again:
         * if we are sending to the same node faster than 256000/sec.
         * To help guard against this, we OR in the tx_seq - that is 32 bits */
 
-       tx->tx_id.txe_chips = (__u32)(jiffies | conn->gnc_tx_seq);
+       tx->tx_id.txe_chips = (__u32)(jiffies | atomic_read(&conn->gnc_tx_seq));
 
        GNIDBG_TX(D_NET, tx, "set cookie/id/bits", NULL);
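
gnc_tx_seq changes from a plain counter to an atomic_t throughout this patch, so readers outside the CQ-mutex-protected send path must use atomic_read(). The converted pattern, in miniature (assuming the old field was a plain integer):

	atomic_t gnc_tx_seq = ATOMIC_INIT(0);   /* was a plain counter */

	__u32 chips = (__u32)(jiffies | atomic_read(&gnc_tx_seq));
	atomic_inc(&gnc_tx_seq);                /* was: gnc_tx_seq++ */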
 
@@ -1428,7 +1436,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
         * close message.
         */
        if (atomic_read(&conn->gnc_peer->gnp_dirty_eps) != 0 && msg->gnm_type != GNILND_MSG_CLOSE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                /* Return -ETIME, we are closing the connection already so we dont want to
                 * have this tx hit the wire. The tx will be killed by the calling function.
                 * Once the EP is marked dirty the close message will be the last
@@ -1446,11 +1455,13 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        }
 
        if (time_after_eq(now, newest_last_rx + GNILND_TIMEOUTRX(timeout))) {
-               GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn, "Cant send to %s after timeout lapse of %lu; TO %lu",
+               GNIDBG_CONN(D_NETERROR|D_CONSOLE, conn,
+                           "Cant send to %s after timeout lapse of %lu; TO %lu\n",
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                cfs_duration_sec(now - newest_last_rx),
                cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                return -ETIME;
        }
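
Every unlock of the device-wide gnd_cq_mutex now goes through kgnilnd_gl_mutex_unlock(), paired with a new per-connection gnc_smsg_mutex (and gnc_rdma_mutex on the RDMA path) taken via kgnilnd_conn_mutex_*(). A plausible shape for these wrappers, assumed rather than copied from gnilnd.h, where the patch defines them:

	static inline void kgnilnd_gl_mutex_lock(struct mutex *lock)
	{
		mutex_lock(lock);       /* device-global CQ mutex */
	}

	static inline void kgnilnd_conn_mutex_unlock(struct mutex *lock)
	{
		mutex_unlock(lock);     /* per-connection smsg/rdma mutex */
	}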
 
@@ -1461,7 +1472,7 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
         */
        msg->gnm_connstamp = conn->gnc_my_connstamp;
        msg->gnm_payload_len = immediatenob;
-       msg->gnm_seq = conn->gnc_tx_seq;
+       msg->gnm_seq = atomic_read(&conn->gnc_tx_seq);
 
        /* always init here - kgn_checksum is a /sys module tunable
         * and can be flipped at any point, even between msg init and sending */
@@ -1485,14 +1496,15 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        if (unlikely(tx->tx_state & GNILND_TX_FAIL_SMSG)) {
                rrc = cfs_fail_val ? cfs_fail_val : GNI_RC_NOT_DONE;
        } else {
-       rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
-                                   msg, sizeof(*msg), immediate, immediatenob,
-                           tx->tx_id.txe_smsg_id);
+               rrc = kgnilnd_smsg_send(conn->gnc_ephandle,
+                                       msg, sizeof(*msg), immediate,
+                                       immediatenob,
+                                       tx->tx_id.txe_smsg_id);
        }
 
        switch (rrc) {
        case GNI_RC_SUCCESS:
-               conn->gnc_tx_seq++;
+               atomic_inc(&conn->gnc_tx_seq);
                conn->gnc_last_tx = jiffies;
                /* no locking here as LIVE isn't a list */
                kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_LIVE_FMAQ, 1);
@@ -1506,7 +1518,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
 
                /* serialize with seeing CQ events for completion on this, as well as
                 * tx_seq */
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
 
                atomic_inc(&conn->gnc_device->gnd_short_ntx);
                atomic64_add(immediatenob, &conn->gnc_device->gnd_short_txbytes);
@@ -1518,8 +1531,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
                /* XXX Nic: We need to figure out how to track this
                 * - there are bound to be good reasons for it,
                 * but we want to know when it happens */
-
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                /* We'll handle this error inline - makes the calling logic much more
                 * clean */
 
@@ -1556,7 +1569,8 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
                }
        default:
                /* handle bad retcode gracefully */
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                return -EIO;
        }
 }
@@ -1571,7 +1585,8 @@ kgnilnd_sendmsg(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
        int              rc;
 
        timestamp = jiffies;
-       mutex_lock(&dev->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&dev->gnd_cq_mutex);
+       kgnilnd_conn_mutex_lock(&tx->tx_conn->gnc_smsg_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -1616,7 +1631,8 @@ kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob
                rc = 0;
        } else {
                atomic_inc(&conn->gnc_device->gnd_fast_try);
-               rc = mutex_trylock(&conn->gnc_device->gnd_cq_mutex);
+               rc = kgnilnd_trylock(&conn->gnc_device->gnd_cq_mutex,
+                                    &conn->gnc_smsg_mutex);
        }
        if (!rc) {
                rc = -EAGAIN;
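
With two locks on the fast path, a single mutex_trylock() no longer suffices; kgnilnd_trylock() must take both or neither. An assumed implementation (the real helper is defined elsewhere in the patch):

	static inline int
	kgnilnd_trylock(struct mutex *cq_lock, struct mutex *c_lock)
	{
		if (!mutex_trylock(cq_lock))
			return 0;
		if (!mutex_trylock(c_lock)) {
			mutex_unlock(cq_lock);
			return 0;
		}
		return 1;       /* both held, like mutex_trylock() */
	}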
@@ -1855,7 +1871,7 @@ no_peer:
        RETURN_EXIT;
 }
 
-void
+int
 kgnilnd_rdma(kgn_tx_t *tx, int type,
            kgn_rdma_desc_t *sink, unsigned int nob, __u64 cookie)
 {
@@ -1925,7 +1941,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
                                        /* allocation of buffer failed nak the rdma */
                                        kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
                                        kgnilnd_tx_done(tx, -EFAULT);
-                                       return;
+                                       return 0;
                                }
                                kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
                                rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
@@ -1935,7 +1951,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
                                        tx->tx_buffer_copy = NULL;
                                        kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
                                        kgnilnd_tx_done(tx, -EFAULT);
-                                       return;
+                                       return 0;
                                }
                        }
                        desc_map_key = tx->tx_buffer_copy_map_key;
@@ -1965,21 +1981,23 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
 
        if (nob == 0) {
                kgnilnd_queue_tx(conn, tx);
-               return;
+               return 0;
        }
 
        /* Don't lie (CLOSE == RDMA idle) */
        LASSERTF(!conn->gnc_close_sent, "tx %p on conn %p after close sent %d\n",
                 tx, conn, conn->gnc_close_sent);
 
-       GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x dlvr_mode 0x%x cookie:"LPX64,
-               type, tx->tx_rdma_desc.dlvr_mode, cookie);
+       GNIDBG_TX(D_NET, tx, "Post RDMA type 0x%02x conn %p dlvr_mode "
+               "0x%x cookie:"LPX64,
+               type, conn, tx->tx_rdma_desc.dlvr_mode, cookie);
 
        /* set CQ dedicated for RDMA */
        tx->tx_rdma_desc.src_cq_hndl = conn->gnc_device->gnd_snd_rdma_cqh;
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -1987,17 +2005,35 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
 
        rrc = kgnilnd_post_rdma(conn->gnc_ephandle, &tx->tx_rdma_desc);
 
+       if (rrc == GNI_RC_ERROR_RESOURCE) {
+               kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_unmap_buffer(tx, 0);
+
+               if (tx->tx_buffer_copy != NULL) {
+                       vfree(tx->tx_buffer_copy);
+                       tx->tx_buffer_copy = NULL;
+               }
+
+               spin_lock(&tx->tx_conn->gnc_device->gnd_lock);
+               kgnilnd_tx_add_state_locked(tx, NULL, tx->tx_conn,
+                                           GNILND_TX_MAPQ, 0);
+               spin_unlock(&tx->tx_conn->gnc_device->gnd_lock);
+               kgnilnd_schedule_device(tx->tx_conn->gnc_device);
+               return -EAGAIN;
+       }
+
        spin_lock(&conn->gnc_list_lock);
        kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, GNILND_TX_LIVE_RDMAQ, 1);
        tx->tx_qtime = jiffies;
        spin_unlock(&conn->gnc_list_lock);
-
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
 
        /* XXX Nic: is this a place we should handle more errors for
         * robustness sake */
        LASSERT(rrc == GNI_RC_SUCCESS);
-
+       return 0;
 }
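
Returning int from kgnilnd_rdma() is the core of the fix: GNI_RC_ERROR_RESOURCE no longer trips the LASSERT. Instead the tx is unmapped, any bounce buffer freed, and the tx parked back on the MAPQ, with -EAGAIN telling the caller to retry later. The caller-side contract, condensed:

	rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
			  &tx->tx_putinfo.gnpam_desc,
			  tx->tx_putinfo.gnpam_desc.gnrd_nob,
			  tx->tx_putinfo.gnpam_dst_cookie);
	/* rc == 0:       posted, or tx already NAKed/completed on a
	 *                hard failure
	 * rc == -EAGAIN: CQ out of resources; tx is back on the MAPQ
	 *                and the device has been scheduled to retry */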
 
 kgn_rx_t *
@@ -2028,14 +2064,14 @@ kgnilnd_release_msg(kgn_conn_t *conn)
        CDEBUG(D_NET, "consuming %p\n", conn);
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
        conn->gnc_device->gnd_mutex_delay += (long) jiffies - timestamp;
 
        rrc = kgnilnd_smsg_release(conn->gnc_ephandle);
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
 
        LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
        GNIDBG_SMSG_CREDS(D_NET, conn);
@@ -2762,8 +2798,10 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
        int                     rc = 0;
        int                     count = 0;
        int                     reconnect;
+       int                     to_reconn;
        short                   releaseconn = 0;
        unsigned long           first_rx = 0;
+       int                     purgatory_conn_cnt = 0;
 
        CDEBUG(D_NET, "checking peer 0x%p->%s for timeouts; interval %lus\n",
                peer, libcfs_nid2str(peer->gnp_nid),
@@ -2829,13 +2867,19 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
        reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
                    (atomic_read(&peer->gnp_dirty_eps) == 0);
 
+       /* fast reconnect after a timeout */
+       to_reconn = !conn &&
+                   (peer->gnp_last_errno == -ETIMEDOUT) &&
+                   *kgnilnd_tunables.kgn_fast_reconn;
+
        /* if we are not connected and there are tx on the gnp_tx_queue waiting
         * to be sent, we'll check the reconnect interval and fire up a new
         * connection request */
 
-       if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
+       if (reconnect &&
+           (peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (time_after_eq(jiffies, peer->gnp_reconnect_time)) &&
-            !list_empty(&peer->gnp_tx_queue) && reconnect) {
+           (!list_empty(&peer->gnp_tx_queue) || to_reconn)) {
 
                CDEBUG(D_NET, "starting connect to %s\n",
                        libcfs_nid2str(peer->gnp_nid));
@@ -2903,6 +2947,30 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
                                        cfs_duration_sec(waiting));
 
                                kgnilnd_detach_purgatory_locked(conn, souls);
+                       } else {
+                               purgatory_conn_cnt++;
+                       }
+               }
+       }
+
+       /* If we have too many connections in purgatory we could run out of
+        * resources. Limit the number of connections to a tunable number,
+        * clean up to the minimum all in one fell swoop... there are
+        * situations where dvs will retry tx's and we can eat up several
+        * hundred connection requests at once.
+        */
+       if (purgatory_conn_cnt > *kgnilnd_tunables.kgn_max_purgatory) {
+               list_for_each_entry_safe(conn, connN, &peer->gnp_conns,
+                                        gnc_list) {
+                       if (conn->gnc_in_purgatory &&
+                           conn->gnc_state == GNILND_CONN_DONE) {
+                               CDEBUG(D_NET, "Dropping held resource due to"
+                                             " resource limits being hit\n");
+                               kgnilnd_detach_purgatory_locked(conn, souls);
+
+                               if (purgatory_conn_cnt-- <
+                                   *kgnilnd_tunables.kgn_max_purgatory)
+                                       break;
                        }
                }
        }
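
The post-decrement in the break test compares the old count, so the loop keeps detaching GNILND_CONN_DONE purgatory conns while the count is at or above the ceiling, one per pass:

	/* detach; then stop once the pre-decrement count had already
	 * fallen below the tunable ceiling */
	if (purgatory_conn_cnt-- < *kgnilnd_tunables.kgn_max_purgatory)
		break;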
@@ -3112,7 +3180,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                }
 
                if (rrc == GNI_RC_NOT_DONE) {
-                       mutex_unlock(&dev->gnd_cq_mutex);
+                       kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
                        CDEBUG(D_INFO, "SEND RDMA CQ %d empty processed %ld\n",
                               dev->gnd_id, num_processed);
                        return num_processed;
@@ -3129,7 +3197,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
 
                rrc = kgnilnd_get_completed(dev->gnd_snd_rdma_cqh, event_data,
                                            &desc);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                /* XXX Nic: Need better error handling here... */
                LASSERTF((rrc == GNI_RC_SUCCESS) ||
@@ -3173,9 +3241,16 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                }
 
                /* remove from rdmaq */
+               kgnilnd_conn_mutex_lock(&conn->gnc_rdma_mutex);
                spin_lock(&conn->gnc_list_lock);
                kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                spin_unlock(&conn->gnc_list_lock);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_rdma_mutex);
+
+               if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
+                       event_data = 1LL << 48;
+                       rc = 1;
+               }
 
                if (likely(desc->status == GNI_RC_SUCCESS) && rc == 0) {
                        atomic_inc(&dev->gnd_rdma_ntx);
@@ -3187,6 +3262,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                        /* drop ref from kgnilnd_validate_tx_ev_id */
                        kgnilnd_admin_decref(conn->gnc_tx_in_use);
                        kgnilnd_conn_decref(conn);
+
                        continue;
                }
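
The CFS_FAIL_CHECK() hook is the test handle for this race fix: with the cfs_fail_loc tunable set to CFS_FAIL_GNI_RDMA_CQ_ERROR (a fail_loc id added by this patch), a successful RDMA completion is rewritten to look errored, forcing the NAK/close path handled later in this function. The standard Lustre fault-injection pattern:

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR)) {
		event_data = 1LL << 48;   /* fabricate error bits */
		rc = 1;                   /* take the failure branch */
	}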
 
@@ -3230,7 +3306,7 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
                                         -EFAULT,
                                         rcookie,
                                         tx->tx_msg.gnm_srcnid);
-                       kgnilnd_tx_done(tx, -EFAULT);
+                       kgnilnd_tx_done(tx, -GNILND_NOPURG);
                        kgnilnd_close_conn(conn, -ECOMM);
                }
 
@@ -3265,7 +3341,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                }
 
                rrc = kgnilnd_cq_get_event(dev->gnd_snd_fma_cqh, &event_data);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                if (rrc == GNI_RC_NOT_DONE) {
                        CDEBUG(D_INFO,
@@ -3331,6 +3407,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                }
 
                /* lock tx_list_state and tx_state */
+               kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
                spin_lock(&tx->tx_conn->gnc_list_lock);
 
                GNITX_ASSERTF(tx, tx->tx_list_state == GNILND_TX_LIVE_FMAQ,
@@ -3351,6 +3428,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                saw_reply = !(tx->tx_state & GNILND_TX_WAITING_REPLY);
 
                spin_unlock(&tx->tx_conn->gnc_list_lock);
+               kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
 
                if (queued_fma) {
                        CDEBUG(D_NET, "scheduling conn 0x%p->%s for fmaq\n",
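
Bracketing the tx state transition with gnc_smsg_mutex mirrors the send side: a sender in kgnilnd_sendmsg_nolock() and the send-CQ reaper here can no longer manipulate the same tx concurrently. The protected region, in outline:

	kgnilnd_conn_mutex_lock(&conn->gnc_smsg_mutex);
	spin_lock(&tx->tx_conn->gnc_list_lock);
	/* ... tx leaves GNILND_TX_LIVE_FMAQ, tx_state flags updated ... */
	spin_unlock(&tx->tx_conn->gnc_list_lock);
	kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);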
@@ -3419,7 +3497,7 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
                        return 1;
                }
                rrc = kgnilnd_cq_get_event(dev->gnd_rcv_fma_cqh, &event_data);
-               mutex_unlock(&dev->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&dev->gnd_cq_mutex);
 
                if (rrc == GNI_RC_NOT_DONE) {
                        CDEBUG(D_INFO, "SMSG RX CQ %d empty data "LPX64" "
@@ -3530,7 +3608,8 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                rc = kgnilnd_map_buffer(tx);
        }
 
-       /* rc should be 0 if we mapped succesfully here, if non-zero we are queueing */
+       /* rc should be 0 if we mapped successfully here, if non-zero
+        * we are queueing */
        if (rc != 0) {
                /* if try_map_if_full set, they handle requeuing */
                if (unlikely(try_map_if_full)) {
@@ -3554,7 +3633,12 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
         * remote node where the RDMA will be started
         * Special case -EAGAIN logic - this should just queued as if the mapping couldn't
         * be satisified. The rest of the errors are "hard" errors that require
-        * upper layers to handle themselves */
+        * upper layers to handle themselves.
+        * If kgnilnd_post_rdma returns a resource error, kgnilnd_rdma will put
+        * the tx back on the TX_MAPQ. When this tx is pulled back off the MAPQ,
+        * its gnm_type will now be GNILND_MSG_PUT_DONE or
+        * GNILND_MSG_GET_DONE_REV.
+        */
        case GNILND_MSG_GET_REQ:
                tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
                tx->tx_msg.gnm_u.get.gngm_cookie = tx->tx_id.txe_cookie;
@@ -3578,18 +3662,20 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                break;
 
        /* PUT_REQ and GET_DONE are where we do the actual RDMA */
+       case GNILND_MSG_PUT_DONE:
        case GNILND_MSG_PUT_REQ:
-               kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE,
                             &tx->tx_putinfo.gnpam_desc,
                             tx->tx_putinfo.gnpam_desc.gnrd_nob,
                             tx->tx_putinfo.gnpam_dst_cookie);
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_GET_DONE:
-               kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE,
                             &tx->tx_getinfo.gngm_desc,
                             tx->tx_lntmsg[0]->msg_len,
                             tx->tx_getinfo.gngm_cookie);
-
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_PUT_REQ_REV:
                tx->tx_msg.gnm_u.get.gngm_desc.gnrd_key = tx->tx_map_key;
@@ -3603,10 +3689,11 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
                break;
        case GNILND_MSG_PUT_DONE_REV:
-               kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
                             &tx->tx_getinfo.gngm_desc,
                             tx->tx_nob,
                             tx->tx_getinfo.gngm_cookie);
+               RETURN(try_map_if_full ? rc : 0);
                break;
        case GNILND_MSG_GET_ACK_REV:
                tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_key = tx->tx_map_key;
@@ -3621,12 +3708,13 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
                /* redirect to FMAQ on failure, no need to infinite loop here in MAPQ */
                rc = kgnilnd_sendmsg(tx, NULL, 0, &tx->tx_conn->gnc_list_lock, GNILND_TX_FMAQ);
                break;
+       case GNILND_MSG_GET_DONE_REV:
        case GNILND_MSG_GET_REQ_REV:
-               kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
+               rc = kgnilnd_rdma(tx, GNILND_MSG_GET_DONE_REV,
                                &tx->tx_putinfo.gnpam_desc,
                                tx->tx_putinfo.gnpam_desc.gnrd_nob,
                                tx->tx_putinfo.gnpam_dst_cookie);
-
+               RETURN(try_map_if_full ? rc : 0);
                break;
        }
 
@@ -4021,7 +4109,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                RETURN_EXIT;
 
        timestamp = jiffies;
-       mutex_lock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_lock(&conn->gnc_device->gnd_cq_mutex);
        /* delay in jiffies - we are really concerned only with things that
         * result in a schedule() or really holding this off for long times .
         * NB - mutex_lock could spin for 2 jiffies before going to sleep to wait */
@@ -4050,7 +4138,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                libcfs_nid2str(conn->gnc_peer->gnp_nid),
                cfs_duration_sec(timestamp - newest_last_rx),
                cfs_duration_sec(GNILND_TIMEOUTRX(timeout)));
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                rc = -ETIME;
                kgnilnd_close_conn(conn, rc);
                RETURN_EXIT;
@@ -4059,8 +4147,8 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
        rrc = kgnilnd_smsg_getnext(conn->gnc_ephandle, &prefix);
 
        if (rrc == GNI_RC_NOT_DONE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
-               CDEBUG(D_INFO, "SMSG RX empty\n");
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               CDEBUG(D_INFO, "SMSG RX empty conn 0x%p\n", conn);
                RETURN_EXIT;
        }
 
@@ -4073,7 +4161,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
         */
 
        if (rrc == GNI_RC_INVALID_STATE) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                GNIDBG_CONN(D_NETERROR | D_CONSOLE, conn, "Mailbox corruption "
                        "detected closing conn %p from peer %s\n", conn,
                        libcfs_nid2str(conn->gnc_peer->gnp_nid));
@@ -4090,18 +4178,18 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
 
        rx = kgnilnd_alloc_rx();
        if (rx == NULL) {
-               mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+               kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                kgnilnd_release_msg(conn);
                GNIDBG_MSG(D_NETERROR, msg, "Dropping SMSG RX from 0x%p->%s, no RX memory",
                           conn, libcfs_nid2str(peer->gnp_nid));
                RETURN_EXIT;
        }
 
-       GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p from %s",
-               conn, libcfs_nid2str(peer->gnp_nid));
+       GNIDBG_MSG(D_INFO, msg, "SMSG RX on %p", conn);
 
        timestamp = conn->gnc_last_rx;
-       last_seq = conn->gnc_rx_seq;
+       seq = last_seq = atomic_read(&conn->gnc_rx_seq);
+       atomic_inc(&conn->gnc_rx_seq);
 
        conn->gnc_last_rx = jiffies;
        /* stash first rx so we can clear out purgatory
@@ -4109,10 +4197,8 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
        if (conn->gnc_first_rx == 0)
                conn->gnc_first_rx = jiffies;
 
-       seq = conn->gnc_rx_seq++;
-
        /* needs to linger to protect gnc_rx_seq like we do with gnc_tx_seq */
-       mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
+       kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
        kgnilnd_peer_alive(conn->gnc_peer);
 
        rx->grx_msg = msg;
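
The receive side gets the same conversion: gnc_rx_seq becomes an atomic_t, sampled and bumped while the CQ mutex is still held, replacing the plain post-increment removed below. Before and after, in miniature:

	/* before: seq = conn->gnc_rx_seq++;        (plain counter) */
	seq = last_seq = atomic_read(&conn->gnc_rx_seq);
	atomic_inc(&conn->gnc_rx_seq);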
@@ -4236,7 +4322,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                                       conn, last_seq,
                                       cfs_duration_sec(now - timestamp),
                                       cfs_duration_sec(now - conn->gnc_last_rx_cq),
-                                      conn->gnc_tx_seq,
+                                      atomic_read(&conn->gnc_tx_seq),
                                       cfs_duration_sec(now - conn->gnc_last_tx),
                                       cfs_duration_sec(now - conn->gnc_last_tx_cq),
                                       cfs_duration_sec(now - conn->gnc_last_noop_want),
@@ -4698,6 +4784,11 @@ kgnilnd_process_mapped_tx(kgn_device_t *dev)
                         * mapped so we can reset our timers */
                        dev->gnd_map_attempt = 0;
                        continue;
+               } else if (rc == -EAGAIN) {
+                       spin_lock(&dev->gnd_lock);
+                       mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
+                       spin_unlock(&dev->gnd_lock);
+                       GOTO(get_out_mapped, rc);
                } else if (rc != -ENOMEM) {
                        /* carp, failure we can't handle */
                        kgnilnd_tx_done(tx, rc);
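
The scheduler side closes the loop: on -EAGAIN the tx is already back on the MAPQ, so kgnilnd_process_mapped_tx() just re-arms the device map timer (mod_timer() updates the expiry whether or not the timer is pending) and bails out of this pass; the timer-driven retry replaces the old assertion failure.

	spin_lock(&dev->gnd_lock);
	mod_timer(&dev->gnd_map_timer, dev->gnd_next_map);
	spin_unlock(&dev->gnd_lock);
	GOTO(get_out_mapped, rc);       /* retry later from the MAPQ */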
@@ -4842,9 +4933,9 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
                                 * yet. Cycle this conn back through
                                 * the scheduler. */
                                kgnilnd_schedule_conn(conn);
-                       } else
-                       kgnilnd_complete_closed_conn(conn);
-
+                       } else {
+                               kgnilnd_complete_closed_conn(conn);
+                       }
                        up_write(&dev->gnd_conn_sem);
                } else if (unlikely(conn->gnc_state == GNILND_CONN_DESTROY_EP)) {
                        /* DESTROY_EP set in kgnilnd_conn_decref on gnc_refcount = 1 */