LU-11299 lnet: modify lnd notification mechanism
diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 374d161..c6fb08f 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /*
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  */
 
 #include "o2iblnd.h"
 
+#define MAX_CONN_RACES_BEFORE_ABORT 20
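+/* number of consecutive connection-race losses tolerated from a peer_ni
+ * before further connection attempts are aborted */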
+
+static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni);
+static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
+                                      int error);
+static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx,
+                              int type, int body_nob);
+static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+                           int resid, struct kib_rdma_desc *dstrd, u64 dstcookie);
+static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
+
+static void kiblnd_unmap_tx(struct kib_tx *tx);
+static void kiblnd_check_sends_locked(struct kib_conn *conn);
+
 void
-kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
+kiblnd_tx_done(struct kib_tx *tx)
 {
-        lnet_msg_t *lntmsg[2];
-        kib_net_t  *net = ni->ni_data;
-        int         rc;
-        int         i;
+       struct lnet_msg *lntmsg[2];
+       enum lnet_msg_hstatus hstatus;
+       int         rc;
+       int         i;
 
-        LASSERT (net != NULL);
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
-        LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
-        LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer response */
-        LASSERT (tx->tx_pool != NULL);
+       LASSERT (!in_interrupt());
+       LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
+       LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
+       LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer_ni response */
+       LASSERT (tx->tx_pool != NULL);
 
-        kiblnd_unmap_tx(ni, tx);
+       kiblnd_unmap_tx(tx);
 
-        /* tx may have up to 2 lnet msgs to finalise */
-        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
-        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
-        rc = tx->tx_status;
+       /* tx may have up to 2 lnet msgs to finalise */
+       lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
+       lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
+       rc = tx->tx_status;
+       hstatus = tx->tx_hstatus;
 
-        if (tx->tx_conn != NULL) {
-                LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);
+       if (tx->tx_conn != NULL) {
+               kiblnd_conn_decref(tx->tx_conn);
+               tx->tx_conn = NULL;
+       }
 
-                kiblnd_conn_decref(tx->tx_conn);
-                tx->tx_conn = NULL;
-        }
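+       /* reset the descriptor before it goes back to the tx pool for reuse */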
+       tx->tx_nwrq = tx->tx_nsge = 0;
+       tx->tx_status = 0;
 
-        tx->tx_nwrq = 0;
-        tx->tx_status = 0;
+       kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
 
-        kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+       /* delay finalize until my descs have been freed */
+       for (i = 0; i < 2; i++) {
+               if (lntmsg[i] == NULL)
+                       continue;
 
-        /* delay finalize until my descs have been freed */
-        for (i = 0; i < 2; i++) {
-                if (lntmsg[i] == NULL)
-                        continue;
+               /* propagate health status to LNet for requests; use the
+                * value cached before the tx was returned to its pool */
+               if (i == 0)
+                       lntmsg[i]->msg_health_status = hstatus;
 
-                lnet_finalize(ni, lntmsg[i], rc);
-        }
+               lnet_finalize(lntmsg[i], rc);
+       }
 }
 
 void
-kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int status)
+kiblnd_txlist_done(struct list_head *txlist, int status,
+                  enum lnet_msg_hstatus hstatus)
 {
-        kib_tx_t *tx;
-
-        while (!cfs_list_empty (txlist)) {
-                tx = cfs_list_entry (txlist->next, kib_tx_t, tx_list);
-
-                cfs_list_del(&tx->tx_list);
-                /* complete now */
-                tx->tx_waiting = 0;
-                tx->tx_status = status;
-                kiblnd_tx_done(ni, tx);
-        }
+       struct kib_tx *tx;
+
+       while (!list_empty(txlist)) {
+               tx = list_entry(txlist->next, struct kib_tx, tx_list);
+
+               list_del(&tx->tx_list);
+               /* complete now */
+               tx->tx_waiting = 0;
+               tx->tx_status = status;
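+               /* keep any health status already recorded on the tx unless
+                * the caller reports a real failure */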
+               if (hstatus != LNET_MSG_STATUS_OK)
+                       tx->tx_hstatus = hstatus;
+               kiblnd_tx_done(tx);
+       }
 }
 
-kib_tx_t *
-kiblnd_get_idle_tx (lnet_ni_t *ni)
+static struct kib_tx *
+kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
 {
-        kib_net_t            *net = (kib_net_t *)ni->ni_data;
-        cfs_list_t           *node;
-        kib_tx_t             *tx;
+       struct kib_net *net = ni->ni_data;
+       struct list_head *node;
+       struct kib_tx *tx;
+       struct kib_tx_poolset *tps;
 
-        node = kiblnd_pool_alloc_node(&net->ibn_tx_ps.tps_poolset);
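+       /* tx pools are per-CPT: allocate from the pool of the CPT that
+        * owns the target NID to keep the descriptor CPT-local */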
+       tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
+       node = kiblnd_pool_alloc_node(&tps->tps_poolset);
         if (node == NULL)
                 return NULL;
-        tx = container_of(node, kib_tx_t, tx_list);
+       tx = container_of(node, struct kib_tx, tx_list);
 
         LASSERT (tx->tx_nwrq == 0);
         LASSERT (!tx->tx_queued);
@@ -119,45 +135,52 @@ kiblnd_get_idle_tx (lnet_ni_t *ni)
         LASSERT (tx->tx_conn == NULL);
         LASSERT (tx->tx_lntmsg[0] == NULL);
         LASSERT (tx->tx_lntmsg[1] == NULL);
-        LASSERT (tx->tx_u.pmr == NULL);
         LASSERT (tx->tx_nfrags == 0);
 
+       tx->tx_gaps = false;
+       tx->tx_hstatus = LNET_MSG_STATUS_OK;
+
         return tx;
 }
 
-void
-kiblnd_drop_rx (kib_rx_t *rx)
+static void
+kiblnd_drop_rx(struct kib_rx *rx)
 {
-        kib_conn_t         *conn = rx->rx_conn;
-        unsigned long       flags;
-        
-        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
-        LASSERT (conn->ibc_nrx > 0);
-        conn->ibc_nrx--;
-        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
-
-        kiblnd_conn_decref(conn);
+       struct kib_conn *conn = rx->rx_conn;
+       struct kib_sched_info *sched = conn->ibc_sched;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sched->ibs_lock, flags);
+       LASSERT(conn->ibc_nrx > 0);
+       conn->ibc_nrx--;
+       spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+       kiblnd_conn_decref(conn);
 }
 
 int
-kiblnd_post_rx (kib_rx_t *rx, int credit)
+kiblnd_post_rx(struct kib_rx *rx, int credit)
 {
-        kib_conn_t         *conn = rx->rx_conn;
-        kib_net_t          *net = conn->ibc_peer->ibp_ni->ni_data;
-        struct ib_recv_wr  *bad_wrq = NULL;
-        struct ib_mr       *mr;
-        int                 rc;
-
-        LASSERT (net != NULL);
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
-                 credit == IBLND_POSTRX_PEER_CREDIT ||
-                 credit == IBLND_POSTRX_RSRVD_CREDIT);
-
-        mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
-        LASSERT (mr != NULL);
-
-        rx->rx_sge.lkey   = mr->lkey;
+       struct kib_conn *conn = rx->rx_conn;
+       struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
+       struct ib_recv_wr *bad_wrq = NULL;
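+       /* kernels that still provide ib_get_dma_mr() expose a global DMA
+        * MR; newer kernels use the PD's local_dma_lkey instead */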
+#ifdef HAVE_IB_GET_DMA_MR
+       struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
+#endif
+       int rc;
+
+       LASSERT (net != NULL);
+       LASSERT (!in_interrupt());
+       LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
+                credit == IBLND_POSTRX_PEER_CREDIT ||
+                credit == IBLND_POSTRX_RSRVD_CREDIT);
+#ifdef HAVE_IB_GET_DMA_MR
+       LASSERT(mr != NULL);
+
+       rx->rx_sge.lkey   = mr->lkey;
+#else
+       rx->rx_sge.lkey   = conn->ibc_hdev->ibh_pd->local_dma_lkey;
+#endif
         rx->rx_sge.addr   = rx->rx_msgaddr;
         rx->rx_sge.length = IBLND_MSG_SIZE;
 
@@ -176,105 +199,117 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
 
         rx->rx_nob = -1;                        /* flag posted */
 
-        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
-        if (rc != 0) {
-                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
-                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
-                rx->rx_nob = 0;
-        }
-
-        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
-                return rc;
-
-        if (rc != 0) {
-                kiblnd_close_conn(conn, rc);
-                kiblnd_drop_rx(rx);             /* No more posts for this rx */
-                return rc;
-        }
-
-        if (credit == IBLND_POSTRX_NO_CREDIT)
-                return 0;
-
-        cfs_spin_lock(&conn->ibc_lock);
-        if (credit == IBLND_POSTRX_PEER_CREDIT)
-                conn->ibc_outstanding_credits++;
-        else
-                conn->ibc_reserved_credits++;
-        cfs_spin_unlock(&conn->ibc_lock);
-
-        kiblnd_check_sends(conn);
-        return 0;
+       /* NB: need an extra reference after ib_post_recv because we don't
+        * own this rx (and rx::rx_conn) anymore, LU-5678.
+        */
+       kiblnd_conn_addref(conn);
+#ifdef HAVE_IB_POST_SEND_RECV_CONST
+       rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
+                         (const struct ib_recv_wr **)&bad_wrq);
+#else
+       rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
+#endif
+       if (unlikely(rc != 0)) {
+               CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
+                      libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
+               rx->rx_nob = 0;
+       }
+
+       if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
+               goto out;
+
+       if (unlikely(rc != 0)) {
+               kiblnd_close_conn(conn, rc);
+               kiblnd_drop_rx(rx);     /* No more posts for this rx */
+               goto out;
+       }
+
+       if (credit == IBLND_POSTRX_NO_CREDIT)
+               goto out;
+
+       spin_lock(&conn->ibc_lock);
+       if (credit == IBLND_POSTRX_PEER_CREDIT)
+               conn->ibc_outstanding_credits++;
+       else
+               conn->ibc_reserved_credits++;
+       kiblnd_check_sends_locked(conn);
+       spin_unlock(&conn->ibc_lock);
+
+out:
+       kiblnd_conn_decref(conn);
+       return rc;
 }
 
-kib_tx_t *
-kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
+static struct kib_tx *
+kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
 {
-        cfs_list_t   *tmp;
+       struct list_head *tmp;
 
-        cfs_list_for_each(tmp, &conn->ibc_active_txs) {
-                kib_tx_t *tx = cfs_list_entry(tmp, kib_tx_t, tx_list);
+       list_for_each(tmp, &conn->ibc_active_txs) {
+               struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
 
-                LASSERT (!tx->tx_queued);
-                LASSERT (tx->tx_sending != 0 || tx->tx_waiting);
+               LASSERT(!tx->tx_queued);
+               LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
 
-                if (tx->tx_cookie != cookie)
-                        continue;
+               if (tx->tx_cookie != cookie)
+                       continue;
 
-                if (tx->tx_waiting &&
-                    tx->tx_msg->ibm_type == txtype)
-                        return tx;
+               if (tx->tx_waiting &&
+                   tx->tx_msg->ibm_type == txtype)
+                       return tx;
 
-                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
-                      tx->tx_waiting ? "" : "NOT ",
-                      tx->tx_msg->ibm_type, txtype);
-        }
-        return NULL;
+               CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
+                     tx->tx_waiting ? "" : "NOT ",
+                     tx->tx_msg->ibm_type, txtype);
+       }
+       return NULL;
 }
 
-void
-kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
+static void
+kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, u64 cookie)
 {
-        kib_tx_t    *tx;
-        lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
-        int          idle;
+       struct kib_tx *tx;
+       struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+       int idle;
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
-        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
-        if (tx == NULL) {
-                cfs_spin_unlock(&conn->ibc_lock);
+       tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
+       if (tx == NULL) {
+               spin_unlock(&conn->ibc_lock);
 
-                CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
-                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
-                kiblnd_close_conn(conn, -EPROTO);
-                return;
-        }
+               CWARN("Unmatched completion type %x cookie %#llx from %s\n",
+                     txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+               kiblnd_close_conn(conn, -EPROTO);
+               return;
+       }
 
-        if (tx->tx_status == 0) {               /* success so far */
-                if (status < 0) {               /* failed? */
-                        tx->tx_status = status;
-                } else if (txtype == IBLND_MSG_GET_REQ) {
-                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
-                }
-        }
+       if (tx->tx_status == 0) {               /* success so far */
+               if (status < 0) {               /* failed? */
+                       tx->tx_status = status;
+                       tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
+               } else if (txtype == IBLND_MSG_GET_REQ) {
+                       lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
+               }
+       }
 
-        tx->tx_waiting = 0;
+       tx->tx_waiting = 0;
 
-        idle = !tx->tx_queued && (tx->tx_sending == 0);
-        if (idle)
-                cfs_list_del(&tx->tx_list);
+       idle = !tx->tx_queued && (tx->tx_sending == 0);
+       if (idle)
+               list_del(&tx->tx_list);
 
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_unlock(&conn->ibc_lock);
 
-        if (idle)
-                kiblnd_tx_done(ni, tx);
+       if (idle)
+               kiblnd_tx_done(tx);
 }
 
-void
-kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie)
+static void
+kiblnd_send_completion(struct kib_conn *conn, int type, int status, u64 cookie)
 {
-        lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
-        kib_tx_t    *tx = kiblnd_get_idle_tx(ni);
+       struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+       struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
 
         if (tx == NULL) {
                 CERROR("Can't get tx for completion %x for %s\n",
@@ -284,19 +319,19 @@ kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie)
 
         tx->tx_msg->ibm_u.completion.ibcm_status = status;
         tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
-        kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
+       kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));
 
         kiblnd_queue_tx(tx, conn);
 }
 
-void
-kiblnd_handle_rx (kib_rx_t *rx)
+static void
+kiblnd_handle_rx(struct kib_rx *rx)
 {
-        kib_msg_t    *msg = rx->rx_msg;
-        kib_conn_t   *conn = rx->rx_conn;
-        lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
+       struct kib_msg *msg = rx->rx_msg;
+       struct kib_conn   *conn = rx->rx_conn;
+       struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
         int           credits = msg->ibm_credits;
-        kib_tx_t     *tx;
+       struct kib_tx *tx;
         int           rc = 0;
         int           rc2;
         int           post_credit;
@@ -309,22 +344,22 @@ kiblnd_handle_rx (kib_rx_t *rx)
 
         if (credits != 0) {
                 /* Have I received credits that will let me send? */
-                cfs_spin_lock(&conn->ibc_lock);
+               spin_lock(&conn->ibc_lock);
 
-                if (conn->ibc_credits + credits >
-                    IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
-                        rc2 = conn->ibc_credits;
-                        cfs_spin_unlock(&conn->ibc_lock);
+               if (conn->ibc_credits + credits >
+                   conn->ibc_queue_depth) {
+                       rc2 = conn->ibc_credits;
+                       spin_unlock(&conn->ibc_lock);
 
-                        CERROR("Bad credits from %s: %d + %d > %d\n",
-                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
-                               rc2, credits,
-                               IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+                       CERROR("Bad credits from %s: %d + %d > %d\n",
+                              libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                              rc2, credits,
+                              conn->ibc_queue_depth);
 
-                        kiblnd_close_conn(conn, -EPROTO);
-                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
-                        return;
-                }
+                       kiblnd_close_conn(conn, -EPROTO);
+                       kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
+                       return;
+               }
 
                 conn->ibc_credits += credits;
 
@@ -333,8 +368,8 @@ kiblnd_handle_rx (kib_rx_t *rx)
                     !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
                         conn->ibc_outstanding_credits++;
 
-                cfs_spin_unlock(&conn->ibc_lock);
-                kiblnd_check_sends(conn);
+               kiblnd_check_sends_locked(conn);
+               spin_unlock(&conn->ibc_lock);
         }
 
         switch (msg->ibm_type) {
@@ -385,12 +420,12 @@ kiblnd_handle_rx (kib_rx_t *rx)
         case IBLND_MSG_PUT_ACK:
                 post_credit = IBLND_POSTRX_RSRVD_CREDIT;
 
-                cfs_spin_lock(&conn->ibc_lock);
-                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
-                                                   msg->ibm_u.putack.ibpam_src_cookie);
-                if (tx != NULL)
-                        cfs_list_del(&tx->tx_list);
-                cfs_spin_unlock(&conn->ibc_lock);
+               spin_lock(&conn->ibc_lock);
+               tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
+                                       msg->ibm_u.putack.ibpam_src_cookie);
+               if (tx != NULL)
+                       list_del(&tx->tx_list);
+               spin_unlock(&conn->ibc_lock);
 
                 if (tx == NULL) {
                         CERROR("Unmatched PUT_ACK from %s\n",
@@ -401,10 +436,10 @@ kiblnd_handle_rx (kib_rx_t *rx)
 
                 LASSERT (tx->tx_waiting);
                 /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
-                 * (a) I can overwrite tx_msg since my peer has received it!
+                 * (a) I can overwrite tx_msg since my peer_ni has received it!
                  * (b) tx_waiting set tells tx_complete() it's not done. */
 
-                tx->tx_nwrq = 0;                /* overwrite PUT_REQ */
+               tx->tx_nwrq = tx->tx_nsge = 0;  /* overwrite PUT_REQ */
 
                 rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                        kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
@@ -414,11 +449,11 @@ kiblnd_handle_rx (kib_rx_t *rx)
                         CERROR("Can't setup rdma for PUT to %s: %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
 
-                cfs_spin_lock(&conn->ibc_lock);
-                tx->tx_waiting = 0;             /* clear waiting and queue atomically */
-                kiblnd_queue_tx_locked(tx, conn);
-                cfs_spin_unlock(&conn->ibc_lock);
-                break;
+               spin_lock(&conn->ibc_lock);
+               tx->tx_waiting = 0;     /* clear waiting and queue atomically */
+               kiblnd_queue_tx_locked(tx, conn);
+               spin_unlock(&conn->ibc_lock);
+               break;
 
         case IBLND_MSG_PUT_DONE:
                 post_credit = IBLND_POSTRX_PEER_CREDIT;
@@ -450,15 +485,15 @@ kiblnd_handle_rx (kib_rx_t *rx)
                 kiblnd_post_rx(rx, post_credit);
 }
 
-void
-kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
+static void
+kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
 {
-        kib_msg_t    *msg = rx->rx_msg;
-        kib_conn_t   *conn = rx->rx_conn;
-        lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
-        kib_net_t    *net = ni->ni_data;
-        int           rc;
-        int           err = -EIO;
+       struct kib_msg *msg = rx->rx_msg;
+       struct kib_conn   *conn = rx->rx_conn;
+       struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+       struct kib_net *net = ni->ni_data;
+       int rc;
+       int err = -EIO;
 
         LASSERT (net != NULL);
         LASSERT (rx->rx_nob < 0);               /* was posted */
@@ -499,17 +534,17 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
         /* racing with connection establishment/teardown! */
 
         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-                cfs_rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
-                unsigned long  flags;
-
-                cfs_write_lock_irqsave(g_lock, flags);
-                /* must check holding global lock to eliminate race */
-                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-                        cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
-                        return;
-                }
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
+               unsigned long  flags;
+
+               write_lock_irqsave(g_lock, flags);
+               /* must check holding global lock to eliminate race */
+               if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+                       list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+                       write_unlock_irqrestore(g_lock, flags);
+                       return;
+               }
+               write_unlock_irqrestore(g_lock, flags);
         }
         kiblnd_handle_rx(rx);
         return;
@@ -521,131 +556,156 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
         kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
 }
 
-struct page *
-kiblnd_kvaddr_to_page (unsigned long vaddr)
-{
-        struct page *page;
-
-        if (vaddr >= VMALLOC_START &&
-            vaddr < VMALLOC_END) {
-                page = vmalloc_to_page ((void *)vaddr);
-                LASSERT (page != NULL);
-                return page;
-        }
-#ifdef CONFIG_HIGHMEM
-        if (vaddr >= PKMAP_BASE &&
-            vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
-                /* No highmem pages only used for bulk (kiov) I/O */
-                CERROR("find page for address in highmem\n");
-                LBUG();
-        }
-#endif
-        page = virt_to_page (vaddr);
-        LASSERT (page != NULL);
-        return page;
-}
-
 static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
+                 struct kib_rdma_desc *rd, u32 nob)
 {
-        kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
-        __u64              *pages = tx->tx_pages;
-        int                 npages;
-        int                 size;
-        int                 rc;
-        int                 i;
-
-        for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
-                for (size = 0; size <  rd->rd_frags[i].rf_nob;
-                               size += hdev->ibh_page_size) {
-                        pages[npages ++] = (rd->rd_frags[i].rf_addr &
-                                            hdev->ibh_page_mask) + size;
-                }
-        }
-
-        rc = kiblnd_fmr_pool_map(&net->ibn_fmr_ps, pages, npages, 0, &tx->tx_u.fmr);
-        if (rc != 0) {
-                CERROR ("Can't map %d pages: %d\n", npages, rc);
-                return rc;
-        }
-
-        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
-         * the rkey */
-        rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
-                                         tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
-        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
-        rd->rd_frags[0].rf_nob   = nob;
-        rd->rd_nfrags = 1;
-
-        return 0;
+       struct kib_hca_dev *hdev;
+       struct kib_dev *dev;
+       struct kib_fmr_poolset *fps;
+       int                     cpt;
+       int                     rc;
+       int i;
+
+       LASSERT(tx->tx_pool != NULL);
+       LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+
+       dev = net->ibn_dev;
+       hdev = tx->tx_pool->tpo_hdev;
+       cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
+
+       /*
+        * If we're dealing with FastReg, but the device doesn't
+        * support GAPS and the tx has GAPS, then there is no real point
+        * in trying to map the memory, because it'll just fail. So
+        * preemptively fail with an appropriate message
+        */
+       if ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED) &&
+           !(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT) &&
+           tx->tx_gaps) {
+               CERROR("Using FastReg with no GAPS support, but tx has gaps. "
+                      "Try setting use_fastreg_gaps to 1\n");
+               return -EPROTONOSUPPORT;
+       }
+
+       /*
+        * FMR does not support gaps, so if the tx has gaps then we
+        * should make sure that the number of fragments we'll be sending
+        * over fits within the number of fragments negotiated on the
+        * connection; otherwise we won't be able to RDMA the data.
+        * We need to maintain the number-of-fragments negotiation on
+        * the connection for backwards compatibility.
+        */
+       if (tx->tx_gaps && (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)) {
+               if (tx->tx_conn &&
+                   tx->tx_conn->ibc_max_frags <= rd->rd_nfrags) {
+                       CERROR("TX number of frags (%d) is <= than connection"
+                              " number of frags (%d). Consider setting peer's"
+                              " map_on_demand to 256\n", tx->tx_nfrags,
+                              tx->tx_conn->ibc_max_frags);
+                       return -EFBIG;
+               }
+       }
+
+       fps = net->ibn_fmr_ps[cpt];
+       rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
+       if (rc != 0) {
+               CERROR("Can't map %u pages: %d\n", nob, rc);
+               return rc;
+       }
+
+       /*
+        * If rd is not tx_rd, it's going to get sent to a peer_ni, who will
+        * need the rkey
+        */
+       rd->rd_key = tx->tx_fmr.fmr_key;
+       /*
+        * for FastReg or FMR with no gaps we can accumulate all
+        * the fragments in one FastReg or FMR fragment.
+        */
+       if (((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) && !tx->tx_gaps) ||
+           (dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)) {
+               /* FMR requires zero based address */
+               if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
+                       rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
+               rd->rd_frags[0].rf_nob = nob;
+               rd->rd_nfrags = 1;
+       } else {
+               /*
+                * We're transmitting with gaps using FMR.
+                * We'll need to use multiple fragments and identify the
+                * zero based address of each fragment.
+                */
+               for (i = 0; i < rd->rd_nfrags; i++) {
+                       rd->rd_frags[i].rf_addr &= ~hdev->ibh_page_mask;
+                       rd->rd_frags[i].rf_addr += i << hdev->ibh_page_shift;
+               }
+       }
+
+       return 0;
 }
 
-static int
-kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+static void
+kiblnd_unmap_tx(struct kib_tx *tx)
 {
-        kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
-        __u64   iova;
-        int     rc;
-
-        iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
-
-        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, hdev, rd, &iova, &tx->tx_u.pmr);
-        if (rc != 0) {
-                CERROR("Failed to create MR by phybuf: %d\n", rc);
-                return rc;
-        }
-
-        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
-         * the rkey */
-        rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey :
-                                         tx->tx_u.pmr->pmr_mr->lkey;
-        rd->rd_nfrags = 1;
-        rd->rd_frags[0].rf_addr = iova;
-        rd->rd_frags[0].rf_nob  = nob;
-
-        return 0;
+       if (tx->tx_fmr.fmr_pfmr || tx->tx_fmr.fmr_frd)
+               kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);
+
+       if (tx->tx_nfrags != 0) {
+               kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
+                                   tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+               tx->tx_nfrags = 0;
+       }
 }
 
-void
-kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
+#ifdef HAVE_IB_GET_DMA_MR
+static struct ib_mr *
+kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
 {
-        kib_net_t  *net = ni->ni_data;
-
-        LASSERT (net != NULL);
-
-        if (net->ibn_with_fmr && tx->tx_u.fmr.fmr_pfmr != NULL) {
-                kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
-                tx->tx_u.fmr.fmr_pfmr = NULL;
-        } else if (net->ibn_with_pmr && tx->tx_u.pmr != NULL) {
-                kiblnd_pmr_pool_unmap(tx->tx_u.pmr);
-                tx->tx_u.pmr = NULL;
-        }
-
-        if (tx->tx_nfrags != 0) {
-                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
-                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
-                tx->tx_nfrags = 0;
-        }
+       struct kib_net *net = ni->ni_data;
+       struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
+       struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+       tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+
+       /*
+        * if map-on-demand is turned on and the device supports
+        * either FMR or FastReg then use that. Otherwise use global
+        * memory regions. If that's not available either, then you're
+        * dead in the water and fail the operation.
+        */
+       if (tunables->lnd_map_on_demand &&
+           (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED ||
+            net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED))
+               return NULL;
+
+       /*
+        * hdev->ibh_mrs can be NULL. This case is dealt with gracefully
+        * in the call chain. The mapping will fail with appropriate error
+        * message.
+        */
+       return hdev->ibh_mrs;
 }
+#endif
 
-int
-kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
-              kib_rdma_desc_t *rd, int nfrags)
+static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
+                        struct kib_rdma_desc *rd, int nfrags)
 {
-        kib_hca_dev_t      *hdev  = tx->tx_pool->tpo_hdev;
-        kib_net_t          *net   = ni->ni_data;
-        struct ib_mr       *mr    = NULL;
-        __u32               nob;
-        int                 i;
+       struct kib_net *net = ni->ni_data;
+       struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
+#ifdef HAVE_IB_GET_DMA_MR
+       struct ib_mr *mr = NULL;
+#endif
+       __u32 nob;
+       int i;
 
-        /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
+        /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the
          * RDMA sink */
         tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-        tx->tx_nfrags = nfrags;
+       tx->tx_nfrags = nfrags;
 
-        rd->rd_nfrags =
-                kiblnd_dma_map_sg(hdev->ibh_ibdev,
-                                  tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+       rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
+                                         tx->tx_nfrags, tx->tx_dmadir);
 
         for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                 rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
@@ -655,33 +715,32 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
                 nob += rd->rd_frags[i].rf_nob;
         }
 
-        /* looking for pre-mapping MR */
-        mr = kiblnd_find_rd_dma_mr(hdev, rd);
-        if (mr != NULL) {
-                /* found pre-mapping MR */
-                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
-                return 0;
-        }
+#ifdef HAVE_IB_GET_DMA_MR
+       mr = kiblnd_find_rd_dma_mr(ni, rd);
+       if (mr != NULL) {
+               /* found pre-mapping MR */
+               rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
+               return 0;
+       }
+#endif
 
-        if (net->ibn_with_fmr)
-                return kiblnd_fmr_map_tx(net, tx, rd, nob);
-        else if (net->ibn_with_pmr)
-                return kiblnd_pmr_map_tx(net, tx, rd, nob);
+       if (net->ibn_fmr_ps != NULL)
+               return kiblnd_fmr_map_tx(net, tx, rd, nob);
 
-        return -EINVAL;
+       return -EINVAL;
 }
 
-
-int
-kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
-                    unsigned int niov, struct iovec *iov, int offset, int nob)
+static int kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx,
+                              struct kib_rdma_desc *rd, unsigned int niov,
+                              struct kvec *iov, int offset, int nob)
 {
-        kib_net_t          *net = ni->ni_data;
-        struct page        *page;
+       struct kib_net *net = ni->ni_data;
+       struct page *page;
         struct scatterlist *sg;
         unsigned long       vaddr;
         int                 fragnob;
         int                 page_offset;
+       unsigned int        max_niov;
 
         LASSERT (nob > 0);
         LASSERT (niov > 0);
@@ -694,23 +753,43 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                 LASSERT (niov > 0);
         }
 
-        sg = tx->tx_frags;
-        do {
-                LASSERT (niov > 0);
-
-                vaddr = ((unsigned long)iov->iov_base) + offset;
-                page_offset = vaddr & (PAGE_SIZE - 1);
-                page = kiblnd_kvaddr_to_page(vaddr);
-                if (page == NULL) {
-                        CERROR ("Can't find page\n");
-                        return -EFAULT;
-                }
-
-                fragnob = min((int)(iov->iov_len - offset), nob);
-                fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
-
-                sg_set_page(sg, page, fragnob, page_offset);
-                sg++;
+       max_niov = niov;
+
+       sg = tx->tx_frags;
+       do {
+               LASSERT(niov > 0);
+
+               vaddr = ((unsigned long)iov->iov_base) + offset;
+               page_offset = vaddr & (PAGE_SIZE - 1);
+               page = lnet_kvaddr_to_page(vaddr);
+               if (page == NULL) {
+                       CERROR("Can't find page\n");
+                       return -EFAULT;
+               }
+
+               fragnob = min((int)(iov->iov_len - offset), nob);
+               fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
+
+               /*
+                * We're allowed to start at a non-aligned page offset in
+                * the first fragment and end at a non-aligned page offset
+                * in the last fragment.
+                */
+               if ((fragnob < (int)PAGE_SIZE - page_offset) &&
+                   (niov < max_niov) && nob > fragnob) {
+                       CDEBUG(D_NET, "fragnob %d < available page %d: with"
+                                     " remaining %d iovs with %d nob left\n",
+                              fragnob, (int)PAGE_SIZE - page_offset, niov,
+                              nob);
+                       tx->tx_gaps = true;
+               }
+
+               sg_set_page(sg, page, fragnob, page_offset);
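+               /* sg_next() returns NULL once the preallocated scatterlist
+                * table is exhausted */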
+               sg = sg_next(sg);
+               if (!sg) {
+                       CERROR("lacking enough sg entries to map tx\n");
+                       return -EFAULT;
+               }
 
                 if (offset + fragnob < iov->iov_len) {
                         offset += fragnob;
@@ -725,78 +804,100 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
         return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
 }
 
-int
-kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
-                      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
+static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
+                               struct kib_rdma_desc *rd, int nkiov,
+                               lnet_kiov_t *kiov, int offset, int nob)
 {
-        kib_net_t          *net = ni->ni_data;
-        struct scatterlist *sg;
-        int                 fragnob;
-
-        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
-
-        LASSERT (nob > 0);
-        LASSERT (nkiov > 0);
-        LASSERT (net != NULL);
-
-        while (offset >= kiov->kiov_len) {
-                offset -= kiov->kiov_len;
-                nkiov--;
-                kiov++;
-                LASSERT (nkiov > 0);
-        }
-
-        sg = tx->tx_frags;
-        do {
-                LASSERT (nkiov > 0);
-
-                fragnob = min((int)(kiov->kiov_len - offset), nob);
-
-                memset(sg, 0, sizeof(*sg));
-                sg_set_page(sg, kiov->kiov_page, fragnob,
-                            kiov->kiov_offset + offset);
-                sg++;
-
-                offset = 0;
-                kiov++;
-                nkiov--;
-                nob -= fragnob;
-        } while (nob > 0);
-
-        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
+       struct kib_net *net = ni->ni_data;
+       struct scatterlist *sg;
+       int                 fragnob;
+       int                 max_nkiov;
+
+       CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
+
+       LASSERT(nob > 0);
+       LASSERT(nkiov > 0);
+       LASSERT(net != NULL);
+
+       while (offset >= kiov->kiov_len) {
+               offset -= kiov->kiov_len;
+               nkiov--;
+               kiov++;
+               LASSERT(nkiov > 0);
+       }
+
+       max_nkiov = nkiov;
+
+       sg = tx->tx_frags;
+       do {
+               LASSERT(nkiov > 0);
+
+               fragnob = min((int)(kiov->kiov_len - offset), nob);
+
+               /*
+                * We're allowed to start at a non-aligned page offset in
+                * the first fragment and end at a non-aligned page offset
+                * in the last fragment.
+                */
+               if ((fragnob < (int)(kiov->kiov_len - offset)) &&
+                   nkiov < max_nkiov && nob > fragnob) {
+                       CDEBUG(D_NET, "fragnob %d < available page %d: with"
+                                     " remaining %d kiovs with %d nob left\n",
+                              fragnob, (int)(kiov->kiov_len - offset),
+                              nkiov, nob);
+                       tx->tx_gaps = true;
+               }
+
+               sg_set_page(sg, kiov->kiov_page, fragnob,
+                           kiov->kiov_offset + offset);
+               sg = sg_next(sg);
+               if (!sg) {
+                       CERROR("lacking enough sg entries to map tx\n");
+                       return -EFAULT;
+               }
+
+               offset = 0;
+               kiov++;
+               nkiov--;
+               nob -= fragnob;
+       } while (nob > 0);
+
+       return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
 }
 
-int
-kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
+static int
+kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
+__must_hold(&conn->ibc_lock)
 {
-        kib_msg_t         *msg = tx->tx_msg;
-        kib_peer_t        *peer = conn->ibc_peer;
-        int                ver = conn->ibc_version;
-        int                rc;
-        int                done;
-        struct ib_send_wr *bad_wrq;
-
-        LASSERT (tx->tx_queued);
-        /* We rely on this for QP sizing */
-        LASSERT (tx->tx_nwrq > 0);
-        LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
-
-        LASSERT (credit == 0 || credit == 1);
-        LASSERT (conn->ibc_outstanding_credits >= 0);
-        LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
-        LASSERT (conn->ibc_credits >= 0);
-        LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));
-
-        if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
-                /* tx completions outstanding... */
-                CDEBUG(D_NET, "%s: posted enough\n",
-                       libcfs_nid2str(peer->ibp_nid));
-                return -EAGAIN;
-        }
+       struct kib_msg *msg = tx->tx_msg;
+       struct kib_peer_ni *peer_ni = conn->ibc_peer;
+       struct lnet_ni *ni = peer_ni->ibp_ni;
+       int ver = conn->ibc_version;
+       int rc;
+       int done;
+
+       LASSERT(tx->tx_queued);
+       /* We rely on this for QP sizing */
+       LASSERT(tx->tx_nwrq > 0 && tx->tx_nsge >= 0);
+       LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);
+
+       LASSERT(credit == 0 || credit == 1);
+       LASSERT(conn->ibc_outstanding_credits >= 0);
+       LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
+       LASSERT(conn->ibc_credits >= 0);
+       LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
+
+       if (conn->ibc_nsends_posted ==
+           kiblnd_concurrent_sends(ver, ni)) {
+               /* tx completions outstanding... */
+               CDEBUG(D_NET, "%s: posted enough\n",
+                      libcfs_nid2str(peer_ni->ibp_nid));
+               return -EAGAIN;
+       }
 
         if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                 CDEBUG(D_NET, "%s: no credits\n",
-                       libcfs_nid2str(peer->ibp_nid));
+                       libcfs_nid2str(peer_ni->ibp_nid));
                 return -EAGAIN;
         }
 
@@ -804,47 +905,48 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
             conn->ibc_credits == 1 &&   /* last credit reserved */
             msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                 CDEBUG(D_NET, "%s: not using last credit\n",
-                       libcfs_nid2str(peer->ibp_nid));
+                       libcfs_nid2str(peer_ni->ibp_nid));
                 return -EAGAIN;
         }
 
         /* NB don't drop ibc_lock before bumping tx_sending */
-        cfs_list_del(&tx->tx_list);
+       list_del(&tx->tx_list);
         tx->tx_queued = 0;
 
         if (msg->ibm_type == IBLND_MSG_NOOP &&
-            (!kiblnd_send_noop(conn) ||     /* redundant NOOP */
+            (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
              (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
               conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
-                /* OK to drop when posted enough NOOPs, since
-                 * kiblnd_check_sends will queue NOOP again when
-                 * posted NOOPs complete */
-                cfs_spin_unlock(&conn->ibc_lock);
-                kiblnd_tx_done(peer->ibp_ni, tx);
-                cfs_spin_lock(&conn->ibc_lock);
+               /* OK to drop when posted enough NOOPs, since
+                * kiblnd_check_sends_locked will queue NOOP again when
+                * posted NOOPs complete */
+               spin_unlock(&conn->ibc_lock);
+               tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+               kiblnd_tx_done(tx);
+               spin_lock(&conn->ibc_lock);
                 CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
-                       libcfs_nid2str(peer->ibp_nid),
+                       libcfs_nid2str(peer_ni->ibp_nid),
                        conn->ibc_noops_posted);
                 return 0;
         }
 
-        kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
-                        peer->ibp_nid, conn->ibc_incarnation);
+        kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
+                        peer_ni->ibp_nid, conn->ibc_incarnation);
 
-        conn->ibc_credits -= credit;
-        conn->ibc_outstanding_credits = 0;
-        conn->ibc_nsends_posted++;
-        if (msg->ibm_type == IBLND_MSG_NOOP)
-                conn->ibc_noops_posted++;
+       conn->ibc_credits -= credit;
+       conn->ibc_outstanding_credits = 0;
+       conn->ibc_nsends_posted++;
+       if (msg->ibm_type == IBLND_MSG_NOOP)
+               conn->ibc_noops_posted++;
 
-        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
-         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
-         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
-         * and then re-queued here.  It's (just) possible that
-         * tx_sending is non-zero if we've not done the tx_complete()
-         * from the first send; hence the ++ rather than = below. */
-        tx->tx_sending++;
-        cfs_list_add(&tx->tx_list, &conn->ibc_active_txs);
+       /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
+        * PUT.  If so, it was first queued here as a PUT_REQ, sent and
+        * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
+        * and then re-queued here.  It's (just) possible that
+        * tx_sending is non-zero if we've not done the tx_complete()
+        * from the first send; hence the ++ rather than = below. */
+       tx->tx_sending++;
+       list_add(&tx->tx_list, &conn->ibc_active_txs);
 
         /* I'm still holding ibc_lock! */
         if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
@@ -854,11 +956,38 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
                 /* close_conn will launch failover */
                 rc = -ENETDOWN;
         } else {
-                rc = ib_post_send(conn->ibc_cmid->qp,
-                                  tx->tx_wrq, &bad_wrq);
-        }
+               struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
+               struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+               struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;
+
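+               /* for FastReg, chain the (optional) invalidate and fastreg
+                * work requests ahead of the transmit so the MR is valid
+                * before it is used */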
+               if (frd != NULL) {
+                       if (!frd->frd_valid) {
+                               wr = &frd->frd_inv_wr.wr;
+                               wr->next = &frd->frd_fastreg_wr.wr;
+                       } else {
+                               wr = &frd->frd_fastreg_wr.wr;
+                       }
+                       frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
+               }
+
+               LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+                        "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n",
+                        bad->wr_id, bad->opcode, bad->send_flags,
+                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
+
+               bad = NULL;
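+               /* LNet health: optionally simulate a local send failure */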
+               if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus))
+                       rc = -EINVAL;
+               else
+#ifdef HAVE_IB_POST_SEND_RECV_CONST
+                       rc = ib_post_send(conn->ibc_cmid->qp, wr,
+                                         (const struct ib_send_wr **)&bad);
+#else
+                       rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
+#endif
+       }
 
-        conn->ibc_last_send = jiffies;
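+       /* record the send time for keepalive scheduling */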
+       conn->ibc_last_send = ktime_get();
 
         if (rc == 0)
                 return 0;
@@ -877,33 +1006,33 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
 
         done = (tx->tx_sending == 0);
         if (done)
-                cfs_list_del(&tx->tx_list);
+               list_del(&tx->tx_list);
 
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_unlock(&conn->ibc_lock);
 
         if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                 CERROR("Error %d posting transmit to %s\n",
-                       rc, libcfs_nid2str(peer->ibp_nid));
+                       rc, libcfs_nid2str(peer_ni->ibp_nid));
         else
                 CDEBUG(D_NET, "Error %d posting transmit to %s\n",
-                       rc, libcfs_nid2str(peer->ibp_nid));
+                       rc, libcfs_nid2str(peer_ni->ibp_nid));
 
         kiblnd_close_conn(conn, rc);
 
-        if (done)
-                kiblnd_tx_done(peer->ibp_ni, tx);
+       if (done)
+               kiblnd_tx_done(tx);
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
-        return -EIO;
+       return -EIO;
 }
 
-void
-kiblnd_check_sends (kib_conn_t *conn)
+static void
+kiblnd_check_sends_locked(struct kib_conn *conn)
 {
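+       /* caller must hold conn->ibc_lock */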
-        int        ver = conn->ibc_version;
-        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
-        kib_tx_t  *tx;
+       int ver = conn->ibc_version;
+       struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+       struct kib_tx *tx;
 
         /* Don't send anything until after the connection is established */
         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -912,76 +1041,69 @@ kiblnd_check_sends (kib_conn_t *conn)
                 return;
         }
 
-        cfs_spin_lock(&conn->ibc_lock);
-
-        LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
+       LASSERT(conn->ibc_nsends_posted <=
+               kiblnd_concurrent_sends(ver, ni));
         LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                  conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
         LASSERT (conn->ibc_reserved_credits >= 0);
 
         while (conn->ibc_reserved_credits > 0 &&
-               !cfs_list_empty(&conn->ibc_tx_queue_rsrvd)) {
-                tx = cfs_list_entry(conn->ibc_tx_queue_rsrvd.next,
-                                    kib_tx_t, tx_list);
-                cfs_list_del(&tx->tx_list);
-                cfs_list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+              !list_empty(&conn->ibc_tx_queue_rsrvd)) {
+               tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
+                               struct kib_tx, tx_list);
+               list_del(&tx->tx_list);
+               list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
                 conn->ibc_reserved_credits--;
         }
 
-        if (kiblnd_send_noop(conn)) {
-                cfs_spin_unlock(&conn->ibc_lock);
+        if (kiblnd_need_noop(conn)) {
+               spin_unlock(&conn->ibc_lock);
 
-                tx = kiblnd_get_idle_tx(ni);
-                if (tx != NULL)
-                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
+               tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+               if (tx != NULL)
+                       kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
 
-                cfs_spin_lock(&conn->ibc_lock);
+               spin_lock(&conn->ibc_lock);
                 if (tx != NULL)
                         kiblnd_queue_tx_locked(tx, conn);
         }
 
-        kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */
-
         for (;;) {
                 int credit;
 
-                if (!cfs_list_empty(&conn->ibc_tx_queue_nocred)) {
+               if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                         credit = 0;
-                        tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next,
-                                            kib_tx_t, tx_list);
-                } else if (!cfs_list_empty(&conn->ibc_tx_noops)) {
+                       tx = list_entry(conn->ibc_tx_queue_nocred.next,
+                                       struct kib_tx, tx_list);
+               } else if (!list_empty(&conn->ibc_tx_noops)) {
                         LASSERT (!IBLND_OOB_CAPABLE(ver));
                         credit = 1;
-                        tx = cfs_list_entry(conn->ibc_tx_noops.next,
-                                        kib_tx_t, tx_list);
-                } else if (!cfs_list_empty(&conn->ibc_tx_queue)) {
+                       tx = list_entry(conn->ibc_tx_noops.next,
+                                       struct kib_tx, tx_list);
+               } else if (!list_empty(&conn->ibc_tx_queue)) {
                         credit = 1;
-                        tx = cfs_list_entry(conn->ibc_tx_queue.next,
-                                            kib_tx_t, tx_list);
+                       tx = list_entry(conn->ibc_tx_queue.next,
+                                       struct kib_tx, tx_list);
                 } else
                         break;
 
                 if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                         break;
         }
-
-        cfs_spin_unlock(&conn->ibc_lock);
-
-        kiblnd_conn_decref(conn); /* ...until here */
 }
 
-void
-kiblnd_tx_complete (kib_tx_t *tx, int status)
+static void
+kiblnd_tx_complete(struct kib_tx *tx, int status)
 {
         int           failed = (status != IB_WC_SUCCESS);
-        kib_conn_t   *conn = tx->tx_conn;
+       struct kib_conn   *conn = tx->tx_conn;
         int           idle;
 
         LASSERT (tx->tx_sending > 0);
 
         if (failed) {
                 if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
-                        CNETERR("Tx -> %s cookie "LPX64
+                       CNETERR("Tx -> %s cookie %#llx"
                                 " sending %d waiting %d: failed %d\n",
                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                 tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
@@ -992,7 +1114,7 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
                 kiblnd_peer_alive(conn->ibc_peer);
         }
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
         /* I could be racing with rdma completion.  Whoever makes 'tx' idle
          * gets to free it, which also drops its ref on 'conn'. */
@@ -1003,158 +1125,192 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
                 conn->ibc_noops_posted--;
 
         if (failed) {
-                tx->tx_waiting = 0;             /* don't wait for peer */
+               tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
+                tx->tx_waiting = 0;             /* don't wait for peer_ni */
                 tx->tx_status = -EIO;
         }
 
         idle = (tx->tx_sending == 0) &&         /* This is the final callback */
-               !tx->tx_waiting &&               /* Not waiting for peer */
+               !tx->tx_waiting &&               /* Not waiting for peer_ni */
                !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
         if (idle)
-                cfs_list_del(&tx->tx_list);
-
-        kiblnd_conn_addref(conn);               /* 1 ref for me.... */
+               list_del(&tx->tx_list);
 
-        cfs_spin_unlock(&conn->ibc_lock);
-
-        if (idle)
-                kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
+       kiblnd_check_sends_locked(conn);
+       spin_unlock(&conn->ibc_lock);
 
-        kiblnd_check_sends(conn);
-
-        kiblnd_conn_decref(conn);               /* ...until here */
+       if (idle)
+               kiblnd_tx_done(tx);
 }
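
Note: a tx can be racing between its send-completion callback and an RDMA completion; whichever path observes all three conditions below gets to free it. A hedged model of the idle test (stand-in struct, not the real struct kib_tx):

struct tx_state { int tx_sending; int tx_waiting; int tx_queued; };

static int tx_is_idle(const struct tx_state *tx)
{
	return tx->tx_sending == 0 &&	/* final send completion fired */
	       !tx->tx_waiting &&	/* no peer_ni response awaited */
	       !tx->tx_queued;		/* not re-queued (e.g. PUT_DONE) */
}
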
 
-void
-kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
+static void
+kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type,
+                  int body_nob)
 {
-        kib_hca_dev_t     *hdev = tx->tx_pool->tpo_hdev;
-        struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
-        struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
-        int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
-        struct ib_mr      *mr;
-
-        LASSERT (tx->tx_nwrq >= 0);
-        LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
-        LASSERT (nob <= IBLND_MSG_SIZE);
+       struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
+       struct ib_sge *sge = &tx->tx_msgsge;
+       struct ib_rdma_wr *wrq;
+       int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
+#ifdef HAVE_IB_GET_DMA_MR
+       struct ib_mr *mr = hdev->ibh_mrs;
+#endif
 
-        kiblnd_init_msg(tx->tx_msg, type, body_nob);
+       LASSERT(tx->tx_nwrq >= 0);
+       LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
+       LASSERT(nob <= IBLND_MSG_SIZE);
+#ifdef HAVE_IB_GET_DMA_MR
+       LASSERT(mr != NULL);
+#endif
 
-        mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
-        LASSERT (mr != NULL);
+       kiblnd_init_msg(tx->tx_msg, type, body_nob);
 
-        sge->lkey   = mr->lkey;
-        sge->addr   = tx->tx_msgaddr;
-        sge->length = nob;
+#ifdef HAVE_IB_GET_DMA_MR
+       sge->lkey   = mr->lkey;
+#else
+       sge->lkey   = hdev->ibh_pd->local_dma_lkey;
+#endif
+       sge->addr   = tx->tx_msgaddr;
+       sge->length = nob;
 
-        memset(wrq, 0, sizeof(*wrq));
+       wrq = &tx->tx_wrq[tx->tx_nwrq];
+       memset(wrq, 0, sizeof(*wrq));
 
-        wrq->next       = NULL;
-        wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
-        wrq->sg_list    = sge;
-        wrq->num_sge    = 1;
-        wrq->opcode     = IB_WR_SEND;
-        wrq->send_flags = IB_SEND_SIGNALED;
+       wrq->wr.next            = NULL;
+       wrq->wr.wr_id           = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
+       wrq->wr.sg_list         = sge;
+       wrq->wr.num_sge         = 1;
+       wrq->wr.opcode          = IB_WR_SEND;
+       wrq->wr.send_flags      = IB_SEND_SIGNALED;
 
-        tx->tx_nwrq++;
+       tx->tx_nwrq++;
 }
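
Note: kernels that removed ib_get_dma_mr() no longer provide a global DMA memory region, so when configure does not set HAVE_IB_GET_DMA_MR the message SGE's lkey comes from the protection domain's reserved local_dma_lkey instead. A sketch of just that compile-time choice, with stub types standing in for the verbs structures:

struct mr_stub { unsigned int lkey; };           /* global DMA MR */
struct pd_stub { unsigned int local_dma_lkey; }; /* protection domain */

static unsigned int msg_lkey(const struct mr_stub *mr, const struct pd_stub *pd)
{
#ifdef HAVE_IB_GET_DMA_MR
	return mr->lkey;		/* global DMA MR still available */
#else
	return pd->local_dma_lkey;	/* reserved lkey from the PD */
#endif
}
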
 
-int
-kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
-                  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+static int
+kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+                int resid, struct kib_rdma_desc *dstrd, u64 dstcookie)
 {
-        kib_msg_t         *ibmsg = tx->tx_msg;
-        kib_rdma_desc_t   *srcrd = tx->tx_rd;
-        struct ib_sge     *sge = &tx->tx_sge[0];
-        struct ib_send_wr *wrq = &tx->tx_wrq[0];
-        int                rc  = resid;
-        int                srcidx;
-        int                dstidx;
-        int                wrknob;
-
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (tx->tx_nwrq == 0);
-        LASSERT (type == IBLND_MSG_GET_DONE ||
-                 type == IBLND_MSG_PUT_DONE);
-
-        srcidx = dstidx = 0;
-
-        while (resid > 0) {
-                if (srcidx >= srcrd->rd_nfrags) {
-                        CERROR("Src buffer exhausted: %d frags\n", srcidx);
-                        rc = -EPROTO;
-                        break;
-                }
-
-                if (dstidx == dstrd->rd_nfrags) {
-                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
-                        rc = -EPROTO;
-                        break;
-                }
-
-                if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
-                        CERROR("RDMA too fragmented for %s (%d): "
-                               "%d/%d src %d/%d dst frags\n",
-                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
-                               IBLND_RDMA_FRAGS(conn->ibc_version),
-                               srcidx, srcrd->rd_nfrags,
-                               dstidx, dstrd->rd_nfrags);
-                        rc = -EMSGSIZE;
-                        break;
-                }
-
-                wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
-                                 kiblnd_rd_frag_size(dstrd, dstidx)), resid);
-
-                sge = &tx->tx_sge[tx->tx_nwrq];
-                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
-                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
-                sge->length = wrknob;
-
-                wrq = &tx->tx_wrq[tx->tx_nwrq];
-
-                wrq->next       = wrq + 1;
-                wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
-                wrq->sg_list    = sge;
-                wrq->num_sge    = 1;
-                wrq->opcode     = IB_WR_RDMA_WRITE;
-                wrq->send_flags = 0;
-
-                wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
-                wrq->wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);
-
-                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
-                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
+       struct kib_msg *ibmsg = tx->tx_msg;
+       struct kib_rdma_desc *srcrd = tx->tx_rd;
+       struct ib_rdma_wr *wrq = NULL;
+       struct ib_sge     *sge;
+       int                rc  = resid;
+       int                srcidx;
+       int                dstidx;
+       int                sge_nob;
+       int                wrq_sge;
+
+       LASSERT(!in_interrupt());
+       LASSERT(tx->tx_nwrq == 0 && tx->tx_nsge == 0);
+       LASSERT(type == IBLND_MSG_GET_DONE || type == IBLND_MSG_PUT_DONE);
+
+       for (srcidx = dstidx = wrq_sge = sge_nob = 0;
+            resid > 0; resid -= sge_nob) {
+               int     prev = dstidx;
+
+               if (srcidx >= srcrd->rd_nfrags) {
+                       CERROR("Src buffer exhausted: %d frags\n", srcidx);
+                       rc = -EPROTO;
+                       break;
+               }
+
+               if (dstidx >= dstrd->rd_nfrags) {
+                       CERROR("Dst buffer exhausted: %d frags\n", dstidx);
+                       rc = -EPROTO;
+                       break;
+               }
+
+               if (tx->tx_nwrq >= conn->ibc_max_frags) {
+                       CERROR("RDMA has too many fragments for peer_ni %s (%d), "
+                              "src idx/frags: %d/%d dst idx/frags: %d/%d\n",
+                              libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                              conn->ibc_max_frags,
+                              srcidx, srcrd->rd_nfrags,
+                              dstidx, dstrd->rd_nfrags);
+                       rc = -EMSGSIZE;
+                       break;
+               }
+
+               sge_nob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
+                                 kiblnd_rd_frag_size(dstrd, dstidx)), resid);
+
+               sge = &tx->tx_sge[tx->tx_nsge];
+               sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
+               sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
+               sge->length = sge_nob;
+
+               if (wrq_sge == 0) {
+                       wrq = &tx->tx_wrq[tx->tx_nwrq];
+
+                       wrq->wr.next    = &(wrq + 1)->wr;
+                       wrq->wr.wr_id   = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
+                       wrq->wr.sg_list = sge;
+                       wrq->wr.opcode  = IB_WR_RDMA_WRITE;
+                       wrq->wr.send_flags = 0;
+
+#ifdef HAVE_IB_RDMA_WR
+                       wrq->remote_addr        = kiblnd_rd_frag_addr(dstrd,
+                                                                     dstidx);
+                       wrq->rkey               = kiblnd_rd_frag_key(dstrd,
+                                                                    dstidx);
+#else
+                       wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd,
+                                                                       dstidx);
+                       wrq->wr.wr.rdma.rkey    = kiblnd_rd_frag_key(dstrd,
+                                                                    dstidx);
+#endif
+               }
 
-                resid -= wrknob;
+               srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, sge_nob);
+               dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, sge_nob);
 
-                tx->tx_nwrq++;
-                wrq++;
-                sge++;
-        }
+               wrq_sge++;
+               if (wrq_sge == *kiblnd_tunables.kib_wrq_sge || dstidx != prev) {
+                       tx->tx_nwrq++;
+                       wrq->wr.num_sge = wrq_sge;
+                       wrq_sge = 0;
+               }
+               tx->tx_nsge++;
+       }
 
-        if (rc < 0)                             /* no RDMA if completing with failure */
-                tx->tx_nwrq = 0;
+       if (rc < 0)     /* no RDMA if completing with failure */
+               tx->tx_nwrq = tx->tx_nsge = 0;
 
         ibmsg->ibm_u.completion.ibcm_status = rc;
         ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
         kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
-                           type, sizeof (kib_completion_msg_t));
+                          type, sizeof(struct kib_completion_msg));
 
         return rc;
 }
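
Note: where the old loop emitted one RDMA_WRITE work request per fragment pair, the rewritten loop packs up to kib_wrq_sge scatter/gather entries into each WR, closing a WR early when the destination fragment advances, since a single RDMA_WRITE can only target one contiguous remote region. A stand-alone model of that packing rule (kib_wrq_sge is the real tunable behind max_sge; everything else is a stand-in):

/* dst_of[i]: which destination fragment source piece i lands in.
 * Returns how many work requests the packing rule produces. */
static int count_wrqs(int npieces, const int *dst_of, int max_sge)
{
	int nwrq = 0, wrq_sge = 0, i;

	for (i = 0; i < npieces; i++) {
		wrq_sge++;
		/* close the WR on an exhausted SGE budget or when the
		 * next piece targets a different remote fragment */
		if (wrq_sge == max_sge ||
		    (i + 1 < npieces && dst_of[i + 1] != dst_of[i])) {
			nwrq++;
			wrq_sge = 0;
		}
	}
	if (wrq_sge != 0)
		nwrq++;		/* flush a partially filled final WR */
	return nwrq;
}
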
 
-void
-kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
+static void
+kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
 {
-        cfs_list_t   *q;
-
-        LASSERT (tx->tx_nwrq > 0);              /* work items set up */
-        LASSERT (!tx->tx_queued);               /* not queued for sending already */
-        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
-
-        tx->tx_queued = 1;
-        tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * CFS_HZ);
+       struct list_head *q;
+       s64 timeout_ns;
+
+       LASSERT(tx->tx_nwrq > 0);       /* work items set up */
+       LASSERT(!tx->tx_queued);        /* not queued for sending already */
+       LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+
+       if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) {
+               tx->tx_status = -ECONNABORTED;
+               tx->tx_waiting = 0;
+               if (tx->tx_conn != NULL) {
+                       /* PUT_DONE first attached to conn as a PUT_REQ */
+                       LASSERT(tx->tx_conn == conn);
+                       LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
+                       tx->tx_conn = NULL;
+                       kiblnd_conn_decref(conn);
+               }
+               list_add(&tx->tx_list, &conn->ibc_zombie_txs);
+
+               return;
+       }
+
+       timeout_ns = lnet_get_lnd_timeout() * NSEC_PER_SEC;
+       tx->tx_queued = 1;
+       tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);
 
         if (tx->tx_conn == NULL) {
                 kiblnd_conn_addref(conn);
@@ -1194,17 +1350,16 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
                 break;
         }
 
-        cfs_list_add_tail(&tx->tx_list, q);
+       list_add_tail(&tx->tx_list, q);
 }
 
-void
-kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
+static void
+kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
 {
-        cfs_spin_lock(&conn->ibc_lock);
-        kiblnd_queue_tx_locked(tx, conn);
-        cfs_spin_unlock(&conn->ibc_lock);
-
-        kiblnd_check_sends(conn);
+       spin_lock(&conn->ibc_lock);
+       kiblnd_queue_tx_locked(tx, conn);
+       kiblnd_check_sends_locked(conn);
+       spin_unlock(&conn->ibc_lock);
 }
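
Note: the old wrapper dropped ibc_lock and then called kiblnd_check_sends(), which re-acquired it and needed a temporary connection reference to stay safe; folding the check into kiblnd_check_sends_locked() removes the extra lock round-trip and the ref dance. A minimal userspace sketch of the new shape, with a pthread mutex standing in for the spinlock:

#include <pthread.h>

struct conn_stub { pthread_mutex_t ibc_lock; };

static void queue_tx_locked(struct conn_stub *c)    { (void)c; /* enqueue tx */ }
static void check_sends_locked(struct conn_stub *c) { (void)c; /* post WRs */ }

static void queue_tx(struct conn_stub *c)
{
	pthread_mutex_lock(&c->ibc_lock);
	queue_tx_locked(c);
	check_sends_locked(c);	/* no unlock/relock, no extra conn ref */
	pthread_mutex_unlock(&c->ibc_lock);
}
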
 
 static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
@@ -1215,14 +1370,12 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
         unsigned short port;
         int rc;
 
-#ifdef HAVE_OFED_RDMA_SET_REUSEADDR
         /* allow the port to be reused */
         rc = rdma_set_reuseaddr(cmid, 1);
         if (rc != 0) {
                 CERROR("Unable to set reuse on cmid: %d\n", rc);
                 return rc;
         }
-#endif
 
         /* look for a free privileged port */
         for (port = PROT_SOCK-1; port > 0; port--) {
@@ -1243,29 +1396,28 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
         }
 
         CERROR("Failed to bind to a free privileged port\n");
-#ifndef HAVE_OFED_RDMA_SET_REUSEADDR
-        CERROR("You may need IB verbs that supports rdma_set_reuseaddr()\n");
-#endif
         return rc;
 }
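
Note: with rdma_set_reuseaddr() now assumed available (the HAVE_OFED_RDMA_SET_REUSEADDR guards are gone), kiblnd_resolve_addr simply counts down through the privileged ports below PROT_SOCK until a bind succeeds. The same scan, modelled with a hypothetical bind_one() in place of rdma_bind_addr():

#include <errno.h>

#define PROT_SOCK 1024	/* first non-privileged port */

static int bind_one(unsigned short port)
{
	/* stand-in: pretend ports below 900 are already taken */
	return port >= 900 ? 0 : -EADDRINUSE;
}

static int bind_privileged(unsigned short *chosen)
{
	unsigned short port;

	for (port = PROT_SOCK - 1; port > 0; port--) {
		if (bind_one(port) == 0) {
			*chosen = port;
			return 0;
		}
		/* EADDRINUSE etc.: keep scanning downwards */
	}
	return -EADDRNOTAVAIL;
}
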
 
-void
-kiblnd_connect_peer (kib_peer_t *peer)
+static void
+kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
 {
         struct rdma_cm_id *cmid;
-        kib_dev_t         *dev;
-        kib_net_t         *net = peer->ibp_ni->ni_data;
+       struct kib_dev *dev;
+       struct kib_net *net = peer_ni->ibp_ni->ni_data;
         struct sockaddr_in srcaddr;
         struct sockaddr_in dstaddr;
-        int                rc;
+       int rc;
 
         LASSERT (net != NULL);
-        LASSERT (peer->ibp_connecting > 0);
+        LASSERT (peer_ni->ibp_connecting > 0);
+
+        cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP,
+                                     IB_QPT_RC);
 
-        cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP);
         if (IS_ERR(cmid)) {
                 CERROR("Can't create CMID for %s: %ld\n",
-                       libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
+                       libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid));
                 rc = PTR_ERR(cmid);
                 goto failed;
         }
@@ -1278,49 +1430,100 @@ kiblnd_connect_peer (kib_peer_t *peer)
         memset(&dstaddr, 0, sizeof(dstaddr));
         dstaddr.sin_family = AF_INET;
         dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
-        dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));
-
-        kiblnd_peer_addref(peer);               /* cmid's ref */
-
-        if (*kiblnd_tunables.kib_use_priv_port) {
-                rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
-                                         *kiblnd_tunables.kib_timeout * 1000);
-        } else {
-                rc = rdma_resolve_addr(cmid,
-                                       (struct sockaddr *)&srcaddr,
-                                       (struct sockaddr *)&dstaddr,
-                                       *kiblnd_tunables.kib_timeout * 1000);
-        }
-        if (rc != 0) {
-                /* Can't initiate address resolution:  */
-                CERROR("Can't resolve addr for %s: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), rc);
-                goto failed2;
-        }
-
-        LASSERT (cmid->device != NULL);
-        CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
-               libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
-               HIPQUAD(dev->ibd_ifip), cmid->device->name);
-
-        return;
+        dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid));
+
+        kiblnd_peer_addref(peer_ni);               /* cmid's ref */
+
+       if (*kiblnd_tunables.kib_use_priv_port) {
+               rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
+                                        lnet_get_lnd_timeout() * 1000);
+       } else {
+               rc = rdma_resolve_addr(cmid,
+                                      (struct sockaddr *)&srcaddr,
+                                      (struct sockaddr *)&dstaddr,
+                                      lnet_get_lnd_timeout() * 1000);
+       }
+       if (rc != 0) {
+               /* Can't initiate address resolution:  */
+               CERROR("Can't resolve addr for %s: %d\n",
+                      libcfs_nid2str(peer_ni->ibp_nid), rc);
+               goto failed2;
+       }
+
+       return;
 
  failed2:
-        kiblnd_peer_decref(peer);               /* cmid's ref */
-        rdma_destroy_id(cmid);
+       kiblnd_peer_connect_failed(peer_ni, 1, rc);
+       kiblnd_peer_decref(peer_ni);               /* cmid's ref */
+       rdma_destroy_id(cmid);
+       return;
  failed:
-        kiblnd_peer_connect_failed(peer, 1, rc);
+       kiblnd_peer_connect_failed(peer_ni, 1, rc);
+}
+
+bool
+kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
+{
+       rwlock_t         *glock = &kiblnd_data.kib_global_lock;
+       char             *reason = NULL;
+       struct list_head  txs;
+       unsigned long     flags;
+
+       INIT_LIST_HEAD(&txs);
+
+       write_lock_irqsave(glock, flags);
+       if (peer_ni->ibp_reconnecting == 0) {
+               if (peer_ni->ibp_accepting)
+                       reason = "accepting";
+               else if (peer_ni->ibp_connecting)
+                       reason = "connecting";
+               else if (!list_empty(&peer_ni->ibp_conns))
+                       reason = "connected";
+               else /* connected then closed */
+                       reason = "closed";
+
+               goto no_reconnect;
+       }
+
+       if (peer_ni->ibp_accepting)
+               CNETERR("Detecting race between accepting and reconnecting\n");
+       peer_ni->ibp_reconnecting--;
+
+       if (!kiblnd_peer_active(peer_ni)) {
+               list_splice_init(&peer_ni->ibp_tx_queue, &txs);
+               reason = "unlinked";
+               goto no_reconnect;
+       }
+
+       peer_ni->ibp_connecting++;
+       peer_ni->ibp_reconnected++;
+
+       write_unlock_irqrestore(glock, flags);
+
+       kiblnd_connect_peer(peer_ni);
+       return true;
+
+ no_reconnect:
+       write_unlock_irqrestore(glock, flags);
+
+       CWARN("Abort reconnection of %s: %s\n",
+             libcfs_nid2str(peer_ni->ibp_nid), reason);
+       kiblnd_txlist_done(&txs, -ECONNABORTED,
+                          LNET_MSG_STATUS_LOCAL_ABORTED);
+       return false;
 }
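
Note: kiblnd_reconnect_peer() only proceeds when a reconnect is actually pending and the peer_ni is still hashed; every other state aborts, and an unlinked peer_ni additionally has its queued txs completed as LNET_MSG_STATUS_LOCAL_ABORTED. The decision table above, as a hedged stand-alone model:

#include <stddef.h>

struct peer_model {
	int reconnecting, accepting, connecting;
	int has_conns;	/* !list_empty(&ibp_conns) */
	int active;	/* still linked in the peer_ni table */
};

/* Returns the abort reason, or NULL when reconnection may proceed. */
static const char *reconnect_abort_reason(const struct peer_model *p)
{
	if (p->reconnecting == 0) {
		if (p->accepting)  return "accepting";
		if (p->connecting) return "connecting";
		if (p->has_conns)  return "connected";
		return "closed";	/* connected then closed */
	}
	if (!p->active)
		return "unlinked";	/* queued txs get aborted too */
	return NULL;
}
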
 
 void
-kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
+kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
 {
-        kib_peer_t        *peer;
-        kib_peer_t        *peer2;
-        kib_conn_t        *conn;
-        cfs_rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
+       struct kib_peer_ni *peer_ni;
+       struct kib_peer_ni *peer2;
+       struct kib_conn *conn;
+       rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
         unsigned long      flags;
         int                rc;
+       int                i;
+       struct lnet_ioctl_config_o2iblnd_tunables *tunables;
 
         /* If I get here, I've committed to send, so I complete the tx with
          * failure on any problems */
@@ -1328,17 +1531,17 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
         LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
         LASSERT (tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */
 
-        /* First time, just use a read lock since I expect to find my peer
+        /* First time, just use a read lock since I expect to find my peer_ni
          * connected */
-        cfs_read_lock_irqsave(g_lock, flags);
+       read_lock_irqsave(g_lock, flags);
 
-        peer = kiblnd_find_peer_locked(nid);
-        if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) {
-                /* Found a peer with an established connection */
-                conn = kiblnd_get_conn_locked(peer);
+        peer_ni = kiblnd_find_peer_locked(ni, nid);
+       if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) {
+                /* Found a peer_ni with an established connection */
+                conn = kiblnd_get_conn_locked(peer_ni);
                 kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                cfs_read_unlock_irqrestore(g_lock, flags);
+               read_unlock_irqrestore(g_lock, flags);
 
                 if (tx != NULL)
                         kiblnd_queue_tx(tx, conn);
@@ -1346,25 +1549,24 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 return;
         }
 
-        cfs_read_unlock(g_lock);
-        /* Re-try with a write lock */
-        cfs_write_lock(g_lock);
+       read_unlock(g_lock);
+       /* Re-try with a write lock */
+       write_lock(g_lock);
 
-        peer = kiblnd_find_peer_locked(nid);
-        if (peer != NULL) {
-                if (cfs_list_empty(&peer->ibp_conns)) {
-                        /* found a peer, but it's still connecting... */
-                        LASSERT (peer->ibp_connecting != 0 ||
-                                 peer->ibp_accepting != 0);
+        peer_ni = kiblnd_find_peer_locked(ni, nid);
+        if (peer_ni != NULL) {
+               if (list_empty(&peer_ni->ibp_conns)) {
+                        /* found a peer_ni, but it's still connecting... */
+                       LASSERT(kiblnd_peer_connecting(peer_ni));
                         if (tx != NULL)
-                                cfs_list_add_tail(&tx->tx_list,
-                                                  &peer->ibp_tx_queue);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
-                } else {
-                        conn = kiblnd_get_conn_locked(peer);
-                        kiblnd_conn_addref(conn); /* 1 ref for me... */
+                               list_add_tail(&tx->tx_list,
+                                                  &peer_ni->ibp_tx_queue);
+                       write_unlock_irqrestore(g_lock, flags);
+               } else {
+                       conn = kiblnd_get_conn_locked(peer_ni);
+                       kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                         if (tx != NULL)
                                 kiblnd_queue_tx(tx, conn);
@@ -1373,101 +1575,104 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 return;
         }
 
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_unlock_irqrestore(g_lock, flags);
 
-        /* Allocate a peer ready to add to the peer table and retry */
-        rc = kiblnd_create_peer(ni, &peer, nid);
-        if (rc != 0) {
-                CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
-                if (tx != NULL) {
-                        tx->tx_status = -EHOSTUNREACH;
-                        tx->tx_waiting = 0;
-                        kiblnd_tx_done(ni, tx);
-                }
-                return;
-        }
+       /* Allocate a peer_ni ready to add to the peer_ni table and retry */
+       rc = kiblnd_create_peer(ni, &peer_ni, nid);
+       if (rc != 0) {
+               CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid));
+               if (tx != NULL) {
+                       tx->tx_status = -EHOSTUNREACH;
+                       tx->tx_waiting = 0;
+                       tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+                       kiblnd_tx_done(tx);
+               }
+               return;
+       }
 
-        cfs_write_lock_irqsave(g_lock, flags);
+       write_lock_irqsave(g_lock, flags);
 
-        peer2 = kiblnd_find_peer_locked(nid);
+        peer2 = kiblnd_find_peer_locked(ni, nid);
         if (peer2 != NULL) {
-                if (cfs_list_empty(&peer2->ibp_conns)) {
-                        /* found a peer, but it's still connecting... */
-                        LASSERT (peer2->ibp_connecting != 0 ||
-                                 peer2->ibp_accepting != 0);
+               if (list_empty(&peer2->ibp_conns)) {
+                        /* found a peer_ni, but it's still connecting... */
+                       LASSERT(kiblnd_peer_connecting(peer2));
                         if (tx != NULL)
-                                cfs_list_add_tail(&tx->tx_list,
+                               list_add_tail(&tx->tx_list,
                                                   &peer2->ibp_tx_queue);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
-                } else {
-                        conn = kiblnd_get_conn_locked(peer2);
-                        kiblnd_conn_addref(conn); /* 1 ref for me... */
+                       write_unlock_irqrestore(g_lock, flags);
+               } else {
+                       conn = kiblnd_get_conn_locked(peer2);
+                       kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                         if (tx != NULL)
                                 kiblnd_queue_tx(tx, conn);
                         kiblnd_conn_decref(conn); /* ...to here */
                 }
 
-                kiblnd_peer_decref(peer);
+                kiblnd_peer_decref(peer_ni);
                 return;
         }
 
-        /* Brand new peer */
-        LASSERT (peer->ibp_connecting == 0);
-        peer->ibp_connecting = 1;
+       /* Brand new peer_ni */
+       LASSERT(peer_ni->ibp_connecting == 0);
+       tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+       peer_ni->ibp_connecting = tunables->lnd_conns_per_peer;
 
-        /* always called with a ref on ni, which prevents ni being shutdown */
-        LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+       /* always called with a ref on ni, which prevents ni being shutdown */
+       LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0);
 
-        if (tx != NULL)
-                cfs_list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
+       if (tx != NULL)
+               list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
 
-        kiblnd_peer_addref(peer);
-        cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+        kiblnd_peer_addref(peer_ni);
+       list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
 
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_unlock_irqrestore(g_lock, flags);
 
-        kiblnd_connect_peer(peer);
-        kiblnd_peer_decref(peer);
+       for (i = 0; i < tunables->lnd_conns_per_peer; i++)
+               kiblnd_connect_peer(peer_ni);
+        kiblnd_peer_decref(peer_ni);
 }
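
Note: the lookup follows the classic optimistic pattern: a read lock for the expected case (peer_ni already connected), a write lock and re-lookup for the connecting case, and, after allocating a fresh peer_ni outside any lock, one more re-check because another thread may have inserted the same NID in the meantime. With lnd_conns_per_peer, a brand-new peer_ni now also gets one connection attempt per configured QP. A compact userspace model of just the race handling, using a pthread rwlock for kib_global_lock and a toy one-slot table:

#include <pthread.h>
#include <stddef.h>

static pthread_rwlock_t glock = PTHREAD_RWLOCK_INITIALIZER;
static void *peer_slot;	/* toy stand-in for the peer_ni hash table */

static void *find_or_create(void *fresh)
{
	void *p;

	pthread_rwlock_rdlock(&glock);	/* fast path: likely connected */
	p = peer_slot;
	pthread_rwlock_unlock(&glock);
	if (p != NULL)
		return p;

	pthread_rwlock_wrlock(&glock);	/* slow path: re-check first */
	p = peer_slot;
	if (p != NULL) {		/* lost the race: use theirs */
		pthread_rwlock_unlock(&glock);
		return p;
	}
	peer_slot = fresh;		/* insert while still write-locked */
	pthread_rwlock_unlock(&glock);
	return fresh;
}
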
 
 int
-kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
 {
-        lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
-        int               type = lntmsg->msg_type;
-        lnet_process_id_t target = lntmsg->msg_target;
-        int               target_is_router = lntmsg->msg_target_is_router;
-        int               routing = lntmsg->msg_routing;
-        unsigned int      payload_niov = lntmsg->msg_niov;
-        struct iovec     *payload_iov = lntmsg->msg_iov;
-        lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
-        unsigned int      payload_offset = lntmsg->msg_offset;
-        unsigned int      payload_nob = lntmsg->msg_len;
-        kib_msg_t        *ibmsg;
-        kib_tx_t         *tx;
-        int               nob;
-        int               rc;
+       struct lnet_hdr *hdr = &lntmsg->msg_hdr;
+       int               type = lntmsg->msg_type;
+       struct lnet_process_id target = lntmsg->msg_target;
+       int               target_is_router = lntmsg->msg_target_is_router;
+       int               routing = lntmsg->msg_routing;
+       unsigned int      payload_niov = lntmsg->msg_niov;
+       struct kvec      *payload_iov = lntmsg->msg_iov;
+       lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
+       unsigned int      payload_offset = lntmsg->msg_offset;
+       unsigned int      payload_nob = lntmsg->msg_len;
+       struct kib_msg *ibmsg;
+       struct kib_rdma_desc *rd;
+       struct kib_tx *tx;
+       int               nob;
+       int               rc;
 
         /* NB 'private' is different depending on what we're sending.... */
 
         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
                payload_nob, payload_niov, libcfs_id2str(target));
 
-        LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= LNET_MAX_IOV);
+       LASSERT (payload_nob == 0 || payload_niov > 0);
+       LASSERT (payload_niov <= LNET_MAX_IOV);
 
-        /* Thread context */
-        LASSERT (!cfs_in_interrupt());
-        /* payload is either all vaddrs or all pages */
-        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+       /* Thread context */
+       LASSERT (!in_interrupt());
+       /* payload is either all vaddrs or all pages */
+       LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
 
-        switch (type) {
-        default:
-                LBUG();
-                return (-EIO);
+       switch (type) {
+       default:
+               LBUG();
+               return (-EIO);
 
         case LNET_MSG_ACK:
                 LASSERT (payload_nob == 0);
@@ -1478,51 +1683,50 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                         break;                  /* send IMMEDIATE */
 
                 /* is the REPLY message too small for RDMA? */
-                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
+               nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
                 if (nob <= IBLND_MSG_SIZE)
                         break;                  /* send IMMEDIATE */
 
-                tx = kiblnd_get_idle_tx(ni);
-                if (tx == NULL) {
-                        CERROR("Can't allocate txd for GET to %s: \n",
-                               libcfs_nid2str(target.nid));
-                        return -ENOMEM;
-                }
-
-                ibmsg = tx->tx_msg;
-
-                if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
-                        rc = kiblnd_setup_rd_iov(ni, tx,
-                                                 &ibmsg->ibm_u.get.ibgm_rd,
-                                                 lntmsg->msg_md->md_niov,
-                                                 lntmsg->msg_md->md_iov.iov,
-                                                 0, lntmsg->msg_md->md_length);
-                else
-                        rc = kiblnd_setup_rd_kiov(ni, tx,
-                                                  &ibmsg->ibm_u.get.ibgm_rd,
-                                                  lntmsg->msg_md->md_niov,
-                                                  lntmsg->msg_md->md_iov.kiov,
-                                                  0, lntmsg->msg_md->md_length);
-                if (rc != 0) {
-                        CERROR("Can't setup GET sink for %s: %d\n",
-                               libcfs_nid2str(target.nid), rc);
-                        kiblnd_tx_done(ni, tx);
-                        return -EIO;
-                }
-
-                nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
-                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
-                ibmsg->ibm_u.get.ibgm_hdr = *hdr;
+               tx = kiblnd_get_idle_tx(ni, target.nid);
+               if (tx == NULL) {
+                       CERROR("Can't allocate txd for GET to %s\n",
+                              libcfs_nid2str(target.nid));
+                       return -ENOMEM;
+               }
+
+               ibmsg = tx->tx_msg;
+               rd = &ibmsg->ibm_u.get.ibgm_rd;
+               if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
+                       rc = kiblnd_setup_rd_iov(ni, tx, rd,
+                                                lntmsg->msg_md->md_niov,
+                                                lntmsg->msg_md->md_iov.iov,
+                                                0, lntmsg->msg_md->md_length);
+               else
+                       rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+                                                 lntmsg->msg_md->md_niov,
+                                                 lntmsg->msg_md->md_iov.kiov,
+                                                 0, lntmsg->msg_md->md_length);
+               if (rc != 0) {
+                       CERROR("Can't setup GET sink for %s: %d\n",
+                              libcfs_nid2str(target.nid), rc);
+                       tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+                       kiblnd_tx_done(tx);
+                       return -EIO;
+               }
+
+               nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
+               ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
+               ibmsg->ibm_u.get.ibgm_hdr = *hdr;
 
                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
 
                 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
-                if (tx->tx_lntmsg[1] == NULL) {
-                        CERROR("Can't create reply for GET -> %s\n",
-                               libcfs_nid2str(target.nid));
-                        kiblnd_tx_done(ni, tx);
-                        return -EIO;
-                }
+               if (tx->tx_lntmsg[1] == NULL) {
+                       CERROR("Can't create reply for GET -> %s\n",
+                              libcfs_nid2str(target.nid));
+                       kiblnd_tx_done(tx);
+                       return -EIO;
+               }
 
                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
                 tx->tx_waiting = 1;             /* waiting for GET_DONE */
@@ -1532,11 +1736,11 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         case LNET_MSG_REPLY:
         case LNET_MSG_PUT:
                 /* Is the payload small enough not to need RDMA? */
-                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
+               nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
                 if (nob <= IBLND_MSG_SIZE)
                         break;                  /* send IMMEDIATE */
 
-                tx = kiblnd_get_idle_tx(ni);
+               tx = kiblnd_get_idle_tx(ni, target.nid);
                 if (tx == NULL) {
                         CERROR("Can't allocate %s txd for %s\n",
                                type == LNET_MSG_PUT ? "PUT" : "REPLY",
@@ -1552,17 +1756,18 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                         rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
                                                   payload_niov, payload_kiov,
                                                   payload_offset, payload_nob);
-                if (rc != 0) {
-                        CERROR("Can't setup PUT src for %s: %d\n",
-                               libcfs_nid2str(target.nid), rc);
-                        kiblnd_tx_done(ni, tx);
-                        return -EIO;
-                }
+               if (rc != 0) {
+                       CERROR("Can't setup PUT src for %s: %d\n",
+                              libcfs_nid2str(target.nid), rc);
+                       kiblnd_tx_done(tx);
+                       return -EIO;
+               }
 
                 ibmsg = tx->tx_msg;
                 ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
                 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
-                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+               kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
+                                  sizeof(struct kib_putreq_msg));
 
                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
                 tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
@@ -1570,12 +1775,11 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                 return 0;
         }
 
-        /* send IMMEDIATE */
+       /* send IMMEDIATE */
+       LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
+               <= IBLND_MSG_SIZE);
 
-        LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
-                 <= IBLND_MSG_SIZE);
-
-        tx = kiblnd_get_idle_tx(ni);
+       tx = kiblnd_get_idle_tx(ni, target.nid);
         if (tx == NULL) {
                 CERROR ("Can't send %d to %s: tx descs exhausted\n",
                         type, libcfs_nid2str(target.nid));
@@ -1587,16 +1791,16 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 
         if (payload_kiov != NULL)
                 lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
-                                    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+                                   offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
                                     payload_niov, payload_kiov,
                                     payload_offset, payload_nob);
         else
                 lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
-                                   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+                                  offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
                                    payload_niov, payload_iov,
                                    payload_offset, payload_nob);
 
-        nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
+       nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
 
         tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
@@ -1604,19 +1808,19 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         return 0;
 }
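
Note: every branch above reduces to one size test: if the header plus the whole payload, addressed through the flexible ibim_payload array, fits in IBLND_MSG_SIZE (4 KiB in this code), the message goes out IMMEDIATE; otherwise a GET/PUT rendezvous with an RDMA descriptor is set up. A worked sketch of the test; the zero-length array and offsetof with a runtime index are the same GCC extensions the kernel code relies on, and the layout below is a stand-in, not the real struct kib_msg:

#include <stddef.h>

#define IBLND_MSG_SIZE (4 << 10)	/* 4 KiB wire messages */

struct imm_stub { unsigned long hdr[6]; char ibim_payload[0]; };
struct msg_stub { unsigned int ibm_type; struct imm_stub immediate; };

static int fits_immediate(unsigned int payload_nob)
{
	size_t nob = offsetof(struct msg_stub,
			      immediate.ibim_payload[payload_nob]);

	return nob <= IBLND_MSG_SIZE;	/* else take the RDMA path */
}
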
 
-void
-kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
+static void
+kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
 {
-        lnet_process_id_t target = lntmsg->msg_target;
+       struct lnet_process_id target = lntmsg->msg_target;
         unsigned int      niov = lntmsg->msg_niov;
-        struct iovec     *iov = lntmsg->msg_iov;
+       struct kvec      *iov = lntmsg->msg_iov;
         lnet_kiov_t      *kiov = lntmsg->msg_kiov;
         unsigned int      offset = lntmsg->msg_offset;
         unsigned int      nob = lntmsg->msg_len;
-        kib_tx_t         *tx;
+       struct kib_tx *tx;
         int               rc;
 
-        tx = kiblnd_get_idle_tx(ni);
+       tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
         if (tx == NULL) {
                 CERROR("Can't get tx for REPLY to %s\n",
                        libcfs_nid2str(target.nid));
@@ -1647,50 +1851,52 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
                        libcfs_nid2str(target.nid), rc);
                 goto failed_1;
         }
-        
-        if (nob == 0) {
-                /* No RDMA: local completion may happen now! */
-                lnet_finalize(ni, lntmsg, 0);
-        } else {
-                /* RDMA: lnet_finalize(lntmsg) when it
-                 * completes */
-                tx->tx_lntmsg[0] = lntmsg;
-        }
+
+       if (nob == 0) {
+               /* No RDMA: local completion may happen now! */
+               lnet_finalize(lntmsg, 0);
+       } else {
+               /* RDMA: lnet_finalize(lntmsg) when it
+                * completes */
+               tx->tx_lntmsg[0] = lntmsg;
+       }
 
         kiblnd_queue_tx(tx, rx->rx_conn);
         return;
 
- failed_1:
-        kiblnd_tx_done(ni, tx);
- failed_0:
-        lnet_finalize(ni, lntmsg, -EIO);
+
+failed_1:
+       tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+       kiblnd_tx_done(tx);
+failed_0:
+       lnet_finalize(lntmsg, -EIO);
 }
 
 int
-kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
-             unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
-             unsigned int offset, unsigned int mlen, unsigned int rlen)
+kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
+           int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+           unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
-        kib_rx_t    *rx = private;
-        kib_msg_t   *rxmsg = rx->rx_msg;
-        kib_conn_t  *conn = rx->rx_conn;
-        kib_tx_t    *tx;
-        kib_msg_t   *txmsg;
-        int          nob;
-        int          post_credit = IBLND_POSTRX_PEER_CREDIT;
-        int          rc = 0;
-
-        LASSERT (mlen <= rlen);
-        LASSERT (!cfs_in_interrupt());
-        /* Either all pages or all vaddrs */
-        LASSERT (!(kiov != NULL && iov != NULL));
-
-        switch (rxmsg->ibm_type) {
-        default:
-                LBUG();
+       struct kib_rx *rx = private;
+       struct kib_msg *rxmsg = rx->rx_msg;
+       struct kib_conn *conn = rx->rx_conn;
+       struct kib_tx *tx;
+       __u64        ibprm_cookie;
+       int          nob;
+       int          post_credit = IBLND_POSTRX_PEER_CREDIT;
+       int          rc = 0;
+
+       LASSERT (mlen <= rlen);
+       LASSERT (!in_interrupt());
+       /* Either all pages or all vaddrs */
+       LASSERT (!(kiov != NULL && iov != NULL));
+
+       switch (rxmsg->ibm_type) {
+       default:
+               LBUG();
 
         case IBLND_MSG_IMMEDIATE:
-                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
+               nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
                 if (nob > rx->rx_nob) {
                         CERROR ("Immediate message from %s too big: %d(%d)\n",
                                 libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
@@ -1702,25 +1908,29 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                 if (kiov != NULL)
                         lnet_copy_flat2kiov(niov, kiov, offset,
                                             IBLND_MSG_SIZE, rxmsg,
-                                            offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+                                           offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
                                             mlen);
                 else
                         lnet_copy_flat2iov(niov, iov, offset,
                                            IBLND_MSG_SIZE, rxmsg,
-                                           offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+                                          offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
                                            mlen);
-                lnet_finalize (ni, lntmsg, 0);
-                break;
-
-        case IBLND_MSG_PUT_REQ:
-                if (mlen == 0) {
-                        lnet_finalize(ni, lntmsg, 0);
-                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
-                                               rxmsg->ibm_u.putreq.ibprm_cookie);
-                        break;
-                }
-
-                tx = kiblnd_get_idle_tx(ni);
+               lnet_finalize(lntmsg, 0);
+               break;
+
+       case IBLND_MSG_PUT_REQ: {
+               struct kib_msg  *txmsg;
+               struct kib_rdma_desc *rd;
+               ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+
+               if (mlen == 0) {
+                       lnet_finalize(lntmsg, 0);
+                       kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
+                                              0, ibprm_cookie);
+                       break;
+               }
+
+               tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
                 if (tx == NULL) {
                         CERROR("Can't allocate tx for %s\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
@@ -1729,28 +1939,28 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                         break;
                 }
 
-                txmsg = tx->tx_msg;
-                if (kiov == NULL)
-                        rc = kiblnd_setup_rd_iov(ni, tx,
-                                                 &txmsg->ibm_u.putack.ibpam_rd,
-                                                 niov, iov, offset, mlen);
-                else
-                        rc = kiblnd_setup_rd_kiov(ni, tx,
-                                                  &txmsg->ibm_u.putack.ibpam_rd,
-                                                  niov, kiov, offset, mlen);
-                if (rc != 0) {
-                        CERROR("Can't setup PUT sink for %s: %d\n",
-                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
-                        kiblnd_tx_done(ni, tx);
-                        /* tell peer it's over */
-                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
-                                               rxmsg->ibm_u.putreq.ibprm_cookie);
-                        break;
-                }
-
-                nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
-                txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
-                txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
+               txmsg = tx->tx_msg;
+               rd = &txmsg->ibm_u.putack.ibpam_rd;
+               if (kiov == NULL)
+                       rc = kiblnd_setup_rd_iov(ni, tx, rd,
+                                                niov, iov, offset, mlen);
+               else
+                       rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+                                                 niov, kiov, offset, mlen);
+               if (rc != 0) {
+                       CERROR("Can't setup PUT sink for %s: %d\n",
+                              libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+                       tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+                       kiblnd_tx_done(tx);
+                       /* tell peer_ni it's over */
+                       kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
+                                              rc, ibprm_cookie);
+                       break;
+               }
+
+               nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
+               txmsg->ibm_u.putack.ibpam_src_cookie = ibprm_cookie;
+               txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
 
                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
 
@@ -1761,6 +1971,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                 /* reposted buffer reserved for PUT_DONE */
                 post_credit = IBLND_POSTRX_NO_CREDIT;
                 break;
+               }
 
         case IBLND_MSG_GET_REQ:
                 if (lntmsg != NULL) {
@@ -1780,59 +1991,56 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 }
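
Note: for IBLND_MSG_PUT_REQ the receiver answers with a PUT_ACK that pairs two cookies: the sender's request cookie echoed back, and the receiver's own tx cookie that the eventual PUT_DONE must carry; the posted rx buffer stays reserved for that PUT_DONE, hence IBLND_POSTRX_NO_CREDIT. A minimal sketch of the pairing (stand-in struct, not the real kib_putack_msg):

#include <stdint.h>

struct putack_stub {
	uint64_t ibpam_src_cookie;	/* echoed from the PUT_REQ */
	uint64_t ibpam_dst_cookie;	/* our tx; matched by PUT_DONE */
};

static void fill_putack(struct putack_stub *ack,
			uint64_t ibprm_cookie, uint64_t tx_cookie)
{
	ack->ibpam_src_cookie = ibprm_cookie;
	ack->ibpam_dst_cookie = tx_cookie;
}
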
 
 int
-kiblnd_thread_start (int (*fn)(void *arg), void *arg)
+kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-        long    pid = cfs_create_thread (fn, arg, 0);
+       struct task_struct *task = kthread_run(fn, arg, name);
 
-        if (pid < 0)
-                return ((int)pid);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
 
-        cfs_atomic_inc (&kiblnd_data.kib_nthreads);
-        return (0);
+       atomic_inc(&kiblnd_data.kib_nthreads);
+       return 0;
 }
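
Note: cfs_create_thread() reported failure as a negative pid; the native kthread_run() returns a task_struct pointer or an ERR_PTR-encoded errno, so the check becomes IS_ERR()/PTR_ERR(). The pointer-encoded-errno convention, modelled in userspace:

#include <stdint.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(intptr_t)(p))

/* 0 on success, negative errno if the "thread" failed to start */
static long thread_start(void *task_or_err)
{
	if (IS_ERR(task_or_err))
		return PTR_ERR(task_or_err);
	/* the real code also does atomic_inc(&kib_nthreads) here */
	return 0;
}
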
 
-void
+static void
 kiblnd_thread_fini (void)
 {
-        cfs_atomic_dec (&kiblnd_data.kib_nthreads);
+       atomic_dec (&kiblnd_data.kib_nthreads);
 }
 
-void
-kiblnd_peer_alive (kib_peer_t *peer)
+static void
+kiblnd_peer_alive(struct kib_peer_ni *peer_ni)
 {
-        /* This is racy, but everyone's only writing cfs_time_current() */
-        peer->ibp_last_alive = cfs_time_current();
-        cfs_mb();
+       /* This is racy, but everyone's only writing ktime_get_seconds() */
+       peer_ni->ibp_last_alive = ktime_get_seconds();
+       smp_mb();
 }
 
-void
-kiblnd_peer_notify (kib_peer_t *peer)
+static void
+kiblnd_peer_notify(struct kib_peer_ni *peer_ni)
 {
-        int           error = 0;
-        cfs_time_t    last_alive = 0;
-        unsigned long flags;
+       int           error = 0;
+       time64_t last_alive = 0;
+       unsigned long flags;
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        if (cfs_list_empty(&peer->ibp_conns) &&
-            peer->ibp_accepting == 0 &&
-            peer->ibp_connecting == 0 &&
-            peer->ibp_error != 0) {
-                error = peer->ibp_error;
-                peer->ibp_error = 0;
+       if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
+               error = peer_ni->ibp_error;
+               peer_ni->ibp_error = 0;
 
-                last_alive = peer->ibp_last_alive;
-        }
+               last_alive = peer_ni->ibp_last_alive;
+       }
 
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-        if (error != 0)
-                lnet_notify(peer->ibp_ni,
-                            peer->ibp_nid, 0, last_alive);
+       if (error != 0)
+               lnet_notify(peer_ni->ibp_ni,
+                           peer_ni->ibp_nid, false, false, last_alive);
 }
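
Note: this is the notification change the patch is named for. Instead of passing a bare timestamp, the LND now hands lnet_notify() an explicit liveness verdict plus a second boolean (both false in this caller), along with the last-alive time in seconds. A data-shape sketch of the arguments as this caller uses them; the struct is illustrative only, and time64_t is modelled as a 64-bit seconds count:

#include <stdbool.h>
#include <stdint.h>

typedef int64_t time64_t;

struct notify_args {
	bool	 alive;		/* false: reporting peer_ni failure */
	bool	 reset;		/* false: keep existing peer state */
	time64_t last_alive;	/* ktime_get_seconds() sample */
};

static struct notify_args peer_death_event(time64_t last_alive)
{
	return (struct notify_args){ .alive = false, .reset = false,
				     .last_alive = last_alive };
}
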
 
 void
-kiblnd_close_conn_locked (kib_conn_t *conn, int error)
+kiblnd_close_conn_locked(struct kib_conn *conn, int error)
 {
         /* This just does the immediate housekeeping.  'error' is zero for a
          * normal shutdown which can happen only after the connection has been
@@ -1840,9 +2048,9 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
          * connection to be finished off by the connd.  Otherwise the connd is
          * already dealing with it (either to set it up or tear it down).
          * Caller holds kib_global_lock exclusively in irq context */
-        kib_peer_t       *peer = conn->ibc_peer;
-        kib_dev_t        *dev;
-        unsigned long     flags;
+       struct kib_peer_ni *peer_ni = conn->ibc_peer;
+       struct kib_dev *dev;
+       unsigned long flags;
 
         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
@@ -1853,302 +2061,344 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 return; /* already being handled  */
 
         if (error == 0 &&
-            cfs_list_empty(&conn->ibc_tx_noops) &&
-            cfs_list_empty(&conn->ibc_tx_queue) &&
-            cfs_list_empty(&conn->ibc_tx_queue_rsrvd) &&
-            cfs_list_empty(&conn->ibc_tx_queue_nocred) &&
-            cfs_list_empty(&conn->ibc_active_txs)) {
+           list_empty(&conn->ibc_tx_noops) &&
+           list_empty(&conn->ibc_tx_queue) &&
+           list_empty(&conn->ibc_tx_queue_rsrvd) &&
+           list_empty(&conn->ibc_tx_queue_nocred) &&
+           list_empty(&conn->ibc_active_txs)) {
                 CDEBUG(D_NET, "closing conn to %s\n", 
-                       libcfs_nid2str(peer->ibp_nid));
+                       libcfs_nid2str(peer_ni->ibp_nid));
         } else {
                 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
-                       libcfs_nid2str(peer->ibp_nid), error,
-                       cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
-                       cfs_list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
-                       cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
-                       cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
-                       cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
-        }
-
-        dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
-        cfs_list_del(&conn->ibc_list);
-        /* connd (see below) takes over ibc_list's ref */
-
-        if (cfs_list_empty (&peer->ibp_conns) &&    /* no more conns */
-            kiblnd_peer_active(peer)) {         /* still in peer table */
-                kiblnd_unlink_peer_locked(peer);
+                       libcfs_nid2str(peer_ni->ibp_nid), error,
+                      list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+                      list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+                      list_empty(&conn->ibc_tx_queue_rsrvd) ?
+                                               "" : "(sending_rsrvd)",
+                      list_empty(&conn->ibc_tx_queue_nocred) ?
+                                                "" : "(sending_nocred)",
+                      list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+       }
+
+       dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev;
+       if (peer_ni->ibp_next_conn == conn)
+               /* clear next_conn so it won't be used */
+               peer_ni->ibp_next_conn = NULL;
+       list_del(&conn->ibc_list);
+       /* connd (see below) takes over ibc_list's ref */
+
+       if (list_empty(&peer_ni->ibp_conns) &&    /* no more conns */
+            kiblnd_peer_active(peer_ni)) {         /* still in peer_ni table */
+                kiblnd_unlink_peer_locked(peer_ni);
 
                 /* set/clear error on last conn */
-                peer->ibp_error = conn->ibc_comms_error;
+                peer_ni->ibp_error = conn->ibc_comms_error;
         }
 
         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
-        if (error != 0 &&
-            kiblnd_dev_can_failover(dev)) {
-                cfs_list_add_tail(&dev->ibd_fail_list,
-                              &kiblnd_data.kib_failed_devs);
-                cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
-        }
+       if (error != 0 &&
+           kiblnd_dev_can_failover(dev)) {
+               list_add_tail(&dev->ibd_fail_list,
+                             &kiblnd_data.kib_failed_devs);
+               wake_up(&kiblnd_data.kib_failover_waitq);
+       }
 
-        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
-        cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
-        cfs_waitq_signal (&kiblnd_data.kib_connd_waitq);
+       list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+       wake_up(&kiblnd_data.kib_connd_waitq);
 
-        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 }
 
 void
-kiblnd_close_conn (kib_conn_t *conn, int error)
+kiblnd_close_conn(struct kib_conn *conn, int error)
 {
-        unsigned long flags;
+       unsigned long flags;
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        kiblnd_close_conn_locked(conn, error);
+       kiblnd_close_conn_locked(conn, error);
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }
 
-void
-kiblnd_handle_early_rxs(kib_conn_t *conn)
+static void
+kiblnd_handle_early_rxs(struct kib_conn *conn)
 {
-        unsigned long    flags;
-        kib_rx_t        *rx;
+       unsigned long flags;
+       struct kib_rx *rx;
 
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+       LASSERT(!in_interrupt());
+       LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        while (!cfs_list_empty(&conn->ibc_early_rxs)) {
-                rx = cfs_list_entry(conn->ibc_early_rxs.next,
-                                kib_rx_t, rx_list);
-                cfs_list_del(&rx->rx_list);
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                            flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       while (!list_empty(&conn->ibc_early_rxs)) {
+               rx = list_entry(conn->ibc_early_rxs.next,
+                               struct kib_rx, rx_list);
+               list_del(&rx->rx_list);
+               write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-                kiblnd_handle_rx(rx);
+               kiblnd_handle_rx(rx);
 
-                cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        }
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+               write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       }
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }
 
 void
-kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs)
+kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
 {
-        CFS_LIST_HEAD       (zombies);
-        cfs_list_t          *tmp;
-        cfs_list_t          *nxt;
-        kib_tx_t            *tx;
-
-        cfs_spin_lock(&conn->ibc_lock);
-
-        cfs_list_for_each_safe (tmp, nxt, txs) {
-                tx = cfs_list_entry (tmp, kib_tx_t, tx_list);
-
-                if (txs == &conn->ibc_active_txs) {
-                        LASSERT (!tx->tx_queued);
-                        LASSERT (tx->tx_waiting ||
-                                 tx->tx_sending != 0);
-                } else {
-                        LASSERT (tx->tx_queued);
-                }
-
-                tx->tx_status = -ECONNABORTED;
-                tx->tx_waiting = 0;
-
-                if (tx->tx_sending == 0) {
-                        tx->tx_queued = 0;
-                        cfs_list_del (&tx->tx_list);
-                        cfs_list_add (&tx->tx_list, &zombies);
-                }
-        }
-
-        cfs_spin_unlock(&conn->ibc_lock);
-
-        kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
-                           &zombies, -ECONNABORTED);
+       struct list_head         zombies = LIST_HEAD_INIT(zombies);
+       struct list_head        *tmp;
+       struct list_head        *nxt;
+       struct kib_tx *tx;
+
+       spin_lock(&conn->ibc_lock);
+
+       list_for_each_safe(tmp, nxt, txs) {
+               tx = list_entry(tmp, struct kib_tx, tx_list);
+
+               if (txs == &conn->ibc_active_txs) {
+                       LASSERT(!tx->tx_queued);
+                       LASSERT(tx->tx_waiting ||
+                               tx->tx_sending != 0);
+                       if (conn->ibc_comms_error == -ETIMEDOUT) {
+                               if (tx->tx_waiting && !tx->tx_sending)
+                                       tx->tx_hstatus =
+                                         LNET_MSG_STATUS_REMOTE_TIMEOUT;
+                               else if (tx->tx_sending)
+                                       tx->tx_hstatus =
+                                         LNET_MSG_STATUS_NETWORK_TIMEOUT;
+                       }
+               } else {
+                       LASSERT(tx->tx_queued);
+                       if (conn->ibc_comms_error == -ETIMEDOUT)
+                               tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
+                       else
+                               tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+               }
+
+               tx->tx_status = -ECONNABORTED;
+               tx->tx_waiting = 0;
+
+               /*
+                * TODO: This assumes kiblnd_tx_complete() will be called
+                * for each tx.  If that event is dropped we could end up
+                * with stale connections floating around.  We'd like to
+                * deal with that in a better way.
+                *
+                * It also means we can exceed the timeout by many seconds.
+                */
+               if (tx->tx_sending == 0) {
+                       tx->tx_queued = 0;
+                       list_del(&tx->tx_list);
+                       list_add(&tx->tx_list, &zombies);
+               }
+       }
+
+       spin_unlock(&conn->ibc_lock);
+
+       /*
+        * Aborting transmits occurs when finalizing the connection, and
+        * the connection is finalized on error.
+        * Passing LNET_MSG_STATUS_OK to txlist_done() will not
+        * override the value already set in tx->tx_hstatus above.
+        */
+       kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
 }
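
The new tx_hstatus assignments record where an aborted transmit died: still queued locally, sent but unanswered by the peer, or stuck in the HCA send path. A hedged restatement of that decision table as a self-contained helper (simplified enum and flags; the real code sets LNET_MSG_STATUS_* fields on struct kib_tx):

#include <errno.h>

enum tx_hstatus {                       /* simplified LNET_MSG_STATUS_* values */
        TX_HSTATUS_OK,
        TX_HSTATUS_LOCAL_ERROR,
        TX_HSTATUS_LOCAL_TIMEOUT,
        TX_HSTATUS_REMOTE_TIMEOUT,
        TX_HSTATUS_NETWORK_TIMEOUT,
};

static enum tx_hstatus
classify_aborted_tx(int queued, int sending, int waiting, int comms_error)
{
        if (queued) {
                /* never handed to the HCA: the failure is purely local */
                return comms_error == -ETIMEDOUT ?
                       TX_HSTATUS_LOCAL_TIMEOUT : TX_HSTATUS_LOCAL_ERROR;
        }
        if (comms_error != -ETIMEDOUT)
                return TX_HSTATUS_OK;           /* keep whatever was set earlier */
        if (waiting && !sending)                /* sent, peer never replied */
                return TX_HSTATUS_REMOTE_TIMEOUT;
        return TX_HSTATUS_NETWORK_TIMEOUT;      /* send still pending in the HCA */
}
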
 
-void
-kiblnd_finalise_conn (kib_conn_t *conn)
+static void
+kiblnd_finalise_conn(struct kib_conn *conn)
 {
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->ibc_state > IBLND_CONN_INIT);
+       LASSERT(!in_interrupt());
+       LASSERT(conn->ibc_state > IBLND_CONN_INIT);
 
-        kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
+       /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
+        * for connections that didn't get as far as being connected, because
+        * rdma_disconnect() does this for free. */
+       kiblnd_abort_receives(conn);
 
-        /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
-         * for connections that didn't get as far as being connected, because
-         * rdma_disconnect() does this for free. */
-        kiblnd_abort_receives(conn);
+       kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
 
-        /* Complete all tx descs not waiting for sends to complete.
-         * NB we should be safe from RDMA now that the QP has changed state */
+       /* Complete all tx descs not waiting for sends to complete.
+        * NB we should be safe from RDMA now that the QP has changed state */
 
-        kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
-        kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
-        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
-        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
-        kiblnd_abort_txs(conn, &conn->ibc_active_txs);
+       kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
+       kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
+       kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
+       kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
+       kiblnd_abort_txs(conn, &conn->ibc_active_txs);
 
-        kiblnd_handle_early_rxs(conn);
+       kiblnd_handle_early_rxs(conn);
 }
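
kiblnd_abort_receives() itself is outside this hunk; the conventional way to flush posted receives, so that teardown cannot race with new completions, is to move the QP to the error state with ib_modify_qp(). Roughly (a sketch under that assumption, not the tree's code):

#include <rdma/ib_verbs.h>

/* sketch only: assumes the QP is the one hung off the kib_conn */
static int abort_receives_sketch(struct ib_qp *qp)
{
        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

        /* IB_QPS_ERR completes every posted receive with a flush error,
         * so nothing new can arrive while the connection is torn down */
        return ib_modify_qp(qp, &attr, IB_QP_STATE);
}
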
 
-void
-kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
+static void
+kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
+                          int error)
 {
-        CFS_LIST_HEAD    (zombies);
-        unsigned long     flags;
+       struct list_head zombies = LIST_HEAD_INIT(zombies);
+       unsigned long   flags;
 
-        LASSERT (error != 0);
-        LASSERT (!cfs_in_interrupt());
+       LASSERT(error != 0);
+       LASSERT(!in_interrupt());
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        if (active) {
-                LASSERT (peer->ibp_connecting > 0);
-                peer->ibp_connecting--;
-        } else {
-                LASSERT (peer->ibp_accepting > 0);
-                peer->ibp_accepting--;
-        }
+       if (active) {
+               LASSERT(peer_ni->ibp_connecting > 0);
+               peer_ni->ibp_connecting--;
+       } else {
+               LASSERT(peer_ni->ibp_accepting > 0);
+               peer_ni->ibp_accepting--;
+       }
 
-        if (peer->ibp_connecting != 0 ||
-            peer->ibp_accepting != 0) {
-                /* another connection attempt under way... */
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                            flags);
-                return;
-        }
+       if (kiblnd_peer_connecting(peer_ni)) {
+               /* another connection attempt under way... */
+               write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                       flags);
+               return;
+       }
 
-        if (cfs_list_empty(&peer->ibp_conns)) {
-                /* Take peer's blocked transmits to complete with error */
-                cfs_list_add(&zombies, &peer->ibp_tx_queue);
-                cfs_list_del_init(&peer->ibp_tx_queue);
+       peer_ni->ibp_reconnected = 0;
+       if (list_empty(&peer_ni->ibp_conns)) {
+               /* Take peer_ni's blocked transmits to complete with error */
+               list_splice_init(&peer_ni->ibp_tx_queue, &zombies);
 
-                if (kiblnd_peer_active(peer))
-                        kiblnd_unlink_peer_locked(peer);
+               if (kiblnd_peer_active(peer_ni))
+                       kiblnd_unlink_peer_locked(peer_ni);
 
-                peer->ibp_error = error;
-        } else {
-                /* Can't have blocked transmits if there are connections */
-                LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
-        }
+               peer_ni->ibp_error = error;
+       } else {
+               /* Can't have blocked transmits if there are connections */
+               LASSERT(list_empty(&peer_ni->ibp_tx_queue));
+       }
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-        kiblnd_peer_notify(peer);
+       kiblnd_peer_notify(peer_ni);
 
-        if (cfs_list_empty (&zombies))
-                return;
+       if (list_empty(&zombies))
+               return;
 
-        CNETERR("Deleting messages for %s: connection failed\n",
-                libcfs_nid2str(peer->ibp_nid));
+       CNETERR("Deleting messages for %s: connection failed\n",
+               libcfs_nid2str(peer_ni->ibp_nid));
 
-        kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
+       kiblnd_txlist_done(&zombies, error,
+                          LNET_MSG_STATUS_LOCAL_DROPPED);
 }
 
-void
-kiblnd_connreq_done(kib_conn_t *conn, int status)
+static void
+kiblnd_connreq_done(struct kib_conn *conn, int status)
 {
-        kib_peer_t        *peer = conn->ibc_peer;
-        kib_tx_t          *tx;
-        cfs_list_t         txs;
-        unsigned long      flags;
-        int                active;
+       struct kib_peer_ni *peer_ni = conn->ibc_peer;
+       struct kib_tx *tx;
+       struct list_head txs;
+       unsigned long    flags;
+       int              active;
 
         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 
-        CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
-               libcfs_nid2str(peer->ibp_nid), active,
-               conn->ibc_version, status);
+       CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
+              libcfs_nid2str(peer_ni->ibp_nid), active,
+              conn->ibc_version, status);
 
-        LASSERT (!cfs_in_interrupt());
-        LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
-                  peer->ibp_connecting > 0) ||
-                 (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
-                  peer->ibp_accepting > 0));
+       LASSERT(!in_interrupt());
+       LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
+                peer_ni->ibp_connecting > 0) ||
+               (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
+                peer_ni->ibp_accepting > 0));
 
         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
         conn->ibc_connvars = NULL;
 
         if (status != 0) {
                 /* failed to establish connection */
-                kiblnd_peer_connect_failed(peer, active, status);
+                kiblnd_peer_connect_failed(peer_ni, active, status);
                 kiblnd_finalise_conn(conn);
                 return;
         }
 
         /* connection established */
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        conn->ibc_last_send = jiffies;
+       conn->ibc_last_send = ktime_get();
         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
-        kiblnd_peer_alive(peer);
-
-        /* Add conn to peer's list and nuke any dangling conns from a different
-         * peer instance... */
-        kiblnd_conn_addref(conn);               /* +1 ref for ibc_list */
-        cfs_list_add(&conn->ibc_list, &peer->ibp_conns);
-        if (active)
-                peer->ibp_connecting--;
-        else
-                peer->ibp_accepting--;
-
-        if (peer->ibp_version == 0) {
-                peer->ibp_version     = conn->ibc_version;
-                peer->ibp_incarnation = conn->ibc_incarnation;
-        }
-
-        if (peer->ibp_version     != conn->ibc_version ||
-            peer->ibp_incarnation != conn->ibc_incarnation) {
-                kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
+       kiblnd_peer_alive(peer_ni);
+
+       /* Add conn to peer_ni's list and nuke any dangling conns from a different
+        * peer_ni instance... */
+       kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
+       list_add(&conn->ibc_list, &peer_ni->ibp_conns);
+       peer_ni->ibp_reconnected = 0;
+       if (active)
+               peer_ni->ibp_connecting--;
+       else
+               peer_ni->ibp_accepting--;
+
+       if (peer_ni->ibp_version == 0) {
+               peer_ni->ibp_version     = conn->ibc_version;
+               peer_ni->ibp_incarnation = conn->ibc_incarnation;
+       }
+
+       if (peer_ni->ibp_version     != conn->ibc_version ||
+           peer_ni->ibp_incarnation != conn->ibc_incarnation) {
+               kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version,
                                                 conn->ibc_incarnation);
-                peer->ibp_version     = conn->ibc_version;
-                peer->ibp_incarnation = conn->ibc_incarnation;
+               peer_ni->ibp_version     = conn->ibc_version;
+               peer_ni->ibp_incarnation = conn->ibc_incarnation;
         }
 
-        /* grab pending txs while I have the lock */
-        cfs_list_add(&txs, &peer->ibp_tx_queue);
-        cfs_list_del_init(&peer->ibp_tx_queue);
+       /* grab pending txs while I have the lock */
+       INIT_LIST_HEAD(&txs);
+       list_splice_init(&peer_ni->ibp_tx_queue, &txs);
 
-        if (!kiblnd_peer_active(peer) ||        /* peer has been deleted */
+       if (!kiblnd_peer_active(peer_ni) ||        /* peer_ni has been deleted */
             conn->ibc_comms_error != 0) {       /* error has happened already */
-                lnet_ni_t *ni = peer->ibp_ni;
 
                 /* start to shut down connection */
                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                            flags);
-
-                kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
-
-                return;
-        }
-
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
-        /* Schedule blocked txs */
-        cfs_spin_lock (&conn->ibc_lock);
-        while (!cfs_list_empty (&txs)) {
-                tx = cfs_list_entry (txs.next, kib_tx_t, tx_list);
-                cfs_list_del(&tx->tx_list);
-
-                kiblnd_queue_tx_locked(tx, conn);
-        }
-        cfs_spin_unlock (&conn->ibc_lock);
-
-        kiblnd_check_sends(conn);
-
-        /* schedule blocked rxs */
-        kiblnd_handle_early_rxs(conn);
+               write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+               kiblnd_txlist_done(&txs, -ECONNABORTED,
+                                  LNET_MSG_STATUS_LOCAL_ERROR);
+
+               return;
+       }
+
+       /* +1 ref for myself: this connection is visible to other threads
+        * now, and the reference held for peer_ni::ibp_conns can be dropped
+        * by a connection close from a different thread, or by the call to
+        * kiblnd_check_sends_locked() below.  See bz21911 for details.
+        */
+       kiblnd_conn_addref(conn);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+       /* Schedule blocked txs
+        * Note: if we are running with conns_per_peer > 1, these blocked
+        * txs will all be queued on the first connection to be established;
+        * round robin is not used for this first batch.
+        */
+       spin_lock(&conn->ibc_lock);
+       while (!list_empty(&txs)) {
+               tx = list_entry(txs.next, struct kib_tx, tx_list);
+               list_del(&tx->tx_list);
+
+               kiblnd_queue_tx_locked(tx, conn);
+       }
+       kiblnd_check_sends_locked(conn);
+       spin_unlock(&conn->ibc_lock);
+
+       /* schedule blocked rxs */
+       kiblnd_handle_early_rxs(conn);
+       kiblnd_conn_decref(conn);
 }
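
The bz21911 comment describes a publish-then-use hazard: once conn is on ibp_conns, any other thread may close it and drop the list's reference, so the function pins conn with a private reference before releasing the lock. The same guard in portable C, with C11 atomics standing in for kiblnd_conn_addref()/kiblnd_conn_decref() (a sketch, not the tree's refcounting):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;
        /* ... payload ... */
};

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refs, 1) == 1)
                free(o);                /* last reference dropped */
}

static void publish_and_use(struct obj *o)
{
        atomic_fetch_add(&o->refs, 1);  /* +1 for myself, before publishing */

        /* ... link o into a shared structure; from here on other threads
         * may find it and call obj_put() at any time ... */

        /* o is still safe to touch: my reference keeps it alive */

        obj_put(o);                     /* drop the guard reference */
}
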
 
-void
-kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
+static void
+kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
 {
         int          rc;
 
@@ -2158,30 +2408,30 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
                 CWARN("Error %d sending reject\n", rc);
 }
 
-int
-kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
+static int
+kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 {
-        cfs_rwlock_t          *g_lock = &kiblnd_data.kib_global_lock;
-        kib_msg_t             *reqmsg = priv;
-        kib_msg_t             *ackmsg;
-        kib_dev_t             *ibdev;
-        kib_peer_t            *peer;
-        kib_peer_t            *peer2;
-        kib_conn_t            *conn;
-        lnet_ni_t             *ni  = NULL;
-        kib_net_t             *net = NULL;
+       rwlock_t                *g_lock = &kiblnd_data.kib_global_lock;
+       struct kib_msg *reqmsg = priv;
+       struct kib_msg *ackmsg;
+       struct kib_dev *ibdev;
+       struct kib_peer_ni *peer_ni;
+       struct kib_peer_ni *peer2;
+       struct kib_conn *conn;
+       struct lnet_ni *ni = NULL;
+       struct kib_net *net = NULL;
         lnet_nid_t             nid;
         struct rdma_conn_param cp;
-        kib_rej_t              rej;
-        int                    version = IBLND_MSG_VERSION;
-        unsigned long          flags;
-        int                    rc;
-        struct sockaddr_in    *peer_addr;
-        LASSERT (!cfs_in_interrupt());
+       struct kib_rej rej;
+       int                    version = IBLND_MSG_VERSION;
+       unsigned long          flags;
+       int                    rc;
+       struct sockaddr_in    *peer_addr;
+       LASSERT(!in_interrupt());
 
-        /* cmid inherits 'context' from the corresponding listener id */
-        ibdev = (kib_dev_t *)cmid->context;
-        LASSERT (ibdev != NULL);
+       /* cmid inherits 'context' from the corresponding listener id */
+       ibdev = cmid->context;
+       LASSERT(ibdev);
 
         memset(&rej, 0, sizeof(rej));
         rej.ibr_magic                = IBLND_MSG_MAGIC;
@@ -2191,105 +2441,120 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
         if (*kiblnd_tunables.kib_require_priv_port &&
             ntohs(peer_addr->sin_port) >= PROT_SOCK) {
-                __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
-                CERROR("Peer's port (%u.%u.%u.%u:%hu) is not privileged\n",
-                       HIPQUAD(ip), ntohs(peer_addr->sin_port));
-                goto failed;
-        }
-
-        if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
-                CERROR("Short connection request\n");
-                goto failed;
-        }
-
-        /* Future protocol version compatibility support!  If the
-         * o2iblnd-specific protocol changes, or when LNET unifies
-         * protocols over all LNDs, the initial connection will
-         * negotiate a protocol version.  I trap this here to avoid
-         * console errors; the reject tells the peer which protocol I
-         * speak. */
-        if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
-            reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
-                goto failed;
-        if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
-            reqmsg->ibm_version != IBLND_MSG_VERSION &&
-            reqmsg->ibm_version != IBLND_MSG_VERSION_1)
-                goto failed;
-        if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
-            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
-            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
-                goto failed;
-
-        rc = kiblnd_unpack_msg(reqmsg, priv_nob);
-        if (rc != 0) {
-                CERROR("Can't parse connection request: %d\n", rc);
-                goto failed;
-        }
-
-        nid = reqmsg->ibm_srcnid;
-        ni  = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
-
-        if (ni != NULL) {
-                net = (kib_net_t *)ni->ni_data;
-                rej.ibr_incarnation = net->ibn_incarnation;
-        }
-
-        if (ni == NULL ||                         /* no matching net */
-            ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
-            net->ibn_dev != ibdev) {              /* wrong device */
-                CERROR("Can't accept %s on %s (%s:%d:%u.%u.%u.%u): "
-                       "bad dst nid %s\n", libcfs_nid2str(nid),
-                       ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
-                       ibdev->ibd_ifname, ibdev->ibd_nnets,
-                       HIPQUAD(ibdev->ibd_ifip),
-                       libcfs_nid2str(reqmsg->ibm_dstnid));
-
-                goto failed;
-        }
+               __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+               CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
+                      &ip, ntohs(peer_addr->sin_port));
+               goto failed;
+       }
+
+       if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
+               CERROR("Short connection request\n");
+               goto failed;
+       }
+
+       /* Future protocol version compatibility support!  If the
+        * o2iblnd-specific protocol changes, or when LNET unifies
+        * protocols over all LNDs, the initial connection will
+        * negotiate a protocol version.  I trap this here to avoid
+        * console errors; the reject tells the peer_ni which protocol I
+        * speak. */
+       if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
+           reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
+               goto failed;
+       if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
+           reqmsg->ibm_version != IBLND_MSG_VERSION &&
+           reqmsg->ibm_version != IBLND_MSG_VERSION_1)
+               goto failed;
+       if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
+           reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
+           reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
+               goto failed;
+
+       rc = kiblnd_unpack_msg(reqmsg, priv_nob);
+       if (rc != 0) {
+               CERROR("Can't parse connection request: %d\n", rc);
+               goto failed;
+       }
+
+       nid = reqmsg->ibm_srcnid;
+       ni  = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
+
+       if (ni != NULL) {
+               net = (struct kib_net *)ni->ni_data;
+               rej.ibr_incarnation = net->ibn_incarnation;
+       }
+
+       if (ni == NULL ||                         /* no matching net */
+           ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
+           net->ibn_dev != ibdev) {              /* wrong device */
+               CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): "
+                      "bad dst nid %s\n", libcfs_nid2str(nid),
+                      ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
+                      ibdev->ibd_ifname, ibdev->ibd_nnets,
+                      &ibdev->ibd_ifip,
+                      libcfs_nid2str(reqmsg->ibm_dstnid));
+
+               goto failed;
+       }
 
        /* check time stamp as soon as possible */
-        if (reqmsg->ibm_dststamp != 0 &&
-            reqmsg->ibm_dststamp != net->ibn_incarnation) {
-                CWARN("Stale connection request\n");
-                rej.ibr_why = IBLND_REJECT_CONN_STALE;
-                goto failed;
-        }
-
-        /* I can accept peer's version */
-        version = reqmsg->ibm_version;
-
-        if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
-                CERROR("Unexpected connreq msg type: %x from %s\n",
-                       reqmsg->ibm_type, libcfs_nid2str(nid));
-                goto failed;
-        }
-
-        if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
-            IBLND_MSG_QUEUE_SIZE(version)) {
-                CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
-                       libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
-                       IBLND_MSG_QUEUE_SIZE(version));
-
-                if (version == IBLND_MSG_VERSION)
-                        rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
-
-                goto failed;
-        }
-
-        if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
-            IBLND_RDMA_FRAGS(version)) {
-                CERROR("Can't accept %s(version %x): "
-                       "incompatible max_frags %d (%d wanted)\n",
-                       libcfs_nid2str(nid), version,
-                       reqmsg->ibm_u.connparams.ibcp_max_frags,
-                       IBLND_RDMA_FRAGS(version));
-
-                if (version == IBLND_MSG_VERSION)
-                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
-
-                goto failed;
-
-        }
+       if (reqmsg->ibm_dststamp != 0 &&
+           reqmsg->ibm_dststamp != net->ibn_incarnation) {
+               CWARN("Stale connection request\n");
+               rej.ibr_why = IBLND_REJECT_CONN_STALE;
+               goto failed;
+       }
+
+       /* I can accept peer_ni's version */
+       version = reqmsg->ibm_version;
+
+       if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
+               CERROR("Unexpected connreq msg type: %x from %s\n",
+                      reqmsg->ibm_type, libcfs_nid2str(nid));
+               goto failed;
+       }
+
+       if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
+           kiblnd_msg_queue_size(version, ni)) {
+               CERROR("Can't accept conn from %s, queue depth too large: "
+                      "%d (<=%d wanted)\n",
+                      libcfs_nid2str(nid),
+                      reqmsg->ibm_u.connparams.ibcp_queue_depth,
+                      kiblnd_msg_queue_size(version, ni));
+
+               if (version == IBLND_MSG_VERSION)
+                       rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
+
+               goto failed;
+       }
+
+       if (reqmsg->ibm_u.connparams.ibcp_max_frags >
+           IBLND_MAX_RDMA_FRAGS) {
+               CWARN("Can't accept conn from %s (version %x): "
+                     "max_frags %d too large (%d wanted)\n",
+                     libcfs_nid2str(nid), version,
+                     reqmsg->ibm_u.connparams.ibcp_max_frags,
+                     IBLND_MAX_RDMA_FRAGS);
+
+               if (version >= IBLND_MSG_VERSION)
+                       rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+
+               goto failed;
+       } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
+                  IBLND_MAX_RDMA_FRAGS &&
+                  net->ibn_fmr_ps == NULL) {
+               CWARN("Can't accept conn from %s (version %x): "
+                     "max_frags %d incompatible without FMR pool "
+                     "(%d wanted)\n",
+                     libcfs_nid2str(nid), version,
+                     reqmsg->ibm_u.connparams.ibcp_max_frags,
+                     IBLND_MAX_RDMA_FRAGS);
+
+               if (version == IBLND_MSG_VERSION)
+                       rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+
+               goto failed;
+       }
 
         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
@@ -2299,17 +2564,21 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 goto failed;
         }
 
-        /* assume 'nid' is a new peer; create  */
-        rc = kiblnd_create_peer(ni, &peer, nid);
-        if (rc != 0) {
-                CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
-                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
-                goto failed;
-        }
+       /* assume 'nid' is a new peer_ni; create  */
+       rc = kiblnd_create_peer(ni, &peer_ni, nid);
+       if (rc != 0) {
+               CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid));
+               rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
+               goto failed;
+       }
+
+       /* We have validated the peer_ni's parameters so use those */
+       peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
+       peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
 
-        cfs_write_lock_irqsave(g_lock, flags);
+       write_lock_irqsave(g_lock, flags);
 
-        peer2 = kiblnd_find_peer_locked(nid);
+       peer2 = kiblnd_find_peer_locked(ni, nid);
         if (peer2 != NULL) {
                 if (peer2->ibp_version == 0) {
                         peer2->ibp_version     = version;
@@ -2319,79 +2588,108 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 /* not the guy I've talked with */
                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
                     peer2->ibp_version     != version) {
-                        kiblnd_close_peer_conns_locked(peer2, -ESTALE);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
-
-                        CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
-                              libcfs_nid2str(nid), peer2->ibp_version, version);
-
-                        kiblnd_peer_decref(peer);
-                        rej.ibr_why = IBLND_REJECT_CONN_STALE;
-                        goto failed;
-                }
+                       kiblnd_close_peer_conns_locked(peer2, -ESTALE);
 
-                /* tie-break connection race in favour of the higher NID */
-                if (peer2->ibp_connecting != 0 &&
-                    nid < ni->ni_nid) {
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       if (kiblnd_peer_active(peer2)) {
+                               peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+                               peer2->ibp_version = version;
+                       }
+                       write_unlock_irqrestore(g_lock, flags);
 
-                        CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
+                       CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
+                             libcfs_nid2str(nid), peer2->ibp_version, version,
+                             peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
 
-                        kiblnd_peer_decref(peer);
-                        rej.ibr_why = IBLND_REJECT_CONN_RACE;
+                       kiblnd_peer_decref(peer_ni);
+                       rej.ibr_why = IBLND_REJECT_CONN_STALE;
                         goto failed;
                 }
 
-                peer2->ibp_accepting++;
-                kiblnd_peer_addref(peer2);
-
-                cfs_write_unlock_irqrestore(g_lock, flags);
-                kiblnd_peer_decref(peer);
-                peer = peer2;
+               /* Tie-break connection race in favour of the higher NID.
+                * If we keep running into a race condition multiple times,
+                * we have to assume that the connection attempt with the
+                * higher NID is stuck in a connecting state and will never
+                * recover.  As such, we pass through this if-block and let
+                * the lower NID connection win so we can move forward.
+                */
+               if (peer2->ibp_connecting != 0 &&
+                   nid < ni->ni_nid && peer2->ibp_races <
+                   MAX_CONN_RACES_BEFORE_ABORT) {
+                       peer2->ibp_races++;
+                       write_unlock_irqrestore(g_lock, flags);
+
+                       CDEBUG(D_NET, "Conn race %s\n",
+                              libcfs_nid2str(peer2->ibp_nid));
+
+                       kiblnd_peer_decref(peer_ni);
+                       rej.ibr_why = IBLND_REJECT_CONN_RACE;
+                       goto failed;
+               }
+               if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
+                       CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
+                               libcfs_nid2str(peer2->ibp_nid),
+                               MAX_CONN_RACES_BEFORE_ABORT);
+               /*
+                * passive connection is allowed even when this peer_ni is
+                * waiting for reconnection.
+                */
+               peer2->ibp_reconnecting = 0;
+               peer2->ibp_races = 0;
+               peer2->ibp_accepting++;
+               kiblnd_peer_addref(peer2);
+
+               /* We raced with kiblnd_launch_tx() (active connect) to create
+                * this peer_ni, so copy the validated parameters now that we
+                * know the peer_ni's limits */
+               peer2->ibp_max_frags = peer_ni->ibp_max_frags;
+               peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
+
+               write_unlock_irqrestore(g_lock, flags);
+               kiblnd_peer_decref(peer_ni);
+               peer_ni = peer2;
         } else {
-                /* Brand new peer */
-                LASSERT (peer->ibp_accepting == 0);
-                LASSERT (peer->ibp_version == 0 &&
-                         peer->ibp_incarnation == 0);
+               /* Brand new peer_ni */
+               LASSERT(peer_ni->ibp_accepting == 0);
+               LASSERT(peer_ni->ibp_version == 0 &&
+                       peer_ni->ibp_incarnation == 0);
 
-                peer->ibp_accepting   = 1;
-                peer->ibp_version     = version;
-                peer->ibp_incarnation = reqmsg->ibm_srcstamp;
+               peer_ni->ibp_accepting   = 1;
+               peer_ni->ibp_version     = version;
+               peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
 
                 /* I have a ref on ni that prevents it being shutdown */
                 LASSERT (net->ibn_shutdown == 0);
 
-                kiblnd_peer_addref(peer);
-                cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+               kiblnd_peer_addref(peer_ni);
+               list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
 
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
         }
 
-        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
+       conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, version);
         if (conn == NULL) {
-                kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
-                kiblnd_peer_decref(peer);
+               kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
+               kiblnd_peer_decref(peer_ni);
                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                 goto failed;
         }
 
         /* conn now "owns" cmid, so I return success from here on to ensure the
          * CM callback doesn't destroy cmid. */
-
-        conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
-        conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
-        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
-        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
-                 <= IBLND_RX_MSGS(version));
+       conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
+       conn->ibc_credits          = conn->ibc_queue_depth;
+       conn->ibc_reserved_credits = conn->ibc_queue_depth;
+       LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+               IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
 
         ackmsg = &conn->ibc_connvars->cv_msg;
         memset(ackmsg, 0, sizeof(*ackmsg));
 
         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
                         sizeof(ackmsg->ibm_u.connparams));
-        ackmsg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
-        ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-        ackmsg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
+       ackmsg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
+       ackmsg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
+       ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
 
         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
 
@@ -2421,57 +2719,102 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         return 0;
 
  failed:
-        if (ni != NULL)
-                lnet_ni_decref(ni);
+       if (ni != NULL) {
+               rej.ibr_cp.ibcp_queue_depth =
+                       kiblnd_msg_queue_size(version, ni);
+               rej.ibr_cp.ibcp_max_frags   = IBLND_MAX_RDMA_FRAGS;
+               lnet_ni_decref(ni);
+       }
 
-        rej.ibr_version = version;
-        rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
-        rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
-        kiblnd_reject(cmid, &rej);
+       rej.ibr_version = version;
+       kiblnd_reject(cmid, &rej);
 
-        return -ECONNREFUSED;
+       return -ECONNREFUSED;
 }
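
The tie-break above deserves a closer look: a passive connect normally loses when it comes from the lower NID while our own active attempt is in flight, but after MAX_CONN_RACES_BEFORE_ABORT consecutive losses the lower NID is allowed to win, so a wedged higher-NID attempt cannot deadlock the pair forever. The predicate in isolation (simplified NID type; a sketch of the logic, not the tree's code):

#define MAX_RACES 20    /* mirrors MAX_CONN_RACES_BEFORE_ABORT */

/* returns nonzero if the passive connect should be rejected with
 * IBLND_REJECT_CONN_RACE; tracks consecutive losses in *races */
static int lose_conn_race(unsigned long long remote_nid,
                          unsigned long long local_nid,
                          int peer_connecting, int *races)
{
        if (peer_connecting != 0 &&
            remote_nid < local_nid &&
            *races < MAX_RACES) {
                (*races)++;
                return 1;       /* higher NID wins this round */
        }
        *races = 0;             /* race resolved, or the override kicked in */
        return 0;               /* accept the passive connection */
}
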
 
-void
-kiblnd_reconnect (kib_conn_t *conn, int version,
-                  __u64 incarnation, int why, kib_connparams_t *cp)
+static void
+kiblnd_check_reconnect(struct kib_conn *conn, int version,
+                      u64 incarnation, int why, struct kib_connparams *cp)
 {
-        kib_peer_t    *peer = conn->ibc_peer;
-        char          *reason;
-        int            retry = 0;
-        unsigned long  flags;
-
-        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
-        LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */
-
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
+       rwlock_t        *glock = &kiblnd_data.kib_global_lock;
+       struct kib_peer_ni *peer_ni = conn->ibc_peer;
+       char            *reason;
+       int              msg_size = IBLND_MSG_SIZE;
+       int              frag_num = -1;
+       int              queue_dep = -1;
+       bool             reconnect;
+       unsigned long    flags;
+
+       LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+       LASSERT(peer_ni->ibp_connecting > 0);   /* 'conn' at least */
+
+       if (cp) {
+               msg_size        = cp->ibcp_max_msg_size;
+               frag_num        = cp->ibcp_max_frags;
+               queue_dep       = cp->ibcp_queue_depth;
+       }
+
+       write_lock_irqsave(glock, flags);
         /* retry connection if it's still needed and no other connection
          * attempts (active or passive) are in progress
          * NB: reconnect is still needed even when ibp_tx_queue is
          * empty if ibp_version != version because reconnect may be
          * initiated by kiblnd_query() */
-        if ((!cfs_list_empty(&peer->ibp_tx_queue) ||
-             peer->ibp_version != version) &&
-            peer->ibp_connecting == 1 &&
-            peer->ibp_accepting == 0) {
-                retry = 1;
-                peer->ibp_connecting++;
-
-                peer->ibp_version     = version;
-                peer->ibp_incarnation = incarnation;
-        }
-
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
-        if (!retry)
-                return;
+       reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
+                    peer_ni->ibp_version != version) &&
+                   peer_ni->ibp_connecting &&
+                   peer_ni->ibp_accepting == 0;
+       if (!reconnect) {
+               reason = "no need";
+               goto out;
+       }
 
         switch (why) {
         default:
                 reason = "Unknown";
                 break;
 
+       case IBLND_REJECT_RDMA_FRAGS: {
+               struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+               if (!cp) {
+                       reason = "can't negotiate max frags";
+                       goto out;
+               }
+               tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+#ifdef HAVE_IB_GET_DMA_MR
+               /*
+                * This check only makes sense if the kernel supports global
+                * memory registration. Otherwise, map_on_demand will never == 0
+                */
+               if (!tunables->lnd_map_on_demand) {
+                       reason = "map_on_demand must be enabled";
+                       goto out;
+               }
+#endif
+               if (conn->ibc_max_frags <= frag_num) {
+                       reason = "unsupported max frags";
+                       goto out;
+               }
+
+               peer_ni->ibp_max_frags = frag_num;
+               reason = "rdma fragments";
+               break;
+       }
+       case IBLND_REJECT_MSG_QUEUE_SIZE:
+               if (!cp) {
+                       reason = "can't negotiate queue depth";
+                       goto out;
+               }
+               if (conn->ibc_queue_depth <= queue_dep) {
+                       reason = "unsupported queue depth";
+                       goto out;
+               }
+
+               peer_ni->ibp_queue_depth = queue_dep;
+               reason = "queue depth";
+               break;
+
         case IBLND_REJECT_CONN_STALE:
                 reason = "stale";
                 break;
@@ -2483,43 +2826,58 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
         case IBLND_REJECT_CONN_UNCOMPAT:
                 reason = "version negotiation";
                 break;
-        }
-
-        CNETERR("%s: retrying (%s), %x, %x, "
-                "queue_dep: %d, max_frag: %d, msg_size: %d\n",
-                libcfs_nid2str(peer->ibp_nid),
-                reason, IBLND_MSG_VERSION, version,
-                cp != NULL? cp->ibcp_queue_depth :IBLND_MSG_QUEUE_SIZE(version),
-                cp != NULL? cp->ibcp_max_frags   : IBLND_RDMA_FRAGS(version),
-                cp != NULL? cp->ibcp_max_msg_size: IBLND_MSG_SIZE);
 
-        kiblnd_connect_peer(peer);
+       case IBLND_REJECT_INVALID_SRV_ID:
+               reason = "invalid service id";
+               break;
+       }
+
+       conn->ibc_reconnect = 1;
+       peer_ni->ibp_reconnecting++;
+       peer_ni->ibp_version = version;
+       if (incarnation != 0)
+               peer_ni->ibp_incarnation = incarnation;
+ out:
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+       CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
+               libcfs_nid2str(peer_ni->ibp_nid),
+               reconnect ? "reconnect" : "don't reconnect",
+               reason, IBLND_MSG_VERSION, version, msg_size,
+               conn->ibc_queue_depth, queue_dep,
+               conn->ibc_max_frags, frag_num);
+       /*
+        * If conn::ibc_reconnect is TRUE, connd will reconnect to the
+        * peer_ni while destroying the zombie.
+        */
 }
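
Both new reject cases in kiblnd_check_reconnect() follow the same shape: the peer's reject carries its limit, and a reconnect only makes sense if that limit is strictly below what we asked for, in which case we retry with the peer's value. Distilled into one helper (a sketch; the real code also updates ibp_max_frags/ibp_queue_depth under kib_global_lock):

/* returns 0 and stores the value to retry with, or -1 if the reject
 * cannot be about this parameter */
static int negotiate_down(int requested, int peer_limit, int *retry_with)
{
        if (peer_limit < 0)
                return -1;              /* reject carried no connparams */
        if (requested <= peer_limit)
                return -1;              /* our value already fits: not this knob */
        *retry_with = peer_limit;       /* shrink to the peer's limit and retry */
        return 0;
}
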
 
-void
-kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
+static void
+kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
 {
-        kib_peer_t    *peer = conn->ibc_peer;
+       struct kib_peer_ni *peer_ni = conn->ibc_peer;
 
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+       LASSERT(!in_interrupt());
+       LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 
-        switch (reason) {
-        case IB_CM_REJ_STALE_CONN:
-                kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
-                                 IBLND_REJECT_CONN_STALE, NULL);
-                break;
+       switch (reason) {
+       case IB_CM_REJ_STALE_CONN:
+               kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
+                                      IBLND_REJECT_CONN_STALE, NULL);
+               break;
 
         case IB_CM_REJ_INVALID_SERVICE_ID:
+               kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
+                                      IBLND_REJECT_INVALID_SRV_ID, NULL);
                 CNETERR("%s rejected: no listener at %d\n",
-                        libcfs_nid2str(peer->ibp_nid),
+                        libcfs_nid2str(peer_ni->ibp_nid),
                         *kiblnd_tunables.kib_service);
                 break;
 
         case IB_CM_REJ_CONSUMER_DEFINED:
-                if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
-                        kib_rej_t        *rej         = priv;
-                        kib_connparams_t *cp          = NULL;
+               if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
+                       struct kib_rej *rej = priv;
+                       struct kib_connparams *cp = NULL;
                         int               flip        = 0;
                         __u64             incarnation = -1;
 
@@ -2528,7 +2886,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                          * b) V2 will provide incarnation while rejecting me,
                          *    -1 will be overwrote.
                          *
-                         * if I try to connect to a V1 peer with V2 protocol,
+                         * if I try to connect to a V1 peer_ni with V2 protocol,
                          * it rejected me then upgrade to V2, I have no idea
                          * about the upgrading and try to reconnect with V1,
                          * in this case upgraded V2 can find out I'm trying to
@@ -2542,7 +2900,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                                 flip = 1;
                         }
 
-                        if (priv_nob >= sizeof(kib_rej_t) &&
+                       if (priv_nob >= sizeof(struct kib_rej) &&
                             rej->ibr_version > IBLND_MSG_VERSION_1) {
                                 /* priv_nob is always 148 in current version
                                  * of OFED, so we still need to check version.
@@ -2562,22 +2920,22 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
                             rej->ibr_magic != LNET_PROTO_MAGIC) {
                                 CERROR("%s rejected: consumer defined fatal error\n",
-                                       libcfs_nid2str(peer->ibp_nid));
+                                       libcfs_nid2str(peer_ni->ibp_nid));
                                 break;
                         }
 
                         if (rej->ibr_version != IBLND_MSG_VERSION &&
                             rej->ibr_version != IBLND_MSG_VERSION_1) {
                                 CERROR("%s rejected: o2iblnd version %x error\n",
-                                       libcfs_nid2str(peer->ibp_nid),
+                                       libcfs_nid2str(peer_ni->ibp_nid),
                                        rej->ibr_version);
                                 break;
                         }
 
                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
                             rej->ibr_version == IBLND_MSG_VERSION_1) {
-                                CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
-                                       libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
+                                CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
+                                       libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version);
 
                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
@@ -2587,35 +2945,25 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                         case IBLND_REJECT_CONN_RACE:
                         case IBLND_REJECT_CONN_STALE:
                         case IBLND_REJECT_CONN_UNCOMPAT:
-                                kiblnd_reconnect(conn, rej->ibr_version,
-                                                 incarnation, rej->ibr_why, cp);
-                                break;
-
-                        case IBLND_REJECT_MSG_QUEUE_SIZE:
-                                CERROR("%s rejected: incompatible message queue depth %d, %d\n",
-                                       libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth,
-                                       IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
-                                break;
-
-                        case IBLND_REJECT_RDMA_FRAGS:
-                                CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
-                                       libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags,
-                                       IBLND_RDMA_FRAGS(conn->ibc_version));
+                       case IBLND_REJECT_MSG_QUEUE_SIZE:
+                       case IBLND_REJECT_RDMA_FRAGS:
+                               kiblnd_check_reconnect(conn, rej->ibr_version,
+                                               incarnation, rej->ibr_why, cp);
                                 break;
 
                         case IBLND_REJECT_NO_RESOURCES:
                                 CERROR("%s rejected: o2iblnd no resources\n",
-                                       libcfs_nid2str(peer->ibp_nid));
+                                       libcfs_nid2str(peer_ni->ibp_nid));
                                 break;
 
                         case IBLND_REJECT_FATAL:
                                 CERROR("%s rejected: o2iblnd fatal error\n",
-                                       libcfs_nid2str(peer->ibp_nid));
+                                       libcfs_nid2str(peer_ni->ibp_nid));
                                 break;
 
                         default:
                                 CERROR("%s rejected: o2iblnd reason %d\n",
-                                       libcfs_nid2str(peer->ibp_nid),
+                                       libcfs_nid2str(peer_ni->ibp_nid),
                                        rej->ibr_why);
                                 break;
                         }
@@ -2624,20 +2972,20 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                 /* fall through */
         default:
                 CNETERR("%s rejected: reason %d, size %d\n",
-                        libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
+                        libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
                 break;
         }
 
         kiblnd_connreq_done(conn, -ECONNREFUSED);
 }
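
The flip handling in the CONSUMER_DEFINED branch relies on a standard trick: a peer of the opposite endianness writes its magic natively, so our view of it equals the byte-swapped constant, and every multi-byte field in the reject must then be swabbed before use. In isolation (placeholder magic value; __builtin_bswap32() standing in for the kernel's __swab32()):

#include <stdint.h>

#define MAGIC_SKETCH 0x12345678u        /* placeholder, not the real magic */

/* 1: swab every field, 0: same endianness, -1: not one of ours */
static int reject_needs_flip(uint32_t magic)
{
        if (magic == MAGIC_SKETCH)
                return 0;
        if (magic == __builtin_bswap32(MAGIC_SKETCH))
                return 1;
        return -1;
}
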
 
-void
-kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
+static void
+kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
 {
-        kib_peer_t    *peer = conn->ibc_peer;
-        lnet_ni_t     *ni   = peer->ibp_ni;
-        kib_net_t     *net  = ni->ni_data;
-        kib_msg_t     *msg  = priv;
+       struct kib_peer_ni *peer_ni = conn->ibc_peer;
+       struct lnet_ni *ni = peer_ni->ibp_ni;
+       struct kib_net *net = ni->ni_data;
+       struct kib_msg *msg = priv;
         int            ver  = conn->ibc_version;
         int            rc   = kiblnd_unpack_msg(msg, priv_nob);
         unsigned long  flags;
@@ -2646,13 +2994,13 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
 
         if (rc != 0) {
                 CERROR("Can't unpack connack from %s: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), rc);
+                       libcfs_nid2str(peer_ni->ibp_nid), rc);
                 goto failed;
         }
 
         if (msg->ibm_type != IBLND_MSG_CONNACK) {
                 CERROR("Unexpected message %d from %s\n",
-                       msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
+                       msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid));
                 rc = -EPROTO;
                 goto failed;
         }
@@ -2660,61 +3008,63 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
         if (ver != msg->ibm_version) {
                 CERROR("%s replied version %x is different with "
                        "requested version %x\n",
-                       libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
+                       libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver);
                 rc = -EPROTO;
                 goto failed;
         }
 
-        if (msg->ibm_u.connparams.ibcp_queue_depth !=
-            IBLND_MSG_QUEUE_SIZE(ver)) {
-                CERROR("%s has incompatible queue depth %d(%d wanted)\n",
-                       libcfs_nid2str(peer->ibp_nid),
-                       msg->ibm_u.connparams.ibcp_queue_depth,
-                       IBLND_MSG_QUEUE_SIZE(ver));
-                rc = -EPROTO;
-                goto failed;
-        }
-
-        if (msg->ibm_u.connparams.ibcp_max_frags !=
-            IBLND_RDMA_FRAGS(ver)) {
-                CERROR("%s has incompatible max_frags %d (%d wanted)\n",
-                       libcfs_nid2str(peer->ibp_nid),
-                       msg->ibm_u.connparams.ibcp_max_frags,
-                       IBLND_RDMA_FRAGS(ver));
-                rc = -EPROTO;
-                goto failed;
-        }
+       if (msg->ibm_u.connparams.ibcp_queue_depth >
+           conn->ibc_queue_depth) {
+               CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
+                      libcfs_nid2str(peer_ni->ibp_nid),
+                      msg->ibm_u.connparams.ibcp_queue_depth,
+                      conn->ibc_queue_depth);
+               rc = -EPROTO;
+               goto failed;
+       }
+
+       if (msg->ibm_u.connparams.ibcp_max_frags >
+           conn->ibc_max_frags) {
+               CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
+                      libcfs_nid2str(peer_ni->ibp_nid),
+                      msg->ibm_u.connparams.ibcp_max_frags,
+                      conn->ibc_max_frags);
+               rc = -EPROTO;
+               goto failed;
+       }
 
         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
                 CERROR("%s max message size %d too big (%d max)\n",
-                       libcfs_nid2str(peer->ibp_nid),
+                       libcfs_nid2str(peer_ni->ibp_nid),
                        msg->ibm_u.connparams.ibcp_max_msg_size,
                        IBLND_MSG_SIZE);
                 rc = -EPROTO;
                 goto failed;
         }
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        if (msg->ibm_dstnid == ni->ni_nid &&
-            msg->ibm_dststamp == net->ibn_incarnation)
-                rc = 0;
-        else
-                rc = -ESTALE;
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       if (msg->ibm_dstnid == ni->ni_nid &&
+           msg->ibm_dststamp == net->ibn_incarnation)
+               rc = 0;
+       else
+               rc = -ESTALE;
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         if (rc != 0) {
                 CERROR("Bad connection reply from %s, rc = %d, "
                        "version: %x max_frags: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), rc,
+                       libcfs_nid2str(peer_ni->ibp_nid), rc,
                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
                 goto failed;
         }
 
-        conn->ibc_incarnation      = msg->ibm_srcstamp;
-        conn->ibc_credits          =
-        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
-        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
-                 <= IBLND_RX_MSGS(ver));
+       conn->ibc_incarnation      = msg->ibm_srcstamp;
+       conn->ibc_credits          = msg->ibm_u.connparams.ibcp_queue_depth;
+       conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
+       conn->ibc_queue_depth      = msg->ibm_u.connparams.ibcp_queue_depth;
+       conn->ibc_max_frags        = msg->ibm_u.connparams.ibcp_max_frags;
+       LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+               IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
 
         kiblnd_connreq_done(conn, 0);
         return;
@@ -2730,46 +3080,48 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
         kiblnd_connreq_done(conn, 0);
 }
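
The active-side checks are the mirror image of the passive side: the CONNACK may only answer with parameters at or below what was requested, because our receive buffers and RDMA mappings were sized from the request, so anything larger is -EPROTO. As a single predicate (simplified; a sketch of the rule, not the tree's code):

/* nonzero if the peer's CONNACK parameters are acceptable */
static int connack_params_ok(int acked_queue_depth, int req_queue_depth,
                             int acked_max_frags, int req_max_frags)
{
        if (acked_queue_depth > req_queue_depth)
                return 0;       /* more credits than we posted receives for */
        if (acked_max_frags > req_max_frags)
                return 0;       /* more frags than we prepared to map */
        return 1;
}
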
 
-int
-kiblnd_active_connect (struct rdma_cm_id *cmid)
+static int
+kiblnd_active_connect(struct rdma_cm_id *cmid)
 {
-        kib_peer_t              *peer = (kib_peer_t *)cmid->context;
-        kib_conn_t              *conn;
-        kib_msg_t               *msg;
-        struct rdma_conn_param   cp;
+       struct kib_peer_ni *peer_ni = cmid->context;
+       struct kib_conn *conn;
+       struct kib_msg *msg;
+       struct rdma_conn_param cp;
         int                      version;
         __u64                    incarnation;
         unsigned long            flags;
         int                      rc;
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        incarnation = peer->ibp_incarnation;
-        version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
+       incarnation = peer_ni->ibp_incarnation;
+       version     = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION :
+                                                peer_ni->ibp_version;
 
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
+       conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT,
+                                 version);
         if (conn == NULL) {
-                kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
-                kiblnd_peer_decref(peer); /* lose cmid's ref */
+                kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM);
+                kiblnd_peer_decref(peer_ni); /* lose cmid's ref */
                 return -ENOMEM;
         }
 
         /* conn "owns" cmid now, so I return success from here on to ensure the
          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
-         * on peer */
+         * on peer_ni */
 
         msg = &conn->ibc_connvars->cv_msg;
 
-        memset(msg, 0, sizeof(*msg));
-        kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
-        msg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
-        msg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
-        msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
+       memset(msg, 0, sizeof(*msg));
+       kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
+       msg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
+       msg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
+       msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
 
-        kiblnd_pack_msg(peer->ibp_ni, msg, version,
-                        0, peer->ibp_nid, incarnation);
+        kiblnd_pack_msg(peer_ni->ibp_ni, msg, version,
+                        0, peer_ni->ibp_nid, incarnation);
 
         memset(&cp, 0, sizeof(cp));
         cp.private_data        = msg;
@@ -2786,7 +3138,7 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
         rc = rdma_connect(cmid, &cp);
         if (rc != 0) {
                 CERROR("Can't connect to %s: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), rc);
+                       libcfs_nid2str(peer_ni->ibp_nid), rc);
                 kiblnd_connreq_done(conn, rc);
                 kiblnd_conn_decref(conn);
         }
@@ -2797,9 +3149,9 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
 int
 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 {
-        kib_peer_t  *peer;
-        kib_conn_t  *conn;
-       int          rc;
+       struct kib_peer_ni *peer_ni;
+       struct kib_conn *conn;
+       int rc;
 
        switch (event->event) {
        default:
@@ -2809,67 +3161,78 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                 /* destroy cmid on failure */
-               rc = kiblnd_passive_connect(cmid, 
+               rc = kiblnd_passive_connect(cmid,
                                             (void *)KIBLND_CONN_PARAM(event),
                                             KIBLND_CONN_PARAM_LEN(event));
                 CDEBUG(D_NET, "connreq: %d\n", rc);
                 return rc;
-                
+
        case RDMA_CM_EVENT_ADDR_ERROR:
-                peer = (kib_peer_t *)cmid->context;
+               peer_ni = cmid->context;
                 CNETERR("%s: ADDR ERROR %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
-                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
-                kiblnd_peer_decref(peer);
+                       libcfs_nid2str(peer_ni->ibp_nid), event->status);
+                kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
+                kiblnd_peer_decref(peer_ni);
                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
 
        case RDMA_CM_EVENT_ADDR_RESOLVED:
-                peer = (kib_peer_t *)cmid->context;
+               peer_ni = cmid->context;
 
                 CDEBUG(D_NET,"%s Addr resolved: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
+                       libcfs_nid2str(peer_ni->ibp_nid), event->status);
 
                 if (event->status != 0) {
                         CNETERR("Can't resolve address for %s: %d\n",
-                                libcfs_nid2str(peer->ibp_nid), event->status);
+                                libcfs_nid2str(peer_ni->ibp_nid), event->status);
                         rc = event->status;
-                } else {
-                        rc = rdma_resolve_route(
-                                cmid, *kiblnd_tunables.kib_timeout * 1000);
-                        if (rc == 0)
-                                return 0;
+               } else {
+                       rc = rdma_resolve_route(
+                               cmid, lnet_get_lnd_timeout() * 1000);
+                       if (rc == 0) {
+                               struct kib_net *net = peer_ni->ibp_ni->ni_data;
+                               struct kib_dev *dev = net->ibn_dev;
+
+                               CDEBUG(D_NET, "%s: connection bound to "
+                                      "%s:%pI4h:%s\n",
+                                      libcfs_nid2str(peer_ni->ibp_nid),
+                                      dev->ibd_ifname,
+                                      &dev->ibd_ifip, cmid->device->name);
+
+                               return 0;
+                       }
+
                         /* Can't initiate route resolution */
                         CERROR("Can't resolve route for %s: %d\n",
-                               libcfs_nid2str(peer->ibp_nid), rc);
+                               libcfs_nid2str(peer_ni->ibp_nid), rc);
                 }
-                kiblnd_peer_connect_failed(peer, 1, rc);
-                kiblnd_peer_decref(peer);
+                kiblnd_peer_connect_failed(peer_ni, 1, rc);
+                kiblnd_peer_decref(peer_ni);
                 return rc;                      /* rc != 0 destroys cmid */
 
        case RDMA_CM_EVENT_ROUTE_ERROR:
-                peer = (kib_peer_t *)cmid->context;
+               peer_ni = cmid->context;
                 CNETERR("%s: ROUTE ERROR %d\n",
-                        libcfs_nid2str(peer->ibp_nid), event->status);
-                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
-                kiblnd_peer_decref(peer);
+                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
+                kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
+                kiblnd_peer_decref(peer_ni);
                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
 
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
-                peer = (kib_peer_t *)cmid->context;
+               peer_ni = cmid->context;
                 CDEBUG(D_NET,"%s Route resolved: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
+                       libcfs_nid2str(peer_ni->ibp_nid), event->status);
 
                 if (event->status == 0)
                         return kiblnd_active_connect(cmid);
 
                 CNETERR("Can't resolve route for %s: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
-                kiblnd_peer_connect_failed(peer, 1, event->status);
-                kiblnd_peer_decref(peer);
+                       libcfs_nid2str(peer_ni->ibp_nid), event->status);
+                kiblnd_peer_connect_failed(peer_ni, 1, event->status);
+                kiblnd_peer_decref(peer_ni);
                 return event->status;           /* rc != 0 destroys cmid */
-                
+
        case RDMA_CM_EVENT_UNREACHABLE:
-                conn = (kib_conn_t *)cmid->context;
+               conn = cmid->context;
                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
                 CNETERR("%s: UNREACHABLE %d\n",
@@ -2879,7 +3242,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 return 0;
 
        case RDMA_CM_EVENT_CONNECT_ERROR:
-                conn = (kib_conn_t *)cmid->context;
+               conn = cmid->context;
                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
                 CNETERR("%s: CONNECT ERROR %d\n",
@@ -2889,7 +3252,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 return 0;
 
        case RDMA_CM_EVENT_REJECTED:
-                conn = (kib_conn_t *)cmid->context;
+               conn = cmid->context;
                 switch (conn->ibc_state) {
                 default:
                         LBUG();
@@ -2911,7 +3274,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 return 0;
 
        case RDMA_CM_EVENT_ESTABLISHED:
-                conn = (kib_conn_t *)cmid->context;
+               conn = cmid->context;
                 switch (conn->ibc_state) {
                 default:
                         LBUG();
@@ -2933,13 +3296,12 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 /* net keeps its ref on conn! */
                 return 0;
 
-#ifdef HAVE_OFED_RDMA_CMEV_TIMEWAIT_EXIT
         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                 CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
                 return 0;
-#endif
+
        case RDMA_CM_EVENT_DISCONNECTED:
-                conn = (kib_conn_t *)cmid->context;
+               conn = cmid->context;
                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                         CERROR("%s DISCONNECTED\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
@@ -2959,181 +3321,270 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                  * to ignore this */
                 return 0;
 
-#ifdef HAVE_OFED_RDMA_CMEV_ADDRCHANGE
         case RDMA_CM_EVENT_ADDR_CHANGE:
                 LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
                 return 0;
-#endif
         }
 }
 
-int
-kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs)
+static int
+kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
 {
-        kib_tx_t          *tx;
-        cfs_list_t        *ttmp;
-        int                timed_out = 0;
-
-        cfs_spin_lock(&conn->ibc_lock);
-
-        cfs_list_for_each (ttmp, txs) {
-                tx = cfs_list_entry (ttmp, kib_tx_t, tx_list);
-
-                if (txs != &conn->ibc_active_txs) {
-                        LASSERT (tx->tx_queued);
-                } else {
-                        LASSERT (!tx->tx_queued);
-                        LASSERT (tx->tx_waiting || tx->tx_sending != 0);
-                }
-
-                if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
-                        timed_out = 1;
-                        CERROR("Timed out tx: %s, %lu seconds\n",
-                               kiblnd_queue2str(conn, txs),
-                               cfs_duration_sec(jiffies - tx->tx_deadline));
-                        break;
-                }
-        }
-
-        cfs_spin_unlock(&conn->ibc_lock);
-        return timed_out;
+       struct kib_tx *tx;
+       struct list_head *ttmp;
+
+       list_for_each(ttmp, txs) {
+               tx = list_entry(ttmp, struct kib_tx, tx_list);
+
+               if (txs != &conn->ibc_active_txs) {
+                       LASSERT(tx->tx_queued);
+               } else {
+                       LASSERT(!tx->tx_queued);
+                       LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+               }
+
+               if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+                       CERROR("Timed out tx: %s, %lld seconds\n",
+                              kiblnd_queue2str(conn, txs),
+                              ktime_ms_delta(ktime_get(),
+                                             tx->tx_deadline) / MSEC_PER_SEC);
+                       return 1;
+               }
+       }
+
+       return 0;
 }
 
-int
-kiblnd_conn_timed_out (kib_conn_t *conn)
+static int
+kiblnd_conn_timed_out_locked(struct kib_conn *conn)
 {
-        return  kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
-                kiblnd_check_txs(conn, &conn->ibc_tx_noops) ||
-                kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
-                kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
-                kiblnd_check_txs(conn, &conn->ibc_active_txs);
+        return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
 }
 
-void
+static void
 kiblnd_check_conns (int idx)
 {
-        cfs_list_t        *peers = &kiblnd_data.kib_peers[idx];
-        cfs_list_t        *ptmp;
-        kib_peer_t        *peer;
-        kib_conn_t        *conn;
-        cfs_list_t        *ctmp;
-        unsigned long      flags;
-
- again:
-        /* NB. We expect to have a look at all the peers and not find any
-         * rdmas to time out, so we just use a shared lock while we
-         * take a look... */
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
-        cfs_list_for_each (ptmp, peers) {
-                peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
-
-                cfs_list_for_each (ctmp, &peer->ibp_conns) {
-                        conn = cfs_list_entry (ctmp, kib_conn_t, ibc_list);
-
-                        LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
-
-                        /* In case we have enough credits to return via a
-                         * NOOP, but there were no non-blocking tx descs
-                         * free to do it last time... */
-                        kiblnd_check_sends(conn);
-
-                        if (!kiblnd_conn_timed_out(conn))
-                                continue;
-
-                        /* Handle timeout by closing the whole connection.  We
-                         * can only be sure RDMA activity has ceased once the
-                         * QP has been modified. */
-
-                        kiblnd_conn_addref(conn); /* 1 ref for me... */
-
-                        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                                   flags);
-
-                        CERROR("Timed out RDMA with %s (%lu)\n",
-                               libcfs_nid2str(peer->ibp_nid),
-                               cfs_duration_sec(cfs_time_current() -
-                                                peer->ibp_last_alive));
-
-                        kiblnd_close_conn(conn, -ETIMEDOUT);
-                        kiblnd_conn_decref(conn); /* ...until here */
-
-                        /* start again now I've dropped the lock */
-                        goto again;
-                }
-        }
-
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       struct list_head  closes = LIST_HEAD_INIT(closes);
+       struct list_head  checksends = LIST_HEAD_INIT(checksends);
+       struct list_head  timedout_txs = LIST_HEAD_INIT(timedout_txs);
+       struct list_head *peers = &kiblnd_data.kib_peers[idx];
+       struct list_head *ptmp;
+       struct kib_peer_ni *peer_ni;
+       struct kib_conn *conn;
+       struct kib_tx *tx, *tx_tmp;
+       struct list_head *ctmp;
+       unsigned long     flags;
+
+       /* NB. We expect to look at all the peers and not find any RDMAs to
+        * time out. We take the write lock because timed-out txs may need to
+        * be moved off a peer_ni's ibp_tx_queue below. */
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+       list_for_each(ptmp, peers) {
+               peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
+
+               /* Check tx_deadline */
+               list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
+                       if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+                               CWARN("Timed out tx for %s: %lld seconds\n",
+                                     libcfs_nid2str(peer_ni->ibp_nid),
+                                     ktime_ms_delta(ktime_get(),
+                                                    tx->tx_deadline) / MSEC_PER_SEC);
+                               list_move(&tx->tx_list, &timedout_txs);
+                       }
+               }
+
+               list_for_each(ctmp, &peer_ni->ibp_conns) {
+                       int timedout;
+                       int sendnoop;
+
+                       conn = list_entry(ctmp, struct kib_conn, ibc_list);
+
+                       LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
+
+                       spin_lock(&conn->ibc_lock);
+
+                       sendnoop = kiblnd_need_noop(conn);
+                       timedout = kiblnd_conn_timed_out_locked(conn);
+                       if (!sendnoop && !timedout) {
+                               spin_unlock(&conn->ibc_lock);
+                               continue;
+                       }
+
+                       if (timedout) {
+                               CERROR("Timed out RDMA with %s (%lld): "
+                                      "c: %u, oc: %u, rc: %u\n",
+                                      libcfs_nid2str(peer_ni->ibp_nid),
+                                      ktime_get_seconds() - peer_ni->ibp_last_alive,
+                                      conn->ibc_credits,
+                                      conn->ibc_outstanding_credits,
+                                      conn->ibc_reserved_credits);
+                               list_add(&conn->ibc_connd_list, &closes);
+                       } else {
+                               list_add(&conn->ibc_connd_list, &checksends);
+                       }
+                       /* +ref for 'closes' or 'checksends' */
+                       kiblnd_conn_addref(conn);
+
+                       spin_unlock(&conn->ibc_lock);
+               }
+       }
+
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
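+       /* NB: the work below is done after dropping kib_global_lock on
+        * purpose: kiblnd_close_conn() takes that lock itself, and
+        * kiblnd_txlist_done() calls back into LNet to finalise messages. */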
+       if (!list_empty(&timedout_txs))
+               kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT,
+                                  LNET_MSG_STATUS_LOCAL_TIMEOUT);
+
+       /* Handle timeout by closing the whole
+        * connection. We can only be sure RDMA activity
+        * has ceased once the QP has been modified. */
+       while (!list_empty(&closes)) {
+               conn = list_entry(closes.next,
+                                 struct kib_conn, ibc_connd_list);
+               list_del(&conn->ibc_connd_list);
+               kiblnd_close_conn(conn, -ETIMEDOUT);
+               kiblnd_conn_decref(conn);
+       }
+
+       /* In case we have enough credits to return via a
+        * NOOP, but there were no non-blocking tx descs
+        * free to do it last time... */
+       while (!list_empty(&checksends)) {
+               conn = list_entry(checksends.next,
+                                 struct kib_conn, ibc_connd_list);
+               list_del(&conn->ibc_connd_list);
+
+               spin_lock(&conn->ibc_lock);
+               kiblnd_check_sends_locked(conn);
+               spin_unlock(&conn->ibc_lock);
+
+               kiblnd_conn_decref(conn);
+       }
 }
 
-void
-kiblnd_disconnect_conn (kib_conn_t *conn)
+static void
+kiblnd_disconnect_conn(struct kib_conn *conn)
 {
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (current == kiblnd_data.kib_connd);
-        LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
+       LASSERT (!in_interrupt());
+       LASSERT (current == kiblnd_data.kib_connd);
+       LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
 
-        rdma_disconnect(conn->ibc_cmid);
-        kiblnd_finalise_conn(conn);
+       rdma_disconnect(conn->ibc_cmid);
+       kiblnd_finalise_conn(conn);
 
-        kiblnd_peer_notify(conn->ibc_peer);
+       kiblnd_peer_notify(conn->ibc_peer);
 }
 
+/*
+ * High-water mark for reconnection races with the same peer_ni: further
+ * attempts are delayed once more than KIB_RECONN_HIGH_RACE have been tried.
+ */
+#define KIB_RECONN_HIGH_RACE   10
+/*
+ * Allow connd to take a break and handle other things after consecutive
+ * reconnection attempts.
+ */
+#define KIB_RECONN_BREAK       100
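+/*
+ * NB: kiblnd_connd() below queues zombie conns whose peer_ni has raced more
+ * than KIB_RECONN_HIGH_RACE times on kib_reconn_wait, splicing them back to
+ * kib_reconn_list at most once per second, and it handles no more than
+ * KIB_RECONN_BREAK reconnects per pass so other connd work is not starved.
+ */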
+
 int
 kiblnd_connd (void *arg)
 {
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        kib_conn_t        *conn;
-        int                timeout;
-        int                i;
-        int                dropped_lock;
-        int                peer_index = 0;
-        unsigned long      deadline = jiffies;
+       spinlock_t        *lock = &kiblnd_data.kib_connd_lock;
+       wait_queue_entry_t wait;
+       unsigned long      flags;
+       struct kib_conn *conn;
+       int                timeout;
+       int                i;
+       int                dropped_lock;
+       int                peer_index = 0;
+       unsigned long      deadline = jiffies;
 
-        cfs_daemonize ("kiblnd_connd");
-        cfs_block_allsigs ();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init (&wait);
-        kiblnd_data.kib_connd = current;
+       init_waitqueue_entry(&wait, current);
+       kiblnd_data.kib_connd = current;
 
-        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+       spin_lock_irqsave(lock, flags);
 
-        while (!kiblnd_data.kib_shutdown) {
+       while (!kiblnd_data.kib_shutdown) {
+               int reconn = 0;
 
                 dropped_lock = 0;
 
-                if (!cfs_list_empty (&kiblnd_data.kib_connd_zombies)) {
-                        conn = cfs_list_entry(kiblnd_data. \
-                                              kib_connd_zombies.next,
-                                              kib_conn_t, ibc_list);
-                        cfs_list_del(&conn->ibc_list);
+               if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+                       struct kib_peer_ni *peer_ni = NULL;
+
+                       conn = list_entry(kiblnd_data.kib_connd_zombies.next,
+                                         struct kib_conn, ibc_list);
+                       list_del(&conn->ibc_list);
+                       if (conn->ibc_reconnect) {
+                               peer_ni = conn->ibc_peer;
+                               kiblnd_peer_addref(peer_ni);
+                       }
+
+                       spin_unlock_irqrestore(lock, flags);
+                       dropped_lock = 1;
+
+                       kiblnd_destroy_conn(conn);
+
+                       spin_lock_irqsave(lock, flags);
+                       if (!peer_ni) {
+                               LIBCFS_FREE(conn, sizeof(*conn));
+                               continue;
+                       }
+
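+                       /* NB: 'conn' was torn down by kiblnd_destroy_conn()
+                        * above, but its memory is reused as a placeholder to
+                        * carry the peer_ni ref to the reconnect queues; the
+                        * reconnect loop below drops that ref and frees it. */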
+                       conn->ibc_peer = peer_ni;
+                       if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+                               list_add_tail(&conn->ibc_list,
+                                             &kiblnd_data.kib_reconn_list);
+                       else
+                               list_add_tail(&conn->ibc_list,
+                                             &kiblnd_data.kib_reconn_wait);
+               }
+
+               if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+                       conn = list_entry(kiblnd_data.kib_connd_conns.next,
+                                         struct kib_conn, ibc_list);
+                       list_del(&conn->ibc_list);
+
+                       spin_unlock_irqrestore(lock, flags);
+                       dropped_lock = 1;
+
+                       kiblnd_disconnect_conn(conn);
+                       kiblnd_conn_decref(conn);
+
+                       spin_lock_irqsave(lock, flags);
+                }
 
-                        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
-                                                   flags);
-                        dropped_lock = 1;
+               while (reconn < KIB_RECONN_BREAK) {
+                       if (kiblnd_data.kib_reconn_sec !=
+                           ktime_get_real_seconds()) {
+                               kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
+                               list_splice_init(&kiblnd_data.kib_reconn_wait,
+                                                &kiblnd_data.kib_reconn_list);
+                       }
 
-                        kiblnd_destroy_conn(conn);
+                       if (list_empty(&kiblnd_data.kib_reconn_list))
+                               break;
 
-                        cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
-                                               flags);
-                }
+                       conn = list_entry(kiblnd_data.kib_reconn_list.next,
+                                         struct kib_conn, ibc_list);
+                       list_del(&conn->ibc_list);
 
-                if (!cfs_list_empty (&kiblnd_data.kib_connd_conns)) {
-                        conn = cfs_list_entry (kiblnd_data.kib_connd_conns.next,
-                                               kib_conn_t, ibc_list);
-                        cfs_list_del(&conn->ibc_list);
+                       spin_unlock_irqrestore(lock, flags);
+                       dropped_lock = 1;
 
-                        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
-                                                    flags);
-                        dropped_lock = 1;
-
-                        kiblnd_disconnect_conn(conn);
-                        kiblnd_conn_decref(conn);
+                       reconn += kiblnd_reconnect_peer(conn->ibc_peer);
+                       kiblnd_peer_decref(conn->ibc_peer);
+                       LIBCFS_FREE(conn, sizeof(*conn));
 
-                        cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
-                                               flags);
-                }
+                       spin_lock_irqsave(lock, flags);
+               }
 
                 /* careful with the jiffy wrap... */
                 timeout = (int)(deadline - jiffies);
@@ -3141,80 +3592,103 @@ kiblnd_connd (void *arg)
                         const int n = 4;
                         const int p = 1;
                         int       chunk = kiblnd_data.kib_peer_hash_size;
+                       unsigned int lnd_timeout;
 
-                        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+                       spin_unlock_irqrestore(lock, flags);
                         dropped_lock = 1;
 
                         /* Time to check for RDMA timeouts on a few more
                          * peers: I do checks every 'p' seconds on a
-                         * proportion of the peer table and I need to check
+                         * proportion of the peer_ni table and I need to check
                          * every connection 'n' times within a timeout
                          * interval, to ensure I detect a timeout on any
                          * connection within (n+1)/n times the timeout
                          * interval. */
 
-                        if (*kiblnd_tunables.kib_timeout > n * p)
-                                chunk = (chunk * n * p) /
-                                        *kiblnd_tunables.kib_timeout;
-                        if (chunk == 0)
-                                chunk = 1;
+                       lnd_timeout = lnet_get_lnd_timeout();
+                       if (lnd_timeout > n * p)
+                               chunk = (chunk * n * p) / lnd_timeout;
+                       if (chunk == 0)
+                               chunk = 1;
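+                       /* Illustrative numbers: with n = 4, p = 1, a
+                        * 101-bucket peer_ni table and lnd_timeout = 50s,
+                        * chunk = 101 * 4 * 1 / 50 = 8, i.e. 8 buckets are
+                        * scanned per second and every conn is checked about
+                        * 4 times per timeout interval. */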
 
-                        for (i = 0; i < chunk; i++) {
-                                kiblnd_check_conns(peer_index);
-                                peer_index = (peer_index + 1) %
-                                             kiblnd_data.kib_peer_hash_size;
-                        }
+                       for (i = 0; i < chunk; i++) {
+                               kiblnd_check_conns(peer_index);
+                               peer_index = (peer_index + 1) %
+                                            kiblnd_data.kib_peer_hash_size;
+                       }
 
-                        deadline += p * CFS_HZ;
-                        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock,
-                                              flags);
-                }
+                       deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+                       spin_lock_irqsave(lock, flags);
+               }
 
-                if (dropped_lock)
-                        continue;
+               if (dropped_lock)
+                       continue;
 
-                /* Nothing to do for 'timeout'  */
-                cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add (&kiblnd_data.kib_connd_waitq, &wait);
-                cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+               /* Nothing to do for 'timeout'  */
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
+               spin_unlock_irqrestore(lock, flags);
 
-                cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+               schedule_timeout(timeout);
 
-                cfs_set_current_state (CFS_TASK_RUNNING);
-                cfs_waitq_del (&kiblnd_data.kib_connd_waitq, &wait);
-                cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
-        }
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
+               spin_lock_irqsave(lock, flags);
+       }
 
-        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+       spin_unlock_irqrestore(lock, flags);
 
-        kiblnd_thread_fini();
-        return (0);
+       kiblnd_thread_fini();
+       return 0;
 }
 
 void
 kiblnd_qp_event(struct ib_event *event, void *arg)
 {
-        kib_conn_t *conn = arg;
+       struct kib_conn *conn = arg;
 
-        switch (event->event) {
-        case IB_EVENT_COMM_EST:
-                CDEBUG(D_NET, "%s established\n",
-                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
-                return;
+       switch (event->event) {
+       case IB_EVENT_COMM_EST:
+               CDEBUG(D_NET, "%s established\n",
+                      libcfs_nid2str(conn->ibc_peer->ibp_nid));
+               /* We received a packet but the connection isn't established
+                * yet; the handshake packet was probably lost, so notify the
+                * CM to force the connection into the established state. */
+               rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
+               return;
+
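+       /* NB (LU-11299): fatal port/device errors are flagged on the NI via
+        * ni_fatal_error_on so LNet's health machinery can avoid the failed
+        * interface; IB_EVENT_PORT_ACTIVE below clears the flag again. */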
+       case IB_EVENT_PORT_ERR:
+       case IB_EVENT_DEVICE_FATAL:
+               CERROR("Fatal device error for NI %s\n",
+                      libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+               atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
+               return;
+
+       case IB_EVENT_PORT_ACTIVE:
+               CERROR("Port reactivated for NI %s\n",
+                      libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+               atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
+               return;
 
-        default:
-                CERROR("%s: Async QP event type %d\n",
-                       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
-                return;
-        }
+       default:
+               CERROR("%s: Async QP event type %d\n",
+                      libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
+               return;
+       }
 }
 
-void
+static void
 kiblnd_complete (struct ib_wc *wc)
 {
-        switch (kiblnd_wreqid2type(wc->wr_id)) {
-        default:
-                LBUG();
+       switch (kiblnd_wreqid2type(wc->wr_id)) {
+       default:
+               LBUG();
+
+       case IBLND_WID_MR:
+               if (wc->status != IB_WC_SUCCESS &&
+                   wc->status != IB_WC_WR_FLUSH_ERR)
+                       CNETERR("FastReg failed: %d\n", wc->status);
+               return;
 
         case IBLND_WID_RDMA:
                 /* We only get RDMA completion notification if it fails.  All
@@ -3239,39 +3713,41 @@ kiblnd_complete (struct ib_wc *wc)
 }
 
 void
-kiblnd_cq_completion (struct ib_cq *cq, void *arg)
+kiblnd_cq_completion(struct ib_cq *cq, void *arg)
 {
-        /* NB I'm not allowed to schedule this conn once its refcount has
-         * reached 0.  Since fundamentally I'm racing with scheduler threads
-         * consuming my CQ I could be called after all completions have
-         * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
-         * and this CQ is about to be destroyed so I NOOP. */
-        kib_conn_t     *conn = (kib_conn_t *)arg;
-        unsigned long   flags;
-
-        LASSERT (cq == conn->ibc_cq);
-
-        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
-
-        conn->ibc_ready = 1;
-
-        if (!conn->ibc_scheduled &&
-            (conn->ibc_nrx > 0 ||
-             conn->ibc_nsends_posted > 0)) {
-                kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
-                conn->ibc_scheduled = 1;
-                cfs_list_add_tail(&conn->ibc_sched_list,
-                                  &kiblnd_data.kib_sched_conns);
-                cfs_waitq_signal(&kiblnd_data.kib_sched_waitq);
-        }
+       /* NB I'm not allowed to schedule this conn once its refcount has
+        * reached 0.  Since fundamentally I'm racing with scheduler threads
+        * consuming my CQ I could be called after all completions have
+        * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
+        * and this CQ is about to be destroyed so I NOOP. */
+       struct kib_conn *conn = arg;
+       struct kib_sched_info *sched = conn->ibc_sched;
+       unsigned long flags;
+
+       LASSERT(cq == conn->ibc_cq);
+
+       spin_lock_irqsave(&sched->ibs_lock, flags);
+
+       conn->ibc_ready = 1;
+
+       if (!conn->ibc_scheduled &&
+           (conn->ibc_nrx > 0 ||
+            conn->ibc_nsends_posted > 0)) {
+               kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
+               conn->ibc_scheduled = 1;
+               list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
 
-        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
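+               /* NB: testing waitqueue_active() without extra barriers is
+                * safe here: schedulers add themselves to ibs_waitq while
+                * holding ibs_lock, which this path also holds. */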
+               if (waitqueue_active(&sched->ibs_waitq))
+                       wake_up(&sched->ibs_waitq);
+       }
+
+       spin_unlock_irqrestore(&sched->ibs_lock, flags);
 }
 
 void
 kiblnd_cq_event(struct ib_event *event, void *arg)
 {
-        kib_conn_t *conn = arg;
+       struct kib_conn *conn = arg;
 
         CERROR("%s: async CQ event type %d\n",
                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
@@ -3280,48 +3756,55 @@ kiblnd_cq_event(struct ib_event *event, void *arg)
 int
 kiblnd_scheduler(void *arg)
 {
-        long            id = (long)arg;
-        cfs_waitlink_t  wait;
-        char            name[16];
-        unsigned long   flags;
-        kib_conn_t     *conn;
-        struct ib_wc    wc;
-        int             rc;
-        int             did_something;
-        int             busy_loops = 0;
+       long                    id = (long)arg;
+       struct kib_sched_info   *sched;
+       struct kib_conn *conn;
+       wait_queue_entry_t      wait;
+       unsigned long           flags;
+       struct ib_wc            wc;
+       int                     did_something;
+       int                     busy_loops = 0;
+       int                     rc;
 
-        snprintf(name, sizeof(name), "kiblnd_sd_%02ld", id);
-        cfs_daemonize(name);
-        cfs_block_allsigs();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&wait);
+       init_waitqueue_entry(&wait, current);
 
-        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+       sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
 
-        while (!kiblnd_data.kib_shutdown) {
-                if (busy_loops++ >= IBLND_RESCHED) {
-                        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
-                                                   flags);
+       rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
+       if (rc != 0) {
+               CWARN("Unable to bind on CPU partition %d, please verify "
+                     "whether all CPUs are healthy and reload modules if "
+                     "necessary, otherwise your system might be at risk of "
+                     "low performance\n", sched->ibs_cpt);
+       }
 
-                        cfs_cond_resched();
-                        busy_loops = 0;
+       spin_lock_irqsave(&sched->ibs_lock, flags);
 
-                        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
-                                              flags);
-                }
+       while (!kiblnd_data.kib_shutdown) {
+               if (busy_loops++ >= IBLND_RESCHED) {
+                       spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+                       cond_resched();
+                       busy_loops = 0;
 
-                did_something = 0;
+                       spin_lock_irqsave(&sched->ibs_lock, flags);
+               }
 
-                if (!cfs_list_empty(&kiblnd_data.kib_sched_conns)) {
-                        conn = cfs_list_entry(kiblnd_data.kib_sched_conns.next,
-                                              kib_conn_t, ibc_sched_list);
-                        /* take over kib_sched_conns' ref on conn... */
-                        LASSERT(conn->ibc_scheduled);
-                        cfs_list_del(&conn->ibc_sched_list);
-                        conn->ibc_ready = 0;
+               did_something = 0;
 
-                        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
-                                                   flags);
+               if (!list_empty(&sched->ibs_conns)) {
+                       conn = list_entry(sched->ibs_conns.next,
+                                         struct kib_conn, ibc_sched_list);
+                       /* take over kib_sched_conns' ref on conn... */
+                       LASSERT(conn->ibc_scheduled);
+                       list_del(&conn->ibc_sched_list);
+                       conn->ibc_ready = 0;
+
+                       spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
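+                       /* set a sentinel: some HCAs/firmware can return a
+                        * completion without filling in wr_id; the
+                        * IBLND_WID_INVAL check below catches that case */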
+                       wc.wr_id = IBLND_WID_INVAL;
 
                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                         if (rc == 0) {
@@ -3333,52 +3816,59 @@ kiblnd_scheduler(void *arg)
                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                                         kiblnd_close_conn(conn, -EIO);
                                         kiblnd_conn_decref(conn);
-                                        cfs_spin_lock_irqsave(&kiblnd_data. \
-                                                              kib_sched_lock,
-                                                              flags);
-                                        continue;
-                                }
-
-                                rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
-                        }
-
-                        if (rc < 0) {
-                                CWARN("%s: ib_poll_cq failed: %d, "
-                                      "closing connection\n",
-                                      libcfs_nid2str(conn->ibc_peer->ibp_nid),
-                                                     rc);
-                                kiblnd_close_conn(conn, -EIO);
-                                kiblnd_conn_decref(conn);
-                                cfs_spin_lock_irqsave(&kiblnd_data. \
-                                                      kib_sched_lock, flags);
-                                continue;
-                        }
-
-                        cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
-                                              flags);
-
-                        if (rc != 0 || conn->ibc_ready) {
-                                /* There may be another completion waiting; get
-                                 * another scheduler to check while I handle
-                                 * this one... */
-                                kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
-                                cfs_list_add_tail(&conn->ibc_sched_list,
-                                                  &kiblnd_data.kib_sched_conns);
-                                cfs_waitq_signal(&kiblnd_data.kib_sched_waitq);
-                        } else {
-                                conn->ibc_scheduled = 0;
-                        }
-
-                        if (rc != 0) {
-                                cfs_spin_unlock_irqrestore(&kiblnd_data. \
-                                                           kib_sched_lock,
-                                                           flags);
-
-                                kiblnd_complete(&wc);
-
-                                cfs_spin_lock_irqsave(&kiblnd_data. \
-                                                      kib_sched_lock,
-                                                      flags);
+                                       spin_lock_irqsave(&sched->ibs_lock, flags);
+                                       continue;
+                               }
+
+                               rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+                       }
+
+                       if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
+                               LCONSOLE_ERROR(
+                                       "ib_poll_cq (rc: %d) returned invalid "
+                                       "wr_id, opcode %d, status: %d, "
+                                       "vendor_err: %d, conn: %s status: %d\n"
+                                       "please upgrade firmware and OFED or "
+                                       "contact vendor.\n", rc,
+                                       wc.opcode, wc.status, wc.vendor_err,
+                                       libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                                       conn->ibc_state);
+                               rc = -EINVAL;
+                       }
+
+                       if (rc < 0) {
+                               CWARN("%s: ib_poll_cq failed: %d, "
+                                     "closing connection\n",
+                                     libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                                     rc);
+                               kiblnd_close_conn(conn, -EIO);
+                               kiblnd_conn_decref(conn);
+                               spin_lock_irqsave(&sched->ibs_lock, flags);
+                               continue;
+                       }
+
+                       spin_lock_irqsave(&sched->ibs_lock, flags);
+
+                       if (rc != 0 || conn->ibc_ready) {
+                               /* There may be another completion waiting; get
+                                * another scheduler to check while I handle
+                                * this one... */
+                               /* +1 ref for sched_conns */
+                               kiblnd_conn_addref(conn);
+                               list_add_tail(&conn->ibc_sched_list,
+                                             &sched->ibs_conns);
+                               if (waitqueue_active(&sched->ibs_waitq))
+                                       wake_up(&sched->ibs_waitq);
+                       } else {
+                               conn->ibc_scheduled = 0;
+                       }
+
+                       if (rc != 0) {
+                               spin_unlock_irqrestore(&sched->ibs_lock, flags);
+                               kiblnd_complete(&wc);
+
+                               spin_lock_irqsave(&sched->ibs_lock, flags);
                         }
 
                         kiblnd_conn_decref(conn); /* ...drop my ref from above */
@@ -3388,75 +3878,73 @@ kiblnd_scheduler(void *arg)
                 if (did_something)
                         continue;
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
-                cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
+               spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
-                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
-                busy_loops = 0;
+               schedule();
+               busy_loops = 0;
 
-                cfs_waitq_del(&kiblnd_data.kib_sched_waitq, &wait);
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
-        }
+               remove_wait_queue(&sched->ibs_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               spin_lock_irqsave(&sched->ibs_lock, flags);
+       }
 
-        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+       spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
-        kiblnd_thread_fini();
-        return (0);
+       kiblnd_thread_fini();
+       return 0;
 }
 
 int
 kiblnd_failover_thread(void *arg)
 {
-        cfs_rwlock_t      *glock = &kiblnd_data.kib_global_lock;
-        kib_dev_t         *dev;
-        cfs_waitlink_t     wait;
-        unsigned long      flags;
-        int                rc;
+       rwlock_t        *glock = &kiblnd_data.kib_global_lock;
+       struct kib_dev *dev;
+       wait_queue_entry_t wait;
+       unsigned long    flags;
+       int              rc;
 
-        LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+       LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
 
-        cfs_daemonize ("kiblnd_failover");
-        cfs_block_allsigs ();
+       cfs_block_allsigs();
 
-        cfs_waitlink_init(&wait);
-        cfs_write_lock_irqsave(glock, flags);
+       init_waitqueue_entry(&wait, current);
+       write_lock_irqsave(glock, flags);
 
         while (!kiblnd_data.kib_shutdown) {
                 int     do_failover = 0;
                 int     long_sleep;
 
-                cfs_list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+               list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
                                     ibd_fail_list) {
-                        if (cfs_time_before(cfs_time_current(),
-                                            dev->ibd_next_failover))
+                       if (ktime_get_seconds() < dev->ibd_next_failover)
                                 continue;
                         do_failover = 1;
                         break;
                 }
 
                 if (do_failover) {
-                        cfs_list_del_init(&dev->ibd_fail_list);
+                       list_del_init(&dev->ibd_fail_list);
                         dev->ibd_failover = 1;
-                        cfs_write_unlock_irqrestore(glock, flags);
+                       write_unlock_irqrestore(glock, flags);
 
-                        rc = kiblnd_dev_failover(dev);
+                       rc = kiblnd_dev_failover(dev);
 
-                        cfs_write_lock_irqsave(glock, flags);
+                       write_lock_irqsave(glock, flags);
 
                         LASSERT (dev->ibd_failover);
                         dev->ibd_failover = 0;
                         if (rc >= 0) { /* Device is OK or failover succeeded */
-                                dev->ibd_next_failover = cfs_time_shift(3);
+                               dev->ibd_next_failover = ktime_get_seconds() + 3;
                                 continue;
                         }
 
                         /* failed to failover, retry later */
-                        dev->ibd_next_failover =
-                                cfs_time_shift(min(dev->ibd_failed_failover, 10));
+                       dev->ibd_next_failover = ktime_get_seconds() +
+                                                min(dev->ibd_failed_failover, 10);
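+                       /* NB: presumably a linear back-off, capped at 10s,
+                        * scaling with the count of failed failover attempts */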
                         if (kiblnd_dev_can_failover(dev)) {
-                                cfs_list_add_tail(&dev->ibd_fail_list,
+                               list_add_tail(&dev->ibd_fail_list,
                                               &kiblnd_data.kib_failed_devs);
                         }
 
@@ -3464,17 +3952,17 @@ kiblnd_failover_thread(void *arg)
                 }
 
                 /* long sleep if no more pending failover */
-                long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
+               long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
 
-                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
-                cfs_write_unlock_irqrestore(glock, flags);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+               write_unlock_irqrestore(glock, flags);
 
-                rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
-                                                   cfs_time_seconds(1));
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
-                cfs_write_lock_irqsave(glock, flags);
+               rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+                                                  cfs_time_seconds(1));
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+               write_lock_irqsave(glock, flags);
 
                 if (!long_sleep || rc != 0)
                         continue;
@@ -3483,15 +3971,15 @@ kiblnd_failover_thread(void *arg)
          * we need a check like this because if there is no active
          * connection on the dev and no SEND from the local node, we may
          * listen on the wrong HCA forever while a bonding failover occurs */
-                cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+               list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
                         if (kiblnd_dev_can_failover(dev)) {
-                                cfs_list_add_tail(&dev->ibd_fail_list,
+                               list_add_tail(&dev->ibd_fail_list,
                                               &kiblnd_data.kib_failed_devs);
                         }
                 }
         }
 
-        cfs_write_unlock_irqrestore(glock, flags);
+       write_unlock_irqrestore(glock, flags);
 
         kiblnd_thread_fini();
         return 0;