author     liangzhen <liangzhen>
           Mon, 13 Apr 2009 17:45:26 +0000 (17:45 +0000)
committer  liangzhen <liangzhen>
           Mon, 13 Apr 2009 17:45:26 +0000 (17:45 +0000)
b=13621, 15983
i=isaac
i=maxim

o2iblnd protocol version 2

lnet/ChangeLog
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd.h
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/o2iblnd/o2iblnd_modparams.c

diff --git a/lnet/ChangeLog b/lnet/ChangeLog
index ddcf070..93c926e 100644
@@ -12,10 +12,33 @@ tbd  Sun Microsystems, Inc.
         mxlnd     - MX 1.2.1 or later,
         ptllnd    - Portals 3.3 / UNICOS/lc 1.5.x, 2.0.x
 
-Severity   :
-Bugzilla   :
-Description:
-Details    :
+Severity   : 
+Bugzilla   : 
+Description: 
+Details    : 
+
+Severity   : major
+Bugzilla   : 13621, 15983
+Description: Protocol V2 of o2iblnd
+Details    : o2iblnd V2 has several new features:
+             . map-on-demand: disabled by default; enable it with the
+               modparam "map_on_demand=@value@", where @value@ must be
+               >= 0 and < 256 (0 disables it; any other valid value
+               enables it). o2iblnd creates an FMR or physical MR for
+               an RDMA if the RD has more than @value@ fragments.
+               Enabling map-on-demand uses less memory per connection,
+               but a little more CPU for each RDMA.
+             . iWARP: to support iWARP, enable map-on-demand; 32 and
+               64 are the recommended values. iWARP will probably
+               fail for values >= 128.
+             . OOB NOOP message: resolves a deadlock on routers.
+             . tunable peer_credits_hiw (high-water mark for returning
+               credits): defaults to (peer_credits - 1); it can be set
+               between peer_credits/2 and (peer_credits - 1). A lower
+               value is recommended for high-latency networks.
+             . tunable message queue size: always equals peer_credits;
+               a higher value is recommended for high-latency networks.
+             . V2 is interoperable with earlier versions of o2iblnd.
 
 Severity   : normal
 Bugzilla   : 16034
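
The peer_credits_hiw rule described above (default peer_credits - 1, valid
range [peer_credits/2, peer_credits - 1]) amounts to a simple clamp.  A
minimal sketch of that validation, with illustrative names only (the real
handling lives in lnet/klnds/o2iblnd/o2iblnd_modparams.c):

    /* Sketch only: clamp peer_credits_hiw into the documented range.
     * The function and variable names are assumptions, not the
     * module's API. */
    static void
    sample_fixup_peer_credits_hiw(int peer_credits, int *hiw)
    {
            if (*hiw >= peer_credits)
                    *hiw = peer_credits - 1;        /* the default */
            if (*hiw < peer_credits / 2)
                    *hiw = peer_credits / 2;        /* lowest allowed */
    }
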
diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index 440587e..9a5cd02 100644
@@ -40,7 +40,7 @@
 
 #include "o2iblnd.h"
 
-lnd_t the_kiblnd = {
+lnd_t the_o2iblnd = {
         .lnd_type       = O2IBLND,
         .lnd_startup    = kiblnd_startup,
         .lnd_shutdown   = kiblnd_shutdown,
@@ -65,15 +65,129 @@ kiblnd_cksum (void *ptr, int nob)
         return (sum == 0) ? 1 : sum;
 }
 
-void
-kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
+static char *
+kiblnd_msgtype2str(int type)
+{
+        switch (type) {
+        case IBLND_MSG_CONNREQ:
+                return "CONNREQ";
+
+        case IBLND_MSG_CONNACK:
+                return "CONNACK";
+
+        case IBLND_MSG_NOOP:
+                return "NOOP";
+
+        case IBLND_MSG_IMMEDIATE:
+                return "IMMEDIATE";
+
+        case IBLND_MSG_PUT_REQ:
+                return "PUT_REQ";
+
+        case IBLND_MSG_PUT_NAK:
+                return "PUT_NAK";
+
+        case IBLND_MSG_PUT_ACK:
+                return "PUT_ACK";
+
+        case IBLND_MSG_PUT_DONE:
+                return "PUT_DONE";
+
+        case IBLND_MSG_GET_REQ:
+                return "GET_REQ";
+
+        case IBLND_MSG_GET_DONE:
+                return "GET_DONE";
+
+        default:
+                return "???";
+        }
+}
+
+static int
+kiblnd_msgtype2size(int type)
+{
+        const int hdr_size = offsetof(kib_msg_t, ibm_u);
+
+        switch (type) {
+        case IBLND_MSG_CONNREQ:
+        case IBLND_MSG_CONNACK:
+                return hdr_size + sizeof(kib_connparams_t);
+
+        case IBLND_MSG_NOOP:
+                return hdr_size;
+
+        case IBLND_MSG_IMMEDIATE:
+                return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+
+        case IBLND_MSG_PUT_REQ:
+                return hdr_size + sizeof(kib_putreq_msg_t);
+
+        case IBLND_MSG_PUT_ACK:
+                return hdr_size + sizeof(kib_putack_msg_t);
+
+        case IBLND_MSG_GET_REQ:
+                return hdr_size + sizeof(kib_get_msg_t);
+
+        case IBLND_MSG_PUT_NAK:
+        case IBLND_MSG_PUT_DONE:
+        case IBLND_MSG_GET_DONE:
+                return hdr_size + sizeof(kib_completion_msg_t);
+        default:
+                return -1;
+        }
+}
+
+static int
+kiblnd_unpack_rd(kib_msg_t *msg, int flip)
 {
-        msg->ibm_type = type;
-        msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
+        kib_rdma_desc_t   *rd;
+        int                nob;
+        int                n;
+        int                i;
+
+        LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ ||
+                 msg->ibm_type == IBLND_MSG_PUT_ACK);
+
+        rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
+                              &msg->ibm_u.get.ibgm_rd :
+                              &msg->ibm_u.putack.ibpam_rd;
+
+        if (flip) {
+                __swab32s(&rd->rd_key);
+                __swab32s(&rd->rd_nfrags);
+        }
+
+        n = rd->rd_nfrags;
+
+        if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
+                CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
+                       n, IBLND_MAX_RDMA_FRAGS);
+                return 1;
+        }
+
+        nob = offsetof (kib_msg_t, ibm_u) +
+              kiblnd_rd_msg_size(rd, msg->ibm_type, n);
+
+        if (msg->ibm_nob < nob) {
+                CERROR("Short %s: %d(%d)\n",
+                       kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
+                return 1;
+        }
+
+        if (!flip)
+                return 0;
+
+        for (i = 0; i < n; i++) {
+                __swab32s(&rd->rd_frags[i].rf_nob);
+                __swab64s(&rd->rd_frags[i].rf_addr);
+        }
+
+        return 0;
 }
 
 void
-kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg,
+kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
                  int credits, lnet_nid_t dstnid, __u64 dststamp)
 {
         kib_net_t *net = ni->ni_data;
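
kiblnd_unpack_rd() above sizes the message with kiblnd_rd_msg_size(), which
this hunk does not show.  A plausible definition, assuming it simply measures
the body up to the n-th fragment of the embedded RDMA descriptor:

    /* Assumed shape of the helper; the real definition is in o2iblnd.h.
     * Bytes occupied by a GET_REQ or PUT_ACK body whose RDMA descriptor
     * carries n fragments. */
    #define kiblnd_rd_msg_size(rd, type, n)                           \
            ((type) == IBLND_MSG_GET_REQ ?                            \
             offsetof(kib_get_msg_t,    ibgm_rd.rd_frags[n]) :        \
             offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]))
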
@@ -81,7 +195,7 @@ kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg,
         /* CAVEAT EMPTOR! all message fields not set here should have been
          * initialised previously. */
         msg->ibm_magic    = IBLND_MSG_MAGIC;
-        msg->ibm_version  = IBLND_MSG_VERSION;
+        msg->ibm_version  = version;
         /*   ibm_type */
         msg->ibm_credits  = credits;
         /*   ibm_nob */
@@ -102,12 +216,10 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob)
 {
         const int hdr_size = offsetof(kib_msg_t, ibm_u);
         __u32     msg_cksum;
-        int       flip;
+        __u16     version;
         int       msg_nob;
-#if !IBLND_MAP_ON_DEMAND
-        int       i;
-        int       n;
-#endif
+        int       flip;
+
         /* 6 bytes are enough to have received magic + version */
         if (nob < 6) {
                 CERROR("Short message: %d\n", nob);
@@ -123,9 +235,10 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob)
                 return -EPROTO;
         }
 
-        if (msg->ibm_version !=
-            (flip ? __swab16(IBLND_MSG_VERSION) : IBLND_MSG_VERSION)) {
-                CERROR("Bad version: %d\n", msg->ibm_version);
+        version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
+        if (version != IBLND_MSG_VERSION &&
+            version != IBLND_MSG_VERSION_1) {
+                CERROR("Bad version: %x\n", version);
                 return -EPROTO;
         }
 
@@ -149,14 +262,15 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob)
                 CERROR("Bad checksum\n");
                 return -EPROTO;
         }
+
         msg->ibm_cksum = msg_cksum;
 
         if (flip) {
                 /* leave magic unflipped as a clue to peer endianness */
-                __swab16s(&msg->ibm_version);
+                msg->ibm_version = version;
                 CLASSERT (sizeof(msg->ibm_type) == 1);
                 CLASSERT (sizeof(msg->ibm_credits) == 1);
-                msg->ibm_nob = msg_nob;
+                msg->ibm_nob     = msg_nob;
                 __swab64s(&msg->ibm_srcnid);
                 __swab64s(&msg->ibm_srcstamp);
                 __swab64s(&msg->ibm_dstnid);
@@ -168,128 +282,37 @@ kiblnd_unpack_msg(kib_msg_t *msg, int nob)
                 return -EPROTO;
         }
 
+        if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
+                CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
+                       msg_nob, kiblnd_msgtype2size(msg->ibm_type));
+                return -EPROTO;
+        }
+
         switch (msg->ibm_type) {
         default:
                 CERROR("Unknown message type %x\n", msg->ibm_type);
                 return -EPROTO;
 
         case IBLND_MSG_NOOP:
-                break;
-
         case IBLND_MSG_IMMEDIATE:
-                if (msg_nob < offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0])) {
-                        CERROR("Short IMMEDIATE: %d(%d)\n", msg_nob,
-                               (int)offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]));
-                        return -EPROTO;
-                }
-                break;
-
         case IBLND_MSG_PUT_REQ:
-                if (msg_nob < hdr_size + sizeof(msg->ibm_u.putreq)) {
-                        CERROR("Short PUT_REQ: %d(%d)\n", msg_nob,
-                               (int)(hdr_size + sizeof(msg->ibm_u.putreq)));
-                        return -EPROTO;
-                }
                 break;
 
         case IBLND_MSG_PUT_ACK:
-                if (msg_nob < hdr_size + sizeof(msg->ibm_u.putack)) {
-                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
-                               (int)(hdr_size + sizeof(msg->ibm_u.putack)));
-                        return -EPROTO;
-                }
-#if IBLND_MAP_ON_DEMAND
-                if (flip) {
-                        __swab64s(&msg->ibm_u.putack.ibpam_rd.rd_addr);
-                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nob);
-                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
-                }
-#else
-                if (flip) {
-                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_key);
-                        __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_nfrags);
-                }
-
-                n = msg->ibm_u.putack.ibpam_rd.rd_nfrags;
-                if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
-                        CERROR("Bad PUT_ACK nfrags: %d, should be 0 < n <= %d\n", 
-                               n, IBLND_MAX_RDMA_FRAGS);
-                        return -EPROTO;
-                }
-
-                if (msg_nob < offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n])) {
-                        CERROR("Short PUT_ACK: %d(%d)\n", msg_nob,
-                               (int)offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[n]));
-                        return -EPROTO;
-                }
-
-                if (flip) {
-                        for (i = 0; i < n; i++) {
-                                __swab32s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_nob);
-                                __swab64s(&msg->ibm_u.putack.ibpam_rd.rd_frags[i].rf_addr);
-                        }
-                }
-#endif
-                break;
-
         case IBLND_MSG_GET_REQ:
-                if (msg_nob < hdr_size + sizeof(msg->ibm_u.get)) {
-                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
-                               (int)(hdr_size + sizeof(msg->ibm_u.get)));
-                        return -EPROTO;
-                }
-#if IBLND_MAP_ON_DEMAND
-                if (flip) {
-                        __swab64s(&msg->ibm_u.get.ibgm_rd.rd_addr);
-                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nob);
-                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
-                }
-#else
-                if (flip) {
-                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
-                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nfrags);
-                }
-
-                n = msg->ibm_u.get.ibgm_rd.rd_nfrags;
-                if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
-                        CERROR("Bad GET_REQ nfrags: %d, should be 0 < n <= %d\n", 
-                               n, IBLND_MAX_RDMA_FRAGS);
+                if (kiblnd_unpack_rd(msg, flip))
                         return -EPROTO;
-                }
-                
-                if (msg_nob < offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n])) {
-                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
-                               (int)offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n]));
-                        return -EPROTO;
-                }
-                
-                if (flip)
-                        for (i = 0; i < msg->ibm_u.get.ibgm_rd.rd_nfrags; i++) {
-                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_nob);
-                                __swab64s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr);
-                        }
-#endif
                 break;
 
         case IBLND_MSG_PUT_NAK:
         case IBLND_MSG_PUT_DONE:
         case IBLND_MSG_GET_DONE:
-                if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
-                        CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
-                               (int)(hdr_size + sizeof(msg->ibm_u.completion)));
-                        return -EPROTO;
-                }
                 if (flip)
                         __swab32s(&msg->ibm_u.completion.ibcm_status);
                 break;
 
         case IBLND_MSG_CONNREQ:
         case IBLND_MSG_CONNACK:
-                if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
-                        CERROR("Short connreq/ack: %d(%d)\n", msg_nob,
-                               (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
-                        return -EPROTO;
-                }
                 if (flip) {
                         __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
                         __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
@@ -364,26 +387,6 @@ kiblnd_destroy_peer (kib_peer_t *peer)
         atomic_dec(&net->ibn_npeers);
 }
 
-void
-kiblnd_destroy_dev (kib_dev_t *dev)
-{
-        LASSERT (dev->ibd_nnets == 0);
-
-        if (!list_empty(&dev->ibd_list)) /* on kib_devs? */
-                list_del_init(&dev->ibd_list);
-
-        if (dev->ibd_mr != NULL)
-                ib_dereg_mr(dev->ibd_mr);
-
-        if (dev->ibd_pd != NULL)
-                ib_dealloc_pd(dev->ibd_pd);
-
-        if (dev->ibd_cmid != NULL)
-                rdma_destroy_id(dev->ibd_cmid);
-
-        LIBCFS_FREE(dev, sizeof(*dev));
-}
-
 kib_peer_t *
 kiblnd_find_peer_locked (lnet_nid_t nid)
 {
@@ -404,9 +407,10 @@ kiblnd_find_peer_locked (lnet_nid_t nid)
                 if (peer->ibp_nid != nid)
                         continue;
 
-                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
+                CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
                        peer, libcfs_nid2str(nid),
-                       atomic_read(&peer->ibp_refcount));
+                       atomic_read(&peer->ibp_refcount),
+                       peer->ibp_version);
                 return peer;
         }
         return NULL;
@@ -604,11 +608,12 @@ kiblnd_debug_conn (kib_conn_t *conn)
 
         spin_lock(&conn->ibc_lock);
 
-        CDEBUG(D_CONSOLE, "conn[%d] %p -> %s: \n",
+        CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
                atomic_read(&conn->ibc_refcount), conn,
-               libcfs_nid2str(conn->ibc_peer->ibp_nid));
-        CDEBUG(D_CONSOLE, "   state %d nposted %d cred %d o_cred %d r_cred %d\n",
-               conn->ibc_state, conn->ibc_nsends_posted, conn->ibc_credits, 
+               conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+        CDEBUG(D_CONSOLE, "   state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
+               conn->ibc_state, conn->ibc_noops_posted,
+               conn->ibc_nsends_posted, conn->ibc_credits,
                conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
         CDEBUG(D_CONSOLE, "   comms_err %d\n", conn->ibc_comms_error);
 
@@ -616,10 +621,6 @@ kiblnd_debug_conn (kib_conn_t *conn)
         list_for_each(tmp, &conn->ibc_early_rxs)
                 kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
 
-        CDEBUG(D_CONSOLE, "   tx_noops:\n");
-        list_for_each(tmp, &conn->ibc_tx_noops)
-                kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
-
         CDEBUG(D_CONSOLE, "   tx_queue_nocred:\n");
         list_for_each(tmp, &conn->ibc_tx_queue_nocred)
                 kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
@@ -637,14 +638,56 @@ kiblnd_debug_conn (kib_conn_t *conn)
                 kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
 
         CDEBUG(D_CONSOLE, "   rxs:\n");
-        for (i = 0; i < IBLND_RX_MSGS; i++)
+        for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
                 kiblnd_debug_rx(&conn->ibc_rxs[i]);
 
         spin_unlock(&conn->ibc_lock);
 }
 
+int
+kiblnd_translate_mtu(int value)
+{
+        switch (value) {
+        default:
+                return -1;
+        case 0:
+                return 0;
+        case 256:
+                return IB_MTU_256;
+        case 512:
+                return IB_MTU_512;
+        case 1024:
+                return IB_MTU_1024;
+        case 2048:
+                return IB_MTU_2048;
+        case 4096:
+                return IB_MTU_4096;
+        }
+}
+
+static void
+kiblnd_setup_mtu(struct rdma_cm_id *cmid)
+{
+        unsigned long flags;
+        int           mtu;
+
+        /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
+        if (cmid->route.path_rec == NULL)
+                return;
+
+        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+        mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
+        LASSERT (mtu >= 0);
+        if (mtu != 0)
+                cmid->route.path_rec->mtu = mtu;
+
+        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+}
+
 kib_conn_t *
-kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state)
+kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+                   int state, int version)
 {
         /* CAVEAT EMPTOR:
          * If the new conn is created successfully it takes over the caller's
@@ -653,15 +696,13 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state)
          * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
          * to destroy 'cmid' here since I'm called from the CM which still has
          * its ref on 'cmid'). */
-        kib_conn_t             *conn;
         kib_net_t              *net = peer->ibp_ni->ni_data;
-        int                     i;
-        int                     page_offset;
-        int                     ipage;
-        int                     rc;
-        struct ib_cq           *cq;
         struct ib_qp_init_attr *init_qp_attr;
+        kib_conn_t             *conn;
+        struct ib_cq           *cq;
         unsigned long           flags;
+        int                     rc;
+        int                     i;
 
         LASSERT (net != NULL);
         LASSERT (!in_interrupt());
@@ -683,12 +724,12 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state)
         memset(conn, 0, sizeof(*conn)); /* zero flags, NULL pointers etc... */
 
         conn->ibc_state = IBLND_CONN_INIT;
+        conn->ibc_version = version;
         conn->ibc_peer = peer;                  /* I take the caller's ref */
         cmid->context = conn;                   /* for future CM callbacks */
         conn->ibc_cmid = cmid;
 
         INIT_LIST_HEAD(&conn->ibc_early_rxs);
-        INIT_LIST_HEAD(&conn->ibc_tx_noops);
         INIT_LIST_HEAD(&conn->ibc_tx_queue);
         INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
         INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
@@ -702,70 +743,50 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state)
         }
         memset(conn->ibc_connvars, 0, sizeof(*conn->ibc_connvars));
 
-        LIBCFS_ALLOC(conn->ibc_rxs, IBLND_RX_MSGS * sizeof(kib_rx_t));
+        LIBCFS_ALLOC(conn->ibc_rxs, IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
         if (conn->ibc_rxs == NULL) {
                 CERROR("Cannot allocate RX buffers\n");
                 goto failed_2;
         }
-        memset(conn->ibc_rxs, 0, IBLND_RX_MSGS * sizeof(kib_rx_t));
+        memset(conn->ibc_rxs, 0, IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
 
-        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, IBLND_RX_MSG_PAGES);
+        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages,
+                                IBLND_RX_MSG_PAGES(version));
         if (rc != 0)
                 goto failed_2;
 
-        for (i = ipage = page_offset = 0; i < IBLND_RX_MSGS; i++) {
-                struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
-                kib_rx_t    *rx = &conn->ibc_rxs[i];
-
-                rx->rx_conn = conn;
-                rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) +
-                                           page_offset);
-                rx->rx_msgaddr = kiblnd_dma_map_single(cmid->device,
-                                                       rx->rx_msg, IBLND_MSG_SIZE,
-                                                       DMA_FROM_DEVICE);
-                KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
-
-                CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n",
-                       i, rx->rx_msg, rx->rx_msgaddr,
-                       lnet_page2phys(page) + page_offset);
-
-                page_offset += IBLND_MSG_SIZE;
-                LASSERT (page_offset <= PAGE_SIZE);
-
-                if (page_offset == PAGE_SIZE) {
-                        page_offset = 0;
-                        ipage++;
-                        LASSERT (ipage <= IBLND_RX_MSG_PAGES);
-                }
-        }
+        kiblnd_map_rx_descs(conn);
 
 #ifdef HAVE_OFED_IB_COMP_VECTOR
         cq = ib_create_cq(cmid->device,
                           kiblnd_cq_completion, kiblnd_cq_event, conn,
-                          IBLND_CQ_ENTRIES(), 0);
+                          IBLND_CQ_ENTRIES(version), 0);
 #else
         cq = ib_create_cq(cmid->device,
                           kiblnd_cq_completion, kiblnd_cq_event, conn,
-                          IBLND_CQ_ENTRIES());
+                          IBLND_CQ_ENTRIES(version));
 #endif
-        if (!IS_ERR(cq)) {
-                conn->ibc_cq = cq;
-        } else {
-                CERROR("Can't create CQ: %ld\n", PTR_ERR(cq));
+        if (IS_ERR(cq)) {
+                CERROR("Can't create CQ: %ld, cqe: %d\n",
+                       PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
                 goto failed_2;
         }
 
+        conn->ibc_cq = cq;
+
         rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
         if (rc != 0) {
                 CERROR("Can't request completion notificiation: %d\n", rc);
                 goto failed_2;
         }
 
+        kiblnd_setup_mtu(cmid);
+
         memset(init_qp_attr, 0, sizeof(*init_qp_attr));
         init_qp_attr->event_handler = kiblnd_qp_event;
         init_qp_attr->qp_context = conn;
-        init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS;
-        init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS;
+        init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
+        init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
         init_qp_attr->cap.max_send_sge = 1;
         init_qp_attr->cap.max_recv_sge = 1;
         init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
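
IBLND_RX_MSGS(), IBLND_RX_MSG_PAGES(), IBLND_CQ_ENTRIES(), IBLND_SEND_WRS()
and IBLND_RECV_WRS() now take the connection version because V2 sizes its
queues from the peer_credits tunable while V1 keeps the old fixed depth.  A
sketch of how such macros could be layered; the real definitions are in
o2iblnd.h, IBLND_MSG_QUEUE_SIZE_V1 and the tunable name are assumptions,
and V2's extra room for OOB NOOPs is omitted for brevity:

    /* Sketch of version-aware queue sizing, not the verbatim macros. */
    #define IBLND_MSG_QUEUE_SIZE(v)                                   \
            ((v) == IBLND_MSG_VERSION_1 ? IBLND_MSG_QUEUE_SIZE_V1 :   \
             *kiblnd_tunables.kib_peercredits)
    #define IBLND_RX_MSGS(v)       (IBLND_MSG_QUEUE_SIZE(v) * 2)
    #define IBLND_RX_MSG_PAGES(v)                                     \
            ((IBLND_RX_MSGS(v) * IBLND_MSG_SIZE + PAGE_SIZE - 1) /    \
             PAGE_SIZE)
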
@@ -773,45 +794,22 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state)
         init_qp_attr->send_cq = cq;
         init_qp_attr->recv_cq = cq;
 
-        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        switch (*kiblnd_tunables.kib_ib_mtu) {
-        case 0: /* don't force path MTU */
-                break;
-        case 256:
-                cmid->route.path_rec->mtu = IB_MTU_256;
-                break;
-        case 512:
-                cmid->route.path_rec->mtu = IB_MTU_512;
-                break;
-        case 1024:
-                cmid->route.path_rec->mtu = IB_MTU_1024;
-                break;
-        case 2048:
-                cmid->route.path_rec->mtu = IB_MTU_2048;
-                break;
-        case 4096:
-                cmid->route.path_rec->mtu = IB_MTU_4096;
-                break;
-        default:
-                LBUG();
-                break;
-        }
-        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
         rc = rdma_create_qp(cmid, net->ibn_dev->ibd_pd, init_qp_attr);
         if (rc != 0) {
-                CERROR("Can't create QP: %d\n", rc);
+                CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
+                       rc, init_qp_attr->cap.max_send_wr,
+                       init_qp_attr->cap.max_recv_wr);
                 goto failed_2;
         }
 
         LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 
         /* 1 ref for caller and each rxmsg */
-        atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS);
-        conn->ibc_nrx = IBLND_RX_MSGS;
+        atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
+        conn->ibc_nrx = IBLND_RX_MSGS(version);
 
         /* post receives */
-        for (i = 0; i < IBLND_RX_MSGS; i++) {
+        for (i = 0; i < IBLND_RX_MSGS(version); i++) {
                 rc = kiblnd_post_rx(&conn->ibc_rxs[i],
                                     IBLND_POSTRX_NO_CREDIT);
                 if (rc != 0) {
@@ -823,18 +821,24 @@ kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, int state)
                         /* correct # of posted buffers 
                          * NB locking needed now I'm racing with completion */
                         spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
-                        conn->ibc_nrx -= IBLND_RX_MSGS - i;
+                        conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
                         spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                                flags);
 
+                        /* cmid will be destroyed by the CM (OFED) after the
+                         * CM callback returns, so we must not reference it
+                         * again (e.g. in kiblnd_connd()->kiblnd_destroy_conn) */
+                        rdma_destroy_qp(conn->ibc_cmid);
+                        conn->ibc_cmid = NULL;
+
                         /* Drop my own and unused rxbuffer refcounts */
-                        while (i++ <= IBLND_RX_MSGS)
+                        while (i++ <= IBLND_RX_MSGS(version))
                                 kiblnd_conn_decref(conn);
 
                         return NULL;
                 }
         }
-        
+
         /* Init successful! */
         LASSERT (state == IBLND_CONN_ACTIVE_CONNECT ||
                  state == IBLND_CONN_PASSIVE_WAIT);
@@ -858,16 +862,15 @@ kiblnd_destroy_conn (kib_conn_t *conn)
         struct rdma_cm_id *cmid = conn->ibc_cmid;
         kib_peer_t        *peer = conn->ibc_peer;
         int                rc;
-        int                i;
 
         LASSERT (!in_interrupt());
         LASSERT (atomic_read(&conn->ibc_refcount) == 0);
         LASSERT (list_empty(&conn->ibc_early_rxs));
-        LASSERT (list_empty(&conn->ibc_tx_noops));
         LASSERT (list_empty(&conn->ibc_tx_queue));
         LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd));
         LASSERT (list_empty(&conn->ibc_tx_queue_nocred));
         LASSERT (list_empty(&conn->ibc_active_txs));
+        LASSERT (conn->ibc_noops_posted == 0);
         LASSERT (conn->ibc_nsends_posted == 0);
 
         switch (conn->ibc_state) {
@@ -884,7 +887,8 @@ kiblnd_destroy_conn (kib_conn_t *conn)
                 break;
         }
 
-        if (cmid->qp != NULL)
+        /* conn->ibc_cmid might be destroyed by CM already */
+        if (cmid != NULL && cmid->qp != NULL)
                 rdma_destroy_qp(cmid);
 
         if (conn->ibc_cq != NULL) {
@@ -893,26 +897,12 @@ kiblnd_destroy_conn (kib_conn_t *conn)
                         CWARN("Error destroying CQ: %d\n", rc);
         }
 
-        if (conn->ibc_rx_pages != NULL) {
-                LASSERT (conn->ibc_rxs != NULL);
-
-                for (i = 0; i < IBLND_RX_MSGS; i++) {
-                        kib_rx_t *rx = &conn->ibc_rxs[i];
-
-                        LASSERT (rx->rx_nob >= 0); /* not posted */
-
-                        kiblnd_dma_unmap_single(cmid->device,
-                                                KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
-                                                                  rx->rx_msgaddr),
-                                                IBLND_MSG_SIZE, DMA_FROM_DEVICE);
-                }
-
-                kiblnd_free_pages(conn->ibc_rx_pages);
-        }
+        if (conn->ibc_rx_pages != NULL)
+                kiblnd_unmap_rx_descs(conn);
 
         if (conn->ibc_rxs != NULL) {
                 LIBCFS_FREE(conn->ibc_rxs,
-                            IBLND_RX_MSGS * sizeof(kib_rx_t));
+                            IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t));
         }
 
         if (conn->ibc_connvars != NULL)
@@ -941,15 +931,21 @@ kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
         list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                 conn = list_entry(ctmp, kib_conn_t, ibc_list);
 
-                count++;
+                CDEBUG(D_NET, "Closing conn -> %s, "
+                              "version: %x, reason: %d\n",
+                       libcfs_nid2str(peer->ibp_nid),
+                       conn->ibc_version, why);
+
                 kiblnd_close_conn_locked(conn, why);
+                count++;
         }
 
         return count;
 }
 
 int
-kiblnd_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
+kiblnd_close_stale_conns_locked (kib_peer_t *peer,
+                                 int version, __u64 incarnation)
 {
         kib_conn_t         *conn;
         struct list_head   *ctmp;
@@ -959,15 +955,18 @@ kiblnd_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
         list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                 conn = list_entry(ctmp, kib_conn_t, ibc_list);
 
-                if (conn->ibc_incarnation == incarnation)
+                if (conn->ibc_version     == version &&
+                    conn->ibc_incarnation == incarnation)
                         continue;
 
-                CDEBUG(D_NET, "Closing stale conn -> %s incarnation:"LPX64"("LPX64")\n",
+                CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
+                              "incarnation:"LPX64"(%x, "LPX64")\n",
                        libcfs_nid2str(peer->ibp_nid),
-                       conn->ibc_incarnation, incarnation);
+                       conn->ibc_version, conn->ibc_incarnation,
+                       version, incarnation);
 
-                count++;
                 kiblnd_close_conn_locked(conn, -ESTALE);
+                count++;
         }
 
         return count;
@@ -1112,6 +1111,8 @@ kiblnd_free_pages (kib_pages_t *p)
         int         npages = p->ibp_npages;
         int         i;
 
+        LASSERT (p->ibp_device == NULL);
+
         for (i = 0; i < npages; i++)
                 if (p->ibp_pages[i] != NULL)
                         __free_page(p->ibp_pages[i]);
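
kiblnd_free_pages() now asserts that ibp_device is NULL, i.e. that the pages
were unmapped first.  That implies kib_pages_t gained a back-pointer that is
recorded at map time; a sketch of the assumed layout:

    /* Assumed shape of kib_pages_t after this patch; the real definition
     * is in o2iblnd.h.  ibp_device is set by kiblnd_map_rx_descs() /
     * kiblnd_map_tx_descs() and cleared on unmap, so the unmap path can
     * reach the right ib_device even after the cmid is gone. */
    typedef struct {
            int               ibp_npages;   /* # pages in ibp_pages[] */
            struct ib_device *ibp_device;   /* device they are mapped to */
            struct page      *ibp_pages[0]; /* the pages themselves */
    } kib_pages_t;
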
@@ -1148,147 +1149,104 @@ kiblnd_alloc_pages (kib_pages_t **pp, int npages)
 }
 
 void
-kiblnd_free_tx_descs (lnet_ni_t *ni)
+kiblnd_unmap_rx_descs(kib_conn_t *conn)
 {
-        int        i;
-        kib_net_t *net = ni->ni_data;
+        kib_rx_t *rx;
+        int       i;
 
-        LASSERT (net != NULL);
+        LASSERT (conn->ibc_rxs != NULL);
+        LASSERT (conn->ibc_rx_pages->ibp_device != NULL);
 
-        if (net->ibn_tx_descs != NULL) {
-                for (i = 0; i < IBLND_TX_MSGS(); i++) {
-                        kib_tx_t *tx = &net->ibn_tx_descs[i];
+        for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+                rx = &conn->ibc_rxs[i];
 
-#if IBLND_MAP_ON_DEMAND
-                        if (tx->tx_pages != NULL)
-                                LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV *
-                                            sizeof(*tx->tx_pages));
-#else
-                        if (tx->tx_wrq != NULL)
-                                LIBCFS_FREE(tx->tx_wrq, 
-                                            (1 + IBLND_MAX_RDMA_FRAGS) * 
-                                            sizeof(*tx->tx_wrq));
-
-                        if (tx->tx_sge != NULL)
-                                LIBCFS_FREE(tx->tx_sge, 
-                                            (1 + IBLND_MAX_RDMA_FRAGS) * 
-                                            sizeof(*tx->tx_sge));
-
-                        if (tx->tx_rd != NULL)
-                                LIBCFS_FREE(tx->tx_rd, 
-                                            offsetof(kib_rdma_desc_t, 
-                                               rd_frags[IBLND_MAX_RDMA_FRAGS]));
-
-                        if (tx->tx_frags != NULL)
-                                LIBCFS_FREE(tx->tx_frags, 
-                                            IBLND_MAX_RDMA_FRAGS *
-                                            sizeof(*tx->tx_frags));
-#endif
-                }
+                LASSERT (rx->rx_nob >= 0); /* not posted */
 
-                LIBCFS_FREE(net->ibn_tx_descs,
-                            IBLND_TX_MSGS() * sizeof(kib_tx_t));
+                kiblnd_dma_unmap_single(conn->ibc_rx_pages->ibp_device,
+                                        KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
+                                                          rx->rx_msgaddr),
+                                        IBLND_MSG_SIZE, DMA_FROM_DEVICE);
         }
 
-        if (net->ibn_tx_pages != NULL)
-                kiblnd_free_pages(net->ibn_tx_pages);
+        conn->ibc_rx_pages->ibp_device = NULL;
+
+        kiblnd_free_pages(conn->ibc_rx_pages);
+
+        conn->ibc_rx_pages = NULL;
 }
 
-int
-kiblnd_alloc_tx_descs (lnet_ni_t *ni)
+void
+kiblnd_map_rx_descs(kib_conn_t *conn)
 {
-        int        i;
-        int        rc;
-        kib_net_t *net = ni->ni_data;
-
-        LASSERT (net != NULL);
+        kib_rx_t       *rx;
+        struct page    *pg;
+        int             pg_off;
+        int             ipg;
+        int             i;
 
-        rc = kiblnd_alloc_pages(&net->ibn_tx_pages, IBLND_TX_MSG_PAGES());
+        for (pg_off = ipg = i = 0;
+             i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+                pg = conn->ibc_rx_pages->ibp_pages[ipg];
+                rx = &conn->ibc_rxs[i];
 
-        if (rc != 0) {
-                CERROR("Can't allocate tx pages\n");
-                return rc;
-        }
+                rx->rx_conn = conn;
+                rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
 
-        LIBCFS_ALLOC (net->ibn_tx_descs,
-                      IBLND_TX_MSGS() * sizeof(kib_tx_t));
-        if (net->ibn_tx_descs == NULL) {
-                CERROR("Can't allocate %d tx descriptors\n", IBLND_TX_MSGS());
-                return -ENOMEM;
-        }
+                rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_cmid->device,
+                                                       rx->rx_msg, IBLND_MSG_SIZE,
+                                                       DMA_FROM_DEVICE);
+                LASSERT (!kiblnd_dma_mapping_error(conn->ibc_cmid->device,
+                                                   rx->rx_msgaddr));
+                KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
 
-        memset(net->ibn_tx_descs, 0,
-               IBLND_TX_MSGS() * sizeof(kib_tx_t));
+                CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n",
+                       i, rx->rx_msg, rx->rx_msgaddr,
+                       lnet_page2phys(pg) + pg_off);
 
-        for (i = 0; i < IBLND_TX_MSGS(); i++) {
-                kib_tx_t *tx = &net->ibn_tx_descs[i];
+                pg_off += IBLND_MSG_SIZE;
+                LASSERT (pg_off <= PAGE_SIZE);
 
-#if IBLND_MAP_ON_DEMAND
-                LIBCFS_ALLOC(tx->tx_pages, LNET_MAX_IOV *
-                             sizeof(*tx->tx_pages));
-                if (tx->tx_pages == NULL) {
-                        CERROR("Can't allocate phys page vector[%d]\n",
-                               LNET_MAX_IOV);
-                        return -ENOMEM;
+                if (pg_off == PAGE_SIZE) {
+                        pg_off = 0;
+                        ipg++;
+                        LASSERT (ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
                 }
-#else
-                LIBCFS_ALLOC(tx->tx_wrq,
-                             (1 + IBLND_MAX_RDMA_FRAGS) *
-                             sizeof(*tx->tx_wrq));
-                if (tx->tx_wrq == NULL)
-                        return -ENOMEM;
-
-                LIBCFS_ALLOC(tx->tx_sge,
-                             (1 + IBLND_MAX_RDMA_FRAGS) *
-                             sizeof(*tx->tx_sge));
-                if (tx->tx_sge == NULL)
-                        return -ENOMEM;
-
-                LIBCFS_ALLOC(tx->tx_rd,
-                             offsetof(kib_rdma_desc_t,
-                                      rd_frags[IBLND_MAX_RDMA_FRAGS]));
-                if (tx->tx_rd == NULL)
-                        return -ENOMEM;
-
-                LIBCFS_ALLOC(tx->tx_frags,
-                             IBLND_MAX_RDMA_FRAGS * 
-                             sizeof(*tx->tx_frags));
-                if (tx->tx_frags == NULL)
-                        return -ENOMEM;
-#endif
         }
 
-        return 0;
+        conn->ibc_rx_pages->ibp_device = conn->ibc_cmid->device;
 }
 
 void
-kiblnd_unmap_tx_descs (lnet_ni_t *ni)
+kiblnd_unmap_tx_descs(lnet_ni_t *ni)
 {
         int             i;
         kib_tx_t       *tx;
         kib_net_t      *net = ni->ni_data;
 
-        LASSERT (net != NULL);
+        LASSERT (net->ibn_tx_pages != NULL);
+        LASSERT (net->ibn_tx_pages->ibp_device != NULL);
 
         for (i = 0; i < IBLND_TX_MSGS(); i++) {
                 tx = &net->ibn_tx_descs[i];
 
-                kiblnd_dma_unmap_single(net->ibn_dev->ibd_cmid->device,
+                kiblnd_dma_unmap_single(net->ibn_tx_pages->ibp_device,
                                         KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
                                                           tx->tx_msgaddr),
                                         IBLND_MSG_SIZE, DMA_TO_DEVICE);
         }
+
+        net->ibn_tx_pages->ibp_device = NULL;
 }
 
 void
 kiblnd_map_tx_descs (lnet_ni_t *ni)
 {
-        int             ipage = 0;
-        int             page_offset = 0;
-        int             i;
+        kib_net_t      *net = ni->ni_data;
         struct page    *page;
         kib_tx_t       *tx;
-        kib_net_t      *net = ni->ni_data;
+        int             page_offset;
+        int             ipage;
+        int             i;
 
         LASSERT (net != NULL);
 
@@ -1298,7 +1256,8 @@ kiblnd_map_tx_descs (lnet_ni_t *ni)
         /* No fancy arithmetic when we do the buffer calculations */
         CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);
 
-        for (i = 0; i < IBLND_TX_MSGS(); i++) {
+
+        for (ipage = page_offset = i = 0; i < IBLND_TX_MSGS(); i++) {
                 page = net->ibn_tx_pages->ibp_pages[ipage];
                 tx = &net->ibn_tx_descs[i];
 
@@ -1308,6 +1267,8 @@ kiblnd_map_tx_descs (lnet_ni_t *ni)
                 tx->tx_msgaddr = kiblnd_dma_map_single(
                         net->ibn_dev->ibd_cmid->device,
                         tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE);
+                LASSERT (!kiblnd_dma_mapping_error(net->ibn_dev->ibd_cmid->device,
+                                                   tx->tx_msgaddr));
                 KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
 
                 list_add(&tx->tx_list, &net->ibn_idle_txs);
@@ -1321,6 +1282,534 @@ kiblnd_map_tx_descs (lnet_ni_t *ni)
                         LASSERT (ipage <= IBLND_TX_MSG_PAGES());
                 }
         }
+
+        net->ibn_tx_pages->ibp_device = net->ibn_dev->ibd_cmid->device;
+}
+
+void
+kiblnd_free_tx_descs (lnet_ni_t *ni)
+{
+        int        i;
+        kib_net_t *net = ni->ni_data;
+
+        LASSERT (net != NULL);
+
+        if (net->ibn_tx_pages != NULL)
+                kiblnd_free_pages(net->ibn_tx_pages);
+
+        if (net->ibn_tx_descs == NULL)
+                return;
+
+        for (i = 0; i < IBLND_TX_MSGS(); i++) {
+                kib_tx_t *tx = &net->ibn_tx_descs[i];
+
+                if (tx->tx_pages != NULL)
+                        LIBCFS_FREE(tx->tx_pages,
+                                    LNET_MAX_IOV *
+                                    sizeof(*tx->tx_pages));
+
+                if (tx->tx_ipb != NULL)
+                        LIBCFS_FREE(tx->tx_ipb,
+                                    IBLND_MAX_RDMA_FRAGS *
+                                    sizeof(*tx->tx_ipb));
+
+                if (tx->tx_frags != NULL)
+                        LIBCFS_FREE(tx->tx_frags,
+                                    IBLND_MAX_RDMA_FRAGS *
+                                            sizeof(*tx->tx_frags));
+
+                if (tx->tx_wrq != NULL)
+                        LIBCFS_FREE(tx->tx_wrq,
+                                    (1 + IBLND_MAX_RDMA_FRAGS) *
+                                    sizeof(*tx->tx_wrq));
+
+                if (tx->tx_sge != NULL)
+                        LIBCFS_FREE(tx->tx_sge,
+                                    (1 + IBLND_MAX_RDMA_FRAGS) *
+                                    sizeof(*tx->tx_sge));
+
+                if (tx->tx_rd != NULL)
+                        LIBCFS_FREE(tx->tx_rd,
+                                    offsetof(kib_rdma_desc_t,
+                                             rd_frags[IBLND_MAX_RDMA_FRAGS]));
+        }
+
+        LIBCFS_FREE(net->ibn_tx_descs,
+                    IBLND_TX_MSGS() * sizeof(kib_tx_t));
+}
+
+int
+kiblnd_alloc_tx_descs (lnet_ni_t *ni)
+{
+        int        i;
+        int        rc;
+        kib_net_t *net = ni->ni_data;
+
+        LASSERT (net != NULL);
+
+        rc = kiblnd_alloc_pages(&net->ibn_tx_pages, IBLND_TX_MSG_PAGES());
+
+        if (rc != 0) {
+                CERROR("Can't allocate tx pages\n");
+                return rc;
+        }
+
+        LIBCFS_ALLOC (net->ibn_tx_descs,
+                      IBLND_TX_MSGS() * sizeof(kib_tx_t));
+        if (net->ibn_tx_descs == NULL) {
+                CERROR("Can't allocate %d tx descriptors\n", IBLND_TX_MSGS());
+                return -ENOMEM;
+        }
+
+        memset(net->ibn_tx_descs, 0,
+               IBLND_TX_MSGS() * sizeof(kib_tx_t));
+
+        for (i = 0; i < IBLND_TX_MSGS(); i++) {
+                kib_tx_t *tx = &net->ibn_tx_descs[i];
+
+                if (net->ibn_fmrpool != NULL) {
+                        LIBCFS_ALLOC(tx->tx_pages, LNET_MAX_IOV *
+                                     sizeof(*tx->tx_pages));
+                        if (tx->tx_pages == NULL)
+                                return -ENOMEM;
+                }
+
+                if (net->ibn_pmrpool != NULL) {
+                        LIBCFS_ALLOC(tx->tx_ipb,
+                                     IBLND_MAX_RDMA_FRAGS *
+                                     sizeof(*tx->tx_ipb));
+                        if (tx->tx_ipb == NULL)
+                                return -ENOMEM;
+                }
+
+                LIBCFS_ALLOC(tx->tx_frags,
+                             IBLND_MAX_RDMA_FRAGS *
+                             sizeof(*tx->tx_frags));
+                if (tx->tx_frags == NULL)
+                        return -ENOMEM;
+
+                LIBCFS_ALLOC(tx->tx_wrq,
+                             (1 + IBLND_MAX_RDMA_FRAGS) *
+                             sizeof(*tx->tx_wrq));
+                if (tx->tx_wrq == NULL)
+                        return -ENOMEM;
+
+                LIBCFS_ALLOC(tx->tx_sge,
+                             (1 + IBLND_MAX_RDMA_FRAGS) *
+                             sizeof(*tx->tx_sge));
+                if (tx->tx_sge == NULL)
+                        return -ENOMEM;
+
+                LIBCFS_ALLOC(tx->tx_rd,
+                             offsetof(kib_rdma_desc_t,
+                                      rd_frags[IBLND_MAX_RDMA_FRAGS]));
+                if (tx->tx_rd == NULL)
+                        return -ENOMEM;
+        }
+
+        return 0;
+}
+
+struct ib_mr *
+kiblnd_find_dma_mr(kib_net_t *net, __u64 addr, __u64 size)
+{
+        __u64   index;
+
+        LASSERT (net->ibn_dev->ibd_mrs[0] != NULL);
+
+        if (net->ibn_dev->ibd_nmrs == 1)
+                return net->ibn_dev->ibd_mrs[0];
+
+        index = addr >> net->ibn_dev->ibd_mr_shift;
+
+        if (index <  net->ibn_dev->ibd_nmrs &&
+            index == ((addr + size - 1) >> net->ibn_dev->ibd_mr_shift))
+                return net->ibn_dev->ibd_mrs[index];
+
+        return NULL;
+}
+
+struct ib_mr *
+kiblnd_find_rd_dma_mr(kib_net_t *net, kib_rdma_desc_t *rd)
+{
+        struct ib_mr *prev_mr;
+        struct ib_mr *mr;
+        int           i;
+
+        LASSERT (net->ibn_dev->ibd_mrs[0] != NULL);
+
+        if (*kiblnd_tunables.kib_map_on_demand > 0 &&
+            *kiblnd_tunables.kib_map_on_demand < rd->rd_nfrags)
+                return NULL;
+
+        if (net->ibn_dev->ibd_nmrs == 1)
+                return net->ibn_dev->ibd_mrs[0];
+
+        for (i = 0, mr = prev_mr = NULL;
+             i < rd->rd_nfrags; i++) {
+                mr = kiblnd_find_dma_mr(net,
+                                        rd->rd_frags[i].rf_addr,
+                                        rd->rd_frags[i].rf_nob);
+                if (prev_mr == NULL)
+                        prev_mr = mr;
+
+                if (mr == NULL || prev_mr != mr) {
+                        /* Can't be covered by a single MR */
+                        mr = NULL;
+                        break;
+                }
+        }
+
+        return mr;
+}
+
+void
+kiblnd_dev_cleanup(kib_dev_t *ibdev)
+{
+        int     i;
+
+        if (ibdev->ibd_mrs == NULL)
+                return;
+
+        for (i = 0; i < ibdev->ibd_nmrs; i++) {
+                if (ibdev->ibd_mrs[i] == NULL)
+                        break;
+
+                ib_dereg_mr(ibdev->ibd_mrs[i]);
+        }
+
+        LIBCFS_FREE(ibdev->ibd_mrs, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs);
+        ibdev->ibd_mrs = NULL;
+}
+
+int
+kiblnd_ib_create_fmr_pool(kib_dev_t *ibdev, struct ib_fmr_pool **fmrpp)
+{
+        /* FMR pool for RDMA */
+        struct ib_fmr_pool      *fmrpool;
+        struct ib_fmr_pool_param param = {
+                .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
+                .page_shift        = PAGE_SHIFT,
+                .access            = (IB_ACCESS_LOCAL_WRITE |
+                                      IB_ACCESS_REMOTE_WRITE),
+                .pool_size         = *kiblnd_tunables.kib_fmr_pool_size,
+                .dirty_watermark   = *kiblnd_tunables.kib_fmr_flush_trigger,
+                .flush_function    = NULL,
+                .flush_arg         = NULL,
+                .cache             = *kiblnd_tunables.kib_fmr_cache};
+
+        if (*kiblnd_tunables.kib_fmr_pool_size <
+            *kiblnd_tunables.kib_ntx) {
+                CERROR("Can't set fmr pool size (%d) < ntx(%d)\n",
+                       *kiblnd_tunables.kib_fmr_pool_size,
+                       *kiblnd_tunables.kib_ntx);
+                return -EINVAL;
+        }
+
+        fmrpool = ib_create_fmr_pool(ibdev->ibd_pd, &param);
+        if (IS_ERR(fmrpool))
+                return PTR_ERR(fmrpool);
+
+        *fmrpp = fmrpool;
+
+        return 0;
+}
+
+void
+kiblnd_phys_mr_unmap(kib_net_t *net, kib_phys_mr_t *pmr)
+{
+        kib_phys_mr_pool_t  *pool = net->ibn_pmrpool;
+        struct ib_mr        *mr;
+
+        spin_lock(&pool->ibmp_lock);
+
+        mr = pmr->ibpm_mr;
+
+        list_add(&pmr->ibpm_link, &pool->ibmp_free_list);
+        pool->ibmp_allocated --;
+
+        spin_unlock(&pool->ibmp_lock);
+
+        if (mr != NULL)
+                ib_dereg_mr(mr);
+}
+
+kib_phys_mr_t *
+kiblnd_phys_mr_map(kib_net_t *net, kib_rdma_desc_t *rd,
+                   struct ib_phys_buf *ipb, __u64 *iova)
+{
+        kib_phys_mr_pool_t  *pool  = net->ibn_pmrpool;
+        kib_phys_mr_t       *pmr;
+        int                  i;
+
+        LASSERT (ipb   != NULL);
+
+        spin_lock(&pool->ibmp_lock);
+        if (list_empty(&pool->ibmp_free_list)) {
+                spin_unlock(&pool->ibmp_lock);
+                CERROR("pre-allocated MRs is not enough\n");
+
+                return NULL;
+        }
+
+        pmr = list_entry(pool->ibmp_free_list.next,
+                           kib_phys_mr_t, ibpm_link);
+        list_del_init(&pmr->ibpm_link);
+        pool->ibmp_allocated ++;
+
+        spin_unlock(&pool->ibmp_lock);
+
+        for (i = 0; i < rd->rd_nfrags; i ++) {
+                ipb[i].addr = rd->rd_frags[i].rf_addr;
+                ipb[i].size = rd->rd_frags[i].rf_nob;
+        }
+
+        pmr->ibpm_mr = ib_reg_phys_mr(net->ibn_dev->ibd_pd, ipb,
+                                      rd->rd_nfrags,
+                                      IB_ACCESS_LOCAL_WRITE |
+                                      IB_ACCESS_REMOTE_WRITE,
+                                      iova);
+        if (!IS_ERR(pmr->ibpm_mr)) {
+                pmr->ibpm_iova = *iova;
+                return pmr;
+        }
+
+        CERROR("Failed ib_reg_phys_mr: %ld\n", PTR_ERR(pmr->ibpm_mr));
+        pmr->ibpm_mr = NULL;
+
+        spin_lock(&pool->ibmp_lock);
+
+        list_add(&pmr->ibpm_link, &pool->ibmp_free_list);
+        pool->ibmp_allocated --;
+
+        spin_unlock(&pool->ibmp_lock);
+
+        return NULL;
+}
+
+void
+kiblnd_destroy_pmr_pool(kib_phys_mr_pool_t *pool)
+{
+        kib_phys_mr_t *pmr;
+
+        LASSERT (pool->ibmp_allocated == 0);
+
+        while (!list_empty(&pool->ibmp_free_list)) {
+                pmr = list_entry(pool->ibmp_free_list.next,
+                                 kib_phys_mr_t, ibpm_link);
+
+                LASSERT (pmr->ibpm_mr == NULL);
+
+                list_del(&pmr->ibpm_link);
+
+                LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t));
+        }
+
+        LIBCFS_FREE(pool, sizeof(kib_phys_mr_pool_t));
+}
+
+int
+kiblnd_create_pmr_pool(kib_dev_t *ibdev, kib_phys_mr_pool_t **poolpp)
+{
+        kib_phys_mr_pool_t  *pool;
+        kib_phys_mr_t       *pmr;
+        int                  i;
+
+        if (*kiblnd_tunables.kib_pmr_pool_size <
+            *kiblnd_tunables.kib_ntx) {
+                CERROR("Can't set pmr pool size (%d) < ntx(%d)\n",
+                       *kiblnd_tunables.kib_pmr_pool_size,
+                       *kiblnd_tunables.kib_ntx);
+                return -EINVAL;
+        }
+
+        LIBCFS_ALLOC(pool, sizeof(kib_phys_mr_pool_t));
+        if (pool == NULL)
+                return -ENOMEM;
+
+        spin_lock_init(&pool->ibmp_lock);
+
+        pool->ibmp_allocated     = 0;
+        CFS_INIT_LIST_HEAD(&pool->ibmp_free_list);
+
+        for (i = 0; i < *kiblnd_tunables.kib_pmr_pool_size; i++) {
+                LIBCFS_ALLOC(pmr, sizeof(kib_phys_mr_t));
+
+                if (pmr == NULL) {
+                        kiblnd_destroy_pmr_pool(pool);
+                        return -ENOMEM;
+                }
+
+                memset(pmr, 0, sizeof(kib_phys_mr_t));
+
+                list_add(&pmr->ibpm_link, &pool->ibmp_free_list);
+        }
+
+        *poolpp = pool;
+
+        return 0;
+}
+
+static int
+kiblnd_dev_get_attr(kib_dev_t *ibdev)
+{
+        struct ib_device_attr *attr;
+        int                    rc;
+
+        /* XXX here should be HCA's page shift/size/mask in the future? */
+        ibdev->ibd_page_shift = PAGE_SHIFT;
+        ibdev->ibd_page_size  = 1 << PAGE_SHIFT;
+        ibdev->ibd_page_mask  = ~((__u64)ibdev->ibd_page_size - 1);
+
+        LIBCFS_ALLOC(attr, sizeof(*attr));
+        if (attr == NULL) {
+                CERROR("Out of memory\n");
+                return -ENOMEM;
+        }
+
+        rc = ib_query_device(ibdev->ibd_cmid->device, attr);
+        if (rc == 0)
+                ibdev->ibd_mr_size = attr->max_mr_size;
+
+        LIBCFS_FREE(attr, sizeof(*attr));
+
+        if (rc != 0) {
+                CERROR("Failed to query IB device: %d\n", rc);
+                return rc;
+        }
+
+#if 1
+        /* XXX We can't trust the max_mr_size returned by the Chelsio driver;
+         * it's wrong and the bug has been reported.  Remove this workaround
+         * when the Chelsio bug is fixed. */
+        if (rdma_node_get_transport(ibdev->ibd_cmid->device->node_type) ==
+            RDMA_TRANSPORT_IWARP)
+                ibdev->ibd_mr_size = (1ULL << 32) - 1;
+#endif
+
+        if (ibdev->ibd_mr_size == ~0ULL) {
+                ibdev->ibd_mr_shift = 64;
+                return 0;
+        }
+
+        for (ibdev->ibd_mr_shift = 0;
+             ibdev->ibd_mr_shift < 64; ibdev->ibd_mr_shift ++) {
+                if (ibdev->ibd_mr_size == (1ULL << ibdev->ibd_mr_shift) ||
+                    ibdev->ibd_mr_size == (1ULL << ibdev->ibd_mr_shift) - 1)
+                        return 0;
+        }
+
+        CERROR("Invalid mr size: "LPX64"\n", ibdev->ibd_mr_size);
+        return -EINVAL;
+}
+
+int
+kiblnd_dev_setup(kib_dev_t *ibdev)
+{
+        struct ib_mr *mr;
+        int           i;
+        int           rc;
+        __u64         mm_size;
+        __u64         mr_size;
+        int           acflags = IB_ACCESS_LOCAL_WRITE |
+                                IB_ACCESS_REMOTE_WRITE;
+
+        rc = kiblnd_dev_get_attr(ibdev);
+        if (rc != 0)
+                return rc;
+
+        if (ibdev->ibd_mr_shift == 64) {
+                LIBCFS_ALLOC(ibdev->ibd_mrs, 1 * sizeof(*ibdev->ibd_mrs));
+                if (ibdev->ibd_mrs == NULL) {
+                        CERROR("Failed to allocate MRs table\n");
+                        return -ENOMEM;
+                }
+
+                ibdev->ibd_mrs[0] = NULL;
+                ibdev->ibd_nmrs   = 1;
+
+                mr = ib_get_dma_mr(ibdev->ibd_pd, acflags);
+                if (IS_ERR(mr)) {
+                        CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
+                        kiblnd_dev_cleanup(ibdev);
+                        return PTR_ERR(mr);
+                }
+
+                ibdev->ibd_mrs[0] = mr;
+
+                goto out;
+        }
+
+        mr_size = (1ULL << ibdev->ibd_mr_shift);
+        mm_size = (unsigned long)high_memory - PAGE_OFFSET;
+
+        ibdev->ibd_nmrs = (int)((mm_size + mr_size - 1) >> ibdev->ibd_mr_shift);
+
+        if (ibdev->ibd_mr_shift < 32 || ibdev->ibd_nmrs > 1024) {
+                /* sub-4GB MRs or over 4TB of memory; assume we will
+                 * re-code by then */
+                CERROR("Can't support memory size: x"LPX64
+                       " with MR size: x"LPX64"\n", mm_size, mr_size);
+                return -EINVAL;
+        }
+
+        /* create an array of MRs to cover all memory */
+        LIBCFS_ALLOC(ibdev->ibd_mrs, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs);
+        if (ibdev->ibd_mrs == NULL) {
+                CERROR("Failed to allocate MRs' table\n");
+                return -ENOMEM;
+        }
+
+        memset(ibdev->ibd_mrs, 0, sizeof(*ibdev->ibd_mrs) * ibdev->ibd_nmrs);
+
+        for (i = 0; i < ibdev->ibd_nmrs; i++) {
+                struct ib_phys_buf ipb;
+                __u64              iova;
+
+                ipb.size = ibdev->ibd_mr_size;
+                ipb.addr = i * mr_size;
+                iova     = ipb.addr;
+
+                mr = ib_reg_phys_mr(ibdev->ibd_pd, &ipb, 1, acflags, &iova);
+                if (IS_ERR(mr)) {
+                        CERROR("Failed ib_reg_phys_mr addr "LPX64
+                               " size "LPX64" : %ld\n",
+                               ipb.addr, ipb.size, PTR_ERR(mr));
+                        kiblnd_dev_cleanup(ibdev);
+                        return PTR_ERR(mr);
+                }
+
+                LASSERT (iova == ipb.addr);
+
+                ibdev->ibd_mrs[i] = mr;
+        }
+
+out:
+        CDEBUG(D_CONSOLE, "Register global MR array, MR size: "
+                          LPX64", array size: %d\n",
+                          ibdev->ibd_mr_size, ibdev->ibd_nmrs);
+
+        list_add_tail(&ibdev->ibd_list,
+                      &kiblnd_data.kib_devs);
+        return 0;
+}
+
+void
+kiblnd_destroy_dev (kib_dev_t *dev)
+{
+        LASSERT (dev->ibd_nnets == 0);
+
+        if (!list_empty(&dev->ibd_list)) /* on kib_devs? */
+                list_del_init(&dev->ibd_list);
+
+        kiblnd_dev_cleanup(dev);
+
+        if (dev->ibd_pd != NULL)
+                ib_dealloc_pd(dev->ibd_pd);
+
+        if (dev->ibd_cmid != NULL)
+                rdma_destroy_id(dev->ibd_cmid);
+
+        LIBCFS_FREE(dev, sizeof(*dev));
 }
 
 void
@@ -1427,10 +1916,11 @@ kiblnd_shutdown (lnet_ni_t *ni)
         case IBLND_INIT_NOTHING:
                 LASSERT (atomic_read(&net->ibn_nconns) == 0);
 
-#if IBLND_MAP_ON_DEMAND
                 if (net->ibn_fmrpool != NULL)
                         ib_destroy_fmr_pool(net->ibn_fmrpool);
-#endif
+                if (net->ibn_pmrpool != NULL)
+                        kiblnd_destroy_pmr_pool(net->ibn_pmrpool);
+
                 if (net->ibn_dev != NULL &&
                     net->ibn_dev->ibd_nnets == 0)
                         kiblnd_destroy_dev(net->ibn_dev);
@@ -1523,13 +2013,13 @@ int
 kiblnd_startup (lnet_ni_t *ni)
 {
         char                     *ifname;
+        kib_dev_t                *ibdev = NULL;
         kib_net_t                *net;
-        kib_dev_t                *ibdev;
         struct list_head         *tmp;
         struct timeval            tv;
         int                       rc;
 
-        LASSERT (ni->ni_lnd == &the_kiblnd);
+        LASSERT (ni->ni_lnd == &the_o2iblnd);
 
         if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
                 rc = kiblnd_base_startup();
@@ -1554,12 +2044,6 @@ kiblnd_startup (lnet_ni_t *ni)
         spin_lock_init(&net->ibn_tx_lock);
         INIT_LIST_HEAD(&net->ibn_idle_txs);
 
-        rc = kiblnd_alloc_tx_descs(ni);
-        if (rc != 0) {
-                CERROR("Can't allocate tx descs\n");
-                goto failed;
-        }
-
         if (ni->ni_interfaces[0] != NULL) {
                 /* Use the IPoIB interface specified in 'networks=' */
 
@@ -1579,7 +2063,6 @@ kiblnd_startup (lnet_ni_t *ni)
                 goto failed;
         }
 
-        ibdev = NULL;
         list_for_each (tmp, &kiblnd_data.kib_devs) {
                 ibdev = list_entry(tmp, kib_dev_t, ibd_list);
 
@@ -1595,7 +2078,6 @@ kiblnd_startup (lnet_ni_t *ni)
                 int                       up;
                 struct rdma_cm_id        *id;
                 struct ib_pd             *pd;
-                struct ib_mr             *mr;
                 struct sockaddr_in       addr;
 
                 rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
@@ -1617,18 +2099,18 @@ kiblnd_startup (lnet_ni_t *ni)
 
                 memset(ibdev, 0, sizeof(*ibdev));
 
-                INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */
+                CFS_INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */
                 ibdev->ibd_ifip = ip;
                 strcpy(&ibdev->ibd_ifname[0], ifname);
 
                 id = rdma_create_id(kiblnd_cm_callback, ibdev, RDMA_PS_TCP);
-                if (!IS_ERR(id)) {
-                        ibdev->ibd_cmid = id;
-                } else {
+                if (IS_ERR(id)) {
                         CERROR("Can't create listen ID: %ld\n", PTR_ERR(id));
                         goto failed;
                 }
 
+                ibdev->ibd_cmid = id;
+
                 memset(&addr, 0, sizeof(addr));
                 addr.sin_family      = AF_INET;
                 addr.sin_port        = htons(*kiblnd_tunables.kib_service);
@@ -1644,74 +2126,54 @@ kiblnd_startup (lnet_ni_t *ni)
                 LASSERT (id->device != NULL);
 
                 pd = ib_alloc_pd(id->device);
-                if (!IS_ERR(pd)) {
-                        ibdev->ibd_pd = pd;
-                } else {
+                if (IS_ERR(pd)) {
                         CERROR("Can't allocate PD: %ld\n", PTR_ERR(pd));
                         goto failed;
                 }
 
-#if IBLND_MAP_ON_DEMAND
-                /* MR for sends and receives */
-                mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
-#else
-                /* MR for sends, recieves _and_ RDMA...........v */
-                mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
-                                       IB_ACCESS_REMOTE_WRITE);
-#endif
-                if (!IS_ERR(mr)) {
-                        ibdev->ibd_mr = mr;
-                } else {
-                        CERROR("Can't get MR: %ld\n", PTR_ERR(mr));
-                        goto failed;
-                }
+                ibdev->ibd_pd = pd;
 
-                rc = rdma_listen(id, 0);
+                rc = rdma_listen(id, 256);
                 if (rc != 0) {
                         CERROR("Can't start listener: %d\n", rc);
                         goto failed;
                 }
 
-                list_add_tail(&ibdev->ibd_list, 
-                              &kiblnd_data.kib_devs);
+                rc = kiblnd_dev_setup(ibdev);
+                if (rc != 0) {
+                        CERROR("Can't setup device: %d\n", rc);
+                        goto failed;
+                }
         }
 
         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
         net->ibn_dev = ibdev;
 
-#if IBLND_MAP_ON_DEMAND
-        /* FMR pool for RDMA */
-        {
-                struct ib_fmr_pool      *fmrpool;
-                struct ib_fmr_pool_param param = {
-                        .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
-                        .page_shift        = PAGE_SHIFT,
-                        .access            = (IB_ACCESS_LOCAL_WRITE |
-                                              IB_ACCESS_REMOTE_WRITE),
-                        .pool_size         = *kiblnd_tunables.kib_fmr_pool_size,
-                        .dirty_watermark   = *kiblnd_tunables.kib_fmr_flush_trigger,
-                        .flush_function    = NULL,
-                        .flush_arg         = NULL,
-                        .cache             = *kiblnd_tunables.kib_fmr_cache};
-
-                if (*kiblnd_tunables.kib_fmr_pool_size < 
-                    *kiblnd_tunables.kib_ntx) {
-                        CERROR("Can't set fmr pool size (%d) < ntx(%d)\n",
-                               *kiblnd_tunables.kib_fmr_pool_size,
-                               *kiblnd_tunables.kib_ntx);
-                        goto failed;
+        if (*kiblnd_tunables.kib_map_on_demand > 0 ||
+            ibdev->ibd_nmrs > 1) { /* premapping can fail if ibd_nmrs > 1,
+                                    * so always create an FMR/PMR pool and
+                                    * map on demand when premapping fails */
+                /* Map on demand */
+                rc = kiblnd_ib_create_fmr_pool(ibdev, &net->ibn_fmrpool);
+                if (rc == -ENOSYS) {
+                        CDEBUG(D_CONSOLE, "No FMR, creating physical mapping\n");
+
+                        rc = kiblnd_create_pmr_pool(ibdev, &net->ibn_pmrpool);
                 }
 
-                fmrpool = ib_create_fmr_pool(ibdev->ibd_pd, &param);
-                if (!IS_ERR(fmrpool)) {
-                        net->ibn_fmrpool = fmrpool;
-                } else {
-                        CERROR("Can't create FMR pool: %ld\n", 
-                               PTR_ERR(fmrpool));
+                if (rc != 0) {
+                        CERROR("Can't create FMR or physical mapping pool: %d, "
+                               "please disable map_on_demand and retry\n", rc);
                         goto failed;
                 }
+
+        }
+
+        rc = kiblnd_alloc_tx_descs(ni);
+        if (rc != 0) {
+                CERROR("Can't allocate tx descs\n");
+                goto failed;
         }
-#endif
 
         kiblnd_map_tx_descs(ni);
 
@@ -1721,6 +2183,9 @@ kiblnd_startup (lnet_ni_t *ni)
         return 0;
 
 failed:
+        if (net->ibn_dev == NULL && ibdev != NULL)
+                kiblnd_destroy_dev(ibdev);
+
         kiblnd_shutdown(ni);
 
         CDEBUG(D_NET, "kiblnd_startup failed\n");
@@ -1730,7 +2195,7 @@ failed:
 void __exit
 kiblnd_module_fini (void)
 {
-        lnet_unregister_lnd(&the_kiblnd);
+        lnet_unregister_lnd(&the_o2iblnd);
         kiblnd_tunables_fini();
 }
 
@@ -1740,23 +2205,22 @@ kiblnd_module_init (void)
         int    rc;
 
         CLASSERT (sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
-#if !IBLND_MAP_ON_DEMAND
         CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
                   <= IBLND_MSG_SIZE);
         CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
                   <= IBLND_MSG_SIZE);
-#endif
+
         rc = kiblnd_tunables_init();
         if (rc != 0)
                 return rc;
 
-        lnet_register_lnd(&the_kiblnd);
+        lnet_register_lnd(&the_o2iblnd);
 
         return 0;
 }
 
 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v1.00");
+MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
 MODULE_LICENSE("GPL");
 
 module_init(kiblnd_module_init);
index 57cc3fa..ed5bacf 100644 (file)
@@ -96,35 +96,6 @@ typedef int gfp_t;
 
 #define IBLND_PEER_HASH_SIZE         101        /* # peer lists */
 #define IBLND_RESCHED                100        /* # scheduler loops before reschedule */
-#define IBLND_MSG_QUEUE_SIZE         8          /* # messages/RDMAs in-flight */
-#define IBLND_CREDIT_HIGHWATER       7          /* when eagerly to return credits */
-#define IBLND_MSG_SIZE              (4<<10)     /* max size of queued messages (inc hdr) */
-
-#define IBLND_MAP_ON_DEMAND  0
-#if IBLND_MAP_ON_DEMAND
-# define IBLND_MAX_RDMA_FRAGS        1
-#else
-# define IBLND_MAX_RDMA_FRAGS        LNET_MAX_IOV
-#endif
-
-/************************/
-/* derived constants... */
-
-/* TX messages (shared by all connections) */
-#define IBLND_TX_MSGS()       (*kiblnd_tunables.kib_ntx)
-#define IBLND_TX_MSG_BYTES()  (IBLND_TX_MSGS() * IBLND_MSG_SIZE)
-#define IBLND_TX_MSG_PAGES()  ((IBLND_TX_MSG_BYTES() + PAGE_SIZE - 1)/PAGE_SIZE)
-
-/* RX messages (per connection) */
-#define IBLND_RX_MSGS         (IBLND_MSG_QUEUE_SIZE * 2)
-#define IBLND_RX_MSG_BYTES    (IBLND_RX_MSGS * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES    ((IBLND_RX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)
-
-/* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS        IBLND_RX_MSGS
-#define IBLND_SEND_WRS        ((*kiblnd_tunables.kib_concurrent_sends) * \
-                               (1 + IBLND_MAX_RDMA_FRAGS))
-#define IBLND_CQ_ENTRIES()    (IBLND_RECV_WRS + IBLND_SEND_WRS)
 
 typedef struct
 {
@@ -137,29 +108,105 @@ typedef struct
         int              *kib_ntx;              /* # tx descs */
         int              *kib_credits;          /* # concurrent sends */
         int              *kib_peercredits;      /* # concurrent sends to 1 peer */
+        int              *kib_peercredits_hiw;  /* when to eagerly return credits */
         int              *kib_peertimeout;      /* seconds to consider peer dead */
         char            **kib_default_ipif;     /* default IPoIB interface */
         int              *kib_retry_count;
         int              *kib_rnr_retry_count;
         int              *kib_concurrent_sends; /* send work queue sizing */
         int             *kib_ib_mtu;           /* IB MTU */
-#if IBLND_MAP_ON_DEMAND
+        int              *kib_map_on_demand;    /* map-on-demand if RD has more fragments
+                                                 * than this value; 0 disables map-on-demand */
+        int              *kib_pmr_pool_size;    /* # physical MR in pool */
         int              *kib_fmr_pool_size;    /* # FMRs in pool */
         int              *kib_fmr_flush_trigger; /* When to trigger FMR flush */
         int              *kib_fmr_cache;        /* enable FMR pool cache? */
-#endif
 #if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
         cfs_sysctl_table_header_t *kib_sysctl;  /* sysctl interface */
 #endif
 } kib_tunables_t;
 
+extern kib_tunables_t  kiblnd_tunables;
+
+#define IBLND_MSG_QUEUE_SIZE_V1      8          /* V1 only : # messages/RDMAs in-flight */
+#define IBLND_CREDIT_HIGHWATER_V1    7          /* V1 only : when to eagerly return credits */
+
+#define IBLND_CREDITS_DEFAULT        8          /* default # of peer credits */
+#define IBLND_CREDITS_MAX            4096       /* Max # of peer credits */
+
+#define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
+                                     IBLND_MSG_QUEUE_SIZE_V1 :   \
+                                     *kiblnd_tunables.kib_peercredits) /* # messages/RDMAs in-flight */
+#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
+                                     IBLND_CREDIT_HIGHWATER_V1 : \
+                                     *kiblnd_tunables.kib_peercredits_hiw) /* when to eagerly return credits */
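+/* V1 peers use the fixed constants above; V2 connections take their
+ * queue depth and credit high-water mark from the tunables */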
+
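+/* clamp V1 concurrent sends into [queue_size / 2, queue_size * 2] */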
+static inline int
+kiblnd_concurrent_sends_v1(void)
+{
+        if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
+                return IBLND_MSG_QUEUE_SIZE_V1 * 2;
+
+        if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
+                return IBLND_MSG_QUEUE_SIZE_V1 / 2;
+
+        return *kiblnd_tunables.kib_concurrent_sends;
+}
+
+#define IBLND_CONCURRENT_SENDS(v)  ((v) == IBLND_MSG_VERSION_1 ? \
+                                     kiblnd_concurrent_sends_v1() : \
+                                     *kiblnd_tunables.kib_concurrent_sends)
+/* 2 OOB messages suffice: 1 for a keepalive and 1 for returning credits */
+#define IBLND_OOB_CAPABLE(v)       ((v) != IBLND_MSG_VERSION_1)
+#define IBLND_OOB_MSGS(v)           (IBLND_OOB_CAPABLE(v) ? 2 : 0)
+
+#define IBLND_MSG_SIZE              (4<<10)                 /* max size of queued messages (inc hdr) */
+#define IBLND_MAX_RDMA_FRAGS         LNET_MAX_IOV           /* max # of fragments supported */
+#define IBLND_CFG_RDMA_FRAGS       (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+                                    *kiblnd_tunables.kib_map_on_demand :      \
+                                     IBLND_MAX_RDMA_FRAGS)  /* max # of fragments configured by user */
+#define IBLND_RDMA_FRAGS(v)        ((v) == IBLND_MSG_VERSION_1 ? \
+                                     IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
+
+/************************/
+/* derived constants... */
+
+/* TX messages (shared by all connections) */
+#define IBLND_TX_MSGS()            (*kiblnd_tunables.kib_ntx)
+#define IBLND_TX_MSG_BYTES()        (IBLND_TX_MSGS() * IBLND_MSG_SIZE)
+#define IBLND_TX_MSG_PAGES()       ((IBLND_TX_MSG_BYTES() + PAGE_SIZE - 1) / PAGE_SIZE)
+
+/* RX messages (per connection) */
+#define IBLND_RX_MSGS(v)            (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
+#define IBLND_RX_MSG_BYTES(v)       (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
+#define IBLND_RX_MSG_PAGES(v)      ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
+
+/* WRs and CQEs (per connection) */
+#define IBLND_RECV_WRS(v)            IBLND_RX_MSGS(v)
+#define IBLND_SEND_WRS(v)          ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
+#define IBLND_CQ_ENTRIES(v)         (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
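+/* worked example (a sketch, assuming default tunables peer_credits = 8 and
+ * map_on_demand = 0 on a V2 connection): IBLND_RX_MSGS = 8 * 2 + 2 = 18,
+ * while each send may take up to 1 + IBLND_MAX_RDMA_FRAGS work requests */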
+
 typedef struct
 {
+        struct ib_device *ibp_device;           /* device for mapping */
         int               ibp_npages;           /* # pages */
         struct page      *ibp_pages[0];
 } kib_pages_t;
 
-typedef struct 
+typedef struct {
+        spinlock_t              ibmp_lock;      /* serialize */
+        int                     ibmp_allocated; /* MR in use */
+        struct list_head        ibmp_free_list; /* pre-allocated MR */
+} kib_phys_mr_pool_t;
+
+typedef struct {
+        struct list_head        ibpm_link;      /* link node */
+        struct ib_mr           *ibpm_mr;        /* MR */
+        __u64                   ibpm_iova;      /* Virtual I/O address */
+        int                     ibpm_refcount;  /* reference count */
+} kib_phys_mr_t;
+
+typedef struct
 {
         struct list_head     ibd_list;          /* chain on kib_devs */
         __u32                ibd_ifip;          /* IPoIB interface IP */
@@ -168,7 +215,14 @@ typedef struct
 
         struct rdma_cm_id   *ibd_cmid;          /* IB listener (bound to 1 device) */
         struct ib_pd        *ibd_pd;            /* PD for the device */
-        struct ib_mr        *ibd_mr;            /* MR for non RDMA I/O */
+        int                  ibd_page_shift;    /* page shift of current HCA */
+        int                  ibd_page_size;     /* page size of current HCA */
+        __u64                ibd_page_mask;     /* page mask of current HCA */
+        int                  ibd_mr_shift;      /* bits shift of max MR size */
+        __u64                ibd_mr_size;       /* size of MR */
+
+        int                  ibd_nmrs;          /* # of global MRs */
+        struct ib_mr       **ibd_mrs;           /* MR for non RDMA I/O */
 } kib_dev_t;
 
 typedef struct
@@ -180,14 +234,14 @@ typedef struct
         atomic_t             ibn_npeers;        /* # peers extant */
         atomic_t             ibn_nconns;        /* # connections extant */
 
+        __u64                ibn_tx_next_cookie; /* RDMA completion cookie */
         struct kib_tx       *ibn_tx_descs;      /* all the tx descriptors */
         kib_pages_t         *ibn_tx_pages;      /* premapped tx msg pages */
         struct list_head     ibn_idle_txs;      /* idle tx descriptors */
         spinlock_t           ibn_tx_lock;       /* serialise */
 
-#if IBLND_MAP_ON_DEMAND
         struct ib_fmr_pool  *ibn_fmrpool;       /* FMR pool for RDMA I/O */
-#endif
+        kib_phys_mr_pool_t  *ibn_pmrpool;       /* Physical MR pool for RDMA I/O */
 
         kib_dev_t           *ibn_dev;           /* underlying IB device */
 } kib_net_t;
@@ -213,7 +267,6 @@ typedef struct
         struct list_head     kib_sched_conns;   /* conns to check for rx completions */
         spinlock_t           kib_sched_lock;    /* serialise */
 
-        __u64                kib_next_tx_cookie; /* RDMA completion cookie */
         struct ib_qp_attr    kib_error_qpa;      /* QP->ERROR */
 } kib_data_t;
 
@@ -239,14 +292,6 @@ typedef struct
         char              ibim_payload[0];      /* piggy-backed payload */
 } WIRE_ATTR kib_immediate_msg_t;
 
-#if IBLND_MAP_ON_DEMAND
-typedef struct
-{
-       __u64             rd_addr;              /* IO VMA address */
-       __u32             rd_nob;               /* # of bytes */
-       __u32             rd_key;               /* remote key */
-} WIRE_ATTR kib_rdma_desc_t;
-#else
 typedef struct
 {
         __u32             rf_nob;               /* # bytes this frag */
@@ -259,8 +304,7 @@ typedef struct
         __u32             rd_nfrags;            /* # fragments */
         kib_rdma_frag_t   rd_frags[0];          /* buffer frags */
 } WIRE_ATTR kib_rdma_desc_t;
-#endif
-        
+
 typedef struct
 {
         lnet_hdr_t        ibprm_hdr;            /* portals header */
@@ -314,7 +358,9 @@ typedef struct
 
 #define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC    /* unique magic */
 
-#define IBLND_MSG_VERSION           0x11
+#define IBLND_MSG_VERSION_1         0x11
+#define IBLND_MSG_VERSION_2         0x12
+#define IBLND_MSG_VERSION           IBLND_MSG_VERSION_2
 
 #define IBLND_MSG_CONNREQ           0xc0        /* connection request */
 #define IBLND_MSG_CONNACK           0xc1        /* connection acknowledge */
@@ -331,14 +377,22 @@ typedef struct {
         __u32            ibr_magic;             /* sender's magic */
         __u16            ibr_version;           /* sender's version */
         __u8             ibr_why;               /* reject reason */
+        __u8             ibr_padding;           /* padding */
+        __u64            ibr_incarnation;       /* incarnation of peer */
+        kib_connparams_t ibr_cp;                /* connection parameters */
 } WIRE_ATTR kib_rej_t;
 
-
 /* connection rejection reasons */
 #define IBLND_REJECT_CONN_RACE       1          /* You lost connection race */
 #define IBLND_REJECT_NO_RESOURCES    2          /* Out of memory/conns etc */
 #define IBLND_REJECT_FATAL           3          /* Anything else */
 
+#define IBLND_REJECT_CONN_UNCOMPAT   4          /* incompatible version peer */
+#define IBLND_REJECT_CONN_STALE      5          /* stale peer */
+
+#define IBLND_REJECT_RDMA_FRAGS      6          /* Fatal: peer's rdma frags can't match mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE  7          /* Fatal: peer's msg queue size can't match mine */
+
 /***********************************************************************/
 
 typedef struct kib_rx                           /* receive message */
@@ -374,20 +428,18 @@ typedef struct kib_tx                           /* transmit message */
         __u64                     tx_msgaddr;   /* message buffer (I/O addr) */
         DECLARE_PCI_UNMAP_ADDR   (tx_msgunmap); /* for dma_unmap_single() */
         int                       tx_nwrq;      /* # send work items */
-#if IBLND_MAP_ON_DEMAND
-        struct ib_send_wr         tx_wrq[2];    /* send work items... */
-        struct ib_sge             tx_sge[2];    /* ...and their memory */
-        kib_rdma_desc_t           tx_rd[1];     /* rdma descriptor */
-        __u64                    *tx_pages;     /* rdma phys page addrs */
-        struct ib_pool_fmr       *tx_fmr;       /* rdma mapping (mapped if != NULL) */
-#else
         struct ib_send_wr        *tx_wrq;       /* send work items... */
         struct ib_sge            *tx_sge;       /* ...and their memory */
         kib_rdma_desc_t          *tx_rd;        /* rdma descriptor */
         int                       tx_nfrags;    /* # entries in... */
         struct scatterlist       *tx_frags;     /* dma_map_sg descriptor */
+        struct ib_phys_buf       *tx_ipb;       /* physical buffer (for iWARP) */
+        __u64                    *tx_pages;     /* rdma phys page addrs */
+        union {
+                kib_phys_mr_t      *pmr;         /* MR for physical buffer */
+                struct ib_pool_fmr *fmr;         /* rdma mapping (mapped if != NULL) */
+        }                         tx_u;
         int                       tx_dmadir;    /* dma direction */
-#endif        
 } kib_tx_t;
 
 typedef struct kib_connvars
@@ -401,20 +453,21 @@ typedef struct kib_conn
         struct kib_peer    *ibc_peer;           /* owning peer */
         struct list_head    ibc_list;           /* stash on peer's conn list */
         struct list_head    ibc_sched_list;     /* schedule for attention */
+        __u16               ibc_version;        /* version of connection */
         __u64               ibc_incarnation;    /* which instance of the peer */
         atomic_t            ibc_refcount;       /* # users */
         int                 ibc_state;          /* what's happening */
         int                 ibc_nsends_posted;  /* # uncompleted sends */
+        int                 ibc_noops_posted;   /* # uncompleted NOOPs */
         int                 ibc_credits;        /* # credits I have */
         int                 ibc_outstanding_credits; /* # credits to return */
         int                 ibc_reserved_credits;/* # ACK/DONE msg credits */
         int                 ibc_comms_error;    /* set on comms error */
-        int                 ibc_nrx:8;          /* receive buffers owned */
+        int                 ibc_nrx:16;         /* receive buffers owned */
         int                 ibc_scheduled:1;    /* scheduled for attention */
         int                 ibc_ready:1;        /* CQ callback fired */
         unsigned long       ibc_last_send;      /* time of last send */
         struct list_head    ibc_early_rxs;      /* rxs completed before ESTABLISHED */
-        struct list_head    ibc_tx_noops;       /* IBLND_MSG_NOOPs */
         struct list_head    ibc_tx_queue;       /* sends that need a credit */
         struct list_head    ibc_tx_queue_nocred;/* sends that don't need a credit */
         struct list_head    ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
@@ -444,15 +497,15 @@ typedef struct kib_peer
         atomic_t            ibp_refcount;       /* # users */
         struct list_head    ibp_conns;          /* all active connections */
         struct list_head    ibp_tx_queue;       /* msgs waiting for a conn */
+        __u16               ibp_version;        /* version of peer */
+        __u64               ibp_incarnation;    /* incarnation of peer */
         int                 ibp_connecting;     /* current active connection attempts */
         int                 ibp_accepting;      /* current passive connection attempts */
         int                 ibp_error;          /* errno on closing this peer */
         cfs_time_t          ibp_last_alive;     /* when (in jiffies) I was last alive */
 } kib_peer_t;
 
-
 extern kib_data_t      kiblnd_data;
-extern kib_tunables_t  kiblnd_tunables;
 
 #define kiblnd_conn_addref(conn)                                \
 do {                                                            \
@@ -534,21 +587,19 @@ kiblnd_send_noop(kib_conn_t *conn)
 {
         LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
-        if (conn->ibc_outstanding_credits < IBLND_CREDIT_HIGHWATER &&
+        if (conn->ibc_outstanding_credits <
+            IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
             !kiblnd_send_keepalive(conn))
                 return 0; /* No need to send NOOP */
 
-        if (!list_empty(&conn->ibc_tx_noops) ||       /* NOOP already queued */
-            !list_empty(&conn->ibc_tx_queue_nocred) || /* can be piggybacked */
-            conn->ibc_credits == 0)                    /* no credit */
-                return 0;
+        if (!list_empty(&conn->ibc_tx_queue_nocred))
+                return 0; /* NOOP can be piggybacked */
 
-        if (conn->ibc_credits == 1 &&      /* last credit reserved for */
-            conn->ibc_outstanding_credits == 0) /* giving back credits */
-                return 0;
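+        /* a V1 peer has no OOB slots: only send a NOOP when there is
+         * nothing queued that credits could piggyback on */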
+        if (!IBLND_OOB_CAPABLE(conn->ibc_version))
+                return list_empty(&conn->ibc_tx_queue); /* can't piggyback? */
 
         /* No tx to piggyback NOOP onto or no credit to send a tx */
-        return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
+        return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0);
 }
 
 static inline void
@@ -558,6 +609,25 @@ kiblnd_abort_receives(kib_conn_t *conn)
                      &kiblnd_data.kib_error_qpa, IB_QP_STATE);
 }
 
+static inline const char *
+kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+{
+        if (q == &conn->ibc_tx_queue)
+                return "tx_queue";
+
+        if (q == &conn->ibc_tx_queue_rsrvd)
+                return "tx_queue_rsrvd";
+
+        if (q == &conn->ibc_tx_queue_nocred)
+                return "tx_queue_nocred";
+
+        if (q == &conn->ibc_active_txs)
+                return "active_txs";
+
+        LBUG();
+        return NULL;
+}
+
 /* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
  * lowest bits of the work request id to stash the work item type. */
 
@@ -595,28 +665,75 @@ kiblnd_set_conn_state (kib_conn_t *conn, int state)
         mb();
 }
 
-#if IBLND_MAP_ON_DEMAND
-static inline int
-kiblnd_rd_size (kib_rdma_desc_t *rd)
+static inline void
+kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
 {
-        return rd->rd_nob;
+        msg->ibm_type = type;
+        msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
 }
-#else
+
 static inline int
 kiblnd_rd_size (kib_rdma_desc_t *rd)
 {
         int   i;
         int   size;
-        
+
         for (i = size = 0; i < rd->rd_nfrags; i++)
                 size += rd->rd_frags[i].rf_nob;
-        
+
         return size;
 }
-#endif
+
+static inline __u64
+kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
+{
+        return rd->rd_frags[index].rf_addr;
+}
+
+static inline __u32
+kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
+{
+        return rd->rd_frags[index].rf_nob;
+}
+
+static inline __u32
+kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
+{
+        return rd->rd_key;
+}
+
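+/* consume 'nob' bytes of fragment 'index'; return the fragment index at
+ * which the next transfer should continue */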
+static inline int
+kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
+{
+        if (nob < rd->rd_frags[index].rf_nob) {
+                rd->rd_frags[index].rf_addr += nob;
+                rd->rd_frags[index].rf_nob  -= nob;
+        } else {
+                index ++;
+        }
+
+        return index;
+}
+
+static inline int
+kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
+{
+        LASSERT (msgtype == IBLND_MSG_GET_REQ ||
+                 msgtype == IBLND_MSG_PUT_ACK);
+
+        return msgtype == IBLND_MSG_GET_REQ ?
+               offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
+               offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
+}
 
 #ifdef HAVE_OFED_IB_DMA_MAP
 
+static inline __u64
+kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+        return ib_dma_mapping_error(dev, dma_addr);
+}
+
 static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
                                           void *msg, size_t size,
                                           enum dma_data_direction direction)
@@ -669,6 +786,12 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
 
 #else
 
+static inline __u64
+kiblnd_dma_mapping_error(struct ib_device *dev, dma_addr_t dma_addr)
+{
+        return dma_mapping_error(dma_addr);
+}
+
 static inline dma_addr_t kiblnd_dma_map_single(struct ib_device *dev,
                                                void *msg, size_t size,
                                                enum dma_data_direction direction)
@@ -719,6 +842,21 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
 
 #endif
 
+struct ib_mr *kiblnd_find_rd_dma_mr(kib_net_t *net,
+                                    kib_rdma_desc_t *rd);
+struct ib_mr *kiblnd_find_dma_mr(kib_net_t *net,
+                                 __u64 addr, __u64 size);
+void kiblnd_map_rx_descs(kib_conn_t *conn);
+void kiblnd_unmap_rx_descs(kib_conn_t *conn);
+void kiblnd_map_tx_descs (lnet_ni_t *ni);
+void kiblnd_unmap_tx_descs(lnet_ni_t *ni);
+int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
+                  kib_rdma_desc_t *rd, int nfrags);
+void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
+kib_phys_mr_t *kiblnd_phys_mr_map(kib_net_t *net, kib_rdma_desc_t *rd,
+                                  struct ib_phys_buf *ipb, __u64 *iova);
+void kiblnd_phys_mr_unmap(kib_net_t *net, kib_phys_mr_t *pmr);
+
 int  kiblnd_startup (lnet_ni_t *ni);
 void kiblnd_shutdown (lnet_ni_t *ni);
 int  kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
@@ -736,6 +874,7 @@ void kiblnd_free_pages (kib_pages_t *p);
 
 int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
                         struct rdma_cm_event *event);
+int  kiblnd_translate_mtu(int value);
 
 int  kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
 void kiblnd_destroy_peer (kib_peer_t *peer);
@@ -744,16 +883,18 @@ void kiblnd_unlink_peer_locked (kib_peer_t *peer);
 void kiblnd_peer_alive (kib_peer_t *peer);
 kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
 void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
-int  kiblnd_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation);
+int  kiblnd_close_stale_conns_locked (kib_peer_t *peer,
+                                      int version, __u64 incarnation);
+int  kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
 
 void kiblnd_connreq_done(kib_conn_t *conn, int status);
 kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
-                                int state);
+                                int state, int version);
 void kiblnd_destroy_conn (kib_conn_t *conn);
 void kiblnd_close_conn (kib_conn_t *conn, int error);
 void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
 
-int  kiblnd_init_rdma (lnet_ni_t *ni, kib_tx_t *tx, int type,
+int  kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
                        int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
 
 void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
@@ -767,8 +908,7 @@ void kiblnd_qp_event(struct ib_event *event, void *arg);
 void kiblnd_cq_event(struct ib_event *event, void *arg);
 void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
 
-void kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob);
-void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg,
+void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
                       int credits, lnet_nid_t dstnid, __u64 dststamp);
 int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
 int  kiblnd_post_rx (kib_rx_t *rx, int credit);
index 4abb588..b55e06d 100644 (file)
 
 #include "o2iblnd.h"
 
-char *
-kiblnd_msgtype2str(int type) 
-{
-        switch (type) {
-        case IBLND_MSG_CONNREQ:
-                return "CONNREQ";
-                
-        case IBLND_MSG_CONNACK:
-                return "CONNACK";
-                
-        case IBLND_MSG_NOOP:
-                return "NOOP";
-                
-        case IBLND_MSG_IMMEDIATE:
-                return "IMMEDIATE";
-                
-        case IBLND_MSG_PUT_REQ:
-                return "PUT_REQ";
-                
-        case IBLND_MSG_PUT_NAK:
-                return "PUT_NAK";
-                
-        case IBLND_MSG_PUT_ACK:
-                return "PUT_ACK";
-                
-        case IBLND_MSG_PUT_DONE:
-                return "PUT_DONE";
-                
-        case IBLND_MSG_GET_REQ:
-                return "GET_REQ";
-                
-        case IBLND_MSG_GET_DONE:
-                return "GET_DONE";
-                
-        default:
-                return "???";
-        }
-}
-
 void
 kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
 {
@@ -93,25 +54,8 @@ kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
         LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
         LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer response */
 
-#if IBLND_MAP_ON_DEMAND
-        if (tx->tx_fmr != NULL) {
-                rc = ib_fmr_pool_unmap(tx->tx_fmr);
-                LASSERT (rc == 0);
+        kiblnd_unmap_tx(ni, tx);
 
-                if (tx->tx_status != 0) {
-                        rc = ib_flush_fmr_pool(net->ibn_fmrpool);
-                        LASSERT (rc == 0);
-                }
-
-                tx->tx_fmr = NULL;
-        }
-#else
-        if (tx->tx_nfrags != 0) {
-                kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
-                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
-                tx->tx_nfrags = 0;
-        }
-#endif
         /* tx may have up to 2 lnet msgs to finalise */
         lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
         lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
@@ -146,11 +90,11 @@ void
 kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
 {
         kib_tx_t *tx;
-        
+
         while (!list_empty (txlist)) {
                 tx = list_entry (txlist->next, kib_tx_t, tx_list);
 
-                list_del (&tx->tx_list);
+                list_del(&tx->tx_list);
                 /* complete now */
                 tx->tx_waiting = 0;
                 tx->tx_status = status;
@@ -179,7 +123,7 @@ kiblnd_get_idle_tx (lnet_ni_t *ni)
         /* Allocate a new completion cookie.  It might not be needed,
          * but we've got a lock right now and we're unlikely to
          * wrap... */
-        tx->tx_cookie = kiblnd_data.kib_next_tx_cookie++;
+        tx->tx_cookie = net->ibn_tx_next_cookie++;
 
         spin_unlock(&net->ibn_tx_lock);
 
@@ -191,11 +135,8 @@ kiblnd_get_idle_tx (lnet_ni_t *ni)
         LASSERT (tx->tx_conn == NULL);
         LASSERT (tx->tx_lntmsg[0] == NULL);
         LASSERT (tx->tx_lntmsg[1] == NULL);
-#if IBLND_MAP_ON_DEMAND
-        LASSERT (tx->tx_fmr == NULL);
-#else
+        LASSERT (tx->tx_u.fmr == NULL);
         LASSERT (tx->tx_nfrags == 0);
-#endif
 
         return tx;
 }
@@ -219,7 +160,8 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
 {
         kib_conn_t         *conn = rx->rx_conn;
         kib_net_t          *net = conn->ibc_peer->ibp_ni->ni_data;
-        struct ib_recv_wr  *bad_wrq;
+        struct ib_recv_wr  *bad_wrq = NULL;
+        struct ib_mr       *mr;
         int                 rc;
 
         LASSERT (net != NULL);
@@ -228,9 +170,12 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
                  credit == IBLND_POSTRX_PEER_CREDIT ||
                  credit == IBLND_POSTRX_RSRVD_CREDIT);
 
+        mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
+        LASSERT (mr != NULL);
+
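+        /* with multiple global MRs the lkey depends on which MR covers
+         * this rx buffer, hence the lookup above */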
+        rx->rx_sge.lkey   = mr->lkey;
+        rx->rx_sge.addr   = rx->rx_msgaddr;
         rx->rx_sge.length = IBLND_MSG_SIZE;
-        rx->rx_sge.lkey = net->ibn_dev->ibd_mr->lkey;
-        rx->rx_sge.addr = rx->rx_msgaddr;
 
         rx->rx_wrq.next = NULL;
         rx->rx_wrq.sg_list = &rx->rx_sge;
@@ -248,13 +193,16 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
         rx->rx_nob = -1;                        /* flag posted */
 
         rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
+        if (rc != 0) {
+                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
+                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
+                rx->rx_nob = 0;
+        }
 
         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                 return rc;
 
         if (rc != 0) {
-                CERROR("Can't post rx for %s: %d\n",
-                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                 kiblnd_close_conn(conn, rc);
                 kiblnd_drop_rx(rx);             /* No more posts for this rx */
                 return rc;
@@ -378,13 +326,14 @@ kiblnd_handle_rx (kib_rx_t *rx)
                 /* Have I received credits that will let me send? */
                 spin_lock(&conn->ibc_lock);
 
-                if (conn->ibc_credits + credits > IBLND_MSG_QUEUE_SIZE) {
+                if (conn->ibc_credits + credits >
+                    IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
                         rc2 = conn->ibc_credits;
                         spin_unlock(&conn->ibc_lock);
 
                         CERROR("Bad credits from %s: %d + %d > %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
-                               rc2, credits, IBLND_MSG_QUEUE_SIZE);
+                               rc2, credits, IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
 
                         kiblnd_close_conn(conn, -EPROTO);
                         kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
@@ -393,10 +342,6 @@ kiblnd_handle_rx (kib_rx_t *rx)
 
                 conn->ibc_credits += credits;
 
-                /* This ensures the credit taken by NOOP can be returned */
-                if (msg->ibm_type == IBLND_MSG_NOOP)
-                        conn->ibc_outstanding_credits++;
-
                 spin_unlock(&conn->ibc_lock);
                 kiblnd_check_sends(conn);
         }
@@ -410,9 +355,9 @@ kiblnd_handle_rx (kib_rx_t *rx)
                 break;
 
         case IBLND_MSG_NOOP:
-                if (credits != 0) /* credit already posted */
+                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                         post_credit = IBLND_POSTRX_NO_CREDIT;
-                else              /* a keepalive NOOP */
+                else
                         post_credit = IBLND_POSTRX_PEER_CREDIT;
                 break;
 
@@ -464,7 +409,7 @@ kiblnd_handle_rx (kib_rx_t *rx)
 
                 tx->tx_nwrq = 0;                /* overwrite PUT_REQ */
 
-                rc2 = kiblnd_init_rdma(ni, tx, IBLND_MSG_PUT_DONE,
+                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                        kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                        &msg->ibm_u.putack.ibpam_rd,
                                        msg->ibm_u.putack.ibpam_dst_cookie);
@@ -515,7 +460,6 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
         kib_conn_t   *conn = rx->rx_conn;
         lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
         kib_net_t    *net = ni->ni_data;
-        unsigned long flags;
         int           rc;
         int           err = -EIO;
 
@@ -558,16 +502,17 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
         /* racing with connection establishment/teardown! */
 
         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-                write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+                rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
+                unsigned long  flags;
+
+                write_lock_irqsave(g_lock, flags);
                 /* must check holding global lock to eliminate race */
                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                         list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
-                        write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                                flags);
+                        write_unlock_irqrestore(g_lock, flags);
                         return;
                 }
-                write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                        flags);
+                write_unlock_irqrestore(g_lock, flags);
         }
         kiblnd_handle_rx(rx);
         return;
@@ -603,19 +548,172 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
         return page;
 }
 
-#if !IBLND_MAP_ON_DEMAND
+static void
+kiblnd_fmr_unmap_tx(kib_net_t *net, kib_tx_t *tx)
+{
+        int     rc;
+
+        if (tx->tx_u.fmr == NULL)
+                return;
+
+        rc = ib_fmr_pool_unmap(tx->tx_u.fmr);
+        LASSERT (rc == 0);
+
+        if (tx->tx_status != 0) {
+                rc = ib_flush_fmr_pool(net->ibn_fmrpool);
+                LASSERT (rc == 0);
+        }
+
+        tx->tx_u.fmr = NULL;
+}
+
+static int
+kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+{
+        struct ib_pool_fmr *fmr;
+        kib_dev_t          *ibdev  = net->ibn_dev;
+        __u64              *pages  = tx->tx_pages;
+        int                 npages;
+        int                 size;
+        int                 i;
+
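+        /* build the array of page-aligned addresses covering every
+         * fragment so the whole RD can be mapped by one FMR */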
+        for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+                for (size = 0; size <  rd->rd_frags[i].rf_nob;
+                               size += ibdev->ibd_page_size) {
+                        pages[npages ++] = (rd->rd_frags[i].rf_addr &
+                                            ibdev->ibd_page_mask) + size;
+                }
+        }
+
+        fmr = ib_fmr_pool_map_phys(net->ibn_fmrpool, pages, npages, 0);
+
+        if (IS_ERR(fmr)) {
+                CERROR ("Can't map %d pages: %ld\n", npages, PTR_ERR(fmr));
+                return PTR_ERR(fmr);
+        }
+
+        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
+         * the rkey */
+        rd->rd_key = (rd != tx->tx_rd) ? fmr->fmr->rkey :
+                                         fmr->fmr->lkey;
+        rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
+        rd->rd_frags[0].rf_nob   = nob;
+        rd->rd_nfrags = 1;
+
+        tx->tx_u.fmr = fmr;
+
+        return 0;
+}
+
+static void
+kiblnd_pmr_unmap_tx(kib_net_t *net, kib_tx_t *tx)
+{
+        if (tx->tx_u.pmr == NULL)
+                return;
+
+        kiblnd_phys_mr_unmap(net, tx->tx_u.pmr);
+
+        tx->tx_u.pmr = NULL;
+}
+
+static int
+kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+{
+        kib_phys_mr_t      *pmr;
+        __u64               iova;
+
+        iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;
+
+        pmr = kiblnd_phys_mr_map(net, rd, tx->tx_ipb, &iova);
+        if (pmr == NULL) {
+                CERROR("Failed to create MR by phybuf\n");
+                return -ENOMEM;
+        }
+
+        rd->rd_key = (rd != tx->tx_rd) ? pmr->ibpm_mr->rkey :
+                                         pmr->ibpm_mr->lkey;
+        rd->rd_nfrags = 1;
+        rd->rd_frags[0].rf_addr = iova;
+        rd->rd_frags[0].rf_nob  = nob;
+
+        tx->tx_u.pmr = pmr;
+
+        return 0;
+}
+
+void
+kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
+{
+        kib_net_t  *net = ni->ni_data;
+
+        LASSERT (net != NULL);
+
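+        /* drop any map-on-demand (FMR/PMR) mapping first, then undo the
+         * DMA scatterlist mapping */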
+        if (net->ibn_fmrpool != NULL)
+                kiblnd_fmr_unmap_tx(net, tx);
+        else if (net->ibn_pmrpool != NULL)
+                kiblnd_pmr_unmap_tx(net, tx);
+
+        if (tx->tx_nfrags != 0) {
+                kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
+                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+                tx->tx_nfrags = 0;
+        }
+}
+
 int
-kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, 
+kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
+              kib_rdma_desc_t *rd, int nfrags)
+{
+        kib_net_t          *net   = ni->ni_data;
+        struct ib_mr       *mr    = NULL;
+        __u32               nob;
+        int                 i;
+
+        /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
+         * RDMA sink */
+        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+        tx->tx_nfrags = nfrags;
+
+        rd->rd_nfrags =
+                kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
+                                  tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+
+        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
+                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
+                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
+                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+                nob += rd->rd_frags[i].rf_nob;
+        }
+
+        /* looking for pre-mapping MR */
+        mr = kiblnd_find_rd_dma_mr(net, rd);
+        if (mr != NULL) {
+                /* found pre-mapping MR */
+                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
+                return 0;
+        }
+
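+        /* no pre-mapped global MR covers this RD: fall back to
+         * map-on-demand via the FMR pool, or the PMR pool if FMR is
+         * unsupported */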
+        if (net->ibn_fmrpool != NULL)
+                return kiblnd_fmr_map_tx(net, tx, rd, nob);
+
+        if (net->ibn_pmrpool != NULL)
+                return kiblnd_pmr_map_tx(net, tx, rd, nob);
+
+        return -EINVAL;
+}
+
+int
+kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                     unsigned int niov, struct iovec *iov, int offset, int nob)
-                 
 {
+        kib_net_t          *net = ni->ni_data;
+        struct page        *page;
         struct scatterlist *sg;
-        int                 i;
-        int                 fragnob;
         unsigned long       vaddr;
-        struct page        *page;
+        int                 fragnob;
         int                 page_offset;
-        kib_net_t          *net = ni->ni_data;
 
         LASSERT (nob > 0);
         LASSERT (niov > 0);
@@ -655,36 +753,17 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                 }
                 nob -= fragnob;
         } while (nob > 0);
-        
-        /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
-         * RDMA sink */
-        tx->tx_nfrags = sg - tx->tx_frags;
-        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-        rd->rd_nfrags = kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
-                                          tx->tx_frags, tx->tx_nfrags,
-                                          tx->tx_dmadir);
-        rd->rd_key    = (rd != tx->tx_rd) ? 
-                        net->ibn_dev->ibd_mr->rkey : net->ibn_dev->ibd_mr->lkey;
 
-        for (i = 0; i < rd->rd_nfrags; i++) {
-                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
-                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
-        }
-        
-        return 0;
+        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
 }
 
 int
-kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, 
+kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                       int nkiov, lnet_kiov_t *kiov, int offset, int nob)
 {
+        kib_net_t          *net = ni->ni_data;
         struct scatterlist *sg;
-        int                 i;
         int                 fragnob;
-        kib_net_t          *net = ni->ni_data;
 
         CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
 
@@ -709,182 +788,145 @@ kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                 sg_set_page(sg, kiov->kiov_page, fragnob,
                             kiov->kiov_offset + offset);
                 sg++;
+
                 offset = 0;
                 kiov++;
                 nkiov--;
                 nob -= fragnob;
         } while (nob > 0);
 
-        /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
-         * RDMA sink */
-        tx->tx_nfrags = sg - tx->tx_frags;
-        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-        rd->rd_nfrags = kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
-                                          tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
-        rd->rd_key    = (rd != tx->tx_rd) ? 
-                        net->ibn_dev->ibd_mr->rkey : net->ibn_dev->ibd_mr->lkey;
-
-        for (i = 0; i < tx->tx_nfrags; i++) {
-                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
-                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
-                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
-#if 0
-                CDEBUG(D_WARNING,"frag[%d]: "LPX64" for %d\n",
-                       i, rd->rd_frags[i].rf_addr, rd->rd_frags[i].rf_nob);
-#endif
-        }
-        
-        return 0;
+        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
 }
-#else
+
 int
-kiblnd_map_tx (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
-               int npages, unsigned long page_offset, int nob)
+kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
 {
-        struct ib_pool_fmr *fmr;
-        kib_net_t          *net = ni->ni_data;
+        kib_msg_t         *msg = tx->tx_msg;
+        kib_peer_t        *peer = conn->ibc_peer;
+        int                ver = conn->ibc_version;
+        int                rc;
+        int                done;
+        struct ib_send_wr *bad_wrq;
 
-        LASSERT (net != NULL);
-        LASSERT (tx->tx_fmr == NULL);
-        LASSERT (page_offset < PAGE_SIZE);
-        LASSERT (npages >= (1 + ((page_offset + nob - 1)>>PAGE_SHIFT)));
-        LASSERT (npages <= LNET_MAX_IOV);
+        LASSERT (tx->tx_queued);
+        /* We rely on this for QP sizing */
+        LASSERT (tx->tx_nwrq > 0);
+        LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
 
-        rd->rd_addr = 0;
+        LASSERT (credit == 0 || credit == 1);
+        LASSERT (conn->ibc_outstanding_credits >= 0);
+        LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
+        LASSERT (conn->ibc_credits >= 0);
+        LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));
 
-        fmr = ib_fmr_pool_map_phys(net->ibn_fmrpool, tx->tx_pages,
-                                   npages, rd->rd_addr);
-        if (IS_ERR(fmr)) {
-                CERROR ("Can't map %d pages: %ld\n", npages, PTR_ERR(fmr));
-                return PTR_ERR(fmr);
+        if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
+                /* tx completions outstanding... */
+                CDEBUG(D_NET, "%s: posted enough\n",
+                       libcfs_nid2str(peer->ibp_nid));
+                return -EAGAIN;
         }
 
-        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
-         * the rkey */
-
-        rd->rd_key = (rd != tx->tx_rd) ? fmr->fmr->rkey : fmr->fmr->lkey;
-        rd->rd_nob = nob;
-
-        tx->tx_fmr = fmr;
-        return 0;
-}
-
-int
-kiblnd_setup_rd_iov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
-                     unsigned int niov, struct iovec *iov, int offset, int nob)
-
-{
-        int           resid;
-        int           fragnob;
-        struct page  *page;
-        int           npages;
-        unsigned long page_offset;
-        unsigned long vaddr;
-
-        LASSERT (nob > 0);
-        LASSERT (niov > 0);
-
-        while (offset >= iov->iov_len) {
-                offset -= iov->iov_len;
-                niov--;
-                iov++;
-                LASSERT (niov > 0);
+        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
+                CDEBUG(D_NET, "%s: no credits\n",
+                       libcfs_nid2str(peer->ibp_nid));
+                return -EAGAIN;
         }
 
-        if (nob > iov->iov_len - offset) {
-                CERROR ("Can't map multiple vaddr fragments\n");
-                return (-EMSGSIZE);
+        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
+            conn->ibc_credits == 1 &&   /* last credit reserved for */
+            conn->ibc_outstanding_credits == 0) { /* giving back credits */
+                CDEBUG(D_NET, "%s: not using last credit\n",
+                       libcfs_nid2str(peer->ibp_nid));
+                return -EAGAIN;
         }
 
-        vaddr = ((unsigned long)iov->iov_base) + offset;
-
-        page_offset = vaddr & (PAGE_SIZE - 1);
-        resid = nob;
-        npages = 0;
-
-        do {
-                LASSERT (npages < LNET_MAX_IOV);
-
-                page = kiblnd_kvaddr_to_page(vaddr);
-                if (page == NULL) {
-                        CERROR("Can't find page for %lu\n", vaddr);
-                        return -EFAULT;
-                }
-
-                tx->tx_pages[npages++] = lnet_page2phys(page);
-
-                fragnob = PAGE_SIZE - (vaddr & (PAGE_SIZE - 1));
-                vaddr += fragnob;
-                resid -= fragnob;
-
-        } while (resid > 0);
+        /* NB don't drop ibc_lock before bumping tx_sending */
+        list_del(&tx->tx_list);
+        tx->tx_queued = 0;
+
+        if (msg->ibm_type == IBLND_MSG_NOOP &&
+            (!kiblnd_send_noop(conn) ||     /* redundant NOOP */
+             (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
+              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
+                /* OK to drop when posted enough NOOPs, since
+                 * kiblnd_check_sends will queue NOOP again when
+                 * posted NOOPs complete */
+                spin_unlock(&conn->ibc_lock);
+                kiblnd_tx_done(peer->ibp_ni, tx);
+                spin_lock(&conn->ibc_lock);
+                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
+                       libcfs_nid2str(peer->ibp_nid),
+                       conn->ibc_noops_posted);
+                return 0;
+        }
 
-        return kiblnd_map_tx(ni, tx, rd, npages, page_offset, nob);
-}
+        kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
+                        peer->ibp_nid, conn->ibc_incarnation);
 
-int
-kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
-                      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
-{
-        int            resid;
-        int            npages;
-        unsigned long  page_offset;
+        conn->ibc_credits -= credit;
+        conn->ibc_outstanding_credits = 0;
+        conn->ibc_nsends_posted++;
+        if (msg->ibm_type == IBLND_MSG_NOOP)
+                conn->ibc_noops_posted++;
 
-        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
+        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
+         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
+         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
+         * and then re-queued here.  It's (just) possible that
+         * tx_sending is non-zero if we've not done the tx_complete()
+         * from the first send; hence the ++ rather than = below. */
+        tx->tx_sending++;
+        list_add(&tx->tx_list, &conn->ibc_active_txs);
 
-        LASSERT (nob > 0);
-        LASSERT (nkiov > 0);
-        LASSERT (nkiov <= LNET_MAX_IOV);
+        /* I'm still holding ibc_lock! */
+        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
+                rc = -ECONNABORTED;
+        else
+                rc = ib_post_send(conn->ibc_cmid->qp,
+                                  tx->tx_wrq, &bad_wrq);
+        conn->ibc_last_send = jiffies;
 
-        while (offset >= kiov->kiov_len) {
-                offset -= kiov->kiov_len;
-                nkiov--;
-                kiov++;
-                LASSERT (nkiov > 0);
-        }
+        if (rc == 0)
+                return 0;
 
-        page_offset = kiov->kiov_offset + offset;
+        /* NB credits are transferred in the actual
+         * message, which can only be the last work item */
+        conn->ibc_credits += credit;
+        conn->ibc_outstanding_credits += msg->ibm_credits;
+        conn->ibc_nsends_posted--;
+        if (msg->ibm_type == IBLND_MSG_NOOP)
+                conn->ibc_noops_posted--;
 
-        resid = offset + nob;
-        npages = 0;
+        tx->tx_status = rc;
+        tx->tx_waiting = 0;
+        tx->tx_sending--;
 
-        do {
-                LASSERT (npages < LNET_MAX_IOV);
-                LASSERT (nkiov > 0);
+        done = (tx->tx_sending == 0);
+        if (done)
+                list_del(&tx->tx_list);
 
-                if ((npages > 0 && kiov->kiov_offset != 0) ||
-                    (resid > kiov->kiov_len &&
-                     (kiov->kiov_offset + kiov->kiov_len) != PAGE_SIZE)) {
-                        /* Can't have gaps */
-                        CERROR ("Can't make payload contiguous in I/O VM:"
-                                "page %d, offset %d, len %d \n",
-                                npages, kiov->kiov_offset, kiov->kiov_len);
+        spin_unlock(&conn->ibc_lock);
 
-                        return -EINVAL;
-                }
+        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
+                CERROR("Error %d posting transmit to %s\n",
+                       rc, libcfs_nid2str(peer->ibp_nid));
+        else
+                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
+                       rc, libcfs_nid2str(peer->ibp_nid));
 
-                tx->tx_pages[npages++] = lnet_page2phys(kiov->kiov_page);
-                resid -= kiov->kiov_len;
-                kiov++;
-                nkiov--;
-        } while (resid > 0);
+        kiblnd_close_conn(conn, rc);
 
-        return kiblnd_map_tx(ni, tx, rd, npages, page_offset, nob);
+        if (done)
+                kiblnd_tx_done(peer->ibp_ni, tx);
+        return -EIO;
 }
-#endif
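
The new kiblnd_post_tx_locked() centralizes the credit checks that the old kiblnd_check_sends() loop performed inline. Below is a minimal standalone sketch of that gate; the types are simplified and the helper name is hypothetical, not the driver's code:

    #include <stdio.h>

    /* Sketch of the credit gate: a tx that consumes a credit cannot be
     * posted when no credits remain, and on a connection without OOB
     * NOOPs the last credit is reserved for returning credits. */
    static int can_post(int consumes_credit, int oob_capable,
                        int credits, int outstanding_credits)
    {
            if (consumes_credit && credits == 0)
                    return 0;       /* no credits: caller sees -EAGAIN */

            if (consumes_credit && !oob_capable &&
                credits == 1 && outstanding_credits == 0)
                    return 0;       /* keep last credit for credit return */

            return 1;
    }

    int main(void)
    {
            printf("%d\n", can_post(1, 0, 1, 0)); /* 0: last credit reserved */
            printf("%d\n", can_post(1, 1, 1, 0)); /* 1: OOB-capable connection */
            printf("%d\n", can_post(0, 0, 0, 0)); /* 1: no-credit message */
            return 0;
    }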
 
 void
 kiblnd_check_sends (kib_conn_t *conn)
 {
-        kib_tx_t          *tx;
-        lnet_ni_t         *ni = conn->ibc_peer->ibp_ni;
-        int                rc;
-        int                consume_cred = 0;
-        struct ib_send_wr *bad_wrq;
-        int                done;
+        int        ver = conn->ibc_version;
+        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+        kib_tx_t  *tx;
 
         /* Don't send anything until after the connection is established */
         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -895,8 +937,9 @@ kiblnd_check_sends (kib_conn_t *conn)
 
         spin_lock(&conn->ibc_lock);
 
-        LASSERT (conn->ibc_nsends_posted <=
-                 *kiblnd_tunables.kib_concurrent_sends);
+        LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
+        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
+                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
         LASSERT (conn->ibc_reserved_credits >= 0);
 
         while (conn->ibc_reserved_credits > 0 &&
@@ -916,163 +959,26 @@ kiblnd_check_sends (kib_conn_t *conn)
                         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
 
                 spin_lock(&conn->ibc_lock);
-
                 if (tx != NULL)
                         kiblnd_queue_tx_locked(tx, conn);
         }
 
         for (;;) {
+                int credit;
+
                 if (!list_empty(&conn->ibc_tx_queue_nocred)) {
-                        tx = list_entry(conn->ibc_tx_queue_nocred.next, 
-                                        kib_tx_t, tx_list);
-                        consume_cred = 0;
-                } else if (!list_empty(&conn->ibc_tx_noops)) {
-                        tx = list_entry(conn->ibc_tx_noops.next,
+                        credit = 0;
+                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
                                         kib_tx_t, tx_list);
-                        consume_cred = 1;
                 } else if (!list_empty(&conn->ibc_tx_queue)) {
+                        credit = 1;
                         tx = list_entry(conn->ibc_tx_queue.next,
                                         kib_tx_t, tx_list);
-                        consume_cred = 1;
-                } else {
-                        /* nothing to send right now */
+                } else
                         break;
-                }
-                
-                LASSERT (tx->tx_queued);
-                /* We rely on this for QP sizing */
-                LASSERT (tx->tx_nwrq > 0 &&
-                         tx->tx_nwrq <= 1 + IBLND_MAX_RDMA_FRAGS);
-
-                LASSERT (conn->ibc_outstanding_credits >= 0);
-                LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE);
-                LASSERT (conn->ibc_credits >= 0);
-                LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE);
-
-                if (conn->ibc_nsends_posted == 
-                    *kiblnd_tunables.kib_concurrent_sends) {
-                        /* tx completions outstanding... */
-                        CDEBUG(D_NET, "%s: posted enough\n",
-                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
-                        break;
-                }
-
-                if (consume_cred) {
-                        if (conn->ibc_credits == 0) {   /* no credits */
-                                CDEBUG(D_NET, "%s: no credits\n",
-                                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
-                                break; /* NB ibc_tx_queue_nocred checked */
-                        }
 
-                        /* Last credit reserved for NOOP */
-                        if (conn->ibc_credits == 1 &&
-                            tx->tx_msg->ibm_type != IBLND_MSG_NOOP) {
-                                CDEBUG(D_NET, "%s: not using last credit\n",
-                                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
-                                break; /* NB ibc_tx_noops checked */
-                        }
-                }
-
-                list_del(&tx->tx_list);
-                tx->tx_queued = 0;
-
-                /* NB don't drop ibc_lock before bumping tx_sending */
-
-                if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP &&
-                    !kiblnd_send_noop(conn)) {
-                        /* redundant NOOP */
-                        spin_unlock(&conn->ibc_lock);
-                        kiblnd_tx_done(ni, tx);
-                        spin_lock(&conn->ibc_lock);
-                        CDEBUG(D_NET, "%s: redundant noop\n",
-                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
-                        continue;
-                }
-
-                kiblnd_pack_msg(ni, tx->tx_msg, conn->ibc_outstanding_credits,
-                                conn->ibc_peer->ibp_nid, conn->ibc_incarnation);
-
-                conn->ibc_outstanding_credits = 0;
-                conn->ibc_nsends_posted++;
-                if (consume_cred)
-                        conn->ibc_credits--;
-
-                /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
-                 * PUT.  If so, it was first queued here as a PUT_REQ, sent and
-                 * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
-                 * and then re-queued here.  It's (just) possible that
-                 * tx_sending is non-zero if we've not done the tx_complete() from
-                 * the first send; hence the ++ rather than = below. */
-                tx->tx_sending++;
-
-                list_add (&tx->tx_list, &conn->ibc_active_txs);
-#if 0
-                {
-                        int i;
-                        
-                        for (i = 0; i < tx->tx_nwrq - 1; i++) {
-                                LASSERT (tx->tx_wrq[i].opcode == IB_WR_RDMA_WRITE);
-                                LASSERT (tx->tx_wrq[i].next == &tx->tx_wrq[i+1]);
-                                LASSERT (tx->tx_wrq[i].sg_list == &tx->tx_sge[i]);
-                        
-                                CDEBUG(D_WARNING, "WORK[%d]: RDMA "LPX64
-                                       " for %d k %x -> "LPX64" k %x\n", i,
-                                       tx->tx_wrq[i].sg_list->addr,
-                                       tx->tx_wrq[i].sg_list->length,
-                                       tx->tx_wrq[i].sg_list->lkey,
-                                       tx->tx_wrq[i].wr.rdma.remote_addr,
-                                       tx->tx_wrq[i].wr.rdma.rkey);
-                        }
-                        
-                        LASSERT (tx->tx_wrq[i].opcode == IB_WR_SEND);
-                        LASSERT (tx->tx_wrq[i].next == NULL);
-                        LASSERT (tx->tx_wrq[i].sg_list == &tx->tx_sge[i]);
-                        
-                        CDEBUG(D_WARNING, "WORK[%d]: SEND "LPX64" for %d k %x\n", i,
-                               tx->tx_wrq[i].sg_list->addr,
-                               tx->tx_wrq[i].sg_list->length,
-                               tx->tx_wrq[i].sg_list->lkey);
-                }
-#endif           
-                /* I'm still holding ibc_lock! */
-                if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
-                        rc = -ECONNABORTED;
-                else
-                        rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq);
-
-                conn->ibc_last_send = jiffies;
-
-                if (rc != 0) {
-                        /* NB credits are transferred in the actual
-                         * message, which can only be the last work item */
-                        conn->ibc_outstanding_credits += tx->tx_msg->ibm_credits;
-                        if (consume_cred)
-                                conn->ibc_credits++;
-                        conn->ibc_nsends_posted--;
-
-                        tx->tx_status = rc;
-                        tx->tx_waiting = 0;
-                        tx->tx_sending--;
-
-                        done = (tx->tx_sending == 0);
-                        if (done)
-                                list_del (&tx->tx_list);
-
-                        spin_unlock(&conn->ibc_lock);
-
-                        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
-                                CERROR("Error %d posting transmit to %s\n",
-                                       rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
-                        else
-                                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
-                                       rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
-
-                        kiblnd_close_conn(conn, rc);
-
-                        if (done)
-                                kiblnd_tx_done(ni, tx);
-                        return;
-                }
+                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
+                        break;
         }
 
         spin_unlock(&conn->ibc_lock);
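
With the separate ibc_tx_noops list gone, the rewritten loop above scans only two queues, draining no-credit messages first. A toy model of that dequeue order (hypothetical types, not the driver's code):

    /* Toy model: no-credit messages always dequeue ahead of
     * credit-consuming ones, so credit returns and OOB NOOPs cannot
     * be starved by queued data traffic. */
    struct toy_queues {
            int nocred;     /* length of ibc_tx_queue_nocred */
            int cred;       /* length of ibc_tx_queue */
    };

    static int toy_next_tx(struct toy_queues *q, int *credit)
    {
            if (q->nocred > 0) { q->nocred--; *credit = 0; return 1; }
            if (q->cred > 0)   { q->cred--;   *credit = 1; return 1; }
            return 0;       /* nothing to send right now */
    }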
@@ -1107,6 +1013,8 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
 
         tx->tx_sending--;
         conn->ibc_nsends_posted--;
+        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
+                conn->ibc_noops_posted--;
 
         if (failed) {
                 tx->tx_waiting = 0;             /* don't wait for peer */
@@ -1138,6 +1046,7 @@ kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
         struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
         struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
         int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
+        struct ib_mr      *mr;
 
         LASSERT (net != NULL);
         LASSERT (tx->tx_nwrq >= 0);
@@ -1146,8 +1055,11 @@ kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 
         kiblnd_init_msg(tx->tx_msg, type, body_nob);
 
-        sge->addr = tx->tx_msgaddr;
-        sge->lkey = net->ibn_dev->ibd_mr->lkey;
+        mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
+        LASSERT (mr != NULL);
+
+        sge->lkey   = mr->lkey;
+        sge->addr   = tx->tx_msgaddr;
         sge->length = nob;
 
         memset(wrq, 0, sizeof(*wrq));
@@ -1163,46 +1075,17 @@ kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 }
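
kiblnd_init_tx_msg() now looks up the MR covering the message buffer via kiblnd_find_dma_mr() instead of assuming a single device-wide MR. That function's body is not part of this hunk; the sketch below is only a plausible range lookup under that assumption, with hypothetical types:

    struct toy_mr {
            unsigned long long base; /* start of registered DMA range */
            unsigned long long size; /* bytes covered */
            void              *mr;   /* the ib_mr handle */
    };

    /* Return the MR whose registered range covers [addr, addr + nob). */
    static void *toy_find_dma_mr(struct toy_mr *mrs, int n,
                                 unsigned long long addr, int nob)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (addr >= mrs[i].base &&
                        addr + nob <= mrs[i].base + mrs[i].size)
                            return mrs[i].mr;
            return (void *)0;        /* no MR covers the buffer */
    }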
 
 int
-kiblnd_init_rdma (lnet_ni_t *ni, kib_tx_t *tx, int type,
-                  int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
+                  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
 {
         kib_msg_t         *ibmsg = tx->tx_msg;
         kib_rdma_desc_t   *srcrd = tx->tx_rd;
         struct ib_sge     *sge = &tx->tx_sge[0];
         struct ib_send_wr *wrq = &tx->tx_wrq[0];
-        int                rc = nob;
-
-#if IBLND_MAP_ON_DEMAND
-        LASSERT (!in_interrupt());
-        LASSERT (tx->tx_nwrq == 0);
-        LASSERT (type == IBLND_MSG_GET_DONE ||
-                 type == IBLND_MSG_PUT_DONE);
-
-        sge->addr = srcrd->rd_addr;
-        sge->lkey = srcrd->rd_key;
-        sge->length = nob;
-
-        wrq = &tx->tx_wrq[0];
-
-        wrq->next       = &tx->tx_wrq[1];
-        wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
-        wrq->sg_list    = sge;
-        wrq->num_sge    = 1;
-        wrq->opcode     = IB_WR_RDMA_WRITE;
-        wrq->send_flags = 0;
-
-        wrq->wr.rdma.remote_addr = dstrd->rd_addr;
-        wrq->wr.rdma.rkey        = dstrd->rd_key;
-
-        tx->tx_nwrq = 1;
-#else
-        /* CAVEAT EMPTOR: this 'consumes' the frags in 'dstrd' */
-        int              resid = nob;
-        kib_rdma_frag_t *srcfrag;
-        int              srcidx;
-        kib_rdma_frag_t *dstfrag;
-        int              dstidx;
-        int              wrknob;
+        int                rc  = resid;
+        int                srcidx;
+        int                dstidx;
+        int                wrknob;
 
         LASSERT (!in_interrupt());
         LASSERT (tx->tx_nwrq == 0);
@@ -1210,8 +1093,6 @@ kiblnd_init_rdma (lnet_ni_t *ni, kib_tx_t *tx, int type,
                  type == IBLND_MSG_PUT_DONE);
 
         srcidx = dstidx = 0;
-        srcfrag = &srcrd->rd_frags[0];
-        dstfrag = &dstrd->rd_frags[0];
 
         while (resid > 0) {
                 if (srcidx >= srcrd->rd_nfrags) {
@@ -1219,27 +1100,31 @@ kiblnd_init_rdma (lnet_ni_t *ni, kib_tx_t *tx, int type,
                         rc = -EPROTO;
                         break;
                 }
-                
+
                 if (dstidx == dstrd->rd_nfrags) {
                         CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                         rc = -EPROTO;
                         break;
                 }
 
-                if (tx->tx_nwrq == IBLND_MAX_RDMA_FRAGS) {
-                        CERROR("RDMA too fragmented: %d/%d src %d/%d dst frags\n",
+                if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
+                        CERROR("RDMA too fragmented for %s (%d): "
+                               "%d/%d src %d/%d dst frags\n",
+                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                               IBLND_RDMA_FRAGS(conn->ibc_version),
                                srcidx, srcrd->rd_nfrags,
                                dstidx, dstrd->rd_nfrags);
                         rc = -EMSGSIZE;
                         break;
                 }
 
-                wrknob = MIN(MIN(srcfrag->rf_nob, dstfrag->rf_nob), resid);
+                wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
+                                 kiblnd_rd_frag_size(dstrd, dstidx)), resid);
 
                 sge = &tx->tx_sge[tx->tx_nwrq];
-                sge->addr   = srcfrag->rf_addr;
+                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
+                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                 sge->length = wrknob;
-                sge->lkey   = srcrd->rd_key;
 
                 wrq = &tx->tx_wrq[tx->tx_nwrq];
 
@@ -1250,38 +1135,26 @@ kiblnd_init_rdma (lnet_ni_t *ni, kib_tx_t *tx, int type,
                 wrq->opcode     = IB_WR_RDMA_WRITE;
                 wrq->send_flags = 0;
 
-                wrq->wr.rdma.remote_addr = dstfrag->rf_addr;
-                wrq->wr.rdma.rkey        = dstrd->rd_key;
+                wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
+                wrq->wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);
 
-                wrq++;
-                sge++;
+                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
+                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
 
                 resid -= wrknob;
-                if (wrknob < srcfrag->rf_nob) {
-                        srcfrag->rf_nob  -= wrknob;
-                        srcfrag->rf_addr += wrknob;
-                } else {
-                        srcfrag++;
-                        srcidx++;
-                }
-                
-                if (wrknob < dstfrag->rf_nob) {
-                        dstfrag->rf_nob  -= wrknob;
-                        dstfrag->rf_addr += wrknob;
-                } else {
-                        dstfrag++;
-                        dstidx++;
-                }
 
                 tx->tx_nwrq++;
+                wrq++;
+                sge++;
         }
 
         if (rc < 0)                             /* no RDMA if completing with failure */
                 tx->tx_nwrq = 0;
-#endif
+
         ibmsg->ibm_u.completion.ibcm_status = rc;
         ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
-        kiblnd_init_tx_msg(ni, tx, type, sizeof (kib_completion_msg_t));
+        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
+                           type, sizeof (kib_completion_msg_t));
 
         return rc;
 }
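
The #if IBLND_MAP_ON_DEMAND split above is gone: a single loop now walks both RDMA descriptors through accessor helpers instead of mutating the fragment arrays in place. The standalone toy below (hypothetical buffer sizes) replays the walk and shows why the work-request count can exceed either side's fragment count:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            int src[] = {4096, 4096};   /* source fragment sizes */
            int dst[] = {1024, 7168};   /* destination fragment sizes */
            int si = 0, soff = 0;       /* src index and offset */
            int di = 0, doff = 0;       /* dst index and offset */
            int resid = 8192, nwrq = 0;

            while (resid > 0) {
                    /* each work request covers the smallest remainder */
                    int wrknob = MIN(MIN(src[si] - soff,
                                         dst[di] - doff), resid);

                    printf("wrq[%d]: %d bytes (src frag %d, dst frag %d)\n",
                           nwrq++, wrknob, si, di);

                    soff += wrknob; if (soff == src[si]) { si++; soff = 0; }
                    doff += wrknob; if (doff == dst[di]) { di++; doff = 0; }
                    resid -= wrknob;
            }
            return 0;       /* prints 3 work requests for 2+2 fragments */
    }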
@@ -1293,6 +1166,7 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
 
         LASSERT (tx->tx_nwrq > 0);              /* work items set up */
         LASSERT (!tx->tx_queued);               /* not queued for sending already */
+        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
         tx->tx_queued = 1;
         tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
@@ -1324,7 +1198,10 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
                 break;
 
         case IBLND_MSG_NOOP:
-                q = &conn->ibc_tx_noops;
+                if (IBLND_OOB_CAPABLE(conn->ibc_version))
+                        q = &conn->ibc_tx_queue_nocred;
+                else
+                        q = &conn->ibc_tx_queue;
                 break;
 
         case IBLND_MSG_IMMEDIATE:
@@ -1443,7 +1320,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 } else {
                         conn = kiblnd_get_conn_locked(peer);
                         kiblnd_conn_addref(conn); /* 1 ref for me... */
-                        
+
                         write_unlock_irqrestore(g_lock, flags);
 
                         if (tx != NULL)
@@ -1569,8 +1446,6 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                 }
 
                 ibmsg = tx->tx_msg;
-                ibmsg->ibm_u.get.ibgm_hdr = *hdr;
-                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
 
                 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
                         rc = kiblnd_setup_rd_iov(ni, tx,
@@ -1590,11 +1465,11 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                         kiblnd_tx_done(ni, tx);
                         return -EIO;
                 }
-#if IBLND_MAP_ON_DEMAND
-                nob = sizeof(kib_get_msg_t);
-#else
+
                 nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
-#endif
+                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
+                ibmsg->ibm_u.get.ibgm_hdr = *hdr;
+
                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
 
                 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
@@ -1719,7 +1594,8 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
                 goto failed_1;
         }
 
-        rc = kiblnd_init_rdma(ni, tx, IBLND_MSG_GET_DONE, nob,
+        rc = kiblnd_init_rdma(rx->rx_conn, tx,
+                              IBLND_MSG_GET_DONE, nob,
                               &rx->rx_msg->ibm_u.get.ibgm_rd,
                               rx->rx_msg->ibm_u.get.ibgm_cookie);
         if (rc < 0) {
@@ -1815,7 +1691,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                                                  &txmsg->ibm_u.putack.ibpam_rd,
                                                  niov, iov, offset, mlen);
                 else
-                        rc = kiblnd_setup_rd_kiov(ni, tx, 
+                        rc = kiblnd_setup_rd_kiov(ni, tx,
                                                   &txmsg->ibm_u.putack.ibpam_rd,
                                                   niov, kiov, offset, mlen);
                 if (rc != 0) {
@@ -1828,13 +1704,10 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                         break;
                 }
 
+                nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
                 txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
                 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
-#if IBLND_MAP_ON_DEMAND
-                nob = sizeof(kib_putack_msg_t);
-#else
-                nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
-#endif
+
                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
 
                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
@@ -1937,7 +1810,6 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 return; /* already being handled  */
 
         if (error == 0 &&
-            list_empty(&conn->ibc_tx_noops) &&
             list_empty(&conn->ibc_tx_queue) &&
             list_empty(&conn->ibc_tx_queue_rsrvd) &&
             list_empty(&conn->ibc_tx_queue_nocred) &&
@@ -1945,16 +1817,15 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 CDEBUG(D_NET, "closing conn to %s\n", 
                        libcfs_nid2str(peer->ibp_nid));
         } else {
-                CDEBUG(D_NETERROR, "Closing conn to %s: error %d%s%s%s%s%s\n",
+                CDEBUG(D_NETERROR, "Closing conn to %s: error %d%s%s%s%s\n",
                        libcfs_nid2str(peer->ibp_nid), error,
                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
-                       list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
                        list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
                        list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
         }
 
-        list_del (&conn->ibc_list);
+        list_del(&conn->ibc_list);
         /* connd (see below) takes over ibc_list's ref */
 
         if (list_empty (&peer->ibp_conns) &&    /* no more conns */
@@ -2013,7 +1884,7 @@ kiblnd_handle_early_rxs(kib_conn_t *conn)
 void
 kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
 {
-        LIST_HEAD           (zombies); 
+        LIST_HEAD           (zombies);
         struct list_head    *tmp;
         struct list_head    *nxt;
         kib_tx_t            *tx;
@@ -2063,7 +1934,6 @@ kiblnd_finalise_conn (kib_conn_t *conn)
         /* Complete all tx descs not waiting for sends to complete.
          * NB we should be safe from RDMA now that the QP has changed state */
 
-        kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
@@ -2128,17 +1998,17 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
 void
 kiblnd_connreq_done(kib_conn_t *conn, int status)
 {
-        struct list_head   txs;
-
         kib_peer_t        *peer = conn->ibc_peer;
-        int                active;
-        unsigned long      flags;
         kib_tx_t          *tx;
+        struct list_head   txs;
+        unsigned long      flags;
+        int                active;
 
         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 
-        CDEBUG(D_NET,"%s: %d, %d\n", libcfs_nid2str(peer->ibp_nid), 
-               active, status);
+        CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
+               libcfs_nid2str(peer->ibp_nid), active,
+               conn->ibc_version, status);
 
         LASSERT (!in_interrupt());
         LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
@@ -2172,7 +2042,18 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
         else
                 peer->ibp_accepting--;
 
-        kiblnd_close_stale_conns_locked(peer, conn->ibc_incarnation);
+        if (peer->ibp_version == 0) {
+                peer->ibp_version     = conn->ibc_version;
+                peer->ibp_incarnation = conn->ibc_incarnation;
+        }
+
+        if (peer->ibp_version     != conn->ibc_version ||
+            peer->ibp_incarnation != conn->ibc_incarnation) {
+                kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
+                                                conn->ibc_incarnation);
+                peer->ibp_version     = conn->ibc_version;
+                peer->ibp_incarnation = conn->ibc_incarnation;
+        }
 
         /* grab pending txs while I have the lock */
         list_add(&txs, &peer->ibp_tx_queue);
@@ -2197,7 +2078,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
         spin_lock (&conn->ibc_lock);
         while (!list_empty (&txs)) {
                 tx = list_entry (txs.next, kib_tx_t, tx_list);
-                list_del (&tx->tx_list);
+                list_del(&tx->tx_list);
 
                 kiblnd_queue_tx_locked(tx, conn);
         }
@@ -2210,14 +2091,11 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
 }
 
 void
-kiblnd_reject(struct rdma_cm_id *cmid, int why)
+kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
 {
         int          rc;
-        kib_rej_t    rej = {.ibr_magic   = IBLND_MSG_MAGIC,
-                            .ibr_version = IBLND_MSG_VERSION,
-                            .ibr_why     = why};
 
-        rc = rdma_reject(cmid, &rej, sizeof(rej));
+        rc = rdma_reject(cmid, rej, sizeof(*rej));
 
         if (rc != 0)
                 CWARN("Error %d sending reject\n", rc);
@@ -2226,19 +2104,21 @@ kiblnd_reject(struct rdma_cm_id *cmid, int why)
 int
 kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
 {
-        kib_msg_t             *ackmsg;
-        kib_msg_t             *reqmsg = priv;
         rwlock_t              *g_lock = &kiblnd_data.kib_global_lock;
-        struct rdma_conn_param cp;
-        unsigned long          flags;
-        lnet_ni_t             *ni = NULL;
+        kib_msg_t             *reqmsg = priv;
+        kib_msg_t             *ackmsg;
         kib_dev_t             *ibdev;
         kib_peer_t            *peer;
         kib_peer_t            *peer2;
         kib_conn_t            *conn;
+        lnet_ni_t             *ni  = NULL;
+        kib_net_t             *net = NULL;
         lnet_nid_t             nid;
+        struct rdma_conn_param cp;
+        kib_rej_t              rej;
+        int                    version = IBLND_MSG_VERSION;
+        unsigned long          flags;
         int                    rc;
-        int                    rej = IBLND_REJECT_FATAL;
 
         LASSERT (!in_interrupt());
 
@@ -2246,25 +2126,33 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         ibdev = (kib_dev_t *)cmid->context;
         LASSERT (ibdev != NULL);
 
+        memset(&rej, 0, sizeof(rej));
+        rej.ibr_magic                = IBLND_MSG_MAGIC;
+        rej.ibr_why                  = IBLND_REJECT_FATAL;
+        rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
+
         if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
                 CERROR("Short connection request\n");
                 goto failed;
         }
 
+        /* Future protocol version compatibility support!  If the
+         * o2iblnd-specific protocol changes, or when LNET unifies
+         * protocols over all LNDs, the initial connection will
+         * negotiate a protocol version.  I trap this here to avoid
+         * console errors; the reject tells the peer which protocol I
+         * speak. */
         if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
-            reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC) ||
-            (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
-             reqmsg->ibm_version != IBLND_MSG_VERSION) ||
-            (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
-             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION))) {
-                /* Future protocol version compatibility support!  If the
-                 * o2iblnd-specific protocol changes, or when LNET unifies
-                 * protocols over all LNDs, the initial connection will
-                 * negotiate a protocol version.  I trap this here to avoid
-                 * console errors; the reject tells the peer which protocol I
-                 * speak. */
+            reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
+                goto failed;
+        if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
+            reqmsg->ibm_version != IBLND_MSG_VERSION &&
+            reqmsg->ibm_version != IBLND_MSG_VERSION_1)
+                goto failed;
+        if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
+            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
+            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
                 goto failed;
-        }
 
         rc = kiblnd_unpack_msg(reqmsg, priv_nob);
         if (rc != 0) {
@@ -2273,6 +2161,33 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         }
 
         nid = reqmsg->ibm_srcnid;
+        ni  = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
+
+        if (ni != NULL) {
+                net = (kib_net_t *)ni->ni_data;
+                rej.ibr_incarnation = net->ibn_incarnation;
+        }
+
+        if (ni == NULL ||                         /* no matching net */
+            ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
+            net->ibn_dev != ibdev) {              /* wrong device */
+                CERROR("Can't accept %s: bad dst nid %s\n",
+                       libcfs_nid2str(nid),
+                       libcfs_nid2str(reqmsg->ibm_dstnid));
+
+                goto failed;
+        }
+
+        /* check the timestamp as soon as possible */
+        if (reqmsg->ibm_dststamp != 0 &&
+            reqmsg->ibm_dststamp != net->ibn_incarnation) {
+                CWARN("Stale connection request\n");
+                rej.ibr_why = IBLND_REJECT_CONN_STALE;
+                goto failed;
+        }
+
+        /* I can accept peer's version */
+        version = reqmsg->ibm_version;
 
         if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
                 CERROR("Unexpected connreq msg type: %x from %s\n",
@@ -2280,20 +2195,31 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 goto failed;
         }
 
-        if (reqmsg->ibm_u.connparams.ibcp_queue_depth != IBLND_MSG_QUEUE_SIZE) {
+        if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
+            IBLND_MSG_QUEUE_SIZE(version)) {
                 CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
-                       libcfs_nid2str(nid),
-                       reqmsg->ibm_u.connparams.ibcp_queue_depth,
-                       IBLND_MSG_QUEUE_SIZE);
+                       libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
+                       IBLND_MSG_QUEUE_SIZE(version));
+
+                if (version == IBLND_MSG_VERSION)
+                        rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
+
                 goto failed;
         }
 
-        if (reqmsg->ibm_u.connparams.ibcp_max_frags != IBLND_MAX_RDMA_FRAGS) {
-                CERROR("Can't accept %s: incompatible max_frags %d (%d wanted)\n",
-                       libcfs_nid2str(nid),
+        if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
+            IBLND_RDMA_FRAGS(version)) {
+                CERROR("Can't accept %s(version %x): "
+                       "incompatible max_frags %d (%d wanted)\n",
+                       libcfs_nid2str(nid), version,
                        reqmsg->ibm_u.connparams.ibcp_max_frags,
-                       IBLND_MAX_RDMA_FRAGS);
+                       IBLND_RDMA_FRAGS(version));
+
+                if (version == IBLND_MSG_VERSION)
+                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+
                 goto failed;
         }
 
         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
@@ -2304,22 +2230,11 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 goto failed;
         }
 
-        ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
-        if (ni == NULL ||                               /* no matching net */
-            ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
-            ((kib_net_t*)ni->ni_data)->ibn_dev != ibdev) { /* wrong device */
-                CERROR("Can't accept %s: bad dst nid %s\n",
-                       libcfs_nid2str(nid),
-                       libcfs_nid2str(reqmsg->ibm_dstnid));
-
-                goto failed;
-        }
-        
         /* assume 'nid' is a new peer; create  */
         rc = kiblnd_create_peer(ni, &peer, nid);
         if (rc != 0) {
                 CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
-                rej = IBLND_REJECT_NO_RESOURCES;
+                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                 goto failed;
         }
 
@@ -2327,16 +2242,34 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
 
         peer2 = kiblnd_find_peer_locked(nid);
         if (peer2 != NULL) {
-                /* tie-break connection race in favour of the higher NID */                
+                if (peer2->ibp_version == 0) {
+                        peer2->ibp_version     = version;
+                        peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+                }
+
+                /* not the peer I've been talking to */
+                if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
+                    peer2->ibp_version     != version) {
+                        kiblnd_close_peer_conns_locked(peer2, -ESTALE);
+                        write_unlock_irqrestore(g_lock, flags);
+
+                        CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
+                              libcfs_nid2str(nid), peer2->ibp_version, version);
+
+                        kiblnd_peer_decref(peer);
+                        rej.ibr_why = IBLND_REJECT_CONN_STALE;
+                        goto failed;
+                }
+
+                /* tie-break connection race in favour of the higher NID */
                 if (peer2->ibp_connecting != 0 &&
                     nid < ni->ni_nid) {
                         write_unlock_irqrestore(g_lock, flags);
 
-                        CWARN("Conn race %s\n",
-                              libcfs_nid2str(peer2->ibp_nid));
+                        CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
 
                         kiblnd_peer_decref(peer);
-                        rej = IBLND_REJECT_CONN_RACE;
+                        rej.ibr_why = IBLND_REJECT_CONN_RACE;
                         goto failed;
                 }
 
@@ -2349,10 +2282,15 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         } else {
                 /* Brand new peer */
                 LASSERT (peer->ibp_accepting == 0);
-                peer->ibp_accepting = 1;
+                LASSERT (peer->ibp_version == 0 &&
+                         peer->ibp_incarnation == 0);
+
+                peer->ibp_accepting   = 1;
+                peer->ibp_version     = version;
+                peer->ibp_incarnation = reqmsg->ibm_srcstamp;
 
                 /* I have a ref on ni that prevents it being shutdown */
-                LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+                LASSERT (net->ibn_shutdown == 0);
 
                 kiblnd_peer_addref(peer);
                 list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
@@ -2360,11 +2298,11 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 write_unlock_irqrestore(g_lock, flags);
         }
 
-        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT);
+        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
         if (conn == NULL) {
                 kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
                 kiblnd_peer_decref(peer);
-                rej = IBLND_REJECT_NO_RESOURCES;
+                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                 goto failed;
         }
 
@@ -2372,20 +2310,21 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
          * CM callback doesn't destroy cmid. */
 
         conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
-        conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE;
-        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE;
-        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits
-                 <= IBLND_RX_MSGS);
+        conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
+        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
+        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits +
+                 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(version));
 
         ackmsg = &conn->ibc_connvars->cv_msg;
         memset(ackmsg, 0, sizeof(*ackmsg));
 
         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
                         sizeof(ackmsg->ibm_u.connparams));
-        ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE;
-        ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_MAX_RDMA_FRAGS;
+        ackmsg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-        kiblnd_pack_msg(ni, ackmsg, 0, nid, reqmsg->ibm_srcstamp);
+        ackmsg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
+
+        kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
 
         memset(&cp, 0, sizeof(cp));
         cp.private_data        = ackmsg;
@@ -2401,7 +2340,10 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         rc = rdma_accept(cmid, &cp);
         if (rc != 0) {
                 CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
-                kiblnd_reject(cmid, IBLND_REJECT_FATAL);
+                rej.ibr_version = version;
+                rej.ibr_why     = IBLND_REJECT_FATAL;
+
+                kiblnd_reject(cmid, &rej);
                 kiblnd_connreq_done(conn, rc);
                 kiblnd_conn_decref(conn);
         }
@@ -2413,17 +2355,23 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         if (ni != NULL)
                 lnet_ni_decref(ni);
 
-        kiblnd_reject(cmid, rej);
+        rej.ibr_version = version;
+        rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
+        rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
+        kiblnd_reject(cmid, &rej);
+
         return -ECONNREFUSED;
 }
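
kiblnd_reject() now takes a whole kib_rej_t instead of a bare reason code, so the passive side can report its protocol version, incarnation, and connection parameters to an incompatible peer. A sketch of filling one in, assuming the kib_rej_t layout used above (the helper itself is hypothetical):

    /* Hypothetical helper; assumes the o2iblnd.h types from this patch. */
    static void fill_reject(kib_rej_t *rej, int version,
                            __u64 incarnation, int why)
    {
            memset(rej, 0, sizeof(*rej));
            rej->ibr_magic                = IBLND_MSG_MAGIC;
            rej->ibr_version              = version;     /* protocol I speak */
            rej->ibr_incarnation          = incarnation; /* for staleness checks */
            rej->ibr_why                  = why;
            rej->ibr_cp.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
            rej->ibr_cp.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
            rej->ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
    }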
 
 void
-kiblnd_reconnect (kib_conn_t *conn, char *why)
+kiblnd_reconnect (kib_conn_t *conn, int version,
+                  __u64 incarnation, int why, kib_connparams_t *cp)
 {
         kib_peer_t    *peer = conn->ibc_peer;
+        char          *reason;
         int            retry = 0;
         unsigned long  flags;
-        
+
         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
         LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */
 
@@ -2436,15 +2384,43 @@ kiblnd_reconnect (kib_conn_t *conn, char *why)
             peer->ibp_accepting == 0) {
                 retry = 1;
                 peer->ibp_connecting++;
+
+                peer->ibp_version     = version;
+                peer->ibp_incarnation = incarnation;
         }
-        
+
         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-        if (retry) {
-                CDEBUG(D_NETERROR, "%s: retrying (%s)\n", 
-                       libcfs_nid2str(peer->ibp_nid), why);
-                kiblnd_connect_peer(peer);
+        if (!retry)
+                return;
+
+        switch (why) {
+        default:
+                reason = "Unknown";
+                break;
+
+        case IBLND_REJECT_CONN_STALE:
+                reason = "stale";
+                break;
+
+        case IBLND_REJECT_CONN_RACE:
+                reason = "conn race";
+                break;
+
+        case IBLND_REJECT_CONN_UNCOMPAT:
+                reason = "version negotiation";
+                break;
         }
+
+        CDEBUG(D_NETERROR, "%s: retrying (%s), my version: %x, "
+                           "peer version: %x, queue_dep: %d, "
+                           "max_frag: %d, msg_size: %d\n",
+               libcfs_nid2str(peer->ibp_nid),
+               reason, IBLND_MSG_VERSION, version,
+               cp != NULL ? cp->ibcp_queue_depth  : IBLND_MSG_QUEUE_SIZE(version),
+               cp != NULL ? cp->ibcp_max_frags    : IBLND_RDMA_FRAGS(version),
+               cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
+
+        kiblnd_connect_peer(peer);
 }
 
 void
@@ -2457,17 +2433,51 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
 
         switch (reason) {
         case IB_CM_REJ_STALE_CONN:
-                kiblnd_reconnect(conn, "stale");
+                kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
+                                 IBLND_REJECT_CONN_STALE, NULL);
                 break;
 
         case IB_CM_REJ_CONSUMER_DEFINED:
-                if (priv_nob >= sizeof(kib_rej_t)) {
-                        kib_rej_t *rej = priv;
+                if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
+                        kib_rej_t        *rej         = priv;
+                        kib_connparams_t *cp          = NULL;
+                        int               flip        = 0;
+                        __u64             incarnation = -1;
+
+                        /* NB. the default incarnation is -1 because:
+                         * a) V1 ignores the dst incarnation in the connreq.
+                         * b) V2 provides its incarnation when rejecting me,
+                         *    so the -1 gets overwritten.
+                         *
+                         * If I try to connect to a V1 peer with the V2
+                         * protocol, it rejects me, then upgrades to V2; I
+                         * know nothing about the upgrade and try to
+                         * reconnect with V1.  In this case the upgraded V2
+                         * peer can tell I'm trying to talk to the old
+                         * version and reject me (incarnation is -1). */
 
                         if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
                             rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
                                 __swab32s(&rej->ibr_magic);
                                 __swab16s(&rej->ibr_version);
+                                flip = 1;
+                        }
+
+                        if (priv_nob >= sizeof(kib_rej_t) &&
+                            rej->ibr_version > IBLND_MSG_VERSION_1) {
+                                /* priv_nob is always 148 in the current
+                                 * version of OFED (see the define of
+                                 * IB_CM_REJ_PRIVATE_DATA_SIZE), so we
+                                 * still need to check the version. */
+                                cp = &rej->ibr_cp;
+
+                                if (flip) {
+                                        __swab64s(&rej->ibr_incarnation);
+                                        __swab16s(&cp->ibcp_queue_depth);
+                                        __swab16s(&cp->ibcp_max_frags);
+                                        __swab32s(&cp->ibcp_max_msg_size);
+                                }
+
+                                incarnation = rej->ibr_incarnation;
                         }
 
                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
@@ -2476,27 +2486,54 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                                        libcfs_nid2str(peer->ibp_nid));
                                 break;
                         }
-                        
-                        if (rej->ibr_version != IBLND_MSG_VERSION) {
-                                CERROR("%s rejected: o2iblnd version %d error\n",
+
+                        if (rej->ibr_version != IBLND_MSG_VERSION &&
+                            rej->ibr_version != IBLND_MSG_VERSION_1) {
+                                CERROR("%s rejected: o2iblnd version %x error\n",
                                        libcfs_nid2str(peer->ibp_nid),
                                        rej->ibr_version);
                                 break;
                         }
-                        
+
+                        if (rej->ibr_why     == IBLND_REJECT_FATAL &&
+                            rej->ibr_version == IBLND_MSG_VERSION_1) {
+                                CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
+                                       libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
+
+                                if (conn->ibc_version != IBLND_MSG_VERSION_1)
+                                        rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
+                        }
+
                         switch (rej->ibr_why) {
                         case IBLND_REJECT_CONN_RACE:
-                                kiblnd_reconnect(conn, "conn race");
+                        case IBLND_REJECT_CONN_STALE:
+                        case IBLND_REJECT_CONN_UNCOMPAT:
+                                kiblnd_reconnect(conn, rej->ibr_version,
+                                                 incarnation, rej->ibr_why, cp);
+                                break;
+
+                        case IBLND_REJECT_MSG_QUEUE_SIZE:
+                                CERROR("%s rejected: incompatible message "
+                                       "queue depth %d (%d wanted)\n",
+                                       libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth,
+                                       IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+                                break;
+
+                        case IBLND_REJECT_RDMA_FRAGS:
+                                CERROR("%s rejected: incompatible # of RDMA "
+                                       "fragments %d (%d wanted)\n",
+                                       libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags,
+                                       IBLND_RDMA_FRAGS(conn->ibc_version));
                                 break;
-                                
+
                         case IBLND_REJECT_NO_RESOURCES:
                                 CERROR("%s rejected: o2iblnd no resources\n",
                                        libcfs_nid2str(peer->ibp_nid));
                                 break;
+
                         case IBLND_REJECT_FATAL:
                                 CERROR("%s rejected: o2iblnd fatal error\n",
                                        libcfs_nid2str(peer->ibp_nid));
                                 break;
+
                         default:
                                 CERROR("%s rejected: o2iblnd reason %d\n",
                                        libcfs_nid2str(peer->ibp_nid),
@@ -2519,10 +2556,11 @@ void
 kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
 {
         kib_peer_t    *peer = conn->ibc_peer;
-        lnet_ni_t     *ni = peer->ibp_ni;
-        kib_net_t     *net = ni->ni_data;
-        kib_msg_t     *msg = priv;
-        int            rc = kiblnd_unpack_msg(msg, priv_nob);
+        lnet_ni_t     *ni   = peer->ibp_ni;
+        kib_net_t     *net  = ni->ni_data;
+        kib_msg_t     *msg  = priv;
+        int            ver  = conn->ibc_version;
+        int            rc   = kiblnd_unpack_msg(msg, priv_nob);
         unsigned long  flags;
 
         LASSERT (net != NULL);
@@ -2540,20 +2578,30 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
                 goto failed;
         }
 
-        if (msg->ibm_u.connparams.ibcp_queue_depth != IBLND_MSG_QUEUE_SIZE) {
+        if (ver != msg->ibm_version) {
+                CERROR("%s replied version %x, which differs from "
+                       "the requested version %x\n",
+                       libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
+                rc = -EPROTO;
+                goto failed;
+        }
+
+        if (msg->ibm_u.connparams.ibcp_queue_depth !=
+            IBLND_MSG_QUEUE_SIZE(ver)) {
                 CERROR("%s has incompatible queue depth %d(%d wanted)\n",
                        libcfs_nid2str(peer->ibp_nid),
                        msg->ibm_u.connparams.ibcp_queue_depth,
-                       IBLND_MSG_QUEUE_SIZE);
+                       IBLND_MSG_QUEUE_SIZE(ver));
                 rc = -EPROTO;
                 goto failed;
         }
 
-        if (msg->ibm_u.connparams.ibcp_max_frags != IBLND_MAX_RDMA_FRAGS) {
+        if (msg->ibm_u.connparams.ibcp_max_frags !=
+            IBLND_RDMA_FRAGS(ver)) {
                 CERROR("%s has incompatible max_frags %d (%d wanted)\n",
                        libcfs_nid2str(peer->ibp_nid),
                        msg->ibm_u.connparams.ibcp_max_frags,
-                       IBLND_MAX_RDMA_FRAGS);
+                       IBLND_RDMA_FRAGS(ver));
                 rc = -EPROTO;
                 goto failed;
         }
@@ -2576,16 +2624,18 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         if (rc != 0) {
-                CERROR("Stale connection reply from %s\n",
-                       libcfs_nid2str(peer->ibp_nid));
+                CERROR("Bad connection reply from %s, rc = %d, "
+                       "version: %x, max_frags: %d\n",
+                       libcfs_nid2str(peer->ibp_nid), rc,
+                       msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
                 goto failed;
         }
 
         conn->ibc_incarnation      = msg->ibm_srcstamp;
-        conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE;
-        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE;
-        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits
-                 <= IBLND_RX_MSGS);
+        conn->ibc_credits          =
+        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
+        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits +
+                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(ver));
 
         kiblnd_connreq_done(conn, 0);
         return;
@@ -2608,9 +2658,19 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
         kib_conn_t              *conn;
         kib_msg_t               *msg;
         struct rdma_conn_param   cp;
+        int                      version;
+        __u64                    incarnation;
+        unsigned long            flags;
         int                      rc;
 
-        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT);
+        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+        incarnation = peer->ibp_incarnation;
+        version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
+
+        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
         if (conn == NULL) {
                 kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
                 kiblnd_peer_decref(peer); /* lose cmid's ref */
@@ -2625,11 +2685,13 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
 
         memset(msg, 0, sizeof(*msg));
         kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
-        msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE;
-        msg->ibm_u.connparams.ibcp_max_frags = IBLND_MAX_RDMA_FRAGS;
+        msg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
+        msg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
         msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-        kiblnd_pack_msg(peer->ibp_ni, msg, 0, peer->ibp_nid, 0);
-        
+
+        kiblnd_pack_msg(peer->ibp_ni, msg, version,
+                        0, peer->ibp_nid, incarnation);
+
         memset(&cp, 0, sizeof(cp));
         cp.private_data        = msg;
         cp.private_data_len    = msg->ibm_nob;
@@ -2641,7 +2703,7 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
 
         LASSERT(cmid->context == (void *)conn);
         LASSERT(conn->ibc_cmid == cmid);
-        
+
         rc = rdma_connect(cmid, &cp);
         if (rc != 0) {
                 CERROR("Can't connect to %s: %d\n",
@@ -2833,6 +2895,9 @@ kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
 
                 if (time_after_eq (jiffies, tx->tx_deadline)) {
                         timed_out = 1;
+                        CERROR("Timed out tx: %s, %lu seconds\n",
+                               kiblnd_queue2str(conn, txs),
+                               cfs_duration_sec(jiffies - tx->tx_deadline));
                         break;
                 }
         }
@@ -2845,7 +2910,6 @@ int
 kiblnd_conn_timed_out (kib_conn_t *conn)
 {
         return  kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
-                kiblnd_check_txs(conn, &conn->ibc_tx_noops) ||
                 kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
                 kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
                 kiblnd_check_txs(conn, &conn->ibc_active_txs);
@@ -2892,8 +2956,10 @@ kiblnd_check_conns (int idx)
                         read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                                flags);
 
-                        CERROR("Timed out RDMA with %s\n",
-                               libcfs_nid2str(peer->ibp_nid));
+                        CERROR("Timed out RDMA with %s (%lu)\n",
+                               libcfs_nid2str(peer->ibp_nid),
+                               cfs_duration_sec(cfs_time_current() -
+                                                peer->ibp_last_alive));
 
                         kiblnd_close_conn(conn, -ETIMEDOUT);
                         kiblnd_conn_decref(conn); /* ...until here */
@@ -2946,7 +3012,7 @@ kiblnd_connd (void *arg)
                 if (!list_empty (&kiblnd_data.kib_connd_zombies)) {
                         conn = list_entry (kiblnd_data.kib_connd_zombies.next,
                                            kib_conn_t, ibc_list);
-                        list_del (&conn->ibc_list);
+                        list_del(&conn->ibc_list);
 
                         spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
                         dropped_lock = 1;
@@ -2959,7 +3025,7 @@ kiblnd_connd (void *arg)
                 if (!list_empty (&kiblnd_data.kib_connd_conns)) {
                         conn = list_entry (kiblnd_data.kib_connd_conns.next,
                                            kib_conn_t, ibc_list);
-                        list_del (&conn->ibc_list);
+                        list_del(&conn->ibc_list);
 
                         spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
                         dropped_lock = 1;
@@ -3152,7 +3218,7 @@ kiblnd_scheduler(void *arg)
                         LASSERT(conn->ibc_scheduled);
                         list_del(&conn->ibc_sched_list);
                         conn->ibc_ready = 0;
-                        
+
                         spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                                flags);
 
index 11faf4e..45a8efb 100644 (file)
@@ -64,6 +64,10 @@ static int peer_credits = 8;
 CFS_MODULE_PARM(peer_credits, "i", int, 0444,
                 "# concurrent sends to 1 peer");
 
+static int peer_credits_hiw = 0;
+CFS_MODULE_PARM(peer_credits_hiw, "i", int, 0444,
+                "when eagerly to return credits");
+
 static int peer_timeout = 0;
 CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
                 "Seconds without aliveness news to declare peer dead (<=0 to disable)");
@@ -88,15 +92,14 @@ static int ib_mtu = 0;
 CFS_MODULE_PARM(ib_mtu, "i", int, 0444,
                 "IB MTU 256/512/1024/2048/4096");
 
-#if IBLND_MAP_ON_DEMAND
-static int concurrent_sends = IBLND_RX_MSGS;
-#else
-static int concurrent_sends = IBLND_MSG_QUEUE_SIZE;
-#endif
+static int concurrent_sends = 0;
 CFS_MODULE_PARM(concurrent_sends, "i", int, 0444,
                 "send work-queue sizing");
 
-#if IBLND_MAP_ON_DEMAND
+static int map_on_demand = 0;
+CFS_MODULE_PARM(map_on_demand, "i", int, 0444,
+                "map on demand");
+
 static int fmr_pool_size = 512;
 CFS_MODULE_PARM(fmr_pool_size, "i", int, 0444,
                 "size of the fmr pool (>= ntx)");
@@ -108,7 +111,10 @@ CFS_MODULE_PARM(fmr_flush_trigger, "i", int, 0444,
 static int fmr_cache = 1;
 CFS_MODULE_PARM(fmr_cache, "i", int, 0444,
                 "non-zero to enable FMR caching");
-#endif
+
+static int pmr_pool_size = 512;
+CFS_MODULE_PARM(pmr_pool_size, "i", int, 0444,
+                "size of the MR cache pmr pool");
 
 kib_tunables_t kiblnd_tunables = {
         .kib_service                = &service,
@@ -118,17 +124,18 @@ kib_tunables_t kiblnd_tunables = {
         .kib_ntx                    = &ntx,
         .kib_credits                = &credits,
         .kib_peercredits            = &peer_credits,
+        .kib_peercredits_hiw        = &peer_credits_hiw,
         .kib_peertimeout            = &peer_timeout,
         .kib_default_ipif           = &ipif_name,
         .kib_retry_count            = &retry_count,
         .kib_rnr_retry_count        = &rnr_retry_count,
         .kib_concurrent_sends       = &concurrent_sends,
         .kib_ib_mtu                 = &ib_mtu,
-#if IBLND_MAP_ON_DEMAND
+        .kib_map_on_demand          = &map_on_demand,
         .kib_fmr_pool_size          = &fmr_pool_size,
         .kib_fmr_flush_trigger      = &fmr_flush_trigger,
         .kib_fmr_cache              = &fmr_cache,
-#endif
+        .kib_pmr_pool_size          = &pmr_pool_size,
 };
 
 #if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
@@ -144,6 +151,7 @@ enum {
         O2IBLND_NTX,
         O2IBLND_CREDITS,
         O2IBLND_PEER_CREDITS,
+        O2IBLND_PEER_CREDITS_HIW,
         O2IBLND_PEER_TIMEOUT,
         O2IBLND_IPIF_BASENAME,
         O2IBLND_RETRY_COUNT,
@@ -151,9 +159,11 @@ enum {
         O2IBLND_KEEPALIVE,
         O2IBLND_CONCURRENT_SENDS,
         O2IBLND_IB_MTU,
+        O2IBLND_MAP_ON_DEMAND,
         O2IBLND_FMR_POOL_SIZE,
         O2IBLND_FMR_FLUSH_TRIGGER,
-        O2IBLND_FMR_CACHE
+        O2IBLND_FMR_CACHE,
+        O2IBLND_PMR_POOL_SIZE
 };
 #else
 
@@ -163,6 +173,7 @@ enum {
 #define O2IBLND_NTX              CTL_UNNUMBERED
 #define O2IBLND_CREDITS          CTL_UNNUMBERED
 #define O2IBLND_PEER_CREDITS     CTL_UNNUMBERED
+#define O2IBLND_PEER_CREDITS_HIW CTL_UNNUMBERED
 #define O2IBLND_PEER_TIMEOUT     CTL_UNNUMBERED
 #define O2IBLND_IPIF_BASENAME    CTL_UNNUMBERED
 #define O2IBLND_RETRY_COUNT      CTL_UNNUMBERED
@@ -170,9 +181,11 @@ enum {
 #define O2IBLND_KEEPALIVE        CTL_UNNUMBERED
 #define O2IBLND_CONCURRENT_SENDS CTL_UNNUMBERED
 #define O2IBLND_IB_MTU           CTL_UNNUMBERED
+#define O2IBLND_MAP_ON_DEMAND    CTL_UNNUMBERED
 #define O2IBLND_FMR_POOL_SIZE    CTL_UNNUMBERED
 #define O2IBLND_FMR_FLUSH_TRIGGER CTL_UNNUMBERED
 #define O2IBLND_FMR_CACHE        CTL_UNNUMBERED
+#define O2IBLND_PMR_POOL_SIZE    CTL_UNNUMBERED
 
 #endif
 
@@ -226,6 +239,14 @@ static cfs_sysctl_table_t kiblnd_ctl_table[] = {
                 .proc_handler = &proc_dointvec
         },
         {
+                .ctl_name = O2IBLND_PEER_CREDITS_HIW,
+                .procname = "peer_credits_hiw",
+                .data     = &peer_credits_hiw,
+                .maxlen   = sizeof(int),
+                .mode     = 0444,
+                .proc_handler = &proc_dointvec
+        },
+        {
                 .ctl_name = O2IBLND_PEER_TIMEOUT,
                 .procname = "peer_timeout",
                 .data     = &peer_timeout,
@@ -281,7 +302,15 @@ static cfs_sysctl_table_t kiblnd_ctl_table[] = {
                 .mode     = 0444,
                 .proc_handler = &proc_dointvec
         },
-#if IBLND_MAP_ON_DEMAND
+        {
+                .ctl_name = O2IBLND_MAP_ON_DEMAND,
+                .procname = "map_on_demand",
+                .data     = &map_on_demand,
+                .maxlen   = sizeof(int),
+                .mode     = 0444,
+                .proc_handler = &proc_dointvec
+        },
+
         {
                 .ctl_name = O2IBLND_FMR_POOL_SIZE,
                 .procname = "fmr_pool_size",
@@ -306,7 +335,14 @@ static cfs_sysctl_table_t kiblnd_ctl_table[] = {
                 .mode     = 0444,
                 .proc_handler = &proc_dointvec
         },
-#endif
+        {
+                .ctl_name = O2IBLND_PMR_POOL_SIZE,
+                .procname = "pmr_pool_size",
+                .data     = &pmr_pool_size,
+                .maxlen   = sizeof(int),
+                .mode     = 0444,
+                .proc_handler = &proc_dointvec
+        },
         {0}
 };
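
Every entry above is registered mode 0444, so the values are observable but not
writable at run time; a read sketch (the /proc path depends on the parent table,
which is not shown in this hunk, so it is an assumption):

        /*     cat /proc/sys/lnet/o2iblnd/peer_credits_hiw
         *     cat /proc/sys/lnet/o2iblnd/map_on_demand
         */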
 
@@ -373,26 +409,46 @@ kiblnd_tunables_init (void)
                 return -EINVAL;
         }
 
-        if (*kiblnd_tunables.kib_ib_mtu != 0 &&
-            *kiblnd_tunables.kib_ib_mtu != 256 &&
-            *kiblnd_tunables.kib_ib_mtu != 512 &&
-            *kiblnd_tunables.kib_ib_mtu != 1024 &&
-            *kiblnd_tunables.kib_ib_mtu != 2048 &&
-            *kiblnd_tunables.kib_ib_mtu != 4096) {
+        if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
                 CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
                        *kiblnd_tunables.kib_ib_mtu);
                 return -EINVAL;
         }
 
-        if (*kiblnd_tunables.kib_concurrent_sends > IBLND_RX_MSGS)
-                *kiblnd_tunables.kib_concurrent_sends = IBLND_RX_MSGS;
-        if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE / 2)
-                *kiblnd_tunables.kib_concurrent_sends = IBLND_MSG_QUEUE_SIZE / 2;
+        if (*kiblnd_tunables.kib_peercredits < IBLND_CREDITS_DEFAULT)
+                *kiblnd_tunables.kib_peercredits = IBLND_CREDITS_DEFAULT;
+
+        if (*kiblnd_tunables.kib_peercredits > IBLND_CREDITS_MAX)
+                *kiblnd_tunables.kib_peercredits = IBLND_CREDITS_MAX;
+
+        if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peercredits / 2)
+                *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peercredits / 2;
+
+        if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peercredits)
+                *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peercredits - 1;
+
+        if (*kiblnd_tunables.kib_map_on_demand < 0 ||
+            *kiblnd_tunables.kib_map_on_demand >= IBLND_MAX_RDMA_FRAGS)
+                *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
+
+        if (*kiblnd_tunables.kib_concurrent_sends == 0) {
+                if (*kiblnd_tunables.kib_map_on_demand > 0 &&
+                    *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
+                        *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peercredits) * 2;
+                else
+                        *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peercredits);
+        }
+
+        if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peercredits * 2)
+                *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peercredits * 2;
+
+        if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peercredits / 2)
+                *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peercredits / 2;
 
-        if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE) {
+        if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peercredits) {
                 CWARN("Concurrent sends %d is lower than message queue size: %d, "
                       "performance may drop slightly.\n",
-                      *kiblnd_tunables.kib_concurrent_sends, IBLND_MSG_QUEUE_SIZE);
+                      *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peercredits);
         }
 
         kiblnd_sysctl_init();
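
Tracing the clamps above with the stock defaults (peer_credits = 8,
peer_credits_hiw = 0, map_on_demand = 0, concurrent_sends = 0, and assuming 8
already lies inside [IBLND_CREDITS_DEFAULT, IBLND_CREDITS_MAX]):

        /*   peer_credits_hiw : 0 < 8/2             -> raised to 4
         *   map_on_demand    : 0                   -> stays disabled
         *   concurrent_sends : 0, map_on_demand == 0
         *                                          -> defaults to peer_credits = 8
         * the result passes both concurrent_sends clamps (8 lies within
         * [8/2, 8*2]), so the final CWARN is not triggered.
         */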