LU-13004 gnilnd: remove support for GNILND_BUF_VIRT_*
[fs/lustre-release.git] / lnet / klnds / gnilnd / gnilnd_cb.c
index bce8194..42b1cd3 100644
 
 #include <asm/page.h>
 #include <linux/nmi.h>
+#include <linux/pagemap.h>
+
+#include <libcfs/linux/linux-mem.h>
+
 #include "gnilnd.h"
 
 /* this is useful when needed to debug wire corruption. */
@@ -81,7 +85,6 @@ kgnilnd_schedule_device(kgn_device_t *dev)
        if (!already_live) {
                wake_up_all(&dev->gnd_waitq);
        }
-       return;
 }
 
 void kgnilnd_schedule_device_timer(unsigned long arg)
@@ -151,7 +154,7 @@ kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent)
  * as scheduled */
 
 int
-_kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld)
+_kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld, int lock_held)
 {
        kgn_device_t        *dev = conn->gnc_device;
        int                  sched;
@@ -184,10 +187,11 @@ _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refhe
                         conn, sched);
 
                CDEBUG(D_INFO, "scheduling conn 0x%p caller %s:%d\n", conn, caller, line);
-
-               spin_lock(&dev->gnd_lock);
+               if (!lock_held)
+                       spin_lock(&dev->gnd_lock);
                list_add_tail(&conn->gnc_schedlist, &dev->gnd_ready_conns);
-               spin_unlock(&dev->gnd_lock);
+               if (!lock_held)
+                       spin_unlock(&dev->gnd_lock);
                set_mb(conn->gnc_last_sched_ask, jiffies);
                rc = 1;
        } else {
@@ -197,6 +201,23 @@ _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refhe
 
        /* make sure thread(s) going to process conns - but let it make
         * separate decision from conn schedule */
+       if (!lock_held)
+               kgnilnd_schedule_device(dev);
+       return rc;
+}
+
+int
+_kgnilnd_schedule_delay_conn(kgn_conn_t *conn)
+{
+       kgn_device_t    *dev = conn->gnc_device;
+       int rc = 0;
+
+       spin_lock(&dev->gnd_lock);
+       if (list_empty(&conn->gnc_delaylist)) {
+               list_add_tail(&conn->gnc_delaylist, &dev->gnd_delay_conns);
+               rc = 1;
+       }
+       spin_unlock(&dev->gnd_lock);
+
        kgnilnd_schedule_device(dev);
        return rc;
 }
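
The new lock_held argument lets a caller that already owns dev->gnd_lock queue
a conn without re-taking the lock, and _kgnilnd_schedule_delay_conn() parks a
conn on gnd_delay_conns until SMSG credits free up. A minimal sketch of a
lock-holding caller (the function and the kgnilnd_schedule_conn_nolock()
wrapper shown here are illustrative assumptions, not part of this patch):

    /* illustrative wrapper: forward lock_held = 1 */
    #define kgnilnd_schedule_conn_nolock(c) \
            _kgnilnd_schedule_conn((c), __func__, __LINE__, 0, 1)

    static void
    example_kick_delayed_conns(kgn_device_t *dev)
    {
            kgn_conn_t *conn, *tmp;

            spin_lock(&dev->gnd_lock);
            list_for_each_entry_safe(conn, tmp, &dev->gnd_delay_conns,
                                     gnc_delaylist) {
                    list_del_init(&conn->gnc_delaylist);
                    /* already under gnd_lock, so don't re-take it */
                    kgnilnd_schedule_conn_nolock(conn);
            }
            spin_unlock(&dev->gnd_lock);

            /* the lock_held path skips kgnilnd_schedule_device(), so poke it */
            kgnilnd_schedule_device(dev);
    }
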
@@ -237,7 +258,7 @@ kgnilnd_free_tx(kgn_tx_t *tx)
 
        /* Only free the buffer if we used it */
        if (tx->tx_buffer_copy != NULL) {
-               vfree(tx->tx_buffer_copy);
+               kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length);
                tx->tx_buffer_copy = NULL;
                CDEBUG(D_MALLOC, "vfreed buffer2\n");
        }
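
kgnilnd_vfree() now takes the size of the allocation, which suggests the
wrapper does byte accounting; the matching allocation further down switches to
kgnilnd_vzalloc(). The real definitions live in gnilnd.h - the sketch below is
only an assumed illustration of the pattern (the counter name is made up):

    static atomic64_t example_vmalloc_bytes;    /* assumed accounting counter */

    static inline void *kgnilnd_vzalloc_sketch(int size)
    {
            void *ptr = vzalloc(size);

            if (ptr != NULL)
                    atomic64_add(size, &example_vmalloc_bytes);
            return ptr;
    }

    static inline void kgnilnd_vfree_sketch(void *ptr, int size)
    {
            atomic64_sub(size, &example_vmalloc_bytes);
            vfree(ptr);
    }
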
@@ -256,7 +277,7 @@ kgnilnd_alloc_tx (void)
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
                return tx;
 
-       tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
+       tx = kmem_cache_zalloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
        if (tx == NULL) {
                CERROR("failed to allocate tx\n");
                return NULL;
@@ -264,9 +285,6 @@ kgnilnd_alloc_tx (void)
        CDEBUG(D_MALLOC, "slab-alloced 'tx': %lu at %p.\n",
               sizeof(*tx), tx);
 
-       /* need this memset, cache alloc'd memory is not cleared */
-       memset(tx, 0, sizeof(*tx));
-
        /* setup everything here to minimize time under the lock */
        tx->tx_buftype = GNILND_BUF_NONE;
        tx->tx_msg.gnm_type = GNILND_MSG_NONE;
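
kmem_cache_zalloc() is the zeroing variant of kmem_cache_alloc(), so the
explicit memset() removed below becomes redundant. The mainline helper is
effectively just:

    /* what kmem_cache_zalloc() boils down to: allocate with __GFP_ZERO */
    static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
    {
            return kmem_cache_alloc(k, flags | __GFP_ZERO);
    }
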
@@ -283,8 +301,8 @@ kgnilnd_alloc_tx (void)
 #define _kgnilnd_cksum(seed, ptr, nob)  csum_partial(ptr, nob, seed)
 
 /* we don't use offset as every one is passing a buffer reference that already
- * includes the offset into the base address -
- *  see kgnilnd_setup_virt_buffer and kgnilnd_setup_immediate_buffer */
+ * includes the offset into the base address.
+ */
 static inline __u16
 kgnilnd_cksum(void *ptr, size_t nob)
 {
@@ -302,9 +320,9 @@ kgnilnd_cksum(void *ptr, size_t nob)
        return sum;
 }
 
-inline __u16
-kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
-                   unsigned int offset, unsigned int nob, int dump_blob)
+__u16
+kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov,
+                  unsigned int offset, unsigned int nob, int dump_blob)
 {
        __wsum             cksum = 0;
        __wsum             tmpck;
@@ -321,15 +339,15 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
 
        /* if loops changes, please change kgnilnd_setup_phys_buffer */
 
-       while (offset >= kiov->kiov_len) {
-               offset -= kiov->kiov_len;
+       while (offset >= kiov->bv_len) {
+               offset -= kiov->bv_len;
                nkiov--;
                kiov++;
                LASSERT(nkiov > 0);
        }
 
-       /* ignore nob here, if nob < (kiov_len - offset), kiov == 1 */
-       odd = (unsigned long) (kiov[0].kiov_len - offset) & 1;
+       /* ignore nob here, if nob < (bv_len - offset), kiov == 1 */
+       odd = (unsigned long) (kiov[0].bv_len - offset) & 1;
 
        if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
                struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
@@ -338,10 +356,10 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
                         get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
 
                CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
-                      odd, kiov[0].kiov_len, offset, nob);
+                      odd, kiov[0].bv_len, offset, nob);
 
                for (i = 0; i < nkiov; i++) {
-                       pages[i] = kiov[i].kiov_page;
+                       pages[i] = kiov[i].bv_page;
                }
 
                addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
@@ -354,42 +372,46 @@ kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
                }
                atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
 
-               tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob);
+               tmpck = _kgnilnd_cksum(0, ((void *) addr + kiov[0].bv_offset +
+                                          offset), nob);
                cksum = tmpck;
 
                if (dump_blob) {
                        kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
-                                         (void *)addr + kiov[0].kiov_offset + offset, nob);
+                                         (void *)addr + kiov[0].bv_offset +
+                                         offset, nob);
                }
                CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
-                      cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset);
+                      cksum, tmpck, addr, kiov[0].bv_offset, nob, offset);
                vunmap(addr);
        } else {
                do {
-                       fraglen = min(kiov->kiov_len - offset, nob);
+                       fraglen = min(kiov->bv_len - offset, nob);
 
                        /* make dang sure we don't send a bogus checksum if somehow we get
                         * an odd length fragment on anything but the last entry in a kiov  -
                         * we know from kgnilnd_setup_rdma_buffer that we can't have non
                         * PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
                        LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
-                                "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n",
-                                fraglen, nkiov, nob, kiov->kiov_len, offset, kiov);
+                                "odd fraglen %u on nkiov %d, nob %u bv_len %u offset %u kiov 0x%p\n",
+                                fraglen, nkiov, nob, kiov->bv_len,
+                                offset, kiov);
 
-                       addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset;
+                       addr = (void *)kmap(kiov->bv_page) + kiov->bv_offset +
+                               offset;
                        tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
 
                        CDEBUG(D_BUFFS,
                               "cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
-                              cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr,
-                              fraglen, offset);
+                              cksum, tmpck, kiov->bv_page, kiov->bv_offset,
+                              addr, fraglen, offset);
 
                        cksum = tmpck;
 
                        if (dump_blob)
                                kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
 
-                       kunmap(kiov->kiov_page);
+                       kunmap(kiov->bv_page);
 
                        kiov++;
                        nkiov--;
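
Most of the churn in this function is mechanical: lnet_kiov_t fields are
renamed to their struct bio_vec equivalents now that LNet hands the LND
bio_vecs directly. The mapping is one-to-one (upstream definition from
include/linux/bvec.h, reproduced here from memory):

    struct bio_vec {
            struct page     *bv_page;       /* was kiov_page   */
            unsigned int     bv_len;        /* was kiov_len    */
            unsigned int     bv_offset;     /* was kiov_offset */
    };
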
@@ -489,9 +511,9 @@ kgnilnd_nak_rdma(kgn_conn_t *conn, int rx_type, int error, __u64 cookie, lnet_ni
        kgnilnd_queue_tx(conn, tx);
 }
 
-int
+static int
 kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
-                              struct kvec *iov, lnet_kiov_t *kiov,
+                              struct bio_vec *kiov,
                               unsigned int offset, unsigned int nob)
 {
        kgn_msg_t       *msg = &tx->tx_msg;
@@ -504,45 +526,48 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
 
        if (nob == 0) {
                tx->tx_buffer = NULL;
-       } else if (kiov != NULL) {
+       } else {
 
                if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
-                       niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) /
-                               PAGE_SIZE);
+                       niov = DIV_ROUND_UP(nob + offset + kiov->bv_offset,
+                                           PAGE_SIZE);
                }
 
                LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
-                       "bad niov %d msg %p kiov %p iov %p offset %d nob%d\n",
-                       niov, msg, kiov, iov, offset, nob);
+                       "bad niov %d msg %p kiov %p offset %d nob %d\n",
+                       niov, msg, kiov, offset, nob);
 
-               while (offset >= kiov->kiov_len) {
-                       offset -= kiov->kiov_len;
+               while (offset >= kiov->bv_len) {
+                       offset -= kiov->bv_len;
                        niov--;
                        kiov++;
                        LASSERT(niov > 0);
                }
                for (i = 0; i < niov; i++) {
-                       /* We can't have a kiov_offset on anything but the first entry,
-                        * otherwise we'll have a hole at the end of the mapping as we only map
-                        * whole pages.
-                        * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
-                        * than kiov_len, we will also have a whole at the end of that page
-                        * which isn't allowed */
-                       if ((kiov[i].kiov_offset != 0 && i > 0) ||
-                           (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
-                               CNETERR("Can't make payload contiguous in I/O VM:"
-                                      "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
-                                      i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
+                       /* We can't have a bv_offset on anything but the first
+                        * entry, otherwise we'll have a hole at the end of the
+                        * mapping as we only map whole pages.
+                        * Also, if we have a bv_len < PAGE_SIZE but we need to
+                        * map more than bv_len, we will also have a hole at
+                        * the end of that page which isn't allowed
+                        */
+                       if ((kiov[i].bv_offset != 0 && i > 0) ||
+                           (kiov[i].bv_offset + kiov[i].bv_len != PAGE_SIZE &&
+                            i < niov - 1)) {
+                               CNETERR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
+                                      i, offset, nob, kiov->bv_offset,
+                                       kiov->bv_len);
                                RETURN(-EINVAL);
                        }
-                       tx->tx_imm_pages[i] = kiov[i].kiov_page;
+                       tx->tx_imm_pages[i] = kiov[i].bv_page;
                }
 
                /* hijack tx_phys for the later unmap */
                if (niov == 1) {
                        /* tx->tx_phys being equal to NULL is the signal for unmap to discern between kmap and vmap */
                        tx->tx_phys = NULL;
-                       tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset;
+                       tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) +
+                               kiov[0].bv_offset + offset;
                        atomic_inc(&kgnilnd_data.kgn_nkmap_short);
                        GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
                                nob, kiov, tx->tx_buffer);
@@ -554,37 +579,18 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
 
                        }
                        atomic_inc(&kgnilnd_data.kgn_nvmap_short);
-                       /* make sure we take into account the kiov offset as the start of the buffer */
-                       tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset;
-                       GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
-                               niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
+                       /* make sure we take into account the kiov offset as the
+                        * start of the buffer
+                        */
+                       tx->tx_buffer = (void *)tx->tx_phys + kiov[0].bv_offset
+                               + offset;
+                       GNIDBG_TX(D_NET, tx,
+                                 "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
+                                 niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
                }
                tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
                tx->tx_nob = nob;
 
-       } else {
-               /* For now this is almost identical to kgnilnd_setup_virt_buffer, but we
-                * could "flatten" the payload into a single contiguous buffer ready
-                * for sending direct over an FMA if we ever needed to. */
-
-               LASSERT(niov > 0);
-
-               while (offset >= iov->iov_len) {
-                       offset -= iov->iov_len;
-                       niov--;
-                       iov++;
-                       LASSERT(niov > 0);
-               }
-
-               if (nob > iov->iov_len - offset) {
-                       CERROR("Can't handle multiple vaddr fragments\n");
-                       return -EMSGSIZE;
-               }
-
-               tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
-
-               tx->tx_buftype = GNILND_BUF_IMMEDIATE;
-               tx->tx_nob = nob;
        }
 
        /* checksum payload early - it shouldn't be changing after lnd_send */
@@ -605,35 +611,7 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
 }
 
 int
-kgnilnd_setup_virt_buffer(kgn_tx_t *tx,
-                         unsigned int niov, struct kvec *iov,
-                         unsigned int offset, unsigned int nob)
-
-{
-       LASSERT(nob > 0);
-       LASSERT(niov > 0);
-       LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
-
-       while (offset >= iov->iov_len) {
-               offset -= iov->iov_len;
-               niov--;
-               iov++;
-               LASSERT(niov > 0);
-       }
-
-       if (nob > iov->iov_len - offset) {
-               CERROR("Can't handle multiple vaddr fragments\n");
-               return -EMSGSIZE;
-       }
-
-       tx->tx_buftype = GNILND_BUF_VIRT_UNMAPPED;
-       tx->tx_nob = nob;
-       tx->tx_buffer = (void *)(((unsigned long)iov->iov_base) + offset);
-       return 0;
-}
-
-int
-kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
+kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov,
                          unsigned int offset, unsigned int nob)
 {
        gni_mem_segment_t *phys;
@@ -661,8 +639,8 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
        /* if loops changes, please change kgnilnd_cksum_kiov
         *   and kgnilnd_setup_immediate_buffer */
 
-       while (offset >= kiov->kiov_len) {
-               offset -= kiov->kiov_len;
+       while (offset >= kiov->bv_len) {
+               offset -= kiov->bv_len;
                nkiov--;
                kiov++;
                LASSERT(nkiov > 0);
@@ -674,31 +652,31 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
        tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
        tx->tx_nob = nob;
 
-       /* kiov_offset is start of 'valid' buffer, so index offset past that */
-       tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
+       /* bv_offset is start of 'valid' buffer, so index offset past that */
+       tx->tx_buffer = (void *)((unsigned long)(kiov->bv_offset + offset));
        phys = tx->tx_phys;
 
        CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
-              tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset);
+              tx, tx->tx_buffer, kiov, kiov->bv_offset, nkiov, offset);
 
        do {
-               fraglen = min(kiov->kiov_len - offset, nob);
-
-               /* We can't have a kiov_offset on anything but the first entry,
-                * otherwise we'll have a hole at the end of the mapping as we only map
-                * whole pages. Only the first page is allowed to have an offset -
-                * we'll add that into tx->tx_buffer and that will get used when we
-                * map in the segments (see kgnilnd_map_buffer).
-                * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
-                * than kiov_len, we will also have a whole at the end of that page
-                * which isn't allowed */
+               fraglen = min(kiov->bv_len - offset, nob);
+
+               /* We can't have a bv_offset on anything but the first entry,
+                * otherwise we'll have a hole at the end of the mapping as we
+                * only map whole pages.  Only the first page is allowed to
+                * have an offset - we'll add that into tx->tx_buffer and that
+                * will get used when we map in the segments (see
+                * kgnilnd_map_buffer).  Also, if we have a bv_len < PAGE_SIZE
+                * but we need to map more than bv_len, we will also have a
+                * hole at the end of that page which isn't allowed
+                */
                if ((phys != tx->tx_phys) &&
-                   ((kiov->kiov_offset != 0) ||
-                    ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) {
-                       CERROR("Can't make payload contiguous in I/O VM:"
-                              "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
+                   ((kiov->bv_offset != 0) ||
+                    ((kiov->bv_len < PAGE_SIZE) && (nob > kiov->bv_len)))) {
+                       CERROR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
                               (int)(phys - tx->tx_phys),
-                              offset, nob, kiov->kiov_offset, kiov->kiov_len);
+                              offset, nob, kiov->bv_offset, kiov->bv_len);
                        rc = -EINVAL;
                        GOTO(error, rc);
                }
@@ -714,11 +692,12 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
                        GOTO(error, rc);
                }
 
-               CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u "
-                              "nkiov %u offset %u\n",
-                     kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
+               CDEBUG(D_BUFFS,
+                      "page 0x%p bv_offset %u bv_len %u nob %u nkiov %u offset %u\n",
+                      kiov->bv_page, kiov->bv_offset, kiov->bv_len, nob, nkiov,
+                      offset);
 
-               phys->address = page_to_phys(kiov->kiov_page);
+               phys->address = page_to_phys(kiov->bv_page);
                phys++;
                kiov++;
                nkiov--;
@@ -746,21 +725,10 @@ error:
 
 static inline int
 kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
-                         struct kvec *iov, lnet_kiov_t *kiov,
+                         struct bio_vec *kiov,
                          unsigned int offset, unsigned int nob)
 {
-       int     rc;
-
-       LASSERTF((iov == NULL) != (kiov == NULL), "iov 0x%p, kiov 0x%p, tx 0x%p,"
-                                               " offset %d, nob %d, niov %d\n"
-                                               , iov, kiov, tx, offset, nob, niov);
-
-       if (kiov != NULL) {
-               rc = kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
-       } else {
-               rc = kgnilnd_setup_virt_buffer(tx, niov, iov, offset, nob);
-       }
-       return rc;
+       return kgnilnd_setup_phys_buffer(tx, niov, kiov, offset, nob);
 }
 
 /* kgnilnd_parse_lnet_rdma()
@@ -774,16 +742,16 @@ kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
  *           transfer.
  */
 static void
-kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov,
+kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov,
                        unsigned int *offset, unsigned int *nob,
-                       lnet_kiov_t **kiov, int put_len)
+                       struct bio_vec **kiov, int put_len)
 {
        /* GETs are weird, see kgnilnd_send */
        if (lntmsg->msg_type == LNET_MSG_GET) {
                if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) {
                        *kiov = NULL;
                } else {
-                       *kiov = lntmsg->msg_md->md_iov.kiov;
+                       *kiov = lntmsg->msg_md->md_kiov;
                }
                *niov = lntmsg->msg_md->md_niov;
                *nob = lntmsg->msg_md->md_length;
@@ -799,10 +767,10 @@ kgnilnd_parse_lnet_rdma(lnet_msg_t *lntmsg, unsigned int *niov,
 static inline void
 kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
 {
-       unsigned int     niov, offset, nob;
-       lnet_kiov_t     *kiov;
-       lnet_msg_t      *lntmsg = tx->tx_lntmsg[0];
-       int              dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
+       unsigned int niov, offset, nob;
+       struct bio_vec *kiov;
+       struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
+       int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
 
        GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
                           (tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
@@ -852,8 +820,8 @@ kgnilnd_verify_rdma_cksum(kgn_tx_t *tx, __u16 rx_cksum, int put_len)
        int              rc = 0;
        __u16            cksum;
        unsigned int     niov, offset, nob;
-       lnet_kiov_t     *kiov;
-       lnet_msg_t      *lntmsg = tx->tx_lntmsg[0];
+       struct bio_vec  *kiov;
+       struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
        int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
 
        /* we can only match certain requests */
@@ -936,12 +904,6 @@ kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
                dev->gnd_map_nphys++;
                dev->gnd_map_physnop += tx->tx_phys_npages;
                break;
-
-       case GNILND_BUF_VIRT_MAPPED:
-               bytes = tx->tx_nob;
-               dev->gnd_map_nvirt++;
-               dev->gnd_map_virtnob += tx->tx_nob;
-               break;
        }
 
        if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
@@ -985,12 +947,6 @@ kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
                dev->gnd_map_nphys--;
                dev->gnd_map_physnop -= tx->tx_phys_npages;
                break;
-
-       case GNILND_BUF_VIRT_UNMAPPED:
-               bytes = tx->tx_nob;
-               dev->gnd_map_nvirt--;
-               dev->gnd_map_virtnob -= tx->tx_nob;
-               break;
        }
 
        if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
@@ -1043,7 +999,6 @@ kgnilnd_map_buffer(kgn_tx_t *tx)
        case GNILND_BUF_IMMEDIATE:
        case GNILND_BUF_IMMEDIATE_KIOV:
        case GNILND_BUF_PHYS_MAPPED:
-       case GNILND_BUF_VIRT_MAPPED:
                return 0;
 
        case GNILND_BUF_PHYS_UNMAPPED:
@@ -1056,41 +1011,16 @@ kgnilnd_map_buffer(kgn_tx_t *tx)
                 * - this needs to turn into a non-fatal error soon to allow
                 *  GART resource, etc starvation handling */
                if (rrc != GNI_RC_SUCCESS) {
-                       GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
-                               "phys %u pp %u, virt %u nob %llu",
+                       GNIDBG_TX(D_NET, tx,
+                                 "Can't map %d pages: dev %d phys %u pp %u",
                                tx->tx_phys_npages, dev->gnd_id,
-                               dev->gnd_map_nphys, dev->gnd_map_physnop,
-                               dev->gnd_map_nvirt, dev->gnd_map_virtnob);
+                               dev->gnd_map_nphys, dev->gnd_map_physnop);
                        RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
                }
 
                tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
                kgnilnd_mem_add_map_list(dev, tx);
                return 0;
-
-       case GNILND_BUF_VIRT_UNMAPPED:
-               rrc = kgnilnd_mem_register(dev->gnd_handle,
-                       (__u64)tx->tx_buffer, tx->tx_nob,
-                       NULL, flags, &tx->tx_map_key);
-               if (rrc != GNI_RC_SUCCESS) {
-                       GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
-                               "phys %u pp %u, virt %u nob %llu",
-                               tx->tx_nob, dev->gnd_id,
-                               dev->gnd_map_nphys, dev->gnd_map_physnop,
-                               dev->gnd_map_nvirt, dev->gnd_map_virtnob);
-                       RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
-               }
-
-               tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
-               kgnilnd_mem_add_map_list(dev, tx);
-               if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
-                   tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
-                       atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
-                       GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
-                              tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
-               }
-
-               return 0;
        }
 }
 
@@ -1132,8 +1062,8 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
        int               hold_timeout = 0;
 
        /* code below relies on +1 relationship ... */
-       CLASSERT(GNILND_BUF_PHYS_MAPPED == (GNILND_BUF_PHYS_UNMAPPED + 1));
-       CLASSERT(GNILND_BUF_VIRT_MAPPED == (GNILND_BUF_VIRT_UNMAPPED + 1));
+       BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
+                    (GNILND_BUF_PHYS_UNMAPPED + 1));
 
        switch (tx->tx_buftype) {
        default:
@@ -1142,7 +1072,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
        case GNILND_BUF_NONE:
        case GNILND_BUF_IMMEDIATE:
        case GNILND_BUF_PHYS_UNMAPPED:
-       case GNILND_BUF_VIRT_UNMAPPED:
                break;
        case GNILND_BUF_IMMEDIATE_KIOV:
                if (tx->tx_phys != NULL) {
@@ -1156,7 +1085,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                break;
 
        case GNILND_BUF_PHYS_MAPPED:
-       case GNILND_BUF_VIRT_MAPPED:
                LASSERT(tx->tx_conn != NULL);
 
                dev = tx->tx_conn->gnc_device;
@@ -1198,9 +1126,9 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
 void
 kgnilnd_tx_done(kgn_tx_t *tx, int completion)
 {
-       lnet_msg_t      *lntmsg0, *lntmsg1;
+       struct lnet_msg      *lntmsg0, *lntmsg1;
        int             status0, status1;
-       lnet_ni_t       *ni = NULL;
+       struct lnet_ni       *ni = NULL;
        kgn_conn_t      *conn = tx->tx_conn;
 
        LASSERT(!in_interrupt());
@@ -1265,10 +1193,10 @@ kgnilnd_tx_done(kgn_tx_t *tx, int completion)
         * could free up lnet credits, resulting in a call chain back into
         * the LND via kgnilnd_send and friends */
 
-       lnet_finalize(ni, lntmsg0, status0);
+       lnet_finalize(lntmsg0, status0);
 
        if (lntmsg1 != NULL) {
-               lnet_finalize(ni, lntmsg1, status1);
+               lnet_finalize(lntmsg1, status1);
        }
 }
 
@@ -1343,70 +1271,35 @@ search_again:
        return 0;
 }
 
-static inline int
-kgnilnd_tx_should_retry(kgn_conn_t *conn, kgn_tx_t *tx)
+static inline void
+kgnilnd_tx_log_retrans(kgn_conn_t *conn, kgn_tx_t *tx)
 {
-       int             max_retrans = *kgnilnd_tunables.kgn_max_retransmits;
        int             log_retrans;
-       int             log_retrans_level;
-
-       /* I need kgni credits to send this.  Replace tx at the head of the
-        * fmaq and I'll get rescheduled when credits appear */
-       tx->tx_state = 0;
-       tx->tx_retrans++;
-       conn->gnc_tx_retrans++;
-       log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0) ||
-                       (tx->tx_retrans > (max_retrans / 2)));
-       log_retrans_level = tx->tx_retrans < (max_retrans / 2) ? D_NET : D_NETERROR;
 
-       /* Decision time - either error, warn or just retransmit */
+       log_retrans = ((tx->tx_retrans < 25) || ((tx->tx_retrans % 25) == 0));
 
        /* we don't care about TX timeout - it could be that the network is slower
         * or throttled. We'll keep retransmitting - so if the network is so slow
         * that we fill up our mailbox, we'll keep trying to resend that msg
         * until we exceed the max_retrans _or_ gnc_last_rx expires, indicating
         * that the peer hasn't sent us any traffic in return */
-
-       if (tx->tx_retrans > max_retrans) {
-               /* this means we are not backing off the retransmits
-                * in a healthy manner and are likely chewing up the
-                * CPU cycles quite badly */
-               GNIDBG_TOMSG(D_ERROR, &tx->tx_msg,
-                       "SOFTWARE BUG: too many retransmits (%d) for tx id %x "
-                       "conn 0x%p->%s\n",
-                       tx->tx_retrans, tx->tx_id, conn,
-                       libcfs_nid2str(conn->gnc_peer->gnp_nid));
-
-               /* yes - double errors to help debug this condition */
-               GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg, "connection dead. "
-                       "unable to send to %s for %lu secs (%d tries)",
-                       libcfs_nid2str(tx->tx_conn->gnc_peer->gnp_nid),
-                       cfs_duration_sec(jiffies - tx->tx_cred_wait),
-                       tx->tx_retrans);
-
-               kgnilnd_close_conn(conn, -ETIMEDOUT);
-
-               /* caller should terminate */
-               RETURN(0);
-       } else {
-               /* some reasonable throttling of the debug message */
-               if (log_retrans) {
-                       unsigned long now = jiffies;
-                       /* XXX Nic: Mystical TX debug here... */
-                       GNIDBG_SMSG_CREDS(log_retrans_level, conn);
-                       GNIDBG_TOMSG(log_retrans_level, &tx->tx_msg,
-                               "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
-                               " last_msg %uus/%uus last_cq %uus/%uus",
-                               conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
-                               tx->tx_id, tx->tx_retrans,
-                               jiffies_to_usecs(now - tx->tx_cred_wait),
-                               jiffies_to_usecs(now - conn->gnc_last_tx),
-                               jiffies_to_usecs(now - conn->gnc_last_rx),
-                               jiffies_to_usecs(now - conn->gnc_last_tx_cq),
-                               jiffies_to_usecs(now - conn->gnc_last_rx_cq));
-               }
-               /* caller should retry */
-               RETURN(1);
+
+       /* some reasonable throttling of the debug message */
+       if (log_retrans) {
+               unsigned long now = jiffies;
+               /* XXX Nic: Mystical TX debug here... */
+               /* We expect retransmissions so only log when D_NET is enabled */
+               GNIDBG_SMSG_CREDS(D_NET, conn);
+               GNIDBG_TOMSG(D_NET, &tx->tx_msg,
+                       "NOT_DONE on conn 0x%p->%s id %x retrans %d wait %dus"
+                       " last_msg %uus/%uus last_cq %uus/%uus",
+                       conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
+                       tx->tx_id, tx->tx_retrans,
+                       jiffies_to_usecs(now - tx->tx_cred_wait),
+                       jiffies_to_usecs(now - conn->gnc_last_tx),
+                       jiffies_to_usecs(now - conn->gnc_last_rx),
+                       jiffies_to_usecs(now - conn->gnc_last_tx_cq),
+                       jiffies_to_usecs(now - conn->gnc_last_rx_cq));
        }
 }
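
With the fatal "too many retransmits" branch gone, this helper only throttles
the debug output: every retransmit is logged up to 25, then every 25th one
after that. The rule, pulled out as a standalone predicate for clarity (the
function name is illustrative):

    static inline bool example_should_log_retrans(int tx_retrans)
    {
            return tx_retrans < 25 || (tx_retrans % 25) == 0;
    }
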
 
@@ -1419,7 +1312,6 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
 {
        kgn_conn_t      *conn = tx->tx_conn;
        kgn_msg_t       *msg = &tx->tx_msg;
-       int              retry_send;
        gni_return_t     rrc;
        unsigned long    newest_last_rx, timeout;
        unsigned long    now;
@@ -1529,9 +1421,11 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
                return 0;
 
        case GNI_RC_NOT_DONE:
-               /* XXX Nic: We need to figure out how to track this
-                * - there are bound to be good reasons for it,
-                * but we want to know when it happens */
+               /* Jshimek: We can get GNI_RC_NOT_DONE for 3 reasons currently
+                * 1: out of mbox credits
+                * 2: out of mbox payload credits
+                * 3: On Aries out of dla credits
+                */
                kgnilnd_conn_mutex_unlock(&conn->gnc_smsg_mutex);
                kgnilnd_gl_mutex_unlock(&conn->gnc_device->gnd_cq_mutex);
                /* We'll handle this error inline - makes the calling logic much more
@@ -1542,31 +1436,36 @@ kgnilnd_sendmsg_nolock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob,
                        return -EAGAIN;
                }
 
-               retry_send = kgnilnd_tx_should_retry(conn, tx);
-               if (retry_send) {
-                       /* add to head of list for the state and retries */
-                       spin_lock(state_lock);
-                       kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
-                       spin_unlock(state_lock);
-
-                       /* We only reschedule for a certain number of retries, then
-                        * we will wait for the CQ events indicating a release of SMSG
-                        * credits */
-                       if (tx->tx_retrans < (*kgnilnd_tunables.kgn_max_retransmits/4)) {
-                               kgnilnd_schedule_conn(conn);
-                               return 0;
-                       } else {
-                               /* CQ event coming in signifies either TX completed or
-                                * RX receive. Either of these *could* free up credits
-                                * in the SMSG mbox and we should try sending again */
-                               GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
-                                        tx->tx_conn->gnc_cqid);
-                               /* use +ve return code to let upper layers know they
-                                * should stop looping on sends */
-                               return EAGAIN;
-                       }
+               /* I need kgni credits to send this.  Replace tx at the head of the
+                * fmaq and I'll get rescheduled when credits appear. Reset the tx_state
+                * and bump retrans counts since we are requeueing the tx.
+                */
+               tx->tx_state = 0;
+               tx->tx_retrans++;
+               conn->gnc_tx_retrans++;
+
+               kgnilnd_tx_log_retrans(conn, tx);
+               /* add to head of list for the state and retries */
+               spin_lock(state_lock);
+               kgnilnd_tx_add_state_locked(tx, conn->gnc_peer, conn, state, 0);
+               spin_unlock(state_lock);
+
+               /* We only reschedule for a certain number of retries, then
+                * we will wait for the CQ events indicating a release of SMSG
+                * credits */
+               if (tx->tx_retrans < *kgnilnd_tunables.kgn_max_retransmits) {
+                       kgnilnd_schedule_conn(conn);
+                       return 0;
                } else {
-                       return -EAGAIN;
+                       /* CQ event coming in signifies either TX completed or
+                        * RX receive. Either of these *could* free up credits
+                        * in the SMSG mbox and we should try sending again */
+                       GNIDBG_TX(D_NET, tx, "waiting for CQID %u event to resend",
+                                tx->tx_conn->gnc_cqid);
+                       kgnilnd_schedule_delay_conn(conn);
+                       /* use +ve return code to let upper layers know they
+                        * should stop looping on sends */
+                       return EAGAIN;
                }
        default:
                /* handle bad retcode gracefully */
@@ -1656,7 +1555,7 @@ kgnilnd_sendmsg_trylock(kgn_tx_t *tx, void *immediate, unsigned int immediatenob
 }
 
 /* lets us know if we can push this RDMA through now */
-inline int
+static int
 kgnilnd_auth_rdma_bytes(kgn_device_t *dev, kgn_tx_t *tx)
 {
        long    bytes_left;
@@ -1770,7 +1669,7 @@ kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
 }
 
 void
-kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
+kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_process_id *target)
 {
        kgn_peer_t      *peer;
        kgn_peer_t      *new_peer = NULL;
@@ -1805,7 +1704,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
                }
 
                /* don't create a connection if the peer is marked down */
-               if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
+               if (peer->gnp_state != GNILND_PEER_UP) {
                        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                        rc = -ENETRESET;
                        GOTO(no_peer, rc);
@@ -1844,7 +1743,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
        kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
 
        /* don't create a connection if the peer is not up */
-       if (peer->gnp_down != GNILND_RCA_NODE_UP) {
+       if (peer->gnp_state != GNILND_PEER_UP) {
                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                rc = -ENETRESET;
                GOTO(no_peer, rc);
@@ -1924,11 +1823,11 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
 
                        tx->tx_offset = ((__u64)((unsigned long)sink->gnrd_addr)) & 3;
                        if (tx->tx_offset)
-                               kgnilnd_admin_addref(kgnilnd_data.kgn_rev_offset);
+                               atomic_inc(&kgnilnd_data.kgn_rev_offset);
 
                        if ((nob + tx->tx_offset) & 3) {
                                desc_nob = ((nob + tx->tx_offset) + (4 - ((nob + tx->tx_offset) & 3)));
-                               kgnilnd_admin_addref(kgnilnd_data.kgn_rev_length);
+                               atomic_inc(&kgnilnd_data.kgn_rev_length);
                        } else {
                                desc_nob = (nob + tx->tx_offset);
                        }
@@ -1936,7 +1835,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
                        if (tx->tx_buffer_copy == NULL) {
                                /* Allocate the largest copy buffer we will need, this will prevent us from overwriting data
                                 * and require at most we allocate a few extra bytes. */
-                               tx->tx_buffer_copy = vmalloc(desc_nob);
+                               tx->tx_buffer_copy = kgnilnd_vzalloc(desc_nob);
 
                                if (!tx->tx_buffer_copy) {
                                        /* allocation of buffer failed nak the rdma */
@@ -1944,11 +1843,12 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
                                        kgnilnd_tx_done(tx, -EFAULT);
                                        return 0;
                                }
-                               kgnilnd_admin_addref(kgnilnd_data.kgn_rev_copy_buff);
+                               atomic_inc(&kgnilnd_data.kgn_rev_copy_buff);
                                rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
                                if (rc != GNI_RC_SUCCESS) {
                                        /* Registration Failed nak rdma and kill the tx. */
-                                       vfree(tx->tx_buffer_copy);
+                                       kgnilnd_vfree(tx->tx_buffer_copy,
+                                                     desc_nob);
                                        tx->tx_buffer_copy = NULL;
                                        kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
                                        kgnilnd_tx_done(tx, -EFAULT);
@@ -2014,7 +1914,7 @@ kgnilnd_rdma(kgn_tx_t *tx, int type,
                kgnilnd_unmap_buffer(tx, 0);
 
                if (tx->tx_buffer_copy != NULL) {
-                       vfree(tx->tx_buffer_copy);
+                       kgnilnd_vfree(tx->tx_buffer_copy, desc_nob);
                        tx->tx_buffer_copy = NULL;
                }
 
@@ -2079,7 +1979,7 @@ kgnilnd_release_msg(kgn_conn_t *conn)
        LASSERTF(rrc == GNI_RC_SUCCESS, "bad rrc %d\n", rrc);
        GNIDBG_SMSG_CREDS(D_NET, conn);
 
-       return;
+       kgnilnd_schedule_conn(conn);
 }
 
 void
@@ -2103,28 +2003,26 @@ kgnilnd_consume_rx(kgn_rx_t *rx)
        kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
        CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
               sizeof(*rx), rx);
-
-       return;
 }
 
 int
-kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
 {
-       lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
+       struct lnet_hdr  *hdr = &lntmsg->msg_hdr;
        int               type = lntmsg->msg_type;
-       lnet_process_id_t target = lntmsg->msg_target;
+       struct lnet_process_id target = lntmsg->msg_target;
        int               target_is_router = lntmsg->msg_target_is_router;
        int               routing = lntmsg->msg_routing;
        unsigned int      niov = lntmsg->msg_niov;
-       struct kvec      *iov = lntmsg->msg_iov;
-       lnet_kiov_t      *kiov = lntmsg->msg_kiov;
+       struct bio_vec   *kiov = lntmsg->msg_kiov;
        unsigned int      offset = lntmsg->msg_offset;
        unsigned int      nob = lntmsg->msg_len;
        unsigned int      msg_vmflush = lntmsg->msg_vmflush;
        kgn_net_t        *net = ni->ni_data;
        kgn_tx_t         *tx;
        int               rc = 0;
-       int               mpflag = 0;
+       /* '1' for consistency with code that checks !mpflag to restore */
+       unsigned int mpflag = 1;
        int               reverse_rdma_flag = *kgnilnd_tunables.kgn_reverse_rdma;
 
        /* NB 'private' is different depending on what we're sending.... */
@@ -2138,12 +2036,8 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
        LASSERTF(niov <= LNET_MAX_IOV,
                "lntmsg %p niov %d\n", lntmsg, niov);
 
-       /* payload is either all vaddrs or all pages */
-       LASSERTF(!(kiov != NULL && iov != NULL),
-               "lntmsg %p kiov %p iov %p\n", lntmsg, kiov, iov);
-
        if (msg_vmflush)
-               mpflag = cfs_memory_pressure_get_and_set();
+               mpflag = memalloc_noreclaim_save();
 
        switch (type) {
        default:
@@ -2181,16 +2075,9 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                        rc = -ENOMEM;
                        goto out;
                }
-               /* slightly different options as we might actually have a GET with a
-                * MD_KIOV set but a non-NULL md_iov.iov */
-               if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
-                       rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
-                                                     lntmsg->msg_md->md_iov.iov, NULL,
-                                                     0, lntmsg->msg_md->md_length);
-               else
-                       rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
-                                                     NULL, lntmsg->msg_md->md_iov.kiov,
-                                                     0, lntmsg->msg_md->md_length);
+               rc = kgnilnd_setup_rdma_buffer(tx, lntmsg->msg_md->md_niov,
+                                              lntmsg->msg_md->md_kiov,
+                                              0, lntmsg->msg_md->md_length);
                if (rc != 0) {
                        CERROR("unable to setup buffer: %d\n", rc);
                        kgnilnd_tx_done(tx, rc);
@@ -2233,7 +2120,8 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                        goto out;
                }
 
-               rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
+               rc = kgnilnd_setup_rdma_buffer(tx, niov,
+                                              kiov, offset, nob);
                if (rc != 0) {
                        kgnilnd_tx_done(tx, rc);
                        rc = -EIO;
@@ -2262,7 +2150,7 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                goto out;
        }
 
-       rc = kgnilnd_setup_immediate_buffer(tx, niov, iov, kiov, offset, nob);
+       rc = kgnilnd_setup_immediate_buffer(tx, niov, NULL, kiov, offset, nob);
        if (rc != 0) {
                kgnilnd_tx_done(tx, rc);
                goto out;
@@ -2275,18 +2163,17 @@ kgnilnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 out:
        /* use stored value as we could have already finalized lntmsg here from a failed launch */
        if (msg_vmflush)
-               cfs_memory_pressure_restore(mpflag);
+               memalloc_noreclaim_restore(mpflag);
        return rc;
 }
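
cfs_memory_pressure_get_and_set() is replaced by the upstream
memalloc_noreclaim_save()/memalloc_noreclaim_restore() pair, which sets
PF_MEMALLOC on the current task and later restores the previous state. The
usual pattern, shown standalone:

    #include <linux/sched/mm.h>

    static void example_noreclaim_section(void)
    {
            unsigned int flags;

            flags = memalloc_noreclaim_save();      /* sets PF_MEMALLOC */
            /* allocations here may dip into reserves and won't recurse
             * into direct reclaim */
            memalloc_noreclaim_restore(flags);      /* restore old state */
    }
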
 
 void
-kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
+kgnilnd_setup_rdma(struct lnet_ni *ni, kgn_rx_t *rx, struct lnet_msg *lntmsg, int mlen)
 {
        kgn_conn_t    *conn = rx->grx_conn;
        kgn_msg_t     *rxmsg = rx->grx_msg;
        unsigned int   niov = lntmsg->msg_niov;
-       struct kvec   *iov = lntmsg->msg_iov;
-       lnet_kiov_t   *kiov = lntmsg->msg_kiov;
+       struct bio_vec *kiov = lntmsg->msg_kiov;
        unsigned int   offset = lntmsg->msg_offset;
        unsigned int   nob = lntmsg->msg_len;
        int            done_type;
@@ -2316,7 +2203,7 @@ kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
        if (rc != 0)
                goto failed_1;
 
-       rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, nob);
+       rc = kgnilnd_setup_rdma_buffer(tx, niov, kiov, offset, nob);
        if (rc != 0)
                goto failed_1;
 
@@ -2337,11 +2224,11 @@ kgnilnd_setup_rdma(lnet_ni_t *ni, kgn_rx_t *rx, lnet_msg_t *lntmsg, int mlen)
        kgnilnd_tx_done(tx, rc);
        kgnilnd_nak_rdma(conn, done_type, rc, rxmsg->gnm_u.get.gngm_cookie, ni->ni_nid);
  failed_0:
-       lnet_finalize(ni, lntmsg, rc);
+       lnet_finalize(lntmsg, rc);
 }
 
 int
-kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+kgnilnd_eager_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
                   void **new_private)
 {
        kgn_rx_t        *rx = private;
@@ -2432,9 +2319,9 @@ kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 }
 
 int
-kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
             int delayed, unsigned int niov,
-            struct kvec *iov, lnet_kiov_t *kiov,
+            struct bio_vec *kiov,
             unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
        kgn_rx_t    *rx = private;
@@ -2447,14 +2334,11 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 
        LASSERT(!in_interrupt());
        LASSERTF(mlen <= rlen, "%d <= %d\n", mlen, rlen);
-       /* Either all pages or all vaddrs */
-       LASSERTF(!(kiov != NULL && iov != NULL), "kiov %p iov %p\n",
-               kiov, iov);
 
        GNIDBG_MSG(D_NET, rxmsg, "conn %p, rxmsg %p, lntmsg %p"
-               " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
+               " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
                conn, rxmsg, lntmsg,
-               niov, kiov, iov, offset, mlen, rlen);
+               niov, kiov, offset, mlen, rlen);
 
        /* we need to lock here as recv can be called from any context */
        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
@@ -2463,7 +2347,7 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 
                /* someone closed the conn after we copied this out, nuke it */
                kgnilnd_consume_rx(rx);
-               lnet_finalize(ni, lntmsg, conn->gnc_error);
+               lnet_finalize(lntmsg, conn->gnc_error);
                RETURN(0);
        }
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
@@ -2471,8 +2355,8 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
        switch (rxmsg->gnm_type) {
        default:
                GNIDBG_MSG(D_NETERROR, rxmsg, "conn %p, rx %p, rxmsg %p, lntmsg %p"
-               " niov=%d kiov=%p iov=%p offset=%d mlen=%d rlen=%d",
-               conn, rx, rxmsg, lntmsg, niov, kiov, iov, offset, mlen, rlen);
+               " niov=%d kiov=%p offset=%d mlen=%d rlen=%d",
+               conn, rx, rxmsg, lntmsg, niov, kiov, offset, mlen, rlen);
                LBUG();
 
        case GNILND_MSG_IMMEDIATE:
@@ -2525,26 +2409,20 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                        }
                }
 
-               if (kiov != NULL)
-                       lnet_copy_flat2kiov(
-                               niov, kiov, offset,
-                               *kgnilnd_tunables.kgn_max_immediate,
-                               &rxmsg[1], 0, mlen);
-               else
-                       lnet_copy_flat2iov(
-                               niov, iov, offset,
-                               *kgnilnd_tunables.kgn_max_immediate,
-                               &rxmsg[1], 0, mlen);
+               lnet_copy_flat2kiov(
+                       niov, kiov, offset,
+                       *kgnilnd_tunables.kgn_max_immediate,
+                       &rxmsg[1], 0, mlen);
 
                kgnilnd_consume_rx(rx);
-               lnet_finalize(ni, lntmsg, 0);
+               lnet_finalize(lntmsg, 0);
                RETURN(0);
 
        case GNILND_MSG_PUT_REQ:
                /* LNET wants to truncate or drop transaction, sending NAK */
                if (mlen == 0) {
                        kgnilnd_consume_rx(rx);
-                       lnet_finalize(ni, lntmsg, 0);
+                       lnet_finalize(lntmsg, 0);
 
                        /* only error if lntmsg == NULL, otherwise we are just
                         * short circuiting the rdma process of 0 bytes */
@@ -2566,7 +2444,8 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                        GOTO(nak_put_req, rc);
                }
 
-               rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
+               rc = kgnilnd_setup_rdma_buffer(tx, niov,
+                                              kiov, offset, mlen);
                if (rc != 0) {
                        GOTO(nak_put_req, rc);
                }
@@ -2603,7 +2482,7 @@ nak_put_req:
                /* LNET wants to truncate or drop transaction, sending NAK */
                if (mlen == 0) {
                        kgnilnd_consume_rx(rx);
-                       lnet_finalize(ni, lntmsg, 0);
+                       lnet_finalize(lntmsg, 0);
 
                        /* only error if lntmsg == NULL, otherwise we are just
                         * short circuiting the rdma process of 0 bytes */
@@ -2626,12 +2505,11 @@ nak_put_req:
                        if (rc != 0)
                                GOTO(nak_get_req_rev, rc);
 
-
-                       rc = kgnilnd_setup_rdma_buffer(tx, niov, iov, kiov, offset, mlen);
+                       rc = kgnilnd_setup_rdma_buffer(tx, niov,
+                                                      kiov, offset, mlen);
                        if (rc != 0)
                                GOTO(nak_get_req_rev, rc);
 
-
                        tx->tx_msg.gnm_u.putack.gnpam_src_cookie =
                                rxmsg->gnm_u.putreq.gnprm_cookie;
                        tx->tx_msg.gnm_u.putack.gnpam_dst_cookie = tx->tx_id.txe_cookie;
@@ -2673,7 +2551,7 @@ nak_get_req_rev:
                /* LNET wants to truncate or drop transaction, sending NAK */
                if (mlen == 0) {
                        kgnilnd_consume_rx(rx);
-                       lnet_finalize(ni, lntmsg, 0);
+                       lnet_finalize(lntmsg, 0);
 
                        /* only error if lntmsg == NULL, otherwise we are just
                         * short circuiting the rdma process of 0 bytes */
@@ -2748,7 +2626,7 @@ kgnilnd_check_conn_timeouts_locked(kgn_conn_t *conn)
        if (time_after_eq(now, newest_last_rx + timeout)) {
                uint32_t level = D_CONSOLE|D_NETERROR;
 
-               if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_DOWN) {
+               if (conn->gnc_peer->gnp_state == GNILND_PEER_DOWN) {
                        level = D_NET;
                }
                        GNIDBG_CONN(level, conn,
@@ -2810,7 +2688,7 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
                peer, libcfs_nid2str(peer->gnp_nid),
                peer->gnp_reconnect_interval);
 
-       timeout = cfs_time_seconds(MAX(*kgnilnd_tunables.kgn_timeout,
+       timeout = cfs_time_seconds(max(*kgnilnd_tunables.kgn_timeout,
                                       GNILND_MIN_TIMEOUT));
 
        conn = kgnilnd_find_conn_locked(peer);
@@ -2824,6 +2702,14 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
                                conn->gnc_close_recvd = GNILND_CLOSE_INJECT1;
                                conn->gnc_peer_error = -ETIMEDOUT;
                        }
+
+                       if (*kgnilnd_tunables.kgn_to_reconn_disable &&
+                           rc == -ETIMEDOUT) {
+                               peer->gnp_state = GNILND_PEER_TIMED_OUT;
+                               CDEBUG(D_WARNING,
+                                      "%s conn timed out, will reconnect upon request from peer\n",
+                                      libcfs_nid2str(conn->gnc_peer->gnp_nid));
+                       }
                        /* Once we mark closed, any of the scheduler threads could
                         * get it and move through before we hit the fail loc code */
                        kgnilnd_close_conn_locked(conn, rc);
@@ -2867,7 +2753,7 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
        /* Don't reconnect if we are still trying to clear out old conns.
         * This prevents us sending traffic on the new mbox before ensuring we are done
         * with the old one */
-       reconnect = (peer->gnp_down == GNILND_RCA_NODE_UP) &&
+       reconnect = (peer->gnp_state == GNILND_PEER_UP) &&
                    (atomic_read(&peer->gnp_dirty_eps) == 0);
 
        /* fast reconnect after a timeout */
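A minimal sketch of the peer-state gate on reconnect introduced above, using hypothetical demo_ types that stand in for GNILND_PEER_UP/GNILND_PEER_TIMED_OUT and gnp_dirty_eps; the real driver keeps this state on kgn_peer_t.

enum demo_peer_state {
	DEMO_PEER_UP,
	DEMO_PEER_DOWN,
	DEMO_PEER_TIMED_OUT,
};

struct demo_peer {
	enum demo_peer_state	state;
	int			dirty_eps;	/* endpoints still tearing down */
};

static bool
demo_should_reconnect(const struct demo_peer *peer)
{
	/* only reconnect from a clean, fully-up peer; a peer parked in
	 * TIMED_OUT waits for the remote side to initiate instead */
	return peer->state == DEMO_PEER_UP && peer->dirty_eps == 0;
}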
@@ -2886,8 +2772,9 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
 
                CDEBUG(D_NET, "starting connect to %s\n",
                        libcfs_nid2str(peer->gnp_nid));
-               LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE, "Peer was idle and we"
-                       "have a write_lock, state issue %d\n", peer->gnp_connecting);
+               LASSERTF(peer->gnp_connecting == GNILND_PEER_IDLE,
+                        "Peer was idle and we have a write_lock, state issue %d\n",
+                        peer->gnp_connecting);
 
                peer->gnp_connecting = GNILND_PEER_CONNECT;
                kgnilnd_peer_addref(peer); /* extra ref for connd */
@@ -2977,8 +2864,6 @@ kgnilnd_check_peer_timeouts_locked(kgn_peer_t *peer, struct list_head *todie,
                        }
                }
        }
-
-       return;
 }
 
 void
@@ -2986,11 +2871,8 @@ kgnilnd_reaper_check(int idx)
 {
        struct list_head  *peers = &kgnilnd_data.kgn_peers[idx];
        struct list_head  *ctmp, *ctmpN;
-       struct list_head   geriatrics;
-       struct list_head   souls;
-
-       INIT_LIST_HEAD(&geriatrics);
-       INIT_LIST_HEAD(&souls);
+       LIST_HEAD(geriatrics);
+       LIST_HEAD(souls);
 
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 
@@ -3042,8 +2924,6 @@ kgnilnd_reaper(void *arg)
        struct timer_list  timer;
        DEFINE_WAIT(wait);
 
-       cfs_block_allsigs();
-
        /* all gnilnd threads need to run fairly urgently */
        set_user_nice(current, *kgnilnd_tunables.kgn_nice);
        spin_lock(&kgnilnd_data.kgn_reaper_lock);
@@ -3130,8 +3010,8 @@ kgnilnd_reaper(void *arg)
 int
 kgnilnd_recv_bte_get(kgn_tx_t *tx) {
        unsigned niov, offset, nob;
-       lnet_kiov_t     *kiov;
-       lnet_msg_t *lntmsg = tx->tx_lntmsg[0];
+       struct bio_vec *kiov;
+       struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
        kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
 
        if (kiov != NULL) {
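The switch from lnet_kiov_t to struct bio_vec means page fragments are now described by the standard bv_page/bv_offset/bv_len members. A minimal sketch of summing the bytes covered by such a vector; demo_kiov_bytes() is hypothetical.

#include <linux/bvec.h>

static size_t
demo_kiov_bytes(const struct bio_vec *kiov, unsigned int niov)
{
	size_t nob = 0;
	unsigned int i;

	/* each bio_vec entry describes bv_len bytes of one page fragment */
	for (i = 0; i < niov; i++)
		nob += kiov[i].bv_len;

	return nob;
}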
@@ -3329,6 +3209,7 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
        kgn_conn_t            *conn = NULL;
        int                    queued_fma, saw_reply, rc;
        long                   num_processed = 0;
+       struct list_head      *ctmp, *ctmpN;
 
        for (;;) {
                /* make sure we don't keep looping if we need to reset */
@@ -3351,6 +3232,22 @@ kgnilnd_check_fma_send_cq(kgn_device_t *dev)
                               "SMSG send CQ %d not ready (data %#llx) "
                               "processed %ld\n", dev->gnd_id, event_data,
                               num_processed);
+
+                       if (num_processed > 0) {
+                               spin_lock(&dev->gnd_lock);
+                               if (!list_empty(&dev->gnd_delay_conns)) {
+                                       list_for_each_safe(ctmp, ctmpN, &dev->gnd_delay_conns) {
+                                               conn = list_entry(ctmp, kgn_conn_t, gnc_delaylist);
+                                               list_del_init(&conn->gnc_delaylist);
+                                               CDEBUG(D_NET, "Moving Conn %p from delay queue to ready_queue\n", conn);
+                                               kgnilnd_schedule_conn_nolock(conn);
+                                       }
+                                       spin_unlock(&dev->gnd_lock);
+                                       kgnilnd_schedule_device(dev);
+                               } else {
+                                       spin_unlock(&dev->gnd_lock);
+                               }
+                       }
                        return num_processed;
                }
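A simplified sketch of the delayed-connection drain added above: once the send CQ has returned credits (num_processed > 0), everything on the delay list is moved to the ready queue while the device lock is held, mirroring kgnilnd_schedule_conn_nolock(). The demo_ structures below are illustrative stand-ins for kgn_device_t and kgn_conn_t, not the driver's real layouts.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t		lock;
	struct list_head	delay_conns;
	struct list_head	ready_conns;
};

struct demo_conn {
	struct list_head	delaylist;
	struct list_head	schedlist;
};

static void
demo_drain_delayed(struct demo_dev *dev)
{
	struct demo_conn *conn, *tmp;

	spin_lock(&dev->lock);
	list_for_each_entry_safe(conn, tmp, &dev->delay_conns, delaylist) {
		/* move each delayed conn to the ready queue under the lock */
		list_del_init(&conn->delaylist);
		list_add_tail(&conn->schedlist, &dev->ready_conns);
	}
	spin_unlock(&dev->lock);
}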
 
@@ -3520,8 +3417,7 @@ kgnilnd_check_fma_rcv_cq(kgn_device_t *dev)
                                /* set overrun too */
                                event_data |= (1UL << 63);
                                LASSERTF(GNI_CQ_OVERRUN(event_data),
-                                        "(1UL << 63) is no longer the bit to"
-                                        "set to indicate CQ_OVERRUN\n");
+                                        "(1UL << 63) is no longer the bit to set to indicate CQ_OVERRUN\n");
                        }
                }
                /* sender should get error event too and take care
@@ -4468,8 +4364,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                               "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_finalize_rx_done(tx, msg);
@@ -4487,8 +4382,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                               "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
@@ -4500,8 +4394,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                               "bad tx buftype %d", tx->tx_buftype);
 
                lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1],
@@ -4515,8 +4408,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                               tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_finalize_rx_done(tx, msg);
@@ -4529,8 +4421,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                               "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_finalize_rx_done(tx, msg);
@@ -4542,8 +4433,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                if (tx == NULL)
                        break;
 
-               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                              tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+               GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
@@ -4846,11 +4736,9 @@ kgnilnd_process_mapped_tx(kgn_device_t *dev)
                } else {
                       GNIDBG_TX(log_retrans_level, tx,
                                "transient map failure #%d %d pages/%d bytes phys %u@%u "
-                               "virt %u@%llu "
                                "nq_map %d mdd# %d/%d GART %ld",
                                dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
                                dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
-                               dev->gnd_map_nvirt, dev->gnd_map_virtnob,
                                atomic_read(&dev->gnd_nq_map),
                                atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
                                atomic64_read(&dev->gnd_nbytes_map));
@@ -4891,6 +4779,12 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
 
                conn = list_first_entry(&dev->gnd_ready_conns, kgn_conn_t, gnc_schedlist);
                list_del_init(&conn->gnc_schedlist);
+               /* Since this conn is being processed now, it no longer
+                * needs to be on the delaylist.
+                */
+
+               if (!list_empty(&conn->gnc_delaylist))
+                       list_del_init(&conn->gnc_delaylist);
                spin_unlock(&dev->gnd_lock);
 
                conn_sched = xchg(&conn->gnc_scheduled, GNILND_CONN_PROCESS);
@@ -4917,7 +4811,7 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
                                kgnilnd_conn_decref(conn);
                                up_write(&dev->gnd_conn_sem);
                        } else if (rc != 1) {
-                       kgnilnd_conn_decref(conn);
+                               kgnilnd_conn_decref(conn);
                        }
                        /* clear this so that scheduler thread doesn't spin */
                        found_work = 0;
@@ -4968,7 +4862,7 @@ kgnilnd_process_conns(kgn_device_t *dev, unsigned long deadline)
                        kgnilnd_conn_decref(conn);
                        up_write(&dev->gnd_conn_sem);
                } else if (rc != 1) {
-               kgnilnd_conn_decref(conn);
+                       kgnilnd_conn_decref(conn);
                }
 
                /* check list again with lock held */
@@ -4997,8 +4891,6 @@ kgnilnd_scheduler(void *arg)
 
        dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
 
-       cfs_block_allsigs();
-
        /* all gnilnd threads need to run fairly urgently */
        set_user_nice(current, *kgnilnd_tunables.kgn_sched_nice);
        deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_sched_timeout);