Whamcloud - gitweb
LU-9679 lnet: use LIST_HEAD() for local lists.
[fs/lustre-release.git] / lnet / klnds / gnilnd / gnilnd_conn.c
index 39716b8..9fa539c 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * Copyright (C) 2012 Cray, Inc.
  *
+ * Copyright (c) 2014, Intel Corporation.
+ *
  *   Author: Nic Henke <nic@cray.com>
  *   Author: James Shimek <jshimek@cray.com>
  *
@@ -36,11 +38,15 @@ kgnilnd_map_fmablk(kgn_device_t *device, kgn_fma_memblock_t *fma_blk)
 {
        gni_return_t            rrc;
        __u32                   flags = GNI_MEM_READWRITE;
+       static unsigned long    reg_to;
+       int                     rfto = *kgnilnd_tunables.kgn_reg_fail_timeout;
 
        if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
                flags |= GNI_MEM_PHYS_CONT;
        }
 
+       fma_blk->gnm_hold_timeout = 0;
+
        /* make sure we are mapping a clean block */
        LASSERTF(fma_blk->gnm_hndl.qword1 == 0UL, "fma_blk %p dirty\n", fma_blk);
 
@@ -48,14 +54,25 @@ kgnilnd_map_fmablk(kgn_device_t *device, kgn_fma_memblock_t *fma_blk)
                                   fma_blk->gnm_blk_size, device->gnd_rcv_fma_cqh,
                                   flags, &fma_blk->gnm_hndl);
        if (rrc != GNI_RC_SUCCESS) {
-               /* XXX Nic: need a way to silence this for runtime stuff that is ok to fail
-                * -- like when under MDD or GART pressure on big systems
-                */
+               if (rfto != GNILND_REGFAILTO_DISABLE) {
+                       if (reg_to == 0) {
+                               reg_to = jiffies + cfs_time_seconds(rfto);
+                       } else if (time_after(jiffies, reg_to)) {
+                               CERROR("FATAL:fmablk registration has failed "
+                                      "for %ld seconds.\n",
+                                      cfs_duration_sec(jiffies - reg_to) +
+                                               rfto);
+                               LBUG();
+                       }
+               }
+
                CNETERR("register fmablk failed 0x%p mbox_size %d flags %u\n",
                        fma_blk, fma_blk->gnm_mbox_size, flags);
                RETURN(-ENOMEM);
        }
 
+       reg_to = 0;
+
        /* PHYS_CONT memory isn't really mapped, at least not in GART -
         *  but all mappings chew up a MDD
         */
@@ -79,9 +96,22 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
        gni_smsg_attr_t         smsg_attr;
        unsigned long           fmablk_vers;
 
-       /* we'll use fmablk_vers and the gnd_fmablk_sem to gate access
+#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
+       /* We allocate large blocks of memory here, potentially leading
+        * to memory exhaustion during massive reconnects in a network
+        * outage. Limit the number of fma blocks in use by always keeping
+        * a percentage of pages free, initially set to 25% of total memory. */
+       if (global_page_state(NR_FREE_PAGES) < kgnilnd_data.free_pages_limit) {
+               LCONSOLE_INFO("Exceeding free page limit of %ld. "
+                             "Free pages available %ld\n",
+                             kgnilnd_data.free_pages_limit,
+                             global_page_state(NR_FREE_PAGES));
+               return -ENOMEM;
+       }
+#endif
+       /* we'll use fmablk_vers and the gnd_fmablk_mutex to gate access
         * to this allocation code. Everyone will sample the version
-        * before and after getting the semaphore. If it has changed,
+        * before and after getting the mutex. If it has changed,
         * we'll bail out to check the lists again - this indicates that
         * some sort of change was made to the lists and it is possible
         * that there is a mailbox for us to find now. This should prevent
@@ -89,12 +119,12 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
         * that need a yet-to-be-allocated mailbox for a connection. */
 
        fmablk_vers = atomic_read(&device->gnd_fmablk_vers);
-       down(&device->gnd_fmablk_sem);
+       mutex_lock(&device->gnd_fmablk_mutex);
 
        if (fmablk_vers != atomic_read(&device->gnd_fmablk_vers)) {
                /* version changed while we were waiting for semaphore,
                 * we'll recheck the lists assuming something nice happened */
-               up(&device->gnd_fmablk_sem);
+               mutex_unlock(&device->gnd_fmablk_mutex);
                return 0;
        }
 
@@ -125,13 +155,13 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
         * as reallocating them is tough if there is memory fragmentation */
 
        if (use_phys) {
-               fma_blk->gnm_block = cfs_mem_cache_alloc(kgnilnd_data.kgn_mbox_cache, CFS_ALLOC_ATOMIC);
+               fma_blk->gnm_block = kmem_cache_alloc(kgnilnd_data.kgn_mbox_cache, GFP_ATOMIC);
                if (fma_blk->gnm_block == NULL) {
                        CNETERR("could not allocate physical SMSG mailbox memory\n");
                        rc = -ENOMEM;
                        GOTO(free_desc, rc);
                }
-               fma_blk->gnm_blk_size = KMALLOC_MAX_SIZE;
+               fma_blk->gnm_blk_size = GNILND_MBOX_SIZE;
                num_mbox = fma_blk->gnm_blk_size / fma_blk->gnm_mbox_size;
 
                LASSERTF(num_mbox >= 1,
@@ -149,7 +179,7 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
                         num_mbox, fma_blk->gnm_blk_size, fma_blk->gnm_mbox_size,
                         *kgnilnd_tunables.kgn_mbox_per_block);
 
-               LIBCFS_ALLOC(fma_blk->gnm_block, fma_blk->gnm_blk_size);
+               fma_blk->gnm_block = kgnilnd_vzalloc(fma_blk->gnm_blk_size);
                if (fma_blk->gnm_block == NULL) {
                        CNETERR("could not allocate virtual SMSG mailbox memory, %d bytes\n", fma_blk->gnm_blk_size);
                        rc = -ENOMEM;
@@ -187,7 +217,7 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
        fma_blk->gnm_avail_mboxs = fma_blk->gnm_num_mboxs = num_mbox;
 
        CDEBUG(D_MALLOC, "alloc fmablk 0x%p num %d msg_maxsize %d credits %d "
-               "mbox_size %d MDD "LPX64"."LPX64"\n",
+               "mbox_size %d MDD %#llx.%#llx\n",
                fma_blk, num_mbox, smsg_attr.msg_maxsize, smsg_attr.mbox_maxcredit,
                fma_blk->gnm_mbox_size, fma_blk->gnm_hndl.qword1,
                fma_blk->gnm_hndl.qword2);
@@ -203,7 +233,7 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
 
        spin_unlock(&device->gnd_fmablk_lock);
 
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
 
        return 0;
 
@@ -213,14 +243,14 @@ free_bit:
        LIBCFS_FREE(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox) * sizeof (unsigned long));
 free_blk:
        if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
-               LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
+               kgnilnd_vfree(fma_blk->gnm_block, fma_blk->gnm_blk_size);
        } else {
-               cfs_mem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
+               kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
        }
 free_desc:
        LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
 out:
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
        return rc;
 }
 
@@ -230,8 +260,11 @@ kgnilnd_unmap_fmablk(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)
        gni_return_t            rrc;
 
        /* if some held, set hold_timeout from conn timeouts used in this block
-        * but not during shutdown, then just nuke and pave */
-       if (fma_blk->gnm_held_mboxs && (!kgnilnd_data.kgn_shutdown)) {
+        * but not during shutdown, then just nuke and pave.
+        * During a stack reset, we need to deregister with a hold timeout
+        * set so we don't use the same MDD after the reset is complete */
+       if ((fma_blk->gnm_held_mboxs && !kgnilnd_data.kgn_shutdown) ||
+           kgnilnd_data.kgn_in_reset) {
                fma_blk->gnm_hold_timeout = GNILND_TIMEOUT2DEADMAN;
        }
 
@@ -253,7 +286,9 @@ kgnilnd_unmap_fmablk(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)
                "tried to double unmap or something bad, fma_blk %p (rrc %d)\n",
                fma_blk, rrc);
 
-       if (fma_blk->gnm_hold_timeout) {
+       if (fma_blk->gnm_hold_timeout &&
+           !(kgnilnd_data.kgn_in_reset &&
+             fma_blk->gnm_state == GNILND_FMABLK_PHYS)) {
                atomic_inc(&dev->gnd_n_mdd_held);
        } else {
                atomic_dec(&dev->gnd_n_mdd);
@@ -310,9 +345,9 @@ kgnilnd_free_fmablk_locked(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)
                fma_blk, fma_blk->gnm_block, fma_blk->gnm_mbox_size);
 
        if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
-               cfs_mem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
+               kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
        } else {
-               LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
+               kgnilnd_vfree(fma_blk->gnm_block, fma_blk->gnm_blk_size);
        }
        fma_blk->gnm_state = GNILND_FMABLK_FREED;
 
@@ -380,7 +415,7 @@ kgnilnd_find_free_mbox(kgn_conn_t *conn)
 
                CDEBUG(D_NET, "conn %p smsg %p fmablk %p "
                        "allocating SMSG mbox %d buf %p "
-                       "offset %u hndl "LPX64"."LPX64"\n",
+                       "offset %u hndl %#llx.%#llx\n",
                        conn, smsg_attr, fma_blk, id,
                        smsg_attr->msg_buffer, smsg_attr->mbox_offset,
                        fma_blk->gnm_hndl.qword1,
@@ -470,14 +505,14 @@ kgnilnd_release_mbox(kgn_conn_t *conn, int purgatory_hold)
         * > 0 - hold it for now */
        if (purgatory_hold == 0) {
                CDEBUG(D_NET, "conn %p smsg %p fmablk %p freeing SMSG mbox %d "
-                       "hndl "LPX64"."LPX64"\n",
+                       "hndl %#llx.%#llx\n",
                        conn, smsg_attr, fma_blk, id,
                        fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);
                fma_blk->gnm_avail_mboxs++;
 
        } else if (purgatory_hold > 0) {
                CDEBUG(D_NET, "conn %p smsg %p fmablk %p holding SMSG mbox %d "
-                       "hndl "LPX64"."LPX64"\n",
+                       "hndl %#llx.%#llx\n",
                        conn, smsg_attr, fma_blk, id,
                        fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);
 
@@ -486,7 +521,7 @@ kgnilnd_release_mbox(kgn_conn_t *conn, int purgatory_hold)
                                                conn->gnc_timeout);
        } else {
                CDEBUG(D_NET, "conn %p smsg %p fmablk %p release SMSG mbox %d "
-                       "hndl "LPX64"."LPX64"\n",
+                       "hndl %#llx.%#llx\n",
                        conn, smsg_attr, fma_blk, id,
                        fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);
 
@@ -584,42 +619,42 @@ kgnilnd_map_phys_fmablk(kgn_device_t *device)
        int                     rc = 0;
        kgn_fma_memblock_t     *fma_blk;
 
-       /* use sem to gate access to single thread, just in case */
-       down(&device->gnd_fmablk_sem);
+       /* use mutex to gate access to single thread, just in case */
+       mutex_lock(&device->gnd_fmablk_mutex);
 
        spin_lock(&device->gnd_fmablk_lock);
 
        list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
-               if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
+               if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
                        rc = kgnilnd_map_fmablk(device, fma_blk);
                        if (rc)
                                break;
+               }
        }
        spin_unlock(&device->gnd_fmablk_lock);
 
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
 
        RETURN(rc);
 }
 
 void
-kgnilnd_unmap_phys_fmablk(kgn_device_t *device)
+kgnilnd_unmap_fma_blocks(kgn_device_t *device)
 {
 
        kgn_fma_memblock_t      *fma_blk;
 
-       /* use sem to gate access to single thread, just in case */
-       down(&device->gnd_fmablk_sem);
+       /* use mutex to gate access to single thread, just in case */
+       mutex_lock(&device->gnd_fmablk_mutex);
 
        spin_lock(&device->gnd_fmablk_lock);
 
        list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
-               if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
-                       kgnilnd_unmap_fmablk(device, fma_blk);
+               kgnilnd_unmap_fmablk(device, fma_blk);
        }
        spin_unlock(&device->gnd_fmablk_lock);
 
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
 }
 
 void
@@ -628,8 +663,8 @@ kgnilnd_free_phys_fmablk(kgn_device_t *device)
 
        kgn_fma_memblock_t      *fma_blk, *fma_blkN;
 
-       /* use sem to gate access to single thread, just in case */
-       down(&device->gnd_fmablk_sem);
+       /* use mutex to gate access to single thread, just in case */
+       mutex_lock(&device->gnd_fmablk_mutex);
 
        spin_lock(&device->gnd_fmablk_lock);
 
@@ -639,7 +674,7 @@ kgnilnd_free_phys_fmablk(kgn_device_t *device)
        }
        spin_unlock(&device->gnd_fmablk_lock);
 
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
 }
 
 /* kgnilnd dgram nid->struct managment */
@@ -698,7 +733,7 @@ kgnilnd_pack_connreq(kgn_connreq_t *connreq, kgn_conn_t *conn,
        int err = 0;
 
        /* ensure we haven't violated max datagram size */
-       CLASSERT(sizeof(kgn_connreq_t) <= GNI_DATAGRAM_MAXSIZE);
+       BUILD_BUG_ON(sizeof(kgn_connreq_t) > GNI_DATAGRAM_MAXSIZE);
 
        /* no need to zero out, we do that when allocating dgram */
        connreq->gncr_magic     = GNILND_MSG_MAGIC;
@@ -906,7 +941,7 @@ kgnilnd_unpack_connreq(kgn_dgram_t *dgram)
        }
 
        if (connreq->gncr_peerstamp == 0 || connreq->gncr_connstamp == 0) {
-               CERROR("Recived bad timestamps peer "LPU64" conn "LPU64"\n",
+               CERROR("Recived bad timestamps peer %llu conn %llu\n",
                connreq->gncr_peerstamp, connreq->gncr_connstamp);
                return -EPROTO;
        }
@@ -925,8 +960,7 @@ kgnilnd_alloc_dgram(kgn_dgram_t **dgramp, kgn_device_t *dev, kgn_dgram_type_t ty
 {
        kgn_dgram_t         *dgram;
 
-       dgram = cfs_mem_cache_alloc(kgnilnd_data.kgn_dgram_cache,
-                                       CFS_ALLOC_ATOMIC);
+       dgram = kmem_cache_alloc(kgnilnd_data.kgn_dgram_cache, GFP_ATOMIC);
        if (dgram == NULL)
                return -ENOMEM;
 
@@ -940,8 +974,10 @@ kgnilnd_alloc_dgram(kgn_dgram_t **dgramp, kgn_device_t *dev, kgn_dgram_type_t ty
 
        atomic_inc(&dev->gnd_ndgrams);
 
-       CDEBUG(D_MALLOC|D_NETTRACE, "slab-alloced 'dgram': %lu at %p.\n",
-              sizeof(*dgram), dgram);
+       CDEBUG(D_MALLOC|D_NETTRACE, "slab-alloced 'dgram': %lu at %p %s ndgrams"
+               " %d\n",
+               sizeof(*dgram), dgram, kgnilnd_dgram_type2str(dgram),
+               atomic_read(&dev->gnd_ndgrams));
 
        *dgramp = dgram;
        return 0;
@@ -1152,9 +1188,11 @@ kgnilnd_free_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
        dgram->gndg_magic = 0x6f5a6b5f;
        atomic_dec(&dev->gnd_ndgrams);
 
-       cfs_mem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
-       CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p.\n",
-              sizeof(*dgram), dgram);
+       kmem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
+       CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p %s"
+              " ndgrams %d\n",
+              sizeof(*dgram), dgram, kgnilnd_dgram_type2str(dgram),
+              atomic_read(&dev->gnd_ndgrams));
 }
 
 int
@@ -1305,9 +1343,44 @@ post_failed:
        RETURN(rc);
 }
 
+/* The shutdown flag is set from the shutdown and stack reset threads. */
 void
-kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
+kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram, int shutdown)
 {
+       /* The conns of canceled active dgrams need to be put in purgatory so
+        * we don't reuse the mailbox */
+       if (unlikely(dgram->gndg_state == GNILND_DGRAM_CANCELED)) {
+               kgn_peer_t *peer;
+               kgn_conn_t *conn = dgram->gndg_conn;
+               lnet_nid_t nid = dgram->gndg_conn_out.gncr_dstnid;
+
+               dgram->gndg_state = GNILND_DGRAM_DONE;
+
+               /* During shutdown we've already removed the peer so we don't
+                * need to add a peer. During stack reset we don't care about
+                * MDDs since they are all released. */
+               if (!shutdown) {
+                       write_lock(&kgnilnd_data.kgn_peer_conn_lock);
+                       peer = kgnilnd_find_peer_locked(nid);
+
+                       if (peer != NULL) {
+                               CDEBUG(D_NET, "adding peer's conn with nid %s "
+                                       "to purgatory\n", libcfs_nid2str(nid));
+                               kgnilnd_conn_addref(conn);
+                               conn->gnc_peer = peer;
+                               kgnilnd_peer_addref(peer);
+                               kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);
+                               conn->gnc_state = GNILND_CONN_CLOSED;
+                               list_add_tail(&conn->gnc_list,
+                                             &peer->gnp_conns);
+                               kgnilnd_add_purgatory_locked(conn,
+                                                            conn->gnc_peer);
+                               kgnilnd_schedule_conn(conn);
+                       }
+                       write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+               }
+       }
+
        spin_lock(&dev->gnd_dgram_lock);
        kgnilnd_cancel_dgram_locked(dgram);
        spin_unlock(&dev->gnd_dgram_lock);
@@ -1366,13 +1439,13 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                RETURN(0);
        }
 
-       CDEBUG(D_NET, "ready "LPX64" on device 0x%p\n",
+       CDEBUG(D_NET, "ready %#llx on device 0x%p\n",
                readyid, dev);
 
        dgram = (kgn_dgram_t *)readyid;
 
        LASSERTF(dgram->gndg_magic == GNILND_DGRAM_MAGIC,
-                "dgram 0x%p from id "LPX64" with bad magic %x\n",
+                "dgram 0x%p from id %#llx with bad magic %x\n",
                 dgram, readyid, dgram->gndg_magic);
 
        LASSERTF(dgram->gndg_state == GNILND_DGRAM_POSTED ||
@@ -1381,8 +1454,9 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                 dgram, kgnilnd_dgram_state2str(dgram));
 
        LASSERTF(!list_empty(&dgram->gndg_list),
-                "dgram 0x%p with bad list state %s\n",
-                dgram, kgnilnd_dgram_state2str(dgram));
+                "dgram 0x%p with bad list state %s type %s\n",
+                dgram, kgnilnd_dgram_state2str(dgram),
+                kgnilnd_dgram_type2str(dgram));
 
        /* now we know that the datagram structure is ok, so pull off list */
        list_del_init(&dgram->gndg_list);
@@ -1394,10 +1468,6 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                dgram->gndg_state = GNILND_DGRAM_PROCESSING;
        }
 
-       spin_unlock(&dev->gnd_dgram_lock);
-
-       /* we now "own" this datagram */
-
        LASSERTF(dgram->gndg_conn != NULL,
                "dgram 0x%p with NULL conn\n", dgram);
 
@@ -1405,8 +1475,11 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                                             (__u64)dgram, &post_state,
                                             &remote_addr, &remote_id);
 
+       /* we now "own" this datagram */
+       spin_unlock(&dev->gnd_dgram_lock);
+
        LASSERTF(grc != GNI_RC_NO_MATCH, "kgni lied! probe_by_id told us that"
-                " id "LPU64" was ready\n", readyid);
+                " id %llu was ready\n", readyid);
 
        CDEBUG(D_NET, "grc %d dgram 0x%p type %s post_state %d "
                "remote_addr %u remote_id %u\n", grc, dgram,
@@ -1434,8 +1507,10 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                /* fake rc to mark that we've done something */
                rc = 1;
        } else {
-               /* bring out your dead! */
-               dgram->gndg_state = GNILND_DGRAM_DONE;
+               /* let kgnilnd_release_dgram take care of canceled dgrams */
+               if (dgram->gndg_state != GNILND_DGRAM_CANCELED) {
+                       dgram->gndg_state = GNILND_DGRAM_DONE;
+               }
        }
 
        *dgramp = dgram;
@@ -1443,7 +1518,7 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
 
 probe_for_out:
 
-       kgnilnd_release_dgram(dev, dgram);
+       kgnilnd_release_dgram(dev, dgram, 0);
        RETURN(rc);
 }
 
@@ -1471,9 +1546,9 @@ failed:
 int
 kgnilnd_cancel_net_dgrams(kgn_net_t *net)
 {
-       kgn_dgram_t            *dg, *dgN;
-       struct list_head        zombies;
-       int                     i;
+       kgn_dgram_t *dg, *dgN;
+       LIST_HEAD(zombies);
+       int i;
        ENTRY;
 
        /* we want to cancel any outstanding dgrams - we don't want to rely
@@ -1486,8 +1561,6 @@ kgnilnd_cancel_net_dgrams(kgn_net_t *net)
                 "in reset %d\n", net->gnn_shutdown,
                 kgnilnd_data.kgn_in_reset);
 
-       INIT_LIST_HEAD(&zombies);
-
        spin_lock(&net->gnn_dev->gnd_dgram_lock);
 
        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
@@ -1513,7 +1586,7 @@ int
 kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)
 {
        kgn_dgram_t *dg, *dgN;
-       struct list_head zombies;
+       LIST_HEAD(zombies);
        ENTRY;
 
        /* Time to kill the outstanding WC's
@@ -1525,7 +1598,6 @@ kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)
                "in reset %d\n", kgnilnd_data.kgn_wc_kill,
                kgnilnd_data.kgn_in_reset);
 
-       INIT_LIST_HEAD(&zombies);
        spin_lock(&dev->gnd_dgram_lock);
 
        do {
@@ -1539,10 +1611,8 @@ kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)
                        kgnilnd_cancel_dgram_locked(dg);
 
                        /* WC could be DONE already, check and if so add to list to be released */
-                       if (dg->gndg_state == GNILND_DGRAM_DONE) {
-                               list_del_init(&dg->gndg_list);
-                               list_add_tail(&dg->gndg_list, &zombies);
-                       }
+                       if (dg->gndg_state == GNILND_DGRAM_DONE)
+                               list_move_tail(&dg->gndg_list, &zombies);
                }
        } while (dg != NULL);
 
@@ -1550,12 +1620,41 @@ kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)
 
        list_for_each_entry_safe(dg, dgN, &zombies, gndg_list) {
                list_del_init(&dg->gndg_list);
-               kgnilnd_release_dgram(dev, dg);
+               kgnilnd_release_dgram(dev, dg, 1);
        }
        RETURN(0);
 
 }
 
+int
+kgnilnd_cancel_dgrams(kgn_device_t *dev)
+{
+       kgn_dgram_t *dg, *dgN;
+       int i;
+       ENTRY;
+
+       /* Cancel any outstanding non-wildcard datagrams regardless
+        * of which net they are on, as we are in base shutdown and
+        * don't care about connecting anymore.
+        */
+
+       LASSERTF(kgnilnd_data.kgn_wc_kill == 1,"We didnt get called from base shutdown\n");
+
+       spin_lock(&dev->gnd_dgram_lock);
+
+       /* Walk every bucket of the dgram hash table. The table has
+        * kgn_peer_hash_size buckets (see the matching loop in
+        * kgnilnd_cancel_net_dgrams); iterating only to hash_size - 1
+        * would silently skip the last bucket. */
+       for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
+               list_for_each_entry_safe(dg, dgN, &dev->gnd_dgrams[i],
+                                        gndg_list) {
+                       /* wildcard dgrams are torn down separately by
+                        * kgnilnd_cancel_wc_dgrams() */
+                       if (dg->gndg_type != GNILND_DGRAM_WC_REQ)
+                               kgnilnd_cancel_dgram_locked(dg);
+               }
+       }
+
+       spin_unlock(&dev->gnd_dgram_lock);
+
+       RETURN(0);
+}
+
+
 void
 kgnilnd_wait_for_canceled_dgrams(kgn_device_t *dev)
 {
@@ -1591,13 +1690,13 @@ kgnilnd_wait_for_canceled_dgrams(kgn_device_t *dev)
                if (grc != GNI_RC_SUCCESS)
                        continue;
 
-               CDEBUG(D_NET, "ready "LPX64" on device %d->0x%p\n",
+               CDEBUG(D_NET, "ready %#llx on device %d->0x%p\n",
                        readyid, dev->gnd_id, dev);
 
                rc = kgnilnd_probe_for_dgram(dev, &dgram);
                if (rc != 0) {
                        /* if we got a valid dgram or one that is now done, clean up */
-                       kgnilnd_release_dgram(dev, dgram);
+                       kgnilnd_release_dgram(dev, dgram, 1);
                }
        } while (atomic_read(&dev->gnd_canceled_dgrams));
 }
@@ -1690,7 +1789,7 @@ kgnilnd_finish_connect(kgn_dgram_t *dgram)
        /* assume this is a new peer  - it makes locking cleaner when it isn't */
        /* no holding kgn_net_rw_sem - already are at the kgnilnd_dgram_mover level */
 
-       rc = kgnilnd_create_peer_safe(&new_peer, her_nid, NULL);
+       rc = kgnilnd_create_peer_safe(&new_peer, her_nid, NULL, GNILND_PEER_UP);
        if (rc != 0) {
                CERROR("Can't create peer for %s\n", libcfs_nid2str(her_nid));
                return rc;
@@ -1745,12 +1844,12 @@ kgnilnd_finish_connect(kgn_dgram_t *dgram)
                }
        }
 
-       if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
-               CNETERR("Received connection request from %s that RCA thinks is"
-                       " down.\n", libcfs_nid2str(her_nid));
-               peer->gnp_down = GNILND_RCA_NODE_UP;
+       if (peer->gnp_state == GNILND_PEER_DOWN) {
+               CNETERR("Received connection request from down nid %s\n",
+                       libcfs_nid2str(her_nid));
        }
 
+       peer->gnp_state = GNILND_PEER_UP;
        nstale = kgnilnd_close_stale_conns_locked(peer, conn);
 
        /* either way with peer (new or existing), we are ok with ref counts here as the
@@ -1848,9 +1947,10 @@ kgnilnd_finish_connect(kgn_dgram_t *dgram)
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 
        /* Notify LNET that we now have a working connection to this peer.
-        * This is a Cray extension to the "standard" LND behavior. */
-       lnet_notify(peer->gnp_net->gnn_ni, peer->gnp_nid,
-                    1, cfs_time_current());
+        * This is a Cray extension to the "standard" LND behavior.
+        */
+       lnet_notify(peer->gnp_net->gnn_ni, peer->gnp_nid, true, true,
+                   ktime_get_seconds());
 
        /* drop our 'hold' ref */
        kgnilnd_conn_decref(conn);
@@ -1927,7 +2027,6 @@ kgnilnd_process_nak(kgn_dgram_t *dgram)
                        libcfs_nid2str(connreq->gncr_srcnid),
                        libcfs_nid2str(connreq->gncr_dstnid), errno, rc);
        } else {
-               rc = 0;
                spin_lock(&dgram->gndg_conn->gnc_device->gnd_connd_lock);
 
                if (list_empty(&peer->gnp_connd_list)) {
@@ -1958,7 +2057,7 @@ kgnilnd_process_nak(kgn_dgram_t *dgram)
        /* success! we found a peer and at least marked pending_nak */
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 
-       return 0;
+       return rc;
 }
 
 int
@@ -2056,7 +2155,7 @@ inform_peer:
 
        orig_dstnid = dgram->gndg_conn_out.gncr_dstnid;
 
-       kgnilnd_release_dgram(dev, dgram);
+       kgnilnd_release_dgram(dev, dgram, 0);
 
        CDEBUG(D_NET, "cleaning up dgram to %s, rc %d\n",
               libcfs_nid2str(orig_dstnid), rc);
@@ -2100,7 +2199,7 @@ inform_peer:
 
                /* now that we are outside the lock, tell Mommy */
                if (peer != NULL) {
-                       kgnilnd_peer_notify(peer, rc);
+                       kgnilnd_peer_notify(peer, rc, 0);
                        kgnilnd_peer_decref(peer);
                }
        }
@@ -2173,7 +2272,6 @@ kgnilnd_dgram_waitq(void *arg)
        DEFINE_WAIT(mover_done);
 
        snprintf(name, sizeof(name), "kgnilnd_dgn_%02d", dev->gnd_id);
-       cfs_daemonize(name);
        cfs_block_allsigs();
 
        /* all gnilnd threads need to run fairly urgently */
@@ -2354,7 +2452,6 @@ kgnilnd_dgram_mover(void *arg)
        DEFINE_WAIT(wait);
 
        snprintf(name, sizeof(name), "kgnilnd_dg_%02d", dev->gnd_id);
-       cfs_daemonize(name);
        cfs_block_allsigs();
        /* all gnilnd threads need to run fairly urgently */
        set_user_nice(current, *kgnilnd_tunables.kgn_nice);
@@ -2425,8 +2522,9 @@ kgnilnd_dgram_mover(void *arg)
                /* last second chance for others to poke us */
                did_something += xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_IDLE);
 
-               /* check flag variables before comittingi even if we did something;
-                * if we are after the deadline call schedule */
+               /* check flag variables before committing even if we
+                * did something; if we are after the deadline call
+                * schedule */
                if ((!did_something || time_after(jiffies, deadline)) &&
                    !kgnilnd_data.kgn_shutdown &&
                    !kgnilnd_data.kgn_quiesce_trigger) {
@@ -2445,4 +2543,3 @@ kgnilnd_dgram_mover(void *arg)
        kgnilnd_thread_fini();
        return 0;
 }
-