LU-11300 lnet: remove lnd_query interface.
diff --git a/lnet/klnds/gnilnd/gnilnd.c b/lnet/klnds/gnilnd/gnilnd.c
index 92a139c..bcbc0ac 100644
--- a/lnet/klnds/gnilnd/gnilnd.c
+++ b/lnet/klnds/gnilnd/gnilnd.c
@@ -1,6 +1,8 @@
 /*
  * Copyright (C) 2012 Cray, Inc.
  *
+ * Copyright (c) 2013, 2017, Intel Corporation.
+ *
  *   Author: Nic Henke <nic@cray.com>
  *   Author: James Shimek <jshimek@cray.com>
  *
@@ -23,7 +25,7 @@
 #include "gnilnd.h"
 
 /* Primary entry points from LNET.  There are no guarantees against reentrance. */
-lnd_t the_kgnilnd = {
+const struct lnet_lnd the_kgnilnd = {
        .lnd_type       = GNILND,
        .lnd_startup    = kgnilnd_startup,
        .lnd_shutdown   = kgnilnd_shutdown,
@@ -31,11 +33,56 @@ lnd_t the_kgnilnd = {
        .lnd_send       = kgnilnd_send,
        .lnd_recv       = kgnilnd_recv,
        .lnd_eager_recv = kgnilnd_eager_recv,
-       .lnd_query      = kgnilnd_query,
 };
 
 kgn_data_t      kgnilnd_data;
 
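+/* Spawn a named kernel thread and count it in kgn_nthreads; presumably the
+ * thread drops the count itself when it exits, so shutdown can wait on it. */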
+int
+kgnilnd_thread_start(int (*fn)(void *arg), void *arg, char *name, int id)
+{
+       struct task_struct *thrd;
+
+       thrd = kthread_run(fn, arg, "%s_%02d", name, id);
+       if (IS_ERR(thrd))
+               return PTR_ERR(thrd);
+
+       atomic_inc(&kgnilnd_data.kgn_nthreads);
+       return 0;
+}
+
+/* bind scheduler threads to cpus */
+int
+kgnilnd_start_sd_threads(void)
+{
+       int cpu;
+       int i = 0;
+       struct task_struct *task;
+
+       for_each_online_cpu(cpu) {
+               /* don't bind to cpu 0 - all interrupts are processed here */
+               if (cpu == 0)
+                       continue;
+
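+               /* kthread_create() leaves the thread stopped, so we can bind
+                * it before it runs; kthread_run() would start it too early */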
+               task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
+                                     "%s_%02d", "kgnilnd_sd", i);
+               if (!IS_ERR(task)) {
+                       kthread_bind(task, cpu);
+                       wake_up_process(task);
+               } else {
+                       CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
+                               PTR_ERR(task));
+                       return PTR_ERR(task);
+               }
+               atomic_inc(&kgnilnd_data.kgn_nthreads);
+
+               if (++i >= *kgnilnd_tunables.kgn_sched_threads)
+                       break;
+       }
+
+       return 0;
+}
+
 /* needs write_lock on kgn_peer_conn_lock */
 int
 kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
@@ -65,8 +112,8 @@ kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
                    newconn->gnc_peer_connstamp == conn->gnc_my_connstamp) {
                        CDEBUG(D_NET, "skipping prune of %p, "
                                "loopback and matching stamps"
-                               " connstamp "LPU64"("LPU64")"
-                               " peerstamp "LPU64"("LPU64")\n",
+                               " connstamp %llu(%llu)"
+                               " peerstamp %llu(%llu)\n",
                                conn, newconn->gnc_my_connstamp,
                                conn->gnc_peer_connstamp,
                                newconn->gnc_peer_connstamp,
@@ -76,25 +123,25 @@ kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
 
                if (conn->gnc_peerstamp != newconn->gnc_peerstamp) {
                        LASSERTF(conn->gnc_peerstamp < newconn->gnc_peerstamp,
-                               "conn 0x%p peerstamp "LPU64" >= "
-                               "newconn 0x%p peerstamp "LPU64"\n",
+                               "conn 0x%p peerstamp %llu >= "
+                               "newconn 0x%p peerstamp %llu\n",
                                conn, conn->gnc_peerstamp,
                                newconn, newconn->gnc_peerstamp);
 
                        CDEBUG(D_NET, "Closing stale conn nid: %s "
-                              " peerstamp:"LPX64"("LPX64")\n",
+                              " peerstamp:%#llx(%#llx)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peerstamp, newconn->gnc_peerstamp);
                } else {
 
                        LASSERTF(conn->gnc_peer_connstamp < newconn->gnc_peer_connstamp,
-                               "conn 0x%p peer_connstamp "LPU64" >= "
-                               "newconn 0x%p peer_connstamp "LPU64"\n",
+                               "conn 0x%p peer_connstamp %llu >= "
+                               "newconn 0x%p peer_connstamp %llu\n",
                                conn, conn->gnc_peer_connstamp,
                                newconn, newconn->gnc_peer_connstamp);
 
                        CDEBUG(D_NET, "Closing stale conn nid: %s"
-                              " connstamp:"LPU64"("LPU64")\n",
+                              " connstamp:%llu(%llu)\n",
                               libcfs_nid2str(peer->gnp_nid),
                               conn->gnc_peer_connstamp, newconn->gnc_peer_connstamp);
                }
@@ -123,8 +170,8 @@ kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
        list_for_each(tmp, &peer->gnp_conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_list);
                CDEBUG(D_NET, "checking conn 0x%p for peer %s"
-                       " lo %d new "LPU64" existing "LPU64
-                       " new peer "LPU64" existing peer "LPU64
+                       " lo %d new %llu existing %llu"
+                       " new peer %llu existing peer %llu"
                        " new dev %p existing dev %p\n",
                        conn, libcfs_nid2str(peer->gnp_nid),
                        loopback,
@@ -196,13 +243,15 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
                return -ENOMEM;
        }
 
-       LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
+       conn->gnc_tx_ref_table =
+               kgnilnd_vzalloc(GNILND_MAX_MSG_ID * sizeof(void *));
        if (conn->gnc_tx_ref_table == NULL) {
                CERROR("Can't allocate conn tx_ref_table\n");
-               rc = -ENOMEM;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENOMEM);
        }
 
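+       /* assumption: one mutex each to serialize SMSG and RDMA posts on
+        * this conn's endpoint */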
+       mutex_init(&conn->gnc_smsg_mutex);
+       mutex_init(&conn->gnc_rdma_mutex);
        atomic_set(&conn->gnc_refcount, 1);
        atomic_set(&conn->gnc_reaper_noop, 0);
        atomic_set(&conn->gnc_sched_noop, 0);
@@ -212,6 +261,7 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
        INIT_LIST_HEAD(&conn->gnc_schedlist);
        INIT_LIST_HEAD(&conn->gnc_fmaq);
        INIT_LIST_HEAD(&conn->gnc_mdd_list);
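+       /* conns ride gnc_delaylist onto the device's gnd_delay_conns while a
+        * retransmit is pending; see the delay-list assertions further down */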
+       INIT_LIST_HEAD(&conn->gnc_delaylist);
        spin_lock_init(&conn->gnc_list_lock);
        spin_lock_init(&conn->gnc_tx_lock);
        conn->gnc_magic = GNILND_CONN_MAGIC;
@@ -221,7 +271,7 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
        conn->gnc_next_tx = (int) GNILND_MAX_MSG_ID - 10;
 
        /* if this fails, we have conflicts and MAX_TX is too large */
-       CLASSERT(GNILND_MAX_MSG_ID < GNILND_MSGID_CLOSE);
+       BUILD_BUG_ON(GNILND_MAX_MSG_ID >= GNILND_MSGID_CLOSE);
 
        /* get a new unique CQ id for this conn */
        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
@@ -231,8 +281,7 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
 
        if (conn->gnc_cqid == 0) {
                CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
-               rc = -E2BIG;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -E2BIG);
        }
 
        CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
@@ -251,10 +300,8 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
        rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh,
                                &conn->gnc_ephandle);
        mutex_unlock(&dev->gnd_cq_mutex);
-       if (rrc != GNI_RC_SUCCESS) {
-               rc = -ENETDOWN;
-               GOTO(failed, rc);
-       }
+       if (rrc != GNI_RC_SUCCESS)
+               GOTO(failed, rc = -ENETDOWN);
 
        CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n",
               conn, conn->gnc_ephandle);
@@ -268,7 +315,8 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
 
 failed:
        atomic_dec(&kgnilnd_data.kgn_nconns);
-       LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
+       kgnilnd_vfree(conn->gnc_tx_ref_table,
+                     GNILND_MAX_MSG_ID * sizeof(void *));
        LIBCFS_FREE(conn, sizeof(*conn));
        return rc;
 }
@@ -399,8 +447,9 @@ kgnilnd_destroy_conn(kgn_conn_t *conn)
                list_empty(&conn->gnc_hashlist) &&
                list_empty(&conn->gnc_schedlist) &&
                list_empty(&conn->gnc_mdd_list) &&
+               list_empty(&conn->gnc_delaylist) &&
                conn->gnc_magic == GNILND_CONN_MAGIC,
-               "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d\n",
+               "conn 0x%p->%s IRQ %d sched %d purg %d ep 0x%p Mg %d lists %d/%d/%d/%d/%d\n",
                conn, conn->gnc_peer ? libcfs_nid2str(conn->gnc_peer->gnp_nid)
                                     : "<?>",
                !!in_interrupt(), conn->gnc_scheduled,
@@ -410,7 +459,8 @@ kgnilnd_destroy_conn(kgn_conn_t *conn)
                list_empty(&conn->gnc_list),
                list_empty(&conn->gnc_hashlist),
                list_empty(&conn->gnc_schedlist),
-               list_empty(&conn->gnc_mdd_list));
+               list_empty(&conn->gnc_mdd_list),
+               list_empty(&conn->gnc_delaylist));
 
        /* Tripping these is especially bad, as it means we have items on the
         *  lists that didn't keep their refcount on the connection - or
@@ -442,8 +492,8 @@ kgnilnd_destroy_conn(kgn_conn_t *conn)
                kgnilnd_peer_decref(conn->gnc_peer);
 
        if (conn->gnc_tx_ref_table != NULL) {
-               LIBCFS_FREE(conn->gnc_tx_ref_table,
-                           GNILND_MAX_MSG_ID * sizeof(void *));
+               kgnilnd_vfree(conn->gnc_tx_ref_table,
+                             GNILND_MAX_MSG_ID * sizeof(void *));
        }
 
        LIBCFS_FREE(conn, sizeof(*conn));
@@ -454,11 +504,13 @@ kgnilnd_destroy_conn(kgn_conn_t *conn)
 void
 kgnilnd_peer_alive(kgn_peer_t *peer)
 {
-       set_mb(peer->gnp_last_alive, jiffies);
+       time64_t now = ktime_get_seconds();
+
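+       /* set_mb() pairs the store with a full memory barrier so other cpus
+        * see the fresh timestamp immediately */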
+       set_mb(peer->gnp_last_alive, now);
 }
 
 void
-kgnilnd_peer_notify(kgn_peer_t *peer, int error)
+kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
 {
        int                     tell_lnet = 0;
        int                     nnets = 0;
@@ -487,10 +539,10 @@ kgnilnd_peer_notify(kgn_peer_t *peer, int error)
               peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting, conn,
               kgnilnd_data.kgn_in_reset, error);
 
-       if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
+       if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
            (conn == NULL) &&
            (!kgnilnd_data.kgn_in_reset) &&
-           (!kgnilnd_conn_clean_errno(error))) {
+           (!kgnilnd_conn_clean_errno(error))) || alive) {
                tell_lnet = 1;
        }
 
@@ -550,12 +602,13 @@ kgnilnd_peer_notify(kgn_peer_t *peer, int error)
                        peer_nid = kgnilnd_lnd2lnetnid(net->gnn_ni->ni_nid,
                                                                 peer->gnp_nid);
 
-                       CDEBUG(D_NET, "peer 0x%p->%s last_alive %lu (%lus ago)\n",
+                       CDEBUG(D_NET, "peer 0x%p->%s last_alive %lld (%llds ago)\n",
                                peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
-                               cfs_duration_sec(jiffies - peer->gnp_last_alive));
-
-                       lnet_notify(net->gnn_ni, peer_nid, 0, peer->gnp_last_alive);
+                               ktime_get_seconds() - peer->gnp_last_alive);
 
+                       lnet_notify(net->gnn_ni, peer_nid, alive,
+                                   (alive) ? true : false,
+                                   peer->gnp_last_alive);
 
                        kgnilnd_net_decref(net);
                }
@@ -584,7 +637,7 @@ kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
 
        /* if we NETERROR, make sure it is rate limited */
        if (!kgnilnd_conn_clean_errno(error) &&
-           peer->gnp_down == GNILND_RCA_NODE_UP) {
+           peer->gnp_state != GNILND_PEER_DOWN) {
                CNETERR("closing conn to %s: error %d\n",
                       libcfs_nid2str(peer->gnp_nid), error);
        } else {
@@ -620,6 +673,10 @@ kgnilnd_close_conn_locked(kgn_conn_t *conn, int error)
                conn->gnc_state = GNILND_CONN_CLOSING;
        }
 
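+       /* fail_loc hook: presumably stalls the close long enough for a
+        * racing RDMA CQ error to land while we are in CLOSING */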
+       if (CFS_FAIL_CHECK(CFS_FAIL_GNI_RDMA_CQ_ERROR))
+               msleep_interruptible(MSEC_PER_SEC);
+
        /* leave on peer->gnp_conns to make sure we don't let the reaper
         * or others try to unlink this peer until the conn is fully
         * processed for closing */
@@ -682,6 +739,11 @@ kgnilnd_complete_closed_conn(kgn_conn_t *conn)
                 kgnilnd_conn_state2str(conn));
 
        LASSERT(list_empty(&conn->gnc_hashlist));
+       /* We shouldn't be on the delay list; the conn can
+        * get added to this list during a retransmit, and retransmits
+        * only occur within scheduler threads.
+        */
+       LASSERT(list_empty(&conn->gnc_delaylist));
 
        /* we've sent the close, start nuking */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_SCHEDULE_COMPLETE))
@@ -753,19 +815,13 @@ kgnilnd_complete_closed_conn(kgn_conn_t *conn)
        logmsg = (nlive + nrdma + nq_rdma);
 
        if (logmsg) {
-               if (conn->gnc_peer->gnp_down == GNILND_RCA_NODE_UP) {
-                       CNETERR("Closed conn 0x%p->%s (errno %d, peer errno %d): "
-                               "canceled %d TX, %d/%d RDMA\n",
-                               conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
-                               conn->gnc_error, conn->gnc_peer_error,
-                               nlive, nq_rdma, nrdma);
-               } else {
-                       CDEBUG(D_NET, "Closed conn 0x%p->%s (errno %d,"
-                               " peer errno %d): canceled %d TX, %d/%d RDMA\n",
-                               conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
-                               conn->gnc_error, conn->gnc_peer_error,
-                               nlive, nq_rdma, nrdma);
-               }
+               int level = conn->gnc_peer->gnp_state == GNILND_PEER_UP ?
+                               D_NETERROR : D_NET;
+               CDEBUG(level, "Closed conn 0x%p->%s (errno %d,"
+                       " peer errno %d): canceled %d TX, %d/%d RDMA\n",
+                       conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
+                       conn->gnc_error, conn->gnc_peer_error,
+                       nlive, nq_rdma, nrdma);
        }
 
        kgnilnd_destroy_conn_ep(conn);
@@ -804,8 +860,8 @@ kgnilnd_complete_closed_conn(kgn_conn_t *conn)
 
        /* I'm telling Mommy! - use peer_error if they initiated close */
        kgnilnd_peer_notify(conn->gnc_peer,
-                           conn->gnc_error == -ECONNRESET ? conn->gnc_peer_error
-                                                          : conn->gnc_error);
+                           conn->gnc_error == -ECONNRESET ?
+                           conn->gnc_peer_error : conn->gnc_error, 0);
 
        EXIT;
 }
@@ -869,8 +925,8 @@ kgnilnd_set_conn_params(kgn_dgram_t *dgram)
 
        /* log this for help in debugging SMSG buffer re-use */
        CDEBUG(D_NET, "conn %p src %s dst %s smsg %p acquired"
-               " local cqid %u SMSG %p->%u hndl "LPX64"."LPX64
-               " remote cqid %u SMSG %p->%u hndl "LPX64"."LPX64"\n",
+               " local cqid %u SMSG %p->%u hndl %#llx.%#llx"
+               " remote cqid %u SMSG %p->%u hndl %#llx.%#llx\n",
                conn, libcfs_nid2str(connreq->gncr_srcnid),
                libcfs_nid2str(connreq->gncr_dstnid),
                &conn->gnpr_smsg_attr,
@@ -914,7 +970,10 @@ return_out:
  * kgn_peer_conn_lock is held, we guarantee that nobody calls
  * kgnilnd_add_peer_locked without checking gnn_shutdown */
 int
-kgnilnd_create_peer_safe(kgn_peer_t **peerp, lnet_nid_t nid, kgn_net_t *net)
+kgnilnd_create_peer_safe(kgn_peer_t **peerp,
+                        lnet_nid_t nid,
+                        kgn_net_t *net,
+                        int node_state)
 {
        kgn_peer_t      *peer;
        int             rc;
@@ -946,7 +1005,7 @@ kgnilnd_create_peer_safe(kgn_peer_t **peerp, lnet_nid_t nid, kgn_net_t *net)
                return -ENOMEM;
        }
        peer->gnp_nid = nid;
-       peer->gnp_down = GNILND_RCA_NODE_UP;
+       peer->gnp_state = node_state;
 
        /* translate from nid to nic addr & store */
        rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
@@ -1053,6 +1112,8 @@ kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
        CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer,
                conn->gnc_device);
 
+       LASSERTF(conn->gnc_in_purgatory == 0,
+               "Conn already in purgatory\n");
        conn->gnc_in_purgatory = 1;
 
        mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
@@ -1159,12 +1220,12 @@ kgnilnd_release_purgatory_list(struct list_head *conn_list)
                 * make sure we tell LNet - if this is from other context,
                 * the checks in the function will prevent an errant
                 * notification */
-               kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error);
+               kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);
 
                list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list,
                                         gmp_list) {
                        CDEBUG(D_NET,
-                              "dev %p releasing held mdd "LPX64"."LPX64"\n",
+                              "dev %p releasing held mdd %#llx.%#llx\n",
                               conn->gnc_device, gmp->gmp_map_key.qword1,
                               gmp->gmp_map_key.qword2);
 
@@ -1334,11 +1395,14 @@ kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
 {
        kgn_peer_t        *peer;
        int                rc;
+       int                node_state;
        ENTRY;
 
        if (nid == LNET_NID_ANY)
                return -EINVAL;
 
+       node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
+
        /* NB - this will not block during normal operations -
         * the only writer of this is in the startup/shutdown path. */
        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
@@ -1346,7 +1410,7 @@ kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
                rc = -ESHUTDOWN;
                RETURN(rc);
        }
-       rc = kgnilnd_create_peer_safe(&peer, nid, net);
+       rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
        if (rc != 0) {
                up_read(&kgnilnd_data.kgn_net_rw_sem);
                RETURN(rc);
@@ -1513,9 +1577,6 @@ kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
 
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 
-       /* release all of the souls found held in purgatory */
-       kgnilnd_release_purgatory_list(&souls);
-
        /* nuke peer TX */
        kgnilnd_txlist_done(&zombies, error);
 
@@ -1537,7 +1598,8 @@ kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
               atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
               atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
 
-               cfs_pause(cfs_time_seconds(1));
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(cfs_time_seconds(1));
                i++;
 
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
@@ -1607,8 +1669,8 @@ kgnilnd_get_conn_info(kgn_peer_t *peer,
 
        *device_id = conn->gnc_device->gnd_host_id;
        *peerstamp = conn->gnc_peerstamp;
-       *tx_seq = conn->gnc_tx_seq;
-       *rx_seq = conn->gnc_rx_seq;
+       *tx_seq = atomic_read(&conn->gnc_tx_seq);
+       *rx_seq = atomic_read(&conn->gnc_rx_seq);
        *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
        *nfma = atomic_read(&conn->gnc_nlive_fma);
        *nrdma = atomic_read(&conn->gnc_nlive_rdma);
@@ -1663,9 +1725,8 @@ kgnilnd_report_node_state(lnet_nid_t nid, int down)
                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 
                /* Don't add a peer for node up events */
-               if (down == GNILND_RCA_NODE_UP) {
+               if (down == GNILND_PEER_UP)
                        return 0;
-               }
 
                /* find any valid net - we don't care which one... */
                down_read(&kgnilnd_data.kgn_net_rw_sem);
@@ -1709,9 +1770,9 @@ kgnilnd_report_node_state(lnet_nid_t nid, int down)
                }
        }
 
-       peer->gnp_down = down;
+       peer->gnp_state = down;
 
-       if (down == GNILND_RCA_NODE_DOWN) {
+       if (down == GNILND_PEER_DOWN) {
                kgn_conn_t *conn;
 
                peer->gnp_down_event_time = jiffies;
@@ -1727,23 +1788,21 @@ kgnilnd_report_node_state(lnet_nid_t nid, int down)
 
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 
-       if (down == GNILND_RCA_NODE_DOWN) {
+       if (down == GNILND_PEER_DOWN) {
                /* using ENETRESET so we don't get messages from
                 * kgnilnd_tx_done
                 */
                kgnilnd_txlist_done(&zombies, -ENETRESET);
-
-               if (*kgnilnd_tunables.kgn_peer_health) {
-                       kgnilnd_peer_notify(peer, -ECONNRESET);
-               }
+               kgnilnd_peer_notify(peer, -ECONNRESET, 0);
+               LCONSOLE_INFO("Received down event for nid %d\n",
+                             LNET_NIDADDR(nid));
        }
 
-       CDEBUG(D_INFO, "marking nid %lld %s\n", nid, down ? "down" : "up");
        return 0;
 }
 
 int
-kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
 {
        struct libcfs_ioctl_data *data = arg;
        kgn_net_t                *net = ni->ni_data;
@@ -1862,74 +1921,6 @@ kgnilnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
        return rc;
 }
 
-void
-kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
-{
-       kgn_net_t               *net = ni->ni_data;
-       kgn_tx_t                *tx;
-       kgn_peer_t              *peer = NULL;
-       kgn_conn_t              *conn = NULL;
-       lnet_process_id_t       id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
-       ENTRY;
-
-       /* I expect to find him, so only take a read lock */
-       read_lock(&kgnilnd_data.kgn_peer_conn_lock);
-       peer = kgnilnd_find_peer_locked(nid);
-       if (peer != NULL) {
-               /* LIE if in a quiesce - we will update the timeouts after,
-                * but we don't want sends failing during it */
-               if (kgnilnd_data.kgn_quiesce_trigger) {
-                       *when = jiffies;
-                       read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
-                       GOTO(out, 0);
-               }
-
-               /* Update to best guess, might refine on later checks */
-               *when = peer->gnp_last_alive;
-
-               /* we have a peer, how about a conn? */
-               conn = kgnilnd_find_conn_locked(peer);
-
-               if (conn == NULL)  {
-                       /* if there is no conn, check peer last errno to see if clean disconnect
-                        * - if it was, we lie to LNet because we believe a TX would complete
-                        * on reconnect */
-                       if (kgnilnd_conn_clean_errno(peer->gnp_last_errno)) {
-                               *when = jiffies;
-                       }
-                       /* we still want to fire a TX and new conn in this case */
-               } else {
-                       /* gnp_last_alive is valid, run for the hills */
-                       read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
-                       GOTO(out, 0);
-               }
-       }
-       /* if we get here, either we have no peer or no conn for him, so fire off
-        * new TX to trigger conn setup */
-       read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
-
-       /* if we couldn't find him, we'll fire up a TX and get connected -
-        * if we don't do this, after ni_peer_timeout, LNet will declare him dead.
-        * So really we treat kgnilnd_query as a bit of a 'connect now' type
-        * event because it'll only do this when it wants to send
-        *
-        * Use a real TX for this to get the proper gnp_tx_queue behavior, etc
-        * normally we'd use kgnilnd_send_ctlmsg for this, but we don't really
-        * care that this goes out quickly since we already know we need a new conn
-        * formed */
-       if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NOOP_SEND))
-               return;
-
-       tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, ni->ni_nid);
-       if (tx != NULL) {
-               kgnilnd_launch_tx(tx, net, &id);
-       }
-out:
-       CDEBUG(D_NETTRACE, "peer 0x%p->%s when %lu\n", peer,
-              libcfs_nid2str(nid), *when);
-       EXIT;
-}
-
 int
 kgnilnd_dev_init(kgn_device_t *dev)
 {
@@ -1945,12 +1936,11 @@ kgnilnd_dev_init(kgn_device_t *dev)
        cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3;
 
        rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag,
-                                GNILND_COOKIE, 0,
+                                *kgnilnd_tunables.kgn_pkey, 0,
                                 &dev->gnd_domain);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc);
-               rc = -ENODEV;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENODEV);
        }
 
        rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id,
@@ -1958,17 +1948,14 @@ kgnilnd_dev_init(kgn_device_t *dev)
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't attach CDM to device %d (%d)\n",
                        dev->gnd_id, rrc);
-               rc = -ENODEV;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENODEV);
        }
 
        /* a bit gross, but not much we can do - Aries Sim doesn't have
         * hardcoded NIC/NID that we can use */
        rc = kgnilnd_setup_nic_translation(dev->gnd_host_id);
-       if (rc != 0) {
-               rc = -ENODEV;
-               GOTO(failed, rc);
-       }
+       if (rc != 0)
+               GOTO(failed, rc = -ENODEV);
 
        /* only dev 0 gets the errors - no need to reset the stack twice
         * - this works because we have a single PTAG, if we had more
@@ -1982,8 +1969,7 @@ kgnilnd_dev_init(kgn_device_t *dev)
                if (rrc != GNI_RC_SUCCESS) {
                        CERROR("Can't subscribe for errors on device %d: rc %d\n",
                                dev->gnd_id, rrc);
-                       rc = -ENODEV;
-                       GOTO(failed, rc);
+                       GOTO(failed, rc = -ENODEV);
                }
 
                rc = kgnilnd_set_quiesce_callback(dev->gnd_handle,
@@ -1991,8 +1977,7 @@ kgnilnd_dev_init(kgn_device_t *dev)
                if (rc != GNI_RC_SUCCESS) {
                        CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n",
                                dev->gnd_id, rrc);
-                       rc = -ENODEV;
-                       GOTO(failed, rc);
+                       GOTO(failed, rc = -ENODEV);
                }
        }
 
@@ -2003,19 +1988,17 @@ kgnilnd_dev_init(kgn_device_t *dev)
                        CERROR("couldn't translate host_id 0x%x to nid. rc %d\n",
                                dev->gnd_host_id, rc);
                }
-               rc = -ESRCH;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ESRCH);
        }
        CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid);
 
-       rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
+       rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits,
                                0, kgnilnd_device_callback,
                                dev->gnd_id, &dev->gnd_snd_rdma_cqh);
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create rdma send cq size %u for device "
                       "%d (%d)\n", cq_size, dev->gnd_id, rrc);
-               rc = -EINVAL;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -EINVAL);
        }
 
        rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size,
@@ -2024,8 +2007,7 @@ kgnilnd_dev_init(kgn_device_t *dev)
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create fma send cq size %u for device %d (%d)\n",
                       cq_size, dev->gnd_id, rrc);
-               rc = -EINVAL;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -EINVAL);
        }
 
        /* This one we size differently - overflows are possible and it needs to be
@@ -2037,8 +2019,12 @@ kgnilnd_dev_init(kgn_device_t *dev)
        if (rrc != GNI_RC_SUCCESS) {
                CERROR("Can't create fma cq size %d for device %d (%d)\n",
                       *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc);
-               rc = -EINVAL;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -EINVAL);
+       }
+
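+       /* assumption: the sMDD hold buffer parks stale memory descriptors
+        * until kgnilnd_dev_fini() deregisters it below */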
+       rrc = kgnilnd_register_smdd_buf(dev);
+       if (rrc != GNI_RC_SUCCESS)
+               GOTO(failed, rc = -EINVAL);
 
        RETURN(0);
@@ -2057,9 +2043,12 @@ kgnilnd_dev_fini(kgn_device_t *dev)
        /* At quiesce or rest time, need to loop through and clear gnd_ready_conns ?*/
        LASSERTF(list_empty(&dev->gnd_ready_conns) &&
                 list_empty(&dev->gnd_map_tx) &&
-                list_empty(&dev->gnd_rdmaq),
-                "dev 0x%p ready_conns %d@0x%p map_tx %d@0x%p rdmaq %d@0x%p\n",
+                list_empty(&dev->gnd_rdmaq) &&
+                list_empty(&dev->gnd_delay_conns),
+                "dev 0x%p ready_conns %d@0x%p delay_conns %d@0x%p" 
+                "map_tx %d@0x%p rdmaq %d@0x%p\n",
                 dev, kgnilnd_count_list(&dev->gnd_ready_conns), &dev->gnd_ready_conns,
+                kgnilnd_count_list(&dev->gnd_delay_conns), &dev->gnd_delay_conns,
                 kgnilnd_count_list(&dev->gnd_map_tx), &dev->gnd_map_tx,
                 kgnilnd_count_list(&dev->gnd_rdmaq), &dev->gnd_rdmaq);
 
@@ -2069,7 +2058,7 @@ kgnilnd_dev_fini(kgn_device_t *dev)
                 dev->gnd_map_nphys, dev->gnd_map_physnop);
 
        LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
-               "%d virtual mappings of "LPU64" bytes still mapped\n",
+               "%d virtual mappings of %llu bytes still mapped\n",
                 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
 
        LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
@@ -2091,6 +2080,13 @@ kgnilnd_dev_fini(kgn_device_t *dev)
                atomic_read(&kgnilnd_data.kgn_nthreads) == 0,
                "tried to shutdown with threads active\n");
 
+       if (dev->gnd_smdd_hold_buf) {
+               rrc = kgnilnd_deregister_smdd_buf(dev);
+               LASSERTF(rrc == GNI_RC_SUCCESS,
+                       "bad rc from deregistion of sMDD buffer: %d\n", rrc);
+               dev->gnd_smdd_hold_buf = NULL;
+       }
+
        if (dev->gnd_rcv_fma_cqh) {
                rrc = kgnilnd_cq_destroy(dev->gnd_rcv_fma_cqh);
                LASSERTF(rrc == GNI_RC_SUCCESS,
@@ -2129,7 +2125,6 @@ kgnilnd_dev_fini(kgn_device_t *dev)
        EXIT;
 }
 
-
 int kgnilnd_base_startup(void)
 {
        struct timeval       tv;
@@ -2138,6 +2133,15 @@ int kgnilnd_base_startup(void)
        int                  i;
        kgn_device_t        *dev;
        struct task_struct  *thrd;
+
+#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
+       /* limit how much memory can be allocated for fma blocks in
+        * instances where many nodes need to reconnect at the same time */
+       struct sysinfo si;
+       si_meminfo(&si);
+       kgnilnd_data.free_pages_limit = si.totalram/4;
+#endif
+
        ENTRY;
 
        LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING,
@@ -2145,6 +2149,7 @@ int kgnilnd_base_startup(void)
 
        /* zero pointers, flags etc */
        memset(&kgnilnd_data, 0, sizeof(kgnilnd_data));
+       kgnilnd_check_kgni_version();
 
        /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
         * a unique (for all time) connstamp so we can uniquely identify
@@ -2164,10 +2169,11 @@ int kgnilnd_base_startup(void)
 
                dev->gnd_id = i;
                INIT_LIST_HEAD(&dev->gnd_ready_conns);
+               INIT_LIST_HEAD(&dev->gnd_delay_conns);
                INIT_LIST_HEAD(&dev->gnd_map_tx);
                INIT_LIST_HEAD(&dev->gnd_fma_buffs);
                mutex_init(&dev->gnd_cq_mutex);
-               sema_init(&dev->gnd_fmablk_sem, 1);
+               mutex_init(&dev->gnd_fmablk_mutex);
                spin_lock_init(&dev->gnd_fmablk_lock);
                init_waitqueue_head(&dev->gnd_waitq);
                init_waitqueue_head(&dev->gnd_dgram_waitq);
@@ -2190,10 +2196,8 @@ int kgnilnd_base_startup(void)
                LIBCFS_ALLOC(dev->gnd_dgrams,
                            sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
 
-               if (dev->gnd_dgrams == NULL) {
-                       rc = -ENOMEM;
-                       GOTO(failed, rc);
-               }
+               if (dev->gnd_dgrams == NULL)
+                       GOTO(failed, rc = -ENOMEM);
 
                for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                        INIT_LIST_HEAD(&dev->gnd_dgrams[i]);
@@ -2217,7 +2221,7 @@ int kgnilnd_base_startup(void)
        init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq);
        spin_lock_init(&kgnilnd_data.kgn_reaper_lock);
 
-       sema_init(&kgnilnd_data.kgn_quiesce_sem, 1);
+       mutex_init(&kgnilnd_data.kgn_quiesce_mutex);
        atomic_set(&kgnilnd_data.kgn_nquiesce, 0);
        atomic_set(&kgnilnd_data.kgn_npending_conns, 0);
        atomic_set(&kgnilnd_data.kgn_npending_unlink, 0);
@@ -2228,17 +2232,16 @@ int kgnilnd_base_startup(void)
 
        /* OK to call kgnilnd_api_shutdown() to cleanup now */
        kgnilnd_data.kgn_init = GNILND_INIT_DATA;
-       try_module_get(THIS_MODULE);
+       if (!try_module_get(THIS_MODULE))
+               GOTO(failed, rc = -ENOENT);
 
        rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
 
        LIBCFS_ALLOC(kgnilnd_data.kgn_peers,
                    sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
 
-       if (kgnilnd_data.kgn_peers == NULL) {
-               rc = -ENOMEM;
-               GOTO(failed, rc);
-       }
+       if (kgnilnd_data.kgn_peers == NULL)
+               GOTO(failed, rc = -ENOMEM);
 
        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
@@ -2247,10 +2250,8 @@ int kgnilnd_base_startup(void)
        LIBCFS_ALLOC(kgnilnd_data.kgn_conns,
                    sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
 
-       if (kgnilnd_data.kgn_conns == NULL) {
-               rc = -ENOMEM;
-               GOTO(failed, rc);
-       }
+       if (kgnilnd_data.kgn_conns == NULL)
+               GOTO(failed, rc = -ENOMEM);
 
        for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
                INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
@@ -2259,38 +2260,33 @@ int kgnilnd_base_startup(void)
        LIBCFS_ALLOC(kgnilnd_data.kgn_nets,
                    sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
 
-       if (kgnilnd_data.kgn_nets == NULL) {
-               rc = -ENOMEM;
-               GOTO(failed, rc);
-       }
+       if (kgnilnd_data.kgn_nets == NULL)
+               GOTO(failed, rc = -ENOMEM);
 
        for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
                INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
        }
 
        kgnilnd_data.kgn_mbox_cache =
-               kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
+               kmem_cache_create("kgn_mbox_block", GNILND_MBOX_SIZE, 0,
                                  SLAB_HWCACHE_ALIGN, NULL);
        if (kgnilnd_data.kgn_mbox_cache == NULL) {
                CERROR("Can't create slab for physical mbox blocks\n");
-               rc = -ENOMEM;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENOMEM);
        }
 
        kgnilnd_data.kgn_rx_cache =
                kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
        if (kgnilnd_data.kgn_rx_cache == NULL) {
                CERROR("Can't create slab for kgn_rx_t descriptors\n");
-               rc = -ENOMEM;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENOMEM);
        }
 
        kgnilnd_data.kgn_tx_cache =
                kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
        if (kgnilnd_data.kgn_tx_cache == NULL) {
                CERROR("Can't create slab for kgn_tx_t\n");
-               rc = -ENOMEM;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENOMEM);
        }
 
        kgnilnd_data.kgn_tx_phys_cache =
@@ -2299,16 +2295,14 @@ int kgnilnd_base_startup(void)
                                   0, 0, NULL);
        if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
                CERROR("Can't create slab for kgn_tx_phys\n");
-               rc = -ENOMEM;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENOMEM);
        }
 
        kgnilnd_data.kgn_dgram_cache =
                kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
        if (kgnilnd_data.kgn_dgram_cache == NULL) {
                CERROR("Can't create slab for outgoing datagrams\n");
-               rc = -ENOMEM;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENOMEM);
        }
 
        /* allocate a MAX_IOV array of page pointers for each cpu */
@@ -2316,8 +2310,7 @@ int kgnilnd_base_startup(void)
                                                   GFP_KERNEL);
        if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
                CERROR("Can't allocate vmap cksum pages\n");
-               rc = -ENOMEM;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENOMEM);
        }
        kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
        memset(kgnilnd_data.kgn_cksum_map_pages, 0,
@@ -2328,8 +2321,7 @@ int kgnilnd_base_startup(void)
                                                              GFP_KERNEL);
                if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
                        CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
-                       rc = -ENOMEM;
-                       GOTO(failed, rc);
+                       GOTO(failed, rc = -ENOMEM);
                }
        }
 
@@ -2345,16 +2337,14 @@ int kgnilnd_base_startup(void)
                        kgnilnd_data.kgn_ndevs++;
 
                        rc = kgnilnd_allocate_phys_fmablk(dev);
-                       if (rc) {
+                       if (rc)
                                GOTO(failed, rc);
-                       }
                }
        }
 
        if (kgnilnd_data.kgn_ndevs == 0) {
                CERROR("Can't initialise any GNI devices\n");
-               rc = -ENODEV;
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENODEV);
        }
 
        rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
@@ -2383,13 +2373,20 @@ int kgnilnd_base_startup(void)
        }
 
        /* threads will load balance across devs as they are available */
-       for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
-               rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
-                                         "kgnilnd_sd", i);
-               if (rc != 0) {
-                       CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
-                              i, rc);
+       if (*kgnilnd_tunables.kgn_thread_affinity) {
+               rc = kgnilnd_start_sd_threads();
+               if (rc != 0)
                        GOTO(failed, rc);
+       } else {
+               for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
+                       rc = kgnilnd_thread_start(kgnilnd_scheduler,
+                                                 (void *)((long)i),
+                                                 "kgnilnd_sd", i);
+                       if (rc != 0) {
+                               CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
+                                      i, rc);
+                               GOTO(failed, rc);
+                       }
                }
        }
 
@@ -2420,8 +2417,6 @@ int kgnilnd_base_startup(void)
                }
        }
 
-
-
        /* flag everything initialised */
        kgnilnd_data.kgn_init = GNILND_INIT_ALL;
        /*****************************************************/
@@ -2438,7 +2433,7 @@ failed:
 void
 kgnilnd_base_shutdown(void)
 {
-       int                     i;
+       int                     i, j;
        ENTRY;
 
        while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
@@ -2448,10 +2443,30 @@ kgnilnd_base_shutdown(void)
        for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
                kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
                kgnilnd_cancel_wc_dgrams(dev);
+               kgnilnd_cancel_dgrams(dev);
                kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
                kgnilnd_wait_for_canceled_dgrams(dev);
        }
 
+       /* We need to verify there are no conns left before we let the threads
+        * shut down otherwise we could clean up the peers but still have
+        * some outstanding conns due to orphaned datagram conns that are
+        * being cleaned up.
+        */
+       i = 2;
+       while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
+               i++;
+
+               for (j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
+                       kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
+                       kgnilnd_schedule_device(dev);
+               }
+
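+               /* (i & -i) == i only for powers of two, so the warning
+                * rate-limits itself with exponential backoff */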
+               CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+                       "Waiting for conns to be cleaned up %d\n",
+                       atomic_read(&kgnilnd_data.kgn_nconns));
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(cfs_time_seconds(1));
+       }
        /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
         * have to worry about shutdown races.  NB connections may be created
         * while there are still active connds, but these will be temporary
@@ -2469,7 +2484,8 @@ kgnilnd_base_shutdown(void)
                i++;
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                       "Waiting for ruhroh thread to terminate\n");
-               cfs_pause(cfs_time_seconds(1));
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(cfs_time_seconds(1));
        }
 
        /* Flag threads to terminate */
@@ -2479,7 +2495,7 @@ kgnilnd_base_shutdown(void)
                kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
 
                /* should clear all the MDDs */
-               kgnilnd_unmap_phys_fmablk(dev);
+               kgnilnd_unmap_fma_blocks(dev);
 
                kgnilnd_schedule_device(dev);
                wake_up_all(&dev->gnd_dgram_waitq);
@@ -2491,7 +2507,8 @@ kgnilnd_base_shutdown(void)
        wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
        spin_unlock(&kgnilnd_data.kgn_reaper_lock);
 
-       kgnilnd_wakeup_rca_thread();
+       if (atomic_read(&kgnilnd_data.kgn_nthreads))
+               kgnilnd_wakeup_rca_thread();
 
        /* Wait for threads to exit */
        i = 2;
@@ -2500,7 +2517,8 @@ kgnilnd_base_shutdown(void)
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                       "Waiting for %d threads to terminate\n",
                       atomic_read(&kgnilnd_data.kgn_nthreads));
-               cfs_pause(cfs_time_seconds(1));
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(cfs_time_seconds(1));
        }
 
        LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
@@ -2591,15 +2609,15 @@ kgnilnd_base_shutdown(void)
 }
 
 int
-kgnilnd_startup(lnet_ni_t *ni)
+kgnilnd_startup(struct lnet_ni *ni)
 {
        int               rc, devno;
        kgn_net_t        *net;
        ENTRY;
 
-       LASSERTF(ni->ni_lnd == &the_kgnilnd,
+       LASSERTF(ni->ni_net->net_lnd == &the_kgnilnd,
                "bad LND 0x%p != the_kgnilnd @ 0x%p\n",
-               ni->ni_lnd, &the_kgnilnd);
+               ni->ni_net->net_lnd, &the_kgnilnd);
 
        if (kgnilnd_data.kgn_init == GNILND_INIT_NOTHING) {
                rc = kgnilnd_base_startup();
@@ -2608,20 +2626,23 @@ kgnilnd_startup(lnet_ni_t *ni)
        }
 
        /* Serialize with shutdown. */
-       down(&kgnilnd_data.kgn_quiesce_sem);
+       mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
 
        LIBCFS_ALLOC(net, sizeof(*net));
        if (net == NULL) {
                CERROR("could not allocate net for new interface instance\n");
-               rc = -ENOMEM;
                /* no need to cleanup the CDM... */
-               GOTO(failed, rc);
+               GOTO(failed, rc = -ENOMEM);
        }
        INIT_LIST_HEAD(&net->gnn_list);
        ni->ni_data = net;
        net->gnn_ni = ni;
-       ni->ni_maxtxcredits = *kgnilnd_tunables.kgn_credits;
-       ni->ni_peertxcredits = *kgnilnd_tunables.kgn_peer_credits;
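+       /* respect tunables already set through LNet config; fall back to the
+        * module defaults only when net_tunables_set is false */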
+       if (!ni->ni_net->net_tunables_set) {
+               ni->ni_net->net_tunables.lct_max_tx_credits =
+                       *kgnilnd_tunables.kgn_credits;
+               ni->ni_net->net_tunables.lct_peer_tx_credits =
+                       *kgnilnd_tunables.kgn_peer_credits;
+       }
 
        if (*kgnilnd_tunables.kgn_peer_health) {
                int     fudge;
@@ -2631,21 +2652,21 @@ kgnilnd_startup(lnet_ni_t *ni)
                fudge = (GNILND_TO2KA(*kgnilnd_tunables.kgn_timeout) / GNILND_REAPER_NCHECKS);
                timeout = *kgnilnd_tunables.kgn_timeout + fudge;
 
-               if (*kgnilnd_tunables.kgn_peer_timeout >= timeout)
-                       ni->ni_peertimeout = *kgnilnd_tunables.kgn_peer_timeout;
-               else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
+               if (*kgnilnd_tunables.kgn_peer_timeout >= timeout) {
+                       ni->ni_net->net_tunables.lct_peer_timeout =
+                                *kgnilnd_tunables.kgn_peer_timeout;
+               } else if (*kgnilnd_tunables.kgn_peer_timeout > -1) {
                        LCONSOLE_ERROR("Peer_timeout is set to %d but needs to be >= %d\n",
                                        *kgnilnd_tunables.kgn_peer_timeout,
                                        timeout);
                        ni->ni_data = NULL;
                        LIBCFS_FREE(net, sizeof(*net));
-                       rc = -EINVAL;
-                       GOTO(failed, rc);
+                       GOTO(failed, rc = -EINVAL);
                } else
-                       ni->ni_peertimeout = timeout;
+                       ni->ni_net->net_tunables.lct_peer_timeout = timeout;
 
                LCONSOLE_INFO("Enabling LNet peer health for gnilnd, timeout %ds\n",
-                             ni->ni_peertimeout);
+                             ni->ni_net->net_tunables.lct_peer_timeout);
        }
 
        atomic_set(&net->gnn_refcount, 1);
@@ -2677,16 +2698,16 @@ kgnilnd_startup(lnet_ni_t *ni)
 
        /* we need a separate thread to call probe_wait_by_id until
         * we get a function callback notifier from kgni */
-       up(&kgnilnd_data.kgn_quiesce_sem);
+       mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        RETURN(0);
  failed:
-       up(&kgnilnd_data.kgn_quiesce_sem);
+       mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        kgnilnd_shutdown(ni);
        RETURN(rc);
 }
 
 void
-kgnilnd_shutdown(lnet_ni_t *ni)
+kgnilnd_shutdown(struct lnet_ni *ni)
 {
        kgn_net_t     *net = ni->ni_data;
        int           i;
@@ -2699,14 +2720,13 @@ kgnilnd_shutdown(lnet_ni_t *ni)
                "init %d\n", kgnilnd_data.kgn_init);
 
        /* Serialize with startup. */
-       down(&kgnilnd_data.kgn_quiesce_sem);
+       mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));
 
        if (net == NULL) {
                CERROR("got NULL net for ni %p\n", ni);
-               rc = -EINVAL;
-               GOTO(out, rc);
+               GOTO(out, rc = -EINVAL);
        }
 
        LASSERTF(ni == net->gnn_ni,
@@ -2749,7 +2769,8 @@ kgnilnd_shutdown(lnet_ni_t *ni)
                                "Waiting for %d references to clear on net %d\n",
                                atomic_read(&net->gnn_refcount),
                                net->gnn_netnum);
-                       cfs_pause(cfs_time_seconds(1));
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       schedule_timeout(cfs_time_seconds(1));
                }
 
                /* release ref from kgnilnd_startup */
@@ -2783,22 +2804,18 @@ out:
        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read(&libcfs_kmemory));
 
-       up(&kgnilnd_data.kgn_quiesce_sem);
+       mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
        EXIT;
-       return;
 }
 
-void __exit
-kgnilnd_module_fini(void)
+static void __exit kgnilnd_exit(void)
 {
        lnet_unregister_lnd(&the_kgnilnd);
        kgnilnd_proc_fini();
        kgnilnd_remove_sysctl();
-       kgnilnd_tunables_fini();
 }
 
-int __init
-kgnilnd_module_init(void)
+static int __init kgnilnd_init(void)
 {
        int    rc;
 
@@ -2806,7 +2823,7 @@ kgnilnd_module_init(void)
        if (rc != 0)
                return rc;
 
-       printk(KERN_INFO "Lustre: kgnilnd build version: "KGNILND_BUILD_REV"\n");
+       LCONSOLE_INFO("Lustre: kgnilnd build version: "LUSTRE_VERSION_STRING"\n");
 
        kgnilnd_insert_sysctl();
        kgnilnd_proc_init();
@@ -2817,8 +2834,9 @@ kgnilnd_module_init(void)
 }
 
 MODULE_AUTHOR("Cray, Inc. <nic@cray.com>");
-MODULE_DESCRIPTION("Kernel Gemini LND v"KGNILND_BUILD_REV);
+MODULE_DESCRIPTION("Gemini LNet Network Driver");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
 
-module_init(kgnilnd_module_init);
-module_exit(kgnilnd_module_fini);
+module_init(kgnilnd_init);
+module_exit(kgnilnd_exit);