LU-13566 socklnd: fix local interface binding
[fs/lustre-release.git] / lnet / klnds / socklnd / socklnd.c
index 56045fb..5249227 100644
@@ -23,7 +23,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Author: Eric Barton <eric@bartonsoftware.com>
  */
 
-#include <linux/pci.h>
+#include <linux/inetdevice.h>
 #include "socklnd.h"
 
-static struct lnet_lnd the_ksocklnd;
+static const struct lnet_lnd the_ksocklnd;
 struct ksock_nal_data ksocknal_data;
 
 static struct ksock_interface *
@@ -61,6 +61,58 @@ ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
        return NULL;
 }
 
+static struct ksock_interface *
+ksocknal_index2iface(struct lnet_ni *ni, int index)
+{
+       struct ksock_net *net = ni->ni_data;
+       int i;
+       struct ksock_interface *iface;
+
+       for (i = 0; i < net->ksnn_ninterfaces; i++) {
+               LASSERT(i < LNET_INTERFACES_NUM);
+               iface = &net->ksnn_interfaces[i];
+
+               if (iface->ksni_index == index)
+                       return iface;
+       }
+
+       return NULL;
+}
+
+static int ksocknal_ip2index(__u32 ipaddress, struct lnet_ni *ni)
+{
+       struct net_device *dev;
+       int ret = -1;
+       DECLARE_CONST_IN_IFADDR(ifa);
+
+       rcu_read_lock();
+       for_each_netdev(ni->ni_net_ns, dev) {
+               int flags = dev_get_flags(dev);
+               struct in_device *in_dev;
+
+               if (flags & IFF_LOOPBACK) /* skip the loopback IF */
+                       continue;
+
+               if (!(flags & IFF_UP))
+                       continue;
+
+               in_dev = __in_dev_get_rcu(dev);
+               if (!in_dev)
+                       continue;
+
+               in_dev_for_each_ifa_rcu(ifa, in_dev) {
+                       if (ntohl(ifa->ifa_local) == ipaddress)
+                               ret = dev->ifindex;
+               }
+               endfor_ifa(in_dev);
+               if (ret >= 0)
+                       break;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static struct ksock_route *
 ksocknal_create_route(__u32 ipaddr, int port)
 {
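
The new ksocknal_ip2index() above resolves an IPv4 address to the ifindex of the owning device by walking the namespace's net devices under RCU. A minimal sketch of the same walk using only stock kernel primitives (assumes a kernel that provides in_dev_for_each_ifa_rcu(), i.e. v5.3+; DECLARE_CONST_IN_IFADDR()/endfor_ifa() in the patch are compat wrappers for older kernels, and addr_to_ifindex() here is a hypothetical name, not part of the patch):

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

/* Hypothetical helper, not from the patch: return the ifindex of the
 * first UP, non-loopback device in @netns that owns @addr (host order),
 * or -1 if no device matches. */
static int addr_to_ifindex(struct net *netns, __u32 addr)
{
        struct net_device *dev;
        int ifindex = -1;

        rcu_read_lock();
        for_each_netdev_rcu(netns, dev) {
                struct in_device *in_dev;
                const struct in_ifaddr *ifa;

                if ((dev->flags & IFF_LOOPBACK) || !(dev->flags & IFF_UP))
                        continue;

                in_dev = __in_dev_get_rcu(dev);
                if (!in_dev)
                        continue;

                in_dev_for_each_ifa_rcu(ifa, in_dev) {
                        if (ntohl(ifa->ifa_local) == addr) {
                                ifindex = dev->ifindex;
                                break;
                        }
                }
                if (ifindex >= 0)
                        break;
        }
        rcu_read_unlock();

        return ifindex;
}
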
@@ -74,15 +126,16 @@ ksocknal_create_route(__u32 ipaddr, int port)
        route->ksnr_peer = NULL;
        route->ksnr_retry_interval = 0;         /* OK to connect at any time */
        route->ksnr_ipaddr = ipaddr;
-        route->ksnr_port = port;
-        route->ksnr_scheduled = 0;
-        route->ksnr_connecting = 0;
-        route->ksnr_connected = 0;
-        route->ksnr_deleted = 0;
-        route->ksnr_conn_count = 0;
-        route->ksnr_share_count = 0;
-
-        return (route);
+       route->ksnr_myiface = -1;
+       route->ksnr_port = port;
+       route->ksnr_scheduled = 0;
+       route->ksnr_connecting = 0;
+       route->ksnr_connected = 0;
+       route->ksnr_deleted = 0;
+       route->ksnr_conn_count = 0;
+       route->ksnr_share_count = 0;
+
+       return route;
 }
 
 void
@@ -96,9 +149,8 @@ ksocknal_destroy_route(struct ksock_route *route)
        LIBCFS_FREE (route, sizeof (*route));
 }
 
-static int
-ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
-                    struct lnet_process_id id)
+static struct ksock_peer_ni *
+ksocknal_create_peer(struct lnet_ni *ni, struct lnet_process_id id)
 {
        int cpt = lnet_cpt_of_nid(id.nid, ni);
        struct ksock_net *net = ni->ni_data;
@@ -108,9 +160,16 @@ ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
        LASSERT(id.pid != LNET_PID_ANY);
        LASSERT(!in_interrupt());
 
+       if (!atomic_inc_unless_negative(&net->ksnn_npeers)) {
+               CERROR("Can't create peer_ni: network shutdown\n");
+               return ERR_PTR(-ESHUTDOWN);
+       }
+
        LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
-       if (peer_ni == NULL)
-               return -ENOMEM;
+       if (!peer_ni) {
+               atomic_dec(&net->ksnn_npeers);
+               return ERR_PTR(-ENOMEM);
+       }
 
        peer_ni->ksnp_ni = ni;
        peer_ni->ksnp_id = id;
@@ -127,22 +186,7 @@ ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
        INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
        spin_lock_init(&peer_ni->ksnp_lock);
 
-       spin_lock_bh(&net->ksnn_lock);
-
-       if (net->ksnn_shutdown) {
-               spin_unlock_bh(&net->ksnn_lock);
-
-               LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
-               CERROR("Can't create peer_ni: network shutdown\n");
-               return -ESHUTDOWN;
-       }
-
-       net->ksnn_npeers++;
-
-       spin_unlock_bh(&net->ksnn_lock);
-
-       *peerp = peer_ni;
-       return 0;
+       return peer_ni;
 }
 
 void
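
ksocknal_create_peer() now gates peer creation on atomic_inc_unless_negative(&net->ksnn_npeers) instead of taking ksnn_lock and checking a separate ksnn_shutdown flag; the matching release in ksocknal_destroy_peer() (next hunk) uses atomic_dec_and_test() plus wake_up_var(). A user-space analogue of that gate, shown only to illustrate the ordering, with made-up names and C11 atomics standing in for the kernel helpers:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int npeers;       /* driven negative once shutdown begins */

/* analogue of atomic_inc_unless_negative(): take a count only while the
 * network is still up */
static bool peer_count_get(void)
{
        int old = atomic_load(&npeers);

        do {
                if (old < 0)
                        return false;   /* shutdown already started */
        } while (!atomic_compare_exchange_weak(&npeers, &old, old + 1));

        return true;
}

/* analogue of atomic_dec_and_test() + wake_up_var(): dropping the last
 * count is what wakes the waiter in shutdown */
static void peer_count_put(void)
{
        if (atomic_fetch_sub(&npeers, 1) == 1) {
                /* wake_up_var(&npeers) in the kernel version */
        }
}
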
@@ -162,25 +206,22 @@ ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
 
        LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
 
-        /* NB a peer_ni's connections and routes keep a reference on their peer_ni
-         * until they are destroyed, so we can be assured that _all_ state to
-         * do with this peer_ni has been cleaned up when its refcount drops to
-         * zero. */
-       spin_lock_bh(&net->ksnn_lock);
-       net->ksnn_npeers--;
-       spin_unlock_bh(&net->ksnn_lock);
+       /* NB a peer_ni's connections and routes keep a reference on their
+        * peer_ni until they are destroyed, so we can be assured that _all_
+        * state to do with this peer_ni has been cleaned up when its refcount
+        * drops to zero.
+        */
+       if (atomic_dec_and_test(&net->ksnn_npeers))
+               wake_up_var(&net->ksnn_npeers);
 }
 
 struct ksock_peer_ni *
 ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
 {
-       struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
-       struct list_head *tmp;
        struct ksock_peer_ni *peer_ni;
 
-       list_for_each(tmp, peer_list) {
-               peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
-
+       hash_for_each_possible(ksocknal_data.ksnd_peers, peer_ni,
+                              ksnp_list, id.nid) {
                LASSERT(!peer_ni->ksnp_closing);
 
                if (peer_ni->ksnp_ni != ni)
@@ -239,7 +280,7 @@ ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
        LASSERT(list_empty(&peer_ni->ksnp_routes));
        LASSERT(!peer_ni->ksnp_closing);
        peer_ni->ksnp_closing = 1;
-       list_del(&peer_ni->ksnp_list);
+       hlist_del(&peer_ni->ksnp_list);
        /* lose peerlist's ref */
        ksocknal_peer_decref(peer_ni);
 }
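
The peer table itself moves from an open-coded array of list_heads plus ksocknal_nid2peerlist() to the <linux/hashtable.h> helpers: the table is initialised with hash_init() (see the ksocknal_base_startup() hunk near the end), entries hang off an hlist_node, and a lookup walks only the bucket the NID hashes to. A compressed sketch of that API, independent of the socklnd types (struct demo_peer and the demo_* helpers are made-up names; the bucket count is illustrative, not the real SOCKNAL value):

#include <linux/hashtable.h>
#include <linux/types.h>

static DEFINE_HASHTABLE(demo_peers, 7);         /* 128 buckets */

struct demo_peer {
        u64                     nid;            /* hash key */
        struct hlist_node       link;           /* replaces the old list_head */
};

static void demo_add(struct demo_peer *p)
{
        hash_add(demo_peers, &p->link, p->nid);
}

static struct demo_peer *demo_find(u64 nid)
{
        struct demo_peer *p;

        /* visits only the bucket that 'nid' hashes to */
        hash_for_each_possible(demo_peers, p, link, nid)
                if (p->nid == nid)
                        return p;
        return NULL;
}

static void demo_del(struct demo_peer *p)
{
        hlist_del(&p->link);    /* hash_del() is the equivalent wrapper */
}
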
@@ -250,7 +291,6 @@ ksocknal_get_peer_info(struct lnet_ni *ni, int index,
                       int *port, int *conn_count, int *share_count)
 {
        struct ksock_peer_ni *peer_ni;
-       struct list_head *ptmp;
        struct ksock_route *route;
        struct list_head *rtmp;
        int i;
@@ -259,58 +299,56 @@ ksocknal_get_peer_info(struct lnet_ni *ni, int index,
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
-       for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-               list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
-
-                       if (peer_ni->ksnp_ni != ni)
-                               continue;
+       hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
 
-                       if (peer_ni->ksnp_n_passive_ips == 0 &&
-                           list_empty(&peer_ni->ksnp_routes)) {
-                               if (index-- > 0)
-                                       continue;
+               if (peer_ni->ksnp_ni != ni)
+                       continue;
 
-                                *id = peer_ni->ksnp_id;
-                                *myip = 0;
-                                *peer_ip = 0;
-                                *port = 0;
-                                *conn_count = 0;
-                                *share_count = 0;
-                                rc = 0;
-                                goto out;
-                        }
+               if (peer_ni->ksnp_n_passive_ips == 0 &&
+                   list_empty(&peer_ni->ksnp_routes)) {
+                       if (index-- > 0)
+                               continue;
 
-                       for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
-                               if (index-- > 0)
-                                       continue;
+                       *id = peer_ni->ksnp_id;
+                       *myip = 0;
+                       *peer_ip = 0;
+                       *port = 0;
+                       *conn_count = 0;
+                       *share_count = 0;
+                       rc = 0;
+                       goto out;
+               }
 
-                                *id = peer_ni->ksnp_id;
-                                *myip = peer_ni->ksnp_passive_ips[j];
-                                *peer_ip = 0;
-                                *port = 0;
-                                *conn_count = 0;
-                                *share_count = 0;
-                                rc = 0;
-                                goto out;
-                        }
+               for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
+                       if (index-- > 0)
+                               continue;
 
-                       list_for_each(rtmp, &peer_ni->ksnp_routes) {
-                               if (index-- > 0)
-                                       continue;
+                       *id = peer_ni->ksnp_id;
+                       *myip = peer_ni->ksnp_passive_ips[j];
+                       *peer_ip = 0;
+                       *port = 0;
+                       *conn_count = 0;
+                       *share_count = 0;
+                       rc = 0;
+                       goto out;
+               }
 
-                               route = list_entry(rtmp, struct ksock_route,
-                                                  ksnr_list);
+               list_for_each(rtmp, &peer_ni->ksnp_routes) {
+                       if (index-- > 0)
+                               continue;
 
-                               *id = peer_ni->ksnp_id;
-                               *myip = route->ksnr_myipaddr;
-                               *peer_ip = route->ksnr_ipaddr;
-                               *port = route->ksnr_port;
-                               *conn_count = route->ksnr_conn_count;
-                               *share_count = route->ksnr_share_count;
-                               rc = 0;
-                               goto out;
-                       }
+                       route = list_entry(rtmp, struct ksock_route,
+                                          ksnr_list);
+
+                       *id = peer_ni->ksnp_id;
+                       rc = choose_ipv4_src(myip, route->ksnr_myiface,
+                                            route->ksnr_ipaddr,
+                                            ni->ni_net_ns);
+                       *peer_ip = route->ksnr_ipaddr;
+                       *port = route->ksnr_port;
+                       *conn_count = route->ksnr_conn_count;
+                       *share_count = route->ksnr_share_count;
+                       goto out;
                }
        }
 out:
@@ -319,47 +357,52 @@ out:
 }
 
 static void
-ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route,
+                                    struct ksock_conn *conn)
 {
        struct ksock_peer_ni *peer_ni = route->ksnr_peer;
        int type = conn->ksnc_type;
        struct ksock_interface *iface;
+       int conn_iface = ksocknal_ip2index(conn->ksnc_myipaddr,
+                                          route->ksnr_peer->ksnp_ni);
 
        conn->ksnc_route = route;
        ksocknal_route_addref(route);
 
-       if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
-               if (route->ksnr_myipaddr == 0) {
+       if (route->ksnr_myiface != conn_iface) {
+               if (route->ksnr_myiface < 0) {
                        /* route wasn't bound locally yet (the initial route) */
-                       CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
+                       CDEBUG(D_NET, "Binding %s %pI4h to interface %d\n",
                               libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
-                              &conn->ksnc_myipaddr);
+                              conn_iface);
                } else {
-                       CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
-                              "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
+                       CDEBUG(D_NET,
+                              "Rebinding %s %pI4h from interface %d to %d\n",
+                              libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
-                              &route->ksnr_myipaddr,
-                              &conn->ksnc_myipaddr);
+                              route->ksnr_myiface,
+                              conn_iface);
 
-                        iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
-                                                  route->ksnr_myipaddr);
-                        if (iface != NULL)
-                                iface->ksni_nroutes--;
-                }
-                route->ksnr_myipaddr = conn->ksnc_myipaddr;
-                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
-                                          route->ksnr_myipaddr);
-                if (iface != NULL)
-                        iface->ksni_nroutes++;
-        }
+                       iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
+                                                    route->ksnr_myiface);
+                       if (iface)
+                               iface->ksni_nroutes--;
+               }
+               route->ksnr_myiface = conn_iface;
+               iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
+                                            route->ksnr_myiface);
+               if (iface)
+                       iface->ksni_nroutes++;
+       }
 
-        route->ksnr_connected |= (1<<type);
-        route->ksnr_conn_count++;
+       route->ksnr_connected |= (1<<type);
+       route->ksnr_conn_count++;
 
-        /* Successful connection => further attempts can
-         * proceed immediately */
-        route->ksnr_retry_interval = 0;
+       /* Successful connection => further attempts can
+        * proceed immediately
+        */
+       route->ksnr_retry_interval = 0;
 }
 
 static void
@@ -368,12 +411,14 @@ ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *rou
        struct list_head *tmp;
        struct ksock_conn *conn;
        struct ksock_route *route2;
+       struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
 
        LASSERT(!peer_ni->ksnp_closing);
        LASSERT(route->ksnr_peer == NULL);
        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
        LASSERT(route->ksnr_connected == 0);
+       LASSERT(net->ksnn_ninterfaces > 0);
 
        /* LASSERT(unique) */
        list_for_each(tmp, &peer_ni->ksnp_routes) {
@@ -389,6 +434,11 @@ ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *rou
 
        route->ksnr_peer = peer_ni;
        ksocknal_peer_addref(peer_ni);
+
+       /* set the route's interface to the current net's interface */
+       route->ksnr_myiface = net->ksnn_interfaces[0].ksni_index;
+       net->ksnn_interfaces[0].ksni_nroutes++;
+
        /* peer_ni's routelist takes over my ref on 'route' */
        list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
 
@@ -424,10 +474,10 @@ ksocknal_del_route_locked(struct ksock_route *route)
                ksocknal_close_conn_locked(conn, 0);
        }
 
-       if (route->ksnr_myipaddr != 0) {
-               iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
-                                         route->ksnr_myipaddr);
-               if (iface != NULL)
+       if (route->ksnr_myiface >= 0) {
+               iface = ksocknal_index2iface(route->ksnr_peer->ksnp_ni,
+                                            route->ksnr_myiface);
+               if (iface)
                        iface->ksni_nroutes--;
        }
 
@@ -452,16 +502,15 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
        struct ksock_peer_ni *peer2;
        struct ksock_route *route;
        struct ksock_route *route2;
-       int rc;
 
         if (id.nid == LNET_NID_ANY ||
             id.pid == LNET_PID_ANY)
                 return (-EINVAL);
 
-        /* Have a brand new peer_ni ready... */
-        rc = ksocknal_create_peer(&peer_ni, ni, id);
-        if (rc != 0)
-                return rc;
+       /* Have a brand new peer_ni ready... */
+       peer_ni = ksocknal_create_peer(ni, id);
+       if (IS_ERR(peer_ni))
+               return PTR_ERR(peer_ni);
 
         route = ksocknal_create_route (ipaddr, port);
         if (route == NULL) {
@@ -472,7 +521,8 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         /* always called with a ref on ni, so shutdown can't have started */
-       LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
+       LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers)
+               >= 0);
 
        peer2 = ksocknal_find_peer_locked(ni, id);
        if (peer2 != NULL) {
@@ -480,8 +530,7 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
                peer_ni = peer2;
        } else {
                /* peer_ni table takes my ref on peer_ni */
-               list_add_tail(&peer_ni->ksnp_list,
-                             ksocknal_nid2peerlist(id.nid));
+               hash_add(ksocknal_data.ksnd_peers, &peer_ni->ksnp_list, id.nid);
        }
 
        route2 = NULL;
@@ -558,15 +607,14 @@ ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
        }
 
        ksocknal_peer_decref(peer_ni);
-               /* NB peer_ni unlinks itself when last conn/route is removed */
+       /* NB peer_ni unlinks itself when last conn/route is removed */
 }
 
 static int
 ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
 {
-       struct list_head zombies = LIST_HEAD_INIT(zombies);
-       struct list_head *ptmp;
-       struct list_head *pnxt;
+       LIST_HEAD(zombies);
+       struct hlist_node *pnxt;
        struct ksock_peer_ni *peer_ni;
        int lo;
        int hi;
@@ -576,19 +624,17 @@ ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
        if (id.nid != LNET_NID_ANY) {
-               hi = (int)(ksocknal_nid2peerlist(id.nid) -
-                          ksocknal_data.ksnd_peers);
-               lo = hi;
+               lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+               hi = lo;
        } else {
                lo = 0;
-               hi = ksocknal_data.ksnd_peer_hash_size - 1;
+               hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
        }
 
        for (i = lo; i <= hi; i++) {
-               list_for_each_safe(ptmp, pnxt,
-                                  &ksocknal_data.ksnd_peers[i]) {
-                       peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
-
+               hlist_for_each_entry_safe(peer_ni, pnxt,
+                                         &ksocknal_data.ksnd_peers[i],
+                                         ksnp_list) {
                        if (peer_ni->ksnp_ni != ni)
                                continue;
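
ksocknal_del_peer() (and, further down, ksocknal_close_matching_conns() and ksocknal_push()) must visit either the single bucket for a given NID or every bucket for LNET_NID_ANY, so the patch derives a [lo, hi] bucket range from hash_min()/HASH_BITS()/HASH_SIZE() and walks it with hlist_for_each_entry_safe(). The same idiom, reusing the hypothetical demo_peers table from the sketch above:

/* visit one bucket for a specific key, or all buckets for "any" */
static void demo_for_range(u64 nid, bool any)
{
        struct demo_peer *p;
        struct hlist_node *next;
        int lo, hi, i;

        if (!any) {
                lo = hash_min(nid, HASH_BITS(demo_peers));
                hi = lo;
        } else {
                lo = 0;
                hi = HASH_SIZE(demo_peers) - 1;
        }

        for (i = lo; i <= hi; i++) {
                /* _safe variant: the body may unlink 'p' from the bucket */
                hlist_for_each_entry_safe(p, next, &demo_peers[i], link) {
                        if (!any && p->nid != nid)
                                continue;
                        /* ... act on p, possibly hlist_del(&p->link) ... */
                }
        }
}
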
 
@@ -628,33 +674,27 @@ static struct ksock_conn *
 ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
 {
        struct ksock_peer_ni *peer_ni;
-       struct list_head *ptmp;
        struct ksock_conn *conn;
        struct list_head *ctmp;
        int i;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
-       for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-               list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
+       hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
+               LASSERT(!peer_ni->ksnp_closing);
 
-                       LASSERT(!peer_ni->ksnp_closing);
+               if (peer_ni->ksnp_ni != ni)
+                       continue;
 
-                       if (peer_ni->ksnp_ni != ni)
+               list_for_each(ctmp, &peer_ni->ksnp_conns) {
+                       if (index-- > 0)
                                continue;
 
-                       list_for_each(ctmp, &peer_ni->ksnp_conns) {
-                               if (index-- > 0)
-                                       continue;
-
-                               conn = list_entry(ctmp, struct ksock_conn,
-                                                 ksnc_list);
-                               ksocknal_conn_addref(conn);
-                               read_unlock(&ksocknal_data. \
-                                           ksnd_global_lock);
-                               return conn;
-                       }
+                       conn = list_entry(ctmp, struct ksock_conn,
+                                         ksnc_list);
+                       ksocknal_conn_addref(conn);
+                       read_unlock(&ksocknal_data.ksnd_global_lock);
+                       return conn;
                }
        }
 
@@ -665,33 +705,20 @@ ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
 static struct ksock_sched *
 ksocknal_choose_scheduler_locked(unsigned int cpt)
 {
-       struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
-       struct ksock_sched *sched;
+       struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
        int i;
 
-       if (info->ksi_nthreads == 0) {
-               cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-                       if (info->ksi_nthreads > 0) {
+       if (sched->kss_nthreads == 0) {
+               cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
+                       if (sched->kss_nthreads > 0) {
                                CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
-                                      cpt, info->ksi_cpt);
-                               goto select_sched;
+                                      cpt, sched->kss_cpt);
+                               return sched;
                        }
                }
                return NULL;
        }
 
-select_sched:
-       sched = &info->ksi_scheds[0];
-       /*
-        * NB: it's safe so far, but info->ksi_nthreads could be changed
-        * at runtime when we have dynamic LNet configuration, then we
-        * need to take care of this.
-        */
-       for (i = 1; i < info->ksi_nthreads; i++) {
-               if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
-                       sched = &info->ksi_scheds[i];
-       }
-
        return sched;
 }
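
struct ksock_sched_info (one descriptor per CPT holding an array of schedulers) is collapsed into a single per-CPT struct ksock_sched, so the chooser above no longer picks the least-loaded thread within a CPT; it simply returns the CPT's scheduler, falling back to any scheduler that actually has threads. A sketch of the per-CPT allocation/iteration pattern built on the libcfs calls already visible in this patch (demo_sched and demo_setup are illustrative names; the header path is an assumption):

#include <libcfs/libcfs.h>      /* cfs_percpt_alloc(), cfs_percpt_for_each() */

struct demo_sched {
        int     nthreads;
        int     cpt;
};

static struct demo_sched **demo_scheds;         /* one entry per CPT */

static int demo_setup(void)
{
        struct demo_sched *sched;
        int i;

        /* cfs_percpt_alloc() hands back an array indexed by CPT number */
        demo_scheds = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*sched));
        if (!demo_scheds)
                return -ENOMEM;

        cfs_percpt_for_each(sched, i, demo_scheds)
                sched->cpt = i;

        return 0;
}
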
 
@@ -788,9 +815,10 @@ ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips
        LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
 
        /* Only match interfaces for additional connections
-         * if I have > 1 interface */
-        n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
-                MIN(n_peerips, net->ksnn_ninterfaces);
+        * if I have > 1 interface
+        */
+       n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
+               min(n_peerips, net->ksnn_ninterfaces);
 
         for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
                 /*              ^ yes really... */
@@ -943,7 +971,7 @@ ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
                                route = list_entry(rtmp, struct ksock_route,
                                                   ksnr_list);
 
-                               if (route->ksnr_myipaddr == iface->ksni_ipaddr)
+                               if (route->ksnr_myiface == iface->ksni_index)
                                        break;
 
                                route = NULL;
@@ -951,34 +979,34 @@ ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
                        if (route != NULL)
                                continue;
 
-                        this_netmatch = (((iface->ksni_ipaddr ^
-                                           newroute->ksnr_ipaddr) &
-                                           iface->ksni_netmask) == 0) ? 1 : 0;
+                       this_netmatch = (((iface->ksni_ipaddr ^
+                                          newroute->ksnr_ipaddr) &
+                                         iface->ksni_netmask) == 0) ? 1 : 0;
 
-                        if (!(best_iface == NULL ||
-                              best_netmatch < this_netmatch ||
-                              (best_netmatch == this_netmatch &&
-                               best_nroutes > iface->ksni_nroutes)))
-                                continue;
+                       if (!(best_iface == NULL ||
+                             best_netmatch < this_netmatch ||
+                             (best_netmatch == this_netmatch &&
+                              best_nroutes > iface->ksni_nroutes)))
+                               continue;
 
-                        best_iface = iface;
-                        best_netmatch = this_netmatch;
-                        best_nroutes = iface->ksni_nroutes;
-                }
+                       best_iface = iface;
+                       best_netmatch = this_netmatch;
+                       best_nroutes = iface->ksni_nroutes;
+               }
 
-                if (best_iface == NULL)
-                        continue;
+               if (best_iface == NULL)
+                       continue;
 
-                newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
-                best_iface->ksni_nroutes++;
+               newroute->ksnr_myiface = best_iface->ksni_index;
+               best_iface->ksni_nroutes++;
 
-                ksocknal_add_route_locked(peer_ni, newroute);
-                newroute = NULL;
-        }
+               ksocknal_add_route_locked(peer_ni, newroute);
+               newroute = NULL;
+       }
 
        write_unlock_bh(global_lock);
-        if (newroute != NULL)
-                ksocknal_route_decref(newroute);
+       if (newroute != NULL)
+               ksocknal_route_decref(newroute);
 }
 
 int
@@ -1029,7 +1057,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
                     struct socket *sock, int type)
 {
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
-       struct list_head zombies = LIST_HEAD_INIT(zombies);
+       LIST_HEAD(zombies);
        struct lnet_process_id peerid;
        struct list_head *tmp;
        u64 incarnation;
@@ -1136,25 +1164,27 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
 
        cpt = lnet_cpt_of_nid(peerid.nid, ni);
 
-        if (active) {
-                ksocknal_peer_addref(peer_ni);
+       if (active) {
+               ksocknal_peer_addref(peer_ni);
                write_lock_bh(global_lock);
-        } else {
-                rc = ksocknal_create_peer(&peer_ni, ni, peerid);
-                if (rc != 0)
-                        goto failed_1;
+       } else {
+               peer_ni = ksocknal_create_peer(ni, peerid);
+               if (IS_ERR(peer_ni)) {
+                       rc = PTR_ERR(peer_ni);
+                       goto failed_1;
+               }
 
                write_lock_bh(global_lock);
 
-                /* called with a ref on ni, so shutdown can't have started */
-               LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
+               /* called with a ref on ni, so shutdown can't have started */
+               LASSERT(atomic_read(&((struct ksock_net *)ni->ni_data)->ksnn_npeers) >= 0);
 
                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (peer2 == NULL) {
                        /* NB this puts an "empty" peer_ni in the peer_ni
                         * table (which takes my ref) */
-                       list_add_tail(&peer_ni->ksnp_list,
-                                     ksocknal_nid2peerlist(peerid.nid));
+                       hash_add(ksocknal_data.ksnd_peers,
+                                &peer_ni->ksnp_list, peerid.nid);
                } else {
                        ksocknal_peer_decref(peer_ni);
                        peer_ni = peer2;
@@ -1280,7 +1310,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
         * The cpt might have changed if we ended up selecting a non cpt
         * native scheduler. So use the scheduler's cpt instead.
         */
-       cpt = sched->kss_info->ksi_cpt;
+       cpt = sched->kss_cpt;
         sched->kss_nconns++;
         conn->ksnc_scheduler = sched;
 
@@ -1288,7 +1318,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
        /* Set the deadline for the outgoing HELLO to drain */
        conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
        conn->ksnc_tx_deadline = ktime_get_seconds() +
-                                *ksocknal_tunables.ksnd_timeout;
+                                ksocknal_timeout();
        smp_mb();   /* order with adding to peer_ni's conn list */
 
        list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
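
The timeout reads in this file switch from *ksocknal_tunables.ksnd_timeout to ksocknal_timeout(), a helper this series defines in socklnd.h rather than here. Its body is not shown in this diff; a plausible shape, stated purely as an assumption, is a fallback to the LNet-wide LND timeout when the module parameter is left at 0:

/* assumed shape of the helper; the authoritative definition lives in
 * lnet/klnds/socklnd/socklnd.h, not in this file */
static inline time64_t ksocknal_timeout(void)
{
        return *ksocknal_tunables.ksnd_timeout ?: lnet_get_lnd_timeout();
}
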
@@ -1319,11 +1349,10 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
          */
 
        CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
-              " incarnation:%lld sched[%d:%d]\n",
+              " incarnation:%lld sched[%d]\n",
               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
               &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
-              conn->ksnc_port, incarnation, cpt,
-              (int)(sched - &sched->kss_info->ksi_scheds[0]));
+              conn->ksnc_port, incarnation, cpt);
 
         if (active) {
                 /* additional routes after interface exchange? */
@@ -1378,8 +1407,7 @@ failed_2:
        if (!peer_ni->ksnp_closing &&
            list_empty(&peer_ni->ksnp_conns) &&
            list_empty(&peer_ni->ksnp_routes)) {
-               list_add(&zombies, &peer_ni->ksnp_tx_queue);
-               list_del_init(&peer_ni->ksnp_tx_queue);
+               list_splice_init(&peer_ni->ksnp_tx_queue, &zombies);
                ksocknal_unlink_peer_locked(peer_ni);
        }
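
Several hunks (here, in ksocknal_finalize_zcreq() and in ksocknal_free_buffers()) replace the old two-step "list_add() the local head into the queue, then list_del_init() the queue" trick with a stack LIST_HEAD() plus list_splice_init(), which moves every queued entry in one operation and leaves the source list empty. A minimal illustration of the idiom (demo_tx and demo_drain are made-up names):

#include <linux/list.h>

struct demo_tx {
        struct list_head list;
};

static void demo_drain(struct list_head *queue)
{
        LIST_HEAD(zombies);             /* empty local list on the stack */
        struct demo_tx *tx, *tmp;

        /* move the whole queue onto 'zombies'; 'queue' ends up empty */
        list_splice_init(queue, &zombies);

        list_for_each_entry_safe(tx, tmp, &zombies, list) {
                list_del(&tx->list);
                /* ... finalize tx ... */
        }
}
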
 
@@ -1451,7 +1479,7 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
        if (route != NULL) {
                /* dissociate conn from route... */
                LASSERT(!route->ksnr_deleted);
-               LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+               LASSERT((route->ksnr_connected & BIT(conn->ksnc_type)) != 0);
 
                conn2 = NULL;
                list_for_each(tmp, &peer_ni->ksnp_conns) {
@@ -1464,7 +1492,7 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
                        conn2 = NULL;
                }
                if (conn2 == NULL)
-                       route->ksnr_connected &= ~(1 << conn->ksnc_type);
+                       route->ksnr_connected &= ~BIT(conn->ksnc_type);
 
                conn->ksnc_route = NULL;
 
@@ -1475,7 +1503,7 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
                /* No more connections to this peer_ni */
 
                if (!list_empty(&peer_ni->ksnp_tx_queue)) {
-                               struct ksock_tx *tx;
+                       struct ksock_tx *tx;
 
                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
 
@@ -1505,8 +1533,7 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
 
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
-       list_add_tail(&conn->ksnc_list,
-                     &ksocknal_data.ksnd_deathrow_conns);
+       list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_deathrow_conns);
        wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -1535,8 +1562,8 @@ ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
        read_unlock(&ksocknal_data.ksnd_global_lock);
 
        if (notify)
-               lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
-                           cfs_time_seconds(last_alive)); /* to jiffies */
+               lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid,
+                           false, false, last_alive);
 }
 
 void
@@ -1545,7 +1572,7 @@ ksocknal_finalize_zcreq(struct ksock_conn *conn)
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        struct ksock_tx *tx;
        struct ksock_tx *tmp;
-       struct list_head zlist = LIST_HEAD_INIT(zlist);
+       LIST_HEAD(zlist);
 
        /* NB safe to finalize TXs because closing of socket will
         * abort all buffered data */
@@ -1561,8 +1588,7 @@ ksocknal_finalize_zcreq(struct ksock_conn *conn)
 
                tx->tx_msg.ksm_zc_cookies[0] = 0;
                tx->tx_zc_aborted = 1;  /* mark it as not-acked */
-               list_del(&tx->tx_zc_list);
-               list_add(&tx->tx_zc_list, &zlist);
+               list_move(&tx->tx_zc_list, &zlist);
        }
 
        spin_unlock(&peer_ni->ksnp_lock);
@@ -1669,7 +1695,7 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
         switch (conn->ksnc_rx_state) {
         case SOCKNAL_RX_LNET_PAYLOAD:
                 last_rcv = conn->ksnc_rx_deadline -
-                          *ksocknal_tunables.ksnd_timeout;
+                          ksocknal_timeout();
                CERROR("Completing partial receive from %s[%d], "
                       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
                       "last alive is %lld secs ago\n",
@@ -1677,7 +1703,10 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
                       &conn->ksnc_ipaddr, conn->ksnc_port,
                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
                       ktime_get_seconds() - last_rcv);
-               lnet_finalize(conn->ksnc_cookie, -EIO);
+               if (conn->ksnc_lnet_msg)
+                       conn->ksnc_lnet_msg->msg_health_status =
+                               LNET_MSG_STATUS_REMOTE_ERROR;
+               lnet_finalize(conn->ksnc_lnet_msg, -EIO);
                break;
         case SOCKNAL_RX_LNET_HEADER:
                 if (conn->ksnc_rx_started)
@@ -1753,8 +1782,7 @@ int
 ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
 {
        struct ksock_peer_ni *peer_ni;
-       struct list_head *ptmp;
-       struct list_head *pnxt;
+       struct hlist_node *pnxt;
        int lo;
        int hi;
        int i;
@@ -1762,37 +1790,41 @@ ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-        if (id.nid != LNET_NID_ANY)
-                lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
-        else {
-                lo = 0;
-                hi = ksocknal_data.ksnd_peer_hash_size - 1;
-        }
-
-        for (i = lo; i <= hi; i++) {
-               list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
+       if (id.nid != LNET_NID_ANY) {
+               lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+               hi = lo;
+       } else {
+               lo = 0;
+               hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
+       }
 
-                       peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
+       for (i = lo; i <= hi; i++) {
+               hlist_for_each_entry_safe(peer_ni, pnxt,
+                                         &ksocknal_data.ksnd_peers[i],
+                                         ksnp_list) {
 
-                        if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
-                              (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
-                                continue;
+                       if (!((id.nid == LNET_NID_ANY ||
+                              id.nid == peer_ni->ksnp_id.nid) &&
+                             (id.pid == LNET_PID_ANY ||
+                              id.pid == peer_ni->ksnp_id.pid)))
+                               continue;
 
-                        count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
-                }
-        }
+                       count += ksocknal_close_peer_conns_locked(peer_ni,
+                                                                 ipaddr, 0);
+               }
+       }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-        /* wildcards always succeed */
-        if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
-                return (0);
+       /* wildcards always succeed */
+       if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
+               return 0;
 
-        return (count == 0 ? -ENOENT : 0);
+       return (count == 0 ? -ENOENT : 0);
 }
 
 void
-ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
+ksocknal_notify_gw_down(lnet_nid_t gw_nid)
 {
        /* The router is telling me she's been notified of a change in
         * gateway state....
@@ -1802,81 +1834,14 @@ ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
                .pid    = LNET_PID_ANY,
        };
 
-        CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
-                alive ? "up" : "down");
-
-        if (!alive) {
-                /* If the gateway crashed, close all open connections... */
-                ksocknal_close_matching_conns (id, 0);
-                return;
-        }
-
-        /* ...otherwise do nothing.  We can only establish new connections
-         * if we have autroutes, and these connect on demand. */
-}
-
-void
-ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
-{
-       int connect = 1;
-       time64_t last_alive = 0;
-       time64_t now = ktime_get_seconds();
-       struct ksock_peer_ni *peer_ni = NULL;
-       rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
-       struct lnet_process_id id = {
-               .nid = nid,
-               .pid = LNET_PID_LUSTRE,
-       };
-
-       read_lock(glock);
-
-       peer_ni = ksocknal_find_peer_locked(ni, id);
-       if (peer_ni != NULL) {
-               struct list_head *tmp;
-               struct ksock_conn *conn;
-               int bufnob;
-
-               list_for_each(tmp, &peer_ni->ksnp_conns) {
-                       conn = list_entry(tmp, struct ksock_conn, ksnc_list);
-                       bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
-
-                       if (bufnob < conn->ksnc_tx_bufnob) {
-                               /* something got ACKed */
-                               conn->ksnc_tx_deadline = ktime_get_seconds() +
-                                                        *ksocknal_tunables.ksnd_timeout;
-                                peer_ni->ksnp_last_alive = now;
-                                conn->ksnc_tx_bufnob = bufnob;
-                        }
-                }
-
-                last_alive = peer_ni->ksnp_last_alive;
-                if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
-                        connect = 0;
-        }
-
-       read_unlock(glock);
-
-        if (last_alive != 0)
-               *when = cfs_time_seconds(last_alive);
-
-       CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
-               libcfs_nid2str(nid), peer_ni,
-              last_alive ? now - last_alive : -1,
-               connect);
+       CDEBUG(D_NET, "gw %s down\n", libcfs_nid2str(gw_nid));
 
-        if (!connect)
-                return;
-
-        ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
-
-       write_lock_bh(glock);
-
-        peer_ni = ksocknal_find_peer_locked(ni, id);
-        if (peer_ni != NULL)
-                ksocknal_launch_all_connections_locked(peer_ni);
+       /* If the gateway crashed, close all open connections... */
+       ksocknal_close_matching_conns(id, 0);
+       return;
 
-       write_unlock_bh(glock);
-        return;
+       /* We can only establish new connections
+        * if we have autoroutes, and these connect on demand. */
 }
 
 static void
@@ -1915,28 +1880,30 @@ ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
 static int
 ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
 {
-       struct list_head *start;
-       struct list_head *end;
-       struct list_head *tmp;
-       int               rc = -ENOENT;
-       unsigned int      hsize = ksocknal_data.ksnd_peer_hash_size;
+       int lo;
+       int hi;
+       int bkt;
+       int rc = -ENOENT;
 
-       if (id.nid == LNET_NID_ANY) {
-               start = &ksocknal_data.ksnd_peers[0];
-               end = &ksocknal_data.ksnd_peers[hsize - 1];
+       if (id.nid != LNET_NID_ANY) {
+               lo = hash_min(id.nid, HASH_BITS(ksocknal_data.ksnd_peers));
+               hi = lo;
        } else {
-               start = end = ksocknal_nid2peerlist(id.nid);
+               lo = 0;
+               hi = HASH_SIZE(ksocknal_data.ksnd_peers) - 1;
        }
 
-       for (tmp = start; tmp <= end; tmp++) {
-               int     peer_off; /* searching offset in peer_ni hash table */
+       for (bkt = lo; bkt <= hi; bkt++) {
+               int peer_off; /* searching offset in peer_ni hash table */
 
                for (peer_off = 0; ; peer_off++) {
                        struct ksock_peer_ni *peer_ni;
                        int           i = 0;
 
                        read_lock(&ksocknal_data.ksnd_global_lock);
-                       list_for_each_entry(peer_ni, tmp, ksnp_list) {
+                       hlist_for_each_entry(peer_ni,
+                                            &ksocknal_data.ksnd_peers[bkt],
+                                            ksnp_list) {
                                if (!((id.nid == LNET_NID_ANY ||
                                       id.nid == peer_ni->ksnp_id.nid) &&
                                      (id.pid == LNET_PID_ANY ||
@@ -1950,7 +1917,7 @@ ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
                        }
                        read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                       if (i == 0) /* no match */
+                       if (i <= peer_off) /* no match */
                                break;
 
                        rc = 0;
@@ -1969,7 +1936,6 @@ ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
        int rc;
        int i;
        int j;
-       struct list_head *ptmp;
        struct ksock_peer_ni *peer_ni;
        struct list_head *rtmp;
        struct ksock_route *route;
@@ -1989,33 +1955,32 @@ ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
        } else {
                iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
 
+               iface->ksni_index = ksocknal_ip2index(ipaddress, ni);
                iface->ksni_ipaddr = ipaddress;
                iface->ksni_netmask = netmask;
                iface->ksni_nroutes = 0;
                iface->ksni_npeers = 0;
 
-               for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-                       list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                               peer_ni = list_entry(ptmp, struct ksock_peer_ni,
-                                                    ksnp_list);
-
-                               for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
-                                       if (peer_ni->ksnp_passive_ips[j] == ipaddress)
-                                               iface->ksni_npeers++;
+               hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
+                       for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
+                               if (peer_ni->ksnp_passive_ips[j] == ipaddress)
+                                       iface->ksni_npeers++;
 
-                               list_for_each(rtmp, &peer_ni->ksnp_routes) {
-                                       route = list_entry(rtmp,
-                                                          struct ksock_route,
-                                                          ksnr_list);
+                       list_for_each(rtmp, &peer_ni->ksnp_routes) {
+                               route = list_entry(rtmp,
+                                                  struct ksock_route,
+                                                  ksnr_list);
 
-                                       if (route->ksnr_myipaddr == ipaddress)
-                                               iface->ksni_nroutes++;
-                               }
+                               if (route->ksnr_myiface ==
+                                           iface->ksni_index)
+                                       iface->ksni_nroutes++;
                        }
                }
 
                rc = 0;
-               /* NB only new connections will pay attention to the new interface! */
+               /* NB only new connections will pay attention to the new
+                * interface!
+                */
        }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2024,7 +1989,8 @@ ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
 }
 
 static void
-ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
+ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni,
+                                  __u32 ipaddr, int index)
 {
        struct list_head *tmp;
        struct list_head *nxt;
@@ -2045,16 +2011,16 @@ ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
                route = list_entry(tmp, struct ksock_route, ksnr_list);
 
-                if (route->ksnr_myipaddr != ipaddr)
-                        continue;
+               if (route->ksnr_myiface != index)
+                       continue;
 
-                if (route->ksnr_share_count != 0) {
-                        /* Manually created; keep, but unbind */
-                        route->ksnr_myipaddr = 0;
-                } else {
-                        ksocknal_del_route_locked(route);
-                }
-        }
+               if (route->ksnr_share_count != 0) {
+                       /* Manually created; keep, but unbind */
+                       route->ksnr_myiface = -1;
+               } else {
+                       ksocknal_del_route_locked(route);
+               }
+       }
 
        list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
                conn = list_entry(tmp, struct ksock_conn, ksnc_list);
@@ -2069,47 +2035,45 @@ ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
 {
        struct ksock_net *net = ni->ni_data;
        int rc = -ENOENT;
-       struct list_head *tmp;
-       struct list_head *nxt;
+       struct hlist_node *nxt;
        struct ksock_peer_ni *peer_ni;
        u32 this_ip;
+       int index;
        int i;
        int j;
 
-       write_lock_bh(&ksocknal_data.ksnd_global_lock);
+       index = ksocknal_ip2index(ipaddress, ni);
 
-        for (i = 0; i < net->ksnn_ninterfaces; i++) {
-                this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-                if (!(ipaddress == 0 ||
-                      ipaddress == this_ip))
-                        continue;
+       for (i = 0; i < net->ksnn_ninterfaces; i++) {
+               this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
 
-                rc = 0;
+               if (!(ipaddress == 0 ||
+                     ipaddress == this_ip))
+                       continue;
 
-                for (j = i+1; j < net->ksnn_ninterfaces; j++)
-                        net->ksnn_interfaces[j-1] =
-                                net->ksnn_interfaces[j];
+               rc = 0;
 
-                net->ksnn_ninterfaces--;
+               for (j = i+1; j < net->ksnn_ninterfaces; j++)
+                       net->ksnn_interfaces[j-1] =
+                               net->ksnn_interfaces[j];
 
-                for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
-                       list_for_each_safe(tmp, nxt,
-                                          &ksocknal_data.ksnd_peers[j]) {
-                               peer_ni = list_entry(tmp, struct ksock_peer_ni,
-                                                    ksnp_list);
+               net->ksnn_ninterfaces--;
 
-                                if (peer_ni->ksnp_ni != ni)
-                                        continue;
+               hash_for_each_safe(ksocknal_data.ksnd_peers, j,
+                                  nxt, peer_ni, ksnp_list) {
+                       if (peer_ni->ksnp_ni != ni)
+                               continue;
 
-                                ksocknal_peer_del_interface_locked(peer_ni, this_ip);
-                        }
-                }
-        }
+                       ksocknal_peer_del_interface_locked(peer_ni,
+                                                          this_ip, index);
+               }
+       }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-        return (rc);
+       return rc;
 }
 
 int
@@ -2205,7 +2169,7 @@ ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
                 data->ioc_u32[1] = conn->ksnc_port;
                 data->ioc_u32[2] = conn->ksnc_myipaddr;
                 data->ioc_u32[3] = conn->ksnc_type;
-               data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
+               data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
                 data->ioc_u32[5] = rxmem;
                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
                 ksocknal_conn_decref(conn);
@@ -2244,32 +2208,16 @@ ksocknal_free_buffers (void)
 {
        LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
 
-       if (ksocknal_data.ksnd_sched_info != NULL) {
-               struct ksock_sched_info *info;
-               int                     i;
-
-               cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-                       if (info->ksi_scheds != NULL) {
-                               LIBCFS_FREE(info->ksi_scheds,
-                                           info->ksi_nthreads_max *
-                                           sizeof(info->ksi_scheds[0]));
-                       }
-               }
-               cfs_percpt_free(ksocknal_data.ksnd_sched_info);
-       }
-
-        LIBCFS_FREE (ksocknal_data.ksnd_peers,
-                    sizeof(struct list_head) *
-                     ksocknal_data.ksnd_peer_hash_size);
+       if (ksocknal_data.ksnd_schedulers != NULL)
+               cfs_percpt_free(ksocknal_data.ksnd_schedulers);
 
        spin_lock(&ksocknal_data.ksnd_tx_lock);
 
        if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-               struct list_head zlist;
+               LIST_HEAD(zlist);
                struct ksock_tx *tx;
 
-               list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
-               list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+               list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist);
                spin_unlock(&ksocknal_data.ksnd_tx_lock);
 
                while (!list_empty(&zlist)) {
@@ -2285,25 +2233,23 @@ ksocknal_free_buffers (void)
 static void
 ksocknal_base_shutdown(void)
 {
-       struct ksock_sched_info *info;
        struct ksock_sched *sched;
+       struct ksock_peer_ni *peer_ni;
        int i;
-       int j;
 
        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read (&libcfs_kmemory));
        LASSERT (ksocknal_data.ksnd_nnets == 0);
 
-        switch (ksocknal_data.ksnd_init) {
-        default:
-                LASSERT (0);
+       switch (ksocknal_data.ksnd_init) {
+       default:
+               LASSERT(0);
+               /* fallthrough */
 
-        case SOCKNAL_INIT_ALL:
-        case SOCKNAL_INIT_DATA:
-                LASSERT (ksocknal_data.ksnd_peers != NULL);
-                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-                       LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
-                }
+       case SOCKNAL_INIT_ALL:
+       case SOCKNAL_INIT_DATA:
+               hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list)
+                       LASSERT(0);
 
                LASSERT(list_empty(&ksocknal_data.ksnd_nets));
                LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
@@ -2311,23 +2257,14 @@ ksocknal_base_shutdown(void)
                LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
                LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
 
-               if (ksocknal_data.ksnd_sched_info != NULL) {
-                       cfs_percpt_for_each(info, i,
-                                           ksocknal_data.ksnd_sched_info) {
-                               if (info->ksi_scheds == NULL)
-                                       continue;
+               if (ksocknal_data.ksnd_schedulers != NULL) {
+                       cfs_percpt_for_each(sched, i,
+                                           ksocknal_data.ksnd_schedulers) {
 
-                               for (j = 0; j < info->ksi_nthreads_max; j++) {
-
-                                       sched = &info->ksi_scheds[j];
-                                       LASSERT(list_empty(&sched->\
-                                                              kss_tx_conns));
-                                       LASSERT(list_empty(&sched->\
-                                                              kss_rx_conns));
-                                       LASSERT(list_empty(&sched-> \
-                                                 kss_zombie_noop_txs));
-                                       LASSERT(sched->kss_nconns == 0);
-                               }
+                               LASSERT(list_empty(&sched->kss_tx_conns));
+                               LASSERT(list_empty(&sched->kss_rx_conns));
+                               LASSERT(list_empty(&sched->kss_zombie_noop_txs));
+                               LASSERT(sched->kss_nconns == 0);
                        }
                }
 
@@ -2336,39 +2273,22 @@ ksocknal_base_shutdown(void)
                wake_up_all(&ksocknal_data.ksnd_connd_waitq);
                wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
 
-               if (ksocknal_data.ksnd_sched_info != NULL) {
-                       cfs_percpt_for_each(info, i,
-                                           ksocknal_data.ksnd_sched_info) {
-                               if (info->ksi_scheds == NULL)
-                                       continue;
-
-                               for (j = 0; j < info->ksi_nthreads_max; j++) {
-                                       sched = &info->ksi_scheds[j];
+               if (ksocknal_data.ksnd_schedulers != NULL) {
+                       cfs_percpt_for_each(sched, i,
+                                           ksocknal_data.ksnd_schedulers)
                                        wake_up_all(&sched->kss_waitq);
-                               }
-                       }
                }
 
-               i = 4;
-               read_lock(&ksocknal_data.ksnd_global_lock);
-               while (ksocknal_data.ksnd_nthreads != 0) {
-                       i++;
-                       /* power of 2? */
-                       CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
-                               "waiting for %d threads to terminate\n",
-                               ksocknal_data.ksnd_nthreads);
-                       read_unlock(&ksocknal_data.ksnd_global_lock);
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
-                       read_lock(&ksocknal_data.ksnd_global_lock);
-               }
-               read_unlock(&ksocknal_data.ksnd_global_lock);
+               wait_var_event_warning(&ksocknal_data.ksnd_nthreads,
+                                      ksocknal_data.ksnd_nthreads == 0,
+                                      "waiting for %d threads to terminate\n",
+                                      ksocknal_data.ksnd_nthreads);
 
-                ksocknal_free_buffers();
+               ksocknal_free_buffers();
 
-                ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
-                break;
-        }
+               ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
+               break;
+       }
 
        CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
               atomic_read (&libcfs_kmemory));
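[Editor's note] The loop removed above polled ksnd_nthreads once a second and rate-limited its message with the (i & -i) == i test, which holds only when the iteration count is a power of two; wait_var_event_warning() (apparently a libcfs helper, judging by its use here) folds that wait-and-warn pattern into a single call. A minimal userspace C sketch of the rate-limiting test, for reference only:

/* Userspace sketch of the rate-limiting test the old loop used: (i & -i)
 * keeps only the lowest set bit, so the comparison is true exactly when i
 * is a power of two and the "waiting for N threads" warning fires on
 * iterations 1, 2, 4, 8, ... instead of every second.  Illustrative only.
 */
#include <stdio.h>

static int is_power_of_two(unsigned int i)
{
        return i != 0 && (i & -i) == i;
}

int main(void)
{
        for (unsigned int i = 1; i <= 32; i++)
                if (is_power_of_two(i))
                        printf("iteration %2u: emit D_WARNING\n", i);
        return 0;
}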
@@ -2379,24 +2299,16 @@ ksocknal_base_shutdown(void)
 static int
 ksocknal_base_startup(void)
 {
-       struct ksock_sched_info *info;
-       int                     rc;
-       int                     i;
-
-        LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
-        LASSERT (ksocknal_data.ksnd_nnets == 0);
+       struct ksock_sched *sched;
+       int rc;
+       int i;
 
-        memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
+       LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
+       LASSERT(ksocknal_data.ksnd_nnets == 0);
 
-        ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
-       LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
-                    sizeof(struct list_head) *
-                    ksocknal_data.ksnd_peer_hash_size);
-        if (ksocknal_data.ksnd_peers == NULL)
-                return -ENOMEM;
+       memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
 
-        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
-               INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
+       hash_init(ksocknal_data.ksnd_peers);
 
        rwlock_init(&ksocknal_data.ksnd_global_lock);
        INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
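[Editor's note] The hash_init() call above replaces the hand-allocated ksnd_peers array of list_heads with the kernel's linux/hashtable.h API; the matching hash_for_each() walk appears in ksocknal_debug_peerhash() further down. A kernel-style sketch of that API with a hypothetical entry type (demo_peer is not a ksocklnd structure):

/* DEFINE_HASHTABLE() initialises a file-scope table statically;
 * hash_init(), as used above, is the runtime equivalent for a table
 * embedded in another structure, which is how ksnd_peers is declared.
 */
#include <linux/hashtable.h>
#include <linux/types.h>

struct demo_peer {
        u64                     key;    /* e.g. a NID */
        struct hlist_node       node;   /* plays the role of ksnp_list */
};

static DEFINE_HASHTABLE(demo_peers, 8);         /* 2^8 buckets */

static void demo_peer_add(struct demo_peer *p)
{
        hash_add(demo_peers, &p->node, p->key); /* bucket picked from key */
}

static struct demo_peer *demo_peer_find(u64 key)
{
        struct demo_peer *p;

        /* walk only the bucket this key hashes to */
        hash_for_each_possible(demo_peers, p, node, key)
                if (p->key == key)
                        return p;
        return NULL;
}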
@@ -2419,47 +2331,41 @@ ksocknal_base_startup(void)
 
        /* flag lists/ptrs/locks initialised */
        ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
-       try_module_get(THIS_MODULE);
+       if (!try_module_get(THIS_MODULE))
+               goto failed;
 
-       ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
-                                                        sizeof(*info));
-       if (ksocknal_data.ksnd_sched_info == NULL)
+       /* Create a scheduler block per available CPT */
+       ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
+                                                        sizeof(*sched));
+       if (ksocknal_data.ksnd_schedulers == NULL)
                goto failed;
 
-       cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-               struct ksock_sched *sched;
+       cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
                int nthrs;
 
+               /*
+                * make sure not to allocate more threads than there are
+                * cores/CPUs in the CPT
+                */
                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
                if (*ksocknal_tunables.ksnd_nscheds > 0) {
                        nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
                } else {
-                       /* max to half of CPUs, assume another half should be
-                        * reserved for upper layer modules */
+                       /*
+                        * max to half of CPUs, assume another half should be
+                        * reserved for upper layer modules
+                        */
                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
                }
 
-               info->ksi_nthreads_max = nthrs;
-               info->ksi_cpt = i;
-
-               if (nthrs != 0) {
-                       LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
-                                        info->ksi_nthreads_max *
-                                               sizeof(*sched));
-                       if (info->ksi_scheds == NULL)
-                               goto failed;
-
-                       for (; nthrs > 0; nthrs--) {
-                               sched = &info->ksi_scheds[nthrs - 1];
-
-                               sched->kss_info = info;
-                               spin_lock_init(&sched->kss_lock);
-                               INIT_LIST_HEAD(&sched->kss_rx_conns);
-                               INIT_LIST_HEAD(&sched->kss_tx_conns);
-                               INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
-                               init_waitqueue_head(&sched->kss_waitq);
-                       }
-               }
+               sched->kss_nthreads_max = nthrs;
+               sched->kss_cpt = i;
+
+               spin_lock_init(&sched->kss_lock);
+               INIT_LIST_HEAD(&sched->kss_rx_conns);
+               INIT_LIST_HEAD(&sched->kss_tx_conns);
+               INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
+               init_waitqueue_head(&sched->kss_waitq);
         }
 
         ksocknal_data.ksnd_connd_starting         = 0;
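[Editor's note] The scheduler sizing above caps threads at the CPT's core count, defaults to half the cores, and never drops below SOCKNAL_NSCHEDS unless the CPT is smaller than that. A standalone C sketch of the same min/max arithmetic; the value 3 for SOCKNAL_NSCHEDS is an assumption for the demo, the real constant is defined in socklnd.h:

#include <stdio.h>

#define SOCKNAL_NSCHEDS 3                       /* assumed value */

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static int sched_nthreads_max(int cpt_weight)
{
        /* half the cores, at least SOCKNAL_NSCHEDS, never more than the CPT has */
        return MIN(MAX(SOCKNAL_NSCHEDS, cpt_weight >> 1), cpt_weight);
}

int main(void)
{
        int weights[] = { 1, 2, 4, 8, 16 };
        int i;

        for (i = 0; i < (int)(sizeof(weights) / sizeof(weights[0])); i++)
                printf("CPT weight %2d -> up to %d scheduler threads\n",
                       weights[i], sched_nthreads_max(weights[i]));
        return 0;
}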
@@ -2511,58 +2417,49 @@ ksocknal_base_startup(void)
         return -ENETDOWN;
 }
 
-static void
+static int
 ksocknal_debug_peerhash(struct lnet_ni *ni)
 {
-       struct ksock_peer_ni *peer_ni = NULL;
-       struct list_head *tmp;
+       struct ksock_peer_ni *peer_ni;
        int i;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
-        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-               list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
-
-                        if (peer_ni->ksnp_ni == ni) break;
+       hash_for_each(ksocknal_data.ksnd_peers, i, peer_ni, ksnp_list) {
+               struct ksock_route *route;
+               struct ksock_conn *conn;
 
-                        peer_ni = NULL;
-                }
-        }
+               if (peer_ni->ksnp_ni != ni)
+                       continue;
 
-        if (peer_ni != NULL) {
-               struct ksock_route *route;
-               struct ksock_conn  *conn;
-
-               CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
-                      "closing %d, accepting %d, err %d, zcookie %llu, "
-                      "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
-                      atomic_read(&peer_ni->ksnp_refcount),
-                      peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
-                      peer_ni->ksnp_accepting, peer_ni->ksnp_error,
-                      peer_ni->ksnp_zc_next_cookie,
-                      !list_empty(&peer_ni->ksnp_tx_queue),
-                      !list_empty(&peer_ni->ksnp_zc_req_list));
-
-               list_for_each(tmp, &peer_ni->ksnp_routes) {
-                       route = list_entry(tmp, struct ksock_route, ksnr_list);
-                       CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
-                              "del %d\n", atomic_read(&route->ksnr_refcount),
-                              route->ksnr_scheduled, route->ksnr_connecting,
-                              route->ksnr_connected, route->ksnr_deleted);
+               CWARN("Active peer_ni on shutdown: %s, ref %d, "
+                     "closing %d, accepting %d, err %d, zcookie %llu, "
+                     "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
+                     atomic_read(&peer_ni->ksnp_refcount),
+                     peer_ni->ksnp_closing,
+                     peer_ni->ksnp_accepting, peer_ni->ksnp_error,
+                     peer_ni->ksnp_zc_next_cookie,
+                     !list_empty(&peer_ni->ksnp_tx_queue),
+                     !list_empty(&peer_ni->ksnp_zc_req_list));
+
+               list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
+                       CWARN("Route: ref %d, schd %d, conn %d, cnted %d, "
+                             "del %d\n", atomic_read(&route->ksnr_refcount),
+                             route->ksnr_scheduled, route->ksnr_connecting,
+                             route->ksnr_connected, route->ksnr_deleted);
                }
 
-               list_for_each(tmp, &peer_ni->ksnp_conns) {
-                       conn = list_entry(tmp, struct ksock_conn, ksnc_list);
-                       CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
-                              atomic_read(&conn->ksnc_conn_refcount),
-                              atomic_read(&conn->ksnc_sock_refcount),
-                              conn->ksnc_type, conn->ksnc_closing);
+               list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
+                       CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
+                             atomic_read(&conn->ksnc_conn_refcount),
+                             atomic_read(&conn->ksnc_sock_refcount),
+                             conn->ksnc_type, conn->ksnc_closing);
                }
+               break;
        }
 
        read_unlock(&ksocknal_data.ksnd_global_lock);
-       return;
+       return 0;
 }
 
 void
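[Editor's note] The debug walk above also swaps the open-coded list_for_each() + list_entry() pairs for the typed list_for_each_entry() iterator. A kernel-style sketch of that idiom with a made-up element type (demo_route stands in for the route/conn structures walked above):

#include <linux/list.h>

struct demo_route {
        struct list_head        list;           /* stands in for ksnr_list */
        int                     connected;
};

static int demo_count_connected(struct list_head *routes)
{
        struct demo_route *route;
        int n = 0;

        /* yields the containing struct directly, no list_entry() needed */
        list_for_each_entry(route, routes, list)
                if (route->connected)
                        n++;
        return n;
}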
@@ -2575,103 +2472,35 @@ ksocknal_shutdown(struct lnet_ni *ni)
        };
        int i;
 
-        LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
-        LASSERT(ksocknal_data.ksnd_nnets > 0);
+       LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
+       LASSERT(ksocknal_data.ksnd_nnets > 0);
 
-       spin_lock_bh(&net->ksnn_lock);
-       net->ksnn_shutdown = 1;                 /* prevent new peers */
-       spin_unlock_bh(&net->ksnn_lock);
+       /* prevent new peers */
+       atomic_add(SOCKNAL_SHUTDOWN_BIAS, &net->ksnn_npeers);
 
        /* Delete all peers */
        ksocknal_del_peer(ni, anyid, 0);
 
        /* Wait for all peer_ni state to clean up */
-       i = 2;
-       spin_lock_bh(&net->ksnn_lock);
-       while (net->ksnn_npeers != 0) {
-               spin_unlock_bh(&net->ksnn_lock);
+       wait_var_event_warning(&net->ksnn_npeers,
+                              atomic_read(&net->ksnn_npeers) ==
+                              SOCKNAL_SHUTDOWN_BIAS,
+                              "waiting for %d peers to disconnect\n",
+                              ksocknal_debug_peerhash(ni) +
+                              atomic_read(&net->ksnn_npeers) -
+                              SOCKNAL_SHUTDOWN_BIAS);
 
-               i++;
-               CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
-                      "waiting for %d peers to disconnect\n",
-                      net->ksnn_npeers);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
-
-               ksocknal_debug_peerhash(ni);
-
-               spin_lock_bh(&net->ksnn_lock);
+       for (i = 0; i < net->ksnn_ninterfaces; i++) {
+               LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
+               LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
        }
-       spin_unlock_bh(&net->ksnn_lock);
-
-        for (i = 0; i < net->ksnn_ninterfaces; i++) {
-                LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
-                LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
-        }
 
        list_del(&net->ksnn_list);
        LIBCFS_FREE(net, sizeof(*net));
 
-        ksocknal_data.ksnd_nnets--;
-        if (ksocknal_data.ksnd_nnets == 0)
-                ksocknal_base_shutdown();
-}
-
-static int
-ksocknal_enumerate_interfaces(struct ksock_net *net)
-{
-       char **names;
-       int i;
-       int j;
-       int rc;
-       int n;
-
-       n = lnet_ipif_enumerate(&names);
-        if (n <= 0) {
-                CERROR("Can't enumerate interfaces: %d\n", n);
-                return n;
-        }
-
-        for (i = j = 0; i < n; i++) {
-                int        up;
-                __u32      ip;
-                __u32      mask;
-
-                if (!strcmp(names[i], "lo")) /* skip the loopback IF */
-                        continue;
-
-               rc = lnet_ipif_query(names[i], &up, &ip, &mask);
-                if (rc != 0) {
-                        CWARN("Can't get interface %s info: %d\n",
-                              names[i], rc);
-                        continue;
-                }
-
-                if (!up) {
-                        CWARN("Ignoring interface %s (down)\n",
-                              names[i]);
-                        continue;
-                }
-
-               if (j == LNET_INTERFACES_NUM) {
-                       CWARN("Ignoring interface %s (too many interfaces)\n",
-                             names[i]);
-                       continue;
-               }
-
-                net->ksnn_interfaces[j].ksni_ipaddr = ip;
-                net->ksnn_interfaces[j].ksni_netmask = mask;
-               strlcpy(net->ksnn_interfaces[j].ksni_name,
-                       names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
-                j++;
-        }
-
-       lnet_ipif_free_enumeration(names, n);
-
-        if (j == 0)
-                CERROR("Can't find any usable interfaces\n");
-
-        return j;
+       ksocknal_data.ksnd_nnets--;
+       if (ksocknal_data.ksnd_nnets == 0)
+               ksocknal_base_shutdown();
 }
 
 static int
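[Editor's note] ksocknal_shutdown() now signals shutdown by adding SOCKNAL_SHUTDOWN_BIAS to the ksnn_npeers atomic instead of taking ksnn_lock and setting a flag, then waits for the counter to drain back to exactly the bias; ksocknal_debug_peerhash() was changed to return 0 purely so it can be invoked for its side effect inside the warning expression. A userspace C sketch of the bias pattern, assuming only that the bias is far larger than any legitimate peer count (the real value of SOCKNAL_SHUTDOWN_BIAS is defined in socklnd.h):

#include <stdatomic.h>
#include <stdio.h>

#define SHUTDOWN_BIAS (1 << 20)                 /* assumed placeholder */

static atomic_int npeers;

static int peer_add(void)
{
        /* increment first, back out if shutdown already biased the counter */
        if (atomic_fetch_add(&npeers, 1) >= SHUTDOWN_BIAS) {
                atomic_fetch_sub(&npeers, 1);
                return -1;
        }
        return 0;
}

static void peer_del(void)
{
        atomic_fetch_sub(&npeers, 1);
}

int main(void)
{
        peer_add();                                /* one live peer */
        atomic_fetch_add(&npeers, SHUTDOWN_BIAS);  /* begin shutdown */
        printf("new peer allowed? %s\n", peer_add() ? "no" : "yes");
        peer_del();                                /* last peer disconnects */
        printf("drained back to the bias? %s\n",
               atomic_load(&npeers) == SHUTDOWN_BIAS ? "yes" : "no");
        return 0;
}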
@@ -2717,37 +2546,35 @@ ksocknal_search_new_ipif(struct ksock_net *net)
 }
 
 static int
-ksocknal_start_schedulers(struct ksock_sched_info *info)
+ksocknal_start_schedulers(struct ksock_sched *sched)
 {
        int     nthrs;
        int     rc = 0;
        int     i;
 
-       if (info->ksi_nthreads == 0) {
+       if (sched->kss_nthreads == 0) {
                if (*ksocknal_tunables.ksnd_nscheds > 0) {
-                       nthrs = info->ksi_nthreads_max;
+                       nthrs = sched->kss_nthreads_max;
                } else {
                        nthrs = cfs_cpt_weight(lnet_cpt_table(),
-                                              info->ksi_cpt);
+                                              sched->kss_cpt);
                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
                        nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
                }
-               nthrs = min(nthrs, info->ksi_nthreads_max);
+               nthrs = min(nthrs, sched->kss_nthreads_max);
        } else {
-               LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
+               LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
                /* increase two threads if there is new interface */
-               nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
+               nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
        }
 
        for (i = 0; i < nthrs; i++) {
                long id;
                char name[20];
-               struct ksock_sched *sched;
 
-               id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
-               sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
+               id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
                snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
-                        info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
+                        sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
 
                rc = ksocknal_thread_start(ksocknal_scheduler,
                                           (void *)id, name);
@@ -2755,11 +2582,11 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
                        continue;
 
                CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
-                      info->ksi_cpt, info->ksi_nthreads + i, rc);
+                      sched->kss_cpt, (int)KSOCK_THREAD_SID(id), rc);
                break;
        }
 
-       info->ksi_nthreads += i;
+       sched->kss_nthreads += i;
        return rc;
 }
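[Editor's note] ksocknal_start_schedulers() no longer needs the per-info array lookup because the scheduler block is now per-CPT, but the thread id still packs the CPT and the per-CPT sequence number, which KSOCK_THREAD_SID() unpacks for the thread name. A userspace sketch of that kind of shift-and-mask encoding; the macro names and the 16-bit shift are assumptions for the demo, the real KSOCK_THREAD_* macros live in socklnd.h:

#include <stdio.h>

#define DEMO_THREAD_SHIFT 16
#define DEMO_THREAD_ID(cpt, sid) (((long)(cpt) << DEMO_THREAD_SHIFT) | (sid))
#define DEMO_THREAD_CPT(id)      ((id) >> DEMO_THREAD_SHIFT)
#define DEMO_THREAD_SID(id)      ((id) & ((1L << DEMO_THREAD_SHIFT) - 1))

int main(void)
{
        long id = DEMO_THREAD_ID(2, 5);         /* CPT 2, 6th scheduler */

        printf("id=%ld cpt=%ld sid=%ld\n",
               id, DEMO_THREAD_CPT(id), DEMO_THREAD_SID(id));
        return 0;
}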
 
@@ -2774,16 +2601,16 @@ ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
                return -EINVAL;
 
        for (i = 0; i < ncpts; i++) {
-               struct ksock_sched_info *info;
+               struct ksock_sched *sched;
                int cpt = (cpts == NULL) ? i : cpts[i];
 
                LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
-               info = ksocknal_data.ksnd_sched_info[cpt];
+               sched = ksocknal_data.ksnd_schedulers[cpt];
 
-               if (!newif && info->ksi_nthreads > 0)
+               if (!newif && sched->kss_nthreads > 0)
                        continue;
 
-               rc = ksocknal_start_schedulers(info);
+               rc = ksocknal_start_schedulers(sched);
                if (rc != 0)
                        return rc;
        }
@@ -2794,10 +2621,11 @@ int
 ksocknal_startup(struct lnet_ni *ni)
 {
        struct ksock_net *net;
+       struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
+       struct ksock_interface *ksi = NULL;
+       struct lnet_inetdev *ifaces = NULL;
+       int i = 0;
        int rc;
-       int i;
-       struct net_device *net_dev;
-       int node_id;
 
         LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);
 
@@ -2807,71 +2635,101 @@ ksocknal_startup(struct lnet_ni *ni)
                         return rc;
         }
 
-        LIBCFS_ALLOC(net, sizeof(*net));
-        if (net == NULL)
-                goto fail_0;
+       LIBCFS_ALLOC(net, sizeof(*net));
+       if (net == NULL)
+               goto fail_0;
 
-       spin_lock_init(&net->ksnn_lock);
        net->ksnn_incarnation = ktime_get_real_ns();
        ni->ni_data = net;
-       if (!ni->ni_net->net_tunables_set) {
-               ni->ni_net->net_tunables.lct_peer_timeout =
+       net_tunables = &ni->ni_net->net_tunables;
+
+       if (net_tunables->lct_peer_timeout == -1)
+               net_tunables->lct_peer_timeout =
                        *ksocknal_tunables.ksnd_peertimeout;
-               ni->ni_net->net_tunables.lct_max_tx_credits =
+
+       if (net_tunables->lct_max_tx_credits == -1)
+               net_tunables->lct_max_tx_credits =
                        *ksocknal_tunables.ksnd_credits;
-               ni->ni_net->net_tunables.lct_peer_tx_credits =
+
+       if (net_tunables->lct_peer_tx_credits == -1)
+               net_tunables->lct_peer_tx_credits =
                        *ksocknal_tunables.ksnd_peertxcredits;
-               ni->ni_net->net_tunables.lct_peer_rtr_credits =
+
+       if (net_tunables->lct_peer_tx_credits >
+           net_tunables->lct_max_tx_credits)
+               net_tunables->lct_peer_tx_credits =
+                       net_tunables->lct_max_tx_credits;
+
+       if (net_tunables->lct_peer_rtr_credits == -1)
+               net_tunables->lct_peer_rtr_credits =
                        *ksocknal_tunables.ksnd_peerrtrcredits;
-               ni->ni_net->net_tunables_set = true;
-       }
 
+       rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
+       if (rc < 0)
+               goto fail_1;
 
-       if (ni->ni_interfaces[0] == NULL) {
-               rc = ksocknal_enumerate_interfaces(net);
-               if (rc <= 0)
-                       goto fail_1;
+       if (!ni->ni_interfaces[0]) {
+               ksi = &net->ksnn_interfaces[0];
 
+               /* Use the first discovered interface */
                net->ksnn_ninterfaces = 1;
+               ni->ni_dev_cpt = ifaces[0].li_cpt;
+               ksi->ksni_ipaddr = ifaces[0].li_ipaddr;
+               ksi->ksni_index = ksocknal_ip2index(ksi->ksni_ipaddr, ni);
+               ksi->ksni_netmask = ifaces[0].li_netmask;
+               strlcpy(ksi->ksni_name, ifaces[0].li_name,
+                       sizeof(ksi->ksni_name));
        } else {
+               /* Before Multi-Rail, ksocklnd would manage
+                * multiple interfaces with its own tcp bonding.
+                * If we encounter an old configuration using
+                * this tcp bonding approach then we need to
+                * handle more than one entry in ni_interfaces.
+                *
+                * In a Multi-Rail configuration only ONE entry in
+                * ni_interfaces should exist. Each IP alias should
+                * be mapped to its own 'struct lnet_ni'.
+                */
                for (i = 0; i < LNET_INTERFACES_NUM; i++) {
-                       int up;
+                       int j;
 
-                       if (ni->ni_interfaces[i] == NULL)
+                       if (!ni->ni_interfaces[i])
                                break;
 
-                       rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
-                               &net->ksnn_interfaces[i].ksni_ipaddr,
-                               &net->ksnn_interfaces[i].ksni_netmask);
-
-                       if (rc != 0) {
-                               CERROR("Can't get interface %s info: %d\n",
-                                      ni->ni_interfaces[i], rc);
-                               goto fail_1;
-                       }
-
-                       if (!up) {
-                               CERROR("Interface %s is down\n",
-                                      ni->ni_interfaces[i]);
-                               goto fail_1;
+                       for (j = 0; j < LNET_INTERFACES_NUM;  j++) {
+                               if (i != j && ni->ni_interfaces[j] &&
+                                   strcmp(ni->ni_interfaces[i],
+                                          ni->ni_interfaces[j]) == 0) {
+                                       rc = -EEXIST;
+                                       CERROR("ksocklnd: found duplicate %s at %d and %d, rc = %d\n",
+                                              ni->ni_interfaces[i], i, j, rc);
+                                       goto fail_1;
+                               }
                        }
 
-                       strlcpy(net->ksnn_interfaces[i].ksni_name,
-                               ni->ni_interfaces[i],
-                               sizeof(net->ksnn_interfaces[i].ksni_name));
+                       for (j = 0; j < rc; j++) {
+                               if (strcmp(ifaces[j].li_name,
+                                          ni->ni_interfaces[i]) != 0)
+                                       continue;
 
+                               ksi = &net->ksnn_interfaces[j];
+                               ni->ni_dev_cpt = ifaces[j].li_cpt;
+                               ksi->ksni_ipaddr = ifaces[j].li_ipaddr;
+                               ksi->ksni_index =
+                                 ksocknal_ip2index(ksi->ksni_ipaddr, ni);
+                               ksi->ksni_netmask = ifaces[j].li_netmask;
+                               strlcpy(ksi->ksni_name, ifaces[j].li_name,
+                                       sizeof(ksi->ksni_name));
+                               net->ksnn_ninterfaces++;
+                               break;
+                       }
+               }
+               /* not all ni_interfaces were mapped to an interface */
+               if (!ksi || net->ksnn_ninterfaces != i) {
+                       CERROR("ksocklnd: requested %d but only %d interfaces found\n",
+                              i, net->ksnn_ninterfaces);
+                       goto fail_1;
                }
-               net->ksnn_ninterfaces = i;
-       }
-
-       net_dev = dev_get_by_name(&init_net,
-                                 net->ksnn_interfaces[0].ksni_name);
-       if (net_dev != NULL) {
-               node_id = dev_to_node(&net_dev->dev);
-               ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
-               dev_put(net_dev);
-       } else {
-               ni->ni_dev_cpt = CFS_CPT_ANY;
        }
 
        /* call it before add it to ksocknal_data.ksnd_nets */
@@ -2879,8 +2737,8 @@ ksocknal_startup(struct lnet_ni *ni)
        if (rc != 0)
                goto fail_1;
 
-       ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
-                               net->ksnn_interfaces[0].ksni_ipaddr);
+       LASSERT(ksi);
+       ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ksi->ksni_ipaddr);
        list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
 
         ksocknal_data.ksnd_nnets++;
@@ -2902,24 +2760,24 @@ static void __exit ksocklnd_exit(void)
        lnet_unregister_lnd(&the_ksocklnd);
 }
 
+static const struct lnet_lnd the_ksocklnd = {
+       .lnd_type               = SOCKLND,
+       .lnd_startup            = ksocknal_startup,
+       .lnd_shutdown           = ksocknal_shutdown,
+       .lnd_ctl                = ksocknal_ctl,
+       .lnd_send               = ksocknal_send,
+       .lnd_recv               = ksocknal_recv,
+       .lnd_notify_peer_down   = ksocknal_notify_gw_down,
+       .lnd_accept             = ksocknal_accept,
+};
+
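[Editor's note] With every handler known at compile time, the_ksocklnd becomes a const designated-initializer table above instead of being filled in field by field in ksocklnd_init(). A generic C illustration of that change; demo_ops and its members are made up and unrelated to the lnet_lnd fields:

#include <stdio.h>

struct demo_ops {
        int  (*startup)(void);
        void (*shutdown)(void);
};

static int  demo_startup(void)  { puts("startup");  return 0; }
static void demo_shutdown(void) { puts("shutdown"); }

/* every handler is known at compile time, so the table can live in .rodata */
static const struct demo_ops the_demo_ops = {
        .startup        = demo_startup,
        .shutdown       = demo_shutdown,
};

int main(void)
{
        the_demo_ops.startup();
        the_demo_ops.shutdown();
        return 0;
}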
 static int __init ksocklnd_init(void)
 {
        int rc;
 
        /* check ksnr_connected/connecting field large enough */
-       CLASSERT(SOCKLND_CONN_NTYPES <= 4);
-       CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
-
-       /* initialize the_ksocklnd */
-       the_ksocklnd.lnd_type     = SOCKLND;
-       the_ksocklnd.lnd_startup  = ksocknal_startup;
-       the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
-       the_ksocklnd.lnd_ctl      = ksocknal_ctl;
-       the_ksocklnd.lnd_send     = ksocknal_send;
-       the_ksocklnd.lnd_recv     = ksocknal_recv;
-       the_ksocklnd.lnd_notify   = ksocknal_notify;
-       the_ksocklnd.lnd_query    = ksocknal_query;
-       the_ksocklnd.lnd_accept   = ksocknal_accept;
+       BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
+       BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);
 
        rc = ksocknal_tunables_init();
        if (rc != 0)