LU-11893 ksocklnd: add secondary IP address handling

diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c
index be5545e..3409264 100644
--- a/lnet/klnds/socklnd/socklnd.c
+++ b/lnet/klnds/socklnd/socklnd.c
@@ -23,7 +23,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Author: Eric Barton <eric@bartonsoftware.com>
  */
 
-#include <linux/pci.h>
 #include "socklnd.h"
+#include <linux/inetdevice.h>
 
-static lnd_t                   the_ksocklnd;
-ksock_nal_data_t        ksocknal_data;
+static struct lnet_lnd the_ksocklnd;
+struct ksock_nal_data ksocknal_data;
 
-static ksock_interface_t *
-ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
+static struct ksock_interface *
+ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
 {
-        ksock_net_t       *net = ni->ni_data;
-        int                i;
-        ksock_interface_t *iface;
+       struct ksock_net *net = ni->ni_data;
+       int i;
+       struct ksock_interface *iface;
 
-        for (i = 0; i < net->ksnn_ninterfaces; i++) {
-                LASSERT(i < LNET_MAX_INTERFACES);
-                iface = &net->ksnn_interfaces[i];
+       for (i = 0; i < net->ksnn_ninterfaces; i++) {
+               LASSERT(i < LNET_INTERFACES_NUM);
+               iface = &net->ksnn_interfaces[i];
 
-                if (iface->ksni_ipaddr == ip)
-                        return (iface);
-        }
+               if (iface->ksni_ipaddr == ip)
+                       return iface;
+       }
 
-        return (NULL);
+       return NULL;
 }
 
-static ksock_route_t *
-ksocknal_create_route (__u32 ipaddr, int port)
+static struct ksock_route *
+ksocknal_create_route(__u32 ipaddr, int port)
 {
-       ksock_route_t *route;
+       struct ksock_route *route;
 
        LIBCFS_ALLOC (route, sizeof (*route));
        if (route == NULL)
@@ -86,7 +86,7 @@ ksocknal_create_route (__u32 ipaddr, int port)
 }
 
 void
-ksocknal_destroy_route (ksock_route_t *route)
+ksocknal_destroy_route(struct ksock_route *route)
 {
        LASSERT (atomic_read(&route->ksnr_refcount) == 0);
 
@@ -97,42 +97,43 @@ ksocknal_destroy_route (ksock_route_t *route)
 }
 
 static int
-ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer_ni **peerp, struct lnet_ni *ni,
+                    struct lnet_process_id id)
 {
-       int             cpt = lnet_cpt_of_nid(id.nid, ni);
-       ksock_net_t     *net = ni->ni_data;
-       ksock_peer_t    *peer;
+       int cpt = lnet_cpt_of_nid(id.nid, ni);
+       struct ksock_net *net = ni->ni_data;
+       struct ksock_peer_ni *peer_ni;
 
        LASSERT(id.nid != LNET_NID_ANY);
        LASSERT(id.pid != LNET_PID_ANY);
        LASSERT(!in_interrupt());
 
-       LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
-       if (peer == NULL)
+       LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
+       if (peer_ni == NULL)
                return -ENOMEM;
 
-       peer->ksnp_ni = ni;
-       peer->ksnp_id = id;
-       atomic_set(&peer->ksnp_refcount, 1);    /* 1 ref for caller */
-       peer->ksnp_closing = 0;
-       peer->ksnp_accepting = 0;
-       peer->ksnp_proto = NULL;
-       peer->ksnp_last_alive = 0;
-       peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
-
-       INIT_LIST_HEAD(&peer->ksnp_conns);
-       INIT_LIST_HEAD(&peer->ksnp_routes);
-       INIT_LIST_HEAD(&peer->ksnp_tx_queue);
-       INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
-       spin_lock_init(&peer->ksnp_lock);
+       peer_ni->ksnp_ni = ni;
+       peer_ni->ksnp_id = id;
+       atomic_set(&peer_ni->ksnp_refcount, 1); /* 1 ref for caller */
+       peer_ni->ksnp_closing = 0;
+       peer_ni->ksnp_accepting = 0;
+       peer_ni->ksnp_proto = NULL;
+       peer_ni->ksnp_last_alive = 0;
+       peer_ni->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+
+       INIT_LIST_HEAD(&peer_ni->ksnp_conns);
+       INIT_LIST_HEAD(&peer_ni->ksnp_routes);
+       INIT_LIST_HEAD(&peer_ni->ksnp_tx_queue);
+       INIT_LIST_HEAD(&peer_ni->ksnp_zc_req_list);
+       spin_lock_init(&peer_ni->ksnp_lock);
 
        spin_lock_bh(&net->ksnn_lock);
 
        if (net->ksnn_shutdown) {
                spin_unlock_bh(&net->ksnn_lock);
 
-               LIBCFS_FREE(peer, sizeof(*peer));
-               CERROR("Can't create peer: network shutdown\n");
+               LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
+               CERROR("Can't create peer_ni: network shutdown\n");
                return -ESHUTDOWN;
        }
 
@@ -140,136 +141,137 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 
        spin_unlock_bh(&net->ksnn_lock);
 
-       *peerp = peer;
+       *peerp = peer_ni;
        return 0;
 }
 
 void
-ksocknal_destroy_peer (ksock_peer_t *peer)
+ksocknal_destroy_peer(struct ksock_peer_ni *peer_ni)
 {
-       ksock_net_t    *net = peer->ksnp_ni->ni_data;
+       struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
 
-       CDEBUG (D_NET, "peer %s %p deleted\n",
-               libcfs_id2str(peer->ksnp_id), peer);
+       CDEBUG (D_NET, "peer_ni %s %p deleted\n",
+               libcfs_id2str(peer_ni->ksnp_id), peer_ni);
 
-       LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
-       LASSERT(peer->ksnp_accepting == 0);
-       LASSERT(list_empty(&peer->ksnp_conns));
-       LASSERT(list_empty(&peer->ksnp_routes));
-       LASSERT(list_empty(&peer->ksnp_tx_queue));
-       LASSERT(list_empty(&peer->ksnp_zc_req_list));
+       LASSERT(atomic_read(&peer_ni->ksnp_refcount) == 0);
+       LASSERT(peer_ni->ksnp_accepting == 0);
+       LASSERT(list_empty(&peer_ni->ksnp_conns));
+       LASSERT(list_empty(&peer_ni->ksnp_routes));
+       LASSERT(list_empty(&peer_ni->ksnp_tx_queue));
+       LASSERT(list_empty(&peer_ni->ksnp_zc_req_list));
 
-       LIBCFS_FREE(peer, sizeof(*peer));
+       LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
 
-        /* NB a peer's connections and routes keep a reference on their peer
+        /* NB a peer_ni's connections and routes keep a reference on their peer_ni
          * until they are destroyed, so we can be assured that _all_ state to
-         * do with this peer has been cleaned up when its refcount drops to
+         * do with this peer_ni has been cleaned up when its refcount drops to
          * zero. */
        spin_lock_bh(&net->ksnn_lock);
        net->ksnn_npeers--;
        spin_unlock_bh(&net->ksnn_lock);
 }
 
-ksock_peer_t *
-ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
+struct ksock_peer_ni *
+ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
 {
        struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
        struct list_head *tmp;
-       ksock_peer_t     *peer;
+       struct ksock_peer_ni *peer_ni;
 
        list_for_each(tmp, peer_list) {
+               peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
 
-               peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+               LASSERT(!peer_ni->ksnp_closing);
 
-               LASSERT(!peer->ksnp_closing);
-
-               if (peer->ksnp_ni != ni)
+               if (peer_ni->ksnp_ni != ni)
                        continue;
 
-               if (peer->ksnp_id.nid != id.nid ||
-                   peer->ksnp_id.pid != id.pid)
+               if (peer_ni->ksnp_id.nid != id.nid ||
+                   peer_ni->ksnp_id.pid != id.pid)
                        continue;
 
-               CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
-                      peer, libcfs_id2str(id),
-                      atomic_read(&peer->ksnp_refcount));
-               return peer;
+               CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d)\n",
+                      peer_ni, libcfs_id2str(id),
+                      atomic_read(&peer_ni->ksnp_refcount));
+               return peer_ni;
        }
        return NULL;
 }
 
-ksock_peer_t *
-ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
+struct ksock_peer_ni *
+ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
 {
-        ksock_peer_t     *peer;
+       struct ksock_peer_ni *peer_ni;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
-       peer = ksocknal_find_peer_locked(ni, id);
-       if (peer != NULL)                       /* +1 ref for caller? */
-               ksocknal_peer_addref(peer);
+       peer_ni = ksocknal_find_peer_locked(ni, id);
+       if (peer_ni != NULL)                    /* +1 ref for caller? */
+               ksocknal_peer_addref(peer_ni);
        read_unlock(&ksocknal_data.ksnd_global_lock);
 
-        return (peer);
+        return (peer_ni);
 }
 
 static void
-ksocknal_unlink_peer_locked (ksock_peer_t *peer)
+ksocknal_unlink_peer_locked(struct ksock_peer_ni *peer_ni)
 {
-        int                i;
-        __u32              ip;
-        ksock_interface_t *iface;
-
-        for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
-                LASSERT (i < LNET_MAX_INTERFACES);
-                ip = peer->ksnp_passive_ips[i];
-
-                iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
-                /* All IPs in peer->ksnp_passive_ips[] come from the
-                 * interface list, therefore the call must succeed. */
-                LASSERT (iface != NULL);
-
-                CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
-                       peer, iface, iface->ksni_nroutes);
-                iface->ksni_npeers--;
-        }
+       int i;
+       __u32 ip;
+       struct ksock_interface *iface;
+
+       for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++) {
+               LASSERT(i < LNET_INTERFACES_NUM);
+               ip = peer_ni->ksnp_passive_ips[i];
+
+               iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
+               /*
+                * All IPs in peer_ni->ksnp_passive_ips[] come from the
+                * interface list, therefore the call must succeed.
+                */
+               LASSERT(iface != NULL);
+
+               CDEBUG(D_NET, "peer_ni=%p iface=%p ksni_nroutes=%d\n",
+                      peer_ni, iface, iface->ksni_nroutes);
+               iface->ksni_npeers--;
+       }
 
-       LASSERT(list_empty(&peer->ksnp_conns));
-       LASSERT(list_empty(&peer->ksnp_routes));
-       LASSERT(!peer->ksnp_closing);
-       peer->ksnp_closing = 1;
-       list_del(&peer->ksnp_list);
+       LASSERT(list_empty(&peer_ni->ksnp_conns));
+       LASSERT(list_empty(&peer_ni->ksnp_routes));
+       LASSERT(!peer_ni->ksnp_closing);
+       peer_ni->ksnp_closing = 1;
+       list_del(&peer_ni->ksnp_list);
        /* lose peerlist's ref */
-       ksocknal_peer_decref(peer);
+       ksocknal_peer_decref(peer_ni);
 }
 
 static int
-ksocknal_get_peer_info (lnet_ni_t *ni, int index,
-                        lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
-                        int *port, int *conn_count, int *share_count)
+ksocknal_get_peer_info(struct lnet_ni *ni, int index,
+                      struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
+                      int *port, int *conn_count, int *share_count)
 {
-       ksock_peer_t      *peer;
-       struct list_head  *ptmp;
-       ksock_route_t     *route;
-       struct list_head  *rtmp;
-       int                i;
-        int                j;
-       int                rc = -ENOENT;
+       struct ksock_peer_ni *peer_ni;
+       struct list_head *ptmp;
+       struct ksock_route *route;
+       struct list_head *rtmp;
+       int i;
+       int j;
+       int rc = -ENOENT;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                       peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
 
-                       if (peer->ksnp_ni != ni)
+                       if (peer_ni->ksnp_ni != ni)
                                continue;
 
-                       if (peer->ksnp_n_passive_ips == 0 &&
-                           list_empty(&peer->ksnp_routes)) {
+                       if (peer_ni->ksnp_n_passive_ips == 0 &&
+                           list_empty(&peer_ni->ksnp_routes)) {
                                if (index-- > 0)
                                        continue;
 
-                                *id = peer->ksnp_id;
+                                *id = peer_ni->ksnp_id;
                                 *myip = 0;
                                 *peer_ip = 0;
                                 *port = 0;
@@ -279,12 +281,12 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
                                 goto out;
                         }
 
-                       for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
+                       for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++) {
                                if (index-- > 0)
                                        continue;
 
-                                *id = peer->ksnp_id;
-                                *myip = peer->ksnp_passive_ips[j];
+                                *id = peer_ni->ksnp_id;
+                                *myip = peer_ni->ksnp_passive_ips[j];
                                 *peer_ip = 0;
                                 *port = 0;
                                 *conn_count = 0;
@@ -293,14 +295,14 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
                                 goto out;
                         }
 
-                       list_for_each(rtmp, &peer->ksnp_routes) {
+                       list_for_each(rtmp, &peer_ni->ksnp_routes) {
                                if (index-- > 0)
                                        continue;
 
-                               route = list_entry(rtmp, ksock_route_t,
+                               route = list_entry(rtmp, struct ksock_route,
                                                   ksnr_list);
 
-                               *id = peer->ksnp_id;
+                               *id = peer_ni->ksnp_id;
                                *myip = route->ksnr_myipaddr;
                                *peer_ip = route->ksnr_ipaddr;
                                *port = route->ksnr_port;
@@ -317,11 +319,11 @@ out:
 }
 
 static void
-ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
 {
-       ksock_peer_t      *peer = route->ksnr_peer;
-       int                type = conn->ksnc_type;
-       ksock_interface_t *iface;
+       struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+       int type = conn->ksnc_type;
+       struct ksock_interface *iface;
 
        conn->ksnc_route = route;
        ksocknal_route_addref(route);
@@ -330,12 +332,12 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
                if (route->ksnr_myipaddr == 0) {
                        /* route wasn't bound locally yet (the initial route) */
                        CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
-                              libcfs_id2str(peer->ksnp_id),
+                              libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
                               &conn->ksnc_myipaddr);
                } else {
                        CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h "
-                              "to %pI4h\n", libcfs_id2str(peer->ksnp_id),
+                              "to %pI4h\n", libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr,
                               &route->ksnr_myipaddr,
                               &conn->ksnc_myipaddr);
@@ -361,37 +363,37 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
 }
 
 static void
-ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
+ksocknal_add_route_locked(struct ksock_peer_ni *peer_ni, struct ksock_route *route)
 {
        struct list_head *tmp;
-       ksock_conn_t     *conn;
-       ksock_route_t    *route2;
+       struct ksock_conn *conn;
+       struct ksock_route *route2;
 
-       LASSERT(!peer->ksnp_closing);
+       LASSERT(!peer_ni->ksnp_closing);
        LASSERT(route->ksnr_peer == NULL);
        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
        LASSERT(route->ksnr_connected == 0);
 
        /* LASSERT(unique) */
-       list_for_each(tmp, &peer->ksnp_routes) {
-               route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+       list_for_each(tmp, &peer_ni->ksnp_routes) {
+               route2 = list_entry(tmp, struct ksock_route, ksnr_list);
 
                if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
                        CERROR("Duplicate route %s %pI4h\n",
-                              libcfs_id2str(peer->ksnp_id),
+                              libcfs_id2str(peer_ni->ksnp_id),
                               &route->ksnr_ipaddr);
                        LBUG();
                }
        }
 
-       route->ksnr_peer = peer;
-       ksocknal_peer_addref(peer);
-       /* peer's routelist takes over my ref on 'route' */
-       list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+       route->ksnr_peer = peer_ni;
+       ksocknal_peer_addref(peer_ni);
+       /* peer_ni's routelist takes over my ref on 'route' */
+       list_add_tail(&route->ksnr_list, &peer_ni->ksnp_routes);
 
-       list_for_each(tmp, &peer->ksnp_conns) {
-               conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+       list_for_each(tmp, &peer_ni->ksnp_conns) {
+               conn = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
                        continue;
@@ -402,19 +404,19 @@ ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
 }
 
 static void
-ksocknal_del_route_locked (ksock_route_t *route)
+ksocknal_del_route_locked(struct ksock_route *route)
 {
-       ksock_peer_t      *peer = route->ksnr_peer;
-       ksock_interface_t *iface;
-       ksock_conn_t      *conn;
-       struct list_head  *ctmp;
-       struct list_head  *cnxt;
+       struct ksock_peer_ni *peer_ni = route->ksnr_peer;
+       struct ksock_interface *iface;
+       struct ksock_conn *conn;
+       struct list_head *ctmp;
+       struct list_head *cnxt;
 
        LASSERT(!route->ksnr_deleted);
 
        /* Close associated conns */
-       list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
-               conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+       list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
+               conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
 
                if (conn->ksnc_route != route)
                        continue;
@@ -431,59 +433,60 @@ ksocknal_del_route_locked (ksock_route_t *route)
 
        route->ksnr_deleted = 1;
        list_del(&route->ksnr_list);
-       ksocknal_route_decref(route);           /* drop peer's ref */
+       ksocknal_route_decref(route);           /* drop peer_ni's ref */
 
-       if (list_empty(&peer->ksnp_routes) &&
-           list_empty(&peer->ksnp_conns)) {
-               /* I've just removed the last route to a peer with no active
+       if (list_empty(&peer_ni->ksnp_routes) &&
+           list_empty(&peer_ni->ksnp_conns)) {
+               /* I've just removed the last route to a peer_ni with no active
                 * connections */
-               ksocknal_unlink_peer_locked(peer);
+               ksocknal_unlink_peer_locked(peer_ni);
        }
 }
 
 int
-ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
+ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
+                 int port)
 {
        struct list_head *tmp;
-       ksock_peer_t     *peer;
-       ksock_peer_t     *peer2;
-       ksock_route_t    *route;
-       ksock_route_t    *route2;
-       int               rc;
+       struct ksock_peer_ni *peer_ni;
+       struct ksock_peer_ni *peer2;
+       struct ksock_route *route;
+       struct ksock_route *route2;
+       int rc;
 
         if (id.nid == LNET_NID_ANY ||
             id.pid == LNET_PID_ANY)
                 return (-EINVAL);
 
-        /* Have a brand new peer ready... */
-        rc = ksocknal_create_peer(&peer, ni, id);
+        /* Have a brand new peer_ni ready... */
+        rc = ksocknal_create_peer(&peer_ni, ni, id);
         if (rc != 0)
                 return rc;
 
         route = ksocknal_create_route (ipaddr, port);
         if (route == NULL) {
-                ksocknal_peer_decref(peer);
+                ksocknal_peer_decref(peer_ni);
                 return (-ENOMEM);
         }
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         /* always called with a ref on ni, so shutdown can't have started */
-        LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+       LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
 
        peer2 = ksocknal_find_peer_locked(ni, id);
        if (peer2 != NULL) {
-               ksocknal_peer_decref(peer);
-               peer = peer2;
+               ksocknal_peer_decref(peer_ni);
+               peer_ni = peer2;
        } else {
-               /* peer table takes my ref on peer */
-               list_add_tail(&peer->ksnp_list,
+               /* peer_ni table takes my ref on peer_ni */
+               list_add_tail(&peer_ni->ksnp_list,
                              ksocknal_nid2peerlist(id.nid));
        }
 
        route2 = NULL;
-       list_for_each(tmp, &peer->ksnp_routes) {
-               route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+       list_for_each(tmp, &peer_ni->ksnp_routes) {
+               route2 = list_entry(tmp, struct ksock_route, ksnr_list);
 
                if (route2->ksnr_ipaddr == ipaddr)
                        break;
@@ -491,7 +494,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
                route2 = NULL;
        }
        if (route2 == NULL) {
-               ksocknal_add_route_locked(peer, route);
+               ksocknal_add_route_locked(peer_ni, route);
                route->ksnr_share_count++;
        } else {
                ksocknal_route_decref(route);
@@ -504,21 +507,21 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
 }
 
 static void
-ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
+ksocknal_del_peer_locked(struct ksock_peer_ni *peer_ni, __u32 ip)
 {
-       ksock_conn_t     *conn;
-       ksock_route_t    *route;
+       struct ksock_conn *conn;
+       struct ksock_route *route;
        struct list_head *tmp;
        struct list_head *nxt;
-       int               nshared;
+       int nshared;
 
-       LASSERT(!peer->ksnp_closing);
+       LASSERT(!peer_ni->ksnp_closing);
 
-       /* Extra ref prevents peer disappearing until I'm done with it */
-       ksocknal_peer_addref(peer);
+       /* Extra ref prevents peer_ni disappearing until I'm done with it */
+       ksocknal_peer_addref(peer_ni);
 
-       list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+       list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                /* no match */
                if (!(ip == 0 || route->ksnr_ipaddr == ip))
@@ -530,8 +533,8 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
        }
 
        nshared = 0;
-       list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+       list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
                nshared += route->ksnr_share_count;
        }
 
@@ -539,36 +542,36 @@ ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
                /* remove everything else if there are no explicit entries
                 * left */
 
-               list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-                       route = list_entry(tmp, ksock_route_t, ksnr_list);
+               list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
+                       route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                        /* we should only be removing auto-entries */
                        LASSERT(route->ksnr_share_count == 0);
                        ksocknal_del_route_locked(route);
                }
 
-               list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
-                       conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+               list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
+                       conn = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                        ksocknal_close_conn_locked(conn, 0);
                }
        }
 
-       ksocknal_peer_decref(peer);
-               /* NB peer unlinks itself when last conn/route is removed */
+       ksocknal_peer_decref(peer_ni);
+       /* NB peer_ni unlinks itself when last conn/route is removed */
 }
 
 static int
-ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
+ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
 {
-       struct list_head  zombies = LIST_HEAD_INIT(zombies);
+       struct list_head zombies = LIST_HEAD_INIT(zombies);
        struct list_head *ptmp;
        struct list_head *pnxt;
-       ksock_peer_t     *peer;
-       int               lo;
-       int               hi;
-       int               i;
-       int               rc = -ENOENT;
+       struct ksock_peer_ni *peer_ni;
+       int lo;
+       int hi;
+       int i;
+       int rc = -ENOENT;
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -584,31 +587,31 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt,
                                   &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                       peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
 
-                       if (peer->ksnp_ni != ni)
+                       if (peer_ni->ksnp_ni != ni)
                                continue;
 
                        if (!((id.nid == LNET_NID_ANY ||
-                              peer->ksnp_id.nid == id.nid) &&
+                              peer_ni->ksnp_id.nid == id.nid) &&
                              (id.pid == LNET_PID_ANY ||
-                              peer->ksnp_id.pid == id.pid)))
+                              peer_ni->ksnp_id.pid == id.pid)))
                                continue;
 
-                       ksocknal_peer_addref(peer);     /* a ref for me... */
+                       ksocknal_peer_addref(peer_ni);  /* a ref for me... */
 
-                       ksocknal_del_peer_locked(peer, ip);
+                       ksocknal_del_peer_locked(peer_ni, ip);
 
-                       if (peer->ksnp_closing &&
-                           !list_empty(&peer->ksnp_tx_queue)) {
-                               LASSERT(list_empty(&peer->ksnp_conns));
-                               LASSERT(list_empty(&peer->ksnp_routes));
+                       if (peer_ni->ksnp_closing &&
+                           !list_empty(&peer_ni->ksnp_tx_queue)) {
+                               LASSERT(list_empty(&peer_ni->ksnp_conns));
+                               LASSERT(list_empty(&peer_ni->ksnp_routes));
 
-                               list_splice_init(&peer->ksnp_tx_queue,
+                               list_splice_init(&peer_ni->ksnp_tx_queue,
                                                 &zombies);
                        }
 
-                       ksocknal_peer_decref(peer);     /* ...till here */
+                       ksocknal_peer_decref(peer_ni);  /* ...till here */
 
                        rc = 0;                         /* matched! */
                }
@@ -616,36 +619,36 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-       ksocknal_txlist_done(ni, &zombies, 1);
+       ksocknal_txlist_done(ni, &zombies, -ENETDOWN);
 
        return rc;
 }
 
-static ksock_conn_t *
-ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
+static struct ksock_conn *
+ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
 {
-       ksock_peer_t     *peer;
+       struct ksock_peer_ni *peer_ni;
        struct list_head *ptmp;
-       ksock_conn_t     *conn;
+       struct ksock_conn *conn;
        struct list_head *ctmp;
-       int               i;
+       int i;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                       peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
 
-                       LASSERT(!peer->ksnp_closing);
+                       LASSERT(!peer_ni->ksnp_closing);
 
-                       if (peer->ksnp_ni != ni)
+                       if (peer_ni->ksnp_ni != ni)
                                continue;
 
-                       list_for_each(ctmp, &peer->ksnp_conns) {
+                       list_for_each(ctmp, &peer_ni->ksnp_conns) {
                                if (index-- > 0)
                                        continue;
 
-                               conn = list_entry(ctmp, ksock_conn_t,
+                               conn = list_entry(ctmp, struct ksock_conn,
                                                  ksnc_list);
                                ksocknal_conn_addref(conn);
                                read_unlock(&ksocknal_data. \
@@ -659,66 +662,65 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
        return NULL;
 }
 
-static ksock_sched_t *
+static struct ksock_sched *
 ksocknal_choose_scheduler_locked(unsigned int cpt)
 {
-       struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
-       ksock_sched_t           *sched;
-       int                     i;
-
-       LASSERT(info->ksi_nthreads > 0);
-
-       sched = &info->ksi_scheds[0];
-       /*
-        * NB: it's safe so far, but info->ksi_nthreads could be changed
-        * at runtime when we have dynamic LNet configuration, then we
-        * need to take care of this.
-        */
-       for (i = 1; i < info->ksi_nthreads; i++) {
-               if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
-                       sched = &info->ksi_scheds[i];
+       struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
+       int i;
+
+       if (sched->kss_nthreads == 0) {
+               cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
+                       if (sched->kss_nthreads > 0) {
+                               CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
+                                      cpt, sched->kss_cpt);
+                               return sched;
+                       }
+               }
+               return NULL;
        }
 
        return sched;
 }
 
 static int
-ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
+ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
 {
-        ksock_net_t       *net = ni->ni_data;
-        int                i;
-        int                nip;
+       struct ksock_net *net = ni->ni_data;
+       int i;
+       int nip;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
-        nip = net->ksnn_ninterfaces;
-        LASSERT (nip <= LNET_MAX_INTERFACES);
+       nip = net->ksnn_ninterfaces;
+       LASSERT(nip <= LNET_INTERFACES_NUM);
 
-       /* Only offer interfaces for additional connections if I have
-         * more than one. */
-        if (nip < 2) {
+       /*
+        * Only offer interfaces for additional connections if I have
+        * more than one.
+        */
+       if (nip < 2) {
                read_unlock(&ksocknal_data.ksnd_global_lock);
-                return 0;
-        }
+               return 0;
+       }
 
-        for (i = 0; i < nip; i++) {
-                ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
-                LASSERT (ipaddrs[i] != 0);
-        }
+       for (i = 0; i < nip; i++) {
+               ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
+               LASSERT(ipaddrs[i] != 0);
+       }
 
        read_unlock(&ksocknal_data.ksnd_global_lock);
-        return (nip);
+       return nip;
 }
 
 static int
-ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
+ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
 {
-        int   best_netmatch = 0;
-        int   best_xor      = 0;
-        int   best          = -1;
-        int   this_xor;
-        int   this_netmatch;
-        int   i;
+       int best_netmatch = 0;
+       int best_xor = 0;
+       int best = -1;
+       int this_xor;
+       int this_netmatch;
+       int i;
 
         for (i = 0; i < nips; i++) {
                 if (ips[i] == 0)
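
A minimal standalone sketch (not part of the patch) of the scheduler fallback introduced in ksocknal_choose_scheduler_locked() earlier in this hunk: the CPT-native scheduler is used if it has threads, otherwise any scheduler with threads is taken, and NULL is returned only when none has any. The struct and function names below are illustrative stand-ins; the real code walks the per-CPT table with cfs_percpt_for_each().

	/* Sketch only: a flat array stands in for ksnd_schedulers[]. */
	struct sched_stub {
		int nthreads;	/* threads attached to this scheduler */
		int cpt;	/* CPT this scheduler is native to */
	};

	static struct sched_stub *
	choose_sched_stub(struct sched_stub *tbl, int ncpts, int cpt)
	{
		int i;

		if (tbl[cpt].nthreads > 0)
			return &tbl[cpt];	/* CPT-native scheduler is usable */

		for (i = 0; i < ncpts; i++)	/* otherwise take any CPT that has threads */
			if (tbl[i].nthreads > 0)
				return &tbl[i];

		return NULL;	/* no threads anywhere; caller fails the connection */
	}

As in the ksocknal_create_conn() hunk further down, a NULL return is treated as fatal for the new connection, and the chosen scheduler's own kss_cpt replaces the requested cpt.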
@@ -743,21 +745,21 @@ ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
 }
 
 static int
-ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
+ksocknal_select_ips(struct ksock_peer_ni *peer_ni, __u32 *peerips, int n_peerips)
 {
-       rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
-        ksock_net_t        *net = peer->ksnp_ni->ni_data;
-        ksock_interface_t  *iface;
-        ksock_interface_t  *best_iface;
-        int                 n_ips;
-        int                 i;
-        int                 j;
-        int                 k;
-        __u32               ip;
-        __u32               xor;
-        int                 this_netmatch;
-        int                 best_netmatch;
-        int                 best_npeers;
+       rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+       struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
+       struct ksock_interface *iface;
+       struct ksock_interface *best_iface;
+       int n_ips;
+       int i;
+       int j;
+       int k;
+       u32 ip;
+       u32 xor;
+       int this_netmatch;
+       int best_netmatch;
+       int best_npeers;
 
         /* CAVEAT EMPTOR: We do all our interface matching with an
          * exclusive hold of global lock at IRQ priority.  We're only
@@ -769,33 +771,33 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 
        write_lock_bh(global_lock);
 
-        LASSERT (n_peerips <= LNET_MAX_INTERFACES);
-        LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
+       LASSERT(n_peerips <= LNET_INTERFACES_NUM);
+       LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
 
        /* Only match interfaces for additional connections
          * if I have > 1 interface */
         n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
                 MIN(n_peerips, net->ksnn_ninterfaces);
 
-        for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
+        for (i = 0; peer_ni->ksnp_n_passive_ips < n_ips; i++) {
                 /*              ^ yes really... */
 
                 /* If we have any new interfaces, first tick off all the
-                 * peer IPs that match old interfaces, then choose new
-                 * interfaces to match the remaining peer IPS.
+                 * peer_ni IPs that match old interfaces, then choose new
+                 * interfaces to match the remaining peer_ni IPS.
                  * We don't forget interfaces we've stopped using; we might
                  * start using them again... */
 
-                if (i < peer->ksnp_n_passive_ips) {
+                if (i < peer_ni->ksnp_n_passive_ips) {
                         /* Old interface. */
-                        ip = peer->ksnp_passive_ips[i];
-                        best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
+                        ip = peer_ni->ksnp_passive_ips[i];
+                        best_iface = ksocknal_ip2iface(peer_ni->ksnp_ni, ip);
 
-                        /* peer passive ips are kept up to date */
+                        /* peer_ni passive ips are kept up to date */
                         LASSERT(best_iface != NULL);
                 } else {
                         /* choose a new interface */
-                        LASSERT (i == peer->ksnp_n_passive_ips);
+                        LASSERT (i == peer_ni->ksnp_n_passive_ips);
 
                         best_iface = NULL;
                         best_netmatch = 0;
@@ -805,11 +807,11 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
                                 iface = &net->ksnn_interfaces[j];
                                 ip = iface->ksni_ipaddr;
 
-                                for (k = 0; k < peer->ksnp_n_passive_ips; k++)
-                                        if (peer->ksnp_passive_ips[k] == ip)
+                                for (k = 0; k < peer_ni->ksnp_n_passive_ips; k++)
+                                        if (peer_ni->ksnp_passive_ips[k] == ip)
                                                 break;
 
-                                if (k < peer->ksnp_n_passive_ips) /* using it already */
+                                if (k < peer_ni->ksnp_n_passive_ips) /* using it already */
                                         continue;
 
                                 k = ksocknal_match_peerip(iface, peerips, n_peerips);
@@ -831,17 +833,17 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 
                         best_iface->ksni_npeers++;
                         ip = best_iface->ksni_ipaddr;
-                        peer->ksnp_passive_ips[i] = ip;
-                        peer->ksnp_n_passive_ips = i+1;
+                        peer_ni->ksnp_passive_ips[i] = ip;
+                        peer_ni->ksnp_n_passive_ips = i+1;
                 }
 
-                /* mark the best matching peer IP used */
+                /* mark the best matching peer_ni IP used */
                 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
                 peerips[j] = 0;
         }
 
-        /* Overwrite input peer IP addresses */
-        memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
+        /* Overwrite input peer_ni IP addresses */
+        memcpy(peerips, peer_ni->ksnp_passive_ips, n_ips * sizeof(*peerips));
 
        write_unlock_bh(global_lock);
 
@@ -849,17 +851,17 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 }
 
 static void
-ksocknal_create_routes(ksock_peer_t *peer, int port,
+ksocknal_create_routes(struct ksock_peer_ni *peer_ni, int port,
                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
 {
-       ksock_route_t           *newroute = NULL;
+       struct ksock_route              *newroute = NULL;
        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
-       lnet_ni_t               *ni = peer->ksnp_ni;
-       ksock_net_t             *net = ni->ni_data;
+       struct lnet_ni *ni = peer_ni->ksnp_ni;
+       struct ksock_net                *net = ni->ni_data;
        struct list_head        *rtmp;
-       ksock_route_t           *route;
-       ksock_interface_t       *iface;
-       ksock_interface_t       *best_iface;
+       struct ksock_route              *route;
+       struct ksock_interface  *iface;
+       struct ksock_interface  *best_iface;
        int                     best_netmatch;
        int                     this_netmatch;
        int                     best_nroutes;
@@ -880,7 +882,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                 return;
         }
 
-        LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
+       LASSERT(npeer_ipaddrs <= LNET_INTERFACES_NUM);
 
         for (i = 0; i < npeer_ipaddrs; i++) {
                 if (newroute != NULL) {
@@ -895,15 +897,15 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                        write_lock_bh(global_lock);
                 }
 
-                if (peer->ksnp_closing) {
-                        /* peer got closed under me */
+                if (peer_ni->ksnp_closing) {
+                        /* peer_ni got closed under me */
                         break;
                 }
 
                /* Already got a route? */
                route = NULL;
-               list_for_each(rtmp, &peer->ksnp_routes) {
-                       route = list_entry(rtmp, ksock_route_t, ksnr_list);
+               list_for_each(rtmp, &peer_ni->ksnp_routes) {
+                       route = list_entry(rtmp, struct ksock_route, ksnr_list);
 
                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
                                break;
@@ -917,15 +919,15 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                best_nroutes = 0;
                best_netmatch = 0;
 
-               LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
+               LASSERT(net->ksnn_ninterfaces <= LNET_INTERFACES_NUM);
 
                /* Select interface to connect from */
                for (j = 0; j < net->ksnn_ninterfaces; j++) {
                        iface = &net->ksnn_interfaces[j];
 
                        /* Using this interface already? */
-                       list_for_each(rtmp, &peer->ksnp_routes) {
-                               route = list_entry(rtmp, ksock_route_t,
+                       list_for_each(rtmp, &peer_ni->ksnp_routes) {
+                               route = list_entry(rtmp, struct ksock_route,
                                                   ksnr_list);
 
                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
@@ -957,7 +959,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
                 best_iface->ksni_nroutes++;
 
-                ksocknal_add_route_locked(peer, newroute);
+                ksocknal_add_route_locked(peer_ni, newroute);
                 newroute = NULL;
         }
 
@@ -967,12 +969,12 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
 }
 
 int
-ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
+ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
 {
-       ksock_connreq_t *cr;
-       int              rc;
-       __u32            peer_ip;
-       int              peer_port;
+       struct ksock_connreq *cr;
+       int rc;
+       u32 peer_ip;
+       int peer_port;
 
        rc = lnet_sock_getaddr(sock, true, &peer_ip, &peer_port);
        LASSERT(rc == 0);               /* we succeeded before */
@@ -998,11 +1000,11 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
 }
 
 static int
-ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_connecting(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
 {
-       ksock_route_t *route;
+       struct ksock_route *route;
 
-       list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
+       list_for_each_entry(route, &peer_ni->ksnp_routes, ksnr_list) {
                if (route->ksnr_ipaddr == ipaddr)
                        return route->ksnr_connecting;
        }
@@ -1010,26 +1012,27 @@ ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
 }
 
 int
-ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
                     struct socket *sock, int type)
 {
-       rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
-       struct list_head        zombies = LIST_HEAD_INIT(zombies);
-       lnet_process_id_t       peerid;
-       struct list_head        *tmp;
-        __u64              incarnation;
-        ksock_conn_t      *conn;
-        ksock_conn_t      *conn2;
-        ksock_peer_t      *peer = NULL;
-        ksock_peer_t      *peer2;
-        ksock_sched_t     *sched;
+       rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+       struct list_head zombies = LIST_HEAD_INIT(zombies);
+       struct lnet_process_id peerid;
+       struct list_head *tmp;
+       u64 incarnation;
+       struct ksock_conn *conn;
+       struct ksock_conn *conn2;
+       struct ksock_peer_ni *peer_ni = NULL;
+       struct ksock_peer_ni *peer2;
+       struct ksock_sched *sched;
        struct ksock_hello_msg *hello;
-       int                cpt;
-        ksock_tx_t        *tx;
-        ksock_tx_t        *txtmp;
-        int                rc;
-        int                active;
-        char              *warn = NULL;
+       int cpt;
+       struct ksock_tx *tx;
+       struct ksock_tx *txtmp;
+       int rc;
+       int rc2;
+       int active;
+       char *warn = NULL;
 
         active = (route != NULL);
 
@@ -1061,7 +1064,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
        atomic_set (&conn->ksnc_tx_nob, 0);
 
        LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
-                                    kshm_ips[LNET_MAX_INTERFACES]));
+                                    kshm_ips[LNET_INTERFACES_NUM]));
         if (hello == NULL) {
                 rc = -ENOMEM;
                 goto failed_1;
@@ -1072,21 +1075,21 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
         if (rc != 0)
                 goto failed_1;
 
-        /* Find out/confirm peer's NID and connection type and get the
+        /* Find out/confirm peer_ni's NID and connection type and get the
          * vector of interfaces she's willing to let me connect to.
-         * Passive connections use the listener timeout since the peer sends
+         * Passive connections use the listener timeout since the peer_ni sends
          * eagerly */
 
         if (active) {
-                peer = route->ksnr_peer;
-                LASSERT(ni == peer->ksnp_ni);
+                peer_ni = route->ksnr_peer;
+                LASSERT(ni == peer_ni->ksnp_ni);
 
                 /* Active connection sends HELLO eagerly */
                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
-                peerid = peer->ksnp_id;
+                peerid = peer_ni->ksnp_id;
 
                write_lock_bh(global_lock);
-                conn->ksnc_proto = peer->ksnp_proto;
+                conn->ksnc_proto = peer_ni->ksnp_proto;
                write_unlock_bh(global_lock);
 
                 if (conn->ksnc_proto == NULL) {
@@ -1106,7 +1109,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
                 peerid.nid = LNET_NID_ANY;
                 peerid.pid = LNET_PID_ANY;
 
-                /* Passive, get protocol from peer */
+                /* Passive, get protocol from peer_ni */
                 conn->ksnc_proto = NULL;
         }
 
@@ -1121,71 +1124,71 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
        cpt = lnet_cpt_of_nid(peerid.nid, ni);
 
         if (active) {
-                ksocknal_peer_addref(peer);
+                ksocknal_peer_addref(peer_ni);
                write_lock_bh(global_lock);
         } else {
-                rc = ksocknal_create_peer(&peer, ni, peerid);
+                rc = ksocknal_create_peer(&peer_ni, ni, peerid);
                 if (rc != 0)
                         goto failed_1;
 
                write_lock_bh(global_lock);
 
                 /* called with a ref on ni, so shutdown can't have started */
-                LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+               LASSERT(((struct ksock_net *) ni->ni_data)->ksnn_shutdown == 0);
 
                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (peer2 == NULL) {
-                       /* NB this puts an "empty" peer in the peer
+                       /* NB this puts an "empty" peer_ni in the peer_ni
                         * table (which takes my ref) */
-                       list_add_tail(&peer->ksnp_list,
+                       list_add_tail(&peer_ni->ksnp_list,
                                      ksocknal_nid2peerlist(peerid.nid));
                } else {
-                       ksocknal_peer_decref(peer);
-                       peer = peer2;
+                       ksocknal_peer_decref(peer_ni);
+                       peer_ni = peer2;
                }
 
                 /* +1 ref for me */
-                ksocknal_peer_addref(peer);
-                peer->ksnp_accepting++;
+                ksocknal_peer_addref(peer_ni);
+                peer_ni->ksnp_accepting++;
 
                 /* Am I already connecting to this guy?  Resolve in
                  * favour of higher NID... */
                 if (peerid.nid < ni->ni_nid &&
-                    ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
+                    ksocknal_connecting(peer_ni, conn->ksnc_ipaddr)) {
                         rc = EALREADY;
                         warn = "connection race resolution";
                         goto failed_2;
                 }
         }
 
-        if (peer->ksnp_closing ||
+        if (peer_ni->ksnp_closing ||
             (active && route->ksnr_deleted)) {
-                /* peer/route got closed under me */
+                /* peer_ni/route got closed under me */
                 rc = -ESTALE;
-                warn = "peer/route removed";
+                warn = "peer_ni/route removed";
                 goto failed_2;
         }
 
-       if (peer->ksnp_proto == NULL) {
+       if (peer_ni->ksnp_proto == NULL) {
                /* Never connected before.
-                * NB recv_hello may have returned EPROTO to signal my peer
+                * NB recv_hello may have returned EPROTO to signal my peer_ni
                 * wants a different protocol than the one I asked for.
                 */
-               LASSERT(list_empty(&peer->ksnp_conns));
+               LASSERT(list_empty(&peer_ni->ksnp_conns));
 
-               peer->ksnp_proto = conn->ksnc_proto;
-               peer->ksnp_incarnation = incarnation;
+               peer_ni->ksnp_proto = conn->ksnc_proto;
+               peer_ni->ksnp_incarnation = incarnation;
        }
 
-        if (peer->ksnp_proto != conn->ksnc_proto ||
-            peer->ksnp_incarnation != incarnation) {
-                /* Peer rebooted or I've got the wrong protocol version */
-                ksocknal_close_peer_conns_locked(peer, 0, 0);
+        if (peer_ni->ksnp_proto != conn->ksnc_proto ||
+            peer_ni->ksnp_incarnation != incarnation) {
+                /* peer_ni rebooted or I've got the wrong protocol version */
+                ksocknal_close_peer_conns_locked(peer_ni, 0, 0);
 
-                peer->ksnp_proto = NULL;
+                peer_ni->ksnp_proto = NULL;
                 rc = ESTALE;
-                warn = peer->ksnp_incarnation != incarnation ?
-                       "peer rebooted" :
+                warn = peer_ni->ksnp_incarnation != incarnation ?
+                       "peer_ni rebooted" :
                        "wrong proto version";
                 goto failed_2;
         }
@@ -1206,15 +1209,15 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
        /* Refuse to duplicate an existing connection, unless this is a
         * loopback connection */
        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
-               list_for_each(tmp, &peer->ksnp_conns) {
-                       conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+               list_for_each(tmp, &peer_ni->ksnp_conns) {
+                       conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                         if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
                             conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
                             conn2->ksnc_type != conn->ksnc_type)
                                 continue;
 
-                        /* Reply on a passive connection attempt so the peer
+                        /* Reply on a passive connection attempt so the peer_ni
                          * realises we're connected. */
                         LASSERT (rc == 0);
                         if (!active)
@@ -1231,17 +1234,17 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
         if (active &&
             route->ksnr_ipaddr != conn->ksnc_ipaddr) {
                CERROR("Route %s %pI4h connected to %pI4h\n",
-                       libcfs_id2str(peer->ksnp_id),
+                       libcfs_id2str(peer_ni->ksnp_id),
                       &route->ksnr_ipaddr,
                       &conn->ksnc_ipaddr);
         }
 
        /* Search for a route corresponding to the new connection and
         * create an association.  This allows incoming connections created
-        * by routes in my peer to match my own route entries so I don't
+        * by routes in my peer_ni to match my own route entries so I don't
         * continually create duplicate routes. */
-       list_for_each(tmp, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+       list_for_each(tmp, &peer_ni->ksnp_routes) {
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
                        continue;
@@ -1250,22 +1253,32 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
                break;
        }
 
-        conn->ksnc_peer = peer;                 /* conn takes my ref on peer */
-       peer->ksnp_last_alive = ktime_get_real_seconds();
-        peer->ksnp_send_keepalive = 0;
-        peer->ksnp_error = 0;
+       conn->ksnc_peer = peer_ni;                 /* conn takes my ref on peer_ni */
+       peer_ni->ksnp_last_alive = ktime_get_seconds();
+       peer_ni->ksnp_send_keepalive = 0;
+       peer_ni->ksnp_error = 0;
 
        sched = ksocknal_choose_scheduler_locked(cpt);
+       if (!sched) {
+               CERROR("no schedulers available. node is unhealthy\n");
+               goto failed_2;
+       }
+       /*
+        * The cpt might have changed if we ended up selecting a non cpt
+        * native scheduler. So use the scheduler's cpt instead.
+        */
+       cpt = sched->kss_cpt;
         sched->kss_nconns++;
         conn->ksnc_scheduler = sched;
 
-       conn->ksnc_tx_last_post = ktime_get_real_seconds();
+       conn->ksnc_tx_last_post = ktime_get_seconds();
        /* Set the deadline for the outgoing HELLO to drain */
        conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
-       conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-       smp_mb();   /* order with adding to peer's conn list */
+       conn->ksnc_tx_deadline = ktime_get_seconds() +
+                                lnet_get_lnd_timeout();
+       smp_mb();   /* order with adding to peer_ni's conn list */
 
-       list_add(&conn->ksnc_list, &peer->ksnp_conns);
+       list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
        ksocknal_conn_addref(conn);
 
        ksocknal_new_packet(conn, 0);
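
The hunk above moves ksnp_last_alive, ksnc_tx_last_post and ksnc_tx_deadline from ktime_get_real_seconds()/cfs_time_shift() to monotonic seconds: ktime_get_seconds() plus lnet_get_lnd_timeout(). A small illustrative pair of helpers (not from the patch; the names are made up) showing how such a seconds-based deadline is set and later checked:

	/* Illustration only: deadlines held as monotonic seconds (time64_t),
	 * matching the ksnc_tx_deadline computation in the hunk above. */
	#include <linux/types.h>
	#include <linux/ktime.h>

	static time64_t set_deadline(time64_t timeout_secs)
	{
		return ktime_get_seconds() + timeout_secs;
	}

	static bool deadline_expired(time64_t deadline)
	{
		return ktime_get_seconds() >= deadline;
	}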
@@ -1273,7 +1286,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
         conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
 
        /* Take packets blocking for this connection. */
-       list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
+       list_for_each_entry_safe(tx, txtmp, &peer_ni->ksnp_tx_queue, tx_list) {
                if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) ==
                    SOCKNAL_MATCH_NO)
                        continue;
@@ -1293,24 +1306,23 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
          */
 
        CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
-              " incarnation:%lld sched[%d:%d]\n",
+              " incarnation:%lld sched[%d]\n",
               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
               &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
-              conn->ksnc_port, incarnation, cpt,
-              (int)(sched - &sched->kss_info->ksi_scheds[0]));
+              conn->ksnc_port, incarnation, cpt);
 
         if (active) {
                 /* additional routes after interface exchange? */
-                ksocknal_create_routes(peer, conn->ksnc_port,
+                ksocknal_create_routes(peer_ni, conn->ksnc_port,
                                        hello->kshm_ips, hello->kshm_nips);
         } else {
-                hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
+                hello->kshm_nips = ksocknal_select_ips(peer_ni, hello->kshm_ips,
                                                        hello->kshm_nips);
                 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
         }
 
        LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
-                                    kshm_ips[LNET_MAX_INTERFACES]));
+                                   kshm_ips[LNET_INTERFACES_NUM]));
 
         /* setup the socket AFTER I've received hello (it disables
          * SO_LINGER).  I might call back to the acceptor who may want
@@ -1326,7 +1338,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
         ksocknal_lib_set_callback(sock, conn);
 
         if (!active)
-                peer->ksnp_accepting--;
+                peer_ni->ksnp_accepting--;
 
        write_unlock_bh(global_lock);
 
@@ -1349,12 +1361,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
         return rc;
 
 failed_2:
-       if (!peer->ksnp_closing &&
-           list_empty(&peer->ksnp_conns) &&
-           list_empty(&peer->ksnp_routes)) {
-               list_add(&zombies, &peer->ksnp_tx_queue);
-               list_del_init(&peer->ksnp_tx_queue);
-               ksocknal_unlink_peer_locked(peer);
+       if (!peer_ni->ksnp_closing &&
+           list_empty(&peer_ni->ksnp_conns) &&
+           list_empty(&peer_ni->ksnp_routes)) {
+               list_add(&zombies, &peer_ni->ksnp_tx_queue);
+               list_del_init(&peer_ni->ksnp_tx_queue);
+               ksocknal_unlink_peer_locked(peer_ni);
        }
 
        write_unlock_bh(global_lock);
@@ -1378,17 +1390,23 @@ failed_2:
                 }
 
                write_lock_bh(global_lock);
-                peer->ksnp_accepting--;
+                peer_ni->ksnp_accepting--;
                write_unlock_bh(global_lock);
         }
 
-        ksocknal_txlist_done(ni, &zombies, 1);
-        ksocknal_peer_decref(peer);
+       /*
+        * If we get here without an error code, just use -EALREADY.
+        * Depending on how we got here, the error may be positive
+        * or negative. Normalize the value for ksocknal_txlist_done().
+        */
+       rc2 = (rc == 0 ? -EALREADY : (rc > 0 ? -rc : rc));
+       ksocknal_txlist_done(ni, &zombies, rc2);
+        ksocknal_peer_decref(peer_ni);
 
 failed_1:
        if (hello != NULL)
                LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
-                                           kshm_ips[LNET_MAX_INTERFACES]));
+                                           kshm_ips[LNET_INTERFACES_NUM]));
 
        LIBCFS_FREE(conn, sizeof(*conn));
 
@@ -1398,21 +1416,21 @@ failed_0:
 }
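
The failed_2 path above now folds whatever status it holds into a negative errno before handing the zombie list to ksocknal_txlist_done(). The same rule as a stand-alone helper; the function name is illustrative, not part of the patch:

	#include <linux/errno.h>

	/* Normalize a status that may be 0 or positive into a negative errno. */
	static int sample_normalize_status(int rc)
	{
		if (rc == 0)
			return -EALREADY;	/* no explicit error recorded */
		return rc > 0 ? -rc : rc;
	}
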
 
 void
-ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
+ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
 {
         /* This just does the immediate housekeeping, and queues the
          * connection for the reaper to terminate.
          * Caller holds ksnd_global_lock exclusively in irq context */
-        ksock_peer_t      *peer = conn->ksnc_peer;
-        ksock_route_t     *route;
-        ksock_conn_t      *conn2;
-       struct list_head  *tmp;
+       struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+       struct ksock_route *route;
+       struct ksock_conn *conn2;
+       struct list_head *tmp;
 
-       LASSERT(peer->ksnp_error == 0);
+       LASSERT(peer_ni->ksnp_error == 0);
        LASSERT(!conn->ksnc_closing);
        conn->ksnc_closing = 1;
 
-       /* ksnd_deathrow_conns takes over peer's ref */
+       /* ksnd_deathrow_conns takes over peer_ni's ref */
        list_del(&conn->ksnc_list);
 
        route = conn->ksnc_route;
@@ -1422,8 +1440,8 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
                LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
 
                conn2 = NULL;
-               list_for_each(tmp, &peer->ksnp_conns) {
-                       conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+               list_for_each(tmp, &peer_ni->ksnp_conns) {
+                       conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                        if (conn2->ksnc_route == route &&
                            conn2->ksnc_type == conn->ksnc_type)
@@ -1439,35 +1457,35 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
                ksocknal_route_decref(route);   /* drop conn's ref on route */
        }
 
-       if (list_empty(&peer->ksnp_conns)) {
-               /* No more connections to this peer */
+       if (list_empty(&peer_ni->ksnp_conns)) {
+               /* No more connections to this peer_ni */
 
-               if (!list_empty(&peer->ksnp_tx_queue)) {
-                               ksock_tx_t *tx;
+               if (!list_empty(&peer_ni->ksnp_tx_queue)) {
+                               struct ksock_tx *tx;
 
                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
 
                        /* throw them to the last connection...,
                          * these TXs will be sent to /dev/null by scheduler */
-                       list_for_each_entry(tx, &peer->ksnp_tx_queue,
+                       list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
                                            tx_list)
                                ksocknal_tx_prep(conn, tx);
 
                        spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
-                       list_splice_init(&peer->ksnp_tx_queue,
+                       list_splice_init(&peer_ni->ksnp_tx_queue,
                                         &conn->ksnc_tx_queue);
                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
                }
 
                /* renegotiate protocol version */
-               peer->ksnp_proto = NULL;
+               peer_ni->ksnp_proto = NULL;
                /* stash last conn close reason */
-               peer->ksnp_error = error;
+               peer_ni->ksnp_error = error;
 
-               if (list_empty(&peer->ksnp_routes)) {
+               if (list_empty(&peer_ni->ksnp_routes)) {
                        /* I've just closed last conn belonging to a
-                        * peer with no routes to it */
-                       ksocknal_unlink_peer_locked(peer);
+                        * peer_ni with no routes to it */
+                       ksocknal_unlink_peer_locked(peer_ni);
                }
        }
 
@@ -1481,47 +1499,47 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
 }
 
 void
-ksocknal_peer_failed (ksock_peer_t *peer)
+ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
 {
-        int        notify = 0;
-        cfs_time_t last_alive = 0;
+       int notify = 0;
+       time64_t last_alive = 0;
 
        /* There has been a connection failure or comms error; but I'll only
-        * tell LNET I think the peer is dead if it's to another kernel and
+        * tell LNET I think the peer_ni is dead if it's to another kernel and
         * there are no connections or connection attempts in existence. */
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
-       if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
-            list_empty(&peer->ksnp_conns) &&
-            peer->ksnp_accepting == 0 &&
-            ksocknal_find_connecting_route_locked(peer) == NULL) {
+       if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
+            list_empty(&peer_ni->ksnp_conns) &&
+            peer_ni->ksnp_accepting == 0 &&
+            ksocknal_find_connecting_route_locked(peer_ni) == NULL) {
                notify = 1;
-               last_alive = peer->ksnp_last_alive;
+               last_alive = peer_ni->ksnp_last_alive;
        }
 
        read_unlock(&ksocknal_data.ksnd_global_lock);
 
        if (notify)
-               lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
+               lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
                            last_alive);
 }
 
 void
-ksocknal_finalize_zcreq(ksock_conn_t *conn)
+ksocknal_finalize_zcreq(struct ksock_conn *conn)
 {
-       ksock_peer_t     *peer = conn->ksnc_peer;
-       ksock_tx_t       *tx;
-       ksock_tx_t       *tmp;
-       struct list_head  zlist = LIST_HEAD_INIT(zlist);
+       struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+       struct ksock_tx *tx;
+       struct ksock_tx *tmp;
+       struct list_head zlist = LIST_HEAD_INIT(zlist);
 
        /* NB safe to finalize TXs because closing of socket will
         * abort all buffered data */
        LASSERT(conn->ksnc_sock == NULL);
 
-       spin_lock(&peer->ksnp_lock);
+       spin_lock(&peer_ni->ksnp_lock);
 
-       list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
+       list_for_each_entry_safe(tx, tmp, &peer_ni->ksnp_zc_req_list, tx_zc_list) {
                if (tx->tx_conn != conn)
                        continue;
 
@@ -1533,10 +1551,10 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
                list_add(&tx->tx_zc_list, &zlist);
        }
 
-       spin_unlock(&peer->ksnp_lock);
+       spin_unlock(&peer_ni->ksnp_lock);
 
        while (!list_empty(&zlist)) {
-               tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+               tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
 
                list_del(&tx->tx_zc_list);
                ksocknal_tx_decref(tx);
@@ -1544,15 +1562,15 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
 }
 
 void
-ksocknal_terminate_conn(ksock_conn_t *conn)
+ksocknal_terminate_conn(struct ksock_conn *conn)
 {
         /* This gets called by the reaper (guaranteed thread context) to
          * disengage the socket from its callbacks and close it.
          * ksnc_refcount will eventually hit zero, and then the reaper will
          * destroy it. */
-        ksock_peer_t     *peer = conn->ksnc_peer;
-        ksock_sched_t    *sched = conn->ksnc_scheduler;
-        int               failed = 0;
+       struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+       struct ksock_sched *sched = conn->ksnc_scheduler;
+       int failed = 0;
 
         LASSERT(conn->ksnc_closing);
 
@@ -1584,17 +1602,17 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
          * scheduler yet, but it _has_ committed to terminate... */
         conn->ksnc_scheduler->kss_nconns--;
 
-        if (peer->ksnp_error != 0) {
-                /* peer's last conn closed in error */
-               LASSERT(list_empty(&peer->ksnp_conns));
+        if (peer_ni->ksnp_error != 0) {
+                /* peer_ni's last conn closed in error */
+               LASSERT(list_empty(&peer_ni->ksnp_conns));
                 failed = 1;
-                peer->ksnp_error = 0;     /* avoid multiple notifications */
+                peer_ni->ksnp_error = 0;     /* avoid multiple notifications */
         }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         if (failed)
-                ksocknal_peer_failed(peer);
+                ksocknal_peer_failed(peer_ni);
 
         /* The socket is closed on the final put; either here, or in
          * ksocknal_{send,recv}msg().  Since we set up the linger2 option
@@ -1605,10 +1623,9 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
 }
 
 void
-ksocknal_queue_zombie_conn (ksock_conn_t *conn)
+ksocknal_queue_zombie_conn(struct ksock_conn *conn)
 {
        /* Queue the conn for the reaper to destroy */
-
        LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -1619,9 +1636,9 @@ ksocknal_queue_zombie_conn (ksock_conn_t *conn)
 }
 
 void
-ksocknal_destroy_conn (ksock_conn_t *conn)
+ksocknal_destroy_conn(struct ksock_conn *conn)
 {
-       cfs_time_t      last_rcv;
+       time64_t last_rcv;
 
        /* Final coup-de-grace of the reaper */
        CDEBUG (D_NET, "connection %p\n", conn);
@@ -1638,18 +1655,19 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
         switch (conn->ksnc_rx_state) {
         case SOCKNAL_RX_LNET_PAYLOAD:
                 last_rcv = conn->ksnc_rx_deadline -
-                           cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
+                          lnet_get_lnd_timeout();
                CERROR("Completing partial receive from %s[%d], "
                       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
-                       "last alive is %ld secs ago\n",
+                      "last alive is %lld secs ago\n",
                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
                       &conn->ksnc_ipaddr, conn->ksnc_port,
                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
-                      cfs_duration_sec(cfs_time_sub(ktime_get_real_seconds(),
-                                        last_rcv)));
-                lnet_finalize (conn->ksnc_peer->ksnp_ni,
-                               conn->ksnc_cookie, -EIO);
-                break;
+                      ktime_get_seconds() - last_rcv);
+               if (conn->ksnc_lnet_msg)
+                       conn->ksnc_lnet_msg->msg_health_status =
+                               LNET_MSG_STATUS_REMOTE_ERROR;
+               lnet_finalize(conn->ksnc_lnet_msg, -EIO);
+               break;
         case SOCKNAL_RX_LNET_HEADER:
                 if (conn->ksnc_rx_started)
                        CERROR("Incomplete receive of lnet header from %s, "
@@ -1684,15 +1702,15 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
 }
 
 int
-ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
+ksocknal_close_peer_conns_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr, int why)
 {
-        ksock_conn_t       *conn;
-       struct list_head         *ctmp;
-       struct list_head         *cnxt;
-        int                 count = 0;
+       struct ksock_conn *conn;
+       struct list_head *ctmp;
+       struct list_head *cnxt;
+       int count = 0;
 
-       list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
-               conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+       list_for_each_safe(ctmp, cnxt, &peer_ni->ksnp_conns) {
+               conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
 
                 if (ipaddr == 0 ||
                     conn->ksnc_ipaddr == ipaddr) {
@@ -1705,15 +1723,15 @@ ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
 }
 
 int
-ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
+ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
 {
-        ksock_peer_t     *peer = conn->ksnc_peer;
-        __u32             ipaddr = conn->ksnc_ipaddr;
-        int               count;
+       struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
+       u32 ipaddr = conn->ksnc_ipaddr;
+       int count;
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-        count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
+        count = ksocknal_close_peer_conns_locked (peer_ni, ipaddr, why);
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -1721,15 +1739,15 @@ ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
 }
 
 int
-ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
+ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
 {
-        ksock_peer_t       *peer;
-       struct list_head         *ptmp;
-       struct list_head         *pnxt;
-        int                 lo;
-        int                 hi;
-        int                 i;
-        int                 count = 0;
+       struct ksock_peer_ni *peer_ni;
+       struct list_head *ptmp;
+       struct list_head *pnxt;
+       int lo;
+       int hi;
+       int i;
+       int count = 0;
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -1743,13 +1761,13 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
         for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
 
-                       peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                       peer_ni = list_entry(ptmp, struct ksock_peer_ni, ksnp_list);
 
-                        if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
-                              (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
+                        if (!((id.nid == LNET_NID_ANY || id.nid == peer_ni->ksnp_id.nid) &&
+                              (id.pid == LNET_PID_ANY || id.pid == peer_ni->ksnp_id.pid)))
                                 continue;
 
-                        count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0);
+                        count += ksocknal_close_peer_conns_locked (peer_ni, ipaddr, 0);
                 }
         }
 
@@ -1763,14 +1781,15 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
 }
 
 void
-ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
+ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
 {
-        /* The router is telling me she's been notified of a change in
-         * gateway state.... */
-        lnet_process_id_t  id = {0};
-
-        id.nid = gw_nid;
-        id.pid = LNET_PID_ANY;
+       /* The router is telling me she's been notified of a change in
+        * gateway state....
+        */
+       struct lnet_process_id id = {
+               .nid    = gw_nid,
+               .pid    = LNET_PID_ANY,
+       };
 
         CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
                 alive ? "up" : "down");
@@ -1786,52 +1805,52 @@ ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
 }
 
 void
-ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
+ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
 {
        int connect = 1;
        time64_t last_alive = 0;
-       time64_t now = ktime_get_real_seconds();
-       ksock_peer_t *peer = NULL;
+       time64_t now = ktime_get_seconds();
+       struct ksock_peer_ni *peer_ni = NULL;
        rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
-       lnet_process_id_t id = {
+       struct lnet_process_id id = {
                .nid = nid,
                .pid = LNET_PID_LUSTRE,
        };
 
        read_lock(glock);
 
-        peer = ksocknal_find_peer_locked(ni, id);
-        if (peer != NULL) {
-               struct list_head       *tmp;
-                ksock_conn_t     *conn;
-                int               bufnob;
+       peer_ni = ksocknal_find_peer_locked(ni, id);
+       if (peer_ni != NULL) {
+               struct list_head *tmp;
+               struct ksock_conn *conn;
+               int bufnob;
 
-               list_for_each(tmp, &peer->ksnp_conns) {
-                       conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+               list_for_each(tmp, &peer_ni->ksnp_conns) {
+                       conn = list_entry(tmp, struct ksock_conn, ksnc_list);
                        bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
 
-                        if (bufnob < conn->ksnc_tx_bufnob) {
-                                /* something got ACKed */
-                                conn->ksnc_tx_deadline =
-                                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
-                                peer->ksnp_last_alive = now;
+                       if (bufnob < conn->ksnc_tx_bufnob) {
+                               /* something got ACKed */
+                               conn->ksnc_tx_deadline = ktime_get_seconds() +
+                                                        lnet_get_lnd_timeout();
+                                peer_ni->ksnp_last_alive = now;
                                 conn->ksnc_tx_bufnob = bufnob;
                         }
                 }
 
-                last_alive = peer->ksnp_last_alive;
-                if (ksocknal_find_connectable_route_locked(peer) == NULL)
+                last_alive = peer_ni->ksnp_last_alive;
+                if (ksocknal_find_connectable_route_locked(peer_ni) == NULL)
                         connect = 0;
         }
 
        read_unlock(glock);
 
         if (last_alive != 0)
-                *when = last_alive;
+               *when = last_alive;
 
-        CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
-               libcfs_nid2str(nid), peer,
-               last_alive ? cfs_duration_sec(now - last_alive) : -1,
+       CDEBUG(D_NET, "peer_ni %s %p, alive %lld secs ago, connect %d\n",
+               libcfs_nid2str(nid), peer_ni,
+              last_alive ? now - last_alive : -1,
                connect);
 
         if (!connect)
@@ -1841,21 +1860,21 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
 
        write_lock_bh(glock);
 
-        peer = ksocknal_find_peer_locked(ni, id);
-        if (peer != NULL)
-                ksocknal_launch_all_connections_locked(peer);
+        peer_ni = ksocknal_find_peer_locked(ni, id);
+        if (peer_ni != NULL)
+                ksocknal_launch_all_connections_locked(peer_ni);
 
        write_unlock_bh(glock);
         return;
 }
 
 static void
-ksocknal_push_peer (ksock_peer_t *peer)
+ksocknal_push_peer(struct ksock_peer_ni *peer_ni)
 {
-        int               index;
-        int               i;
-       struct list_head       *tmp;
-        ksock_conn_t     *conn;
+       int index;
+       int i;
+       struct list_head *tmp;
+       struct ksock_conn *conn;
 
         for (index = 0; ; index++) {
                read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1863,10 +1882,10 @@ ksocknal_push_peer (ksock_peer_t *peer)
                 i = 0;
                 conn = NULL;
 
-               list_for_each(tmp, &peer->ksnp_conns) {
+               list_for_each(tmp, &peer_ni->ksnp_conns) {
                         if (i++ == index) {
-                               conn = list_entry(tmp, ksock_conn_t,
-                                                       ksnc_list);
+                               conn = list_entry(tmp, struct ksock_conn,
+                                                 ksnc_list);
                                 ksocknal_conn_addref(conn);
                                 break;
                         }
@@ -1883,7 +1902,7 @@ ksocknal_push_peer (ksock_peer_t *peer)
 }
 
 static int
-ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
 {
        struct list_head *start;
        struct list_head *end;
@@ -1899,22 +1918,22 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
        }
 
        for (tmp = start; tmp <= end; tmp++) {
-               int     peer_off; /* searching offset in peer hash table */
+               int     peer_off; /* searching offset in peer_ni hash table */
 
                for (peer_off = 0; ; peer_off++) {
-                       ksock_peer_t *peer;
+                       struct ksock_peer_ni *peer_ni;
                        int           i = 0;
 
                        read_lock(&ksocknal_data.ksnd_global_lock);
-                       list_for_each_entry(peer, tmp, ksnp_list) {
+                       list_for_each_entry(peer_ni, tmp, ksnp_list) {
                                if (!((id.nid == LNET_NID_ANY ||
-                                      id.nid == peer->ksnp_id.nid) &&
+                                      id.nid == peer_ni->ksnp_id.nid) &&
                                      (id.pid == LNET_PID_ANY ||
-                                      id.pid == peer->ksnp_id.pid)))
+                                      id.pid == peer_ni->ksnp_id.pid)))
                                        continue;
 
                                if (i++ == peer_off) {
-                                       ksocknal_peer_addref(peer);
+                                       ksocknal_peer_addref(peer_ni);
                                        break;
                                }
                        }
@@ -1924,96 +1943,96 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
                                break;
 
                        rc = 0;
-                       ksocknal_push_peer(peer);
-                       ksocknal_peer_decref(peer);
+                       ksocknal_push_peer(peer_ni);
+                       ksocknal_peer_decref(peer_ni);
                }
        }
        return rc;
 }
 
 static int
-ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
+ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
 {
-        ksock_net_t       *net = ni->ni_data;
-        ksock_interface_t *iface;
-        int                rc;
-        int                i;
-        int                j;
-       struct list_head        *ptmp;
-        ksock_peer_t      *peer;
-       struct list_head        *rtmp;
-        ksock_route_t     *route;
-
-        if (ipaddress == 0 ||
-            netmask == 0)
-                return (-EINVAL);
+       struct ksock_net *net = ni->ni_data;
+       struct ksock_interface *iface;
+       int rc;
+       int i;
+       int j;
+       struct list_head *ptmp;
+       struct ksock_peer_ni *peer_ni;
+       struct list_head *rtmp;
+       struct ksock_route *route;
+
+       if (ipaddress == 0 ||
+           netmask == 0)
+               return -EINVAL;
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-        iface = ksocknal_ip2iface(ni, ipaddress);
-        if (iface != NULL) {
-                /* silently ignore dups */
-                rc = 0;
-        } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
-                rc = -ENOSPC;
-        } else {
-                iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
+       iface = ksocknal_ip2iface(ni, ipaddress);
+       if (iface != NULL) {
+               /* silently ignore dups */
+               rc = 0;
+       } else if (net->ksnn_ninterfaces == LNET_INTERFACES_NUM) {
+               rc = -ENOSPC;
+       } else {
+               iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
 
-                iface->ksni_ipaddr = ipaddress;
-                iface->ksni_netmask = netmask;
-                iface->ksni_nroutes = 0;
-                iface->ksni_npeers = 0;
+               iface->ksni_ipaddr = ipaddress;
+               iface->ksni_netmask = netmask;
+               iface->ksni_nroutes = 0;
+               iface->ksni_npeers = 0;
 
-                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+               for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                        list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                               peer = list_entry(ptmp, ksock_peer_t,
-                                                      ksnp_list);
+                               peer_ni = list_entry(ptmp, struct ksock_peer_ni,
+                                                    ksnp_list);
 
-                                for (j = 0; j < peer->ksnp_n_passive_ips; j++)
-                                        if (peer->ksnp_passive_ips[j] == ipaddress)
-                                                iface->ksni_npeers++;
+                               for (j = 0; j < peer_ni->ksnp_n_passive_ips; j++)
+                                       if (peer_ni->ksnp_passive_ips[j] == ipaddress)
+                                               iface->ksni_npeers++;
 
-                               list_for_each(rtmp, &peer->ksnp_routes) {
+                               list_for_each(rtmp, &peer_ni->ksnp_routes) {
                                        route = list_entry(rtmp,
-                                                               ksock_route_t,
-                                                               ksnr_list);
+                                                          struct ksock_route,
+                                                          ksnr_list);
 
-                                        if (route->ksnr_myipaddr == ipaddress)
-                                                iface->ksni_nroutes++;
-                                }
-                        }
-                }
+                                       if (route->ksnr_myipaddr == ipaddress)
+                                               iface->ksni_nroutes++;
+                               }
+                       }
+               }
 
-                rc = 0;
-                /* NB only new connections will pay attention to the new interface! */
-        }
+               rc = 0;
+               /* NB only new connections will pay attention to the new interface! */
+       }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-        return (rc);
+       return rc;
 }
 
 static void
-ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_peer_del_interface_locked(struct ksock_peer_ni *peer_ni, __u32 ipaddr)
 {
-       struct list_head         *tmp;
-       struct list_head         *nxt;
-        ksock_route_t      *route;
-        ksock_conn_t       *conn;
-        int                 i;
-        int                 j;
-
-        for (i = 0; i < peer->ksnp_n_passive_ips; i++)
-                if (peer->ksnp_passive_ips[i] == ipaddr) {
-                        for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
-                                peer->ksnp_passive_ips[j-1] =
-                                        peer->ksnp_passive_ips[j];
-                        peer->ksnp_n_passive_ips--;
+       struct list_head *tmp;
+       struct list_head *nxt;
+       struct ksock_route *route;
+       struct ksock_conn *conn;
+       int i;
+       int j;
+
+        for (i = 0; i < peer_ni->ksnp_n_passive_ips; i++)
+                if (peer_ni->ksnp_passive_ips[i] == ipaddr) {
+                        for (j = i+1; j < peer_ni->ksnp_n_passive_ips; j++)
+                                peer_ni->ksnp_passive_ips[j-1] =
+                                        peer_ni->ksnp_passive_ips[j];
+                        peer_ni->ksnp_n_passive_ips--;
                         break;
                 }
 
-       list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+       list_for_each_safe(tmp, nxt, &peer_ni->ksnp_routes) {
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                 if (route->ksnr_myipaddr != ipaddr)
                         continue;
@@ -2026,8 +2045,8 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
                 }
         }
 
-       list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
-               conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+       list_for_each_safe(tmp, nxt, &peer_ni->ksnp_conns) {
+               conn = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                 if (conn->ksnc_myipaddr == ipaddr)
                         ksocknal_close_conn_locked (conn, 0);
@@ -2035,16 +2054,16 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
 }
 
 static int
-ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
+ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
 {
-        ksock_net_t       *net = ni->ni_data;
-        int                rc = -ENOENT;
-       struct list_head        *tmp;
-       struct list_head        *nxt;
-        ksock_peer_t      *peer;
-        __u32              this_ip;
-        int                i;
-        int                j;
+       struct ksock_net *net = ni->ni_data;
+       int rc = -ENOENT;
+       struct list_head *tmp;
+       struct list_head *nxt;
+       struct ksock_peer_ni *peer_ni;
+       u32 this_ip;
+       int i;
+       int j;
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
@@ -2065,14 +2084,14 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
 
                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
                        list_for_each_safe(tmp, nxt,
-                                               &ksocknal_data.ksnd_peers[j]) {
-                               peer = list_entry(tmp, ksock_peer_t,
-                                                      ksnp_list);
+                                          &ksocknal_data.ksnd_peers[j]) {
+                               peer_ni = list_entry(tmp, struct ksock_peer_ni,
+                                                    ksnp_list);
 
-                                if (peer->ksnp_ni != ni)
+                                if (peer_ni->ksnp_ni != ni)
                                         continue;
 
-                                ksocknal_peer_del_interface_locked(peer, this_ip);
+                                ksocknal_peer_del_interface_locked(peer_ni, this_ip);
                         }
                 }
         }
@@ -2083,16 +2102,16 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
 }
 
 int
-ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
 {
-       lnet_process_id_t id = {0};
+       struct lnet_process_id id = {0};
         struct libcfs_ioctl_data *data = arg;
         int rc;
 
         switch(cmd) {
         case IOC_LIBCFS_GET_INTERFACE: {
-                ksock_net_t       *net = ni->ni_data;
-                ksock_interface_t *iface;
+               struct ksock_net *net = ni->ni_data;
+               struct ksock_interface *iface;
 
                read_lock(&ksocknal_data.ksnd_global_lock);
 
@@ -2161,7 +2180,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                 int           txmem;
                 int           rxmem;
                 int           nagle;
-                ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
+               struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
 
                 if (conn == NULL)
                         return -ENOENT;
@@ -2175,7 +2194,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                 data->ioc_u32[1] = conn->ksnc_port;
                 data->ioc_u32[2] = conn->ksnc_myipaddr;
                 data->ioc_u32[3] = conn->ksnc_type;
-               data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
+               data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
                 data->ioc_u32[5] = rxmem;
                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
                 ksocknal_conn_decref(conn);
@@ -2214,19 +2233,8 @@ ksocknal_free_buffers (void)
 {
        LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
 
-       if (ksocknal_data.ksnd_sched_info != NULL) {
-               struct ksock_sched_info *info;
-               int                     i;
-
-               cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-                       if (info->ksi_scheds != NULL) {
-                               LIBCFS_FREE(info->ksi_scheds,
-                                           info->ksi_nthreads_max *
-                                           sizeof(info->ksi_scheds[0]));
-                       }
-               }
-               cfs_percpt_free(ksocknal_data.ksnd_sched_info);
-       }
+       if (ksocknal_data.ksnd_schedulers != NULL)
+               cfs_percpt_free(ksocknal_data.ksnd_schedulers);
 
         LIBCFS_FREE (ksocknal_data.ksnd_peers,
                     sizeof(struct list_head) *
@@ -2235,15 +2243,15 @@ ksocknal_free_buffers (void)
        spin_lock(&ksocknal_data.ksnd_tx_lock);
 
        if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-               struct list_head        zlist;
-               ksock_tx_t      *tx;
+               struct list_head zlist;
+               struct ksock_tx *tx;
 
                list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
                list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
                spin_unlock(&ksocknal_data.ksnd_tx_lock);
 
                while (!list_empty(&zlist)) {
-                       tx = list_entry(zlist.next, ksock_tx_t, tx_list);
+                       tx = list_entry(zlist.next, struct ksock_tx, tx_list);
                        list_del(&tx->tx_list);
                        LIBCFS_FREE(tx, tx->tx_desc_size);
                }
@@ -2255,10 +2263,8 @@ ksocknal_free_buffers (void)
 static void
 ksocknal_base_shutdown(void)
 {
-       struct ksock_sched_info *info;
-       ksock_sched_t           *sched;
-       int                     i;
-       int                     j;
+       struct ksock_sched *sched;
+       int i;
 
        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read (&libcfs_kmemory));
@@ -2281,23 +2287,14 @@ ksocknal_base_shutdown(void)
                LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
                LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
 
-               if (ksocknal_data.ksnd_sched_info != NULL) {
-                       cfs_percpt_for_each(info, i,
-                                           ksocknal_data.ksnd_sched_info) {
-                               if (info->ksi_scheds == NULL)
-                                       continue;
-
-                               for (j = 0; j < info->ksi_nthreads_max; j++) {
+               if (ksocknal_data.ksnd_schedulers != NULL) {
+                       cfs_percpt_for_each(sched, i,
+                                           ksocknal_data.ksnd_schedulers) {
 
-                                       sched = &info->ksi_scheds[j];
-                                       LASSERT(list_empty(&sched->\
-                                                              kss_tx_conns));
-                                       LASSERT(list_empty(&sched->\
-                                                              kss_rx_conns));
-                                       LASSERT(list_empty(&sched-> \
-                                                 kss_zombie_noop_txs));
-                                       LASSERT(sched->kss_nconns == 0);
-                               }
+                               LASSERT(list_empty(&sched->kss_tx_conns));
+                               LASSERT(list_empty(&sched->kss_rx_conns));
+                               LASSERT(list_empty(&sched->kss_zombie_noop_txs));
+                               LASSERT(sched->kss_nconns == 0);
                        }
                }
 
@@ -2306,17 +2303,10 @@ ksocknal_base_shutdown(void)
                wake_up_all(&ksocknal_data.ksnd_connd_waitq);
                wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
 
-               if (ksocknal_data.ksnd_sched_info != NULL) {
-                       cfs_percpt_for_each(info, i,
-                                           ksocknal_data.ksnd_sched_info) {
-                               if (info->ksi_scheds == NULL)
-                                       continue;
-
-                               for (j = 0; j < info->ksi_nthreads_max; j++) {
-                                       sched = &info->ksi_scheds[j];
+               if (ksocknal_data.ksnd_schedulers != NULL) {
+                       cfs_percpt_for_each(sched, i,
+                                           ksocknal_data.ksnd_schedulers)
                                        wake_up_all(&sched->kss_waitq);
-                               }
-                       }
                }
 
                i = 4;
@@ -2346,26 +2336,12 @@ ksocknal_base_shutdown(void)
        module_put(THIS_MODULE);
 }
 
-static __u64 ksocknal_new_incarnation(void)
-{
-       struct timeval tv;
-
-       /* The incarnation number is the time this module loaded and it
-        * identifies this particular instance of the socknal.  Hopefully
-        * we won't be able to reboot more frequently than 1MHz for the
-        * forseeable future :) */
-
-       do_gettimeofday(&tv);
-
-       return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
-}
-
 static int
 ksocknal_base_startup(void)
 {
-       struct ksock_sched_info *info;
-       int                     rc;
-       int                     i;
+       struct ksock_sched *sched;
+       int rc;
+       int i;
 
         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
         LASSERT (ksocknal_data.ksnd_nnets == 0);
@@ -2405,42 +2381,38 @@ ksocknal_base_startup(void)
        ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
        try_module_get(THIS_MODULE);
 
-       ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
-                                                        sizeof(*info));
-       if (ksocknal_data.ksnd_sched_info == NULL)
+       /* Create a scheduler block per available CPT */
+       ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
+                                                        sizeof(*sched));
+       if (ksocknal_data.ksnd_schedulers == NULL)
                goto failed;
 
-       cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-               ksock_sched_t   *sched;
-               int             nthrs;
+       cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
+               int nthrs;
 
+               /*
+                * make sure not to allocate more threads than there are
+                * cores/CPUs in the CPT
+                */
                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
                if (*ksocknal_tunables.ksnd_nscheds > 0) {
                        nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
                } else {
-                       /* max to half of CPUs, assume another half should be
-                        * reserved for upper layer modules */
+                       /*
+                        * max to half of CPUs, assume another half should be
+                        * reserved for upper layer modules
+                        */
                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
                }
 
-               info->ksi_nthreads_max = nthrs;
-               info->ksi_cpt = i;
-
-               LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
-                                info->ksi_nthreads_max * sizeof(*sched));
-               if (info->ksi_scheds == NULL)
-                       goto failed;
-
-               for (; nthrs > 0; nthrs--) {
-                       sched = &info->ksi_scheds[nthrs - 1];
+               sched->kss_nthreads_max = nthrs;
+               sched->kss_cpt = i;
 
-                       sched->kss_info = info;
-                       spin_lock_init(&sched->kss_lock);
-                       INIT_LIST_HEAD(&sched->kss_rx_conns);
-                       INIT_LIST_HEAD(&sched->kss_tx_conns);
-                       INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
-                       init_waitqueue_head(&sched->kss_waitq);
-               }
+               spin_lock_init(&sched->kss_lock);
+               INIT_LIST_HEAD(&sched->kss_rx_conns);
+               INIT_LIST_HEAD(&sched->kss_tx_conns);
+               INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
+               init_waitqueue_head(&sched->kss_waitq);
         }
 
         ksocknal_data.ksnd_connd_starting         = 0;
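
The per-CPT scheduler setup above sizes each thread pool from the CPT weight. A stand-alone sketch of the same clamp; the helper name is mine, and it assumes SOCKNAL_NSCHEDS is the small lower bound defined in socklnd.h:

	#include <linux/kernel.h>	/* min(), max() */

	/* Mirrors the nthrs computation in ksocknal_base_startup(). */
	static int sample_sched_nthreads(int cpt_weight, int tunable_nscheds)
	{
		int nthrs = cpt_weight;

		if (tunable_nscheds > 0)
			return min(nthrs, tunable_nscheds);
		/* cap at half the cores, but never fewer than the floor */
		return min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
	}
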
@@ -2493,48 +2465,48 @@ ksocknal_base_startup(void)
 }
 
 static void
-ksocknal_debug_peerhash (lnet_ni_t *ni)
+ksocknal_debug_peerhash(struct lnet_ni *ni)
 {
-       ksock_peer_t    *peer = NULL;
-       struct list_head        *tmp;
-       int             i;
+       struct ksock_peer_ni *peer_ni = NULL;
+       struct list_head *tmp;
+       int i;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+                       peer_ni = list_entry(tmp, struct ksock_peer_ni, ksnp_list);
 
-                        if (peer->ksnp_ni == ni) break;
+                        if (peer_ni->ksnp_ni == ni) break;
 
-                        peer = NULL;
+                        peer_ni = NULL;
                 }
         }
 
-        if (peer != NULL) {
-                ksock_route_t *route;
-                ksock_conn_t  *conn;
+        if (peer_ni != NULL) {
+               struct ksock_route *route;
+               struct ksock_conn  *conn;
 
-               CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
+               CWARN ("Active peer_ni on shutdown: %s, ref %d, scnt %d, "
                       "closing %d, accepting %d, err %d, zcookie %llu, "
-                      "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
-                      atomic_read(&peer->ksnp_refcount),
-                      peer->ksnp_sharecount, peer->ksnp_closing,
-                      peer->ksnp_accepting, peer->ksnp_error,
-                      peer->ksnp_zc_next_cookie,
-                      !list_empty(&peer->ksnp_tx_queue),
-                      !list_empty(&peer->ksnp_zc_req_list));
-
-               list_for_each(tmp, &peer->ksnp_routes) {
-                       route = list_entry(tmp, ksock_route_t, ksnr_list);
+                      "txq %d, zc_req %d\n", libcfs_id2str(peer_ni->ksnp_id),
+                      atomic_read(&peer_ni->ksnp_refcount),
+                      peer_ni->ksnp_sharecount, peer_ni->ksnp_closing,
+                      peer_ni->ksnp_accepting, peer_ni->ksnp_error,
+                      peer_ni->ksnp_zc_next_cookie,
+                      !list_empty(&peer_ni->ksnp_tx_queue),
+                      !list_empty(&peer_ni->ksnp_zc_req_list));
+
+               list_for_each(tmp, &peer_ni->ksnp_routes) {
+                       route = list_entry(tmp, struct ksock_route, ksnr_list);
                        CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
                               "del %d\n", atomic_read(&route->ksnr_refcount),
                               route->ksnr_scheduled, route->ksnr_connecting,
                               route->ksnr_connected, route->ksnr_deleted);
                }
 
-               list_for_each(tmp, &peer->ksnp_conns) {
-                       conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+               list_for_each(tmp, &peer_ni->ksnp_conns) {
+                       conn = list_entry(tmp, struct ksock_conn, ksnc_list);
                        CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
                               atomic_read(&conn->ksnc_conn_refcount),
                               atomic_read(&conn->ksnc_sock_refcount),
@@ -2547,14 +2519,14 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
 }
 
 void
-ksocknal_shutdown (lnet_ni_t *ni)
+ksocknal_shutdown(struct lnet_ni *ni)
 {
-        ksock_net_t      *net = ni->ni_data;
-        int               i;
-        lnet_process_id_t anyid = {0};
-
-        anyid.nid =  LNET_NID_ANY;
-        anyid.pid =  LNET_PID_ANY;
+       struct ksock_net *net = ni->ni_data;
+       struct lnet_process_id anyid = {
+               .nid = LNET_NID_ANY,
+               .pid = LNET_PID_ANY,
+       };
+       int i;
 
         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
         LASSERT(ksocknal_data.ksnd_nnets > 0);
@@ -2566,7 +2538,7 @@ ksocknal_shutdown (lnet_ni_t *ni)
        /* Delete all peers */
        ksocknal_del_peer(ni, anyid, 0);
 
-       /* Wait for all peer state to clean up */
+       /* Wait for all peer_ni state to clean up */
        i = 2;
        spin_lock_bh(&net->ksnn_lock);
        while (net->ksnn_npeers != 0) {
@@ -2599,74 +2571,73 @@ ksocknal_shutdown (lnet_ni_t *ni)
 }
 
 static int
-ksocknal_enumerate_interfaces(ksock_net_t *net)
+ksocknal_enumerate_interfaces(struct ksock_net *net, char *iname)
 {
-        char      **names;
-        int         i;
-        int         j;
-        int         rc;
-        int         n;
-
-       n = lnet_ipif_enumerate(&names);
-        if (n <= 0) {
-                CERROR("Can't enumerate interfaces: %d\n", n);
-                return n;
-        }
+       struct net_device *dev;
 
-        for (i = j = 0; i < n; i++) {
-                int        up;
-                __u32      ip;
-                __u32      mask;
+       rtnl_lock();
+       for_each_netdev(&init_net, dev) {
+               /* The iname specified by a user-land configuration can
+                * map to an ifa_label, so always treat iname as an ifa_label.
+                * If iname is NULL, fall back to the net device name.
+                */
+               const char *name = iname ? iname : dev->name;
+               struct in_device *in_dev;
 
-                if (!strcmp(names[i], "lo")) /* skip the loopback IF */
-                        continue;
+               if (strcmp(dev->name, "lo") == 0) /* skip the loopback IF */
+                       continue;
 
-               rc = lnet_ipif_query(names[i], &up, &ip, &mask);
-                if (rc != 0) {
-                        CWARN("Can't get interface %s info: %d\n",
-                              names[i], rc);
-                        continue;
-                }
+               if (!(dev_get_flags(dev) & IFF_UP)) {
+                       CWARN("Ignoring interface %s (down)\n", dev->name);
+                       continue;
+               }
 
-                if (!up) {
-                        CWARN("Ignoring interface %s (down)\n",
-                              names[i]);
-                        continue;
-                }
+               in_dev = __in_dev_get_rtnl(dev);
+               if (!in_dev) {
+                       CWARN("Interface %s has no IPv4 status.\n", dev->name);
+                       continue;
+               }
 
-                if (j == LNET_MAX_INTERFACES) {
-                        CWARN("Ignoring interface %s (too many interfaces)\n",
-                              names[i]);
-                        continue;
-                }
+               for_ifa(in_dev)
+                       if (strcmp(name, ifa->ifa_label) == 0) {
+                               int idx = net->ksnn_ninterfaces;
+                               struct ksock_interface *ksi;
 
-                net->ksnn_interfaces[j].ksni_ipaddr = ip;
-                net->ksnn_interfaces[j].ksni_netmask = mask;
-               strlcpy(net->ksnn_interfaces[j].ksni_name,
-                       names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
-                j++;
-        }
+                               if (idx >= ARRAY_SIZE(net->ksnn_interfaces)) {
+                                       rtnl_unlock();
+                                       return -E2BIG;
+                               }
 
-       lnet_ipif_free_enumeration(names, n);
+                               ksi = &net->ksnn_interfaces[idx];
+                               ksi->ksni_ipaddr = ntohl(ifa->ifa_local);
+                               ksi->ksni_netmask = ifa->ifa_mask;
+                               strlcpy(ksi->ksni_name,
+                                       name, sizeof(ksi->ksni_name));
+                               net->ksnn_ninterfaces++;
+                               break;
+                       }
+               endfor_ifa(in_dev);
+        }
+       rtnl_unlock();
 
-        if (j == 0)
+       if (net->ksnn_ninterfaces == 0)
                 CERROR("Can't find any usable interfaces\n");
 
-        return j;
+       return net->ksnn_ninterfaces > 0 ? 0 : -ENOENT;
 }
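
The rewritten ksocknal_enumerate_interfaces() above is the heart of the secondary-IP handling: IPv4 addresses are matched by ifa_label under rtnl_lock(), so an alias label such as "eth0:1" can back its own interface entry. A reduced sketch of that lookup; the helper name is illustrative and not part of the patch:

	#include <linux/inetdevice.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	/* Return the host-order IPv4 address whose label matches, or 0.
	 * The caller must hold rtnl_lock(), as the code above does. */
	static __u32 sample_label_to_ip(struct net_device *dev, const char *label)
	{
		struct in_device *in_dev = __in_dev_get_rtnl(dev);
		__u32 ip = 0;

		if (!in_dev)
			return 0;
		for_ifa(in_dev)
			if (strcmp(label, ifa->ifa_label) == 0) {
				ip = ntohl(ifa->ifa_local);
				break;
			}
		endfor_ifa(in_dev);
		return ip;
	}
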
 
 static int
-ksocknal_search_new_ipif(ksock_net_t *net)
+ksocknal_search_new_ipif(struct ksock_net *net)
 {
-       int     new_ipif = 0;
-       int     i;
+       int new_ipif = 0;
+       int i;
 
        for (i = 0; i < net->ksnn_ninterfaces; i++) {
-               char            *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
-               char            *colon = strchr(ifnam, ':');
-               int             found  = 0;
-               ksock_net_t     *tmp;
-               int             j;
+               char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
+               char *colon = strchr(ifnam, ':');
+               int found  = 0;
+               struct ksock_net *tmp;
+               int j;
 
                if (colon != NULL) /* ignore alias device */
                        *colon = 0;
@@ -2698,36 +2669,35 @@ ksocknal_search_new_ipif(ksock_net_t *net)
 }
 
 static int
-ksocknal_start_schedulers(struct ksock_sched_info *info)
+ksocknal_start_schedulers(struct ksock_sched *sched)
 {
        int     nthrs;
        int     rc = 0;
        int     i;
 
-       if (info->ksi_nthreads == 0) {
+       if (sched->kss_nthreads == 0) {
                if (*ksocknal_tunables.ksnd_nscheds > 0) {
-                       nthrs = info->ksi_nthreads_max;
+                       nthrs = sched->kss_nthreads_max;
                } else {
                        nthrs = cfs_cpt_weight(lnet_cpt_table(),
-                                              info->ksi_cpt);
+                                              sched->kss_cpt);
                        nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
                        nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
                }
-               nthrs = min(nthrs, info->ksi_nthreads_max);
+               nthrs = min(nthrs, sched->kss_nthreads_max);
        } else {
-               LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
+               LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
                /* increase two threads if there is new interface */
-               nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
+               nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
        }
 
        for (i = 0; i < nthrs; i++) {
-               long            id;
-               char            name[20];
-               ksock_sched_t   *sched;
-               id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
-               sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
+               long id;
+               char name[20];
+
+               id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
                snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
-                        info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
+                        sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
 
                rc = ksocknal_thread_start(ksocknal_scheduler,
                                           (void *)id, name);
@@ -2735,35 +2705,35 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
                        continue;
 
                CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
-                      info->ksi_cpt, info->ksi_nthreads + i, rc);
+                      sched->kss_cpt, (int) KSOCK_THREAD_SID(id), rc);
                break;
        }
 
-       info->ksi_nthreads += i;
+       sched->kss_nthreads += i;
        return rc;
 }
 
 static int
-ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
+ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
 {
-       int     newif = ksocknal_search_new_ipif(net);
-       int     rc;
-       int     i;
+       int newif = ksocknal_search_new_ipif(net);
+       int rc;
+       int i;
 
        if (ncpts > 0 && ncpts > cfs_cpt_number(lnet_cpt_table()))
                return -EINVAL;
 
        for (i = 0; i < ncpts; i++) {
-               struct ksock_sched_info *info;
+               struct ksock_sched *sched;
                int cpt = (cpts == NULL) ? i : cpts[i];
 
                LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
-               info = ksocknal_data.ksnd_sched_info[cpt];
+               sched = ksocknal_data.ksnd_schedulers[cpt];
 
-               if (!newif && info->ksi_nthreads > 0)
+               if (!newif && sched->kss_nthreads > 0)
                        continue;
 
-               rc = ksocknal_start_schedulers(info);
+               rc = ksocknal_start_schedulers(sched);
                if (rc != 0)
                        return rc;
        }
@@ -2771,11 +2741,12 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
 }
 
 int
-ksocknal_startup (lnet_ni_t *ni)
+ksocknal_startup(struct lnet_ni *ni)
 {
-       ksock_net_t  *net;
-       int           rc;
-       int           i;
+       struct ksock_net *net;
+       struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
+       int rc;
+       int i;
        struct net_device *net_dev;
        int node_id;
 
@@ -2792,66 +2763,80 @@ ksocknal_startup (lnet_ni_t *ni)
                 goto fail_0;
 
        spin_lock_init(&net->ksnn_lock);
-        net->ksnn_incarnation = ksocknal_new_incarnation();
-        ni->ni_data = net;
-       if (!ni->ni_net->net_tunables_set) {
-               ni->ni_net->net_tunables.lct_peer_timeout =
+       net->ksnn_incarnation = ktime_get_real_ns();
+       ni->ni_data = net;
+       net_tunables = &ni->ni_net->net_tunables;
+
+       if (net_tunables->lct_peer_timeout == -1)
+               net_tunables->lct_peer_timeout =
                        *ksocknal_tunables.ksnd_peertimeout;
-               ni->ni_net->net_tunables.lct_max_tx_credits =
+
+       if (net_tunables->lct_max_tx_credits == -1)
+               net_tunables->lct_max_tx_credits =
                        *ksocknal_tunables.ksnd_credits;
-               ni->ni_net->net_tunables.lct_peer_tx_credits =
-                       *ksocknal_tunables.ksnd_peertxcredits;
-               ni->ni_net->net_tunables.lct_peer_rtr_credits =
-                       *ksocknal_tunables.ksnd_peerrtrcredits;
-               ni->ni_net->net_tunables_set = true;
-       }
 
+       if (net_tunables->lct_peer_tx_credits == -1)
+               net_tunables->lct_peer_tx_credits =
+                       *ksocknal_tunables.ksnd_peertxcredits;
 
-        if (ni->ni_interfaces[0] == NULL) {
-                rc = ksocknal_enumerate_interfaces(net);
-                if (rc <= 0)
-                        goto fail_1;
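+       /* peer_tx_credits cannot exceed the overall tx credit limit */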
+       if (net_tunables->lct_peer_tx_credits >
+           net_tunables->lct_max_tx_credits)
+               net_tunables->lct_peer_tx_credits =
+                       net_tunables->lct_max_tx_credits;
 
-                net->ksnn_ninterfaces = 1;
-        } else {
-                for (i = 0; i < LNET_MAX_INTERFACES; i++) {
-                        int    up;
+       if (net_tunables->lct_peer_rtr_credits == -1)
+               net_tunables->lct_peer_rtr_credits =
+                       *ksocknal_tunables.ksnd_peerrtrcredits;
 
-                        if (ni->ni_interfaces[i] == NULL)
-                                break;
+       if (!ni->ni_interfaces[0]) {
+               rc = ksocknal_enumerate_interfaces(net, NULL);
+               if (rc < 0)
+                       goto fail_1;
+       } else {
+               /* Before Multi-Rail, ksocklnd would manage
+                * multiple interfaces with its own tcp bonding.
+                * If we encounter an old configuration using
+                * this tcp bonding approach, then we need to
+                * handle more than one entry in ni_interfaces.
+                *
+                * In a Multi-Rail configuration only ONE
+                * ni_interfaces entry should exist. Each IP
+                * alias should be mapped to its own
+                * 'struct lnet_ni'.
+                */
+               for (i = 0; i < LNET_INTERFACES_NUM; i++) {
+                       int j;
 
-                       rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
-                                &net->ksnn_interfaces[i].ksni_ipaddr,
-                                &net->ksnn_interfaces[i].ksni_netmask);
+                       if (!ni->ni_interfaces[i])
+                               break;
 
-                        if (rc != 0) {
-                                CERROR("Can't get interface %s info: %d\n",
-                                       ni->ni_interfaces[i], rc);
-                                goto fail_1;
-                        }
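+                       /* reject an interface name listed more than once */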
+                       for (j = 0; j < net->ksnn_ninterfaces; j++) {
+                               struct ksock_interface *ksi;
 
-                        if (!up) {
-                                CERROR("Interface %s is down\n",
-                                       ni->ni_interfaces[i]);
-                                goto fail_1;
-                        }
+                               ksi = &net->ksnn_interfaces[j];
 
-                       strlcpy(net->ksnn_interfaces[i].ksni_name,
-                               ni->ni_interfaces[i],
-                               sizeof(net->ksnn_interfaces[i].ksni_name));
+                               if (strcmp(ni->ni_interfaces[i],
+                                          ksi->ksni_name) == 0) {
+                                       CERROR("found duplicate %s\n",
+                                              ksi->ksni_name);
+                                       rc = -EEXIST;
+                                       goto fail_1;
+                               }
+                       }
 
+                       rc = ksocknal_enumerate_interfaces(net, ni->ni_interfaces[i]);
+                       if (rc < 0)
+                               goto fail_1;
                }
-               net->ksnn_ninterfaces = i;
        }
 
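+       /* remember the CPT of the NIC's NUMA node, if the device is found */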
        net_dev = dev_get_by_name(&init_net,
                                  net->ksnn_interfaces[0].ksni_name);
        if (net_dev != NULL) {
                node_id = dev_to_node(&net_dev->dev);
-               ni->dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
+               ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
                dev_put(net_dev);
        } else {
-               ni->dev_cpt = CFS_CPT_ANY;
+               ni->ni_dev_cpt = CFS_CPT_ANY;
        }
 
        /* call it before add it to ksocknal_data.ksnd_nets */