LU-9680 net: Netlink improvements
[fs/lustre-release.git] lnet/lnet/api-ni.c
index 3fda86b..f7be169 100644
@@ -27,7 +27,6 @@
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
@@ -40,7 +39,7 @@
 #ifdef HAVE_SCHED_HEADERS
 #include <linux/sched/signal.h>
 #endif
-
+#include <lnet/udsp.h>
 #include <lnet/lib-lnet.h>
 
 #define D_LNI D_CONSOLE
@@ -71,10 +70,10 @@ static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
 module_param(rnet_htable_size, int, 0444);
 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
 
-static int use_tcp_bonding = false;
+static int use_tcp_bonding;
 module_param(use_tcp_bonding, int, 0444);
 MODULE_PARM_DESC(use_tcp_bonding,
-                "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
+                "use_tcp_bonding parameter has been removed");
 
 unsigned int lnet_numa_range = 0;
 module_param(lnet_numa_range, uint, 0444);
@@ -122,7 +121,12 @@ module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
                  &lnet_recovery_interval, S_IRUGO|S_IWUSR);
 #endif
 MODULE_PARM_DESC(lnet_recovery_interval,
-               "Interval to recover unhealthy interfaces in seconds");
+               "DEPRECATED - Interval to recover unhealthy interfaces in seconds");
+
+unsigned int lnet_recovery_limit;
+module_param(lnet_recovery_limit, uint, 0644);
+MODULE_PARM_DESC(lnet_recovery_limit,
+                "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");
 
 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
 static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
@@ -182,10 +186,8 @@ module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
 MODULE_PARM_DESC(lnet_drop_asym_route,
                 "Set to 1 to drop asymmetrical route messages.");
 
-#define LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT 50
-#define LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT 50
-
-unsigned lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
+#define LNET_TRANSACTION_TIMEOUT_DEFAULT 50
+unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
 static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
 #ifdef HAVE_KERNEL_PARAM_OPS
 static struct kernel_param_ops param_ops_transaction_timeout = {
@@ -203,8 +205,8 @@ module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
 MODULE_PARM_DESC(lnet_transaction_timeout,
                "Maximum number of seconds to wait for a peer response.");
 
-#define LNET_RETRY_COUNT_HEALTH_DEFAULT 2
-unsigned lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
+#define LNET_RETRY_COUNT_DEFAULT 2
+unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
 static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
 #ifdef HAVE_KERNEL_PARAM_OPS
 static struct kernel_param_ops param_ops_retry_count = {
@@ -222,9 +224,33 @@ module_param_call(lnet_retry_count, retry_count_set, param_get_int,
 MODULE_PARM_DESC(lnet_retry_count,
                 "Maximum number of times to retry transmitting a message");
 
+unsigned int lnet_response_tracking = 3;
+static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_response_tracking = {
+       .set = response_tracking_set,
+       .get = param_get_int,
+};
 
-unsigned lnet_lnd_timeout = LNET_LND_DEFAULT_TIMEOUT;
-unsigned int lnet_current_net_count;
+#define param_check_response_tracking(name, p)  \
+       __param_check(name, p, int)
+module_param(lnet_response_tracking, response_tracking, 0644);
+#else
+module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
+                 &lnet_response_tracking, 0644);
+#endif
+MODULE_PARM_DESC(lnet_response_tracking,
+                "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");
+
+#define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
+                                 (LNET_RETRY_COUNT_DEFAULT + 1))
+unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;
+static void lnet_set_lnd_timeout(void)
+{
+       lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
+                          (lnet_retry_count + 1);
+}
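
For reference, a quick standalone check of the default implied by the two macros above (illustration only, not part of the patch): (50 - 1) / (2 + 1) = 16 seconds.

	/* Illustration only: default LND timeout implied by the macros above */
	#include <stdio.h>

	int main(void)
	{
		unsigned int transaction_timeout = 50;	/* LNET_TRANSACTION_TIMEOUT_DEFAULT */
		unsigned int retry_count = 2;		/* LNET_RETRY_COUNT_DEFAULT */

		/* same formula as lnet_set_lnd_timeout() above */
		printf("lnet_lnd_timeout = %u\n",
		       (transaction_timeout - 1) / (retry_count + 1));	/* prints 16 */
		return 0;
	}
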
 
 /*
  * This sequence number keeps track of how many times DLC was used to
@@ -267,21 +293,9 @@ sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
                return -EINVAL;
        }
 
-       /*
-        * if we're turning on health then use the health timeout
-        * defaults.
-        */
-       if (*sensitivity == 0 && value != 0) {
-               lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
-               lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
-       /*
-        * if we're turning off health then use the no health timeout
-        * default.
-        */
-       } else if (*sensitivity != 0 && value == 0) {
-               lnet_transaction_timeout =
-                       LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT;
+       if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
                lnet_retry_count = 0;
+               lnet_set_lnd_timeout();
        }
 
        *sensitivity = value;
@@ -294,30 +308,7 @@ sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
 static int
 recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
 {
-       int rc;
-       unsigned *interval = (unsigned *)kp->arg;
-       unsigned long value;
-
-       rc = kstrtoul(val, 0, &value);
-       if (rc) {
-               CERROR("Invalid module parameter value for 'lnet_recovery_interval'\n");
-               return rc;
-       }
-
-       if (value < 1) {
-               CERROR("lnet_recovery_interval must be at least 1 second\n");
-               return -EINVAL;
-       }
-
-       /*
-        * The purpose of locking the api_mutex here is to ensure that
-        * the correct value ends up stored properly.
-        */
-       mutex_lock(&the_lnet.ln_api_mutex);
-
-       *interval = value;
-
-       mutex_unlock(&the_lnet.ln_api_mutex);
+       CWARN("'lnet_recovery_interval' has been deprecated\n");
 
        return 0;
 }
@@ -326,7 +317,7 @@ static int
 discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
 {
        int rc;
-       unsigned *discovery = (unsigned *)kp->arg;
+       unsigned *discovery_off = (unsigned *)kp->arg;
        unsigned long value;
        struct lnet_ping_buffer *pbuf;
 
@@ -344,7 +335,7 @@ discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
         */
        mutex_lock(&the_lnet.ln_api_mutex);
 
-       if (value == *discovery) {
+       if (value == *discovery_off) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }
@@ -357,7 +348,7 @@ discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
         * updating the peers
         */
        if (the_lnet.ln_state != LNET_STATE_RUNNING) {
-               *discovery = value;
+               *discovery_off = value;
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }
@@ -371,23 +362,10 @@ discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
                pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
        lnet_net_unlock(LNET_LOCK_EX);
 
-       /*
-        * Always update the peers. This will result in a push to the
-        * peers with the updated capabilities feature mask. The peer can
-        * then take appropriate action to update its representation of
-        * the node.
-        *
-        * If discovery is already off, turn it on first before pushing
-        * the update. The discovery flag must be on before pushing.
-        * otherwise if the flag is on and we're turning it off then push
-        * first before turning the flag off. In the former case the flag
-        * is being set twice, but I find it's better to do that rather
-        * than have duplicate code in an if/else statement.
-        */
-       if (*discovery > 0 && value == 0)
-               *discovery = value;
-       lnet_push_update_to_peers(1);
-       *discovery = value;
+       /* only send a push when we're turning off discovery */
+       if (*discovery_off <= 0 && value > 0)
+               lnet_push_update_to_peers(1);
+       *discovery_off = value;
 
        mutex_unlock(&the_lnet.ln_api_mutex);
 
@@ -445,7 +423,7 @@ transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
         */
        mutex_lock(&the_lnet.ln_api_mutex);
 
-       if (value < lnet_retry_count || value == 0) {
+       if (value <= lnet_retry_count || value == 0) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                CERROR("Invalid value for lnet_transaction_timeout (%lu). "
                       "Has to be greater than lnet_retry_count (%u)\n",
@@ -459,10 +437,10 @@ transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
        }
 
        *transaction_to = value;
-       if (lnet_retry_count == 0)
-               lnet_lnd_timeout = value;
-       else
-               lnet_lnd_timeout = value / lnet_retry_count;
+       /* Update the lnet_lnd_timeout now that we've modified the
+        * transaction timeout
+        */
+       lnet_set_lnd_timeout();
 
        mutex_unlock(&the_lnet.ln_api_mutex);
 
@@ -488,9 +466,9 @@ retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
         */
        mutex_lock(&the_lnet.ln_api_mutex);
 
-       if (lnet_health_sensitivity == 0) {
+       if (lnet_health_sensitivity == 0 && value > 0) {
                mutex_unlock(&the_lnet.ln_api_mutex);
-               CERROR("Can not set retry_count when health feature is turned off\n");
+               CERROR("Can not set lnet_retry_count when health feature is turned off\n");
                return -EINVAL;
        }
 
@@ -504,10 +482,10 @@ retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
 
        *retry_count = value;
 
-       if (value == 0)
-               lnet_lnd_timeout = lnet_transaction_timeout;
-       else
-               lnet_lnd_timeout = lnet_transaction_timeout / value;
+       /* Update the lnet_lnd_timeout now that we've modified the
+        * retry count
+        */
+       lnet_set_lnd_timeout();
 
        mutex_unlock(&the_lnet.ln_api_mutex);
 
@@ -536,17 +514,40 @@ intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
        return 0;
 }
 
-static char *
+static int
+response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int rc;
+       unsigned long new_value;
+
+       rc = kstrtoul(val, 0, &new_value);
+       if (rc) {
+               CERROR("Invalid value for 'lnet_response_tracking'\n");
+               return -EINVAL;
+       }
+
+       if (new_value < 0 || new_value > 3) {
+               CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
+                     new_value);
+               return -EINVAL;
+       }
+
+       lnet_response_tracking = new_value;
+
+       return 0;
+}
+
+static const char *
 lnet_get_routes(void)
 {
        return routes;
 }
 
-static char *
+static const char *
 lnet_get_networks(void)
 {
-       char   *nets;
-       int     rc;
+       const char *nets;
+       int rc;
 
        if (*networks != 0 && *ip2nets != 0) {
                LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
@@ -577,6 +578,7 @@ lnet_init_locks(void)
 struct kmem_cache *lnet_mes_cachep;       /* MEs kmem_cache */
 struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
                                            *  MDs kmem_cache */
+struct kmem_cache *lnet_udsp_cachep;      /* udsp cache */
 struct kmem_cache *lnet_rspt_cachep;      /* response tracker cache */
 struct kmem_cache *lnet_msg_cachep;
 
@@ -597,6 +599,12 @@ lnet_slab_setup(void)
        if (!lnet_small_mds_cachep)
                return -ENOMEM;
 
+       lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
+                                            sizeof(struct lnet_udsp),
+                                            0, 0, NULL);
+       if (!lnet_udsp_cachep)
+               return -ENOMEM;
+
        lnet_rspt_cachep = kmem_cache_create("lnet_rspt", sizeof(struct lnet_rsp_tracker),
                                            0, 0, NULL);
        if (!lnet_rspt_cachep)
@@ -618,12 +626,16 @@ lnet_slab_cleanup(void)
                lnet_msg_cachep = NULL;
        }
 
-
        if (lnet_rspt_cachep) {
                kmem_cache_destroy(lnet_rspt_cachep);
                lnet_rspt_cachep = NULL;
        }
 
+       if (lnet_udsp_cachep) {
+               kmem_cache_destroy(lnet_udsp_cachep);
+               lnet_udsp_cachep = NULL;
+       }
+
        if (lnet_small_mds_cachep) {
                kmem_cache_destroy(lnet_small_mds_cachep);
                lnet_small_mds_cachep = NULL;
@@ -710,7 +722,8 @@ static void lnet_assert_wire_constants(void)
        /* Wire protocol assertions generated by 'wirecheck'
         * running on Linux robert.bartonsoftware.com 2.6.8-1.521
         * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
-        * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
+        * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
+        */
 
        /* Constants... */
        BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
@@ -722,6 +735,27 @@ static void lnet_assert_wire_constants(void)
        BUILD_BUG_ON(LNET_MSG_REPLY != 3);
        BUILD_BUG_ON(LNET_MSG_HELLO != 4);
 
+       BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
+       BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);
+
+       /* Checks for struct lnet_nid */
+       BUILD_BUG_ON((int)sizeof(struct lnet_nid) != 20);
+       BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_size) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_size) != 1);
+       BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_type) != 1);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_type) != 1);
+       BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_num) != 2);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_num) != 2);
+       BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_addr) != 4);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_addr) != 16);
+
+       /* Checks for struct lnet_process_id_packed */
+       BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
+       BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);
+
        /* Checks for struct lnet_handle_wire */
        BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
        BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
@@ -836,6 +870,52 @@ static void lnet_assert_wire_constants(void)
        BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
        BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
        BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
+
+       /* Acceptor connection request */
+       BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
+
+       /* Checks for struct lnet_acceptor_connreq */
+       BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
+       BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);
+
+       /* Checks for struct lnet_acceptor_connreq_v2 */
+       BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq_v2) != 28);
+       BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_magic) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_magic) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_version) != 4);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_version) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_nid) != 8);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_nid) != 20);
+
+       /* Checks for struct lnet_counters_common */
+       BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
 }
 
 static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
@@ -888,16 +968,17 @@ lnet_unregister_lnd(const struct lnet_lnd *lnd)
 }
 EXPORT_SYMBOL(lnet_unregister_lnd);
 
-void
-lnet_counters_get_common(struct lnet_counters_common *common)
+static void
+lnet_counters_get_common_locked(struct lnet_counters_common *common)
 {
        struct lnet_counters *ctr;
        int i;
 
+       /* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
+        * actually called under the protection of the lnet_net_lock.
+        */
        memset(common, 0, sizeof(*common));
 
-       lnet_net_lock(LNET_LOCK_EX);
-
        cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
                common->lcc_msgs_max     += ctr->lct_common.lcc_msgs_max;
                common->lcc_msgs_alloc   += ctr->lct_common.lcc_msgs_alloc;
@@ -911,23 +992,33 @@ lnet_counters_get_common(struct lnet_counters_common *common)
                common->lcc_route_length += ctr->lct_common.lcc_route_length;
                common->lcc_drop_length  += ctr->lct_common.lcc_drop_length;
        }
+}
+
+void
+lnet_counters_get_common(struct lnet_counters_common *common)
+{
+       lnet_net_lock(LNET_LOCK_EX);
+       lnet_counters_get_common_locked(common);
        lnet_net_unlock(LNET_LOCK_EX);
 }
 EXPORT_SYMBOL(lnet_counters_get_common);
 
-void
+int
 lnet_counters_get(struct lnet_counters *counters)
 {
        struct lnet_counters *ctr;
        struct lnet_counters_health *health = &counters->lct_health;
-       int             i;
+       int i, rc = 0;
 
        memset(counters, 0, sizeof(*counters));
 
-       lnet_counters_get_common(&counters->lct_common);
-
        lnet_net_lock(LNET_LOCK_EX);
 
+       if (the_lnet.ln_state != LNET_STATE_RUNNING)
+               GOTO(out_unlock, rc = -ENODEV);
+
+       lnet_counters_get_common_locked(&counters->lct_common);
+
        cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
                health->lch_rst_alloc    += ctr->lct_health.lch_rst_alloc;
                health->lch_resend_count += ctr->lct_health.lch_resend_count;
@@ -954,7 +1045,9 @@ lnet_counters_get(struct lnet_counters *counters)
                health->lch_network_timeout_count +=
                                ctr->lct_health.lch_network_timeout_count;
        }
+out_unlock:
        lnet_net_unlock(LNET_LOCK_EX);
+       return rc;
 }
 EXPORT_SYMBOL(lnet_counters_get);
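
Since lnet_counters_get() now returns an error when LNet is not running, callers should check the return code. A minimal caller sketch follows; the demo_ function name is illustrative and not part of this patch.

	static void demo_log_send_count(void)
	{
		struct lnet_counters ctrs;
		int rc;

		rc = lnet_counters_get(&ctrs);
		if (rc) {	/* -ENODEV when LNet is not running */
			CDEBUG(D_NET, "LNet counters unavailable: rc = %d\n", rc);
			return;
		}
		CDEBUG(D_NET, "send_count = %u\n", ctrs.lct_common.lcc_send_count);
	}
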
 
@@ -966,9 +1059,12 @@ lnet_counters_reset(void)
 
        lnet_net_lock(LNET_LOCK_EX);
 
+       if (the_lnet.ln_state != LNET_STATE_RUNNING)
+               goto avoid_reset;
+
        cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
                memset(counters, 0, sizeof(struct lnet_counters));
-
+avoid_reset:
        lnet_net_unlock(LNET_LOCK_EX);
 }
 
@@ -1185,6 +1281,7 @@ lnet_prepare(lnet_pid_t requested_pid)
        INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
        INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
        INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
+       INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
        init_waitqueue_head(&the_lnet.ln_dc_waitq);
        the_lnet.ln_mt_handler = NULL;
        init_completion(&the_lnet.ln_started);
@@ -1291,6 +1388,7 @@ lnet_unprepare (void)
                the_lnet.ln_counters = NULL;
        }
        lnet_destroy_remote_nets_table();
+       lnet_udsp_destroy(true);
        lnet_slab_cleanup();
 
        return 0;
@@ -1343,8 +1441,83 @@ lnet_get_net_locked(__u32 net_id)
        return NULL;
 }
 
-unsigned int
-lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
+void
+lnet_net_clr_pref_rtrs(struct lnet_net *net)
+{
+       struct list_head zombies;
+       struct lnet_nid_list *ne;
+       struct lnet_nid_list *tmp;
+
+       INIT_LIST_HEAD(&zombies);
+
+       lnet_net_lock(LNET_LOCK_EX);
+       list_splice_init(&net->net_rtr_pref_nids, &zombies);
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
+               list_del_init(&ne->nl_list);
+               LIBCFS_FREE(ne, sizeof(*ne));
+       }
+}
+
+int
+lnet_net_add_pref_rtr(struct lnet_net *net,
+                     struct lnet_nid *gw_nid)
+__must_hold(&the_lnet.ln_api_mutex)
+{
+       struct lnet_nid_list *ne;
+
+       /* This function is called with api_mutex held. When the api_mutex
+        * is held the list can not be modified, as it is only modified as
+        * a result of applying a UDSP and that happens under api_mutex
+        * lock.
+        */
+       list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
+               if (nid_same(&ne->nl_nid, gw_nid))
+                       return -EEXIST;
+       }
+
+       LIBCFS_ALLOC(ne, sizeof(*ne));
+       if (!ne)
+               return -ENOMEM;
+
+       ne->nl_nid = *gw_nid;
+
+       /* Lock the cpt to protect against addition and checks in the
+        * selection algorithm
+        */
+       lnet_net_lock(LNET_LOCK_EX);
+       list_add(&ne->nl_list, &net->net_rtr_pref_nids);
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       return 0;
+}
+
+bool
+lnet_net_is_pref_rtr_locked(struct lnet_net *net, struct lnet_nid *rtr_nid)
+{
+       struct lnet_nid_list *ne;
+
+       CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
+              libcfs_net2str(net->net_id),
+              list_empty(&net->net_rtr_pref_nids));
+
+       if (list_empty(&net->net_rtr_pref_nids))
+               return false;
+
+       list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
+               CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
+                      libcfs_nidstr(&ne->nl_nid),
+                      libcfs_nidstr(rtr_nid));
+               if (nid_same(rtr_nid, &ne->nl_nid))
+                       return true;
+       }
+
+       return false;
+}
+
+static unsigned int
+lnet_nid4_cpt_hash(lnet_nid_t nid, unsigned int number)
 {
        __u64           key = nid;
        unsigned int    val;
@@ -1362,8 +1535,31 @@ lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
        return (unsigned int)(key + val + (val >> 1)) % number;
 }
 
+unsigned int
+lnet_nid_cpt_hash(struct lnet_nid *nid, unsigned int number)
+{
+       unsigned int val;
+       u32 h = 0;
+       int i;
+
+       LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
+
+       if (number == 1)
+               return 0;
+
+       if (nid_is_nid4(nid))
+               return lnet_nid4_cpt_hash(lnet_nid_to_nid4(nid), number);
+
+       for (i = 0; i < 4; i++)
+               h = hash_32(nid->nid_addr[i]^h, 32);
+       val = hash_32(LNET_NID_NET(nid) ^ h, LNET_CPT_BITS);
+       if (val < number)
+               return val;
+       return (unsigned int)(h + val + (val >> 1)) % number;
+}
+
 int
-lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
+lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni)
 {
        struct lnet_net *net;
 
@@ -1387,7 +1583,7 @@ lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
        }
 
        /* no NI provided so look at the net */
-       net = lnet_get_net_locked(LNET_NIDNET(nid));
+       net = lnet_get_net_locked(LNET_NID_NET(nid));
 
        if (net != NULL && net->net_cpts != NULL) {
                return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
@@ -1397,7 +1593,7 @@ lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
 }
 
 int
-lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
+lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni)
 {
        int     cpt;
        int     cpt2;
@@ -1413,6 +1609,19 @@ lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
 
        return cpt2;
 }
+EXPORT_SYMBOL(lnet_nid2cpt);
+
+int
+lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni)
+{
+       struct lnet_nid nid;
+
+       if (LNET_CPT_NUMBER == 1)
+               return 0; /* the only one */
+
+       lnet_nid4_to_nid(nid4, &nid);
+       return lnet_nid2cpt(&nid, ni);
+}
 EXPORT_SYMBOL(lnet_cpt_of_nid);
 
 int
@@ -1444,16 +1653,16 @@ lnet_islocalnet(__u32 net_id)
 }
 
 struct lnet_ni  *
-lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
+lnet_nid_to_ni_locked(struct lnet_nid *nid, int cpt)
 {
        struct lnet_net  *net;
-       struct lnet_ni   *ni;
+       struct lnet_ni *ni;
 
        LASSERT(cpt != LNET_LOCK_EX);
 
        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
-                       if (ni->ni_nid == nid)
+                       if (nid_same(&ni->ni_nid, nid))
                                return ni;
                }
        }
@@ -1461,13 +1670,25 @@ lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
        return NULL;
 }
 
+struct lnet_ni  *
+lnet_nid2ni_locked(lnet_nid_t nid4, int cpt)
+{
+       struct lnet_nid nid;
+
+       lnet_nid4_to_nid(nid4, &nid);
+       return lnet_nid_to_ni_locked(&nid, cpt);
+}
+
 struct lnet_ni *
-lnet_nid2ni_addref(lnet_nid_t nid)
+lnet_nid2ni_addref(lnet_nid_t nid4)
 {
        struct lnet_ni *ni;
+       struct lnet_nid nid;
+
+       lnet_nid4_to_nid(nid4, &nid);
 
        lnet_net_lock(0);
-       ni = lnet_nid2ni_locked(nid, 0);
+       ni = lnet_nid_to_ni_locked(&nid, 0);
        if (ni)
                lnet_ni_addref_locked(ni, 0);
        lnet_net_unlock(0);
@@ -1476,8 +1697,23 @@ lnet_nid2ni_addref(lnet_nid_t nid)
 }
 EXPORT_SYMBOL(lnet_nid2ni_addref);
 
+struct lnet_ni *
+lnet_nid_to_ni_addref(struct lnet_nid *nid)
+{
+       struct lnet_ni *ni;
+
+       lnet_net_lock(0);
+       ni = lnet_nid_to_ni_locked(nid, 0);
+       if (ni)
+               lnet_ni_addref_locked(ni, 0);
+       lnet_net_unlock(0);
+
+       return ni;
+}
+EXPORT_SYMBOL(lnet_nid_to_ni_addref);
+
 int
-lnet_islocalnid(lnet_nid_t nid)
+lnet_islocalnid4(lnet_nid_t nid)
 {
        struct lnet_ni  *ni;
        int             cpt;
@@ -1490,6 +1726,19 @@ lnet_islocalnid(lnet_nid_t nid)
 }
 
 int
+lnet_islocalnid(struct lnet_nid *nid)
+{
+       struct lnet_ni  *ni;
+       int             cpt;
+
+       cpt = lnet_net_lock_current();
+       ni = lnet_nid_to_ni_locked(nid, cpt);
+       lnet_net_unlock(cpt);
+
+       return ni != NULL;
+}
+
+int
 lnet_count_acceptor_nets(void)
 {
        /* Return the # of NIs that need the acceptor. */
@@ -1595,23 +1844,6 @@ lnet_get_ni_count(void)
        return count;
 }
 
-int
-lnet_get_net_count(void)
-{
-       struct lnet_net *net;
-       int count = 0;
-
-       lnet_net_lock(0);
-
-       list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
-               count++;
-       }
-
-       lnet_net_unlock(0);
-
-       return count;
-}
-
 void
 lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
 {
@@ -1686,8 +1918,8 @@ lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
                       struct lnet_handle_md *ping_mdh,
                       int ni_count, bool set_eq)
 {
-       struct lnet_process_id id = {
-               .nid = LNET_NID_ANY,
+       struct lnet_processid id = {
+               .nid = LNET_ANY_NID,
                .pid = LNET_PID_ANY
        };
        struct lnet_me *me;
@@ -1705,7 +1937,7 @@ lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
        }
 
        /* Ping target ME/MD */
-       me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+       me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
                          LNET_PROTO_PING_MATCHBITS, 0,
                          LNET_UNLINK, LNET_INS_AFTER);
        if (IS_ERR(me)) {
@@ -1724,17 +1956,15 @@ lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
        md.handler   = the_lnet.ln_ping_target_handler;
        md.user_ptr  = *ppbuf;
 
-       rc = LNetMDAttach(me, md, LNET_RETAIN, ping_mdh);
+       rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
        if (rc != 0) {
                CERROR("Can't attach ping target MD: %d\n", rc);
-               goto fail_unlink_ping_me;
+               goto fail_decref_ping_buffer;
        }
        lnet_ping_buffer_addref(*ppbuf);
 
        return 0;
 
-fail_unlink_ping_me:
-       LNetMEUnlink(me);
 fail_decref_ping_buffer:
        LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
        lnet_ping_buffer_decref(*ppbuf);
@@ -1772,12 +2002,12 @@ lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
 
                        ns = &pbuf->pb_info.pi_ni[i];
 
-                       ns->ns_nid = ni->ni_nid;
+                       if (!nid_is_nid4(&ni->ni_nid))
+                               continue;
+                       ns->ns_nid = lnet_nid_to_nid4(&ni->ni_nid);
 
                        lnet_ni_lock(ni);
-                       ns->ns_status = (ni->ni_status != NULL) ?
-                                        ni->ni_status->ns_status :
-                                               LNET_NI_STATUS_UP;
+                       ns->ns_status = lnet_ni_get_status_locked(ni);
                        ni->ni_status = ns;
                        lnet_ni_unlock(ni);
 
@@ -1905,12 +2135,12 @@ again:
 int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
                          struct lnet_handle_md *mdhp)
 {
-       struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
+       struct lnet_processid id = { LNET_ANY_NID, LNET_PID_ANY };
        struct lnet_md md = { NULL };
        struct lnet_me *me;
        int rc;
 
-       me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+       me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
                          LNET_PROTO_PING_MATCHBITS, 0,
                          LNET_UNLINK, LNET_INS_AFTER);
        if (IS_ERR(me)) {
@@ -1933,10 +2163,9 @@ int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
        md.user_ptr  = pbuf;
        md.handler   = the_lnet.ln_push_target_handler;
 
-       rc = LNetMDAttach(me, md, LNET_UNLINK, mdhp);
+       rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
        if (rc) {
                CERROR("Can't attach push MD: %d\n", rc);
-               LNetMEUnlink(me);
                lnet_ping_buffer_decref(pbuf);
                pbuf->pb_needs_post = true;
                return rc;
@@ -2079,14 +2308,22 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net)
                }
 
                if (!list_empty(&ni->ni_netlist)) {
+                       /* Unlock the mutex while waiting so that other
+                        * threads can read the LNet state, and to avoid
+                        * deadlock
+                        */
                        lnet_net_unlock(LNET_LOCK_EX);
+                       mutex_unlock(&the_lnet.ln_api_mutex);
+
                        ++i;
                        if ((i & (-i)) == i) {
                                CDEBUG(D_WARNING,
                                       "Waiting for zombie LNI %s\n",
-                                      libcfs_nid2str(ni->ni_nid));
+                                      libcfs_nidstr(&ni->ni_nid));
                        }
                        schedule_timeout_uninterruptible(cfs_time_seconds(1));
+
+                       mutex_lock(&the_lnet.ln_api_mutex);
                        lnet_net_lock(LNET_LOCK_EX);
                        continue;
                }
@@ -2107,7 +2344,7 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net)
 
                if (!islo)
                        CDEBUG(D_LNI, "Removed LNI %s\n",
-                             libcfs_nid2str(ni->ni_nid));
+                             libcfs_nidstr(&ni->ni_nid));
 
                lnet_ni_free(ni);
                i = 2;
@@ -2281,7 +2518,7 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
        atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
 
        CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
-               libcfs_nid2str(ni->ni_nid),
+               libcfs_nidstr(&ni->ni_nid),
                ni->ni_net->net_tunables.lct_peer_tx_credits,
                lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
                ni->ni_net->net_tunables.lct_peer_rtr_credits,
@@ -2372,19 +2609,6 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
         * After that we want to delete the network being added,
         * to avoid a memory leak.
         */
-
-       /*
-        * When a network uses TCP bonding then all its interfaces
-        * must be specified when the network is first defined: the
-        * TCP bonding code doesn't allow for interfaces to be added
-        * or removed.
-        */
-       if (net_l != net && net_l != NULL && use_tcp_bonding &&
-           LNET_NETTYP(net_l->net_id) == SOCKLND) {
-               rc = -EINVAL;
-               goto failed0;
-       }
-
        while (!list_empty(&net->net_ni_added)) {
                ni = list_entry(net->net_ni_added.next, struct lnet_ni,
                                ni_netlist);
@@ -2393,7 +2617,7 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
                /* make sure that the NI we're about to start
                 * up is actually unique. If it's not, fail. */
                if (!lnet_ni_unique_net(&net_l->net_ni_list,
-                                       ni->ni_interfaces[0])) {
+                                       ni->ni_interface)) {
                        rc = -EEXIST;
                        goto failed1;
                }
@@ -2444,9 +2668,6 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
                lnet_net_unlock(LNET_LOCK_EX);
        }
 
-       /* update net count */
-       lnet_current_net_count = lnet_get_net_count();
-
        return ni_count;
 
 failed1:
@@ -2502,6 +2723,86 @@ failed:
        return rc;
 }
 
+static int lnet_genl_parse_list(struct sk_buff *msg,
+                               const struct ln_key_list *data[], u16 idx)
+{
+       const struct ln_key_list *list = data[idx];
+       const struct ln_key_props *props;
+       struct nlattr *node;
+       u16 count;
+
+       if (!list)
+               return 0;
+
+       if (!list->lkl_maxattr)
+               return -ERANGE;
+
+       props = list->lkl_list;
+       if (!props)
+               return -EINVAL;
+
+       node = nla_nest_start(msg, LN_SCALAR_ATTR_LIST);
+       if (!node)
+               return -ENOBUFS;
+
+       for (count = 1; count <= list->lkl_maxattr; count++) {
+               struct nlattr *key = nla_nest_start(msg, count);
+
+               if (count == 1)
+                       nla_put_u16(msg, LN_SCALAR_ATTR_LIST_SIZE,
+                                   list->lkl_maxattr);
+
+               nla_put_u16(msg, LN_SCALAR_ATTR_INDEX, count);
+               if (props[count].lkp_value)
+                       nla_put_string(msg, LN_SCALAR_ATTR_VALUE,
+                                      props[count].lkp_value);
+               if (props[count].lkp_key_format)
+                       nla_put_u16(msg, LN_SCALAR_ATTR_KEY_FORMAT,
+                                   props[count].lkp_key_format);
+               nla_put_u16(msg, LN_SCALAR_ATTR_NLA_TYPE,
+                           props[count].lkp_data_type);
+               if (props[count].lkp_data_type == NLA_NESTED) {
+                       int rc;
+
+                       rc = lnet_genl_parse_list(msg, data, ++idx);
+                       if (rc < 0)
+                               return rc;
+                       idx = rc;
+               }
+
+               nla_nest_end(msg, key);
+       }
+
+       nla_nest_end(msg, node);
+       return idx;
+}
+
+int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
+                              const struct genl_family *family, int flags,
+                              u8 cmd, const struct ln_key_list *data[])
+{
+       int rc = 0;
+       void *hdr;
+
+       if (!data[0])
+               return -EINVAL;
+
+       hdr = genlmsg_put(msg, portid, seq, family, flags, cmd);
+       if (!hdr)
+               GOTO(canceled, rc = -EMSGSIZE);
+
+       rc = lnet_genl_parse_list(msg, data, 0);
+       if (rc < 0)
+               GOTO(canceled, rc);
+
+       genlmsg_end(msg, hdr);
+canceled:
+       if (rc < 0)
+               genlmsg_cancel(msg, hdr);
+       return rc > 0 ? 0 : rc;
+}
+EXPORT_SYMBOL(lnet_genl_send_scalar_list);
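
A minimal sketch of how a netlink dump handler might describe its keys and call the new helper. Everything prefixed demo_ is illustrative, and the static initialization of lkl_list as a flexible array indexed from 1 is an assumption drawn from the parser above, not something defined in this file.

	static const struct ln_key_list demo_keys = {
		.lkl_maxattr	= 2,
		.lkl_list	= {
			[1]	= { .lkp_value = "net", .lkp_data_type = NLA_STRING },
			[2]	= { .lkp_value = "nid", .lkp_data_type = NLA_STRING },
		},
	};

	static int demo_send_keys(struct sk_buff *msg, struct netlink_callback *cb,
				  const struct genl_family *family, u8 cmd)
	{
		/* data[] is walked from index 0; a trailing NULL ends nested lists */
		const struct ln_key_list *all[] = { &demo_keys, NULL };

		return lnet_genl_send_scalar_list(msg, NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, family,
						  NLM_F_MULTI, cmd, all);
	}
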
+
 /**
  * Initialize LNet library.
  *
@@ -2636,6 +2937,9 @@ LNetNIInit(lnet_pid_t requested_pid)
                goto err_empty_list;
        }
 
+       if (use_tcp_bonding)
+               CWARN("use_tcp_bonding has been removed. Use Multi-Rail and Dynamic Discovery instead, see LU-13641\n");
+
        /* If LNet is being initialized via DLC it is possible
         * that the user requests not to load module parameters (ones which
         * are supported by DLC) on initialization.  Therefore, make sure not
@@ -2643,8 +2947,7 @@ LNetNIInit(lnet_pid_t requested_pid)
         * in this case.  On cleanup in case of failure only clean up
         * routes if it has been loaded */
        if (!the_lnet.ln_nis_from_mod_params) {
-               rc = lnet_parse_networks(&net_head, lnet_get_networks(),
-                                        use_tcp_bonding);
+               rc = lnet_parse_networks(&net_head, lnet_get_networks());
                if (rc < 0)
                        goto err_empty_list;
        }
@@ -2793,25 +3096,17 @@ lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
        size_t min_size = 0;
        int i;
 
-       if (!ni || !cfg_ni || !tun)
+       if (!ni || !cfg_ni || !tun || !nid_is_nid4(&ni->ni_nid))
                return;
 
-       if (ni->ni_interfaces[0] != NULL) {
-               for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
-                       if (ni->ni_interfaces[i] != NULL) {
-                               strncpy(cfg_ni->lic_ni_intf[i],
-                                       ni->ni_interfaces[i],
-                                       sizeof(cfg_ni->lic_ni_intf[i]));
-                       }
-               }
+       if (ni->ni_interface != NULL) {
+               strncpy(cfg_ni->lic_ni_intf,
+                       ni->ni_interface,
+                       sizeof(cfg_ni->lic_ni_intf));
        }
 
-       cfg_ni->lic_nid = ni->ni_nid;
-       if (ni->ni_nid == LNET_NID_LO_0)
-               cfg_ni->lic_status = LNET_NI_STATUS_UP;
-       else
-               cfg_ni->lic_status = ni->ni_status->ns_status;
-       cfg_ni->lic_tcp_bonding = use_tcp_bonding;
+       cfg_ni->lic_nid = lnet_nid_to_nid4(&ni->ni_nid);
+       cfg_ni->lic_status = lnet_ni_get_status_locked(ni);
        cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
 
        memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
@@ -2870,26 +3165,21 @@ lnet_fill_ni_info_legacy(struct lnet_ni *ni,
        size_t min_size, tunable_size = 0;
        int i;
 
-       if (!ni || !config)
+       if (!ni || !config || !nid_is_nid4(&ni->ni_nid))
                return;
 
        net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
        if (!net_config)
                return;
 
-       BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
-                    ARRAY_SIZE(net_config->ni_interfaces));
-
-       for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
-               if (!ni->ni_interfaces[i])
-                       break;
+       if (!ni->ni_interface)
+               return;
 
-               strncpy(net_config->ni_interfaces[i],
-                       ni->ni_interfaces[i],
-                       sizeof(net_config->ni_interfaces[i]));
-       }
+       strncpy(net_config->ni_interface,
+               ni->ni_interface,
+               sizeof(net_config->ni_interface));
 
-       config->cfg_nid = ni->ni_nid;
+       config->cfg_nid = lnet_nid_to_nid4(&ni->ni_nid);
        config->cfg_config_u.cfg_net.net_peer_timeout =
                ni->ni_net->net_tunables.lct_peer_timeout;
        config->cfg_config_u.cfg_net.net_max_tx_credits =
@@ -2899,10 +3189,7 @@ lnet_fill_ni_info_legacy(struct lnet_ni *ni,
        config->cfg_config_u.cfg_net.net_peer_rtr_credits =
                ni->ni_net->net_tunables.lct_peer_rtr_credits;
 
-       if (ni->ni_nid == LNET_NID_LO_0)
-               net_config->ni_status = LNET_NI_STATUS_UP;
-       else
-               net_config->ni_status = ni->ni_status->ns_status;
+       net_config->ni_status = lnet_ni_get_status_locked(ni);
 
        if (ni->ni_cpts) {
                int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
@@ -2954,6 +3241,22 @@ lnet_get_ni_idx_locked(int idx)
        return NULL;
 }
 
+int lnet_get_net_healthv_locked(struct lnet_net *net)
+{
+       struct lnet_ni *ni;
+       int best_healthv = 0;
+       int healthv, ni_fatal;
+
+       list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+               healthv = atomic_read(&ni->ni_healthv);
+               ni_fatal = atomic_read(&ni->ni_fatal_error_on);
+               if (!ni_fatal && healthv > best_healthv)
+                       best_healthv = healthv;
+       }
+
+       return best_healthv;
+}
+
 struct lnet_ni *
 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
 {
@@ -3087,12 +3390,13 @@ int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
 static int lnet_add_net_common(struct lnet_net *net,
                               struct lnet_ioctl_config_lnd_tunables *tun)
 {
-       __u32                   net_id;
+       struct lnet_handle_md ping_mdh;
        struct lnet_ping_buffer *pbuf;
-       struct lnet_handle_md   ping_mdh;
-       int                     rc;
        struct lnet_remotenet *rnet;
-       int                     net_ni_count;
+       struct lnet_ni *ni;
+       int net_ni_count;
+       __u32 net_id;
+       int rc;
 
        lnet_net_lock(LNET_LOCK_EX);
        rnet = lnet_find_rnet_locked(net->net_id);
@@ -3142,10 +3446,25 @@ static int lnet_add_net_common(struct lnet_net *net,
 
        lnet_net_lock(LNET_LOCK_EX);
        net = lnet_get_net_locked(net_id);
-       lnet_net_unlock(LNET_LOCK_EX);
-
        LASSERT(net);
 
+       /* apply the UDSPs */
+       rc = lnet_udsp_apply_policies_on_net(net);
+       if (rc)
+               CERROR("Failed to apply UDSPs on local net %s\n",
+                      libcfs_net2str(net->net_id));
+
+       /* At this point we lost track of which NI was just added, so we
+        * just re-apply the policies on all of the NIs on this net
+        */
+       list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+               rc = lnet_udsp_apply_policies_on_ni(ni);
+               if (rc)
+                       CERROR("Failed to apply UDSPs on ni %s\n",
+                              libcfs_nidstr(&ni->ni_nid));
+       }
+       lnet_net_unlock(LNET_LOCK_EX);
+
        /*
         * Start the acceptor thread if this is the first network
         * being added that requires the thread.
@@ -3174,11 +3493,24 @@ failed:
        return rc;
 }
 
+static void
+lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
+{
+       if (tun) {
+               if (!tun->lt_cmn.lct_peer_timeout)
+                       tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
+               if (!tun->lt_cmn.lct_peer_tx_credits)
+                       tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
+               if (!tun->lt_cmn.lct_max_tx_credits)
+                       tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
+       }
+}
+
 static int lnet_handle_legacy_ip2nets(char *ip2nets,
                                      struct lnet_ioctl_config_lnd_tunables *tun)
 {
        struct lnet_net *net;
-       char *nets;
+       const char *nets;
        int rc;
        LIST_HEAD(net_head);
 
@@ -3186,10 +3518,12 @@ static int lnet_handle_legacy_ip2nets(char *ip2nets,
        if (rc < 0)
                return rc;
 
-       rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
+       rc = lnet_parse_networks(&net_head, nets);
        if (rc < 0)
                return rc;
 
+       lnet_set_tune_defaults(tun);
+
        mutex_lock(&the_lnet.ln_api_mutex);
        while (!list_empty(&net_head)) {
                net = list_entry(net_head.next, struct lnet_net, net_list);
@@ -3247,10 +3581,12 @@ int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
        }
 
        ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
-                                      conf->lic_ni_intf[0]);
+                                      conf->lic_ni_intf);
        if (!ni)
                return -ENOMEM;
 
+       lnet_set_tune_defaults(tun);
+
        mutex_lock(&the_lnet.ln_api_mutex);
 
        rc = lnet_add_net_common(net, tun);
@@ -3361,10 +3697,10 @@ lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
        LIST_HEAD(net_head);
        int rc;
        struct lnet_ioctl_config_lnd_tunables tun;
-       char *nets = conf->cfg_config_u.cfg_net.net_intf;
+       const char *nets = conf->cfg_config_u.cfg_net.net_intf;
 
        /* Create a net/ni structures for the network string */
-       rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
+       rc = lnet_parse_networks(&net_head, nets);
        if (rc <= 0)
                return rc == 0 ? -EINVAL : rc;
 
@@ -3383,13 +3719,16 @@ lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
        memset(&tun, 0, sizeof(tun));
 
        tun.lt_cmn.lct_peer_timeout =
-         conf->cfg_config_u.cfg_net.net_peer_timeout;
+         (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
+               conf->cfg_config_u.cfg_net.net_peer_timeout;
        tun.lt_cmn.lct_peer_tx_credits =
-         conf->cfg_config_u.cfg_net.net_peer_tx_credits;
+         (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
+               conf->cfg_config_u.cfg_net.net_peer_tx_credits;
        tun.lt_cmn.lct_peer_rtr_credits =
          conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
        tun.lt_cmn.lct_max_tx_credits =
-         conf->cfg_config_u.cfg_net.net_max_tx_credits;
+         (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
+               conf->cfg_config_u.cfg_net.net_max_tx_credits;
 
        rc = lnet_add_net_common(net, &tun);
 
@@ -3469,12 +3808,13 @@ lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
        lnet_net_lock(LNET_LOCK_EX);
        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
-                       if (ni->ni_nid == nid || all) {
+                       if (all || (nid_is_nid4(&ni->ni_nid) &&
+                                   lnet_nid_to_nid4(&ni->ni_nid) == nid)) {
                                atomic_set(&ni->ni_healthv, value);
                                if (list_empty(&ni->ni_recovery) &&
                                    value < LNET_MAX_HEALTH_VALUE) {
                                        CERROR("manually adding local NI %s to recovery\n",
-                                              libcfs_nid2str(ni->ni_nid));
+                                              libcfs_nidstr(&ni->ni_nid));
                                        list_add_tail(&ni->ni_recovery,
                                                      &the_lnet.ln_mt_localNIRecovq);
                                        lnet_ni_addref_locked(ni, 0);
@@ -3489,6 +3829,30 @@ lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
        lnet_net_unlock(LNET_LOCK_EX);
 }
 
+static void
+lnet_ni_set_conns_per_peer(lnet_nid_t nid, int value, bool all)
+{
+       struct lnet_net *net;
+       struct lnet_ni *ni;
+
+       lnet_net_lock(LNET_LOCK_EX);
+       list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
+               list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+                       if (lnet_nid_to_nid4(&ni->ni_nid) != nid && !all)
+                               continue;
+                       if (LNET_NETTYP(net->net_id) == SOCKLND)
+                               ni->ni_lnd_tunables.lnd_tun_u.lnd_sock.lnd_conns_per_peer = value;
+                       else if (LNET_NETTYP(net->net_id) == O2IBLND)
+                               ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib.lnd_conns_per_peer = value;
+                       if (!all) {
+                               lnet_net_unlock(LNET_LOCK_EX);
+                               return;
+                       }
+               }
+       }
+       lnet_net_unlock(LNET_LOCK_EX);
+}
+
 static int
 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
 {
@@ -3510,7 +3874,10 @@ lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
        stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
        stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
        stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
+       stats->hlni_fatal_error = atomic_read(&ni->ni_fatal_error_on);
        stats->hlni_health_value = atomic_read(&ni->ni_healthv);
+       stats->hlni_ping_count = ni->ni_ping_count;
+       stats->hlni_next_ping = ni->ni_next_ping;
 
 unlock:
        lnet_net_unlock(cpt);
@@ -3526,7 +3893,9 @@ lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
 
        lnet_net_lock(LNET_LOCK_EX);
        list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
-               list->rlst_nid_array[i] = ni->ni_nid;
+               if (!nid_is_nid4(&ni->ni_nid))
+                       continue;
+               list->rlst_nid_array[i] = lnet_nid_to_nid4(&ni->ni_nid);
                i++;
                if (i >= LNET_MAX_SHOW_NUM_NID)
                        break;
@@ -3545,7 +3914,7 @@ lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
 
        lnet_net_lock(LNET_LOCK_EX);
        list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
-               list->rlst_nid_array[i] = lpni->lpni_nid;
+               list->rlst_nid_array[i] = lnet_nid_to_nid4(&lpni->lpni_nid);
                i++;
                if (i >= LNET_MAX_SHOW_NUM_NID)
                        break;
@@ -3567,6 +3936,7 @@ LNetCtl(unsigned int cmd, void *arg)
        struct lnet_ioctl_config_data *config;
        struct lnet_process_id    id = {0};
        struct lnet_ni           *ni;
+       struct lnet_nid           nid;
        int                       rc;
 
        BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
@@ -3594,10 +3964,11 @@ LNetCtl(unsigned int cmd, void *arg)
                          config->cfg_config_u.cfg_route.rtr_sensitivity;
                }
 
+               lnet_nid4_to_nid(config->cfg_nid, &nid);
                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_add_route(config->cfg_net,
                                    config->cfg_config_u.cfg_route.rtr_hop,
-                                   config->cfg_nid,
+                                   &nid,
                                    config->cfg_config_u.cfg_route.
                                        rtr_priority, sensitivity);
                mutex_unlock(&the_lnet.ln_api_mutex);
@@ -3696,7 +4067,15 @@ LNetCtl(unsigned int cmd, void *arg)
                        return -EINVAL;
 
                mutex_lock(&the_lnet.ln_api_mutex);
-               lnet_counters_get(&lnet_stats->st_cntrs);
+               rc = lnet_counters_get(&lnet_stats->st_cntrs);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return rc;
+       }
+
+       case IOC_LIBCFS_RESET_LNET_STATS:
+       {
+               mutex_lock(&the_lnet.ln_api_mutex);
+               lnet_counters_reset();
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }
@@ -3806,7 +4185,7 @@ LNetCtl(unsigned int cmd, void *arg)
                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
                                      cfg->prcfg_cfg_nid,
-                                     cfg->prcfg_mr);
+                                     cfg->prcfg_mr, false);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
@@ -3896,6 +4275,25 @@ LNetCtl(unsigned int cmd, void *arg)
                return 0;
        }
 
+       case IOC_LIBCFS_SET_CONNS_PER_PEER: {
+               struct lnet_ioctl_reset_conns_per_peer_cfg *cfg = arg;
+               int value;
+
+               if (cfg->rcpp_hdr.ioc_len < sizeof(*cfg))
+                       return -EINVAL;
+               if (cfg->rcpp_value < 0)
+                       value = 1;
+               else
+                       value = cfg->rcpp_value;
+               CDEBUG(D_NET,
+                      "Setting conns_per_peer to %d for %s. all = %d\n",
+                      value, libcfs_nid2str(cfg->rcpp_nid), cfg->rcpp_all);
+               mutex_lock(&the_lnet.ln_api_mutex);
+               lnet_ni_set_conns_per_peer(cfg->rcpp_nid, value, cfg->rcpp_all);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
        case IOC_LIBCFS_NOTIFY_ROUTER: {
                time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
 
@@ -3967,7 +4365,8 @@ LNetCtl(unsigned int cmd, void *arg)
                mutex_lock(&the_lnet.ln_api_mutex);
                lp = lnet_find_peer(ping->ping_id.nid);
                if (lp) {
-                       ping->ping_id.nid = lp->lp_primary_nid;
+                       ping->ping_id.nid =
+                               lnet_nid_to_nid4(&lp->lp_primary_nid);
                        ping->mr_info = lnet_peer_is_multi_rail(lp);
                        lnet_peer_decref_locked(lp);
                }
@@ -3990,7 +4389,8 @@ LNetCtl(unsigned int cmd, void *arg)
                mutex_lock(&the_lnet.ln_api_mutex);
                lp = lnet_find_peer(discover->ping_id.nid);
                if (lp) {
-                       discover->ping_id.nid = lp->lp_primary_nid;
+                       discover->ping_id.nid =
+                               lnet_nid_to_nid4(&lp->lp_primary_nid);
                        discover->mr_info = lnet_peer_is_multi_rail(lp);
                        lnet_peer_decref_locked(lp);
                }
@@ -4000,6 +4400,106 @@ LNetCtl(unsigned int cmd, void *arg)
                return 0;
        }
 
+       case IOC_LIBCFS_ADD_UDSP: {
+               struct lnet_ioctl_udsp *ioc_udsp = arg;
+               __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               rc = lnet_udsp_demarshal_add(arg, bulk_size);
+               if (!rc) {
+                       rc = lnet_udsp_apply_policies(NULL, false);
+                       CDEBUG(D_NET, "policy application returned %d\n", rc);
+                       rc = 0;
+               }
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               return rc;
+       }
+
+       case IOC_LIBCFS_DEL_UDSP: {
+               struct lnet_ioctl_udsp *ioc_udsp = arg;
+               int idx = ioc_udsp->iou_idx;
+
+               if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
+                       return -EINVAL;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               rc = lnet_udsp_del_policy(idx);
+               if (!rc) {
+                       rc = lnet_udsp_apply_policies(NULL, false);
+                       CDEBUG(D_NET, "policy re-application returned %d\n",
+                              rc);
+                       rc = 0;
+               }
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               return rc;
+       }
+
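
The ADD_UDSP and DEL_UDSP cases share one idiom: the configuration change itself decides the return code, and a failure to re-apply the policy set afterwards is only logged. A sketch of that idiom, with a hypothetical change_policy() standing in for the demarshal-add or delete step:

	rc = change_policy(arg);		/* hypothetical: add or delete */
	if (!rc) {
		int rc2 = lnet_udsp_apply_policies(NULL, false);

		/* best effort: log the re-application result, keep rc == 0 */
		CDEBUG(D_NET, "policy re-application returned %d\n", rc2);
	}
	return rc;
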
+       case IOC_LIBCFS_GET_UDSP_SIZE: {
+               struct lnet_ioctl_udsp *ioc_udsp = arg;
+               struct lnet_udsp *udsp;
+
+               if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
+                       return -EINVAL;
+
+               rc = 0;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
+               if (!udsp) {
+                       rc = -ENOENT;
+               } else {
+                       /* On input, iou_idx holds the index of the UDSP
+                        * whose size is requested. On output, iou_idx
+                        * holds the size of the UDSP found at that
+                        * index.
+                        */
+                       ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
+                       if (ioc_udsp->iou_idx < 0)
+                               rc = -EINVAL;
+               }
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               return rc;
+       }
+
+       case IOC_LIBCFS_GET_UDSP: {
+               struct lnet_ioctl_udsp *ioc_udsp = arg;
+               struct lnet_udsp *udsp;
+
+               if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
+                       return -EINVAL;
+
+               rc = 0;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
+               if (!udsp)
+                       rc = -ENOENT;
+               else
+                       rc = lnet_udsp_marshal(udsp, ioc_udsp);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               return rc;
+       }
+
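
Retrieving a policy is a two-step protocol: GET_UDSP_SIZE turns an index into a marshalled size (iou_idx is reused as the output field), and GET_UDSP then marshals the policy itself. A sketch under those assumptions, using only the fields visible above and omitting how the marshalled bytes are attached to the request:

	struct lnet_ioctl_udsp ioc = { 0 };
	int rc, size;

	ioc.iou_hdr.ioc_len = sizeof(ioc);
	ioc.iou_idx = idx;			/* index of the policy to query */
	rc = LNetCtl(IOC_LIBCFS_GET_UDSP_SIZE, &ioc);
	if (rc)
		return rc;
	size = ioc.iou_idx;			/* on return, iou_idx holds the size */

	/* allocate 'size' bytes for the marshalled policy, then fetch it */
	ioc.iou_idx = idx;			/* restore the index */
	rc = LNetCtl(IOC_LIBCFS_GET_UDSP, &ioc);
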
+       case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
+               struct lnet_ioctl_construct_udsp_info *info = arg;
+
+               if (info->cud_hdr.ioc_len < sizeof(*info))
+                       return -EINVAL;
+
+               CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
+                      libcfs_nid2str(info->cud_nid));
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               lnet_udsp_get_construct_info(info);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               return 0;
+       }
+
        default:
                ni = lnet_net2ni_addref(data->ioc_net);
                if (ni == NULL)
@@ -4040,7 +4540,7 @@ bool LNetIsPeerLocal(lnet_nid_t nid)
        cpt = lnet_net_lock_current();
        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
-                       if (ni->ni_nid == nid) {
+                       if (lnet_nid_to_nid4(&ni->ni_nid) == nid) {
                                lnet_net_unlock(cpt);
                                return true;
                        }
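
The comparison above narrows each NI's struct lnet_nid down to a legacy four-byte NID, so NIs with large-format addresses can never match. A hedged alternative sketch, assuming the lnet_nid4_to_nid() and nid_same() helpers from the large-NID work are available: widen the legacy NID once and compare in struct lnet_nid space instead.

	struct lnet_nid nid_wide;

	lnet_nid4_to_nid(nid, &nid_wide);	/* widen the legacy nid once */
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (nid_same(&ni->ni_nid, &nid_wide)) {
				lnet_net_unlock(cpt);
				return true;
			}
		}
	}
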
@@ -4077,10 +4577,13 @@ LNetGetId(unsigned int index, struct lnet_process_id *id)
 
        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+                       if (!nid_is_nid4(&ni->ni_nid))
+                               /* FIXME this needs to be handled */
+                               continue;
                        if (index-- != 0)
                                continue;
 
-                       id->nid = ni->ni_nid;
+                       id->nid = lnet_nid_to_nid4(&ni->ni_nid);
                        id->pid = the_lnet.ln_pid;
                        rc = 0;
                        break;
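
A minimal usage sketch for LNetGetId(): keep incrementing the index until the call stops returning 0. Interfaces whose addresses do not fit a nid4 are skipped by the FIXME branch above, so they never appear in this enumeration:

	struct lnet_process_id id;
	unsigned int i;

	for (i = 0; LNetGetId(i, &id) == 0; i++)
		CDEBUG(D_NET, "local NI[%u]: %s\n", i, libcfs_id2str(id));
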
@@ -4160,7 +4663,7 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
 
        init_completion(&pd.completion);
 
-       rc = LNetMDBind(md, LNET_UNLINK, &pd.mdh);
+       rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
        if (rc != 0) {
                CERROR("Can't bind MD: %d\n", rc);
                goto fail_ping_buffer_decref;
@@ -4257,7 +4760,6 @@ lnet_discover(struct lnet_process_id id, __u32 force,
        int cpt;
        int i;
        int rc;
-       int max_intf = lnet_interfaces_max;
 
        if (n_ids <= 0 ||
            id.nid == LNET_NID_ANY)
@@ -4267,11 +4769,11 @@ lnet_discover(struct lnet_process_id id, __u32 force,
                id.pid = LNET_PID_LUSTRE;
 
        /*
-        * if the user buffer has more space than the max_intf
-        * then only fill it up to max_intf
+        * If the user buffer has more space than lnet_interfaces_max
+        * entries, then only fill it up to lnet_interfaces_max.
         */
-       if (n_ids > max_intf)
-               n_ids = max_intf;
+       if (n_ids > lnet_interfaces_max)
+               n_ids = lnet_interfaces_max;
 
        CFS_ALLOC_PTR_ARRAY(buf, n_ids);
        if (!buf)
@@ -4299,33 +4801,36 @@ lnet_discover(struct lnet_process_id id, __u32 force,
        if (rc)
                goto out_decref;
 
-       /* Peer may have changed. */
+       /* The lpni (or lp) for this NID may have changed and our ref is
+        * the only thing keeping the old one around. Release the ref
+        * and look up the lpni again.
+        */
+       lnet_peer_ni_decref_locked(lpni);
+       lpni = lnet_find_peer_ni_locked(id.nid);
+       if (!lpni) {
+               rc = -ENOENT;
+               goto out;
+       }
        lp = lpni->lpni_peer_net->lpn_peer;
-       if (lp->lp_nnis < n_ids)
-               n_ids = lp->lp_nnis;
 
        i = 0;
        p = NULL;
        while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
                buf[i].pid = id.pid;
-               buf[i].nid = p->lpni_nid;
+               buf[i].nid = lnet_nid_to_nid4(&p->lpni_nid);
                if (++i >= n_ids)
                        break;
        }
+       rc = i;
 
-       lnet_net_unlock(cpt);
-
-       rc = -EFAULT;
-       if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
-               goto out_relock;
-       rc = n_ids;
-out_relock:
-       lnet_net_lock(cpt);
 out_decref:
        lnet_peer_ni_decref_locked(lpni);
 out:
        lnet_net_unlock(cpt);
 
+       if (rc >= 0)
+               if (copy_to_user(ids, buf, rc * sizeof(*buf)))
+                       rc = -EFAULT;
        CFS_FREE_PTR_ARRAY(buf, n_ids);
 
        return rc;
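
The rework above drops the old unlock/copy/relock dance: the peer NIDs are snapshotted into the kernel buffer while the net lock is held, the lock is released, and only then is copy_to_user() allowed to fault in the user pages. The general shape, with a hypothetical collect_into() helper:

	lnet_net_lock(cpt);
	count = collect_into(buf, n_ids);	/* hypothetical: fill kernel buffer */
	lnet_net_unlock(cpt);

	/* copy_to_user() may sleep, so it runs only after the lock is dropped */
	if (count >= 0 && copy_to_user(ids, buf, count * sizeof(*buf)))
		count = -EFAULT;
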