LU-12930 various: use schedule_timeout_*interruptible
diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c
index e63feac..d39e6b9 100644
--- a/lnet/lnet/api-ni.c
+++ b/lnet/lnet/api-ni.c
@@ -23,7 +23,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2016, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/ctype.h>
 #include <linux/log2.h>
 #include <linux/ktime.h>
+#include <linux/moduleparam.h>
+#include <linux/uaccess.h>
 
 #include <lnet/lib-lnet.h>
 
 #define D_LNI D_CONSOLE
 
-struct lnet the_lnet;          /* THE state of the network */
+/*
+ * Initialize ln_api_mutex statically, since it is used by the
+ * discovery_set() module parameter callback. That callback can run
+ * before module init completes, so the mutex must be ready for use then.
+ */
+struct lnet the_lnet = {
+       .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
+};             /* THE state of the network */
 EXPORT_SYMBOL(the_lnet);
 
 static char *ip2nets = "";
@@ -68,6 +79,151 @@ MODULE_PARM_DESC(lnet_numa_range,
                "NUMA range to consider during Multi-Rail selection");
 
 /*
+ * lnet_health_sensitivity determines by how much we decrement the health
+ * value on a send error. The value defaults to 100, which means an
+ * interface's health is decremented by 100 points on every failure.
+ */
+unsigned int lnet_health_sensitivity = 100;
+static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_health_sensitivity = {
+       .set = sensitivity_set,
+       .get = param_get_int,
+};
+#define param_check_health_sensitivity(name, p) \
+               __param_check(name, p, int)
+module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
+                 &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_health_sensitivity,
+               "Value to decrement the health value by on error");
+
+/*
+ * lnet_recovery_interval determines how often we should perform recovery
+ * on unhealthy interfaces.
+ */
+unsigned int lnet_recovery_interval = 1;
+static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_recovery_interval = {
+       .set = recovery_interval_set,
+       .get = param_get_int,
+};
+#define param_check_recovery_interval(name, p) \
+               __param_check(name, p, int)
+module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
+                 &lnet_recovery_interval, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_recovery_interval,
+               "Interval to recover unhealthy interfaces in seconds");
+
+static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
+static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_interfaces_max = {
+       .set = intf_max_set,
+       .get = param_get_int,
+};
+
+#define param_check_interfaces_max(name, p) \
+               __param_check(name, p, int)
+
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_interfaces_max, interfaces_max, 0644);
+#else
+module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
+                 &lnet_interfaces_max, 0644);
+#endif
+MODULE_PARM_DESC(lnet_interfaces_max,
+               "Maximum number of interfaces in a node.");
+
+unsigned lnet_peer_discovery_disabled = 0;
+static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_discovery_disabled = {
+       .set = discovery_set,
+       .get = param_get_int,
+};
+
+#define param_check_discovery_disabled(name, p) \
+               __param_check(name, p, int)
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
+#else
+module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
+                 &lnet_peer_discovery_disabled, 0644);
+#endif
+MODULE_PARM_DESC(lnet_peer_discovery_disabled,
+               "Set to 1 to disable peer discovery on this node.");
+
+unsigned int lnet_drop_asym_route;
+static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_drop_asym_route = {
+       .set = drop_asym_route_set,
+       .get = param_get_int,
+};
+
+#define param_check_drop_asym_route(name, p)   \
+       __param_check(name, p, int)
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_drop_asym_route, drop_asym_route, 0644);
+#else
+module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
+                 &lnet_drop_asym_route, 0644);
+#endif
+MODULE_PARM_DESC(lnet_drop_asym_route,
+                "Set to 1 to drop asymmetrical route messages.");
+
+#define LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT 50
+#define LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT 50
+
+unsigned lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
+static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_transaction_timeout = {
+       .set = transaction_to_set,
+       .get = param_get_int,
+};
+
+#define param_check_transaction_timeout(name, p) \
+               __param_check(name, p, int)
+module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
+                 &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_transaction_timeout,
+               "Maximum number of seconds to wait for a peer response.");
+
+#define LNET_RETRY_COUNT_HEALTH_DEFAULT 2
+unsigned lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
+static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_retry_count = {
+       .set = retry_count_set,
+       .get = param_get_int,
+};
+
+#define param_check_retry_count(name, p) \
+               __param_check(name, p, int)
+module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_retry_count, retry_count_set, param_get_int,
+                 &lnet_retry_count, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_retry_count,
+                "Maximum number of times to retry transmitting a message");
+
+unsigned lnet_lnd_timeout = LNET_LND_DEFAULT_TIMEOUT;
+unsigned int lnet_current_net_count;
+
+/*
  * This sequence number keeps track of how many times DLC was used to
  * update the local NIs. It is incremented when a NI is added or
  * removed and checked when sending a message to determine if there is
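
For reference (not part of the patch), the tunables above all repeat the same
registration boilerplate. A condensed sketch of that pattern for a
hypothetical parameter, lnet_example, is shown here; lnet_example and
example_set are illustrative names, while HAVE_KERNEL_PARAM_OPS,
cfs_kernel_param_arg_t and ln_api_mutex come from the code itself.

    static unsigned int lnet_example = 1;

    static int example_set(const char *val, cfs_kernel_param_arg_t *kp)
    {
            unsigned long value;
            int rc = kstrtoul(val, 0, &value);

            if (rc)
                    return rc;
            /* serialize with other configuration changes */
            mutex_lock(&the_lnet.ln_api_mutex);
            *(unsigned int *)kp->arg = value;
            mutex_unlock(&the_lnet.ln_api_mutex);
            return 0;
    }

    #ifdef HAVE_KERNEL_PARAM_OPS
    static struct kernel_param_ops param_ops_example = {
            .set = example_set,
            .get = param_get_int,
    };
    #define param_check_example(name, p) __param_check(name, p, int)
    /* module_param() resolves param_ops_example via the "example" type */
    module_param(lnet_example, example, 0644);
    #else
    /* older kernels: pass the callbacks and the variable directly */
    module_param_call(lnet_example, example_set, param_get_int,
                      &lnet_example, 0644);
    #endif
    MODULE_PARM_DESC(lnet_example, "Illustrative parameter only");
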
@@ -79,6 +235,282 @@ static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
 static int lnet_ping(struct lnet_process_id id, signed long timeout,
                     struct lnet_process_id __user *ids, int n_ids);
 
+static int lnet_discover(struct lnet_process_id id, __u32 force,
+                        struct lnet_process_id __user *ids, int n_ids);
+
+static int
+sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int rc;
+       unsigned *sensitivity = (unsigned *)kp->arg;
+       unsigned long value;
+
+       rc = kstrtoul(val, 0, &value);
+       if (rc) {
+               CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
+               return rc;
+       }
+
+       /*
+        * The purpose of locking the api_mutex here is to ensure that
+        * the correct value ends up stored properly.
+        */
+       mutex_lock(&the_lnet.ln_api_mutex);
+
+       if (value > LNET_MAX_HEALTH_VALUE) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               CERROR("Invalid health sensitivity value %lu (max %d)\n",
+                      value, LNET_MAX_HEALTH_VALUE);
+               return -EINVAL;
+       }
+
+       /*
+        * if we're turning on health then use the health timeout
+        * defaults.
+        */
+       if (*sensitivity == 0 && value != 0) {
+               lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
+               lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
+       /*
+        * if we're turning off health then use the no health timeout
+        * default.
+        */
+       } else if (*sensitivity != 0 && value == 0) {
+               lnet_transaction_timeout =
+                       LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT;
+               lnet_retry_count = 0;
+       }
+
+       *sensitivity = value;
+
+       mutex_unlock(&the_lnet.ln_api_mutex);
+
+       return 0;
+}
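
A worked illustration (not part of the patch), assuming an interface starts
fully healthy at LNET_MAX_HEALTH_VALUE: each failed send subtracts
lnet_health_sensitivity points, so the number of consecutive failures needed
to drive the interface down to zero health is:

    /* Illustration only: consecutive send failures from full health
     * (LNET_MAX_HEALTH_VALUE) down to zero, given the sensitivity.
     */
    static inline unsigned int
    example_failures_to_zero(unsigned int sensitivity)
    {
            /* sensitivity == 0 means the health feature is disabled */
            if (sensitivity == 0)
                    return 0;
            return LNET_MAX_HEALTH_VALUE / sensitivity;
    }

With the default sensitivity of 100 this is LNET_MAX_HEALTH_VALUE / 100
consecutive failures.
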
+
+static int
+recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int rc;
+       unsigned *interval = (unsigned *)kp->arg;
+       unsigned long value;
+
+       rc = kstrtoul(val, 0, &value);
+       if (rc) {
+               CERROR("Invalid module parameter value for 'lnet_recovery_interval'\n");
+               return rc;
+       }
+
+       if (value < 1) {
+               CERROR("lnet_recovery_interval must be at least 1 second\n");
+               return -EINVAL;
+       }
+
+       /*
+        * The purpose of locking the api_mutex here is to ensure that
+        * the correct value ends up stored properly.
+        */
+       mutex_lock(&the_lnet.ln_api_mutex);
+
+       *interval = value;
+
+       mutex_unlock(&the_lnet.ln_api_mutex);
+
+       return 0;
+}
+
+static int
+discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int rc;
+       unsigned *discovery = (unsigned *)kp->arg;
+       unsigned long value;
+       struct lnet_ping_buffer *pbuf;
+
+       rc = kstrtoul(val, 0, &value);
+       if (rc) {
+               CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
+               return rc;
+       }
+
+       value = (value) ? 1 : 0;
+
+       /*
+        * The purpose of locking the api_mutex here is to ensure that
+        * the correct value ends up stored properly.
+        */
+       mutex_lock(&the_lnet.ln_api_mutex);
+
+       if (value == *discovery) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
+       *discovery = value;
+
+       if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
+       /* tell peers that discovery setting has changed */
+       lnet_net_lock(LNET_LOCK_EX);
+       pbuf = the_lnet.ln_ping_target;
+       if (value)
+               pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
+       else
+               pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       lnet_push_update_to_peers(1);
+
+       mutex_unlock(&the_lnet.ln_api_mutex);
+
+       return 0;
+}
+
+static int
+drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int rc;
+       unsigned int *drop_asym_route = (unsigned int *)kp->arg;
+       unsigned long value;
+
+       rc = kstrtoul(val, 0, &value);
+       if (rc) {
+               CERROR("Invalid module parameter value for 'lnet_drop_asym_route'\n");
+               return rc;
+       }
+
+       /*
+        * The purpose of locking the api_mutex here is to ensure that
+        * the correct value ends up stored properly.
+        */
+       mutex_lock(&the_lnet.ln_api_mutex);
+
+       if (value == *drop_asym_route) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
+       *drop_asym_route = value;
+
+       mutex_unlock(&the_lnet.ln_api_mutex);
+
+       return 0;
+}
+
+static int
+transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int rc;
+       unsigned *transaction_to = (unsigned *)kp->arg;
+       unsigned long value;
+
+       rc = kstrtoul(val, 0, &value);
+       if (rc) {
+               CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
+               return rc;
+       }
+
+       /*
+        * The purpose of locking the api_mutex here is to ensure that
+        * the correct value ends up stored properly.
+        */
+       mutex_lock(&the_lnet.ln_api_mutex);
+
+       if (value < lnet_retry_count || value == 0) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               CERROR("Invalid value for lnet_transaction_timeout (%lu). "
+                      "It must be at least lnet_retry_count (%u) and non-zero\n",
+                      value, lnet_retry_count);
+               return -EINVAL;
+       }
+
+       if (value == *transaction_to) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
+
+       *transaction_to = value;
+       if (lnet_retry_count == 0)
+               lnet_lnd_timeout = value;
+       else
+               lnet_lnd_timeout = value / lnet_retry_count;
+
+       mutex_unlock(&the_lnet.ln_api_mutex);
+
+       return 0;
+}
+
+static int
+retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int rc;
+       unsigned *retry_count = (unsigned *)kp->arg;
+       unsigned long value;
+
+       rc = kstrtoul(val, 0, &value);
+       if (rc) {
+               CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
+               return rc;
+       }
+
+       /*
+        * The purpose of locking the api_mutex here is to ensure that
+        * the correct value ends up stored properly.
+        */
+       mutex_lock(&the_lnet.ln_api_mutex);
+
+       if (lnet_health_sensitivity == 0) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               CERROR("Cannot set retry_count when the health feature is turned off\n");
+               return -EINVAL;
+       }
+
+       if (value > lnet_transaction_timeout) {
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               CERROR("Invalid value for lnet_retry_count (%lu). "
+                      "It must not exceed lnet_transaction_timeout (%u)\n",
+                      value, lnet_transaction_timeout);
+               return -EINVAL;
+       }
+
+       *retry_count = value;
+
+       if (value == 0)
+               lnet_lnd_timeout = lnet_transaction_timeout;
+       else
+               lnet_lnd_timeout = lnet_transaction_timeout / value;
+
+       mutex_unlock(&the_lnet.ln_api_mutex);
+
+       return 0;
+}
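
A minimal sketch (illustration only) of the relationship enforced by the two
setters above: lnet_transaction_timeout is the total time budget for a
message, and lnet_lnd_timeout is that budget divided across the retry
attempts.

    /* Illustration only: with the defaults above (timeout 50, retries 2)
     * each LND-level attempt is given 50 / 2 = 25 seconds.
     */
    static unsigned int
    example_lnd_timeout(unsigned int transaction_timeout,
                        unsigned int retry_count)
    {
            if (retry_count == 0)
                    return transaction_timeout;
            return transaction_timeout / retry_count;
    }
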
+
+static int
+intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+       int value, rc;
+
+       rc = kstrtoint(val, 0, &value);
+       if (rc) {
+               CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
+               return rc;
+       }
+
+       if (value < LNET_INTERFACES_MIN) {
+               CWARN("lnet_interfaces_max is too small, setting to %d\n",
+                     LNET_INTERFACES_MAX_DEFAULT);
+               value = LNET_INTERFACES_MAX_DEFAULT;
+       }
+
+       *(int *)kp->arg = value;
+
+       return 0;
+}
+
 static char *
 lnet_get_routes(void)
 {
@@ -112,23 +544,20 @@ static void
 lnet_init_locks(void)
 {
        spin_lock_init(&the_lnet.ln_eq_wait_lock);
+       spin_lock_init(&the_lnet.ln_msg_resend_lock);
        init_waitqueue_head(&the_lnet.ln_eq_waitq);
-       init_waitqueue_head(&the_lnet.ln_rc_waitq);
+       init_completion(&the_lnet.ln_mt_wait_complete);
        mutex_init(&the_lnet.ln_lnd_mutex);
-       mutex_init(&the_lnet.ln_api_mutex);
-}
-
-static void
-lnet_fini_locks(void)
-{
 }
 
 struct kmem_cache *lnet_mes_cachep;       /* MEs kmem_cache */
 struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
                                            *  MDs kmem_cache */
+struct kmem_cache *lnet_rspt_cachep;      /* response tracker cache */
+struct kmem_cache *lnet_msg_cachep;
 
 static int
-lnet_descriptor_setup(void)
+lnet_slab_setup(void)
 {
        /* create specific kmem_cache for MEs and small MDs (i.e., originally
         * allocated in <size-xxx> kmem_cache).
@@ -144,12 +573,32 @@ lnet_descriptor_setup(void)
        if (!lnet_small_mds_cachep)
                return -ENOMEM;
 
+       lnet_rspt_cachep = kmem_cache_create("lnet_rspt",
+                                            sizeof(struct lnet_rsp_tracker),
+                                            0, 0, NULL);
+       if (!lnet_rspt_cachep)
+               return -ENOMEM;
+
+       lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
+                                           0, 0, NULL);
+       if (!lnet_msg_cachep)
+               return -ENOMEM;
+
        return 0;
 }
 
 static void
-lnet_descriptor_cleanup(void)
+lnet_slab_cleanup(void)
 {
+       if (lnet_msg_cachep) {
+               kmem_cache_destroy(lnet_msg_cachep);
+               lnet_msg_cachep = NULL;
+       }
+
+       if (lnet_rspt_cachep) {
+               kmem_cache_destroy(lnet_rspt_cachep);
+               lnet_rspt_cachep = NULL;
+       }
 
        if (lnet_small_mds_cachep) {
                kmem_cache_destroy(lnet_small_mds_cachep);
@@ -211,8 +660,6 @@ lnet_destroy_locks(void)
                cfs_percpt_lock_free(the_lnet.ln_net_lock);
                the_lnet.ln_net_lock = NULL;
        }
-
-       lnet_fini_locks();
 }
 
 static int
@@ -243,116 +690,160 @@ static void lnet_assert_wire_constants(void)
         * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
 
        /* Constants... */
-       CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
-       CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
-       CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
-       CLASSERT(LNET_MSG_ACK == 0);
-       CLASSERT(LNET_MSG_PUT == 1);
-       CLASSERT(LNET_MSG_GET == 2);
-       CLASSERT(LNET_MSG_REPLY == 3);
-       CLASSERT(LNET_MSG_HELLO == 4);
+       BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
+       BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
+       BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
+       BUILD_BUG_ON(LNET_MSG_ACK != 0);
+       BUILD_BUG_ON(LNET_MSG_PUT != 1);
+       BUILD_BUG_ON(LNET_MSG_GET != 2);
+       BUILD_BUG_ON(LNET_MSG_REPLY != 3);
+       BUILD_BUG_ON(LNET_MSG_HELLO != 4);
 
        /* Checks for struct lnet_handle_wire */
-       CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
-       CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
-       CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
-       CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
-       CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
+       BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
+       BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
+                                  wh_interface_cookie) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
+                                  wh_object_cookie) != 8);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
 
        /* Checks for struct lnet_magicversion */
-       CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
-       CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
-       CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
-       CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
-       CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
-       CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
-       CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
+       BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
+       BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
+                                  version_minor) != 6);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
 
        /* Checks for struct lnet_hdr */
-       CLASSERT((int)sizeof(struct lnet_hdr) == 72);
-       CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
-       CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
-       CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
-       CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
-       CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
-       CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
+       BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);
 
        /* Ack */
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);
 
        /* Put */
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);
 
        /* Get */
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);
 
        /* Reply */
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);
 
        /* Hello */
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
-       CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
-       CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);
+
+       /* Checks for struct lnet_ni_status and related constants */
+       BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
+       BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
+       BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);
+
+       /* Checks for struct lnet_ni_status */
+       BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
+       BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
+       BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_unused) != 12);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) != 4);
+
+       /* Checks for struct lnet_ping_info and related constants */
+       BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
+       BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
+       BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
+       BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
+       BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
+       BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
+       BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
+       BUILD_BUG_ON(LNET_PING_FEAT_BITS != 31);
+
+       /* Checks for struct lnet_ping_info */
+       BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
+       BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
+       BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
+       BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
 }
 
-static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
+static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
 {
-       struct lnet_lnd *lnd;
-       struct list_head *tmp;
+       const struct lnet_lnd *lnd;
 
        /* holding lnd mutex */
-       list_for_each(tmp, &the_lnet.ln_lnds) {
-               lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
+       if (type >= NUM_LNDS)
+               return NULL;
+       lnd = the_lnet.ln_lnds[type];
+       LASSERT(!lnd || lnd->lnd_type == type);
 
-               if (lnd->lnd_type == type)
-                       return lnd;
-       }
-       return NULL;
+       return lnd;
 }
 
+unsigned int
+lnet_get_lnd_timeout(void)
+{
+       return lnet_lnd_timeout;
+}
+EXPORT_SYMBOL(lnet_get_lnd_timeout);
+
 void
-lnet_register_lnd(struct lnet_lnd *lnd)
+lnet_register_lnd(const struct lnet_lnd *lnd)
 {
        mutex_lock(&the_lnet.ln_lnd_mutex);
 
        LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
        LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
 
-       list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
-       lnd->lnd_refcount = 0;
+       the_lnet.ln_lnds[lnd->lnd_type] = lnd;
 
        CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
 
@@ -361,14 +852,13 @@ lnet_register_lnd(struct lnet_lnd *lnd)
 EXPORT_SYMBOL(lnet_register_lnd);
 
 void
-lnet_unregister_lnd(struct lnet_lnd *lnd)
+lnet_unregister_lnd(const struct lnet_lnd *lnd)
 {
        mutex_lock(&the_lnet.ln_lnd_mutex);
 
        LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
-       LASSERT(lnd->lnd_refcount == 0);
 
-       list_del(&lnd->lnd_list);
+       the_lnet.ln_lnds[lnd->lnd_type] = NULL;
        CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
 
        mutex_unlock(&the_lnet.ln_lnd_mutex);
@@ -376,28 +866,70 @@ lnet_unregister_lnd(struct lnet_lnd *lnd)
 EXPORT_SYMBOL(lnet_unregister_lnd);
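
For context, a minimal sketch (not from this patch) of how an LND module
uses the table-based registry above; the_example_lnd and the init/exit
functions are hypothetical, and only lnd_type is filled in.

    static const struct lnet_lnd the_example_lnd = {
            .lnd_type = SOCKLND,    /* must be a known type below NUM_LNDS */
            /* .lnd_startup, .lnd_shutdown, ... omitted in this sketch */
    };

    static int __init example_lnd_init(void)
    {
            lnet_register_lnd(&the_example_lnd);
            return 0;
    }

    static void __exit example_lnd_exit(void)
    {
            lnet_unregister_lnd(&the_example_lnd);
    }
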
 
 void
+lnet_counters_get_common(struct lnet_counters_common *common)
+{
+       struct lnet_counters *ctr;
+       int i;
+
+       memset(common, 0, sizeof(*common));
+
+       lnet_net_lock(LNET_LOCK_EX);
+
+       cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
+               common->lcc_msgs_max     += ctr->lct_common.lcc_msgs_max;
+               common->lcc_msgs_alloc   += ctr->lct_common.lcc_msgs_alloc;
+               common->lcc_errors       += ctr->lct_common.lcc_errors;
+               common->lcc_send_count   += ctr->lct_common.lcc_send_count;
+               common->lcc_recv_count   += ctr->lct_common.lcc_recv_count;
+               common->lcc_route_count  += ctr->lct_common.lcc_route_count;
+               common->lcc_drop_count   += ctr->lct_common.lcc_drop_count;
+               common->lcc_send_length  += ctr->lct_common.lcc_send_length;
+               common->lcc_recv_length  += ctr->lct_common.lcc_recv_length;
+               common->lcc_route_length += ctr->lct_common.lcc_route_length;
+               common->lcc_drop_length  += ctr->lct_common.lcc_drop_length;
+       }
+       lnet_net_unlock(LNET_LOCK_EX);
+}
+EXPORT_SYMBOL(lnet_counters_get_common);
+
+void
 lnet_counters_get(struct lnet_counters *counters)
 {
        struct lnet_counters *ctr;
+       struct lnet_counters_health *health = &counters->lct_health;
        int             i;
 
        memset(counters, 0, sizeof(*counters));
 
+       lnet_counters_get_common(&counters->lct_common);
+
        lnet_net_lock(LNET_LOCK_EX);
 
        cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
-               counters->msgs_max     += ctr->msgs_max;
-               counters->msgs_alloc   += ctr->msgs_alloc;
-               counters->errors       += ctr->errors;
-               counters->send_count   += ctr->send_count;
-               counters->recv_count   += ctr->recv_count;
-               counters->route_count  += ctr->route_count;
-               counters->drop_count   += ctr->drop_count;
-               counters->send_length  += ctr->send_length;
-               counters->recv_length  += ctr->recv_length;
-               counters->route_length += ctr->route_length;
-               counters->drop_length  += ctr->drop_length;
-
+               health->lch_rst_alloc    += ctr->lct_health.lch_rst_alloc;
+               health->lch_resend_count += ctr->lct_health.lch_resend_count;
+               health->lch_response_timeout_count +=
+                               ctr->lct_health.lch_response_timeout_count;
+               health->lch_local_interrupt_count +=
+                               ctr->lct_health.lch_local_interrupt_count;
+               health->lch_local_dropped_count +=
+                               ctr->lct_health.lch_local_dropped_count;
+               health->lch_local_aborted_count +=
+                               ctr->lct_health.lch_local_aborted_count;
+               health->lch_local_no_route_count +=
+                               ctr->lct_health.lch_local_no_route_count;
+               health->lch_local_timeout_count +=
+                               ctr->lct_health.lch_local_timeout_count;
+               health->lch_local_error_count +=
+                               ctr->lct_health.lch_local_error_count;
+               health->lch_remote_dropped_count +=
+                               ctr->lct_health.lch_remote_dropped_count;
+               health->lch_remote_error_count +=
+                               ctr->lct_health.lch_remote_error_count;
+               health->lch_remote_timeout_count +=
+                               ctr->lct_health.lch_remote_timeout_count;
+               health->lch_network_timeout_count +=
+                               ctr->lct_health.lch_network_timeout_count;
        }
        lnet_net_unlock(LNET_LOCK_EX);
 }
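
A usage sketch (illustration only) of the new aggregation helper; the caller
gets node-wide totals summed across the per-CPT counters.

    static void example_read_counters(void)
    {
            struct lnet_counters_common total;

            /* sums the per-CPT counters under the exclusive net lock */
            lnet_counters_get_common(&total);
            /* total.lcc_send_count, total.lcc_drop_count, ... now hold
             * node-wide values.
             */
    }
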
@@ -582,6 +1114,26 @@ lnet_res_lh_initialize(struct lnet_res_container *rec,
        list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
 }
 
+struct list_head **
+lnet_create_array_of_queues(void)
+{
+       struct list_head **qs;
+       struct list_head *q;
+       int i;
+
+       qs = cfs_percpt_alloc(lnet_cpt_table(),
+                             sizeof(struct list_head));
+       if (!qs) {
+               CERROR("Failed to allocate queues\n");
+               return NULL;
+       }
+
+       cfs_percpt_for_each(q, i, qs)
+               INIT_LIST_HEAD(q);
+
+       return qs;
+}
+
 static int lnet_unprepare(void);
 
 static int
@@ -604,14 +1156,21 @@ lnet_prepare(lnet_pid_t requested_pid)
        the_lnet.ln_pid = requested_pid;
 
        INIT_LIST_HEAD(&the_lnet.ln_test_peers);
-       INIT_LIST_HEAD(&the_lnet.ln_peers);
        INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
        INIT_LIST_HEAD(&the_lnet.ln_nets);
        INIT_LIST_HEAD(&the_lnet.ln_routers);
        INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
        INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
-
-       rc = lnet_descriptor_setup();
+       INIT_LIST_HEAD(&the_lnet.ln_dc_request);
+       INIT_LIST_HEAD(&the_lnet.ln_dc_working);
+       INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
+       INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
+       INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
+       init_waitqueue_head(&the_lnet.ln_dc_waitq);
+       LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
+       init_completion(&the_lnet.ln_started);
+
+       rc = lnet_slab_setup();
        if (rc != 0)
                goto failed;
 
@@ -646,14 +1205,6 @@ lnet_prepare(lnet_pid_t requested_pid)
        if (rc != 0)
                goto failed;
 
-       recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
-       if (recs == NULL) {
-               rc = -ENOMEM;
-               goto failed;
-       }
-
-       the_lnet.ln_me_containers = recs;
-
        recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
        if (recs == NULL) {
                rc = -ENOMEM;
@@ -668,6 +1219,12 @@ lnet_prepare(lnet_pid_t requested_pid)
                goto failed;
        }
 
+       the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
+       if (!the_lnet.ln_mt_zombie_rstqs) {
+               rc = -ENOMEM;
+               goto failed;
+       }
+
        return 0;
 
  failed:
@@ -678,6 +1235,8 @@ lnet_prepare(lnet_pid_t requested_pid)
 static int
 lnet_unprepare (void)
 {
+       int rc;
+
        /* NB no LNET_LOCK since this is the last reference.  All LND instances
         * have shut down already, so it is safe to unlink and free all
         * descriptors, even those that appear committed to a network op (eg MD
@@ -689,6 +1248,17 @@ lnet_unprepare (void)
        LASSERT(list_empty(&the_lnet.ln_test_peers));
        LASSERT(list_empty(&the_lnet.ln_nets));
 
+       if (the_lnet.ln_mt_zombie_rstqs) {
+               lnet_clean_zombie_rstqs();
+               the_lnet.ln_mt_zombie_rstqs = NULL;
+       }
+
+       if (!LNetEQHandleIsInvalid(the_lnet.ln_mt_eqh)) {
+               rc = LNetEQFree(the_lnet.ln_mt_eqh);
+               LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
+               LASSERT(rc == 0);
+       }
+
        lnet_portals_destroy();
 
        if (the_lnet.ln_md_containers != NULL) {
@@ -696,11 +1266,6 @@ lnet_unprepare (void)
                the_lnet.ln_md_containers = NULL;
        }
 
-       if (the_lnet.ln_me_containers != NULL) {
-               lnet_res_containers_destroy(the_lnet.ln_me_containers);
-               the_lnet.ln_me_containers = NULL;
-       }
-
        lnet_res_container_cleanup(&the_lnet.ln_eq_container);
 
        lnet_msg_containers_destroy();
@@ -712,7 +1277,7 @@ lnet_unprepare (void)
                the_lnet.ln_counters = NULL;
        }
        lnet_destroy_remote_nets_table();
-       lnet_descriptor_cleanup();
+       lnet_slab_cleanup();
 
        return 0;
 }
@@ -837,31 +1402,31 @@ lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
 EXPORT_SYMBOL(lnet_cpt_of_nid);
 
 int
-lnet_islocalnet(__u32 net_id)
+lnet_islocalnet_locked(__u32 net_id)
 {
        struct lnet_net *net;
-       int             cpt;
-       bool            local;
-
-       cpt = lnet_net_lock_current();
+       bool local;
 
        net = lnet_get_net_locked(net_id);
 
        local = net != NULL;
 
-       lnet_net_unlock(cpt);
-
        return local;
 }
 
-bool
-lnet_is_ni_healthy_locked(struct lnet_ni *ni)
+int
+lnet_islocalnet(__u32 net_id)
 {
-       if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
-           ni->ni_state == LNET_NI_STATE_DEGRADED)
-               return true;
+       int cpt;
+       bool local;
 
-       return false;
+       cpt = lnet_net_lock_current();
+
+       local = lnet_islocalnet_locked(net_id);
+
+       lnet_net_unlock(cpt);
+
+       return local;
 }
 
 struct lnet_ni  *
@@ -931,25 +1496,45 @@ lnet_count_acceptor_nets(void)
        return count;
 }
 
-static struct lnet_ping_info *
-lnet_ping_info_create(int num_ni)
+struct lnet_ping_buffer *
+lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
+{
+       struct lnet_ping_buffer *pbuf;
+
+       LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
+       if (pbuf) {
+               pbuf->pb_nnis = nnis;
+               atomic_set(&pbuf->pb_refcnt, 1);
+       }
+
+       return pbuf;
+}
+
+void
+lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
+{
+       LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
+       LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
+}
+
+static struct lnet_ping_buffer *
+lnet_ping_target_create(int nnis)
 {
-       struct lnet_ping_info *ping_info;
-       unsigned int     infosz;
+       struct lnet_ping_buffer *pbuf;
 
-       infosz = offsetof(struct lnet_ping_info, pi_ni[num_ni]);
-       LIBCFS_ALLOC(ping_info, infosz);
-       if (ping_info == NULL) {
-               CERROR("Can't allocate ping info[%d]\n", num_ni);
+       pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
+       if (pbuf == NULL) {
+               CERROR("Can't allocate ping source [%d]\n", nnis);
                return NULL;
        }
 
-       ping_info->pi_nnis = num_ni;
-       ping_info->pi_pid = the_lnet.ln_pid;
-       ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
-       ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
+       pbuf->pb_info.pi_nnis = nnis;
+       pbuf->pb_info.pi_pid = the_lnet.ln_pid;
+       pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
+       pbuf->pb_info.pi_features =
+               LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
 
-       return ping_info;
+       return pbuf;
 }
 
 static inline int
@@ -995,16 +1580,63 @@ lnet_get_ni_count(void)
        return count;
 }
 
-static inline void
-lnet_ping_info_free(struct lnet_ping_info *pinfo)
+int
+lnet_get_net_count(void)
+{
+       struct lnet_net *net;
+       int count = 0;
+
+       lnet_net_lock(0);
+
+       list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
+               count++;
+       }
+
+       lnet_net_unlock(0);
+
+       return count;
+}
+
+void
+lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
+{
+       struct lnet_ni_status *stat;
+       int nnis;
+       int i;
+
+       __swab32s(&pbuf->pb_info.pi_magic);
+       __swab32s(&pbuf->pb_info.pi_features);
+       __swab32s(&pbuf->pb_info.pi_pid);
+       __swab32s(&pbuf->pb_info.pi_nnis);
+       nnis = pbuf->pb_info.pi_nnis;
+       if (nnis > pbuf->pb_nnis)
+               nnis = pbuf->pb_nnis;
+       for (i = 0; i < nnis; i++) {
+               stat = &pbuf->pb_info.pi_ni[i];
+               __swab64s(&stat->ns_nid);
+               __swab32s(&stat->ns_status);
+       }
+}
+
+int
+lnet_ping_info_validate(struct lnet_ping_info *pinfo)
 {
-       LIBCFS_FREE(pinfo,
-                   offsetof(struct lnet_ping_info,
-                            pi_ni[pinfo->pi_nnis]));
+       if (!pinfo)
+               return -EINVAL;
+       if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
+               return -EPROTO;
+       if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
+               return -EPROTO;
+       /* Loopback is guaranteed to be present */
+       if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
+               return -ERANGE;
+       if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
+               return -EPROTO;
+       return 0;
 }
 
 static void
-lnet_ping_info_destroy(void)
+lnet_ping_target_destroy(void)
 {
        struct lnet_net *net;
        struct lnet_ni  *ni;
@@ -1019,166 +1651,189 @@ lnet_ping_info_destroy(void)
                }
        }
 
-       lnet_ping_info_free(the_lnet.ln_ping_info);
-       the_lnet.ln_ping_info = NULL;
+       lnet_ping_buffer_decref(the_lnet.ln_ping_target);
+       the_lnet.ln_ping_target = NULL;
 
        lnet_net_unlock(LNET_LOCK_EX);
 }
 
 static void
-lnet_ping_event_handler(struct lnet_event *event)
+lnet_ping_target_event_handler(struct lnet_event *event)
 {
-       struct lnet_ping_info *pinfo = event->md.user_ptr;
+       struct lnet_ping_buffer *pbuf = event->md.user_ptr;
 
        if (event->unlinked)
-               pinfo->pi_features = LNET_PING_FEAT_INVAL;
+               lnet_ping_buffer_decref(pbuf);
 }
 
 static int
-lnet_ping_info_setup(struct lnet_ping_info **ppinfo,
-                    struct lnet_handle_md *md_handle,
-                    int ni_count, bool set_eq)
+lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
+                      struct lnet_handle_md *ping_mdh,
+                      int ni_count, bool set_eq)
 {
        struct lnet_process_id id = {
                .nid = LNET_NID_ANY,
                .pid = LNET_PID_ANY
        };
-       struct lnet_handle_me me_handle;
+       struct lnet_me *me;
        struct lnet_md md = { NULL };
        int rc, rc2;
 
        if (set_eq) {
-               rc = LNetEQAlloc(0, lnet_ping_event_handler,
+               rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
                                 &the_lnet.ln_ping_target_eq);
                if (rc != 0) {
-                       CERROR("Can't allocate ping EQ: %d\n", rc);
+                       CERROR("Can't allocate ping buffer EQ: %d\n", rc);
                        return rc;
                }
        }
 
-       *ppinfo = lnet_ping_info_create(ni_count);
-       if (*ppinfo == NULL) {
+       *ppbuf = lnet_ping_target_create(ni_count);
+       if (*ppbuf == NULL) {
                rc = -ENOMEM;
-               goto failed_0;
+               goto fail_free_eq;
        }
 
-       rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+       /* Ping target ME/MD */
+       me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
                          LNET_PROTO_PING_MATCHBITS, 0,
-                         LNET_UNLINK, LNET_INS_AFTER,
-                         &me_handle);
-       if (rc != 0) {
-               CERROR("Can't create ping ME: %d\n", rc);
-               goto failed_1;
+                         LNET_UNLINK, LNET_INS_AFTER);
+       if (IS_ERR(me)) {
+               rc = PTR_ERR(me);
+               CERROR("Can't create ping target ME: %d\n", rc);
+               goto fail_decref_ping_buffer;
        }
 
        /* initialize md content */
-       md.start     = *ppinfo;
-       md.length    = offsetof(struct lnet_ping_info,
-                               pi_ni[(*ppinfo)->pi_nnis]);
+       md.start     = &(*ppbuf)->pb_info;
+       md.length    = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
        md.threshold = LNET_MD_THRESH_INF;
        md.max_size  = 0;
        md.options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
                       LNET_MD_MANAGE_REMOTE;
-       md.user_ptr  = NULL;
        md.eq_handle = the_lnet.ln_ping_target_eq;
-       md.user_ptr = *ppinfo;
+       md.user_ptr  = *ppbuf;
 
-       rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
+       rc = LNetMDAttach(me, md, LNET_RETAIN, ping_mdh);
        if (rc != 0) {
-               CERROR("Can't attach ping MD: %d\n", rc);
-               goto failed_2;
+               CERROR("Can't attach ping target MD: %d\n", rc);
+               goto fail_unlink_ping_me;
        }
+       lnet_ping_buffer_addref(*ppbuf);
 
        return 0;
 
-failed_2:
-       rc2 = LNetMEUnlink(me_handle);
-       LASSERT(rc2 == 0);
-failed_1:
-       lnet_ping_info_free(*ppinfo);
-       *ppinfo = NULL;
-failed_0:
-       if (set_eq)
-               LNetEQFree(the_lnet.ln_ping_target_eq);
+fail_unlink_ping_me:
+       LNetMEUnlink(me);
+fail_decref_ping_buffer:
+       LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
+       lnet_ping_buffer_decref(*ppbuf);
+       *ppbuf = NULL;
+fail_free_eq:
+       if (set_eq) {
+               rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
+               LASSERT(rc2 == 0);
+       }
        return rc;
 }
 
 static void
-lnet_ping_md_unlink(struct lnet_ping_info *pinfo, struct lnet_handle_md *md_handle)
+lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
+                   struct lnet_handle_md *ping_mdh)
 {
        sigset_t        blocked = cfs_block_allsigs();
 
-       LNetMDUnlink(*md_handle);
-       LNetInvalidateMDHandle(md_handle);
+       LNetMDUnlink(*ping_mdh);
+       LNetInvalidateMDHandle(ping_mdh);
 
-       /* NB md could be busy; this just starts the unlink */
-       while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
-               CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+       /* NB the MD could be busy; this just starts the unlink */
+       while (atomic_read(&pbuf->pb_refcnt) > 1) {
+               CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
        }
 
        cfs_restore_sigs(blocked);
 }
 
 static void
-lnet_ping_info_install_locked(struct lnet_ping_info *ping_info)
+lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
 {
-       int                     i;
        struct lnet_ni          *ni;
        struct lnet_net         *net;
        struct lnet_ni_status *ns;
+       int                     i;
+       int                     rc;
 
        i = 0;
        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
-                       LASSERT(i < ping_info->pi_nnis);
+                       LASSERT(i < pbuf->pb_nnis);
 
-                       ns = &ping_info->pi_ni[i];
+                       ns = &pbuf->pb_info.pi_ni[i];
 
                        ns->ns_nid = ni->ni_nid;
 
                        lnet_ni_lock(ni);
                        ns->ns_status = (ni->ni_status != NULL) ?
-                                       ni->ni_status->ns_status :
+                                        ni->ni_status->ns_status :
                                                LNET_NI_STATUS_UP;
                        ni->ni_status = ns;
                        lnet_ni_unlock(ni);
 
                        i++;
                }
-
        }
+       /*
+        * We (ab)use the ns_status of the loopback interface to
+        * transmit the sequence number. The first interface listed
+        * must be the loopback interface.
+        */
+       rc = lnet_ping_info_validate(&pbuf->pb_info);
+       if (rc) {
+               LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
+               LBUG();
+       }
+       LNET_PING_BUFFER_SEQNO(pbuf) =
+               atomic_inc_return(&the_lnet.ln_ping_target_seqno);
 }
 
 static void
-lnet_ping_target_update(struct lnet_ping_info *pinfo,
-                       struct lnet_handle_md md_handle)
+lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
+                       struct lnet_handle_md ping_mdh)
 {
-       struct lnet_ping_info *old_pinfo = NULL;
-       struct lnet_handle_md old_md;
+       struct lnet_ping_buffer *old_pbuf = NULL;
+       struct lnet_handle_md old_ping_md;
 
        /* switch the NIs to point to the new ping info created */
        lnet_net_lock(LNET_LOCK_EX);
 
        if (!the_lnet.ln_routing)
-               pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
-       lnet_ping_info_install_locked(pinfo);
+               pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+       if (!lnet_peer_discovery_disabled)
+               pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
+
+       /* Ensure only known feature bits have been set. */
+       LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
+       LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
 
-       if (the_lnet.ln_ping_info != NULL) {
-               old_pinfo = the_lnet.ln_ping_info;
-               old_md = the_lnet.ln_ping_target_md;
+       lnet_ping_target_install_locked(pbuf);
+
+       if (the_lnet.ln_ping_target) {
+               old_pbuf = the_lnet.ln_ping_target;
+               old_ping_md = the_lnet.ln_ping_target_md;
        }
-       the_lnet.ln_ping_target_md = md_handle;
-       the_lnet.ln_ping_info = pinfo;
+       the_lnet.ln_ping_target_md = ping_mdh;
+       the_lnet.ln_ping_target = pbuf;
 
        lnet_net_unlock(LNET_LOCK_EX);
 
-       if (old_pinfo != NULL) {
-               /* unlink the old ping info */
-               lnet_ping_md_unlink(old_pinfo, &old_md);
-               lnet_ping_info_free(old_pinfo);
+       if (old_pbuf) {
+               /* unlink and free the old ping info */
+               lnet_ping_md_unlink(old_pbuf, &old_ping_md);
+               lnet_ping_buffer_decref(old_pbuf);
        }
+
+       lnet_push_update_to_peers(0);
 }
 
 static void
@@ -1186,13 +1841,156 @@ lnet_ping_target_fini(void)
 {
        int             rc;
 
-       lnet_ping_md_unlink(the_lnet.ln_ping_info,
+       lnet_ping_md_unlink(the_lnet.ln_ping_target,
                            &the_lnet.ln_ping_target_md);
 
        rc = LNetEQFree(the_lnet.ln_ping_target_eq);
        LASSERT(rc == 0);
 
-       lnet_ping_info_destroy();
+       lnet_ping_target_destroy();
+}
+
+/* Resize the push target. */
+int lnet_push_target_resize(void)
+{
+       struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
+       struct lnet_md md = { NULL };
+       struct lnet_me *me;
+       struct lnet_handle_md mdh;
+       struct lnet_handle_md old_mdh;
+       struct lnet_ping_buffer *pbuf;
+       struct lnet_ping_buffer *old_pbuf;
+       int nnis = the_lnet.ln_push_target_nnis;
+       int rc;
+
+       if (nnis <= 0) {
+               rc = -EINVAL;
+               goto fail_return;
+       }
+again:
+       pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
+       if (!pbuf) {
+               rc = -ENOMEM;
+               goto fail_return;
+       }
+
+       me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+                         LNET_PROTO_PING_MATCHBITS, 0,
+                         LNET_UNLINK, LNET_INS_AFTER);
+
+       if (IS_ERR(me)) {
+               rc = PTR_ERR(me);
+               CERROR("Can't create push target ME: %d\n", rc);
+               goto fail_decref_pbuf;
+       }
+
+       /* initialize md content */
+       md.start     = &pbuf->pb_info;
+       md.length    = LNET_PING_INFO_SIZE(nnis);
+       md.threshold = LNET_MD_THRESH_INF;
+       md.max_size  = 0;
+       md.options   = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
+                      LNET_MD_MANAGE_REMOTE;
+       md.user_ptr  = pbuf;
+       md.eq_handle = the_lnet.ln_push_target_eq;
+
+       rc = LNetMDAttach(me, md, LNET_RETAIN, &mdh);
+       if (rc) {
+               CERROR("Can't attach push MD: %d\n", rc);
+               goto fail_unlink_me;
+       }
+       lnet_ping_buffer_addref(pbuf);
+
+       lnet_net_lock(LNET_LOCK_EX);
+       old_pbuf = the_lnet.ln_push_target;
+       old_mdh = the_lnet.ln_push_target_md;
+       the_lnet.ln_push_target = pbuf;
+       the_lnet.ln_push_target_md = mdh;
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       if (old_pbuf) {
+               LNetMDUnlink(old_mdh);
+               lnet_ping_buffer_decref(old_pbuf);
+       }
+
+       if (nnis < the_lnet.ln_push_target_nnis)
+               goto again;
+
+       CDEBUG(D_NET, "nnis %d success\n", nnis);
+
+       return 0;
+
+fail_unlink_me:
+       LNetMEUnlink(me);
+fail_decref_pbuf:
+       lnet_ping_buffer_decref(pbuf);
+fail_return:
+       CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
+       return rc;
+}
+
+static void lnet_push_target_event_handler(struct lnet_event *ev)
+{
+       struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
+
+       if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
+               lnet_swap_pinginfo(pbuf);
+
+       lnet_peer_push_event(ev);
+       if (ev->unlinked)
+               lnet_ping_buffer_decref(pbuf);
+}
+
+/* Initialize the push target. */
+static int lnet_push_target_init(void)
+{
+       int rc;
+
+       if (the_lnet.ln_push_target)
+               return -EALREADY;
+
+       rc = LNetEQAlloc(0, lnet_push_target_event_handler,
+                        &the_lnet.ln_push_target_eq);
+       if (rc) {
+               CERROR("Can't allocate push target EQ: %d\n", rc);
+               return rc;
+       }
+
+       /* Start at the required minimum; we'll enlarge if needed. */
+       the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
+
+       rc = lnet_push_target_resize();
+
+       if (rc) {
+               LNetEQFree(the_lnet.ln_push_target_eq);
+               LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
+       }
+
+       return rc;
+}
+
+/* Clean up the push target. */
+static void lnet_push_target_fini(void)
+{
+       if (!the_lnet.ln_push_target)
+               return;
+
+       /* Unlink and invalidate to prevent new references. */
+       LNetMDUnlink(the_lnet.ln_push_target_md);
+       LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
+
+       /* Wait for the unlink to complete. */
+       while (atomic_read(&the_lnet.ln_push_target->pb_refcnt) > 1) {
+               CDEBUG(D_NET, "Still waiting for push target MD to unlink\n");
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
+       }
+
+       lnet_ping_buffer_decref(the_lnet.ln_push_target);
+       the_lnet.ln_push_target = NULL;
+       the_lnet.ln_push_target_nnis = 0;
+
+       LNetEQFree(the_lnet.ln_push_target_eq);
+       LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
 }
 
 static int
@@ -1215,11 +2013,6 @@ lnet_ni_tq_credits(struct lnet_ni *ni)
 static void
 lnet_ni_unlink_locked(struct lnet_ni *ni)
 {
-       if (!list_empty(&ni->ni_cptlist)) {
-               list_del_init(&ni->ni_cptlist);
-               lnet_ni_decref_locked(ni, 0);
-       }
-
        /* move it to zombie list and nobody can find it anymore */
        LASSERT(!list_empty(&ni->ni_netlist));
        list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
@@ -1265,8 +2058,7 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net)
                                       "Waiting for zombie LNI %s\n",
                                       libcfs_nid2str(ni->ni_nid));
                        }
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1));
                        lnet_net_lock(LNET_LOCK_EX);
                        continue;
                }
@@ -1276,7 +2068,14 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net)
                islo = ni->ni_net->net_lnd->lnd_type == LOLND;
 
                LASSERT(!in_interrupt());
+               /* Holding the mutex makes it safe for lnd_shutdown
+                * to call module_put(). Module unload cannot finish
+                * until lnet_unregister_lnd() completes, and that
+                * requires the mutex.
+                */
+               mutex_lock(&the_lnet.ln_lnd_mutex);
                (net->net_lnd->lnd_shutdown)(ni);
+               mutex_unlock(&the_lnet.ln_lnd_mutex);
 
                if (!islo)
                        CDEBUG(D_LNI, "Removed LNI %s\n",
@@ -1296,7 +2095,9 @@ lnet_shutdown_lndni(struct lnet_ni *ni)
        struct lnet_net *net = ni->ni_net;
 
        lnet_net_lock(LNET_LOCK_EX);
+       lnet_ni_lock(ni);
        ni->ni_state = LNET_NI_STATE_DELETING;
+       lnet_ni_unlock(ni);
        lnet_ni_unlink_locked(ni);
        lnet_incr_dlc_seq();
        lnet_net_unlock(LNET_LOCK_EX);
@@ -1317,8 +2118,6 @@ lnet_shutdown_lndnet(struct lnet_net *net)
 
        lnet_net_lock(LNET_LOCK_EX);
 
-       net->net_state = LNET_NET_STATE_DELETING;
-
        list_del_init(&net->net_list);
 
        while (!list_empty(&net->net_ni_list)) {
@@ -1334,15 +2133,6 @@ lnet_shutdown_lndnet(struct lnet_net *net)
        /* Do peer table cleanup for this net */
        lnet_peer_tables_cleanup(net);
 
-       lnet_net_lock(LNET_LOCK_EX);
-       /*
-        * decrement ref count on lnd only when the entire network goes
-        * away
-        */
-       net->net_lnd->lnd_refcount--;
-
-       lnet_net_unlock(LNET_LOCK_EX);
-
        lnet_net_free(net);
 }
 
@@ -1350,6 +2140,8 @@ static void
 lnet_shutdown_lndnets(void)
 {
        struct lnet_net *net;
+       LIST_HEAD(resend);
+       struct lnet_msg *msg, *tmp;
 
        /* NB called holding the global mutex */
 
@@ -1385,6 +2177,16 @@ lnet_shutdown_lndnets(void)
                lnet_shutdown_lndnet(net);
        }
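+       /* Cancel any messages that are still queued for resend; the
+        * networks they would have used are now gone.
+        */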
 
+       spin_lock(&the_lnet.ln_msg_resend_lock);
+       list_splice(&the_lnet.ln_msg_resend, &resend);
+       spin_unlock(&the_lnet.ln_msg_resend_lock);
+
+       list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
+               list_del_init(&msg->msg_list);
+               msg->msg_no_resend = true;
+               lnet_finalize(msg, -ECANCELED);
+       }
+
        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_state = LNET_STATE_SHUTDOWN;
        lnet_net_unlock(LNET_LOCK_EX);
@@ -1412,13 +2214,12 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
        if (rc != 0) {
                LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
                                   rc, libcfs_lnd2str(net->net_lnd->lnd_type));
-               lnet_net_lock(LNET_LOCK_EX);
-               net->net_lnd->lnd_refcount--;
-               lnet_net_unlock(LNET_LOCK_EX);
                goto failed0;
        }
 
+       lnet_ni_lock(ni);
        ni->ni_state = LNET_NI_STATE_ACTIVE;
+       lnet_ni_unlock(ni);
 
        /* We keep a reference on the loopback net through the loopback NI */
        if (net->net_lnd->lnd_type == LOLND) {
@@ -1453,6 +2254,7 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
 
        atomic_set(&ni->ni_tx_credits,
                   lnet_ni_tq_credits(ni) * ni->ni_ncpts);
+       atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
 
        CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
                libcfs_nid2str(ni->ni_nid),
@@ -1471,21 +2273,19 @@ static int
 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
 {
        struct lnet_ni *ni;
-       struct lnet_net *net_l = NULL;
-       struct list_head        local_ni_list;
-       int                     rc;
-       int                     ni_count = 0;
-       __u32                   lnd_type;
-       struct lnet_lnd *lnd;
-       int                     peer_timeout =
+       struct lnet_net *net_l = NULL;
+       LIST_HEAD(local_ni_list);
+       int rc;
+       int ni_count = 0;
+       __u32 lnd_type;
+       const struct lnet_lnd  *lnd;
+       int peer_timeout =
                net->net_tunables.lct_peer_timeout;
-       int                     maxtxcredits =
+       int maxtxcredits =
                net->net_tunables.lct_max_tx_credits;
-       int                     peerrtrcredits =
+       int peerrtrcredits =
                net->net_tunables.lct_peer_rtr_credits;
 
-       INIT_LIST_HEAD(&local_ni_list);
-
        /*
         * make sure that this net is unique. If it isn't then
         * we are adding interfaces to an already existing network, and
@@ -1496,8 +2296,6 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
        if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
                lnd_type = LNET_NETTYP(net->net_id);
 
-               LASSERT(libcfs_isknown_lnd(lnd_type));
-
                mutex_lock(&the_lnet.ln_lnd_mutex);
                lnd = lnet_find_lnd_by_type(lnd_type);
 
@@ -1522,10 +2320,6 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
                        }
                }
 
-               lnet_net_lock(LNET_LOCK_EX);
-               lnd->lnd_refcount++;
-               lnet_net_unlock(LNET_LOCK_EX);
-
                net->net_lnd = lnd;
 
                mutex_unlock(&the_lnet.ln_lnd_mutex);
@@ -1576,7 +2370,7 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
                 * up is actually unique. if it's not fail. */
                if (!lnet_ni_unique_net(&net_l->net_ni_list,
                                        ni->ni_interfaces[0])) {
-                       rc = -EINVAL;
+                       rc = -EEXIST;
                        goto failed1;
                }
 
@@ -1586,9 +2380,6 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
 
                rc = lnet_startup_lndni(ni, tun);
 
-               LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
-                       ni->ni_net->net_lnd->lnd_query != NULL);
-
                if (rc < 0)
                        goto failed1;
 
@@ -1613,7 +2404,6 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
                 */
                lnet_net_free(net);
        } else {
-               net->net_state = LNET_NET_STATE_ACTIVE;
                /*
                 * restore tunables after they have been overwritten by the
                 * lnd
@@ -1630,6 +2420,9 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
                lnet_net_unlock(LNET_LOCK_EX);
        }
 
+       /* update net count */
+       lnet_current_net_count = lnet_get_net_count();
+
        return ni_count;
 
 failed1:
@@ -1701,8 +2494,6 @@ int lnet_lib_init(void)
 
        lnet_assert_wire_constants();
 
-       memset(&the_lnet, 0, sizeof(the_lnet));
-
        /* refer to global cfs_cpt_table for now */
        the_lnet.ln_cpt_table   = cfs_cpt_table;
        the_lnet.ln_cpt_number  = cfs_cpt_number(cfs_cpt_table);
@@ -1726,11 +2517,8 @@ int lnet_lib_init(void)
        }
 
        the_lnet.ln_refcount = 0;
-       LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
-       INIT_LIST_HEAD(&the_lnet.ln_lnds);
        INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
-       INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
-       INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
+       INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
 
        /* The hash table size is the number of bits it takes to express the set
         * ln_num_routes, minus 1 (better to under estimate than over so we
@@ -1754,14 +2542,18 @@ int lnet_lib_init(void)
  *
  * \pre lnet_lib_init() called with success.
  * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
+ *
+ * As this happens at module unload, all LNDs must already be unloaded,
+ * and therefore already unregistered.
  */
 void lnet_lib_exit(void)
 {
-       LASSERT(the_lnet.ln_refcount == 0);
+       int i;
 
-       while (!list_empty(&the_lnet.ln_lnds))
-               lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
-                                              struct lnet_lnd, lnd_list));
+       LASSERT(the_lnet.ln_refcount == 0);
+       lnet_unregister_lnd(&the_lolnd);
+       for (i = 0; i < NUM_LNDS; i++)
+               LASSERT(!the_lnet.ln_lnds[i]);
        lnet_destroy_locks();
 }
 
@@ -1786,13 +2578,11 @@ LNetNIInit(lnet_pid_t requested_pid)
        int                     im_a_router = 0;
        int                     rc;
        int                     ni_count;
-       struct lnet_ping_info   *pinfo;
-       struct lnet_handle_md   md_handle;
-       struct list_head        net_head;
+       struct lnet_ping_buffer *pbuf;
+       struct lnet_handle_md   ping_mdh;
+       LIST_HEAD(net_head);
        struct lnet_net         *net;
 
-       INIT_LIST_HEAD(&net_head);
-
        mutex_lock(&the_lnet.ln_api_mutex);
 
        CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
@@ -1846,10 +2636,6 @@ LNetNIInit(lnet_pid_t requested_pid)
                if (rc != 0)
                        goto err_shutdown_lndnis;
 
-               rc = lnet_check_routes();
-               if (rc != 0)
-                       goto err_destroy_routes;
-
                rc = lnet_rtrpools_alloc(im_a_router);
                if (rc != 0)
                        goto err_destroy_routes;
@@ -1862,23 +2648,46 @@ LNetNIInit(lnet_pid_t requested_pid)
        the_lnet.ln_refcount = 1;
        /* Now I may use my own API functions... */
 
-       rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
+       rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
        if (rc != 0)
                goto err_acceptor_stop;
 
-       lnet_ping_target_update(pinfo, md_handle);
+       lnet_ping_target_update(pbuf, ping_mdh);
 
-       rc = lnet_router_checker_start();
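+       /* Bring up the monitor thread EQ, the push target, peer discovery
+        * and the monitor thread, now that the ping target is in place.
+        */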
+       rc = LNetEQAlloc(0, lnet_mt_event_handler, &the_lnet.ln_mt_eqh);
+       if (rc != 0) {
+               CERROR("Can't allocate monitor thread EQ: %d\n", rc);
+               goto err_stop_ping;
+       }
+
+       rc = lnet_push_target_init();
        if (rc != 0)
                goto err_stop_ping;
 
+       rc = lnet_peer_discovery_start();
+       if (rc != 0)
+               goto err_destroy_push_target;
+
+       rc = lnet_monitor_thr_start();
+       if (rc != 0)
+               goto err_stop_discovery_thr;
+
        lnet_fault_init();
-       lnet_proc_init();
+       lnet_router_debugfs_init();
 
        mutex_unlock(&the_lnet.ln_api_mutex);
 
+       complete_all(&the_lnet.ln_started);
+
+       /* wait for all routers to start */
+       lnet_wait_router_start();
+
        return 0;
 
+err_stop_discovery_thr:
+       lnet_peer_discovery_stop();
+err_destroy_push_target:
+       lnet_push_target_fini();
 err_stop_ping:
        lnet_ping_target_fini();
 err_acceptor_stop:
@@ -1914,7 +2723,7 @@ EXPORT_SYMBOL(LNetNIInit);
  * \return always 0 for current implementation.
  */
 int
-LNetNIFini()
+LNetNIFini(void)
 {
        mutex_lock(&the_lnet.ln_api_mutex);
 
@@ -1927,8 +2736,10 @@ LNetNIFini()
 
                lnet_fault_fini();
 
-               lnet_proc_fini();
-               lnet_router_checker_stop();
+               lnet_router_debugfs_fini();
+               lnet_monitor_thr_stop();
+               lnet_peer_discovery_stop();
+               lnet_push_target_fini();
                lnet_ping_target_fini();
 
                /* Teardown fns that use my own API functions BEFORE here */
@@ -1976,15 +2787,22 @@ lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
        }
 
        cfg_ni->lic_nid = ni->ni_nid;
-       cfg_ni->lic_status = ni->ni_status->ns_status;
+       if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
+               cfg_ni->lic_status = LNET_NI_STATUS_UP;
+       else
+               cfg_ni->lic_status = ni->ni_status->ns_status;
        cfg_ni->lic_tcp_bonding = use_tcp_bonding;
        cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
 
        memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
 
        if (stats) {
-               stats->iel_send_count = atomic_read(&ni->ni_stats.send_count);
-               stats->iel_recv_count = atomic_read(&ni->ni_stats.recv_count);
+               stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
+                                                      LNET_STATS_TYPE_SEND);
+               stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
+                                                      LNET_STATS_TYPE_RECV);
+               stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
+                                                      LNET_STATS_TYPE_DROP);
        }
 
        /*
@@ -2061,7 +2879,10 @@ lnet_fill_ni_info_legacy(struct lnet_ni *ni,
        config->cfg_config_u.cfg_net.net_peer_rtr_credits =
                ni->ni_net->net_tunables.lct_peer_rtr_credits;
 
-       net_config->ni_status = ni->ni_status->ns_status;
+       if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
+               net_config->ni_status = LNET_NI_STATUS_UP;
+       else
+               net_config->ni_status = ni->ni_status->ns_status;
 
        if (ni->ni_cpts) {
                int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
@@ -2119,10 +2940,17 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
        struct lnet_ni          *ni;
        struct lnet_net         *net = mynet;
 
+       /*
+        * It is possible that the net has been cleaned out while there is
+        * a message being sent. This function used to access the net
+        * without checking whether its NI list is empty, so check that
+        * here before dereferencing the list.
+        */
        if (prev == NULL) {
                if (net == NULL)
                        net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
                                        net_list);
+               if (list_empty(&net->net_ni_list))
+                       return NULL;
                ni = list_entry(net->net_ni_list.next, struct lnet_ni,
                                ni_netlist);
 
@@ -2144,6 +2972,8 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
                /* get the next net */
                net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
                                 net_list);
+               if (list_empty(&net->net_ni_list))
+                       return NULL;
                /* get the ni on it */
                ni = list_entry(net->net_ni_list.next, struct lnet_ni,
                                ni_netlist);
@@ -2151,6 +2981,9 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
                return ni;
        }
 
+       if (list_empty(&prev->ni_netlist))
+               return NULL;
+
        /* there are more nis left */
        ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
 
@@ -2208,12 +3041,35 @@ lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
        return rc;
 }
 
+int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
+{
+       struct lnet_ni *ni;
+       int cpt;
+       int rc = -ENOENT;
+
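+       /* Copy the message statistics of the local NI at index im_idx
+        * into the user-visible ioctl structure; -ENOENT if no NI exists
+        * at that index.
+        */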
+       if (!msg_stats)
+               return -EINVAL;
+
+       cpt = lnet_net_lock_current();
+
+       ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
+
+       if (ni) {
+               lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
+               rc = 0;
+       }
+
+       lnet_net_unlock(cpt);
+
+       return rc;
+}
+
 static int lnet_add_net_common(struct lnet_net *net,
                               struct lnet_ioctl_config_lnd_tunables *tun)
 {
        __u32                   net_id;
-       struct lnet_ping_info   *pinfo;
-       struct lnet_handle_md   md_handle;
+       struct lnet_ping_buffer *pbuf;
+       struct lnet_handle_md   ping_mdh;
        int                     rc;
        struct lnet_remotenet *rnet;
        int                     net_ni_count;
@@ -2235,7 +3091,7 @@ static int lnet_add_net_common(struct lnet_net *net,
 
        /*
         * make sure you calculate the correct number of slots in the ping
-        * info. Since the ping info is a flattened list of all the NIs,
+        * buffer. Since the ping info is a flattened list of all the NIs,
         * we should allocate enough slots to accommodate the number of NIs
         * which will be added.
         *
@@ -2244,9 +3100,9 @@ static int lnet_add_net_common(struct lnet_net *net,
         */
        net_ni_count = lnet_get_net_ni_count_pre(net);
 
-       rc = lnet_ping_info_setup(&pinfo, &md_handle,
-                                 net_ni_count + lnet_get_ni_count(),
-                                 false);
+       rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
+                                   net_ni_count + lnet_get_ni_count(),
+                                   false);
        if (rc < 0) {
                lnet_net_free(net);
                return rc;
@@ -2297,13 +3153,13 @@ static int lnet_add_net_common(struct lnet_net *net,
        lnet_peer_net_added(net);
        lnet_net_unlock(LNET_LOCK_EX);
 
-       lnet_ping_target_update(pinfo, md_handle);
+       lnet_ping_target_update(pbuf, ping_mdh);
 
        return 0;
 
 failed:
-       lnet_ping_md_unlink(pinfo, &md_handle);
-       lnet_ping_info_free(pinfo);
+       lnet_ping_md_unlink(pbuf, &ping_mdh);
+       lnet_ping_buffer_decref(pbuf);
        return rc;
 }
 
@@ -2313,9 +3169,7 @@ static int lnet_handle_legacy_ip2nets(char *ip2nets,
        struct lnet_net *net;
        char *nets;
        int rc;
-       struct list_head net_head;
-
-       INIT_LIST_HEAD(&net_head);
+       LIST_HEAD(net_head);
 
        rc = lnet_parse_ip2nets(&nets, ip2nets);
        if (rc < 0)
@@ -2351,7 +3205,7 @@ int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
        struct lnet_ni *ni;
        struct lnet_ioctl_config_lnd_tunables *tun = NULL;
        int rc, i;
-       __u32 net_id;
+       __u32 net_id, lnd_type;
 
        /* get the tunables if they are available */
        if (conf->lic_cfg_hdr.ioc_len >=
@@ -2365,6 +3219,12 @@ int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
                                                  tun);
 
        net_id = LNET_NIDNET(conf->lic_nid);
+       lnd_type = LNET_NETTYP(net_id);
+
+       if (!libcfs_isknown_lnd(lnd_type)) {
+               CERROR("No valid net or LND information provided\n");
+               return -EINVAL;
+       }
 
        net = lnet_net_alloc(net_id, NULL);
        if (!net)
@@ -2394,8 +3254,8 @@ int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
        struct lnet_net  *net;
        struct lnet_ni *ni;
        __u32 net_id = LNET_NIDNET(conf->lic_nid);
-       struct lnet_ping_info *pinfo;
-       struct lnet_handle_md md_handle;
+       struct lnet_ping_buffer *pbuf;
+       struct lnet_handle_md  ping_mdh;
        int               rc;
        int               net_count;
        __u32             addr;
@@ -2413,7 +3273,7 @@ int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
                CERROR("net %s not found\n",
                       libcfs_net2str(net_id));
                rc = -ENOENT;
-               goto net_unlock;
+               goto unlock_net;
        }
 
        addr = LNET_NIDADDR(conf->lic_nid);
@@ -2424,28 +3284,28 @@ int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
                lnet_net_unlock(0);
 
                /* create and link a new ping info, before removing the old one */
-               rc = lnet_ping_info_setup(&pinfo, &md_handle,
+               rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
                                        lnet_get_ni_count() - net_count,
                                        false);
                if (rc != 0)
-                       goto out;
+                       goto unlock_api_mutex;
 
                lnet_shutdown_lndnet(net);
 
                if (lnet_count_acceptor_nets() == 0)
                        lnet_acceptor_stop();
 
-               lnet_ping_target_update(pinfo, md_handle);
+               lnet_ping_target_update(pbuf, ping_mdh);
 
-               goto out;
+               goto unlock_api_mutex;
        }
 
        ni = lnet_nid2ni_locked(conf->lic_nid, 0);
        if (!ni) {
-               CERROR("nid %s not found \n",
+               CERROR("nid %s not found\n",
                       libcfs_nid2str(conf->lic_nid));
                rc = -ENOENT;
-               goto net_unlock;
+               goto unlock_net;
        }
 
        net_count = lnet_get_net_ni_count_locked(net);
@@ -2453,27 +3313,27 @@ int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
        lnet_net_unlock(0);
 
        /* create and link a new ping info, before removing the old one */
-       rc = lnet_ping_info_setup(&pinfo, &md_handle,
+       rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
                                  lnet_get_ni_count() - 1, false);
        if (rc != 0)
-               goto out;
+               goto unlock_api_mutex;
 
        lnet_shutdown_lndni(ni);
 
        if (lnet_count_acceptor_nets() == 0)
                lnet_acceptor_stop();
 
-       lnet_ping_target_update(pinfo, md_handle);
+       lnet_ping_target_update(pbuf, ping_mdh);
 
        /* check if the net is empty and remove it if it is */
        if (net_count == 1)
                lnet_shutdown_lndnet(net);
 
-       goto out;
+       goto unlock_api_mutex;
 
-net_unlock:
+unlock_net:
        lnet_net_unlock(0);
-out:
+unlock_api_mutex:
        mutex_unlock(&the_lnet.ln_api_mutex);
 
        return rc;
@@ -2488,14 +3348,12 @@ out:
 int
 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
 {
-       struct lnet_net         *net;
-       struct list_head        net_head;
-       int                     rc;
+       struct lnet_net *net;
+       LIST_HEAD(net_head);
+       int rc;
        struct lnet_ioctl_config_lnd_tunables tun;
        char *nets = conf->cfg_config_u.cfg_net.net_intf;
 
-       INIT_LIST_HEAD(&net_head);
-
        /* Create a net/ni structures for the network string */
        rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
        if (rc <= 0)
@@ -2505,7 +3363,7 @@ lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
 
        if (rc > 1) {
                rc = -EINVAL; /* only add one network per call */
-               goto failed;
+               goto out_unlock_clean;
        }
 
        net = list_entry(net_head.next, struct lnet_net, net_list);
@@ -2525,14 +3383,11 @@ lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
          conf->cfg_config_u.cfg_net.net_max_tx_credits;
 
        rc = lnet_add_net_common(net, &tun);
-       if (rc != 0)
-               goto failed;
-
-       return 0;
 
-failed:
+out_unlock_clean:
        mutex_unlock(&the_lnet.ln_api_mutex);
        while (!list_empty(&net_head)) {
+               /* net_head list is empty in success case */
                net = list_entry(net_head.next, struct lnet_net, net_list);
                list_del_init(&net->net_list);
                lnet_net_free(net);
@@ -2544,8 +3399,8 @@ int
 lnet_dyn_del_net(__u32 net_id)
 {
        struct lnet_net  *net;
-       struct lnet_ping_info *pinfo;
-       struct lnet_handle_md md_handle;
+       struct lnet_ping_buffer *pbuf;
+       struct lnet_handle_md ping_mdh;
        int               rc;
        int               net_ni_count;
 
@@ -2559,6 +3414,7 @@ lnet_dyn_del_net(__u32 net_id)
 
        net = lnet_get_net_locked(net_id);
        if (net == NULL) {
+               lnet_net_unlock(0);
                rc = -EINVAL;
                goto out;
        }
@@ -2568,8 +3424,8 @@ lnet_dyn_del_net(__u32 net_id)
        lnet_net_unlock(0);
 
        /* create and link a new ping info, before removing the old one */
-       rc = lnet_ping_info_setup(&pinfo, &md_handle,
-                                 lnet_get_ni_count() - net_ni_count, false);
+       rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
+                                   lnet_get_ni_count() - net_ni_count, false);
        if (rc != 0)
                goto out;
 
@@ -2578,7 +3434,7 @@ lnet_dyn_del_net(__u32 net_id)
        if (lnet_count_acceptor_nets() == 0)
                lnet_acceptor_stop();
 
-       lnet_ping_target_update(pinfo, md_handle);
+       lnet_ping_target_update(pbuf, ping_mdh);
 
 out:
        mutex_unlock(&the_lnet.ln_api_mutex);
@@ -2596,6 +3452,102 @@ __u32 lnet_get_dlc_seq_locked(void)
        return atomic_read(&lnet_dlc_seq_no);
 }
 
+static void
+lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
+{
+       struct lnet_net *net;
+       struct lnet_ni *ni;
+
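+       /* Set the health value of the local NI matching nid, or of every
+        * local NI when 'all' is set. An NI set below the maximum health
+        * value is queued for recovery (if not already queued) so the
+        * monitor thread will start pinging it.
+        */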
+       lnet_net_lock(LNET_LOCK_EX);
+       list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
+               list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+                       if (ni->ni_nid == nid || all) {
+                               atomic_set(&ni->ni_healthv, value);
+                               if (list_empty(&ni->ni_recovery) &&
+                                   value < LNET_MAX_HEALTH_VALUE) {
+                                       CERROR("manually adding local NI %s to recovery\n",
+                                              libcfs_nid2str(ni->ni_nid));
+                                       list_add_tail(&ni->ni_recovery,
+                                                     &the_lnet.ln_mt_localNIRecovq);
+                                       lnet_ni_addref_locked(ni, 0);
+                               }
+                               if (!all) {
+                                       lnet_net_unlock(LNET_LOCK_EX);
+                                       return;
+                               }
+                       }
+               }
+       }
+       lnet_net_unlock(LNET_LOCK_EX);
+}
+
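+/* Report the health statistics and current health value of the local NI
+ * identified by hlni_nid, or -ENOENT if no such NI exists.
+ */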
+static int
+lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
+{
+       int cpt, rc = 0;
+       struct lnet_ni *ni;
+       lnet_nid_t nid = stats->hlni_nid;
+
+       cpt = lnet_net_lock_current();
+       ni = lnet_nid2ni_locked(nid, cpt);
+
+       if (!ni) {
+               rc = -ENOENT;
+               goto unlock;
+       }
+
+       stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
+       stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
+       stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
+       stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
+       stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
+       stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
+       stats->hlni_health_value = atomic_read(&ni->ni_healthv);
+
+unlock:
+       lnet_net_unlock(cpt);
+
+       return rc;
+}
+
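+/* Fill the ioctl NID array with the local NIs currently queued for
+ * recovery, up to LNET_MAX_SHOW_NUM_NID entries.
+ */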
+static int
+lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
+{
+       struct lnet_ni *ni;
+       int i = 0;
+
+       lnet_net_lock(LNET_LOCK_EX);
+       list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
+               list->rlst_nid_array[i] = ni->ni_nid;
+               i++;
+               if (i >= LNET_MAX_SHOW_NUM_NID)
+                       break;
+       }
+       lnet_net_unlock(LNET_LOCK_EX);
+       list->rlst_num_nids = i;
+
+       return 0;
+}
+
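+/* Fill the ioctl NID array with the peer NIs currently queued for
+ * recovery, up to LNET_MAX_SHOW_NUM_NID entries.
+ */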
+static int
+lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
+{
+       struct lnet_peer_ni *lpni;
+       int i = 0;
+
+       lnet_net_lock(LNET_LOCK_EX);
+       list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
+               list->rlst_nid_array[i] = lpni->lpni_nid;
+               i++;
+               if (i >= LNET_MAX_SHOW_NUM_NID)
+                       break;
+       }
+       lnet_net_unlock(LNET_LOCK_EX);
+       list->rlst_num_nids = i;
+
+       return 0;
+}
+
 /**
  * LNet ioctl handler.
  *
@@ -2621,26 +3573,28 @@ LNetCtl(unsigned int cmd, void *arg)
        case IOC_LIBCFS_FAIL_NID:
                return lnet_fail_nid(data->ioc_nid, data->ioc_count);
 
-       case IOC_LIBCFS_ADD_ROUTE:
+       case IOC_LIBCFS_ADD_ROUTE: {
+               /* default router sensitivity to 1 */
+               unsigned int sensitivity = 1;
                config = arg;
 
                if (config->cfg_hdr.ioc_len < sizeof(*config))
                        return -EINVAL;
 
+               if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
+                       sensitivity =
+                         config->cfg_config_u.cfg_route.rtr_sensitivity;
+               }
+
                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_add_route(config->cfg_net,
                                    config->cfg_config_u.cfg_route.rtr_hop,
                                    config->cfg_nid,
                                    config->cfg_config_u.cfg_route.
-                                       rtr_priority);
-               if (rc == 0) {
-                       rc = lnet_check_routes();
-                       if (rc != 0)
-                               lnet_del_route(config->cfg_net,
-                                              config->cfg_nid);
-               }
+                                       rtr_priority, sensitivity);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
+       }
 
        case IOC_LIBCFS_DEL_ROUTE:
                config = arg;
@@ -2666,7 +3620,9 @@ LNetCtl(unsigned int cmd, void *arg)
                                    &config->cfg_nid,
                                    &config->cfg_config_u.cfg_route.rtr_flags,
                                    &config->cfg_config_u.cfg_route.
-                                       rtr_priority);
+                                       rtr_priority,
+                                   &config->cfg_config_u.cfg_route.
+                                       rtr_sensitivity);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
 
@@ -2677,9 +3633,10 @@ LNetCtl(unsigned int cmd, void *arg)
                __u32 tun_size;
 
                cfg_ni = arg;
+
                /* get the tunables if they are available */
                if (cfg_ni->lic_cfg_hdr.ioc_len <
-                   sizeof(*cfg_ni) + sizeof(*stats)+ sizeof(*tun))
+                   sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
                        return -EINVAL;
 
                stats = (struct lnet_ioctl_element_stats *)
@@ -2696,6 +3653,19 @@ LNetCtl(unsigned int cmd, void *arg)
                return rc;
        }
 
+       case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
+               struct lnet_ioctl_element_msg_stats *msg_stats = arg;
+
+               if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
+                       return -EINVAL;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               rc = lnet_get_ni_stats(msg_stats);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               return rc;
+       }
+
        case IOC_LIBCFS_GET_NET: {
                size_t total = sizeof(*config) +
                               sizeof(struct lnet_ioctl_net_config);
@@ -2756,22 +3726,22 @@ LNetCtl(unsigned int cmd, void *arg)
                return rc;
 
        case IOC_LIBCFS_SET_NUMA_RANGE: {
-               struct lnet_ioctl_numa_range *numa;
+               struct lnet_ioctl_set_value *numa;
                numa = arg;
-               if (numa->nr_hdr.ioc_len != sizeof(*numa))
+               if (numa->sv_hdr.ioc_len != sizeof(*numa))
                        return -EINVAL;
-               mutex_lock(&the_lnet.ln_api_mutex);
-               lnet_numa_range = numa->nr_range;
-               mutex_unlock(&the_lnet.ln_api_mutex);
+               lnet_net_lock(LNET_LOCK_EX);
+               lnet_numa_range = numa->sv_value;
+               lnet_net_unlock(LNET_LOCK_EX);
                return 0;
        }
 
        case IOC_LIBCFS_GET_NUMA_RANGE: {
-               struct lnet_ioctl_numa_range *numa;
+               struct lnet_ioctl_set_value *numa;
                numa = arg;
-               if (numa->nr_hdr.ioc_len != sizeof(*numa))
+               if (numa->sv_hdr.ioc_len != sizeof(*numa))
                        return -EINVAL;
-               numa->nr_range = lnet_numa_range;
+               numa->sv_value = lnet_numa_range;
                return 0;
        }
 
@@ -2792,6 +3762,33 @@ LNetCtl(unsigned int cmd, void *arg)
                return rc;
        }
 
+       case IOC_LIBCFS_GET_LOCAL_HSTATS: {
+               struct lnet_ioctl_local_ni_hstats *stats = arg;
+
+               if (stats->hlni_hdr.ioc_len < sizeof(*stats))
+                       return -EINVAL;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               rc = lnet_get_local_ni_hstats(stats);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               return rc;
+       }
+
+       case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
+               struct lnet_ioctl_recovery_list *list = arg;
+               if (list->rlst_hdr.ioc_len < sizeof(*list))
+                       return -EINVAL;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
+                       rc = lnet_get_local_ni_recovery_list(list);
+               else
+                       rc = lnet_get_peer_ni_recovery_list(list);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return rc;
+       }
+
        case IOC_LIBCFS_ADD_PEER_NI: {
                struct lnet_ioctl_peer_cfg *cfg = arg;
 
@@ -2799,9 +3796,9 @@ LNetCtl(unsigned int cmd, void *arg)
                        return -EINVAL;
 
                mutex_lock(&the_lnet.ln_api_mutex);
-               rc = lnet_add_peer_ni_to_peer(cfg->prcfg_prim_nid,
-                                             cfg->prcfg_cfg_nid,
-                                             cfg->prcfg_mr);
+               rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
+                                     cfg->prcfg_cfg_nid,
+                                     cfg->prcfg_mr);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
@@ -2813,8 +3810,8 @@ LNetCtl(unsigned int cmd, void *arg)
                        return -EINVAL;
 
                mutex_lock(&the_lnet.ln_api_mutex);
-               rc = lnet_del_peer_ni_from_peer(cfg->prcfg_prim_nid,
-                                               cfg->prcfg_cfg_nid);
+               rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
+                                     cfg->prcfg_cfg_nid);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
@@ -2843,33 +3840,64 @@ LNetCtl(unsigned int cmd, void *arg)
 
        case IOC_LIBCFS_GET_PEER_NI: {
                struct lnet_ioctl_peer_cfg *cfg = arg;
-               struct lnet_peer_ni_credit_info __user *lpni_cri;
-               struct lnet_ioctl_element_stats __user *lpni_stats;
-               size_t usr_size = sizeof(*lpni_cri) + sizeof(*lpni_stats);
 
-               if ((cfg->prcfg_hdr.ioc_len != sizeof(*cfg)) ||
-                   (cfg->prcfg_size != usr_size))
+               if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
                        return -EINVAL;
 
-               lpni_cri = cfg->prcfg_bulk;
-               lpni_stats = cfg->prcfg_bulk + sizeof(*lpni_cri);
+               mutex_lock(&the_lnet.ln_api_mutex);
+               rc = lnet_get_peer_info(cfg,
+                                       (void __user *)cfg->prcfg_bulk);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return rc;
+       }
+
+       case IOC_LIBCFS_GET_PEER_LIST: {
+               struct lnet_ioctl_peer_cfg *cfg = arg;
+
+               if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
+                       return -EINVAL;
 
                mutex_lock(&the_lnet.ln_api_mutex);
-               rc = lnet_get_peer_info(cfg->prcfg_count, &cfg->prcfg_prim_nid,
-                                       &cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
-                                       lpni_cri, lpni_stats);
+               rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
+                               (struct lnet_process_id __user *)cfg->prcfg_bulk);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
 
-       case IOC_LIBCFS_NOTIFY_ROUTER: {
-               unsigned long jiffies_passed;
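+       /* Reset the health value of a local or peer NI, or of all of them
+        * when rh_all is set; out-of-range values are replaced with
+        * LNET_MAX_HEALTH_VALUE.
+        */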
+       case IOC_LIBCFS_SET_HEALHV: {
+               struct lnet_ioctl_reset_health_cfg *cfg = arg;
+               int value;
+               if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
+                       return -EINVAL;
+               if (cfg->rh_value < 0 ||
+                   cfg->rh_value > LNET_MAX_HEALTH_VALUE)
+                       value = LNET_MAX_HEALTH_VALUE;
+               else
+                       value = cfg->rh_value;
+               CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
+                      value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
+                      "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
+               mutex_lock(&the_lnet.ln_api_mutex);
+               if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
+                       lnet_ni_set_healthv(cfg->rh_nid, value,
+                                            cfg->rh_all);
+               else
+                       lnet_peer_ni_set_healthv(cfg->rh_nid, value,
+                                                 cfg->rh_all);
+               mutex_unlock(&the_lnet.ln_api_mutex);
+               return 0;
+       }
 
-               jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
-               jiffies_passed = cfs_time_seconds(jiffies_passed);
+       case IOC_LIBCFS_NOTIFY_ROUTER: {
+               time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
 
-               return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
-                                  jiffies - jiffies_passed);
+               /* The deadline passed in by the user should be some time in
+                * seconds in the future since the UNIX epoch. We have to map
+                * that deadline to the wall clock.
+                */
+               deadline += ktime_get_seconds();
+               return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, false,
+                                  deadline);
        }
 
        case IOC_LIBCFS_LNET_DIST:
@@ -2881,9 +3909,7 @@ LNetCtl(unsigned int cmd, void *arg)
                return 0;
 
        case IOC_LIBCFS_TESTPROTOCOMPAT:
-               lnet_net_lock(LNET_LOCK_EX);
                the_lnet.ln_testprotocompat = data->ioc_flags;
-               lnet_net_unlock(LNET_LOCK_EX);
                return 0;
 
        case IOC_LIBCFS_LNET_FAULT:
@@ -2895,24 +3921,77 @@ LNetCtl(unsigned int cmd, void *arg)
                id.nid = data->ioc_nid;
                id.pid = data->ioc_u32[0];
 
-               /* Don't block longer than 2 minutes */
-               if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
-                       return -EINVAL;
-
-               /* If timestamp is negative then disable timeout */
-               if ((s32)data->ioc_u32[1] < 0)
-                       timeout = MAX_SCHEDULE_TIMEOUT;
+               /* If the timeout is not positive or too large, use the
+                * default of DEFAULT_PEER_TIMEOUT (3 minutes). */
+               if (((s32)data->ioc_u32[1] <= 0) ||
+                   data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
+                       timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
                else
-                       timeout = msecs_to_jiffies(data->ioc_u32[1]);
+                       timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
 
                rc = lnet_ping(id, timeout, data->ioc_pbuf1,
                               data->ioc_plen1 / sizeof(struct lnet_process_id));
+
                if (rc < 0)
                        return rc;
+
                data->ioc_count = rc;
                return 0;
        }
 
+       case IOC_LIBCFS_PING_PEER: {
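+               /* Ping a peer and, on success, report its primary NID and
+                * whether it is configured as Multi-Rail.
+                */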
+               struct lnet_ioctl_ping_data *ping = arg;
+               struct lnet_peer *lp;
+               signed long timeout;
+
+               /* If the timeout is not positive or too large, use the
+                * default of DEFAULT_PEER_TIMEOUT (3 minutes). */
+               if (((s32)ping->op_param) <= 0 ||
+                   ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
+                       timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
+               else
+                       timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
+
+               rc = lnet_ping(ping->ping_id, timeout,
+                              ping->ping_buf,
+                              ping->ping_count);
+               if (rc < 0)
+                       return rc;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               lp = lnet_find_peer(ping->ping_id.nid);
+               if (lp) {
+                       ping->ping_id.nid = lp->lp_primary_nid;
+                       ping->mr_info = lnet_peer_is_multi_rail(lp);
+                       lnet_peer_decref_locked(lp);
+               }
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               ping->ping_count = rc;
+               return 0;
+       }
+
+       case IOC_LIBCFS_DISCOVER: {
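+               /* Trigger discovery of a peer, optionally forcing a fresh
+                * ping and push, and return the NIDs that were learned.
+                */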
+               struct lnet_ioctl_ping_data *discover = arg;
+               struct lnet_peer *lp;
+
+               rc = lnet_discover(discover->ping_id, discover->op_param,
+                                  discover->ping_buf,
+                                  discover->ping_count);
+               if (rc < 0)
+                       return rc;
+
+               mutex_lock(&the_lnet.ln_api_mutex);
+               lp = lnet_find_peer(discover->ping_id.nid);
+               if (lp) {
+                       discover->ping_id.nid = lp->lp_primary_nid;
+                       discover->mr_info = lnet_peer_is_multi_rail(lp);
+                       lnet_peer_decref_locked(lp);
+               }
+               mutex_unlock(&the_lnet.ln_api_mutex);
+
+               discover->ping_count = rc;
+               return 0;
+       }
+
        default:
                ni = lnet_net2ni_addref(data->ioc_net);
                if (ni == NULL)
@@ -2937,6 +4016,35 @@ void LNetDebugPeer(struct lnet_process_id id)
 EXPORT_SYMBOL(LNetDebugPeer);
 
 /**
+ * Determine if the specified peer \a nid is on the local node.
+ *
+ * \param nid  peer nid to check
+ *
+ * \retval true                If peer NID is on the local node.
+ * \retval false       If peer NID is not on the local node.
+ */
+bool LNetIsPeerLocal(lnet_nid_t nid)
+{
+       struct lnet_net *net;
+       struct lnet_ni *ni;
+       int cpt;
+
+       cpt = lnet_net_lock_current();
+       list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
+               list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+                       if (ni->ni_nid == nid) {
+                               lnet_net_unlock(cpt);
+                               return true;
+                       }
+               }
+       }
+       lnet_net_unlock(cpt);
+
+       return false;
+}
+EXPORT_SYMBOL(LNetIsPeerLocal);
+
+/**
  * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
  * Note that all interfaces share a same PID, as requested by LNetNIInit().
  *
@@ -2983,43 +4091,47 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
        struct lnet_handle_md mdh;
        struct lnet_event event;
        struct lnet_md md = { NULL };
-       int                  which;
-       int                  unlinked = 0;
-       int                  replied = 0;
-       const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
-       int                  infosz;
-       struct lnet_ping_info *info;
+       int which;
+       int unlinked = 0;
+       int replied = 0;
+       const signed long a_long_time = cfs_time_seconds(60);
+       struct lnet_ping_buffer *pbuf;
        struct lnet_process_id tmpid;
-       int                  i;
-       int                  nob;
-       int                  rc;
-       int                  rc2;
-       sigset_t         blocked;
-
-       infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]);
+       int i;
+       int nob;
+       int rc;
+       int rc2;
+       sigset_t blocked;
 
        /* n_ids limit is arbitrary */
-       if (n_ids <= 0 || n_ids > 20 || id.nid == LNET_NID_ANY)
+       if (n_ids <= 0 || id.nid == LNET_NID_ANY)
                return -EINVAL;
 
+       /*
+        * if the user buffer has more space than the lnet_interfaces_max
+        * then only fill it up to lnet_interfaces_max
+        */
+       if (n_ids > lnet_interfaces_max)
+               n_ids = lnet_interfaces_max;
+
        if (id.pid == LNET_PID_ANY)
                id.pid = LNET_PID_LUSTRE;
 
-       LIBCFS_ALLOC(info, infosz);
-       if (info == NULL)
+       pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
+       if (!pbuf)
                return -ENOMEM;
 
        /* NB 2 events max (including any unlink event) */
        rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
        if (rc != 0) {
                CERROR("Can't allocate EQ: %d\n", rc);
-               goto out_0;
+               goto fail_ping_buffer_decref;
        }
 
        /* initialize md content */
-       md.start     = info;
-       md.length    = infosz;
-       md.threshold = 2; /*GET/REPLY*/
+       md.start     = &pbuf->pb_info;
+       md.length    = LNET_PING_INFO_SIZE(n_ids);
+       md.threshold = 2; /* GET/REPLY */
        md.max_size  = 0;
        md.options   = LNET_MD_TRUNCATE;
        md.user_ptr  = NULL;
@@ -3028,16 +4140,15 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
        rc = LNetMDBind(md, LNET_UNLINK, &mdh);
        if (rc != 0) {
                CERROR("Can't bind MD: %d\n", rc);
-               goto out_1;
+               goto fail_free_eq;
        }
 
        rc = LNetGet(LNET_NID_ANY, mdh, id,
                     LNET_RESERVED_PORTAL,
-                    LNET_PROTO_PING_MATCHBITS, 0);
+                    LNET_PROTO_PING_MATCHBITS, 0, false);
 
        if (rc != 0) {
                /* Don't CERROR; this could be deliberate! */
-
                rc2 = LNetMDUnlink(mdh);
                LASSERT(rc2 == 0);
 
@@ -3085,7 +4196,6 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
                        replied = 1;
                        rc = event.mlength;
                }
-
        } while (rc2 <= 0 || !event.unlinked);
 
        if (!replied) {
@@ -3093,68 +4203,170 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout,
                        CWARN("%s: Unexpected rc >= 0 but no reply!\n",
                              libcfs_id2str(id));
                rc = -EIO;
-               goto out_1;
+               goto fail_free_eq;
        }
 
        nob = rc;
-       LASSERT(nob >= 0 && nob <= infosz);
+       LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
 
-       rc = -EPROTO;                           /* if I can't parse... */
+       rc = -EPROTO;           /* if I can't parse... */
 
        if (nob < 8) {
-               /* can't check magic/version */
                CERROR("%s: ping info too short %d\n",
                       libcfs_id2str(id), nob);
-               goto out_1;
+               goto fail_free_eq;
        }
 
-       if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
-               lnet_swap_pinginfo(info);
-       } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
+       if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
+               lnet_swap_pinginfo(pbuf);
+       } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
                CERROR("%s: Unexpected magic %08x\n",
-                      libcfs_id2str(id), info->pi_magic);
-               goto out_1;
+                      libcfs_id2str(id), pbuf->pb_info.pi_magic);
+               goto fail_free_eq;
        }
 
-       if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
+       if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
                CERROR("%s: ping w/o NI status: 0x%x\n",
-                      libcfs_id2str(id), info->pi_features);
-               goto out_1;
+                      libcfs_id2str(id), pbuf->pb_info.pi_features);
+               goto fail_free_eq;
        }
 
-       if (nob < offsetof(struct lnet_ping_info, pi_ni[0])) {
-               CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
-                      nob, (int)offsetof(struct lnet_ping_info, pi_ni[0]));
-               goto out_1;
+       if (nob < LNET_PING_INFO_SIZE(0)) {
+               CERROR("%s: Short reply %d(%d min)\n",
+                      libcfs_id2str(id),
+                      nob, (int)LNET_PING_INFO_SIZE(0));
+               goto fail_free_eq;
        }
 
-       if (info->pi_nnis < n_ids)
-               n_ids = info->pi_nnis;
+       if (pbuf->pb_info.pi_nnis < n_ids)
+               n_ids = pbuf->pb_info.pi_nnis;
 
-       if (nob < offsetof(struct lnet_ping_info, pi_ni[n_ids])) {
-               CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
-                      nob, (int)offsetof(struct lnet_ping_info, pi_ni[n_ids]));
-               goto out_1;
+       if (nob < LNET_PING_INFO_SIZE(n_ids)) {
+               CERROR("%s: Short reply %d(%d expected)\n",
+                      libcfs_id2str(id),
+                      nob, (int)LNET_PING_INFO_SIZE(n_ids));
+               goto fail_free_eq;
        }
 
-       rc = -EFAULT;                           /* If I SEGV... */
+       rc = -EFAULT;           /* if I segv in copy_to_user()... */
 
        memset(&tmpid, 0, sizeof(tmpid));
        for (i = 0; i < n_ids; i++) {
-               tmpid.pid = info->pi_pid;
-               tmpid.nid = info->pi_ni[i].ns_nid;
+               tmpid.pid = pbuf->pb_info.pi_pid;
+               tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
                if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
-                       goto out_1;
+                       goto fail_free_eq;
        }
-       rc = info->pi_nnis;
+       rc = pbuf->pb_info.pi_nnis;
 
- out_1:
+ fail_free_eq:
        rc2 = LNetEQFree(eqh);
        if (rc2 != 0)
                CERROR("rc2 %d\n", rc2);
        LASSERT(rc2 == 0);
 
- out_0:
-       LIBCFS_FREE(info, infosz);
+ fail_ping_buffer_decref:
+       lnet_ping_buffer_decref(pbuf);
+       return rc;
+}
+
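+/* Queue the peer identified by id for (re)discovery, wait for discovery to
+ * complete, and copy up to n_ids of the peer's NIDs into the user-supplied
+ * ids array. Returns the number of NIDs copied or a negative errno.
+ */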
+static int
+lnet_discover(struct lnet_process_id id, __u32 force,
+             struct lnet_process_id __user *ids, int n_ids)
+{
+       struct lnet_peer_ni *lpni;
+       struct lnet_peer_ni *p;
+       struct lnet_peer *lp;
+       struct lnet_process_id *buf;
+       int cpt;
+       int i;
+       int rc;
+       int max_intf = lnet_interfaces_max;
+       size_t buf_size;
+
+       if (n_ids <= 0 ||
+           id.nid == LNET_NID_ANY)
+               return -EINVAL;
+
+       if (id.pid == LNET_PID_ANY)
+               id.pid = LNET_PID_LUSTRE;
+
+       /*
+        * if the user buffer has more space than the max_intf
+        * then only fill it up to max_intf
+        */
+       if (n_ids > max_intf)
+               n_ids = max_intf;
+
+       buf_size = n_ids * sizeof(*buf);
+
+       LIBCFS_ALLOC(buf, buf_size);
+       if (!buf)
+               return -ENOMEM;
+
+       cpt = lnet_net_lock_current();
+       lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
+       if (IS_ERR(lpni)) {
+               rc = PTR_ERR(lpni);
+               goto out;
+       }
+
+       /*
+        * Clearing the NIDS_UPTODATE flag ensures the peer will
+        * be discovered, provided discovery has not been disabled.
+        */
+       lp = lpni->lpni_peer_net->lpn_peer;
+       spin_lock(&lp->lp_lock);
+       lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
+       /* If the force flag is set, force a PING and PUSH as well. */
+       if (force)
+               lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
+       spin_unlock(&lp->lp_lock);
+       rc = lnet_discover_peer_locked(lpni, cpt, true);
+       if (rc)
+               goto out_decref;
+
+       /* Peer may have changed. */
+       lp = lpni->lpni_peer_net->lpn_peer;
+       if (lp->lp_nnis < n_ids)
+               n_ids = lp->lp_nnis;
+
+       i = 0;
+       p = NULL;
+       while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
+               buf[i].pid = id.pid;
+               buf[i].nid = p->lpni_nid;
+               if (++i >= n_ids)
+                       break;
+       }
+
+       lnet_net_unlock(cpt);
+
+       rc = -EFAULT;
+       if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
+               goto out_relock;
+       rc = n_ids;
+out_relock:
+       lnet_net_lock(cpt);
+out_decref:
+       lnet_peer_ni_decref_locked(lpni);
+out:
+       lnet_net_unlock(cpt);
+
+       LIBCFS_FREE(buf, buf_size);
+
        return rc;
 }
+
+/**
+ * Retrieve peer discovery status.
+ *
+ * \retval 1 if lnet_peer_discovery_disabled is 0
+ * \retval 0 if lnet_peer_discovery_disabled is 1
+ */
+int
+LNetGetPeerDiscoveryStatus(void)
+{
+       return !lnet_peer_discovery_disabled;
+}
+EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);