* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2016, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/ktime.h>
+#include <linux/moduleparam.h>
+#include <linux/uaccess.h>
#include <lnet/lib-lnet.h>
#define D_LNI D_CONSOLE
-lnet_t the_lnet; /* THE state of the network */
+/*
+ * Initialize ln_api_mutex statically, since it needs to be used in
+ * the discovery_set() callback. That module parameter callback can be
+ * called before module init completes, so the mutex must be ready for
+ * use by then.
+ */
+struct lnet the_lnet = {
+ .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
+}; /* THE state of the network */
EXPORT_SYMBOL(the_lnet);
static char *ip2nets = "";
"NUMA range to consider during Multi-Rail selection");
/*
+ * lnet_health_sensitivity determines by how much we decrement the
+ * health value on a send error. It defaults to 0, which means health
+ * checking is turned off by default.
+ */
+unsigned int lnet_health_sensitivity = 0;
+static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
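+/*
+ * NB: with kernel_param_ops support, module_param(lnet_health_sensitivity,
+ * health_sensitivity, ...) below binds the parameter to
+ * param_ops_health_sensitivity by token-pasting the type name, so
+ * sensitivity_set() runs on every runtime write, e.g.
+ * "echo 100 > /sys/module/lnet/parameters/lnet_health_sensitivity".
+ * The same pattern is repeated for the other tunables below.
+ */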
+static struct kernel_param_ops param_ops_health_sensitivity = {
+ .set = sensitivity_set,
+ .get = param_get_int,
+};
+#define param_check_health_sensitivity(name, p) \
+ __param_check(name, p, int)
+module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
+ &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_health_sensitivity,
+ "Value to decrement the health value by on error");
+
+/*
+ * lnet_recovery_interval determines how often we should perform recovery
+ * on unhealthy interfaces.
+ */
+unsigned int lnet_recovery_interval = 1;
+static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_recovery_interval = {
+ .set = recovery_interval_set,
+ .get = param_get_int,
+};
+#define param_check_recovery_interval(name, p) \
+ __param_check(name, p, int)
+module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
+ &lnet_recovery_interval, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_recovery_interval,
+ "Interval to recover unhealthy interfaces in seconds");
+
+static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
+static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_interfaces_max = {
+ .set = intf_max_set,
+ .get = param_get_int,
+};
+
+#define param_check_interfaces_max(name, p) \
+ __param_check(name, p, int)
+
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_interfaces_max, interfaces_max, 0644);
+#else
+module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
+ &param_ops_interfaces_max, 0644);
+#endif
+MODULE_PARM_DESC(lnet_interfaces_max,
+ "Maximum number of interfaces in a node.");
+
+unsigned lnet_peer_discovery_disabled = 0;
+static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_discovery_disabled = {
+ .set = discovery_set,
+ .get = param_get_int,
+};
+
+#define param_check_discovery_disabled(name, p) \
+ __param_check(name, p, int)
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
+#else
+module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
+ &param_ops_discovery_disabled, 0644);
+#endif
+MODULE_PARM_DESC(lnet_peer_discovery_disabled,
+ "Set to 1 to disable peer discovery on this node.");
+
+unsigned int lnet_drop_asym_route;
+static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_drop_asym_route = {
+ .set = drop_asym_route_set,
+ .get = param_get_int,
+};
+
+#define param_check_drop_asym_route(name, p) \
+ __param_check(name, p, int)
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_drop_asym_route, drop_asym_route, 0644);
+#else
+module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
+ &param_ops_drop_asym_route, 0644);
+#endif
+MODULE_PARM_DESC(lnet_drop_asym_route,
+ "Set to 1 to drop asymmetrical route messages.");
+
+unsigned lnet_transaction_timeout = 50;
+static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_transaction_timeout = {
+ .set = transaction_to_set,
+ .get = param_get_int,
+};
+
+#define param_check_transaction_timeout(name, p) \
+ __param_check(name, p, int)
+module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
+ &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_transaction_timeout,
+ "Maximum number of seconds to wait for a peer response.");
+
+unsigned lnet_retry_count = 0;
+static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_retry_count = {
+ .set = retry_count_set,
+ .get = param_get_int,
+};
+
+#define param_check_retry_count(name, p) \
+ __param_check(name, p, int)
+module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
+#else
+module_param_call(lnet_retry_count, retry_count_set, param_get_int,
+ &lnet_retry_count, S_IRUGO|S_IWUSR);
+#endif
+MODULE_PARM_DESC(lnet_retry_count,
+ "Maximum number of times to retry transmitting a message");
+
+unsigned lnet_lnd_timeout = LNET_LND_DEFAULT_TIMEOUT;
+
+/*
* This sequence number keeps track of how many times DLC was used to
* update the local NIs. It is incremented when a NI is added or
* removed and checked when sending a message to determine if there is
*/
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
-static int lnet_ping(lnet_process_id_t id, signed long timeout,
- lnet_process_id_t __user *ids, int n_ids);
+static int lnet_ping(struct lnet_process_id id, signed long timeout,
+ struct lnet_process_id __user *ids, int n_ids);
+
+static int lnet_discover(struct lnet_process_id id, __u32 force,
+ struct lnet_process_id __user *ids, int n_ids);
+
+static int
+sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int rc;
+ unsigned *sensitivity = (unsigned *)kp->arg;
+ unsigned long value;
+
+ rc = kstrtoul(val, 0, &value);
+ if (rc) {
+ CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
+ return rc;
+ }
+
+ /*
+ * Lock the api_mutex to serialize with LNet startup/shutdown,
+ * so the new value is stored consistently.
+ */
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ if (value > LNET_MAX_HEALTH_VALUE) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ CERROR("Invalid health value. Maximum: %d value = %lu\n",
+ LNET_MAX_HEALTH_VALUE, value);
+ return -EINVAL;
+ }
+
+ *sensitivity = value;
+
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+}
+
+static int
+recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int rc;
+ unsigned *interval = (unsigned *)kp->arg;
+ unsigned long value;
+
+ rc = kstrtoul(val, 0, &value);
+ if (rc) {
+ CERROR("Invalid module parameter value for 'lnet_recovery_interval'\n");
+ return rc;
+ }
+
+ if (value < 1) {
+ CERROR("lnet_recovery_interval must be at least 1 second\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Lock the api_mutex to serialize with LNet startup/shutdown,
+ * so the new value is stored consistently.
+ */
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ *interval = value;
+
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+}
+
+static int
+discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int rc;
+ unsigned *discovery = (unsigned *)kp->arg;
+ unsigned long value;
+ struct lnet_ping_buffer *pbuf;
+
+ rc = kstrtoul(val, 0, &value);
+ if (rc) {
+ CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
+ return rc;
+ }
+
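+ /* normalize to a strict boolean so the comparison below is exact */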
+ value = (value) ? 1 : 0;
+
+ /*
+ * Lock the api_mutex to serialize with LNet startup/shutdown,
+ * so the new value is stored consistently.
+ */
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ if (value == *discovery) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ *discovery = value;
+
+ if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ /* tell peers that discovery setting has changed */
+ lnet_net_lock(LNET_LOCK_EX);
+ pbuf = the_lnet.ln_ping_target;
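+ /* a non-zero value means discovery is disabled */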
+ if (value)
+ pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
+ else
+ pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ lnet_push_update_to_peers(1);
+
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+}
+
+static int
+drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int rc;
+ unsigned int *drop_asym_route = (unsigned int *)kp->arg;
+ unsigned long value;
+
+ rc = kstrtoul(val, 0, &value);
+ if (rc) {
+ CERROR("Invalid module parameter value for "
+ "'lnet_drop_asym_route'\n");
+ return rc;
+ }
+
+ /*
+ * Lock the api_mutex to serialize with LNet startup/shutdown,
+ * so the new value is stored consistently.
+ */
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ if (value == *drop_asym_route) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ *drop_asym_route = value;
+
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+}
+
+static int
+transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int rc;
+ unsigned *transaction_to = (unsigned *)kp->arg;
+ unsigned long value;
+
+ rc = kstrtoul(val, 0, &value);
+ if (rc) {
+ CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
+ return rc;
+ }
+
+ /*
+ * Lock the api_mutex to serialize with LNet startup/shutdown,
+ * so the new value is stored consistently.
+ */
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ if (value < lnet_retry_count || value == 0) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ CERROR("Invalid value for lnet_transaction_timeout (%lu). "
+ "Has to be greater than lnet_retry_count (%u)\n",
+ value, lnet_retry_count);
+ return -EINVAL;
+ }
+
+ if (value == *transaction_to) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ *transaction_to = value;
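+ /*
+ * Derive the LND-level timeout as the per-attempt share of the
+ * transaction timeout: each of the lnet_retry_count attempts
+ * gets an equal slice of the total time budget.
+ */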
+ if (lnet_retry_count == 0)
+ lnet_lnd_timeout = value;
+ else
+ lnet_lnd_timeout = value / lnet_retry_count;
+
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+}
+
+static int
+retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int rc;
+ unsigned *retry_count = (unsigned *)kp->arg;
+ unsigned long value;
+
+ rc = kstrtoul(val, 0, &value);
+ if (rc) {
+ CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
+ return rc;
+ }
+
+ /*
+ * Lock the api_mutex to serialize with LNet startup/shutdown,
+ * so the new value is stored consistently.
+ */
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ if (value > lnet_transaction_timeout) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ CERROR("Invalid value for lnet_retry_count (%lu). "
+ "Has to be smaller than lnet_transaction_timeout (%u)\n",
+ value, lnet_transaction_timeout);
+ return -EINVAL;
+ }
+
+ if (value == *retry_count) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ *retry_count = value;
+
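+ /* keep lnet_lnd_timeout consistent with the new retry count */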
+ if (value == 0)
+ lnet_lnd_timeout = lnet_transaction_timeout;
+ else
+ lnet_lnd_timeout = lnet_transaction_timeout / value;
+
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+}
+
+static int
+intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int value, rc;
+
+ rc = kstrtoint(val, 0, &value);
+ if (rc) {
+ CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
+ return rc;
+ }
+
+ if (value < LNET_INTERFACES_MIN) {
+ CWARN("max interfaces provided are too small, setting to %d\n",
+ LNET_INTERFACES_MAX_DEFAULT);
+ value = LNET_INTERFACES_MAX_DEFAULT;
+ }
+
+ *(int *)kp->arg = value;
+
+ return 0;
+}
static char *
lnet_get_routes(void)
lnet_init_locks(void)
{
spin_lock_init(&the_lnet.ln_eq_wait_lock);
+ spin_lock_init(&the_lnet.ln_msg_resend_lock);
init_waitqueue_head(&the_lnet.ln_eq_waitq);
- init_waitqueue_head(&the_lnet.ln_rc_waitq);
+ init_waitqueue_head(&the_lnet.ln_mt_waitq);
mutex_init(&the_lnet.ln_lnd_mutex);
- mutex_init(&the_lnet.ln_api_mutex);
}
static void
/* create specific kmem_cache for MEs and small MDs (i.e., originally
* allocated in <size-xxx> kmem_cache).
*/
- lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(lnet_me_t),
+ lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
0, 0, NULL);
if (!lnet_mes_cachep)
return -ENOMEM;
CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
- /* Checks for struct lnet_magicversion_t */
- CLASSERT((int)sizeof(lnet_magicversion_t) == 8);
- CLASSERT((int)offsetof(lnet_magicversion_t, magic) == 0);
- CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->magic) == 4);
- CLASSERT((int)offsetof(lnet_magicversion_t, version_major) == 4);
- CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_major) == 2);
- CLASSERT((int)offsetof(lnet_magicversion_t, version_minor) == 6);
- CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_minor) == 2);
-
- /* Checks for struct lnet_hdr_t */
- CLASSERT((int)sizeof(lnet_hdr_t) == 72);
- CLASSERT((int)offsetof(lnet_hdr_t, dest_nid) == 0);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_nid) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, src_nid) == 8);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_nid) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, dest_pid) == 16);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_pid) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, src_pid) == 20);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_pid) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, type) == 24);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->type) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, payload_length) == 28);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->payload_length) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, msg) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg) == 40);
+ /* Checks for struct lnet_magicversion */
+ CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
+ CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
+ CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
+ CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
+ CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
+ CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
+ CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
+
+ /* Checks for struct lnet_hdr */
+ CLASSERT((int)sizeof(struct lnet_hdr) == 72);
+ CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
+ CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
+ CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
/* Ack */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.dst_wmd) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.dst_wmd) == 16);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.match_bits) == 48);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.match_bits) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.mlength) == 56);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.mlength) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
/* Put */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ack_wmd) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ack_wmd) == 16);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.match_bits) == 48);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.match_bits) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.hdr_data) == 56);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.hdr_data) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ptl_index) == 64);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ptl_index) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.put.offset) == 68);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.offset) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
/* Get */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.return_wmd) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.return_wmd) == 16);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.match_bits) == 48);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.match_bits) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.ptl_index) == 56);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.ptl_index) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.src_offset) == 60);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.src_offset) == 4);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.get.sink_length) == 64);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.sink_length) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
/* Reply */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.reply.dst_wmd) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.reply.dst_wmd) == 16);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
/* Hello */
- CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.incarnation) == 32);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.incarnation) == 8);
- CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.type) == 40);
- CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.type) == 4);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
+ CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
+ CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
+
+ /* Checks for struct lnet_ni_status and related constants */
+ CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
+ CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
+ CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
+
+ /* Checks for struct lnet_ni_status */
+ CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
+ CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
+ CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
+ CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
+ CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
+ CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
+ CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
+
+ /* Checks for struct lnet_ping_info and related constants */
+ CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
+ CLASSERT(LNET_PING_FEAT_INVAL == 0);
+ CLASSERT(LNET_PING_FEAT_BASE == 1);
+ CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
+ CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
+ CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
+ CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
+ CLASSERT(LNET_PING_FEAT_BITS == 31);
+
+ /* Checks for struct lnet_ping_info */
+ CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
+ CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
+ CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
+ CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
+ CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
+ CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
+ CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
+ CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
+ CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
+ CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
+ CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
}
-static lnd_t *lnet_find_lnd_by_type(__u32 type)
+static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
{
- lnd_t *lnd;
+ struct lnet_lnd *lnd;
struct list_head *tmp;
/* holding lnd mutex */
list_for_each(tmp, &the_lnet.ln_lnds) {
- lnd = list_entry(tmp, lnd_t, lnd_list);
+ lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
if (lnd->lnd_type == type)
return lnd;
return NULL;
}
+unsigned int
+lnet_get_lnd_timeout(void)
+{
+ return lnet_lnd_timeout;
+}
+EXPORT_SYMBOL(lnet_get_lnd_timeout);
+
void
-lnet_register_lnd (lnd_t *lnd)
+lnet_register_lnd(struct lnet_lnd *lnd)
{
mutex_lock(&the_lnet.ln_lnd_mutex);
EXPORT_SYMBOL(lnet_register_lnd);
void
-lnet_unregister_lnd (lnd_t *lnd)
+lnet_unregister_lnd(struct lnet_lnd *lnd)
{
mutex_lock(&the_lnet.ln_lnd_mutex);
EXPORT_SYMBOL(lnet_unregister_lnd);
void
-lnet_counters_get(lnet_counters_t *counters)
+lnet_counters_get_common(struct lnet_counters_common *common)
{
- lnet_counters_t *ctr;
+ struct lnet_counters *ctr;
+ int i;
+
+ memset(common, 0, sizeof(*common));
+
+ lnet_net_lock(LNET_LOCK_EX);
+
+ cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
+ common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
+ common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
+ common->lcc_errors += ctr->lct_common.lcc_errors;
+ common->lcc_send_count += ctr->lct_common.lcc_send_count;
+ common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
+ common->lcc_route_count += ctr->lct_common.lcc_route_count;
+ common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
+ common->lcc_send_length += ctr->lct_common.lcc_send_length;
+ common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
+ common->lcc_route_length += ctr->lct_common.lcc_route_length;
+ common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+EXPORT_SYMBOL(lnet_counters_get_common);
+
+void
+lnet_counters_get(struct lnet_counters *counters)
+{
+ struct lnet_counters *ctr;
+ struct lnet_counters_health *health = &counters->lct_health;
int i;
memset(counters, 0, sizeof(*counters));
+ lnet_counters_get_common(&counters->lct_common);
+
lnet_net_lock(LNET_LOCK_EX);
cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
- counters->msgs_max += ctr->msgs_max;
- counters->msgs_alloc += ctr->msgs_alloc;
- counters->errors += ctr->errors;
- counters->send_count += ctr->send_count;
- counters->recv_count += ctr->recv_count;
- counters->route_count += ctr->route_count;
- counters->drop_count += ctr->drop_count;
- counters->send_length += ctr->send_length;
- counters->recv_length += ctr->recv_length;
- counters->route_length += ctr->route_length;
- counters->drop_length += ctr->drop_length;
-
+ health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
+ health->lch_resend_count += ctr->lct_health.lch_resend_count;
+ health->lch_response_timeout_count +=
+ ctr->lct_health.lch_response_timeout_count;
+ health->lch_local_interrupt_count +=
+ ctr->lct_health.lch_local_interrupt_count;
+ health->lch_local_dropped_count +=
+ ctr->lct_health.lch_local_dropped_count;
+ health->lch_local_aborted_count +=
+ ctr->lct_health.lch_local_aborted_count;
+ health->lch_local_no_route_count +=
+ ctr->lct_health.lch_local_no_route_count;
+ health->lch_local_timeout_count +=
+ ctr->lct_health.lch_local_timeout_count;
+ health->lch_local_error_count +=
+ ctr->lct_health.lch_local_error_count;
+ health->lch_remote_dropped_count +=
+ ctr->lct_health.lch_remote_dropped_count;
+ health->lch_remote_error_count +=
+ ctr->lct_health.lch_remote_error_count;
+ health->lch_remote_timeout_count +=
+ ctr->lct_health.lch_remote_timeout_count;
+ health->lch_network_timeout_count +=
+ ctr->lct_health.lch_network_timeout_count;
}
lnet_net_unlock(LNET_LOCK_EX);
}
void
lnet_counters_reset(void)
{
- lnet_counters_t *counters;
+ struct lnet_counters *counters;
int i;
lnet_net_lock(LNET_LOCK_EX);
cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
- memset(counters, 0, sizeof(lnet_counters_t));
+ memset(counters, 0, sizeof(struct lnet_counters));
lnet_net_unlock(LNET_LOCK_EX);
}
list_del_init(e);
if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
+ lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
- lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
+ lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
} else { /* NB: Active MEs should be attached on portals */
LBUG();
return recs;
}
-lnet_libhandle_t *
+struct lnet_libhandle *
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
/* ALWAYS called with lnet_res_lock held */
struct list_head *head;
- lnet_libhandle_t *lh;
+ struct lnet_libhandle *lh;
unsigned int hash;
if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
}
void
-lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
+lnet_res_lh_initialize(struct lnet_res_container *rec,
+ struct lnet_libhandle *lh)
{
/* ALWAYS called with lnet_res_lock held */
unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
the_lnet.ln_pid = requested_pid;
INIT_LIST_HEAD(&the_lnet.ln_test_peers);
- INIT_LIST_HEAD(&the_lnet.ln_peers);
INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
INIT_LIST_HEAD(&the_lnet.ln_nets);
INIT_LIST_HEAD(&the_lnet.ln_routers);
INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
+ INIT_LIST_HEAD(&the_lnet.ln_dc_request);
+ INIT_LIST_HEAD(&the_lnet.ln_dc_working);
+ INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
+ INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
+ INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
+ init_waitqueue_head(&the_lnet.ln_dc_waitq);
rc = lnet_descriptor_setup();
if (rc != 0)
the_lnet.ln_interface_cookie = ktime_get_real_ns();
the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(lnet_counters_t));
+ sizeof(struct lnet_counters));
if (the_lnet.ln_counters == NULL) {
CERROR("Failed to allocate counters for LNet\n");
rc = -ENOMEM;
return 0;
}
-lnet_ni_t *
+struct lnet_ni *
lnet_net2ni_locked(__u32 net_id, int cpt)
{
struct lnet_ni *ni;
return NULL;
}
-lnet_ni_t *
+struct lnet_ni *
lnet_net2ni_addref(__u32 net)
{
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
lnet_net_lock(0);
ni = lnet_net2ni_locked(net, 0);
return local;
}
-bool
-lnet_is_ni_healthy_locked(struct lnet_ni *ni)
-{
- if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
- ni->ni_state == LNET_NI_STATE_DEGRADED)
- return true;
-
- return false;
-}
-
-lnet_ni_t *
+struct lnet_ni *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
struct lnet_net *net;
return NULL;
}
-lnet_ni_t *
+struct lnet_ni *
lnet_nid2ni_addref(lnet_nid_t nid)
{
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
lnet_net_lock(0);
ni = lnet_nid2ni_locked(nid, 0);
return count;
}
-static struct lnet_ping_info *
-lnet_ping_info_create(int num_ni)
+struct lnet_ping_buffer *
+lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
+{
+ struct lnet_ping_buffer *pbuf;
+
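+ /*
+ * The buffer is returned holding a single reference for the
+ * caller; release it with lnet_ping_buffer_decref().
+ */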
+ LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
+ if (pbuf) {
+ pbuf->pb_nnis = nnis;
+ atomic_set(&pbuf->pb_refcnt, 1);
+ }
+
+ return pbuf;
+}
+
+void
+lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
+{
+ LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
+ LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
+}
+
+static struct lnet_ping_buffer *
+lnet_ping_target_create(int nnis)
{
- struct lnet_ping_info *ping_info;
- unsigned int infosz;
+ struct lnet_ping_buffer *pbuf;
- infosz = offsetof(struct lnet_ping_info, pi_ni[num_ni]);
- LIBCFS_ALLOC(ping_info, infosz);
- if (ping_info == NULL) {
- CERROR("Can't allocate ping info[%d]\n", num_ni);
+ pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
+ if (pbuf == NULL) {
+ CERROR("Can't allocate ping source [%d]\n", nnis);
return NULL;
}
- ping_info->pi_nnis = num_ni;
- ping_info->pi_pid = the_lnet.ln_pid;
- ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
- ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
+ pbuf->pb_info.pi_nnis = nnis;
+ pbuf->pb_info.pi_pid = the_lnet.ln_pid;
+ pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
+ pbuf->pb_info.pi_features =
+ LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
- return ping_info;
+ return pbuf;
}
static inline int
return count;
}
-static inline void
-lnet_ping_info_free(struct lnet_ping_info *pinfo)
+int
+lnet_ping_info_validate(struct lnet_ping_info *pinfo)
{
- LIBCFS_FREE(pinfo,
- offsetof(struct lnet_ping_info,
- pi_ni[pinfo->pi_nnis]));
+ if (!pinfo)
+ return -EINVAL;
+ if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
+ return -EPROTO;
+ if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
+ return -EPROTO;
+ /* Loopback is guaranteed to be present */
+ if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
+ return -ERANGE;
+ if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
+ return -EPROTO;
+ return 0;
}
static void
-lnet_ping_info_destroy(void)
+lnet_ping_target_destroy(void)
{
struct lnet_net *net;
struct lnet_ni *ni;
}
}
- lnet_ping_info_free(the_lnet.ln_ping_info);
- the_lnet.ln_ping_info = NULL;
+ lnet_ping_buffer_decref(the_lnet.ln_ping_target);
+ the_lnet.ln_ping_target = NULL;
lnet_net_unlock(LNET_LOCK_EX);
}
static void
-lnet_ping_event_handler(lnet_event_t *event)
+lnet_ping_target_event_handler(struct lnet_event *event)
{
- struct lnet_ping_info *pinfo = event->md.user_ptr;
+ struct lnet_ping_buffer *pbuf = event->md.user_ptr;
if (event->unlinked)
- pinfo->pi_features = LNET_PING_FEAT_INVAL;
+ lnet_ping_buffer_decref(pbuf);
}
static int
-lnet_ping_info_setup(struct lnet_ping_info **ppinfo, lnet_handle_md_t *md_handle,
- int ni_count, bool set_eq)
+lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
+ struct lnet_handle_md *ping_mdh,
+ int ni_count, bool set_eq)
{
- lnet_handle_me_t me_handle;
- lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
- lnet_md_t md = {NULL};
- int rc, rc2;
+ struct lnet_process_id id = {
+ .nid = LNET_NID_ANY,
+ .pid = LNET_PID_ANY
+ };
+ struct lnet_handle_me me_handle;
+ struct lnet_md md = { NULL };
+ int rc, rc2;
if (set_eq) {
- rc = LNetEQAlloc(0, lnet_ping_event_handler,
+ rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
&the_lnet.ln_ping_target_eq);
if (rc != 0) {
- CERROR("Can't allocate ping EQ: %d\n", rc);
+ CERROR("Can't allocate ping buffer EQ: %d\n", rc);
return rc;
}
}
- *ppinfo = lnet_ping_info_create(ni_count);
- if (*ppinfo == NULL) {
+ *ppbuf = lnet_ping_target_create(ni_count);
+ if (*ppbuf == NULL) {
rc = -ENOMEM;
- goto failed_0;
+ goto fail_free_eq;
}
+ /* Ping target ME/MD */
rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
LNET_PROTO_PING_MATCHBITS, 0,
LNET_UNLINK, LNET_INS_AFTER,
&me_handle);
if (rc != 0) {
- CERROR("Can't create ping ME: %d\n", rc);
- goto failed_1;
+ CERROR("Can't create ping target ME: %d\n", rc);
+ goto fail_decref_ping_buffer;
}
/* initialize md content */
- md.start = *ppinfo;
- md.length = offsetof(struct lnet_ping_info,
- pi_ni[(*ppinfo)->pi_nnis]);
+ md.start = &(*ppbuf)->pb_info;
+ md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
md.threshold = LNET_MD_THRESH_INF;
md.max_size = 0;
md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
LNET_MD_MANAGE_REMOTE;
- md.user_ptr = NULL;
md.eq_handle = the_lnet.ln_ping_target_eq;
- md.user_ptr = *ppinfo;
+ md.user_ptr = *ppbuf;
- rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
+ rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
if (rc != 0) {
- CERROR("Can't attach ping MD: %d\n", rc);
- goto failed_2;
+ CERROR("Can't attach ping target MD: %d\n", rc);
+ goto fail_unlink_ping_me;
}
+ lnet_ping_buffer_addref(*ppbuf);
return 0;
-failed_2:
+fail_unlink_ping_me:
rc2 = LNetMEUnlink(me_handle);
LASSERT(rc2 == 0);
-failed_1:
- lnet_ping_info_free(*ppinfo);
- *ppinfo = NULL;
-failed_0:
- if (set_eq)
- LNetEQFree(the_lnet.ln_ping_target_eq);
+fail_decref_ping_buffer:
+ LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
+ lnet_ping_buffer_decref(*ppbuf);
+ *ppbuf = NULL;
+fail_free_eq:
+ if (set_eq) {
+ rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
+ LASSERT(rc2 == 0);
+ }
return rc;
}
static void
-lnet_ping_md_unlink(struct lnet_ping_info *pinfo, lnet_handle_md_t *md_handle)
+lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
+ struct lnet_handle_md *ping_mdh)
{
sigset_t blocked = cfs_block_allsigs();
- LNetMDUnlink(*md_handle);
- LNetInvalidateHandle(md_handle);
+ LNetMDUnlink(*ping_mdh);
+ LNetInvalidateMDHandle(ping_mdh);
- /* NB md could be busy; this just starts the unlink */
- while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
- CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
+ /* NB the MD could be busy; this just starts the unlink */
+ while (lnet_ping_buffer_numref(pbuf) > 1) {
+ CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1));
}
}
static void
-lnet_ping_info_install_locked(struct lnet_ping_info *ping_info)
+lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
{
- int i;
struct lnet_ni *ni;
struct lnet_net *net;
struct lnet_ni_status *ns;
+ int i;
+ int rc;
i = 0;
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- LASSERT(i < ping_info->pi_nnis);
+ LASSERT(i < pbuf->pb_nnis);
- ns = &ping_info->pi_ni[i];
+ ns = &pbuf->pb_info.pi_ni[i];
ns->ns_nid = ni->ni_nid;
lnet_ni_lock(ni);
ns->ns_status = (ni->ni_status != NULL) ?
- ni->ni_status->ns_status :
+ ni->ni_status->ns_status :
LNET_NI_STATUS_UP;
ni->ni_status = ns;
lnet_ni_unlock(ni);
i++;
}
-
}
+ /*
+ * We (ab)use the ns_status of the loopback interface to
+ * transmit the sequence number. The first interface listed
+ * must be the loopback interface.
+ */
+ rc = lnet_ping_info_validate(&pbuf->pb_info);
+ if (rc) {
+ LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
+ LBUG();
+ }
+ LNET_PING_BUFFER_SEQNO(pbuf) =
+ atomic_inc_return(&the_lnet.ln_ping_target_seqno);
}
static void
-lnet_ping_target_update(struct lnet_ping_info *pinfo, lnet_handle_md_t md_handle)
+lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
+ struct lnet_handle_md ping_mdh)
{
- struct lnet_ping_info *old_pinfo = NULL;
- lnet_handle_md_t old_md;
+ struct lnet_ping_buffer *old_pbuf = NULL;
+ struct lnet_handle_md old_ping_md;
/* switch the NIs to point to the new ping info created */
lnet_net_lock(LNET_LOCK_EX);
if (!the_lnet.ln_routing)
- pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
- lnet_ping_info_install_locked(pinfo);
+ pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+ if (!lnet_peer_discovery_disabled)
+ pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
- if (the_lnet.ln_ping_info != NULL) {
- old_pinfo = the_lnet.ln_ping_info;
- old_md = the_lnet.ln_ping_target_md;
+ /* Ensure only known feature bits have been set. */
+ LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
+ LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
+
+ lnet_ping_target_install_locked(pbuf);
+
+ if (the_lnet.ln_ping_target) {
+ old_pbuf = the_lnet.ln_ping_target;
+ old_ping_md = the_lnet.ln_ping_target_md;
}
- the_lnet.ln_ping_target_md = md_handle;
- the_lnet.ln_ping_info = pinfo;
+ the_lnet.ln_ping_target_md = ping_mdh;
+ the_lnet.ln_ping_target = pbuf;
lnet_net_unlock(LNET_LOCK_EX);
- if (old_pinfo != NULL) {
- /* unlink the old ping info */
- lnet_ping_md_unlink(old_pinfo, &old_md);
- lnet_ping_info_free(old_pinfo);
+ if (old_pbuf) {
+ /* unlink and free the old ping info */
+ lnet_ping_md_unlink(old_pbuf, &old_ping_md);
+ lnet_ping_buffer_decref(old_pbuf);
}
+
+ lnet_push_update_to_peers(0);
}
static void
{
int rc;
- lnet_ping_md_unlink(the_lnet.ln_ping_info,
+ lnet_ping_md_unlink(the_lnet.ln_ping_target,
&the_lnet.ln_ping_target_md);
rc = LNetEQFree(the_lnet.ln_ping_target_eq);
LASSERT(rc == 0);
- lnet_ping_info_destroy();
+ lnet_ping_target_destroy();
+}
+
+/* Resize the push target. */
+int lnet_push_target_resize(void)
+{
+ struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
+ struct lnet_md md = { NULL };
+ struct lnet_handle_me meh;
+ struct lnet_handle_md mdh;
+ struct lnet_handle_md old_mdh;
+ struct lnet_ping_buffer *pbuf;
+ struct lnet_ping_buffer *old_pbuf;
+ int nnis = the_lnet.ln_push_target_nnis;
+ int rc;
+
+ if (nnis <= 0) {
+ rc = -EINVAL;
+ goto fail_return;
+ }
+again:
+ pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
+ if (!pbuf) {
+ rc = -ENOMEM;
+ goto fail_return;
+ }
+
+ rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+ LNET_PROTO_PING_MATCHBITS, 0,
+ LNET_UNLINK, LNET_INS_AFTER,
+ &meh);
+ if (rc) {
+ CERROR("Can't create push target ME: %d\n", rc);
+ goto fail_decref_pbuf;
+ }
+
+ /* initialize md content */
+ md.start = &pbuf->pb_info;
+ md.length = LNET_PING_INFO_SIZE(nnis);
+ md.threshold = LNET_MD_THRESH_INF;
+ md.max_size = 0;
+ md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
+ LNET_MD_MANAGE_REMOTE;
+ md.user_ptr = pbuf;
+ md.eq_handle = the_lnet.ln_push_target_eq;
+
+ rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
+ if (rc) {
+ CERROR("Can't attach push MD: %d\n", rc);
+ goto fail_unlink_meh;
+ }
+ lnet_ping_buffer_addref(pbuf);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ old_pbuf = the_lnet.ln_push_target;
+ old_mdh = the_lnet.ln_push_target_md;
+ the_lnet.ln_push_target = pbuf;
+ the_lnet.ln_push_target_md = mdh;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ if (old_pbuf) {
+ LNetMDUnlink(old_mdh);
+ lnet_ping_buffer_decref(old_pbuf);
+ }
+
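+ /*
+ * The required size may have grown while we were allocating;
+ * if so, go around and resize again.
+ */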
+ if (nnis < the_lnet.ln_push_target_nnis)
+ goto again;
+
+ CDEBUG(D_NET, "nnis %d success\n", nnis);
+
+ return 0;
+
+fail_unlink_meh:
+ LNetMEUnlink(meh);
+fail_decref_pbuf:
+ lnet_ping_buffer_decref(pbuf);
+fail_return:
+ CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
+ return rc;
+}
+
+static void lnet_push_target_event_handler(struct lnet_event *ev)
+{
+ struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
+
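+ /* a peer may push in its own byte order; convert before use */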
+ if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
+ lnet_swap_pinginfo(pbuf);
+
+ lnet_peer_push_event(ev);
+ if (ev->unlinked)
+ lnet_ping_buffer_decref(pbuf);
+}
+
+/* Initialize the push target. */
+static int lnet_push_target_init(void)
+{
+ int rc;
+
+ if (the_lnet.ln_push_target)
+ return -EALREADY;
+
+ rc = LNetEQAlloc(0, lnet_push_target_event_handler,
+ &the_lnet.ln_push_target_eq);
+ if (rc) {
+ CERROR("Can't allocated push target EQ: %d\n", rc);
+ return rc;
+ }
+
+ /* Start at the required minimum; we'll enlarge if needed. */
+ the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
+
+ rc = lnet_push_target_resize();
+
+ if (rc) {
+ LNetEQFree(the_lnet.ln_push_target_eq);
+ LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
+ }
+
+ return rc;
+}
+
+/* Clean up the push target. */
+static void lnet_push_target_fini(void)
+{
+ if (!the_lnet.ln_push_target)
+ return;
+
+ /* Unlink and invalidate to prevent new references. */
+ LNetMDUnlink(the_lnet.ln_push_target_md);
+ LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
+
+ /* Wait for the unlink to complete. */
+ while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
+ CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
+
+ lnet_ping_buffer_decref(the_lnet.ln_push_target);
+ the_lnet.ln_push_target = NULL;
+ the_lnet.ln_push_target_nnis = 0;
+
+ LNetEQFree(the_lnet.ln_push_target_eq);
+ LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
}
static int
-lnet_ni_tq_credits(lnet_ni_t *ni)
+lnet_ni_tq_credits(struct lnet_ni *ni)
{
int credits;
}
static void
-lnet_ni_unlink_locked(lnet_ni_t *ni)
+lnet_ni_unlink_locked(struct lnet_ni *ni)
{
- if (!list_empty(&ni->ni_cptlist)) {
- list_del_init(&ni->ni_cptlist);
- lnet_ni_decref_locked(ni, 0);
- }
-
/* move it to zombie list and nobody can find it anymore */
LASSERT(!list_empty(&ni->ni_netlist));
list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
{
int i;
int islo;
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
struct list_head *zombie_list = &net->net_ni_zombie;
/*
int j;
ni = list_entry(zombie_list->next,
- lnet_ni_t, ni_netlist);
+ struct lnet_ni, ni_netlist);
list_del_init(&ni->ni_netlist);
/* the ni should be in deleting state. If it's not it's
* a bug */
struct lnet_net *net = ni->ni_net;
lnet_net_lock(LNET_LOCK_EX);
+ lnet_ni_lock(ni);
ni->ni_state = LNET_NI_STATE_DELETING;
+ lnet_ni_unlock(ni);
lnet_ni_unlink_locked(ni);
lnet_incr_dlc_seq();
lnet_net_unlock(LNET_LOCK_EX);
while (!list_empty(&net->net_ni_list)) {
ni = list_entry(net->net_ni_list.next,
- lnet_ni_t, ni_netlist);
+ struct lnet_ni, ni_netlist);
lnet_net_unlock(LNET_LOCK_EX);
lnet_shutdown_lndni(ni);
lnet_net_lock(LNET_LOCK_EX);
lnet_shutdown_lndnets(void)
{
struct lnet_net *net;
+ struct list_head resend;
+ struct lnet_msg *msg, *tmp;
+
+ INIT_LIST_HEAD(&resend);
/* NB called holding the global mutex */
/* All quiet on the API front */
- LASSERT(!the_lnet.ln_shutdown);
+ LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
LASSERT(the_lnet.ln_refcount == 0);
lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_shutdown = 1; /* flag shutdown */
+ the_lnet.ln_state = LNET_STATE_STOPPING;
while (!list_empty(&the_lnet.ln_nets)) {
/*
lnet_shutdown_lndnet(net);
}
+ spin_lock(&the_lnet.ln_msg_resend_lock);
+ list_splice(&the_lnet.ln_msg_resend, &resend);
+ spin_unlock(&the_lnet.ln_msg_resend_lock);
+
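+ /* cancel all queued resends; nothing can be sent any more */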
+ list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
+ list_del_init(&msg->msg_list);
+ msg->msg_no_resend = true;
+ lnet_finalize(msg, -ECANCELED);
+ }
+
lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_shutdown = 0;
+ the_lnet.ln_state = LNET_STATE_SHUTDOWN;
lnet_net_unlock(LNET_LOCK_EX);
}
goto failed0;
}
+ lnet_ni_lock(ni);
ni->ni_state = LNET_NI_STATE_ACTIVE;
+ lnet_ni_unlock(ni);
/* We keep a reference on the loopback net through the loopback NI */
if (net->net_lnd->lnd_type == LOLND) {
atomic_set(&ni->ni_tx_credits,
lnet_ni_tq_credits(ni) * ni->ni_ncpts);
+ atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
libcfs_nid2str(ni->ni_nid),
static int
lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
{
- struct lnet_ni *ni;
- struct lnet_net *net_l = NULL;
+ struct lnet_ni *ni;
+ struct lnet_net *net_l = NULL;
struct list_head local_ni_list;
int rc;
int ni_count = 0;
__u32 lnd_type;
- lnd_t *lnd;
+ struct lnet_lnd *lnd;
int peer_timeout =
net->net_tunables.lct_peer_timeout;
int maxtxcredits =
if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
lnd_type = LNET_NETTYP(net->net_id);
- LASSERT(libcfs_isknown_lnd(lnd_type));
-
- if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
- lnd_type == IIBLND || lnd_type == VIBLND) {
- CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
- rc = -EINVAL;
- goto failed0;
- }
-
mutex_lock(&the_lnet.ln_lnd_mutex);
lnd = lnet_find_lnd_by_type(lnd_type);
int rc;
int ni_count = 0;
+ /*
+ * Change to running state before bringing up the LNDs. This
+ * allows lnet_shutdown_lndnets() to assert that we've passed
+ * through here.
+ */
+ lnet_net_lock(LNET_LOCK_EX);
+ the_lnet.ln_state = LNET_STATE_RUNNING;
+ lnet_net_unlock(LNET_LOCK_EX);
+
while (!list_empty(netlist)) {
net = list_entry(netlist->next, struct lnet_net, net_list);
list_del_init(&net->net_list);
lnet_assert_wire_constants();
- memset(&the_lnet, 0, sizeof(the_lnet));
-
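+ /*
+ * NB: do not memset the_lnet here; ln_api_mutex is statically
+ * initialized and may already be in use by a module parameter
+ * callback.
+ */
+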
/* refer to global cfs_cpt_table for now */
the_lnet.ln_cpt_table = cfs_cpt_table;
the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
}
the_lnet.ln_refcount = 0;
- LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
+ LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
INIT_LIST_HEAD(&the_lnet.ln_lnds);
INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
/* The hash table size is the number of bits it takes to express the set
while (!list_empty(&the_lnet.ln_lnds))
lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
- lnd_t, lnd_list));
+ struct lnet_lnd, lnd_list));
lnet_destroy_locks();
}
int im_a_router = 0;
int rc;
int ni_count;
- struct lnet_ping_info *pinfo;
- lnet_handle_md_t md_handle;
+ struct lnet_ping_buffer *pbuf;
+ struct lnet_handle_md ping_mdh;
struct list_head net_head;
struct lnet_net *net;
the_lnet.ln_refcount = 1;
/* Now I may use my own API functions... */
- rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
+ rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
if (rc != 0)
goto err_acceptor_stop;
- lnet_ping_target_update(pinfo, md_handle);
+ lnet_ping_target_update(pbuf, ping_mdh);
+
+ rc = lnet_monitor_thr_start();
+ if (rc != 0)
+ goto err_stop_ping;
+
+ rc = lnet_push_target_init();
+ if (rc != 0)
+ goto err_stop_monitor_thr;
- rc = lnet_router_checker_start();
+ rc = lnet_peer_discovery_start();
if (rc != 0)
- goto err_stop_ping;
+ goto err_destroy_push_target;
lnet_fault_init();
- lnet_proc_init();
+ lnet_router_debugfs_init();
mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
+err_destroy_push_target:
+ lnet_push_target_fini();
+err_stop_monitor_thr:
+ lnet_monitor_thr_stop();
err_stop_ping:
lnet_ping_target_fini();
err_acceptor_stop:
lnet_fault_fini();
- lnet_proc_fini();
- lnet_router_checker_stop();
+ lnet_router_debugfs_fini();
+ lnet_peer_discovery_stop();
+ lnet_push_target_fini();
+ lnet_monitor_thr_stop();
lnet_ping_target_fini();
/* Teardown fns that use my own API functions BEFORE here */
}
EXPORT_SYMBOL(LNetNIFini);
-
-static int lnet_handle_dbg_task(struct lnet_ioctl_dbg *dbg,
- struct lnet_dbg_task_info *dbg_info)
-{
- switch (dbg->dbg_task) {
- case LNET_DBG_INCR_DLC_SEQ:
- lnet_incr_dlc_seq();
- }
-
- return 0;
-}
/**
* Grabs the ni data from the ni structure and fills the out
* parameters
}
cfg_ni->lic_nid = ni->ni_nid;
- cfg_ni->lic_status = ni->ni_status->ns_status;
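+ /*
+ * The loopback NI's ns_status carries the ping buffer sequence
+ * number (see lnet_ping_target_install_locked()), so report its
+ * status as up explicitly.
+ */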
+ if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
+ cfg_ni->lic_status = LNET_NI_STATUS_UP;
+ else
+ cfg_ni->lic_status = ni->ni_status->ns_status;
cfg_ni->lic_tcp_bonding = use_tcp_bonding;
cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
if (stats) {
- stats->send_count = atomic_read(&ni->ni_stats.send_count);
- stats->recv_count = atomic_read(&ni->ni_stats.recv_count);
+ stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
+ LNET_STATS_TYPE_SEND);
+ stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
+ LNET_STATS_TYPE_RECV);
+ stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
+ LNET_STATS_TYPE_DROP);
}
/*
config->cfg_config_u.cfg_net.net_peer_rtr_credits =
ni->ni_net->net_tunables.lct_peer_rtr_credits;
- net_config->ni_status = ni->ni_status->ns_status;
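+ /* loopback ns_status holds the ping seqno; report UP directly */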
+ if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
+ net_config->ni_status = LNET_NI_STATUS_UP;
+ else
+ net_config->ni_status = ni->ni_status->ns_status;
if (ni->ni_cpts) {
int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
struct lnet_ni *ni;
struct lnet_net *net = mynet;
+ /*
+ * The net may have been cleaned out while a message was being
+ * sent, so check that each net's NI list is non-empty before
+ * dereferencing it.
+ */
if (prev == NULL) {
if (net == NULL)
net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
net_list);
+ if (list_empty(&net->net_ni_list))
+ return NULL;
ni = list_entry(net->net_ni_list.next, struct lnet_ni,
ni_netlist);
/* get the next net */
net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
net_list);
+ if (list_empty(&net->net_ni_list))
+ return NULL;
/* get the ni on it */
ni = list_entry(net->net_ni_list.next, struct lnet_ni,
ni_netlist);
return ni;
}
+ if (list_empty(&prev->ni_netlist))
+ return NULL;
+
/* there are more nis left */
ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
return rc;
}
+int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
+{
+ struct lnet_ni *ni;
+ int cpt;
+ int rc = -ENOENT;
+
+ if (!msg_stats)
+ return -EINVAL;
+
+ cpt = lnet_net_lock_current();
+
+ ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
+
+ if (ni) {
+ lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
+ rc = 0;
+ }
+
+ lnet_net_unlock(cpt);
+
+ return rc;
+}
+
static int lnet_add_net_common(struct lnet_net *net,
struct lnet_ioctl_config_lnd_tunables *tun)
{
__u32 net_id;
- lnet_ping_info_t *pinfo;
- lnet_handle_md_t md_handle;
+ struct lnet_ping_buffer *pbuf;
+ struct lnet_handle_md ping_mdh;
int rc;
- lnet_remotenet_t *rnet;
+ struct lnet_remotenet *rnet;
int net_ni_count;
int num_acceptor_nets;
/*
* make sure you calculate the correct number of slots in the ping
- * info. Since the ping info is a flattened list of all the NIs,
+ * buffer. Since the ping info is a flattened list of all the NIs,
* we should allocate enough slots to accommodate the number of NIs
* which will be added.
*
*/
net_ni_count = lnet_get_net_ni_count_pre(net);
- rc = lnet_ping_info_setup(&pinfo, &md_handle,
- net_ni_count + lnet_get_ni_count(),
- false);
+ rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
+ net_ni_count + lnet_get_ni_count(),
+ false);
if (rc < 0) {
lnet_net_free(net);
return rc;
lnet_peer_net_added(net);
lnet_net_unlock(LNET_LOCK_EX);
- lnet_ping_target_update(pinfo, md_handle);
+ lnet_ping_target_update(pbuf, ping_mdh);
return 0;
failed:
- lnet_ping_md_unlink(pinfo, &md_handle);
- lnet_ping_info_free(pinfo);
+ lnet_ping_md_unlink(pbuf, &ping_mdh);
+ lnet_ping_buffer_decref(pbuf);
return rc;
}
struct lnet_ni *ni;
struct lnet_ioctl_config_lnd_tunables *tun = NULL;
int rc, i;
- __u32 net_id;
+ __u32 net_id, lnd_type;
/* get the tunables if they are available */
if (conf->lic_cfg_hdr.ioc_len >=
tun);
net_id = LNET_NIDNET(conf->lic_nid);
+ lnd_type = LNET_NETTYP(net_id);
+
+ if (!libcfs_isknown_lnd(lnd_type)) {
+ CERROR("No valid net and lnd information provided\n");
+ return -EINVAL;
+ }
net = lnet_net_alloc(net_id, NULL);
if (!net)
struct lnet_net *net;
struct lnet_ni *ni;
__u32 net_id = LNET_NIDNET(conf->lic_nid);
- lnet_ping_info_t *pinfo;
- lnet_handle_md_t md_handle;
+ struct lnet_ping_buffer *pbuf;
+ struct lnet_handle_md ping_mdh;
int rc;
int net_count;
__u32 addr;
CERROR("net %s not found\n",
libcfs_net2str(net_id));
rc = -ENOENT;
- goto net_unlock;
+ goto unlock_net;
}
addr = LNET_NIDADDR(conf->lic_nid);
lnet_net_unlock(0);
/* create and link a new ping info, before removing the old one */
- rc = lnet_ping_info_setup(&pinfo, &md_handle,
+ rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
lnet_get_ni_count() - net_count,
false);
if (rc != 0)
- goto out;
+ goto unlock_api_mutex;
lnet_shutdown_lndnet(net);
if (lnet_count_acceptor_nets() == 0)
lnet_acceptor_stop();
- lnet_ping_target_update(pinfo, md_handle);
+ lnet_ping_target_update(pbuf, ping_mdh);
- goto out;
+ goto unlock_api_mutex;
}
ni = lnet_nid2ni_locked(conf->lic_nid, 0);
if (!ni) {
- CERROR("nid %s not found \n",
+ CERROR("nid %s not found\n",
libcfs_nid2str(conf->lic_nid));
rc = -ENOENT;
- goto net_unlock;
+ goto unlock_net;
}
net_count = lnet_get_net_ni_count_locked(net);
lnet_net_unlock(0);
/* create and link a new ping info, before removing the old one */
- rc = lnet_ping_info_setup(&pinfo, &md_handle,
+ rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
lnet_get_ni_count() - 1, false);
if (rc != 0)
- goto out;
+ goto unlock_api_mutex;
lnet_shutdown_lndni(ni);
if (lnet_count_acceptor_nets() == 0)
lnet_acceptor_stop();
- lnet_ping_target_update(pinfo, md_handle);
+ lnet_ping_target_update(pbuf, ping_mdh);
/* check if the net is empty and remove it if it is */
if (net_count == 1)
lnet_shutdown_lndnet(net);
- goto out;
+ goto unlock_api_mutex;
-net_unlock:
+unlock_net:
lnet_net_unlock(0);
-out:
+unlock_api_mutex:
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
if (rc > 1) {
rc = -EINVAL; /* only add one network per call */
- goto failed;
+ goto out_unlock_clean;
}
net = list_entry(net_head.next, struct lnet_net, net_list);
conf->cfg_config_u.cfg_net.net_max_tx_credits;
rc = lnet_add_net_common(net, &tun);
- if (rc != 0)
- goto failed;
- return 0;
-
-failed:
+out_unlock_clean:
mutex_unlock(&the_lnet.ln_api_mutex);
while (!list_empty(&net_head)) {
+ /* net_head list is empty in success case */
net = list_entry(net_head.next, struct lnet_net, net_list);
list_del_init(&net->net_list);
lnet_net_free(net);
lnet_dyn_del_net(__u32 net_id)
{
struct lnet_net *net;
- struct lnet_ping_info *pinfo;
- lnet_handle_md_t md_handle;
+ struct lnet_ping_buffer *pbuf;
+ struct lnet_handle_md ping_mdh;
int rc;
int net_ni_count;
net = lnet_get_net_locked(net_id);
if (net == NULL) {
+ lnet_net_unlock(0);
rc = -EINVAL;
goto out;
}
lnet_net_unlock(0);
/* create and link a new ping info, before removing the old one */
- rc = lnet_ping_info_setup(&pinfo, &md_handle,
- lnet_get_ni_count() - net_ni_count, false);
+ rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
+ lnet_get_ni_count() - net_ni_count, false);
if (rc != 0)
goto out;
if (lnet_count_acceptor_nets() == 0)
lnet_acceptor_stop();
- lnet_ping_target_update(pinfo, md_handle);
+ lnet_ping_target_update(pbuf, ping_mdh);
out:
mutex_unlock(&the_lnet.ln_api_mutex);
return atomic_read(&lnet_dlc_seq_no);
}
+static void
+lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
+{
+ struct lnet_net *net;
+ struct lnet_ni *ni;
+
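+ /*
+ * Walk every local NI, update the matching NID (or all of them)
+ * and queue any NI whose health is now below the maximum for
+ * recovery.
+ */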
+ lnet_net_lock(LNET_LOCK_EX);
+ list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ if (ni->ni_nid == nid || all) {
+ atomic_set(&ni->ni_healthv, value);
+ if (list_empty(&ni->ni_recovery) &&
+ value < LNET_MAX_HEALTH_VALUE) {
+ CERROR("manually adding local NI %s to recovery\n",
+ libcfs_nid2str(ni->ni_nid));
+ list_add_tail(&ni->ni_recovery,
+ &the_lnet.ln_mt_localNIRecovq);
+ lnet_ni_addref_locked(ni, 0);
+ }
+ if (!all) {
+ lnet_net_unlock(LNET_LOCK_EX);
+ return;
+ }
+ }
+ }
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+
+static int
+lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
+{
+ int cpt, rc = 0;
+ struct lnet_ni *ni;
+ lnet_nid_t nid = stats->hlni_nid;
+
+ cpt = lnet_net_lock_current();
+ ni = lnet_nid2ni_locked(nid, cpt);
+
+ if (!ni) {
+ rc = -ENOENT;
+ goto unlock;
+ }
+
+ stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
+ stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
+ stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
+ stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
+ stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
+ stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
+ stats->hlni_health_value = atomic_read(&ni->ni_healthv);
+
+unlock:
+ lnet_net_unlock(cpt);
+
+ return rc;
+}
+
+static int
+lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
+{
+ struct lnet_ni *ni;
+ int i = 0;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
+ list->rlst_nid_array[i] = ni->ni_nid;
+ i++;
+ if (i >= LNET_MAX_SHOW_NUM_NID)
+ break;
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+ list->rlst_num_nids = i;
+
+ return 0;
+}
+
+static int
+lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
+{
+ struct lnet_peer_ni *lpni;
+ int i = 0;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
+ list->rlst_nid_array[i] = lpni->lpni_nid;
+ i++;
+ if (i >= LNET_MAX_SHOW_NUM_NID)
+ break;
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+ list->rlst_num_nids = i;
+
+ return 0;
+}
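/*
 * Editor's sketch, not part of the patch above: draining the local NI
 * recovery queue through lnet_get_local_ni_recovery_list(). The wrapper
 * name is made up; the rlst_* fields come from the handlers above. A
 * static buffer keeps the large NID array off the stack for brevity.
 */
static void example_dump_local_recovery_queue(void)
{
    static struct lnet_ioctl_recovery_list list;
    int i;

    memset(&list, 0, sizeof(list));
    if (lnet_get_local_ni_recovery_list(&list) != 0)
        return;

    for (i = 0; i < list.rlst_num_nids; i++)
        CDEBUG(D_NET, "NI in recovery: %s\n",
               libcfs_nid2str(list.rlst_nid_array[i]));
}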
+
/**
* LNet ioctl handler.
*
{
struct libcfs_ioctl_data *data = arg;
struct lnet_ioctl_config_data *config;
- lnet_process_id_t id = {0};
- lnet_ni_t *ni;
+ struct lnet_process_id id = {0};
+ struct lnet_ni *ni;
int rc;
BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
__u32 tun_size;
cfg_ni = arg;
+
/* get the tunables if they are available */
if (cfg_ni->lic_cfg_hdr.ioc_len <
- sizeof(*cfg_ni) + sizeof(*stats)+ sizeof(*tun))
+ sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
return -EINVAL;
stats = (struct lnet_ioctl_element_stats *)
return rc;
}
+ case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
+ struct lnet_ioctl_element_msg_stats *msg_stats = arg;
+
+ if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_ni_stats(msg_stats);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+ }
+
case IOC_LIBCFS_GET_NET: {
size_t total = sizeof(*config) +
sizeof(struct lnet_ioctl_net_config);
return rc;
case IOC_LIBCFS_SET_NUMA_RANGE: {
- struct lnet_ioctl_numa_range *numa;
+ struct lnet_ioctl_set_value *numa;
numa = arg;
- if (numa->nr_hdr.ioc_len != sizeof(*numa))
+ if (numa->sv_hdr.ioc_len != sizeof(*numa))
return -EINVAL;
- mutex_lock(&the_lnet.ln_api_mutex);
- lnet_numa_range = numa->nr_range;
- mutex_unlock(&the_lnet.ln_api_mutex);
+ lnet_net_lock(LNET_LOCK_EX);
+ lnet_numa_range = numa->sv_value;
+ lnet_net_unlock(LNET_LOCK_EX);
return 0;
}
case IOC_LIBCFS_GET_NUMA_RANGE: {
- struct lnet_ioctl_numa_range *numa;
+ struct lnet_ioctl_set_value *numa;
numa = arg;
- if (numa->nr_hdr.ioc_len != sizeof(*numa))
+ if (numa->sv_hdr.ioc_len != sizeof(*numa))
return -EINVAL;
- numa->nr_range = lnet_numa_range;
+ numa->sv_value = lnet_numa_range;
return 0;
}
return rc;
}
+ case IOC_LIBCFS_GET_LOCAL_HSTATS: {
+ struct lnet_ioctl_local_ni_hstats *stats = arg;
+
+ if (stats->hlni_hdr.ioc_len < sizeof(*stats))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_local_ni_hstats(stats);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+ }
+
+ case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
+ struct lnet_ioctl_recovery_list *list = arg;
+
+ if (list->rlst_hdr.ioc_len < sizeof(*list))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
+ rc = lnet_get_local_ni_recovery_list(list);
+ else
+ rc = lnet_get_peer_ni_recovery_list(list);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+ }
+
case IOC_LIBCFS_ADD_PEER_NI: {
struct lnet_ioctl_peer_cfg *cfg = arg;
return -EINVAL;
mutex_lock(&the_lnet.ln_api_mutex);
- rc = lnet_add_peer_ni_to_peer(cfg->prcfg_prim_nid,
- cfg->prcfg_cfg_nid,
- cfg->prcfg_mr);
+ rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
+ cfg->prcfg_cfg_nid,
+ cfg->prcfg_mr);
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
}
return -EINVAL;
mutex_lock(&the_lnet.ln_api_mutex);
- rc = lnet_del_peer_ni_from_peer(cfg->prcfg_prim_nid,
- cfg->prcfg_cfg_nid);
+ rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
+ cfg->prcfg_cfg_nid);
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
}
case IOC_LIBCFS_GET_PEER_NI: {
struct lnet_ioctl_peer_cfg *cfg = arg;
- struct lnet_peer_ni_credit_info *lpni_cri;
- struct lnet_ioctl_element_stats *lpni_stats;
- size_t total = sizeof(*cfg) + sizeof(*lpni_cri) +
- sizeof(*lpni_stats);
- if (cfg->prcfg_hdr.ioc_len < total)
+ if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
return -EINVAL;
- lpni_cri = (struct lnet_peer_ni_credit_info*) cfg->prcfg_bulk;
- lpni_stats = (struct lnet_ioctl_element_stats *)
- (cfg->prcfg_bulk + sizeof(*lpni_cri));
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_get_peer_info(cfg,
+ (void __user *)cfg->prcfg_bulk);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+ }
+
+ case IOC_LIBCFS_GET_PEER_LIST: {
+ struct lnet_ioctl_peer_cfg *cfg = arg;
+
+ if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
+ return -EINVAL;
mutex_lock(&the_lnet.ln_api_mutex);
- rc = lnet_get_peer_info(cfg->prcfg_idx, &cfg->prcfg_prim_nid,
- &cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
- lpni_cri, lpni_stats);
+ rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
+ (struct lnet_process_id __user *)cfg->prcfg_bulk);
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
}
- case IOC_LIBCFS_NOTIFY_ROUTER: {
- unsigned long jiffies_passed;
+ case IOC_LIBCFS_SET_HEALHV: {
+ struct lnet_ioctl_reset_health_cfg *cfg = arg;
+ int value;
+
+ if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
+ return -EINVAL;
+ if (cfg->rh_value < 0 ||
+ cfg->rh_value > LNET_MAX_HEALTH_VALUE)
+ value = LNET_MAX_HEALTH_VALUE;
+ else
+ value = cfg->rh_value;
+ CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
+ value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
+ "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
+ mutex_lock(&the_lnet.ln_api_mutex);
+ if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
+ lnet_ni_set_healthv(cfg->rh_nid, value,
+ cfg->rh_all);
+ else
+ lnet_peer_ni_set_healthv(cfg->rh_nid, value,
+ cfg->rh_all);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
- jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
- jiffies_passed = cfs_time_seconds(jiffies_passed);
+ case IOC_LIBCFS_NOTIFY_ROUTER: {
+ time64_t deadline = data->ioc_u64[0] - ktime_get_real_seconds();
+ /* The deadline passed in by the user should be some time in
+ * seconds in the future since the UNIX epoch. Map it from the
+ * wall clock to the monotonic clock used by ktime_get_seconds():
+ * mono_deadline = mono_now + (real_deadline - real_now).
+ */
+ deadline += ktime_get_seconds();
return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
- jiffies - jiffies_passed);
+ deadline);
}
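    /*
     * Editor's note: a worked example of the conversion above, with
     * made-up numbers. If the wall clock reads 1000000000s, the user
     * deadline is 1000000060s (60s ahead), and the monotonic clock
     * reads 5000s, then deadline = (1000000060 - 1000000000) + 5000
     * = 5060, i.e. still 60s in the future in monotonic terms.
     */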
case IOC_LIBCFS_LNET_DIST:
id.nid = data->ioc_nid;
id.pid = data->ioc_u32[0];
- /* Don't block longer than 2 minutes */
- if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
- return -EINVAL;
-
- /* If timestamp is negative then disable timeout */
- if ((s32)data->ioc_u32[1] < 0)
- timeout = MAX_SCHEDULE_TIMEOUT;
+ /* If the timeout is zero or negative, or exceeds the 3 minute
+ * DEFAULT_PEER_TIMEOUT, fall back to the default */
+ if (((s32)data->ioc_u32[1] <= 0) ||
+ data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
+ timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
else
timeout = msecs_to_jiffies(data->ioc_u32[1]);
rc = lnet_ping(id, timeout, data->ioc_pbuf1,
- data->ioc_plen1 / sizeof(lnet_process_id_t));
+ data->ioc_plen1 / sizeof(struct lnet_process_id));
+
if (rc < 0)
return rc;
+
data->ioc_count = rc;
return 0;
}
- case IOC_LIBCFS_DBG: {
- struct lnet_ioctl_dbg *dbg = arg;
- struct lnet_dbg_task_info *dbg_info;
- size_t total = sizeof(*dbg) + sizeof(*dbg_info);
+ case IOC_LIBCFS_PING_PEER: {
+ struct lnet_ioctl_ping_data *ping = arg;
+ struct lnet_peer *lp;
+ signed long timeout;
- if (dbg->dbg_hdr.ioc_len < total)
- return -EINVAL;
+ /* If the timeout is zero or negative, or exceeds the 3 minute
+ * DEFAULT_PEER_TIMEOUT, fall back to the default */
+ if (((s32)ping->op_param) <= 0 ||
+ ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
+ timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
+ else
+ timeout = msecs_to_jiffies(ping->op_param);
- dbg_info = (struct lnet_dbg_task_info*) dbg->dbg_bulk;
+ rc = lnet_ping(ping->ping_id, timeout,
+ ping->ping_buf,
+ ping->ping_count);
+ if (rc < 0)
+ return rc;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lp = lnet_find_peer(ping->ping_id.nid);
+ if (lp) {
+ ping->ping_id.nid = lp->lp_primary_nid;
+ ping->mr_info = lnet_peer_is_multi_rail(lp);
+ lnet_peer_decref_locked(lp);
+ }
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ ping->ping_count = rc;
+ return 0;
+ }
+
+ case IOC_LIBCFS_DISCOVER: {
+ struct lnet_ioctl_ping_data *discover = arg;
+ struct lnet_peer *lp;
+
+ rc = lnet_discover(discover->ping_id, discover->op_param,
+ discover->ping_buf,
+ discover->ping_count);
+ if (rc < 0)
+ return rc;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lp = lnet_find_peer(discover->ping_id.nid);
+ if (lp) {
+ discover->ping_id.nid = lp->lp_primary_nid;
+ discover->mr_info = lnet_peer_is_multi_rail(lp);
+ lnet_peer_decref_locked(lp);
+ }
+ mutex_unlock(&the_lnet.ln_api_mutex);
- return lnet_handle_dbg_task(dbg, dbg_info);
+ discover->ping_count = rc;
+ return 0;
}
default:
}
EXPORT_SYMBOL(LNetCtl);
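/*
 * Editor's sketch, not part of the patch above: exercising the new
 * IOC_LIBCFS_SET_HEALHV handler through LNetCtl(). The rh_* fields and
 * the clamping behaviour are taken from the case above; the wrapper
 * name and LNET_HEALTH_TYPE_PEER_NI (assumed counterpart of LOCAL_NI
 * handled by the else branch) are assumptions.
 */
static int example_reset_peer_healthv(lnet_nid_t nid)
{
    struct lnet_ioctl_reset_health_cfg cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.rh_hdr.ioc_len = sizeof(cfg);
    cfg.rh_type = LNET_HEALTH_TYPE_PEER_NI;
    cfg.rh_nid = nid;
    cfg.rh_all = false;
    /* out-of-range values are clamped to LNET_MAX_HEALTH_VALUE */
    cfg.rh_value = -1;

    return LNetCtl(IOC_LIBCFS_SET_HEALHV, &cfg);
}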
-void LNetDebugPeer(lnet_process_id_t id)
+void LNetDebugPeer(struct lnet_process_id id)
{
lnet_debug_peer(id.nid);
}
EXPORT_SYMBOL(LNetDebugPeer);
/**
- * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
- * all interfaces share a same PID, as requested by LNetNIInit().
+ * Determine if the specified peer \a nid is on the local node.
+ *
+ * \param nid peer nid to check
+ *
+ * \retval true If peer NID is on the local node.
+ * \retval false If peer NID is not on the local node.
+ */
+bool LNetIsPeerLocal(lnet_nid_t nid)
+{
+ struct lnet_net *net;
+ struct lnet_ni *ni;
+ int cpt;
+
+ cpt = lnet_net_lock_current();
+ list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ if (ni->ni_nid == nid) {
+ lnet_net_unlock(cpt);
+ return true;
+ }
+ }
+ }
+ lnet_net_unlock(cpt);
+
+ return false;
+}
+EXPORT_SYMBOL(LNetIsPeerLocal);
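/*
 * Editor's sketch, not part of the patch above: LNetIsPeerLocal() lets a
 * caller special-case traffic addressed to one of its own NIDs. The
 * helper name is made up.
 */
static bool example_needs_network(lnet_nid_t dst_nid)
{
    /* send over the wire only when no local NI owns dst_nid */
    return !LNetIsPeerLocal(dst_nid);
}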
+
+/**
+ * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
+ * Note that all interfaces share a same PID, as requested by LNetNIInit().
*
* \param index Index of the interface to look up.
* \param id On successful return, this location will hold the
- * lnet_process_id_t ID of the interface.
+ * struct lnet_process_id ID of the interface.
*
* \retval 0 If an interface exists at \a index.
* \retval -ENOENT If no interface has been found.
*/
int
-LNetGetId(unsigned int index, lnet_process_id_t *id)
+LNetGetId(unsigned int index, struct lnet_process_id *id)
{
struct lnet_ni *ni;
struct lnet_net *net;
}
EXPORT_SYMBOL(LNetGetId);
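/*
 * Editor's sketch, not part of the patch above: enumerating the local
 * interfaces with LNetGetId() until it returns -ENOENT, per its contract.
 * The wrapper name is made up; libcfs_id2str() is used elsewhere in this
 * file.
 */
static void example_list_local_interfaces(void)
{
    struct lnet_process_id id;
    unsigned int i;

    for (i = 0; LNetGetId(i, &id) == 0; i++)
        CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
}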
-/**
- * Print a string representation of handle \a h into buffer \a str of
- * \a len bytes.
- */
-void
-LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
-{
- snprintf(str, len, "%#llx", h.cookie);
-}
-EXPORT_SYMBOL(LNetSnprintHandle);
-
-static int lnet_ping(lnet_process_id_t id, signed long timeout,
- lnet_process_id_t __user *ids, int n_ids)
+static int lnet_ping(struct lnet_process_id id, signed long timeout,
+ struct lnet_process_id __user *ids, int n_ids)
{
- lnet_handle_eq_t eqh;
- lnet_handle_md_t mdh;
- lnet_event_t event;
- lnet_md_t md = { NULL };
- int which;
- int unlinked = 0;
- int replied = 0;
+ struct lnet_handle_eq eqh;
+ struct lnet_handle_md mdh;
+ struct lnet_event event;
+ struct lnet_md md = { NULL };
+ int which;
+ int unlinked = 0;
+ int replied = 0;
const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
- int infosz;
- struct lnet_ping_info *info;
- lnet_process_id_t tmpid;
- int i;
- int nob;
- int rc;
- int rc2;
- sigset_t blocked;
-
- infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]);
+ struct lnet_ping_buffer *pbuf;
+ struct lnet_process_id tmpid;
+ int i;
+ int nob;
+ int rc;
+ int rc2;
+ sigset_t blocked;
/* n_ids limit is arbitrary */
- if (n_ids <= 0 || n_ids > 20 || id.nid == LNET_NID_ANY)
+ if (n_ids <= 0 || id.nid == LNET_NID_ANY)
return -EINVAL;
+ /*
+ * if the user buffer has room for more than lnet_interfaces_max
+ * entries, only fill it up to lnet_interfaces_max
+ */
+ if (n_ids > lnet_interfaces_max)
+ n_ids = lnet_interfaces_max;
+
if (id.pid == LNET_PID_ANY)
id.pid = LNET_PID_LUSTRE;
- LIBCFS_ALLOC(info, infosz);
- if (info == NULL)
+ pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
+ if (!pbuf)
return -ENOMEM;
/* NB 2 events max (including any unlink event) */
rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
if (rc != 0) {
CERROR("Can't allocate EQ: %d\n", rc);
- goto out_0;
+ goto fail_ping_buffer_decref;
}
/* initialize md content */
- md.start = info;
- md.length = infosz;
- md.threshold = 2; /*GET/REPLY*/
+ md.start = &pbuf->pb_info;
+ md.length = LNET_PING_INFO_SIZE(n_ids);
+ md.threshold = 2; /* GET/REPLY */
md.max_size = 0;
md.options = LNET_MD_TRUNCATE;
md.user_ptr = NULL;
rc = LNetMDBind(md, LNET_UNLINK, &mdh);
if (rc != 0) {
CERROR("Can't bind MD: %d\n", rc);
- goto out_1;
+ goto fail_free_eq;
}
rc = LNetGet(LNET_NID_ANY, mdh, id,
LNET_RESERVED_PORTAL,
- LNET_PROTO_PING_MATCHBITS, 0);
+ LNET_PROTO_PING_MATCHBITS, 0, false);
if (rc != 0) {
/* Don't CERROR; this could be deliberate! */
-
rc2 = LNetMDUnlink(mdh);
LASSERT(rc2 == 0);
replied = 1;
rc = event.mlength;
}
-
} while (rc2 <= 0 || !event.unlinked);
if (!replied) {
CWARN("%s: Unexpected rc >= 0 but no reply!\n",
libcfs_id2str(id));
rc = -EIO;
- goto out_1;
+ goto fail_free_eq;
}
nob = rc;
- LASSERT(nob >= 0 && nob <= infosz);
+ LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
- rc = -EPROTO; /* if I can't parse... */
+ rc = -EPROTO; /* if I can't parse... */
if (nob < 8) {
- /* can't check magic/version */
CERROR("%s: ping info too short %d\n",
libcfs_id2str(id), nob);
- goto out_1;
+ goto fail_free_eq;
}
- if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
- lnet_swap_pinginfo(info);
- } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
+ if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
+ lnet_swap_pinginfo(pbuf);
+ } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
CERROR("%s: Unexpected magic %08x\n",
- libcfs_id2str(id), info->pi_magic);
- goto out_1;
+ libcfs_id2str(id), pbuf->pb_info.pi_magic);
+ goto fail_free_eq;
}
- if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
+ if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
CERROR("%s: ping w/o NI status: 0x%x\n",
- libcfs_id2str(id), info->pi_features);
- goto out_1;
+ libcfs_id2str(id), pbuf->pb_info.pi_features);
+ goto fail_free_eq;
}
- if (nob < offsetof(struct lnet_ping_info, pi_ni[0])) {
- CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
- nob, (int)offsetof(struct lnet_ping_info, pi_ni[0]));
- goto out_1;
+ if (nob < LNET_PING_INFO_SIZE(0)) {
+ CERROR("%s: Short reply %d(%d min)\n",
+ libcfs_id2str(id),
+ nob, (int)LNET_PING_INFO_SIZE(0));
+ goto fail_free_eq;
}
- if (info->pi_nnis < n_ids)
- n_ids = info->pi_nnis;
+ if (pbuf->pb_info.pi_nnis < n_ids)
+ n_ids = pbuf->pb_info.pi_nnis;
- if (nob < offsetof(struct lnet_ping_info, pi_ni[n_ids])) {
- CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
- nob, (int)offsetof(struct lnet_ping_info, pi_ni[n_ids]));
- goto out_1;
+ if (nob < LNET_PING_INFO_SIZE(n_ids)) {
+ CERROR("%s: Short reply %d(%d expected)\n",
+ libcfs_id2str(id),
+ nob, (int)LNET_PING_INFO_SIZE(n_ids));
+ goto fail_free_eq;
}
- rc = -EFAULT; /* If I SEGV... */
+ rc = -EFAULT; /* if I segv in copy_to_user()... */
memset(&tmpid, 0, sizeof(tmpid));
for (i = 0; i < n_ids; i++) {
- tmpid.pid = info->pi_pid;
- tmpid.nid = info->pi_ni[i].ns_nid;
+ tmpid.pid = pbuf->pb_info.pi_pid;
+ tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
- goto out_1;
+ goto fail_free_eq;
}
- rc = info->pi_nnis;
+ rc = pbuf->pb_info.pi_nnis;
- out_1:
+ fail_free_eq:
rc2 = LNetEQFree(eqh);
if (rc2 != 0)
CERROR("rc2 %d\n", rc2);
LASSERT(rc2 == 0);
- out_0:
- LIBCFS_FREE(info, infosz);
+ fail_ping_buffer_decref:
+ lnet_ping_buffer_decref(pbuf);
+ return rc;
+}
+
+static int
+lnet_discover(struct lnet_process_id id, __u32 force,
+ struct lnet_process_id __user *ids, int n_ids)
+{
+ struct lnet_peer_ni *lpni;
+ struct lnet_peer_ni *p;
+ struct lnet_peer *lp;
+ struct lnet_process_id *buf;
+ int cpt;
+ int i;
+ int rc;
+ int max_intf = lnet_interfaces_max;
+ size_t buf_size;
+
+ if (n_ids <= 0 ||
+ id.nid == LNET_NID_ANY)
+ return -EINVAL;
+
+ if (id.pid == LNET_PID_ANY)
+ id.pid = LNET_PID_LUSTRE;
+
+ /*
+ * if the user buffer has room for more than max_intf
+ * entries, only fill it up to max_intf
+ */
+ if (n_ids > max_intf)
+ n_ids = max_intf;
+
+ buf_size = n_ids * sizeof(*buf);
+
+ LIBCFS_ALLOC(buf, buf_size);
+ if (!buf)
+ return -ENOMEM;
+
+ cpt = lnet_net_lock_current();
+ lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
+ if (IS_ERR(lpni)) {
+ rc = PTR_ERR(lpni);
+ goto out;
+ }
+
+ /*
+ * Clearing the NIDS_UPTODATE flag ensures the peer will
+ * be discovered, provided discovery has not been disabled.
+ */
+ lp = lpni->lpni_peer_net->lpn_peer;
+ spin_lock(&lp->lp_lock);
+ lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
+ /* If the force flag is set, force a PING and PUSH as well. */
+ if (force)
+ lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
+ spin_unlock(&lp->lp_lock);
+ rc = lnet_discover_peer_locked(lpni, cpt, true);
+ if (rc)
+ goto out_decref;
+
+ /* Peer may have changed. */
+ lp = lpni->lpni_peer_net->lpn_peer;
+ if (lp->lp_nnis < n_ids)
+ n_ids = lp->lp_nnis;
+
+ i = 0;
+ p = NULL;
+ while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
+ buf[i].pid = id.pid;
+ buf[i].nid = p->lpni_nid;
+ if (++i >= n_ids)
+ break;
+ }
+
+ lnet_net_unlock(cpt);
+
+ rc = -EFAULT;
+ if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
+ goto out_relock;
+ rc = n_ids;
+out_relock:
+ lnet_net_lock(cpt);
+out_decref:
+ lnet_peer_ni_decref_locked(lpni);
+out:
+ lnet_net_unlock(cpt);
+
+ LIBCFS_FREE(buf, buf_size);
+
return rc;
}
+
+/**
+ * Retrieve peer discovery status.
+ *
+ * \retval 1 if lnet_peer_discovery_disabled is 0
+ * \retval 0 if lnet_peer_discovery_disabled is 1
+ */
+int
+LNetGetPeerDiscoveryStatus(void)
+{
+ return !lnet_peer_discovery_disabled;
+}
+EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);
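/*
 * Editor's sketch, not part of the patch above: an upper layer could gate
 * peer-dependent behaviour on discovery being active. The helper name is
 * made up.
 */
static bool example_discovery_active(void)
{
    /* LNetGetPeerDiscoveryStatus() returns 1 when discovery is enabled */
    return LNetGetPeerDiscoveryStatus() == 1;
}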