*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
-
+#ifdef HAVE_SCHED_HEADERS
+#include <linux/sched/signal.h>
+#endif
+#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#define D_LNI D_CONSOLE
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
-static int use_tcp_bonding = false;
+static int use_tcp_bonding;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
- "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
+ "use_tcp_bonding parameter has been removed");
unsigned int lnet_numa_range = 0;
module_param(lnet_numa_range, uint, 0444);
/*
* lnet_health_sensitivity determines by how much we decrement the health
- * value on sending error. The value defaults to 0, which means health
- * checking is turned off by default.
+ * value on sending error. The value defaults to 100, which means the
+ * interface health is decremented by 100 points on every failure.
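+ * E.g. if LNET_MAX_HEALTH_VALUE is 1000, ten consecutive send failures
+ * take an interface from full health down to the minimum.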
*/
-unsigned int lnet_health_sensitivity = 0;
+unsigned int lnet_health_sensitivity = 100;
static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_health_sensitivity = {
.set = sensitivity_set,
.get = param_get_int,
};
#define param_check_health_sensitivity(name, p) \
__param_check(name, p, int)
-#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
&lnet_health_sensitivity, S_IRUGO|S_IWUSR);
#endif
*/
unsigned int lnet_recovery_interval = 1;
static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_recovery_interval = {
.set = recovery_interval_set,
.get = param_get_int,
};
#define param_check_recovery_interval(name, p) \
__param_check(name, p, int)
-#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
&lnet_recovery_interval, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_recovery_interval,
- "Interval to recover unhealthy interfaces in seconds");
+ "DEPRECATED - Interval to recover unhealthy interfaces in seconds");
+
+unsigned int lnet_recovery_limit;
+module_param(lnet_recovery_limit, uint, 0644);
+MODULE_PARM_DESC(lnet_recovery_limit,
+ "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");
static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
MODULE_PARM_DESC(lnet_peer_discovery_disabled,
"Set to 1 to disable peer discovery on this node.");
-unsigned lnet_transaction_timeout = 5;
+unsigned int lnet_drop_asym_route;
+static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+static struct kernel_param_ops param_ops_drop_asym_route = {
+ .set = drop_asym_route_set,
+ .get = param_get_int,
+};
+
+#define param_check_drop_asym_route(name, p) \
+ __param_check(name, p, int)
+#ifdef HAVE_KERNEL_PARAM_OPS
+module_param(lnet_drop_asym_route, drop_asym_route, 0644);
+#else
+module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
+ &lnet_drop_asym_route, 0644);
+#endif
+MODULE_PARM_DESC(lnet_drop_asym_route,
+ "Set to 1 to drop asymmetrical route messages.");
+
+#define LNET_TRANSACTION_TIMEOUT_DEFAULT 50
+unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_transaction_timeout = {
.set = transaction_to_set,
.get = param_get_int,
};
#define param_check_transaction_timeout(name, p) \
__param_check(name, p, int)
-#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
&lnet_transaction_timeout, S_IRUGO|S_IWUSR);
#endif
-MODULE_PARM_DESC(lnet_peer_discovery_disabled,
- "Set to 1 to disable peer discovery on this node.");
+MODULE_PARM_DESC(lnet_transaction_timeout,
+ "Maximum number of seconds to wait for a peer response.");
-unsigned lnet_retry_count = 0;
+#define LNET_RETRY_COUNT_DEFAULT 2
+unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
+#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_retry_count = {
.set = retry_count_set,
.get = param_get_int,
};
#define param_check_retry_count(name, p) \
__param_check(name, p, int)
-#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_retry_count, retry_count_set, param_get_int,
&lnet_retry_count, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_retry_count,
"Maximum number of times to retry transmitting a message");
+unsigned int lnet_response_tracking = 3;
+static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);
+
+#ifdef HAVE_KERNEL_PARAM_OPS
+static struct kernel_param_ops param_ops_response_tracking = {
+ .set = response_tracking_set,
+ .get = param_get_int,
+};
+
+#define param_check_response_tracking(name, p) \
+ __param_check(name, p, int)
+module_param(lnet_response_tracking, response_tracking, 0644);
+#else
+module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
+ &lnet_response_tracking, 0644);
+#endif
+MODULE_PARM_DESC(lnet_response_tracking,
+ "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");
-unsigned lnet_lnd_timeout = LNET_LND_DEFAULT_TIMEOUT;
+#define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
+ (LNET_RETRY_COUNT_DEFAULT + 1))
+unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;
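+
+/* Divide the transaction timeout across the first send and each retry:
+ * with the defaults above, each of the 1 + lnet_retry_count attempts is
+ * given (50 - 1) / (2 + 1) = 16 seconds of LND-level timeout.
+ */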
+static void lnet_set_lnd_timeout(void)
+{
+ lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
+ (lnet_retry_count + 1);
+}
/*
* This sequence number keeps track of how many times DLC was used to
*/
mutex_lock(&the_lnet.ln_api_mutex);
- if (the_lnet.ln_state != LNET_STATE_RUNNING) {
- mutex_unlock(&the_lnet.ln_api_mutex);
- return 0;
- }
-
if (value > LNET_MAX_HEALTH_VALUE) {
mutex_unlock(&the_lnet.ln_api_mutex);
CERROR("Invalid health value. Maximum: %d value = %lu\n",
return -EINVAL;
}
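+ /* Setting the sensitivity to 0 turns health checking off, which makes
+ * retries meaningless, so zero lnet_retry_count as well; this hands
+ * the full transaction timeout back to the LND.
+ */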
+ if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
+ lnet_retry_count = 0;
+ lnet_set_lnd_timeout();
+ }
+
*sensitivity = value;
mutex_unlock(&the_lnet.ln_api_mutex);
static int
recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
{
+ CWARN("'lnet_recovery_interval' has been deprecated\n");
+
+ return 0;
+}
+
+static int
+discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
int rc;
- unsigned *interval = (unsigned *)kp->arg;
+ unsigned int *discovery_off = (unsigned int *)kp->arg;
unsigned long value;
+ struct lnet_ping_buffer *pbuf;
rc = kstrtoul(val, 0, &value);
if (rc) {
- CERROR("Invalid module parameter value for 'lnet_recovery_interval'\n");
+ CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
return rc;
}
- if (value < 1) {
- CERROR("lnet_recovery_interval must be at least 1 second\n");
- return -EINVAL;
- }
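+ /* Normalize to 0/1 so comparisons against the stored setting are exact */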
+ value = (value) ? 1 : 0;
/*
* The purpose of locking the api_mutex here is to ensure that
*/
mutex_lock(&the_lnet.ln_api_mutex);
+ if (value == *discovery_off) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
+ /*
+ * We still want to set the discovery value even when LNet is not
+ * running. This is the case when LNet is being loaded and we want
+ * the module parameters to take effect. Otherwise if we're
+ * changing the value dynamically, we want to set it after
+ * updating the peers
+ */
if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ *discovery_off = value;
mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
}
- *interval = value;
+ /* tell peers that discovery setting has changed */
+ lnet_net_lock(LNET_LOCK_EX);
+ pbuf = the_lnet.ln_ping_target;
+ if (value)
+ pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
+ else
+ pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /* only send a push when we're turning off discovery */
+ if (*discovery_off <= 0 && value > 0)
+ lnet_push_update_to_peers(1);
+ *discovery_off = value;
mutex_unlock(&the_lnet.ln_api_mutex);
}
static int
-discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
+drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
{
int rc;
- unsigned *discovery = (unsigned *)kp->arg;
+ unsigned int *drop_asym_route = (unsigned int *)kp->arg;
unsigned long value;
- struct lnet_ping_buffer *pbuf;
rc = kstrtoul(val, 0, &value);
if (rc) {
- CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
+ CERROR("Invalid module parameter value for "
+ "'lnet_drop_asym_route'\n");
return rc;
}
- value = (value) ? 1 : 0;
-
/*
* The purpose of locking the api_mutex here is to ensure that
* the correct value ends up stored properly.
*/
mutex_lock(&the_lnet.ln_api_mutex);
- if (value == *discovery) {
- mutex_unlock(&the_lnet.ln_api_mutex);
- return 0;
- }
-
- *discovery = value;
-
- if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ if (value == *drop_asym_route) {
mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
}
- /* tell peers that discovery setting has changed */
- lnet_net_lock(LNET_LOCK_EX);
- pbuf = the_lnet.ln_ping_target;
- if (value)
- pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
- else
- pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
- lnet_net_unlock(LNET_LOCK_EX);
-
- lnet_push_update_to_peers(1);
+ *drop_asym_route = value;
mutex_unlock(&the_lnet.ln_api_mutex);
*/
mutex_lock(&the_lnet.ln_api_mutex);
- if (the_lnet.ln_state != LNET_STATE_RUNNING) {
- mutex_unlock(&the_lnet.ln_api_mutex);
- return 0;
- }
-
- if (value < lnet_retry_count || value == 0) {
+ if (value <= lnet_retry_count || value == 0) {
mutex_unlock(&the_lnet.ln_api_mutex);
CERROR("Invalid value for lnet_transaction_timeout (%lu). "
"Has to be greater than lnet_retry_count (%u)\n",
}
*transaction_to = value;
- if (lnet_retry_count == 0)
- lnet_lnd_timeout = value;
- else
- lnet_lnd_timeout = value / lnet_retry_count;
+ /* Update the lnet_lnd_timeout now that we've modified the
+ * transaction timeout
+ */
+ lnet_set_lnd_timeout();
mutex_unlock(&the_lnet.ln_api_mutex);
*/
mutex_lock(&the_lnet.ln_api_mutex);
- if (the_lnet.ln_state != LNET_STATE_RUNNING) {
+ if (lnet_health_sensitivity == 0 && value > 0) {
mutex_unlock(&the_lnet.ln_api_mutex);
- return 0;
+ CERROR("Can not set lnet_retry_count when health feature is turned off\n");
+ return -EINVAL;
}
if (value > lnet_transaction_timeout) {
return -EINVAL;
}
- if (value == *retry_count) {
- mutex_unlock(&the_lnet.ln_api_mutex);
- return 0;
- }
-
*retry_count = value;
- if (value == 0)
- lnet_lnd_timeout = lnet_transaction_timeout;
- else
- lnet_lnd_timeout = lnet_transaction_timeout / value;
+ /* Update the lnet_lnd_timeout now that we've modified the
+ * retry count
+ */
+ lnet_set_lnd_timeout();
mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
}
-static char *
+static int
+response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
+{
+ int rc;
+ unsigned long new_value;
+
+ rc = kstrtoul(val, 0, &new_value);
+ if (rc) {
+ CERROR("Invalid value for 'lnet_response_tracking'\n");
+ return -EINVAL;
+ }
+
+ if (new_value > 3) {
+ CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
+ new_value);
+ return -EINVAL;
+ }
+
+ lnet_response_tracking = new_value;
+
+ return 0;
+}
+
+static const char *
lnet_get_routes(void)
{
return routes;
}
-static char *
+static const char *
lnet_get_networks(void)
{
- char *nets;
- int rc;
+ const char *nets;
+ int rc;
if (*networks != 0 && *ip2nets != 0) {
LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
{
spin_lock_init(&the_lnet.ln_eq_wait_lock);
spin_lock_init(&the_lnet.ln_msg_resend_lock);
- init_waitqueue_head(&the_lnet.ln_eq_waitq);
- init_waitqueue_head(&the_lnet.ln_mt_waitq);
+ init_completion(&the_lnet.ln_mt_wait_complete);
mutex_init(&the_lnet.ln_lnd_mutex);
}
-static void
-lnet_fini_locks(void)
-{
-}
-
struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
* MDs kmem_cache */
+struct kmem_cache *lnet_udsp_cachep; /* udsp cache */
+struct kmem_cache *lnet_rspt_cachep; /* response tracker cache */
+struct kmem_cache *lnet_msg_cachep;
static int
-lnet_descriptor_setup(void)
+lnet_slab_setup(void)
{
/* create specific kmem_cache for MEs and small MDs (i.e., originally
* allocated in <size-xxx> kmem_cache).
if (!lnet_small_mds_cachep)
return -ENOMEM;
+ lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
+ sizeof(struct lnet_udsp),
+ 0, 0, NULL);
+ if (!lnet_udsp_cachep)
+ return -ENOMEM;
+
+ lnet_rspt_cachep = kmem_cache_create("lnet_rspt",
+ sizeof(struct lnet_rsp_tracker),
+ 0, 0, NULL);
+ if (!lnet_rspt_cachep)
+ return -ENOMEM;
+
+ lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
+ 0, 0, NULL);
+ if (!lnet_msg_cachep)
+ return -ENOMEM;
+
return 0;
}
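+
+/* lnet_slab_cleanup() below tolerates partially created caches, so a
+ * failed lnet_slab_setup() is unwound by simply calling it from the
+ * shutdown path (see lnet_unprepare()).
+ */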
static void
-lnet_descriptor_cleanup(void)
+lnet_slab_cleanup(void)
{
+ if (lnet_msg_cachep) {
+ kmem_cache_destroy(lnet_msg_cachep);
+ lnet_msg_cachep = NULL;
+ }
+
+ if (lnet_rspt_cachep) {
+ kmem_cache_destroy(lnet_rspt_cachep);
+ lnet_rspt_cachep = NULL;
+ }
+
+ if (lnet_udsp_cachep) {
+ kmem_cache_destroy(lnet_udsp_cachep);
+ lnet_udsp_cachep = NULL;
+ }
if (lnet_small_mds_cachep) {
kmem_cache_destroy(lnet_small_mds_cachep);
LASSERT(the_lnet.ln_remote_nets_hash == NULL);
LASSERT(the_lnet.ln_remote_nets_hbits > 0);
- LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
+ CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
if (hash == NULL) {
CERROR("Failed to create remote nets hash table\n");
return -ENOMEM;
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
- LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
- LNET_REMOTE_NETS_HASH_SIZE *
- sizeof(the_lnet.ln_remote_nets_hash[0]));
+ CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
+ LNET_REMOTE_NETS_HASH_SIZE);
the_lnet.ln_remote_nets_hash = NULL;
}
cfs_percpt_lock_free(the_lnet.ln_net_lock);
the_lnet.ln_net_lock = NULL;
}
-
- lnet_fini_locks();
}
static int
/* Wire protocol assertions generated by 'wirecheck'
* running on Linux robert.bartonsoftware.com 2.6.8-1.521
* #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
- * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
+ * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
+ */
/* Constants... */
- CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
- CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
- CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
- CLASSERT(LNET_MSG_ACK == 0);
- CLASSERT(LNET_MSG_PUT == 1);
- CLASSERT(LNET_MSG_GET == 2);
- CLASSERT(LNET_MSG_REPLY == 3);
- CLASSERT(LNET_MSG_HELLO == 4);
+ BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
+ BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
+ BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
+ BUILD_BUG_ON(LNET_MSG_ACK != 0);
+ BUILD_BUG_ON(LNET_MSG_PUT != 1);
+ BUILD_BUG_ON(LNET_MSG_GET != 2);
+ BUILD_BUG_ON(LNET_MSG_REPLY != 3);
+ BUILD_BUG_ON(LNET_MSG_HELLO != 4);
+
+ BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
+ BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);
+
+ /* Checks for struct lnet_nid */
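+ /* 20 bytes on the wire: 1-byte size, 1-byte type, 2-byte num and a
+ * 16-byte address, with nid_addr 4-byte aligned */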
+ BUILD_BUG_ON((int)sizeof(struct lnet_nid) != 20);
+ BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_size) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_size) != 1);
+ BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_type) != 1);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_type) != 1);
+ BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_num) != 2);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_num) != 2);
+ BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_addr) != 4);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_addr) != 16);
+
+ /* Checks for struct lnet_process_id_packed */
+ BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
+ BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);
/* Checks for struct lnet_handle_wire */
- CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
- CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
- CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
- CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
- CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
+ BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
+ BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
+ wh_interface_cookie) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
+ wh_object_cookie) != 8);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
/* Checks for struct lnet_magicversion */
- CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
- CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
- CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
- CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
- CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
- CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
- CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
+ BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
+ BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
+ version_minor) != 6);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
/* Checks for struct lnet_hdr */
- CLASSERT((int)sizeof(struct lnet_hdr) == 72);
- CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
- CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
- CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
- CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
- CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
- CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
- CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
+ BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);
/* Ack */
- CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);
/* Put */
- CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);
/* Get */
- CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);
/* Reply */
- CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);
/* Hello */
- CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
- CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
- CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);
/* Checks for struct lnet_ni_status and related constants */
- CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
- CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
- CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
+ BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
+ BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
+ BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);
/* Checks for struct lnet_ni_status */
- CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
- CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
- CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
- CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
- CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
- CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
- CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
+ BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
+ BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_unused) != 12);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) != 4);
/* Checks for struct lnet_ping_info and related constants */
- CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
- CLASSERT(LNET_PING_FEAT_INVAL == 0);
- CLASSERT(LNET_PING_FEAT_BASE == 1);
- CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
- CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
- CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
- CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
- CLASSERT(LNET_PING_FEAT_BITS == 31);
+ BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
+ BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
+ BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
+ BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
+ BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
+ BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
+ BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
+ BUILD_BUG_ON(LNET_PING_FEAT_BITS != 31);
/* Checks for struct lnet_ping_info */
- CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
- CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
- CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
- CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
- CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
- CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
- CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
- CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
- CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
- CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
- CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
+ BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
+ BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
+
+ /* Acceptor connection request */
+ BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
+
+ /* Checks for struct lnet_acceptor_connreq */
+ BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
+ BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);
+
+ /* Checks for struct lnet_acceptor_connreq_v2 */
+ BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq_v2) != 28);
+ BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_magic) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_magic) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_version) != 4);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_version) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_nid) != 8);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_nid) != 20);
+
+ /* Checks for struct lnet_counters_common */
+ BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
+ BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
+ BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
}
-static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
+static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
{
- struct lnet_lnd *lnd;
- struct list_head *tmp;
+ const struct lnet_lnd *lnd;
/* holding lnd mutex */
- list_for_each(tmp, &the_lnet.ln_lnds) {
- lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
+ if (type >= NUM_LNDS)
+ return NULL;
+ lnd = the_lnet.ln_lnds[type];
+ LASSERT(!lnd || lnd->lnd_type == type);
- if (lnd->lnd_type == type)
- return lnd;
- }
- return NULL;
+ return lnd;
}
unsigned int
EXPORT_SYMBOL(lnet_get_lnd_timeout);
void
-lnet_register_lnd(struct lnet_lnd *lnd)
+lnet_register_lnd(const struct lnet_lnd *lnd)
{
mutex_lock(&the_lnet.ln_lnd_mutex);
LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
- list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
- lnd->lnd_refcount = 0;
+ the_lnet.ln_lnds[lnd->lnd_type] = lnd;
CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
EXPORT_SYMBOL(lnet_register_lnd);
void
-lnet_unregister_lnd(struct lnet_lnd *lnd)
+lnet_unregister_lnd(const struct lnet_lnd *lnd)
{
mutex_lock(&the_lnet.ln_lnd_mutex);
LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
- LASSERT(lnd->lnd_refcount == 0);
- list_del(&lnd->lnd_list);
+ the_lnet.ln_lnds[lnd->lnd_type] = NULL;
CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);
-void
-lnet_counters_get_common(struct lnet_counters_common *common)
+static void
+lnet_counters_get_common_locked(struct lnet_counters_common *common)
{
struct lnet_counters *ctr;
int i;
+ /* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
+ * actually called under the protection of the lnet_net_lock.
+ */
memset(common, 0, sizeof(*common));
- lnet_net_lock(LNET_LOCK_EX);
-
cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
common->lcc_route_length += ctr->lct_common.lcc_route_length;
common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
}
+}
+
+void
+lnet_counters_get_common(struct lnet_counters_common *common)
+{
+ lnet_net_lock(LNET_LOCK_EX);
+ lnet_counters_get_common_locked(common);
lnet_net_unlock(LNET_LOCK_EX);
}
EXPORT_SYMBOL(lnet_counters_get_common);
-void
+int
lnet_counters_get(struct lnet_counters *counters)
{
struct lnet_counters *ctr;
struct lnet_counters_health *health = &counters->lct_health;
- int i;
+ int i, rc = 0;
memset(counters, 0, sizeof(*counters));
- lnet_counters_get_common(&counters->lct_common);
-
lnet_net_lock(LNET_LOCK_EX);
+ if (the_lnet.ln_state != LNET_STATE_RUNNING)
+ GOTO(out_unlock, rc = -ENODEV);
+
+ lnet_counters_get_common_locked(&counters->lct_common);
+
cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
health->lch_resend_count += ctr->lct_health.lch_resend_count;
health->lch_network_timeout_count +=
ctr->lct_health.lch_network_timeout_count;
}
+out_unlock:
lnet_net_unlock(LNET_LOCK_EX);
+ return rc;
}
EXPORT_SYMBOL(lnet_counters_get);
lnet_net_lock(LNET_LOCK_EX);
+ if (the_lnet.ln_state != LNET_STATE_RUNNING)
+ goto avoid_reset;
+
cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
memset(counters, 0, sizeof(struct lnet_counters));
-
+avoid_reset:
lnet_net_unlock(LNET_LOCK_EX);
}
struct list_head *e = rec->rec_active.next;
list_del_init(e);
- if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
-
- } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
+ if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
} else { /* NB: Active MEs should be attached on portals */
}
if (rec->rec_lh_hash != NULL) {
- LIBCFS_FREE(rec->rec_lh_hash,
- LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
+ CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
rec->rec_lh_hash = NULL;
}
list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
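+/* Allocate a per-CPT array of list heads; used below for the zombie
+ * response tracker queues (ln_mt_zombie_rstqs).
+ */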
+struct list_head **
+lnet_create_array_of_queues(void)
+{
+ struct list_head **qs;
+ struct list_head *q;
+ int i;
+
+ qs = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(struct list_head));
+ if (!qs) {
+ CERROR("Failed to allocate queues\n");
+ return NULL;
+ }
+
+ cfs_percpt_for_each(q, i, qs)
+ INIT_LIST_HEAD(q);
+
+ return qs;
+}
+
static int lnet_unprepare(void);
static int
INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
+ INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
init_waitqueue_head(&the_lnet.ln_dc_waitq);
+ the_lnet.ln_mt_handler = NULL;
+ init_completion(&the_lnet.ln_started);
- rc = lnet_descriptor_setup();
+ rc = lnet_slab_setup();
if (rc != 0)
goto failed;
if (rc != 0)
goto failed;
- recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
- if (recs == NULL) {
- rc = -ENOMEM;
- goto failed;
- }
-
- the_lnet.ln_me_containers = recs;
-
recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
if (recs == NULL) {
rc = -ENOMEM;
goto failed;
}
+ the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
+ if (!the_lnet.ln_mt_zombie_rstqs) {
+ rc = -ENOMEM;
+ goto failed;
+ }
+
return 0;
failed:
LASSERT(list_empty(&the_lnet.ln_test_peers));
LASSERT(list_empty(&the_lnet.ln_nets));
+ if (the_lnet.ln_mt_zombie_rstqs) {
+ lnet_clean_zombie_rstqs();
+ the_lnet.ln_mt_zombie_rstqs = NULL;
+ }
+
+ lnet_assert_handler_unused(the_lnet.ln_mt_handler);
+ the_lnet.ln_mt_handler = NULL;
+
lnet_portals_destroy();
if (the_lnet.ln_md_containers != NULL) {
the_lnet.ln_md_containers = NULL;
}
- if (the_lnet.ln_me_containers != NULL) {
- lnet_res_containers_destroy(the_lnet.ln_me_containers);
- the_lnet.ln_me_containers = NULL;
- }
-
lnet_res_container_cleanup(&the_lnet.ln_eq_container);
lnet_msg_containers_destroy();
the_lnet.ln_counters = NULL;
}
lnet_destroy_remote_nets_table();
- lnet_descriptor_cleanup();
+ lnet_udsp_destroy(true);
+ lnet_slab_cleanup();
return 0;
}
return NULL;
}
-unsigned int
-lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
+void
+lnet_net_clr_pref_rtrs(struct lnet_net *net)
+{
+ struct list_head zombies;
+ struct lnet_nid_list *ne;
+ struct lnet_nid_list *tmp;
+
+ INIT_LIST_HEAD(&zombies);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ list_splice_init(&net->net_rtr_pref_nids, &zombies);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
+ list_del_init(&ne->nl_list);
+ LIBCFS_FREE(ne, sizeof(*ne));
+ }
+}
+
+int
+lnet_net_add_pref_rtr(struct lnet_net *net,
+ struct lnet_nid *gw_nid)
+__must_hold(&the_lnet.ln_api_mutex)
+{
+ struct lnet_nid_list *ne;
+
+ /* This function is called with api_mutex held. When the api_mutex
+ * is held the list can not be modified, as it is only modified as
+ * a result of applying a UDSP and that happens under api_mutex
+ * lock.
+ */
+ list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
+ if (nid_same(&ne->nl_nid, gw_nid))
+ return -EEXIST;
+ }
+
+ LIBCFS_ALLOC(ne, sizeof(*ne));
+ if (!ne)
+ return -ENOMEM;
+
+ ne->nl_nid = *gw_nid;
+
+ /* Lock the cpt to protect against addition and checks in the
+ * selection algorithm
+ */
+ lnet_net_lock(LNET_LOCK_EX);
+ list_add(&ne->nl_list, &net->net_rtr_pref_nids);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ return 0;
+}
+
+bool
+lnet_net_is_pref_rtr_locked(struct lnet_net *net, struct lnet_nid *rtr_nid)
+{
+ struct lnet_nid_list *ne;
+
+ CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
+ libcfs_net2str(net->net_id),
+ list_empty(&net->net_rtr_pref_nids));
+
+ if (list_empty(&net->net_rtr_pref_nids))
+ return false;
+
+ list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
+ CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
+ libcfs_nidstr(&ne->nl_nid),
+ libcfs_nidstr(rtr_nid));
+ if (nid_same(rtr_nid, &ne->nl_nid))
+ return true;
+ }
+
+ return false;
+}
+
+static unsigned int
+lnet_nid4_cpt_hash(lnet_nid_t nid, unsigned int number)
{
__u64 key = nid;
unsigned int val;
return (unsigned int)(key + val + (val >> 1)) % number;
}
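+/* Map a NID to a CPT. nid4-sized NIDs keep the legacy hash above for
+ * compatibility; larger NIDs fold each 32-bit word of the address
+ * through hash_32() before mixing in the network number.
+ */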
+unsigned int
+lnet_nid_cpt_hash(struct lnet_nid *nid, unsigned int number)
+{
+ unsigned int val;
+ u32 h = 0;
+ int i;
+
+ LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
+
+ if (number == 1)
+ return 0;
+
+ if (nid_is_nid4(nid))
+ return lnet_nid4_cpt_hash(lnet_nid_to_nid4(nid), number);
+
+ for (i = 0; i < 4; i++)
+ h = hash_32(nid->nid_addr[i] ^ h, 32);
+ val = hash_32(LNET_NID_NET(nid) ^ h, LNET_CPT_BITS);
+ if (val < number)
+ return val;
+ return (unsigned int)(h + val + (val >> 1)) % number;
+}
+
int
-lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
+lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni)
{
struct lnet_net *net;
}
/* no NI provided so look at the net */
- net = lnet_get_net_locked(LNET_NIDNET(nid));
+ net = lnet_get_net_locked(LNET_NID_NET(nid));
if (net != NULL && net->net_cpts != NULL) {
return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
}
int
-lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
+lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni)
{
int cpt;
int cpt2;
return cpt2;
}
+EXPORT_SYMBOL(lnet_nid2cpt);
+
+int
+lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni)
+{
+ struct lnet_nid nid;
+
+ if (LNET_CPT_NUMBER == 1)
+ return 0; /* the only one */
+
+ lnet_nid4_to_nid(nid4, &nid);
+ return lnet_nid2cpt(&nid, ni);
+}
EXPORT_SYMBOL(lnet_cpt_of_nid);
int
-lnet_islocalnet(__u32 net_id)
+lnet_islocalnet_locked(__u32 net_id)
{
struct lnet_net *net;
- int cpt;
- bool local;
-
- cpt = lnet_net_lock_current();
+ bool local;
net = lnet_get_net_locked(net_id);
local = net != NULL;
+ return local;
+}
+
+int
+lnet_islocalnet(__u32 net_id)
+{
+ int cpt;
+ bool local;
+
+ cpt = lnet_net_lock_current();
+
+ local = lnet_islocalnet_locked(net_id);
+
lnet_net_unlock(cpt);
return local;
}
struct lnet_ni *
-lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
+lnet_nid_to_ni_locked(struct lnet_nid *nid, int cpt)
{
struct lnet_net *net;
- struct lnet_ni *ni;
+ struct lnet_ni *ni;
LASSERT(cpt != LNET_LOCK_EX);
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- if (ni->ni_nid == nid)
+ if (nid_same(&ni->ni_nid, nid))
return ni;
}
}
return NULL;
}
+struct lnet_ni *
+lnet_nid2ni_locked(lnet_nid_t nid4, int cpt)
+{
+ struct lnet_nid nid;
+
+ lnet_nid4_to_nid(nid4, &nid);
+ return lnet_nid_to_ni_locked(&nid, cpt);
+}
+
struct lnet_ni *
-lnet_nid2ni_addref(lnet_nid_t nid)
+lnet_nid2ni_addref(lnet_nid_t nid4)
{
struct lnet_ni *ni;
+ struct lnet_nid nid;
+
+ lnet_nid4_to_nid(nid4, &nid);
lnet_net_lock(0);
- ni = lnet_nid2ni_locked(nid, 0);
+ ni = lnet_nid_to_ni_locked(&nid, 0);
if (ni)
lnet_ni_addref_locked(ni, 0);
lnet_net_unlock(0);
}
EXPORT_SYMBOL(lnet_nid2ni_addref);
+struct lnet_ni *
+lnet_nid_to_ni_addref(struct lnet_nid *nid)
+{
+ struct lnet_ni *ni;
+
+ lnet_net_lock(0);
+ ni = lnet_nid_to_ni_locked(nid, 0);
+ if (ni)
+ lnet_ni_addref_locked(ni, 0);
+ lnet_net_unlock(0);
+
+ return ni;
+}
+EXPORT_SYMBOL(lnet_nid_to_ni_addref);
+
int
-lnet_islocalnid(lnet_nid_t nid)
+lnet_islocalnid4(lnet_nid_t nid)
{
struct lnet_ni *ni;
int cpt;
}
int
+lnet_islocalnid(struct lnet_nid *nid)
+{
+ struct lnet_ni *ni;
+ int cpt;
+
+ cpt = lnet_net_lock_current();
+ ni = lnet_nid_to_ni_locked(nid, cpt);
+ lnet_net_unlock(cpt);
+
+ return ni != NULL;
+}
+
+int
lnet_count_acceptor_nets(void)
{
/* Return the # of NIs that need the acceptor. */
LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
if (pbuf) {
pbuf->pb_nnis = nnis;
+ pbuf->pb_needs_post = false;
atomic_set(&pbuf->pb_refcnt, 1);
}
void
lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
{
- LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
+ LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
}
return count;
}
+void
+lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
+{
+ struct lnet_ni_status *stat;
+ int nnis;
+ int i;
+
+ __swab32s(&pbuf->pb_info.pi_magic);
+ __swab32s(&pbuf->pb_info.pi_features);
+ __swab32s(&pbuf->pb_info.pi_pid);
+ __swab32s(&pbuf->pb_info.pi_nnis);
+ nnis = pbuf->pb_info.pi_nnis;
+ if (nnis > pbuf->pb_nnis)
+ nnis = pbuf->pb_nnis;
+ for (i = 0; i < nnis; i++) {
+ stat = &pbuf->pb_info.pi_ni[i];
+ __swab64s(&stat->ns_nid);
+ __swab32s(&stat->ns_status);
+ }
+}
+
int
lnet_ping_info_validate(struct lnet_ping_info *pinfo)
{
/* Loopback is guaranteed to be present */
if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
return -ERANGE;
- if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
+ if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
return -EPROTO;
return 0;
}
static void
lnet_ping_target_event_handler(struct lnet_event *event)
{
- struct lnet_ping_buffer *pbuf = event->md.user_ptr;
+ struct lnet_ping_buffer *pbuf = event->md_user_ptr;
if (event->unlinked)
lnet_ping_buffer_decref(pbuf);
.nid = LNET_NID_ANY,
.pid = LNET_PID_ANY
};
- struct lnet_handle_me me_handle;
+ struct lnet_me *me;
struct lnet_md md = { NULL };
- int rc, rc2;
+ int rc;
- if (set_eq) {
- rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
- &the_lnet.ln_ping_target_eq);
- if (rc != 0) {
- CERROR("Can't allocate ping buffer EQ: %d\n", rc);
- return rc;
- }
- }
+ if (set_eq)
+ the_lnet.ln_ping_target_handler =
+ lnet_ping_target_event_handler;
*ppbuf = lnet_ping_target_create(ni_count);
if (*ppbuf == NULL) {
}
/* Ping target ME/MD */
- rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+ me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
LNET_PROTO_PING_MATCHBITS, 0,
- LNET_UNLINK, LNET_INS_AFTER,
- &me_handle);
- if (rc != 0) {
+ LNET_UNLINK, LNET_INS_AFTER);
+ if (IS_ERR(me)) {
+ rc = PTR_ERR(me);
CERROR("Can't create ping target ME: %d\n", rc);
goto fail_decref_ping_buffer;
}
md.max_size = 0;
md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
LNET_MD_MANAGE_REMOTE;
- md.eq_handle = the_lnet.ln_ping_target_eq;
+ md.handler = the_lnet.ln_ping_target_handler;
md.user_ptr = *ppbuf;
- rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
+ rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
if (rc != 0) {
CERROR("Can't attach ping target MD: %d\n", rc);
- goto fail_unlink_ping_me;
+ goto fail_decref_ping_buffer;
}
lnet_ping_buffer_addref(*ppbuf);
return 0;
-fail_unlink_ping_me:
- rc2 = LNetMEUnlink(me_handle);
- LASSERT(rc2 == 0);
fail_decref_ping_buffer:
- LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
+ LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
lnet_ping_buffer_decref(*ppbuf);
*ppbuf = NULL;
fail_free_eq:
- if (set_eq) {
- rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(rc2 == 0);
- }
return rc;
}
lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
struct lnet_handle_md *ping_mdh)
{
- sigset_t blocked = cfs_block_allsigs();
-
LNetMDUnlink(*ping_mdh);
LNetInvalidateMDHandle(ping_mdh);
/* NB the MD could be busy; this just starts the unlink */
- while (lnet_ping_buffer_numref(pbuf) > 1) {
- CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- }
-
- cfs_restore_sigs(blocked);
+ wait_var_event_warning(&pbuf->pb_refcnt,
+ atomic_read(&pbuf->pb_refcnt) <= 1,
+ "Still waiting for ping data MD to unlink\n");
}
static void
ns = &pbuf->pb_info.pi_ni[i];
- ns->ns_nid = ni->ni_nid;
+ if (!nid_is_nid4(&ni->ni_nid))
+ continue;
+ ns->ns_nid = lnet_nid_to_nid4(&ni->ni_nid);
lnet_ni_lock(ni);
- ns->ns_status = (ni->ni_status != NULL) ?
- ni->ni_status->ns_status :
- LNET_NI_STATUS_UP;
+ ns->ns_status = lnet_ni_get_status_locked(ni);
ni->ni_status = ns;
lnet_ni_unlock(ni);
static void
lnet_ping_target_fini(void)
{
- int rc;
-
lnet_ping_md_unlink(the_lnet.ln_ping_target,
&the_lnet.ln_ping_target_md);
- rc = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(rc == 0);
-
+ lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
lnet_ping_target_destroy();
}
/* Resize the push target. */
int lnet_push_target_resize(void)
{
- struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
- struct lnet_md md = { NULL };
- struct lnet_handle_me meh;
struct lnet_handle_md mdh;
struct lnet_handle_md old_mdh;
struct lnet_ping_buffer *pbuf;
struct lnet_ping_buffer *old_pbuf;
- int nnis = the_lnet.ln_push_target_nnis;
+ int nnis;
int rc;
+again:
+ nnis = the_lnet.ln_push_target_nnis;
if (nnis <= 0) {
- rc = -EINVAL;
- goto fail_return;
+ CDEBUG(D_NET, "Invalid nnis %d\n", nnis);
+ return -EINVAL;
}
-again:
+
+ /* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
+ * dropped when we need to resize again (see "old_pbuf" below) or when
+ * LNet is shut down (see lnet_push_target_fini())
+ */
pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
if (!pbuf) {
- rc = -ENOMEM;
- goto fail_return;
- }
-
- rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
- LNET_PROTO_PING_MATCHBITS, 0,
- LNET_UNLINK, LNET_INS_AFTER,
- &meh);
- if (rc) {
- CERROR("Can't create push target ME: %d\n", rc);
- goto fail_decref_pbuf;
+ CDEBUG(D_NET, "Can't allocate pbuf for nnis %d\n", nnis);
+ return -ENOMEM;
}
- /* initialize md content */
- md.start = &pbuf->pb_info;
- md.length = LNET_PING_INFO_SIZE(nnis);
- md.threshold = LNET_MD_THRESH_INF;
- md.max_size = 0;
- md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
- LNET_MD_MANAGE_REMOTE;
- md.user_ptr = pbuf;
- md.eq_handle = the_lnet.ln_push_target_eq;
-
- rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
+ rc = lnet_push_target_post(pbuf, &mdh);
if (rc) {
- CERROR("Can't attach push MD: %d\n", rc);
- goto fail_unlink_meh;
+ CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
+ lnet_ping_buffer_decref(pbuf);
+ return rc;
}
- lnet_ping_buffer_addref(pbuf);
lnet_net_lock(LNET_LOCK_EX);
old_pbuf = the_lnet.ln_push_target;
if (old_pbuf) {
LNetMDUnlink(old_mdh);
+ /* Drop ref set by lnet_ping_buffer_alloc() */
lnet_ping_buffer_decref(old_pbuf);
}
+ /* Received another push or reply that requires a larger buffer */
if (nnis < the_lnet.ln_push_target_nnis)
goto again;
CDEBUG(D_NET, "nnis %d success\n", nnis);
-
return 0;
+}
-fail_unlink_meh:
- LNetMEUnlink(meh);
-fail_decref_pbuf:
- lnet_ping_buffer_decref(pbuf);
-fail_return:
- CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
- return rc;
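+/* Post (or re-post) the push target buffer. With md.threshold == 1 the
+ * MD auto-unlinks after a single incoming push; pb_needs_post tracks
+ * whether the buffer must be posted again.
+ */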
+int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
+ struct lnet_handle_md *mdhp)
+{
+ struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
+ struct lnet_md md = { NULL };
+ struct lnet_me *me;
+ int rc;
+
+ me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+ LNET_PROTO_PING_MATCHBITS, 0,
+ LNET_UNLINK, LNET_INS_AFTER);
+ if (IS_ERR(me)) {
+ rc = PTR_ERR(me);
+ CERROR("Can't create push target ME: %d\n", rc);
+ return rc;
+ }
+
+ pbuf->pb_needs_post = false;
+
+ /* This reference is dropped by lnet_push_target_event_handler() */
+ lnet_ping_buffer_addref(pbuf);
+
+ /* initialize md content */
+ md.start = &pbuf->pb_info;
+ md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
+ md.threshold = 1;
+ md.max_size = 0;
+ md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
+ md.user_ptr = pbuf;
+ md.handler = the_lnet.ln_push_target_handler;
+
+ rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
+ if (rc) {
+ CERROR("Can't attach push MD: %d\n", rc);
+ lnet_ping_buffer_decref(pbuf);
+ pbuf->pb_needs_post = true;
+ return rc;
+ }
+
+ CDEBUG(D_NET, "posted push target %p\n", pbuf);
+
+ return 0;
}
static void lnet_push_target_event_handler(struct lnet_event *ev)
{
- struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
+ struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
+
+ CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
+ ev->unlinked);
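+
+ /* A swabbed magic means the peer has the opposite endianness and the
+ * whole ping info must be byte-swapped.
+ */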
if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
lnet_swap_pinginfo(pbuf);
+ if (ev->type == LNET_EVENT_UNLINK) {
+ /* Drop ref added by lnet_push_target_post() */
+ lnet_ping_buffer_decref(pbuf);
+ return;
+ }
+
lnet_peer_push_event(ev);
if (ev->unlinked)
+ /* Drop ref added by lnet_push_target_post */
lnet_ping_buffer_decref(pbuf);
}
if (the_lnet.ln_push_target)
return -EALREADY;
- rc = LNetEQAlloc(0, lnet_push_target_event_handler,
- &the_lnet.ln_push_target_eq);
- if (rc) {
- CERROR("Can't allocated push target EQ: %d\n", rc);
- return rc;
- }
+ the_lnet.ln_push_target_handler =
+ lnet_push_target_event_handler;
+
+ rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
+ LASSERT(rc == 0);
/* Start at the required minimum, we'll enlarge if required. */
the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
rc = lnet_push_target_resize();
if (rc) {
- LNetEQFree(the_lnet.ln_push_target_eq);
- LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
+ LNetClearLazyPortal(LNET_RESERVED_PORTAL);
+ the_lnet.ln_push_target_handler = NULL;
}
return rc;
LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
/* Wait for the unlink to complete. */
- while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
- CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- }
+ wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
+ atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
+ "Still waiting for ping data MD to unlink\n");
+ /* Drop ref set by lnet_ping_buffer_alloc() */
lnet_ping_buffer_decref(the_lnet.ln_push_target);
the_lnet.ln_push_target = NULL;
the_lnet.ln_push_target_nnis = 0;
- LNetEQFree(the_lnet.ln_push_target_eq);
- LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
+ LNetClearLazyPortal(LNET_RESERVED_PORTAL);
+ lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
+ the_lnet.ln_push_target_handler = NULL;
}
static int
}
if (!list_empty(&ni->ni_netlist)) {
+ /* Unlock mutex while waiting to allow other
+ * threads to read the LNet state and fall through
+ * to avoid deadlock
+ */
lnet_net_unlock(LNET_LOCK_EX);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
++i;
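+ /* Warn only when i is a power of two, to rate-limit the
+ * console noise while we wait
+ */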
if ((i & (-i)) == i) {
CDEBUG(D_WARNING,
"Waiting for zombie LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
+
+ mutex_lock(&the_lnet.ln_api_mutex);
lnet_net_lock(LNET_LOCK_EX);
continue;
}
islo = ni->ni_net->net_lnd->lnd_type == LOLND;
LASSERT(!in_interrupt());
+ /* Holding the mutex makes it safe for lnd_shutdown
+ * to call module_put(). Module unload cannot finish
+ * until lnet_unregister_lnd() completes, and that
+ * requires the mutex.
+ */
+ mutex_lock(&the_lnet.ln_lnd_mutex);
(net->net_lnd->lnd_shutdown)(ni);
+ mutex_unlock(&the_lnet.ln_lnd_mutex);
if (!islo)
CDEBUG(D_LNI, "Removed LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
lnet_ni_free(ni);
i = 2;
lnet_net_lock(LNET_LOCK_EX);
- net->net_state = LNET_NET_STATE_DELETING;
-
list_del_init(&net->net_list);
while (!list_empty(&net->net_ni_list)) {
/* Do peer table cleanup for this net */
lnet_peer_tables_cleanup(net);
- lnet_net_lock(LNET_LOCK_EX);
- /*
- * decrement ref count on lnd only when the entire network goes
- * away
- */
- net->net_lnd->lnd_refcount--;
-
- lnet_net_unlock(LNET_LOCK_EX);
-
lnet_net_free(net);
}
lnet_shutdown_lndnets(void)
{
struct lnet_net *net;
- struct list_head resend;
+ LIST_HEAD(resend);
struct lnet_msg *msg, *tmp;
- INIT_LIST_HEAD(&resend);
-
/* NB called holding the global mutex */
/* All quiet on the API front */
lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_state = LNET_STATE_STOPPING;
- while (!list_empty(&the_lnet.ln_nets)) {
- /*
- * move the nets to the zombie list to avoid them being
- * picked up for new work. LONET is also included in the
- * Nets that will be moved to the zombie list
- */
- net = list_entry(the_lnet.ln_nets.next,
- struct lnet_net, net_list);
- list_move(&net->net_list, &the_lnet.ln_net_zombie);
- }
+ /*
+ * move the nets to the zombie list to avoid them being
+ * picked up for new work. LONET is also included in the
+ * Nets that will be moved to the zombie list
+ */
+ list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
/* Drop the cached loopback Net. */
if (the_lnet.ln_loni != NULL) {
if (rc != 0) {
LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
rc, libcfs_lnd2str(net->net_lnd->lnd_type));
- lnet_net_lock(LNET_LOCK_EX);
- net->net_lnd->lnd_refcount--;
- lnet_net_unlock(LNET_LOCK_EX);
goto failed0;
}
atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
- libcfs_nid2str(ni->ni_nid),
+ libcfs_nidstr(&ni->ni_nid),
ni->ni_net->net_tunables.lct_peer_tx_credits,
lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
ni->ni_net->net_tunables.lct_peer_rtr_credits,
lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
{
struct lnet_ni *ni;
- struct lnet_net *net_l = NULL;
- struct list_head local_ni_list;
- int rc;
- int ni_count = 0;
- __u32 lnd_type;
- struct lnet_lnd *lnd;
- int peer_timeout =
+ struct lnet_net *net_l = NULL;
+ LIST_HEAD(local_ni_list);
+ int rc;
+ int ni_count = 0;
+ __u32 lnd_type;
+ const struct lnet_lnd *lnd;
+ int peer_timeout =
net->net_tunables.lct_peer_timeout;
- int maxtxcredits =
+ int maxtxcredits =
net->net_tunables.lct_max_tx_credits;
- int peerrtrcredits =
+ int peerrtrcredits =
net->net_tunables.lct_peer_rtr_credits;
- INIT_LIST_HEAD(&local_ni_list);
-
/*
* make sure that this net is unique. If it isn't then
* we are adding interfaces to an already existing network, and
}
}
- lnet_net_lock(LNET_LOCK_EX);
- lnd->lnd_refcount++;
- lnet_net_unlock(LNET_LOCK_EX);
-
net->net_lnd = lnd;
mutex_unlock(&the_lnet.ln_lnd_mutex);
- * After than we want to delete the network being added,
+ * After that we want to delete the network being added,
* to avoid a memory leak.
*/
-
- /*
- * When a network uses TCP bonding then all its interfaces
- * must be specified when the network is first defined: the
- * TCP bonding code doesn't allow for interfaces to be added
- * or removed.
- */
- if (net_l != net && net_l != NULL && use_tcp_bonding &&
- LNET_NETTYP(net_l->net_id) == SOCKLND) {
- rc = -EINVAL;
- goto failed0;
- }
-
while (!list_empty(&net->net_ni_added)) {
ni = list_entry(net->net_ni_added.next, struct lnet_ni,
ni_netlist);
- /* make sure that the the NI we're about to start
- * up is actually unique. if it's not fail. */
+ /* make sure that the NI we're about to start
+ * up is actually unique. if it's not, fail. */
if (!lnet_ni_unique_net(&net_l->net_ni_list,
- ni->ni_interfaces[0])) {
- rc = -EINVAL;
+ ni->ni_interface)) {
+ rc = -EEXIST;
goto failed1;
}
rc = lnet_startup_lndni(ni, tun);
- LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
- ni->ni_net->net_lnd->lnd_query != NULL);
-
if (rc < 0)
goto failed1;
*/
lnet_net_free(net);
} else {
- net->net_state = LNET_NET_STATE_ACTIVE;
/*
- * restore tunables after it has been overwitten by the
+ * restore tunables after they have been overwritten by the
* lnd
return rc;
}
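+
+/* Encode one struct ln_key_list from data[] as a nested netlink
+ * attribute: for every key emit its index, optional value string,
+ * optional key format, and its netlink data type. A key of type
+ * NLA_NESTED recurses into the next list in data[].
+ */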
+static int lnet_genl_parse_list(struct sk_buff *msg,
+ const struct ln_key_list *data[], u16 idx)
+{
+ const struct ln_key_list *list = data[idx];
+ const struct ln_key_props *props;
+ struct nlattr *node;
+ u16 count;
+
+ if (!list)
+ return 0;
+
+ if (!list->lkl_maxattr)
+ return -ERANGE;
+
+ props = list->lkl_list;
+ if (!props)
+ return -EINVAL;
+
+ node = nla_nest_start(msg, LN_SCALAR_ATTR_LIST);
+ if (!node)
+ return -ENOBUFS;
+
+ for (count = 1; count <= list->lkl_maxattr; count++) {
+ struct nlattr *key = nla_nest_start(msg, count);
+
+ if (count == 1)
+ nla_put_u16(msg, LN_SCALAR_ATTR_LIST_SIZE,
+ list->lkl_maxattr);
+
+ nla_put_u16(msg, LN_SCALAR_ATTR_INDEX, count);
+ if (props[count].lkp_values)
+ nla_put_string(msg, LN_SCALAR_ATTR_VALUE,
+ props[count].lkp_values);
+ if (props[count].lkp_key_format)
+ nla_put_u16(msg, LN_SCALAR_ATTR_KEY_FORMAT,
+ props[count].lkp_key_format);
+ nla_put_u16(msg, LN_SCALAR_ATTR_NLA_TYPE,
+ props[count].lkp_data_type);
+ if (props[count].lkp_data_type == NLA_NESTED) {
+ int rc;
+
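+ /* each NLA_NESTED key consumes the
+ * next key list in data[]
+ */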
+ rc = lnet_genl_parse_list(msg, data, ++idx);
+ if (rc < 0)
+ return rc;
+ }
+
+ nla_nest_end(msg, key);
+ }
+
+ nla_nest_end(msg, node);
+ return 0;
+}
+
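+/* Emit the whole key-list table as one generic netlink message:
+ * write the genl header, encode data[] starting at index 0, and
+ * cancel the partially built message on failure.
+ */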
+int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
+ const struct genl_family *family, int flags,
+ u8 cmd, const struct ln_key_list *data[])
+{
+ int rc = 0;
+ void *hdr;
+
+ if (!data[0])
+ return -EINVAL;
+
+ hdr = genlmsg_put(msg, portid, seq, family, flags, cmd);
+ if (!hdr)
+ GOTO(canceled, rc = -EMSGSIZE);
+
+ rc = lnet_genl_parse_list(msg, data, 0);
+ if (rc < 0)
+ GOTO(canceled, rc);
+
+ genlmsg_end(msg, hdr);
+canceled:
+ if (rc < 0)
+ genlmsg_cancel(msg, hdr);
+ return rc;
+}
+EXPORT_SYMBOL(lnet_genl_send_scalar_list);
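+
+/* A hypothetical caller, e.g. a generic netlink dump handler; the
+ * family, command and key-list names below are illustrative only:
+ *
+ *	static const struct ln_key_list *all_lists[] = {
+ *		&net_props_list, NULL
+ *	};
+ *
+ *	rc = lnet_genl_send_scalar_list(msg, info->snd_portid,
+ *					info->snd_seq, &lnet_family,
+ *					NLM_F_MULTI, LNET_CMD_NETS,
+ *					all_lists);
+ */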
+
/**
* Initialize LNet library.
*
lnet_assert_wire_constants();
- /* refer to global cfs_cpt_table for now */
+ /* refer to global cfs_cpt_tab for now */
- the_lnet.ln_cpt_table = cfs_cpt_table;
- the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
+ the_lnet.ln_cpt_table = cfs_cpt_tab;
+ the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
LASSERT(the_lnet.ln_cpt_number > 0);
if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
}
the_lnet.ln_refcount = 0;
- LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
- INIT_LIST_HEAD(&the_lnet.ln_lnds);
INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
- INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
- INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
/* The hash table size is the number of bits it takes to express the set
- * ln_num_routes, minus 1 (better to under estimate than over so we
+ * ln_num_routes, minus 1 (better to underestimate than over so we
*
* \pre lnet_lib_init() called with success.
* \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
+ *
+ * As this happens at module unload, all LNDs must already be
+ * unloaded, so they must already be unregistered.
*/
void lnet_lib_exit(void)
{
- LASSERT(the_lnet.ln_refcount == 0);
+ int i;
- while (!list_empty(&the_lnet.ln_lnds))
- lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
- struct lnet_lnd, lnd_list));
+ LASSERT(the_lnet.ln_refcount == 0);
+ lnet_unregister_lnd(&the_lolnd);
+ for (i = 0; i < NUM_LNDS; i++)
+ LASSERT(!the_lnet.ln_lnds[i]);
lnet_destroy_locks();
}
int ni_count;
struct lnet_ping_buffer *pbuf;
struct lnet_handle_md ping_mdh;
- struct list_head net_head;
+ LIST_HEAD(net_head);
struct lnet_net *net;
- INIT_LIST_HEAD(&net_head);
-
mutex_lock(&the_lnet.ln_api_mutex);
CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
goto err_empty_list;
}
+ if (use_tcp_bonding)
+ CWARN("use_tcp_bonding has been removed. Use Multi-Rail and Dynamic Discovery instead, see LU-13641\n");
+
/* If LNet is being initialized via DLC it is possible
* that the user requests not to load module parameters (ones which
* are supported by DLC) on initialization. Therefore, make sure not
* in this case. On cleanup in case of failure only clean up
* routes if it has been loaded */
if (!the_lnet.ln_nis_from_mod_params) {
- rc = lnet_parse_networks(&net_head, lnet_get_networks(),
- use_tcp_bonding);
+ rc = lnet_parse_networks(&net_head, lnet_get_networks());
if (rc < 0)
goto err_empty_list;
}
if (rc != 0)
goto err_shutdown_lndnis;
- rc = lnet_check_routes();
- if (rc != 0)
- goto err_destroy_routes;
-
rc = lnet_rtrpools_alloc(im_a_router);
if (rc != 0)
goto err_destroy_routes;
lnet_ping_target_update(pbuf, ping_mdh);
- rc = lnet_monitor_thr_start();
- if (rc != 0)
- goto err_stop_ping;
+ the_lnet.ln_mt_handler = lnet_mt_event_handler;
rc = lnet_push_target_init();
if (rc != 0)
- goto err_stop_monitor_thr;
+ goto err_stop_ping;
rc = lnet_peer_discovery_start();
if (rc != 0)
goto err_destroy_push_target;
+ rc = lnet_monitor_thr_start();
+ if (rc != 0)
+ goto err_stop_discovery_thr;
+
lnet_fault_init();
lnet_router_debugfs_init();
mutex_unlock(&the_lnet.ln_api_mutex);
+ complete_all(&the_lnet.ln_started);
+
+ /* wait for all routers to start */
+ lnet_wait_router_start();
+
return 0;
+err_stop_discovery_thr:
+ lnet_peer_discovery_stop();
err_destroy_push_target:
lnet_push_target_fini();
-err_stop_monitor_thr:
- lnet_monitor_thr_stop();
err_stop_ping:
lnet_ping_target_fini();
err_acceptor_stop:
* \return always 0 for current implementation.
*/
int
-LNetNIFini()
+LNetNIFini(void)
{
mutex_lock(&the_lnet.ln_api_mutex);
lnet_fault_fini();
- lnet_router_debugfs_init();
+ lnet_router_debugfs_fini();
+ lnet_monitor_thr_stop();
lnet_peer_discovery_stop();
lnet_push_target_fini();
- lnet_monitor_thr_stop();
lnet_ping_target_fini();
/* Teardown fns that use my own API functions BEFORE here */
size_t min_size = 0;
int i;
- if (!ni || !cfg_ni || !tun)
+ if (!ni || !cfg_ni || !tun || !nid_is_nid4(&ni->ni_nid))
return;
- if (ni->ni_interfaces[0] != NULL) {
- for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
- if (ni->ni_interfaces[i] != NULL) {
- strncpy(cfg_ni->lic_ni_intf[i],
- ni->ni_interfaces[i],
- sizeof(cfg_ni->lic_ni_intf[i]));
- }
- }
+ if (ni->ni_interface != NULL) {
+ strncpy(cfg_ni->lic_ni_intf,
+ ni->ni_interface,
+ sizeof(cfg_ni->lic_ni_intf));
}
- cfg_ni->lic_nid = ni->ni_nid;
- if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
- cfg_ni->lic_status = LNET_NI_STATUS_UP;
- else
- cfg_ni->lic_status = ni->ni_status->ns_status;
- cfg_ni->lic_tcp_bonding = use_tcp_bonding;
+ cfg_ni->lic_nid = lnet_nid_to_nid4(&ni->ni_nid);
+ cfg_ni->lic_status = lnet_ni_get_status_locked(ni);
cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
size_t min_size, tunable_size = 0;
int i;
- if (!ni || !config)
+ if (!ni || !config || !nid_is_nid4(&ni->ni_nid))
return;
net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
if (!net_config)
return;
- BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
- ARRAY_SIZE(net_config->ni_interfaces));
-
- for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
- if (!ni->ni_interfaces[i])
- break;
+ if (!ni->ni_interface)
+ return;
- strncpy(net_config->ni_interfaces[i],
- ni->ni_interfaces[i],
- sizeof(net_config->ni_interfaces[i]));
- }
+ strncpy(net_config->ni_interface,
+ ni->ni_interface,
+ sizeof(net_config->ni_interface));
- config->cfg_nid = ni->ni_nid;
+ config->cfg_nid = lnet_nid_to_nid4(&ni->ni_nid);
config->cfg_config_u.cfg_net.net_peer_timeout =
ni->ni_net->net_tunables.lct_peer_timeout;
config->cfg_config_u.cfg_net.net_max_tx_credits =
config->cfg_config_u.cfg_net.net_peer_rtr_credits =
ni->ni_net->net_tunables.lct_peer_rtr_credits;
- if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
- net_config->ni_status = LNET_NI_STATUS_UP;
- else
- net_config->ni_status = ni->ni_status->ns_status;
+ net_config->ni_status = lnet_ni_get_status_locked(ni);
if (ni->ni_cpts) {
int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
return NULL;
}
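+
+/* Return the highest health value among the NIs on @net, skipping
+ * NIs that have a fatal error raised; returns 0 if none qualify.
+ */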
+int lnet_get_net_healthv_locked(struct lnet_net *net)
+{
+ struct lnet_ni *ni;
+ int best_healthv = 0;
+ int healthv, ni_fatal;
+
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ healthv = atomic_read(&ni->ni_healthv);
+ ni_fatal = atomic_read(&ni->ni_fatal_error_on);
+ if (!ni_fatal && healthv > best_healthv)
+ best_healthv = healthv;
+ }
+
+ return best_healthv;
+}
+
struct lnet_ni *
lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
{
static int lnet_add_net_common(struct lnet_net *net,
struct lnet_ioctl_config_lnd_tunables *tun)
{
- __u32 net_id;
+ struct lnet_handle_md ping_mdh;
struct lnet_ping_buffer *pbuf;
- struct lnet_handle_md ping_mdh;
- int rc;
struct lnet_remotenet *rnet;
- int net_ni_count;
- int num_acceptor_nets;
+ struct lnet_ni *ni;
+ int net_ni_count;
+ __u32 net_id;
+ int rc;
lnet_net_lock(LNET_LOCK_EX);
rnet = lnet_find_rnet_locked(net->net_id);
else
memset(&net->net_tunables, -1, sizeof(net->net_tunables));
- /*
- * before starting this network get a count of the current TCP
- * networks which require the acceptor thread running. If that
- * count is == 0 before we start up this network, then we'd want to
- * start up the acceptor thread after starting up this network
- */
- num_acceptor_nets = lnet_count_acceptor_nets();
-
net_id = net->net_id;
rc = lnet_startup_lndnet(net,
lnet_net_lock(LNET_LOCK_EX);
net = lnet_get_net_locked(net_id);
- lnet_net_unlock(LNET_LOCK_EX);
-
LASSERT(net);
+ /* apply the UDSPs */
+ rc = lnet_udsp_apply_policies_on_net(net);
+ if (rc)
+ CERROR("Failed to apply UDSPs on local net %s\n",
+ libcfs_net2str(net->net_id));
+
+ /* At this point we lost track of which NI was just added, so we
+ * just re-apply the policies on all of the NIs on this net
+ */
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ rc = lnet_udsp_apply_policies_on_ni(ni);
+ if (rc)
+ CERROR("Failed to apply UDSPs on ni %s\n",
+ libcfs_nidstr(&ni->ni_nid));
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+
/*
- * Start the acceptor thread if this is the first network
- * being added that requires the thread.
+ * Start the acceptor thread if this network requires it.
*/
- if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
+ if (net->net_lnd->lnd_accept) {
rc = lnet_acceptor_start();
if (rc < 0) {
/* shutdown the net that we just started */
return rc;
}
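+
+/* Backfill defaults for common tunables the caller left at zero:
+ * peer_timeout, peer_tx_credits and max_tx_credits. Anything else
+ * is taken as given.
+ */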
+static void
+lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
+{
+ if (tun) {
+ if (!tun->lt_cmn.lct_peer_timeout)
+ tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
+ if (!tun->lt_cmn.lct_peer_tx_credits)
+ tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
+ if (!tun->lt_cmn.lct_max_tx_credits)
+ tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
+ }
+}
+
static int lnet_handle_legacy_ip2nets(char *ip2nets,
struct lnet_ioctl_config_lnd_tunables *tun)
{
struct lnet_net *net;
- char *nets;
+ const char *nets;
int rc;
- struct list_head net_head;
-
- INIT_LIST_HEAD(&net_head);
+ LIST_HEAD(net_head);
rc = lnet_parse_ip2nets(&nets, ip2nets);
if (rc < 0)
return rc;
- rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
+ rc = lnet_parse_networks(&net_head, nets);
if (rc < 0)
return rc;
+ lnet_set_tune_defaults(tun);
+
mutex_lock(&the_lnet.ln_api_mutex);
while (!list_empty(&net_head)) {
net = list_entry(net_head.next, struct lnet_net, net_list);
}
ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
- conf->lic_ni_intf[0]);
+ conf->lic_ni_intf);
if (!ni)
return -ENOMEM;
+ lnet_set_tune_defaults(tun);
+
mutex_lock(&the_lnet.ln_api_mutex);
rc = lnet_add_net_common(net, tun);
lnet_shutdown_lndnet(net);
- if (lnet_count_acceptor_nets() == 0)
- lnet_acceptor_stop();
+ lnet_acceptor_stop();
lnet_ping_target_update(pbuf, ping_mdh);
lnet_shutdown_lndni(ni);
- if (lnet_count_acceptor_nets() == 0)
- lnet_acceptor_stop();
+ lnet_acceptor_stop();
lnet_ping_target_update(pbuf, ping_mdh);
int
lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
{
- struct lnet_net *net;
- struct list_head net_head;
- int rc;
+ struct lnet_net *net;
+ LIST_HEAD(net_head);
+ int rc;
struct lnet_ioctl_config_lnd_tunables tun;
- char *nets = conf->cfg_config_u.cfg_net.net_intf;
-
- INIT_LIST_HEAD(&net_head);
+ const char *nets = conf->cfg_config_u.cfg_net.net_intf;
/* Create a net/ni structures for the network string */
- rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
+ rc = lnet_parse_networks(&net_head, nets);
if (rc <= 0)
return rc == 0 ? -EINVAL : rc;
memset(&tun, 0, sizeof(tun));
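+ /* a zero timeout or credits value falls back to its default,
+ * matching lnet_set_tune_defaults()
+ */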
tun.lt_cmn.lct_peer_timeout =
- conf->cfg_config_u.cfg_net.net_peer_timeout;
+ (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
+ conf->cfg_config_u.cfg_net.net_peer_timeout;
tun.lt_cmn.lct_peer_tx_credits =
- conf->cfg_config_u.cfg_net.net_peer_tx_credits;
+ (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
+ conf->cfg_config_u.cfg_net.net_peer_tx_credits;
tun.lt_cmn.lct_peer_rtr_credits =
conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
tun.lt_cmn.lct_max_tx_credits =
- conf->cfg_config_u.cfg_net.net_max_tx_credits;
+ (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
+ conf->cfg_config_u.cfg_net.net_max_tx_credits;
rc = lnet_add_net_common(net, &tun);
lnet_shutdown_lndnet(net);
- if (lnet_count_acceptor_nets() == 0)
- lnet_acceptor_stop();
+ lnet_acceptor_stop();
lnet_ping_target_update(pbuf, ping_mdh);
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- if (ni->ni_nid == nid || all) {
+ if (all || (nid_is_nid4(&ni->ni_nid) &&
+ lnet_nid_to_nid4(&ni->ni_nid) == nid)) {
atomic_set(&ni->ni_healthv, value);
if (list_empty(&ni->ni_recovery) &&
value < LNET_MAX_HEALTH_VALUE) {
CERROR("manually adding local NI %s to recovery\n",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nidstr(&ni->ni_nid));
list_add_tail(&ni->ni_recovery,
&the_lnet.ln_mt_localNIRecovq);
lnet_ni_addref_locked(ni, 0);
lnet_net_unlock(LNET_LOCK_EX);
}
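+
+/* Apply @value as conns_per_peer to the NI matching @nid, or to
+ * every NI when @all is set. Only the socklnd and o2iblnd
+ * tunables carry this setting.
+ */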
+static void
+lnet_ni_set_conns_per_peer(lnet_nid_t nid, int value, bool all)
+{
+ struct lnet_net *net;
+ struct lnet_ni *ni;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
+ list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ if (lnet_nid_to_nid4(&ni->ni_nid) != nid && !all)
+ continue;
+ if (LNET_NETTYP(net->net_id) == SOCKLND)
+ ni->ni_lnd_tunables.lnd_tun_u.lnd_sock.lnd_conns_per_peer = value;
+ else if (LNET_NETTYP(net->net_id) == O2IBLND)
+ ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib.lnd_conns_per_peer = value;
+ if (!all) {
+ lnet_net_unlock(LNET_LOCK_EX);
+ return;
+ }
+ }
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+
static int
lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
{
stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
+ stats->hlni_fatal_error = atomic_read(&ni->ni_fatal_error_on);
stats->hlni_health_value = atomic_read(&ni->ni_healthv);
+ stats->hlni_ping_count = ni->ni_ping_count;
+ stats->hlni_next_ping = ni->ni_next_ping;
unlock:
lnet_net_unlock(cpt);
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
- list->rlst_nid_array[i] = ni->ni_nid;
+ if (!nid_is_nid4(&ni->ni_nid))
+ continue;
+ list->rlst_nid_array[i] = lnet_nid_to_nid4(&ni->ni_nid);
i++;
if (i >= LNET_MAX_SHOW_NUM_NID)
break;
lnet_net_lock(LNET_LOCK_EX);
list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
- list->rlst_nid_array[i] = lpni->lpni_nid;
+ list->rlst_nid_array[i] = lnet_nid_to_nid4(&lpni->lpni_nid);
i++;
if (i >= LNET_MAX_SHOW_NUM_NID)
break;
struct lnet_ioctl_config_data *config;
struct lnet_process_id id = {0};
struct lnet_ni *ni;
+ struct lnet_nid nid;
int rc;
BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
case IOC_LIBCFS_FAIL_NID:
return lnet_fail_nid(data->ioc_nid, data->ioc_count);
- case IOC_LIBCFS_ADD_ROUTE:
+ case IOC_LIBCFS_ADD_ROUTE: {
+ /* default router sensitivity to 1 */
+ unsigned int sensitivity = 1;
config = arg;
if (config->cfg_hdr.ioc_len < sizeof(*config))
return -EINVAL;
+ if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
+ sensitivity =
+ config->cfg_config_u.cfg_route.rtr_sensitivity;
+ }
+
+ lnet_nid4_to_nid(config->cfg_nid, &nid);
mutex_lock(&the_lnet.ln_api_mutex);
rc = lnet_add_route(config->cfg_net,
config->cfg_config_u.cfg_route.rtr_hop,
- config->cfg_nid,
+ &nid,
config->cfg_config_u.cfg_route.
- rtr_priority);
- if (rc == 0) {
- rc = lnet_check_routes();
- if (rc != 0)
- lnet_del_route(config->cfg_net,
- config->cfg_nid);
- }
+ rtr_priority, sensitivity);
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
+ }
case IOC_LIBCFS_DEL_ROUTE:
config = arg;
&config->cfg_nid,
&config->cfg_config_u.cfg_route.rtr_flags,
&config->cfg_config_u.cfg_route.
- rtr_priority);
+ rtr_priority,
+ &config->cfg_config_u.cfg_route.
+ rtr_sensitivity);
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
return -EINVAL;
mutex_lock(&the_lnet.ln_api_mutex);
- lnet_counters_get(&lnet_stats->st_cntrs);
+ rc = lnet_counters_get(&lnet_stats->st_cntrs);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+ }
+
+ case IOC_LIBCFS_RESET_LNET_STATS:
+ {
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lnet_counters_reset();
mutex_unlock(&the_lnet.ln_api_mutex);
return 0;
}
mutex_lock(&the_lnet.ln_api_mutex);
rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
cfg->prcfg_cfg_nid,
- cfg->prcfg_mr);
+ cfg->prcfg_mr, false);
mutex_unlock(&the_lnet.ln_api_mutex);
return rc;
}
return 0;
}
+ case IOC_LIBCFS_SET_CONNS_PER_PEER: {
+ struct lnet_ioctl_reset_conns_per_peer_cfg *cfg = arg;
+ int value;
+
+ if (cfg->rcpp_hdr.ioc_len < sizeof(*cfg))
+ return -EINVAL;
+ if (cfg->rcpp_value < 0)
+ value = 1;
+ else
+ value = cfg->rcpp_value;
+ CDEBUG(D_NET,
+ "Setting conns_per_peer to %d for %s. all = %d\n",
+ value, libcfs_nid2str(cfg->rcpp_nid), cfg->rcpp_all);
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lnet_ni_set_conns_per_peer(cfg->rcpp_nid, value, cfg->rcpp_all);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+ }
+
case IOC_LIBCFS_NOTIFY_ROUTER: {
time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
* that deadline to the wall clock.
*/
deadline += ktime_get_seconds();
- return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
+ return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, false,
deadline);
}
return 0;
case IOC_LIBCFS_TESTPROTOCOMPAT:
- lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_testprotocompat = data->ioc_flags;
- lnet_net_unlock(LNET_LOCK_EX);
return 0;
case IOC_LIBCFS_LNET_FAULT:
/* If timeout is negative then set default of 3 minutes */
if (((s32)data->ioc_u32[1] <= 0) ||
data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
- timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
+ timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
else
- timeout = msecs_to_jiffies(data->ioc_u32[1]);
+ timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
rc = lnet_ping(id, timeout, data->ioc_pbuf1,
data->ioc_plen1 / sizeof(struct lnet_process_id));
/* If timeout is negative then set default of 3 minutes */
if (((s32)ping->op_param) <= 0 ||
ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
- timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
+ timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
else
- timeout = msecs_to_jiffies(ping->op_param);
+ timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
rc = lnet_ping(ping->ping_id, timeout,
ping->ping_buf,
mutex_lock(&the_lnet.ln_api_mutex);
lp = lnet_find_peer(ping->ping_id.nid);
if (lp) {
- ping->ping_id.nid = lp->lp_primary_nid;
+ ping->ping_id.nid =
+ lnet_nid_to_nid4(&lp->lp_primary_nid);
ping->mr_info = lnet_peer_is_multi_rail(lp);
lnet_peer_decref_locked(lp);
}
mutex_lock(&the_lnet.ln_api_mutex);
lp = lnet_find_peer(discover->ping_id.nid);
if (lp) {
- discover->ping_id.nid = lp->lp_primary_nid;
+ discover->ping_id.nid =
+ lnet_nid_to_nid4(&lp->lp_primary_nid);
discover->mr_info = lnet_peer_is_multi_rail(lp);
lnet_peer_decref_locked(lp);
}
return 0;
}
+ case IOC_LIBCFS_ADD_UDSP: {
+ struct lnet_ioctl_udsp *ioc_udsp = arg;
+ __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_udsp_demarshal_add(arg, bulk_size);
+ if (!rc) {
+ rc = lnet_udsp_apply_policies(NULL, false);
+ CDEBUG(D_NET, "policy application returned %d\n", rc);
+ rc = 0;
+ }
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+ }
+
+ case IOC_LIBCFS_DEL_UDSP: {
+ struct lnet_ioctl_udsp *ioc_udsp = arg;
+ int idx = ioc_udsp->iou_idx;
+
+ if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_udsp_del_policy(idx);
+ if (!rc) {
+ rc = lnet_udsp_apply_policies(NULL, false);
+ CDEBUG(D_NET, "policy re-application returned %d\n",
+ rc);
+ rc = 0;
+ }
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+ }
+
+ case IOC_LIBCFS_GET_UDSP_SIZE: {
+ struct lnet_ioctl_udsp *ioc_udsp = arg;
+ struct lnet_udsp *udsp;
+
+ if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
+ return -EINVAL;
+
+ rc = 0;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
+ if (!udsp) {
+ rc = -ENOENT;
+ } else {
+ /* On input iou_idx holds the index of the UDSP
+ * whose size is requested. On output iou_idx
+ * holds the size of the UDSP found at the
+ * passed-in index.
+ */
+ ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
+ if (ioc_udsp->iou_idx < 0)
+ rc = -EINVAL;
+ }
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+ }
+
+ case IOC_LIBCFS_GET_UDSP: {
+ struct lnet_ioctl_udsp *ioc_udsp = arg;
+ struct lnet_udsp *udsp;
+
+ if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
+ return -EINVAL;
+
+ rc = 0;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
+ if (!udsp)
+ rc = -ENOENT;
+ else
+ rc = lnet_udsp_marshal(udsp, ioc_udsp);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+ }
+
+ case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
+ struct lnet_ioctl_construct_udsp_info *info = arg;
+
+ if (info->cud_hdr.ioc_len < sizeof(*info))
+ return -EINVAL;
+
+ CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
+ libcfs_nid2str(info->cud_nid));
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ lnet_udsp_get_construct_info(info);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+ }
+
default:
ni = lnet_net2ni_addref(data->ioc_net);
if (ni == NULL)
cpt = lnet_net_lock_current();
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
- if (ni->ni_nid == nid) {
+ if (lnet_nid_to_nid4(&ni->ni_nid) == nid) {
lnet_net_unlock(cpt);
return true;
}
list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
+ if (!nid_is_nid4(&ni->ni_nid))
+ /* FIXME this needs to be handled */
+ continue;
if (index-- != 0)
continue;
- id->nid = ni->ni_nid;
+ id->nid = lnet_nid_to_nid4(&ni->ni_nid);
id->pid = the_lnet.ln_pid;
rc = 0;
break;
}
EXPORT_SYMBOL(LNetGetId);
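+
+/* State shared between lnet_ping() and lnet_ping_event_handler():
+ * the handler records the REPLY length (or the first error) and
+ * signals the completion once the MD is unlinked, so lnet_ping()
+ * can sleep on pd.completion instead of polling an event queue.
+ */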
+struct ping_data {
+ int rc;
+ int replied;
+ struct lnet_handle_md mdh;
+ struct completion completion;
+};
+
+static void
+lnet_ping_event_handler(struct lnet_event *event)
+{
+ struct ping_data *pd = event->md_user_ptr;
+
+ CDEBUG(D_NET, "ping event (%d %d)%s\n",
+ event->type, event->status,
+ event->unlinked ? " unlinked" : "");
+
+ if (event->status) {
+ if (!pd->rc)
+ pd->rc = event->status;
+ } else if (event->type == LNET_EVENT_REPLY) {
+ pd->replied = 1;
+ pd->rc = event->mlength;
+ }
+ if (event->unlinked)
+ complete(&pd->completion);
+}
+
static int lnet_ping(struct lnet_process_id id, signed long timeout,
struct lnet_process_id __user *ids, int n_ids)
{
- struct lnet_handle_eq eqh;
- struct lnet_handle_md mdh;
- struct lnet_event event;
struct lnet_md md = { NULL };
- int which;
- int unlinked = 0;
- int replied = 0;
- const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
+ struct ping_data pd = { 0 };
struct lnet_ping_buffer *pbuf;
struct lnet_process_id tmpid;
int i;
int nob;
int rc;
int rc2;
- sigset_t blocked;
/* n_ids limit is arbitrary */
if (n_ids <= 0 || id.nid == LNET_NID_ANY)
if (!pbuf)
return -ENOMEM;
- /* NB 2 events max (including any unlink event) */
- rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
- if (rc != 0) {
- CERROR("Can't allocate EQ: %d\n", rc);
- goto fail_ping_buffer_decref;
- }
-
/* initialize md content */
md.start = &pbuf->pb_info;
md.length = LNET_PING_INFO_SIZE(n_ids);
md.threshold = 2; /* GET/REPLY */
md.max_size = 0;
md.options = LNET_MD_TRUNCATE;
- md.user_ptr = NULL;
- md.eq_handle = eqh;
+ md.user_ptr = &pd;
+ md.handler = lnet_ping_event_handler;
- rc = LNetMDBind(md, LNET_UNLINK, &mdh);
+ init_completion(&pd.completion);
+
+ rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
if (rc != 0) {
CERROR("Can't bind MD: %d\n", rc);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
- rc = LNetGet(LNET_NID_ANY, mdh, id,
+ rc = LNetGet(LNET_NID_ANY, pd.mdh, id,
LNET_RESERVED_PORTAL,
LNET_PROTO_PING_MATCHBITS, 0, false);
if (rc != 0) {
/* Don't CERROR; this could be deliberate! */
- rc2 = LNetMDUnlink(mdh);
+ rc2 = LNetMDUnlink(pd.mdh);
LASSERT(rc2 == 0);
/* NB must wait for the UNLINK event below... */
- unlinked = 1;
- timeout = a_long_time;
- }
-
- do {
- /* MUST block for unlink to complete */
- if (unlinked)
- blocked = cfs_block_allsigs();
-
- rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
-
- if (unlinked)
- cfs_restore_sigs(blocked);
-
- CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
- (rc2 <= 0) ? -1 : event.type,
- (rc2 <= 0) ? -1 : event.status,
- (rc2 > 0 && event.unlinked) ? " unlinked" : "");
-
- LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
-
- if (rc2 <= 0 || event.status != 0) {
- /* timeout or error */
- if (!replied && rc == 0)
- rc = (rc2 < 0) ? rc2 :
- (rc2 == 0) ? -ETIMEDOUT :
- event.status;
-
- if (!unlinked) {
- /* Ensure completion in finite time... */
- LNetMDUnlink(mdh);
- /* No assertion (racing with network) */
- unlinked = 1;
- timeout = a_long_time;
- } else if (rc2 == 0) {
- /* timed out waiting for unlink */
- CWARN("ping %s: late network completion\n",
- libcfs_id2str(id));
- }
- } else if (event.type == LNET_EVENT_REPLY) {
- replied = 1;
- rc = event.mlength;
- }
- } while (rc2 <= 0 || !event.unlinked);
+ }
- if (!replied) {
- if (rc >= 0)
- CWARN("%s: Unexpected rc >= 0 but no reply!\n",
- libcfs_id2str(id));
+ if (wait_for_completion_timeout(&pd.completion, timeout) == 0) {
+ /* Ensure completion in finite time... */
+ LNetMDUnlink(pd.mdh);
+ wait_for_completion(&pd.completion);
+ }
+ if (!pd.replied) {
rc = -EIO;
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
- nob = rc;
+ nob = pd.rc;
LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
rc = -EPROTO; /* if I can't parse... */
if (nob < 8) {
CERROR("%s: ping info too short %d\n",
libcfs_id2str(id), nob);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
} else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
CERROR("%s: Unexpected magic %08x\n",
libcfs_id2str(id), pbuf->pb_info.pi_magic);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
CERROR("%s: ping w/o NI status: 0x%x\n",
libcfs_id2str(id), pbuf->pb_info.pi_features);
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (nob < LNET_PING_INFO_SIZE(0)) {
CERROR("%s: Short reply %d(%d min)\n",
libcfs_id2str(id),
nob, (int)LNET_PING_INFO_SIZE(0));
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
if (pbuf->pb_info.pi_nnis < n_ids)
CERROR("%s: Short reply %d(%d expected)\n",
libcfs_id2str(id),
nob, (int)LNET_PING_INFO_SIZE(n_ids));
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
rc = -EFAULT; /* if I segv in copy_to_user()... */
tmpid.pid = pbuf->pb_info.pi_pid;
tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
- goto fail_free_eq;
+ goto fail_ping_buffer_decref;
}
rc = pbuf->pb_info.pi_nnis;
- fail_free_eq:
- rc2 = LNetEQFree(eqh);
- if (rc2 != 0)
- CERROR("rc2 %d\n", rc2);
- LASSERT(rc2 == 0);
-
fail_ping_buffer_decref:
lnet_ping_buffer_decref(pbuf);
return rc;
int cpt;
int i;
int rc;
- int max_intf = lnet_interfaces_max;
- size_t buf_size;
if (n_ids <= 0 ||
id.nid == LNET_NID_ANY)
id.pid = LNET_PID_LUSTRE;
/*
- * if the user buffer has more space than the max_intf
- * then only fill it up to max_intf
+ * If the user buffer has more space than lnet_interfaces_max,
+ * then only fill it up to lnet_interfaces_max.
*/
- if (n_ids > max_intf)
- n_ids = max_intf;
-
- buf_size = n_ids * sizeof(*buf);
+ if (n_ids > lnet_interfaces_max)
+ n_ids = lnet_interfaces_max;
- LIBCFS_ALLOC(buf, buf_size);
+ CFS_ALLOC_PTR_ARRAY(buf, n_ids);
if (!buf)
return -ENOMEM;
if (rc)
goto out_decref;
- /* Peer may have changed. */
+ /* The lpni (or lp) for this NID may have changed and our ref is
+ * the only thing keeping the old one around. Release the ref
+ * and look up the lpni again
+ */
+ lnet_peer_ni_decref_locked(lpni);
+ lpni = lnet_find_peer_ni_locked(id.nid);
+ if (!lpni) {
+ rc = -ENOENT;
+ goto out;
+ }
lp = lpni->lpni_peer_net->lpn_peer;
- if (lp->lp_nnis < n_ids)
- n_ids = lp->lp_nnis;
i = 0;
p = NULL;
while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
buf[i].pid = id.pid;
- buf[i].nid = p->lpni_nid;
+ buf[i].nid = lnet_nid_to_nid4(&p->lpni_nid);
if (++i >= n_ids)
break;
}
+ rc = i;
- lnet_net_unlock(cpt);
-
- rc = -EFAULT;
- if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
- goto out_relock;
- rc = n_ids;
-out_relock:
- lnet_net_lock(cpt);
out_decref:
lnet_peer_ni_decref_locked(lpni);
out:
lnet_net_unlock(cpt);
- LIBCFS_FREE(buf, buf_size);
+ if (rc >= 0)
+ if (copy_to_user(ids, buf, rc * sizeof(*buf)))
+ rc = -EFAULT;
+ CFS_FREE_PTR_ARRAY(buf, n_ids);
return rc;
}