4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/ctype.h>
35 #include <linux/log2.h>
36 #include <linux/ktime.h>
37 #include <linux/moduleparam.h>
38 #include <linux/uaccess.h>
39 #ifdef HAVE_SCHED_HEADERS
40 #include <linux/sched/signal.h>
42 #include <lnet/udsp.h>
43 #include <lnet/lib-lnet.h>
45 #define D_LNI D_CONSOLE
48 * initialize ln_api_mutex statically, since it needs to be used in
49 * the discovery_set callback. That module parameter callback can be called
50 * before module init completes. The mutex needs to be ready for use then.
52 struct lnet the_lnet = {
53 .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
54 }; /* THE state of the network */
55 EXPORT_SYMBOL(the_lnet);
57 static char *ip2nets = "";
58 module_param(ip2nets, charp, 0444);
59 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
61 static char *networks = "";
62 module_param(networks, charp, 0444);
63 MODULE_PARM_DESC(networks, "local networks");
65 static char *routes = "";
66 module_param(routes, charp, 0444);
67 MODULE_PARM_DESC(routes, "routes to non-local networks");
69 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
70 module_param(rnet_htable_size, int, 0444);
71 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
73 static int use_tcp_bonding;
74 module_param(use_tcp_bonding, int, 0444);
75 MODULE_PARM_DESC(use_tcp_bonding,
76 "use_tcp_bonding parameter has been removed");
78 unsigned int lnet_numa_range = 0;
79 module_param(lnet_numa_range, uint, 0444);
80 MODULE_PARM_DESC(lnet_numa_range,
81 "NUMA range to consider during Multi-Rail selection");
84 * lnet_health_sensitivity determines by how much we decrement the health
85 * value on sending error. The value defaults to 100, which means an
86 * interface's health value is decremented by 100 points on every failure.
88 unsigned int lnet_health_sensitivity = 100;
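/*
 * Example: an interface starting at LNET_MAX_HEALTH_VALUE loses
 * lnet_health_sensitivity points per failed send, so with the default of
 * 100 it takes LNET_MAX_HEALTH_VALUE / 100 consecutive failures to drive
 * its health to zero. Setting the sensitivity to 0 disables the health
 * feature (see sensitivity_set() and retry_count_set() below).
 */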
89 static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
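/*
 * Note on the blocks below: each writable tunable is hooked up in one of
 * two ways depending on the kernel. With HAVE_KERNEL_PARAM_OPS a struct
 * kernel_param_ops supplies the custom setter to module_param(); otherwise
 * the older module_param_call() interface is used with the same setter, so
 * validation happens identically either way.
 */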
90 #ifdef HAVE_KERNEL_PARAM_OPS
91 static struct kernel_param_ops param_ops_health_sensitivity = {
92 .set = sensitivity_set,
95 #define param_check_health_sensitivity(name, p) \
96 __param_check(name, p, int)
97 module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
99 module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
100 &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
102 MODULE_PARM_DESC(lnet_health_sensitivity,
103 "Value to decrement the health value by on error");
106 * lnet_recovery_interval determines how often we should perform recovery
107 * on unhealthy interfaces.
109 unsigned int lnet_recovery_interval = 1;
110 static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
111 #ifdef HAVE_KERNEL_PARAM_OPS
112 static struct kernel_param_ops param_ops_recovery_interval = {
113 .set = recovery_interval_set,
114 .get = param_get_int,
116 #define param_check_recovery_interval(name, p) \
117 __param_check(name, p, int)
118 module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
120 module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
121 &lnet_recovery_interval, S_IRUGO|S_IWUSR);
123 MODULE_PARM_DESC(lnet_recovery_interval,
124 "DEPRECATED - Interval to recover unhealthy interfaces in seconds");
126 unsigned int lnet_recovery_limit;
127 module_param(lnet_recovery_limit, uint, 0644);
128 MODULE_PARM_DESC(lnet_recovery_limit,
129 "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");
131 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
132 static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
134 static struct kernel_param_ops param_ops_interfaces_max = {
136 .get = param_get_int,
139 #define param_check_interfaces_max(name, p) \
140 __param_check(name, p, int)
142 #ifdef HAVE_KERNEL_PARAM_OPS
143 module_param(lnet_interfaces_max, interfaces_max, 0644);
145 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
146 ¶m_ops_interfaces_max, 0644);
148 MODULE_PARM_DESC(lnet_interfaces_max,
149 "Maximum number of interfaces in a node.");
151 unsigned lnet_peer_discovery_disabled = 0;
152 static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);
154 static struct kernel_param_ops param_ops_discovery_disabled = {
155 .set = discovery_set,
156 .get = param_get_int,
159 #define param_check_discovery_disabled(name, p) \
160 __param_check(name, p, int)
161 #ifdef HAVE_KERNEL_PARAM_OPS
162 module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
164 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
165 ¶m_ops_discovery_disabled, 0644);
167 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
168 "Set to 1 to disable peer discovery on this node.");
170 unsigned int lnet_drop_asym_route;
171 static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);
173 static struct kernel_param_ops param_ops_drop_asym_route = {
174 .set = drop_asym_route_set,
175 .get = param_get_int,
178 #define param_check_drop_asym_route(name, p) \
179 __param_check(name, p, int)
180 #ifdef HAVE_KERNEL_PARAM_OPS
181 module_param(lnet_drop_asym_route, drop_asym_route, 0644);
183 module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
184 ¶m_ops_drop_asym_route, 0644);
186 MODULE_PARM_DESC(lnet_drop_asym_route,
187 "Set to 1 to drop asymmetrical route messages.");
189 #define LNET_TRANSACTION_TIMEOUT_DEFAULT 50
190 unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
191 static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
192 #ifdef HAVE_KERNEL_PARAM_OPS
193 static struct kernel_param_ops param_ops_transaction_timeout = {
194 .set = transaction_to_set,
195 .get = param_get_int,
198 #define param_check_transaction_timeout(name, p) \
199 __param_check(name, p, int)
200 module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
202 module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
203 &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
205 MODULE_PARM_DESC(lnet_transaction_timeout,
206 "Maximum number of seconds to wait for a peer response.");
208 #define LNET_RETRY_COUNT_DEFAULT 2
209 unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
210 static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
211 #ifdef HAVE_KERNEL_PARAM_OPS
212 static struct kernel_param_ops param_ops_retry_count = {
213 .set = retry_count_set,
214 .get = param_get_int,
217 #define param_check_retry_count(name, p) \
218 __param_check(name, p, int)
219 module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
221 module_param_call(lnet_retry_count, retry_count_set, param_get_int,
222 &lnet_retry_count, S_IRUGO|S_IWUSR);
224 MODULE_PARM_DESC(lnet_retry_count,
225 "Maximum number of times to retry transmitting a message");
227 unsigned int lnet_response_tracking = 3;
228 static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);
230 #ifdef HAVE_KERNEL_PARAM_OPS
231 static struct kernel_param_ops param_ops_response_tracking = {
232 .set = response_tracking_set,
233 .get = param_get_int,
236 #define param_check_response_tracking(name, p) \
237 __param_check(name, p, int)
238 module_param(lnet_response_tracking, response_tracking, 0644);
240 module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
241 &lnet_response_tracking, 0644);
243 MODULE_PARM_DESC(lnet_response_tracking,
244 "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");
246 #define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
247 (LNET_RETRY_COUNT_DEFAULT + 1))
248 unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;
249 static void lnet_set_lnd_timeout(void)
251 lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
252 (lnet_retry_count + 1);
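/*
 * The LND-level timeout is derived from the transaction timeout: the
 * timeout less one second is split evenly across the initial send and
 * every retry. With the defaults (50 second transaction timeout, 2
 * retries) each attempt gets (50 - 1) / (2 + 1) = 16 seconds.
 */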
256 * This sequence number keeps track of how many times DLC was used to
257 * update the local NIs. It is incremented when a NI is added or
258 * removed and checked when sending a message to determine if there is
259 * a need to re-run the selection algorithm. See lnet_select_pathway()
260 * for more details on its usage.
262 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
264 static int lnet_ping(struct lnet_process_id id, signed long timeout,
265 struct lnet_process_id __user *ids, int n_ids);
267 static int lnet_discover(struct lnet_process_id id, __u32 force,
268 struct lnet_process_id __user *ids, int n_ids);
271 sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
274 unsigned *sensitivity = (unsigned *)kp->arg;
277 rc = kstrtoul(val, 0, &value);
279 CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
284 * The purpose of locking the api_mutex here is to ensure that
285 * the correct value ends up stored properly.
287 mutex_lock(&the_lnet.ln_api_mutex);
289 if (value > LNET_MAX_HEALTH_VALUE) {
290 mutex_unlock(&the_lnet.ln_api_mutex);
291 CERROR("Invalid health value. Maximum: %d value = %lu\n",
292 LNET_MAX_HEALTH_VALUE, value);
296 if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
297 lnet_retry_count = 0;
298 lnet_set_lnd_timeout();
301 *sensitivity = value;
303 mutex_unlock(&the_lnet.ln_api_mutex);
309 recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
311 CWARN("'lnet_recovery_interval' has been deprecated\n");
317 discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
320 unsigned *discovery_off = (unsigned *)kp->arg;
322 struct lnet_ping_buffer *pbuf;
324 rc = kstrtoul(val, 0, &value);
326 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
330 value = (value) ? 1 : 0;
333 * The purpose of locking the api_mutex here is to ensure that
334 * the correct value ends up stored properly.
336 mutex_lock(&the_lnet.ln_api_mutex);
338 if (value == *discovery_off) {
339 mutex_unlock(&the_lnet.ln_api_mutex);
344 * We still want to set the discovery value even when LNet is not
345 * running. This is the case when LNet is being loaded and we want
346 * the module parameters to take effect. Otherwise if we're
347 * changing the value dynamically, we want to set it after
350 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
351 *discovery_off = value;
352 mutex_unlock(&the_lnet.ln_api_mutex);
356 /* tell peers that discovery setting has changed */
357 lnet_net_lock(LNET_LOCK_EX);
358 pbuf = the_lnet.ln_ping_target;
360 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
362 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
363 lnet_net_unlock(LNET_LOCK_EX);
365 /* only send a push when we're turning off discovery */
366 if (*discovery_off <= 0 && value > 0)
367 lnet_push_update_to_peers(1);
368 *discovery_off = value;
370 mutex_unlock(&the_lnet.ln_api_mutex);
376 drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
379 unsigned int *drop_asym_route = (unsigned int *)kp->arg;
382 rc = kstrtoul(val, 0, &value);
384 CERROR("Invalid module parameter value for "
385 "'lnet_drop_asym_route'\n");
390 * The purpose of locking the api_mutex here is to ensure that
391 * the correct value ends up stored properly.
393 mutex_lock(&the_lnet.ln_api_mutex);
395 if (value == *drop_asym_route) {
396 mutex_unlock(&the_lnet.ln_api_mutex);
400 *drop_asym_route = value;
402 mutex_unlock(&the_lnet.ln_api_mutex);
408 transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
411 unsigned *transaction_to = (unsigned *)kp->arg;
414 rc = kstrtoul(val, 0, &value);
416 CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
421 * The purpose of locking the api_mutex here is to ensure that
422 * the correct value ends up stored properly.
424 mutex_lock(&the_lnet.ln_api_mutex);
426 if (value <= lnet_retry_count || value == 0) {
427 mutex_unlock(&the_lnet.ln_api_mutex);
428 CERROR("Invalid value for lnet_transaction_timeout (%lu). "
429 "Has to be greater than lnet_retry_count (%u)\n",
430 value, lnet_retry_count);
434 if (value == *transaction_to) {
435 mutex_unlock(&the_lnet.ln_api_mutex);
439 *transaction_to = value;
440 /* Update the lnet_lnd_timeout now that we've modified the
441 * transaction timeout
443 lnet_set_lnd_timeout();
445 mutex_unlock(&the_lnet.ln_api_mutex);
451 retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
454 unsigned *retry_count = (unsigned *)kp->arg;
457 rc = kstrtoul(val, 0, &value);
459 CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
464 * The purpose of locking the api_mutex here is to ensure that
465 * the correct value ends up stored properly.
467 mutex_lock(&the_lnet.ln_api_mutex);
469 if (lnet_health_sensitivity == 0 && value > 0) {
470 mutex_unlock(&the_lnet.ln_api_mutex);
471 CERROR("Can not set lnet_retry_count when health feature is turned off\n");
475 if (value > lnet_transaction_timeout) {
476 mutex_unlock(&the_lnet.ln_api_mutex);
477 CERROR("Invalid value for lnet_retry_count (%lu). "
478 "Has to be smaller than lnet_transaction_timeout (%u)\n",
479 value, lnet_transaction_timeout);
483 *retry_count = value;
485 /* Update the lnet_lnd_timeout now that we've modified the
488 lnet_set_lnd_timeout();
490 mutex_unlock(&the_lnet.ln_api_mutex);
496 intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
500 rc = kstrtoint(val, 0, &value);
502 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
506 if (value < LNET_INTERFACES_MIN) {
507 CWARN("max interfaces value provided is too small, setting to %d\n",
508 LNET_INTERFACES_MAX_DEFAULT);
509 value = LNET_INTERFACES_MAX_DEFAULT;
512 *(int *)kp->arg = value;
518 response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
521 unsigned long new_value;
523 rc = kstrtoul(val, 0, &new_value);
525 CERROR("Invalid value for 'lnet_response_tracking'\n");
529 if (new_value > 3) { /* new_value is unsigned, so only the upper bound needs checking */
530 CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
535 lnet_response_tracking = new_value;
541 lnet_get_routes(void)
547 lnet_get_networks(void)
552 if (*networks != 0 && *ip2nets != 0) {
553 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
554 "'ip2nets' but not both at once\n");
559 rc = lnet_parse_ip2nets(&nets, ip2nets);
560 return (rc == 0) ? nets : NULL;
570 lnet_init_locks(void)
572 spin_lock_init(&the_lnet.ln_eq_wait_lock);
573 spin_lock_init(&the_lnet.ln_msg_resend_lock);
574 init_completion(&the_lnet.ln_mt_wait_complete);
575 mutex_init(&the_lnet.ln_lnd_mutex);
578 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
579 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
581 struct kmem_cache *lnet_udsp_cachep; /* udsp cache */
582 struct kmem_cache *lnet_rspt_cachep; /* response tracker cache */
583 struct kmem_cache *lnet_msg_cachep;
586 lnet_slab_setup(void)
588 /* create specific kmem_cache for MEs and small MDs (i.e., originally
589 * allocated in <size-xxx> kmem_cache).
591 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
593 if (!lnet_mes_cachep)
596 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
597 LNET_SMALL_MD_SIZE, 0, 0,
599 if (!lnet_small_mds_cachep)
602 lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
603 sizeof(struct lnet_udsp),
605 if (!lnet_udsp_cachep)
608 lnet_rspt_cachep = kmem_cache_create("lnet_rspt", sizeof(struct lnet_rsp_tracker),
610 if (!lnet_rspt_cachep)
613 lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
615 if (!lnet_msg_cachep)
622 lnet_slab_cleanup(void)
624 if (lnet_msg_cachep) {
625 kmem_cache_destroy(lnet_msg_cachep);
626 lnet_msg_cachep = NULL;
629 if (lnet_rspt_cachep) {
630 kmem_cache_destroy(lnet_rspt_cachep);
631 lnet_rspt_cachep = NULL;
634 if (lnet_udsp_cachep) {
635 kmem_cache_destroy(lnet_udsp_cachep);
636 lnet_udsp_cachep = NULL;
639 if (lnet_small_mds_cachep) {
640 kmem_cache_destroy(lnet_small_mds_cachep);
641 lnet_small_mds_cachep = NULL;
644 if (lnet_mes_cachep) {
645 kmem_cache_destroy(lnet_mes_cachep);
646 lnet_mes_cachep = NULL;
651 lnet_create_remote_nets_table(void)
654 struct list_head *hash;
656 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
657 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
658 CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
660 CERROR("Failed to create remote nets hash table\n");
664 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
665 INIT_LIST_HEAD(&hash[i]);
666 the_lnet.ln_remote_nets_hash = hash;
671 lnet_destroy_remote_nets_table(void)
675 if (the_lnet.ln_remote_nets_hash == NULL)
678 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
679 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
681 CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
682 LNET_REMOTE_NETS_HASH_SIZE);
683 the_lnet.ln_remote_nets_hash = NULL;
687 lnet_destroy_locks(void)
689 if (the_lnet.ln_res_lock != NULL) {
690 cfs_percpt_lock_free(the_lnet.ln_res_lock);
691 the_lnet.ln_res_lock = NULL;
694 if (the_lnet.ln_net_lock != NULL) {
695 cfs_percpt_lock_free(the_lnet.ln_net_lock);
696 the_lnet.ln_net_lock = NULL;
701 lnet_create_locks(void)
705 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
706 if (the_lnet.ln_res_lock == NULL)
709 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
710 if (the_lnet.ln_net_lock == NULL)
716 lnet_destroy_locks();
720 static void lnet_assert_wire_constants(void)
722 /* Wire protocol assertions generated by 'wirecheck'
723 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
724 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
725 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
729 BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
730 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
731 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
732 BUILD_BUG_ON(LNET_MSG_ACK != 0);
733 BUILD_BUG_ON(LNET_MSG_PUT != 1);
734 BUILD_BUG_ON(LNET_MSG_GET != 2);
735 BUILD_BUG_ON(LNET_MSG_REPLY != 3);
736 BUILD_BUG_ON(LNET_MSG_HELLO != 4);
738 BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
739 BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);
741 /* Checks for struct lnet_nid */
742 BUILD_BUG_ON((int)sizeof(struct lnet_nid) != 20);
743 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_size) != 0);
744 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_size) != 1);
745 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_type) != 1);
746 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_type) != 1);
747 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_num) != 2);
748 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_num) != 2);
749 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_addr) != 4);
750 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_addr) != 16);
752 /* Checks for struct lnet_process_id_packed */
753 BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
754 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
755 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
756 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
757 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);
759 /* Checks for struct lnet_handle_wire */
760 BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
761 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
762 wh_interface_cookie) != 0);
763 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
764 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
765 wh_object_cookie) != 8);
766 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
768 /* Checks for struct lnet_magicversion */
769 BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
770 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
771 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
772 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
773 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
774 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
775 version_minor) != 6);
776 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
778 /* Checks for struct lnet_hdr */
779 BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
780 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
781 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
782 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
783 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
784 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
785 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
786 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
787 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
788 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
789 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
790 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
791 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
792 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
793 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);
796 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
797 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
798 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
799 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
800 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
801 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);
804 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
805 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
806 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
807 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
808 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
809 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
810 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
811 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
812 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
813 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);
816 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
817 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
818 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
819 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
820 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
821 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
822 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
823 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
824 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
825 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);
828 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
829 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);
832 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
833 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
834 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
835 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);
837 /* Checks for struct lnet_ni_status and related constants */
838 BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
839 BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
840 BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);
842 /* Checks for struct lnet_ni_status */
843 BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
844 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
845 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
846 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
847 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
848 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_unused) != 12);
849 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) != 4);
851 /* Checks for struct lnet_ping_info and related constants */
852 BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
853 BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
854 BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
855 BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
856 BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
857 BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
858 BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
859 BUILD_BUG_ON(LNET_PING_FEAT_BITS != 31);
861 /* Checks for struct lnet_ping_info */
862 BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
863 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
864 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
865 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
866 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
867 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
868 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
869 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
870 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
871 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
872 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
874 /* Acceptor connection request */
875 BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
877 /* Checks for struct lnet_acceptor_connreq */
878 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
879 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
880 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
881 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
882 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
883 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
884 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);
886 /* Checks for struct lnet_acceptor_connreq_v2 */
887 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq_v2) != 28);
888 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_magic) != 0);
889 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_magic) != 4);
890 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_version) != 4);
891 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_version) != 4);
892 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_nid) != 8);
893 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_nid) != 20);
895 /* Checks for struct lnet_counters_common */
896 BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
897 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
898 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
899 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
900 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
901 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
902 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
903 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
904 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
905 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
906 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
907 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
908 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
909 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
910 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
911 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
912 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
913 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
914 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
915 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
916 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
917 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
918 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
921 static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
923 const struct lnet_lnd *lnd;
925 /* holding lnd mutex */
926 if (type >= NUM_LNDS)
928 lnd = the_lnet.ln_lnds[type];
929 LASSERT(!lnd || lnd->lnd_type == type);
935 lnet_get_lnd_timeout(void)
937 return lnet_lnd_timeout;
939 EXPORT_SYMBOL(lnet_get_lnd_timeout);
942 lnet_register_lnd(const struct lnet_lnd *lnd)
944 mutex_lock(&the_lnet.ln_lnd_mutex);
946 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
947 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
949 the_lnet.ln_lnds[lnd->lnd_type] = lnd;
951 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
953 mutex_unlock(&the_lnet.ln_lnd_mutex);
955 EXPORT_SYMBOL(lnet_register_lnd);
958 lnet_unregister_lnd(const struct lnet_lnd *lnd)
960 mutex_lock(&the_lnet.ln_lnd_mutex);
962 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
964 the_lnet.ln_lnds[lnd->lnd_type] = NULL;
965 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
967 mutex_unlock(&the_lnet.ln_lnd_mutex);
969 EXPORT_SYMBOL(lnet_unregister_lnd);
972 lnet_counters_get_common_locked(struct lnet_counters_common *common)
974 struct lnet_counters *ctr;
977 /* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
978 * actually called under the protection of the lnet_net_lock.
980 memset(common, 0, sizeof(*common));
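/* Counters are maintained per CPT; fold every per-CPT copy into the
 * single aggregate the caller asked for. */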
982 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
983 common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
984 common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
985 common->lcc_errors += ctr->lct_common.lcc_errors;
986 common->lcc_send_count += ctr->lct_common.lcc_send_count;
987 common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
988 common->lcc_route_count += ctr->lct_common.lcc_route_count;
989 common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
990 common->lcc_send_length += ctr->lct_common.lcc_send_length;
991 common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
992 common->lcc_route_length += ctr->lct_common.lcc_route_length;
993 common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
998 lnet_counters_get_common(struct lnet_counters_common *common)
1000 lnet_net_lock(LNET_LOCK_EX);
1001 lnet_counters_get_common_locked(common);
1002 lnet_net_unlock(LNET_LOCK_EX);
1004 EXPORT_SYMBOL(lnet_counters_get_common);
1007 lnet_counters_get(struct lnet_counters *counters)
1009 struct lnet_counters *ctr;
1010 struct lnet_counters_health *health = &counters->lct_health;
1013 memset(counters, 0, sizeof(*counters));
1015 lnet_net_lock(LNET_LOCK_EX);
1017 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1018 GOTO(out_unlock, rc = -ENODEV);
1020 lnet_counters_get_common_locked(&counters->lct_common);
1022 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
1023 health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
1024 health->lch_resend_count += ctr->lct_health.lch_resend_count;
1025 health->lch_response_timeout_count +=
1026 ctr->lct_health.lch_response_timeout_count;
1027 health->lch_local_interrupt_count +=
1028 ctr->lct_health.lch_local_interrupt_count;
1029 health->lch_local_dropped_count +=
1030 ctr->lct_health.lch_local_dropped_count;
1031 health->lch_local_aborted_count +=
1032 ctr->lct_health.lch_local_aborted_count;
1033 health->lch_local_no_route_count +=
1034 ctr->lct_health.lch_local_no_route_count;
1035 health->lch_local_timeout_count +=
1036 ctr->lct_health.lch_local_timeout_count;
1037 health->lch_local_error_count +=
1038 ctr->lct_health.lch_local_error_count;
1039 health->lch_remote_dropped_count +=
1040 ctr->lct_health.lch_remote_dropped_count;
1041 health->lch_remote_error_count +=
1042 ctr->lct_health.lch_remote_error_count;
1043 health->lch_remote_timeout_count +=
1044 ctr->lct_health.lch_remote_timeout_count;
1045 health->lch_network_timeout_count +=
1046 ctr->lct_health.lch_network_timeout_count;
1049 lnet_net_unlock(LNET_LOCK_EX);
1052 EXPORT_SYMBOL(lnet_counters_get);
1055 lnet_counters_reset(void)
1057 struct lnet_counters *counters;
1060 lnet_net_lock(LNET_LOCK_EX);
1062 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1065 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
1066 memset(counters, 0, sizeof(struct lnet_counters));
1068 lnet_net_unlock(LNET_LOCK_EX);
1072 lnet_res_type2str(int type)
1077 case LNET_COOKIE_TYPE_MD:
1079 case LNET_COOKIE_TYPE_ME:
1081 case LNET_COOKIE_TYPE_EQ:
1087 lnet_res_container_cleanup(struct lnet_res_container *rec)
1091 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
1094 while (!list_empty(&rec->rec_active)) {
1095 struct list_head *e = rec->rec_active.next;
1098 if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
1099 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
1101 } else { /* NB: Active MEs should be attached on portals */
1108 /* Found a live MD/ME/EQ; the user really should unlink/free
1109 * all of them before finalizing LNet, but if someone didn't,
1110 * we have to recycle the garbage for them */
1111 CERROR("%d active elements on exit of %s container\n",
1112 count, lnet_res_type2str(rec->rec_type));
1115 if (rec->rec_lh_hash != NULL) {
1116 CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
1117 rec->rec_lh_hash = NULL;
1120 rec->rec_type = 0; /* mark it as finalized */
1124 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
1129 LASSERT(rec->rec_type == 0);
1131 rec->rec_type = type;
1132 INIT_LIST_HEAD(&rec->rec_active);
1134 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
1136 /* Arbitrary choice of hash table size */
1137 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
1138 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
1139 if (rec->rec_lh_hash == NULL) {
1144 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
1145 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
1150 CERROR("Failed to setup %s resource container\n",
1151 lnet_res_type2str(type));
1152 lnet_res_container_cleanup(rec);
1157 lnet_res_containers_destroy(struct lnet_res_container **recs)
1159 struct lnet_res_container *rec;
1162 cfs_percpt_for_each(rec, i, recs)
1163 lnet_res_container_cleanup(rec);
1165 cfs_percpt_free(recs);
1168 static struct lnet_res_container **
1169 lnet_res_containers_create(int type)
1171 struct lnet_res_container **recs;
1172 struct lnet_res_container *rec;
1176 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
1178 CERROR("Failed to allocate %s resource containers\n",
1179 lnet_res_type2str(type));
1183 cfs_percpt_for_each(rec, i, recs) {
1184 rc = lnet_res_container_setup(rec, i, type);
1186 lnet_res_containers_destroy(recs);
1194 struct lnet_libhandle *
1195 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
1197 /* ALWAYS called with lnet_res_lock held */
1198 struct list_head *head;
1199 struct lnet_libhandle *lh;
1202 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
1205 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
1206 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
1208 list_for_each_entry(lh, head, lh_hash_chain) {
1209 if (lh->lh_cookie == cookie)
1217 lnet_res_lh_initialize(struct lnet_res_container *rec,
1218 struct lnet_libhandle *lh)
1220 /* ALWAYS called with lnet_res_lock held */
1221 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
1224 lh->lh_cookie = rec->rec_lh_cookie;
1225 rec->rec_lh_cookie += 1 << ibits;
1227 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
1229 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
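/*
 * Cookie layout, as used above and in lnet_res_lh_lookup(): the low
 * LNET_COOKIE_TYPE_BITS hold the resource type, the next LNET_CPT_BITS
 * hold the CPT, and the bits above them form a per-container counter
 * advanced by (1 << ibits) for every handle created. Lookup hashes that
 * counter portion into rec_lh_hash.
 */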
1233 lnet_create_array_of_queues(void)
1235 struct list_head **qs;
1236 struct list_head *q;
1239 qs = cfs_percpt_alloc(lnet_cpt_table(),
1240 sizeof(struct list_head));
1242 CERROR("Failed to allocate queues\n");
1246 cfs_percpt_for_each(q, i, qs)
1252 static int lnet_unprepare(void);
1255 lnet_prepare(lnet_pid_t requested_pid)
1257 /* Prepare to bring up the network */
1258 struct lnet_res_container **recs;
1261 if (requested_pid == LNET_PID_ANY) {
1262 /* Don't instantiate LNET just for me */
1266 LASSERT(the_lnet.ln_refcount == 0);
1268 the_lnet.ln_routing = 0;
1270 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
1271 the_lnet.ln_pid = requested_pid;
1273 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
1274 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
1275 INIT_LIST_HEAD(&the_lnet.ln_nets);
1276 INIT_LIST_HEAD(&the_lnet.ln_routers);
1277 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
1278 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
1279 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
1280 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
1281 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
1282 INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
1283 INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
1284 INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
1285 init_waitqueue_head(&the_lnet.ln_dc_waitq);
1286 the_lnet.ln_mt_handler = NULL;
1287 init_completion(&the_lnet.ln_started);
1289 rc = lnet_slab_setup();
1293 rc = lnet_create_remote_nets_table();
1298 * NB the interface cookie in wire handles guards against delayed
1299 * replies and ACKs appearing valid after reboot.
1301 the_lnet.ln_interface_cookie = ktime_get_real_ns();
1303 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
1304 sizeof(struct lnet_counters));
1305 if (the_lnet.ln_counters == NULL) {
1306 CERROR("Failed to allocate counters for LNet\n");
1311 rc = lnet_peer_tables_create();
1315 rc = lnet_msg_containers_create();
1319 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
1320 LNET_COOKIE_TYPE_EQ);
1324 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
1330 the_lnet.ln_md_containers = recs;
1332 rc = lnet_portals_create();
1334 CERROR("Failed to create portals for LNet: %d\n", rc);
1338 the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
1339 if (!the_lnet.ln_mt_zombie_rstqs) {
1352 lnet_unprepare (void)
1354 /* NB no LNET_LOCK since this is the last reference. All LND instances
1355 * have shut down already, so it is safe to unlink and free all
1356 * descriptors, even those that appear committed to a network op (e.g. an MD
1357 * with a non-zero pending count) */
1359 lnet_fail_nid(LNET_NID_ANY, 0);
1361 LASSERT(the_lnet.ln_refcount == 0);
1362 LASSERT(list_empty(&the_lnet.ln_test_peers));
1363 LASSERT(list_empty(&the_lnet.ln_nets));
1365 if (the_lnet.ln_mt_zombie_rstqs) {
1366 lnet_clean_zombie_rstqs();
1367 the_lnet.ln_mt_zombie_rstqs = NULL;
1370 lnet_assert_handler_unused(the_lnet.ln_mt_handler);
1371 the_lnet.ln_mt_handler = NULL;
1373 lnet_portals_destroy();
1375 if (the_lnet.ln_md_containers != NULL) {
1376 lnet_res_containers_destroy(the_lnet.ln_md_containers);
1377 the_lnet.ln_md_containers = NULL;
1380 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
1382 lnet_msg_containers_destroy();
1384 lnet_rtrpools_free(0);
1386 if (the_lnet.ln_counters != NULL) {
1387 cfs_percpt_free(the_lnet.ln_counters);
1388 the_lnet.ln_counters = NULL;
1390 lnet_destroy_remote_nets_table();
1391 lnet_udsp_destroy(true);
1392 lnet_slab_cleanup();
1398 lnet_net2ni_locked(__u32 net_id, int cpt)
1401 struct lnet_net *net;
1403 LASSERT(cpt != LNET_LOCK_EX);
1405 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1406 if (net->net_id == net_id) {
1407 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
1417 lnet_net2ni_addref(__u32 net)
1422 ni = lnet_net2ni_locked(net, 0);
1424 lnet_ni_addref_locked(ni, 0);
1429 EXPORT_SYMBOL(lnet_net2ni_addref);
1432 lnet_get_net_locked(__u32 net_id)
1434 struct lnet_net *net;
1436 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1437 if (net->net_id == net_id)
1445 lnet_net_clr_pref_rtrs(struct lnet_net *net)
1447 struct list_head zombies;
1448 struct lnet_nid_list *ne;
1449 struct lnet_nid_list *tmp;
1451 INIT_LIST_HEAD(&zombies);
1453 lnet_net_lock(LNET_LOCK_EX);
1454 list_splice_init(&net->net_rtr_pref_nids, &zombies);
1455 lnet_net_unlock(LNET_LOCK_EX);
1457 list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1458 list_del_init(&ne->nl_list);
1459 LIBCFS_FREE(ne, sizeof(*ne));
1464 lnet_net_add_pref_rtr(struct lnet_net *net,
1466 __must_hold(&the_lnet.ln_api_mutex)
1468 struct lnet_nid_list *ne;
1470 /* This function is called with api_mutex held. When the api_mutex
1471 * is held the list cannot be modified, as it is only modified as
1472 * a result of applying a UDSP and that happens under api_mutex
1475 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1476 if (ne->nl_nid == gw_nid)
1480 LIBCFS_ALLOC(ne, sizeof(*ne));
1484 ne->nl_nid = gw_nid;
1486 /* Lock the cpt to protect against addition and checks in the
1487 * selection algorithm
1489 lnet_net_lock(LNET_LOCK_EX);
1490 list_add(&ne->nl_list, &net->net_rtr_pref_nids);
1491 lnet_net_unlock(LNET_LOCK_EX);
1497 lnet_net_is_pref_rtr_locked(struct lnet_net *net, lnet_nid_t rtr_nid)
1499 struct lnet_nid_list *ne;
1501 CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
1502 libcfs_net2str(net->net_id),
1503 list_empty(&net->net_rtr_pref_nids));
1505 if (list_empty(&net->net_rtr_pref_nids))
1508 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1509 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
1510 libcfs_nid2str(ne->nl_nid),
1511 libcfs_nid2str(rtr_nid));
1512 if (rtr_nid == ne->nl_nid)
1520 lnet_nid4_cpt_hash(lnet_nid_t nid, unsigned int number)
1525 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1530 val = hash_long(key, LNET_CPT_BITS);
1531 /* NB: LNET_CPT_NUMBER doesn't have to be a power of two */
1535 return (unsigned int)(key + val + (val >> 1)) % number;
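/* Both lnet_nid4_cpt_hash() above and lnet_nid_cpt_hash() below fold the
 * NID into a value that is reduced modulo the CPT count, so a given NID
 * always maps to the same CPT. */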
1539 lnet_nid_cpt_hash(struct lnet_nid *nid, unsigned int number)
1545 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1550 if (nid_is_nid4(nid))
1551 return lnet_nid4_cpt_hash(lnet_nid_to_nid4(nid), number);
1553 for (i = 0; i < 4; i++)
1554 h = hash_32(nid->nid_addr[i]^h, 32);
1555 val = hash_32(LNET_NID_NET(nid) ^ h, LNET_CPT_BITS);
1558 return (unsigned int)(h + val + (val >> 1)) % number;
1562 lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni)
1564 struct lnet_net *net;
1566 /* must be called while holding lnet_net_lock */
1567 if (LNET_CPT_NUMBER == 1)
1568 return 0; /* the only one */
1571 * If NI is provided then use the CPT identified in the NI cpt
1572 * list if one exists. If one doesn't exist, then that NI is
1573 * associated with all CPTs and it follows that the net it belongs
1574 * to is implicitly associated with all CPTs, so just hash the nid
1578 if (ni->ni_cpts != NULL)
1579 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
1582 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1585 /* no NI provided so look at the net */
1586 net = lnet_get_net_locked(LNET_NID_NET(nid));
1588 if (net != NULL && net->net_cpts != NULL) {
1589 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
1592 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1596 lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni)
1601 if (LNET_CPT_NUMBER == 1)
1602 return 0; /* the only one */
1604 cpt = lnet_net_lock_current();
1606 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
1608 lnet_net_unlock(cpt);
1612 EXPORT_SYMBOL(lnet_nid2cpt);
1615 lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni)
1617 struct lnet_nid nid;
1619 if (LNET_CPT_NUMBER == 1)
1620 return 0; /* the only one */
1622 lnet_nid4_to_nid(nid4, &nid);
1623 return lnet_nid2cpt(&nid, ni);
1625 EXPORT_SYMBOL(lnet_cpt_of_nid);
1628 lnet_islocalnet_locked(__u32 net_id)
1630 struct lnet_net *net;
1633 net = lnet_get_net_locked(net_id);
1635 local = net != NULL;
1641 lnet_islocalnet(__u32 net_id)
1646 cpt = lnet_net_lock_current();
1648 local = lnet_islocalnet_locked(net_id);
1650 lnet_net_unlock(cpt);
1656 lnet_nid_to_ni_locked(struct lnet_nid *nid, int cpt)
1658 struct lnet_net *net;
1661 LASSERT(cpt != LNET_LOCK_EX);
1663 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1664 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1665 if (nid_same(&ni->ni_nid, nid))
1674 lnet_nid2ni_locked(lnet_nid_t nid4, int cpt)
1676 struct lnet_nid nid;
1678 lnet_nid4_to_nid(nid4, &nid);
1679 return lnet_nid_to_ni_locked(&nid, cpt);
1683 lnet_nid2ni_addref(lnet_nid_t nid4)
1686 struct lnet_nid nid;
1688 lnet_nid4_to_nid(nid4, &nid);
1691 ni = lnet_nid_to_ni_locked(&nid, 0);
1693 lnet_ni_addref_locked(ni, 0);
1698 EXPORT_SYMBOL(lnet_nid2ni_addref);
1701 lnet_nid_to_ni_addref(struct lnet_nid *nid)
1706 ni = lnet_nid_to_ni_locked(nid, 0);
1708 lnet_ni_addref_locked(ni, 0);
1713 EXPORT_SYMBOL(lnet_nid_to_ni_addref);
1716 lnet_islocalnid(lnet_nid_t nid)
1721 cpt = lnet_net_lock_current();
1722 ni = lnet_nid2ni_locked(nid, cpt);
1723 lnet_net_unlock(cpt);
1729 lnet_count_acceptor_nets(void)
1731 /* Return the # of NIs that need the acceptor. */
1733 struct lnet_net *net;
1736 cpt = lnet_net_lock_current();
1737 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1738 /* all socklnd type networks should have the acceptor
1740 if (net->net_lnd->lnd_accept != NULL)
1744 lnet_net_unlock(cpt);
1749 struct lnet_ping_buffer *
1750 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1752 struct lnet_ping_buffer *pbuf;
1754 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1756 pbuf->pb_nnis = nnis;
1757 pbuf->pb_needs_post = false;
1758 atomic_set(&pbuf->pb_refcnt, 1);
1765 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1767 LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
1768 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1771 static struct lnet_ping_buffer *
1772 lnet_ping_target_create(int nnis)
1774 struct lnet_ping_buffer *pbuf;
1776 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1778 CERROR("Can't allocate ping source [%d]\n", nnis);
1782 pbuf->pb_info.pi_nnis = nnis;
1783 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1784 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1785 pbuf->pb_info.pi_features =
1786 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1792 lnet_get_net_ni_count_locked(struct lnet_net *net)
1797 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1804 lnet_get_net_ni_count_pre(struct lnet_net *net)
1809 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1816 lnet_get_ni_count(void)
1819 struct lnet_net *net;
1824 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1825 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1835 lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
1837 struct lnet_ni_status *stat;
1841 __swab32s(&pbuf->pb_info.pi_magic);
1842 __swab32s(&pbuf->pb_info.pi_features);
1843 __swab32s(&pbuf->pb_info.pi_pid);
1844 __swab32s(&pbuf->pb_info.pi_nnis);
1845 nnis = pbuf->pb_info.pi_nnis;
1846 if (nnis > pbuf->pb_nnis)
1847 nnis = pbuf->pb_nnis;
1848 for (i = 0; i < nnis; i++) {
1849 stat = &pbuf->pb_info.pi_ni[i];
1850 __swab64s(&stat->ns_nid);
1851 __swab32s(&stat->ns_status);
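/* The byte swap above is needed when a ping buffer arrives from a peer of
 * opposite endianness; callers detect that case by seeing a byte-swapped
 * LNET_PROTO_PING_MAGIC (see lnet_push_target_event_handler() below). */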
1856 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1860 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1862 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1864 /* Loopback is guaranteed to be present */
1865 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1867 if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
1873 lnet_ping_target_destroy(void)
1875 struct lnet_net *net;
1878 lnet_net_lock(LNET_LOCK_EX);
1880 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1881 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1883 ni->ni_status = NULL;
1888 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1889 the_lnet.ln_ping_target = NULL;
1891 lnet_net_unlock(LNET_LOCK_EX);
1895 lnet_ping_target_event_handler(struct lnet_event *event)
1897 struct lnet_ping_buffer *pbuf = event->md_user_ptr;
1899 if (event->unlinked)
1900 lnet_ping_buffer_decref(pbuf);
1904 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1905 struct lnet_handle_md *ping_mdh,
1906 int ni_count, bool set_eq)
1908 struct lnet_process_id id = {
1909 .nid = LNET_NID_ANY,
1913 struct lnet_md md = { NULL };
1917 the_lnet.ln_ping_target_handler =
1918 lnet_ping_target_event_handler;
1920 *ppbuf = lnet_ping_target_create(ni_count);
1921 if (*ppbuf == NULL) {
1926 /* Ping target ME/MD */
1927 me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1928 LNET_PROTO_PING_MATCHBITS, 0,
1929 LNET_UNLINK, LNET_INS_AFTER);
1932 CERROR("Can't create ping target ME: %d\n", rc);
1933 goto fail_decref_ping_buffer;
1936 /* initialize md content */
1937 md.start = &(*ppbuf)->pb_info;
1938 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1939 md.threshold = LNET_MD_THRESH_INF;
1941 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1942 LNET_MD_MANAGE_REMOTE;
1943 md.handler = the_lnet.ln_ping_target_handler;
1944 md.user_ptr = *ppbuf;
1946 rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
1948 CERROR("Can't attach ping target MD: %d\n", rc);
1949 goto fail_decref_ping_buffer;
1951 lnet_ping_buffer_addref(*ppbuf);
1955 fail_decref_ping_buffer:
1956 LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
1957 lnet_ping_buffer_decref(*ppbuf);
1964 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1965 struct lnet_handle_md *ping_mdh)
1967 LNetMDUnlink(*ping_mdh);
1968 LNetInvalidateMDHandle(ping_mdh);
1970 /* NB the MD could be busy; this just starts the unlink */
1971 wait_var_event_warning(&pbuf->pb_refcnt,
1972 atomic_read(&pbuf->pb_refcnt) <= 1,
1973 "Still waiting for ping data MD to unlink\n");
1977 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1980 struct lnet_net *net;
1981 struct lnet_ni_status *ns;
1986 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1987 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1988 LASSERT(i < pbuf->pb_nnis);
1990 ns = &pbuf->pb_info.pi_ni[i];
1992 if (!nid_is_nid4(&ni->ni_nid))
1994 ns->ns_nid = lnet_nid_to_nid4(&ni->ni_nid);
1997 ns->ns_status = lnet_ni_get_status_locked(ni);
2005 * We (ab)use the ns_status of the loopback interface to
2006 * transmit the sequence number. The first interface listed
2007 * must be the loopback interface.
2009 rc = lnet_ping_info_validate(&pbuf->pb_info);
2011 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
2014 LNET_PING_BUFFER_SEQNO(pbuf) =
2015 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
2019 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
2020 struct lnet_handle_md ping_mdh)
2022 struct lnet_ping_buffer *old_pbuf = NULL;
2023 struct lnet_handle_md old_ping_md;
2025 /* switch the NIs to point to the newly created ping info */
2026 lnet_net_lock(LNET_LOCK_EX);
2028 if (!the_lnet.ln_routing)
2029 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
2030 if (!lnet_peer_discovery_disabled)
2031 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
2033 /* Ensure only known feature bits have been set. */
2034 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
2035 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
2037 lnet_ping_target_install_locked(pbuf);
2039 if (the_lnet.ln_ping_target) {
2040 old_pbuf = the_lnet.ln_ping_target;
2041 old_ping_md = the_lnet.ln_ping_target_md;
2043 the_lnet.ln_ping_target_md = ping_mdh;
2044 the_lnet.ln_ping_target = pbuf;
2046 lnet_net_unlock(LNET_LOCK_EX);
2049 /* unlink and free the old ping info */
2050 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
2051 lnet_ping_buffer_decref(old_pbuf);
2054 lnet_push_update_to_peers(0);
2058 lnet_ping_target_fini(void)
2060 lnet_ping_md_unlink(the_lnet.ln_ping_target,
2061 &the_lnet.ln_ping_target_md);
2063 lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
2064 lnet_ping_target_destroy();
2067 /* Resize the push target. */
2068 int lnet_push_target_resize(void)
2070 struct lnet_handle_md mdh;
2071 struct lnet_handle_md old_mdh;
2072 struct lnet_ping_buffer *pbuf;
2073 struct lnet_ping_buffer *old_pbuf;
2078 nnis = the_lnet.ln_push_target_nnis;
2080 CDEBUG(D_NET, "Invalid nnis %d\n", nnis);
2084 /* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
2085 * dropped when we need to resize again (see "old_pbuf" below) or when
2086 * LNet is shut down (see lnet_push_target_fini())
2088 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
2090 CDEBUG(D_NET, "Can't allocate pbuf for nnis %d\n", nnis);
2094 rc = lnet_push_target_post(pbuf, &mdh);
2096 CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
2097 lnet_ping_buffer_decref(pbuf);
2101 lnet_net_lock(LNET_LOCK_EX);
2102 old_pbuf = the_lnet.ln_push_target;
2103 old_mdh = the_lnet.ln_push_target_md;
2104 the_lnet.ln_push_target = pbuf;
2105 the_lnet.ln_push_target_md = mdh;
2106 lnet_net_unlock(LNET_LOCK_EX);
2109 LNetMDUnlink(old_mdh);
2110 /* Drop ref set by lnet_ping_buffer_alloc() */
2111 lnet_ping_buffer_decref(old_pbuf);
2114 /* Received another push or reply that requires a larger buffer */
2115 if (nnis < the_lnet.ln_push_target_nnis)
2118 CDEBUG(D_NET, "nnis %d success\n", nnis);
2122 int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
2123 struct lnet_handle_md *mdhp)
2125 struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
2126 struct lnet_md md = { NULL };
2130 me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
2131 LNET_PROTO_PING_MATCHBITS, 0,
2132 LNET_UNLINK, LNET_INS_AFTER);
2135 CERROR("Can't create push target ME: %d\n", rc);
2139 pbuf->pb_needs_post = false;
2141 /* This reference is dropped by lnet_push_target_event_handler() */
2142 lnet_ping_buffer_addref(pbuf);
2144 /* initialize md content */
2145 md.start = &pbuf->pb_info;
2146 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
2149 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
2151 md.handler = the_lnet.ln_push_target_handler;
2153 rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
2155 CERROR("Can't attach push MD: %d\n", rc);
2156 lnet_ping_buffer_decref(pbuf);
2157 pbuf->pb_needs_post = true;
2161 CDEBUG(D_NET, "posted push target %p\n", pbuf);
2166 static void lnet_push_target_event_handler(struct lnet_event *ev)
2168 struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
2170 CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
2173 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2174 lnet_swap_pinginfo(pbuf);
2176 if (ev->type == LNET_EVENT_UNLINK) {
2177 /* Drop ref added by lnet_push_target_post() */
2178 lnet_ping_buffer_decref(pbuf);
2182 lnet_peer_push_event(ev);
2184 /* Drop ref added by lnet_push_target_post */
2185 lnet_ping_buffer_decref(pbuf);
2188 /* Initialize the push target. */
2189 static int lnet_push_target_init(void)
2193 if (the_lnet.ln_push_target)
2196 the_lnet.ln_push_target_handler =
2197 lnet_push_target_event_handler;
2199 rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
2202 /* Start at the required minimum, we'll enlarge if required. */
2203 the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
2205 rc = lnet_push_target_resize();
2208 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2209 the_lnet.ln_push_target_handler = NULL;
2215 /* Clean up the push target. */
2216 static void lnet_push_target_fini(void)
2218 if (!the_lnet.ln_push_target)
2221 /* Unlink and invalidate to prevent new references. */
2222 LNetMDUnlink(the_lnet.ln_push_target_md);
2223 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
2225 /* Wait for the unlink to complete. */
2226 wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
2227 atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
2228 "Still waiting for ping data MD to unlink\n");
2230 /* Drop ref set by lnet_ping_buffer_alloc() */
2231 lnet_ping_buffer_decref(the_lnet.ln_push_target);
2232 the_lnet.ln_push_target = NULL;
2233 the_lnet.ln_push_target_nnis = 0;
2235 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2236 lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
2237 the_lnet.ln_push_target_handler = NULL;
2241 lnet_ni_tq_credits(struct lnet_ni *ni)
2245 LASSERT(ni->ni_ncpts >= 1);
2247 if (ni->ni_ncpts == 1)
2248 return ni->ni_net->net_tunables.lct_max_tx_credits;
2250 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
2251 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
2252 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
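/*
 * Worked example (illustrative tunable values, not defaults): with
 * lct_max_tx_credits = 256, lct_peer_tx_credits = 8 and ni_ncpts = 4,
 * the per-queue share is 256 / 4 = 64, the lower bound max(64, 8 * 8)
 * leaves it at 64, and the upper bound min(64, 256) keeps it, so each of
 * the 4 TX queues gets 64 credits.  With lct_max_tx_credits = 64,
 * lct_peer_tx_credits = 16 and ni_ncpts = 8, the share 64 / 8 = 8 is
 * first raised to 8 * 16 = 128 by the lower bound and then clamped back
 * to 64 by the upper bound, i.e. one queue may use the whole pool.
 */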
2258 lnet_ni_unlink_locked(struct lnet_ni *ni)
2260 /* move it to zombie list and nobody can find it anymore */
2261 LASSERT(!list_empty(&ni->ni_netlist));
2262 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
2263 lnet_ni_decref_locked(ni, 0);
2267 lnet_clear_zombies_nis_locked(struct lnet_net *net)
2272 struct list_head *zombie_list = &net->net_ni_zombie;
2275 * Now wait for the NIs I just nuked to show up on the zombie
2276 * list and shut them down in guaranteed thread context
2279 while (!list_empty(zombie_list)) {
2283 ni = list_entry(zombie_list->next,
2284 struct lnet_ni, ni_netlist);
2285 list_del_init(&ni->ni_netlist);
2286		/* the ni should be in deleting state. If it's not it's a bug */
2288 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
2289 cfs_percpt_for_each(ref, j, ni->ni_refs) {
2292 /* still busy, add it back to zombie list */
2293 list_add(&ni->ni_netlist, zombie_list);
2297 if (!list_empty(&ni->ni_netlist)) {
2298 /* Unlock mutex while waiting to allow other
2299 * threads to read the LNet state and fall through
2302 lnet_net_unlock(LNET_LOCK_EX);
2303 mutex_unlock(&the_lnet.ln_api_mutex);
2306 if ((i & (-i)) == i) {
2308 "Waiting for zombie LNI %s\n",
2309 libcfs_nidstr(&ni->ni_nid));
2311 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2313 mutex_lock(&the_lnet.ln_api_mutex);
2314 lnet_net_lock(LNET_LOCK_EX);
2318 lnet_net_unlock(LNET_LOCK_EX);
2320 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
2322 LASSERT(!in_interrupt());
2323 /* Holding the mutex makes it safe for lnd_shutdown
2324 * to call module_put(). Module unload cannot finish
2325 * until lnet_unregister_lnd() completes, and that
2326 * requires the mutex.
2328 mutex_lock(&the_lnet.ln_lnd_mutex);
2329 (net->net_lnd->lnd_shutdown)(ni);
2330 mutex_unlock(&the_lnet.ln_lnd_mutex);
2333 CDEBUG(D_LNI, "Removed LNI %s\n",
2334 libcfs_nidstr(&ni->ni_nid));
2338 lnet_net_lock(LNET_LOCK_EX);
2342 /* shut down the NI and release its refcount */
2344 lnet_shutdown_lndni(struct lnet_ni *ni)
2347 struct lnet_net *net = ni->ni_net;
2349 lnet_net_lock(LNET_LOCK_EX);
2351 ni->ni_state = LNET_NI_STATE_DELETING;
2353 lnet_ni_unlink_locked(ni);
2354 lnet_incr_dlc_seq();
2355 lnet_net_unlock(LNET_LOCK_EX);
2357 /* clear messages for this NI on the lazy portal */
2358 for (i = 0; i < the_lnet.ln_nportals; i++)
2359 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
2361 lnet_net_lock(LNET_LOCK_EX);
2362 lnet_clear_zombies_nis_locked(net);
2363 lnet_net_unlock(LNET_LOCK_EX);
2367 lnet_shutdown_lndnet(struct lnet_net *net)
2371 lnet_net_lock(LNET_LOCK_EX);
2373 list_del_init(&net->net_list);
2375 while (!list_empty(&net->net_ni_list)) {
2376 ni = list_entry(net->net_ni_list.next,
2377 struct lnet_ni, ni_netlist);
2378 lnet_net_unlock(LNET_LOCK_EX);
2379 lnet_shutdown_lndni(ni);
2380 lnet_net_lock(LNET_LOCK_EX);
2383 lnet_net_unlock(LNET_LOCK_EX);
2385 /* Do peer table cleanup for this net */
2386 lnet_peer_tables_cleanup(net);
2392 lnet_shutdown_lndnets(void)
2394 struct lnet_net *net;
2396 struct lnet_msg *msg, *tmp;
2398 /* NB called holding the global mutex */
2400 /* All quiet on the API front */
2401 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
2402 LASSERT(the_lnet.ln_refcount == 0);
2404 lnet_net_lock(LNET_LOCK_EX);
2405 the_lnet.ln_state = LNET_STATE_STOPPING;
2408 * move the nets to the zombie list to avoid them being
2409 * picked up for new work. LONET is also included in the
2410	 * nets that will be moved to the zombie list
2412 list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
2414 /* Drop the cached loopback Net. */
2415 if (the_lnet.ln_loni != NULL) {
2416 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
2417 the_lnet.ln_loni = NULL;
2419 lnet_net_unlock(LNET_LOCK_EX);
2421 /* iterate through the net zombie list and delete each net */
2422 while (!list_empty(&the_lnet.ln_net_zombie)) {
2423 net = list_entry(the_lnet.ln_net_zombie.next,
2424 struct lnet_net, net_list);
2425 lnet_shutdown_lndnet(net);
2428 spin_lock(&the_lnet.ln_msg_resend_lock);
2429 list_splice(&the_lnet.ln_msg_resend, &resend);
2430 spin_unlock(&the_lnet.ln_msg_resend_lock);
2432 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
2433 list_del_init(&msg->msg_list);
2434 msg->msg_no_resend = true;
2435 lnet_finalize(msg, -ECANCELED);
2438 lnet_net_lock(LNET_LOCK_EX);
2439 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
2440 lnet_net_unlock(LNET_LOCK_EX);
2444 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2447 struct lnet_tx_queue *tq;
2449 struct lnet_net *net = ni->ni_net;
2451 mutex_lock(&the_lnet.ln_lnd_mutex);
2454 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2455 ni->ni_lnd_tunables_set = true;
2458 rc = (net->net_lnd->lnd_startup)(ni);
2460 mutex_unlock(&the_lnet.ln_lnd_mutex);
2463 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2464 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2469 ni->ni_state = LNET_NI_STATE_ACTIVE;
2472 /* We keep a reference on the loopback net through the loopback NI */
2473 if (net->net_lnd->lnd_type == LOLND) {
2475 LASSERT(the_lnet.ln_loni == NULL);
2476 the_lnet.ln_loni = ni;
2477 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2478 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2479 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2480 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2484 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2485 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2486 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2487 libcfs_lnd2str(net->net_lnd->lnd_type),
2488 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2490		/* shutdown the NI since if we get here then it must've already been started */
2493 lnet_shutdown_lndni(ni);
2497 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2498 tq->tq_credits_min =
2499 tq->tq_credits_max =
2500 tq->tq_credits = lnet_ni_tq_credits(ni);
2503 atomic_set(&ni->ni_tx_credits,
2504 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2505 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2507 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2508 libcfs_nidstr(&ni->ni_nid),
2509 ni->ni_net->net_tunables.lct_peer_tx_credits,
2510 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2511 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2512 ni->ni_net->net_tunables.lct_peer_timeout);
2521 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2524 struct lnet_net *net_l = NULL;
2525 LIST_HEAD(local_ni_list);
2529 const struct lnet_lnd *lnd;
2531 net->net_tunables.lct_peer_timeout;
2533 net->net_tunables.lct_max_tx_credits;
2534 int peerrtrcredits =
2535 net->net_tunables.lct_peer_rtr_credits;
2538 * make sure that this net is unique. If it isn't then
2539 * we are adding interfaces to an already existing network, and
2540 * 'net' is just a convenient way to pass in the list.
2541	 * If it is unique we need to find the LND and load it if necessary.
2544 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2545 lnd_type = LNET_NETTYP(net->net_id);
2547 mutex_lock(&the_lnet.ln_lnd_mutex);
2548 lnd = lnet_find_lnd_by_type(lnd_type);
2551 mutex_unlock(&the_lnet.ln_lnd_mutex);
2552 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2553 mutex_lock(&the_lnet.ln_lnd_mutex);
2555 lnd = lnet_find_lnd_by_type(lnd_type);
2557 mutex_unlock(&the_lnet.ln_lnd_mutex);
2558 CERROR("Can't load LND %s, module %s, rc=%d\n",
2559 libcfs_lnd2str(lnd_type),
2560 libcfs_lnd2modname(lnd_type), rc);
2561 #ifndef HAVE_MODULE_LOADING_SUPPORT
2562 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
2563 "compiled with kernel module "
2564 "loading support.");
2573 mutex_unlock(&the_lnet.ln_lnd_mutex);
2579 * net_l: if the network being added is unique then net_l
2580 * will point to that network
2581 * if the network being added is not unique then
2582 * net_l points to the existing network.
2584	 * When we enter the loop below, we'll pick NIs off the
2585	 * network being added and start them up, then add them to
2586 * a local ni list. Once we've successfully started all
2587 * the NIs then we join the local NI list (of started up
2588 * networks) with the net_l->net_ni_list, which should
2589 * point to the correct network to add the new ni list to
2591 * If any of the new NIs fail to start up, then we want to
2592 * iterate through the local ni list, which should include
2593 * any NIs which were successfully started up, and shut
2596	 * After that we want to delete the network being added,
2597 * to avoid a memory leak.
2599 while (!list_empty(&net->net_ni_added)) {
2600 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
2602 list_del_init(&ni->ni_netlist);
2604		/* make sure that the NI we're about to start
2605		 * up is actually unique. If it's not, fail. */
2606 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2607 ni->ni_interface)) {
2612		/* adjust the pointer to the parent network, just in case
2613		 * the net is a duplicate */
2616 rc = lnet_startup_lndni(ni, tun);
2622 list_add_tail(&ni->ni_netlist, &local_ni_list);
2627 lnet_net_lock(LNET_LOCK_EX);
2628 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2629 lnet_incr_dlc_seq();
2630 lnet_net_unlock(LNET_LOCK_EX);
2632 /* if the network is not unique then we don't want to keep
2633 * it around after we're done. Free it. Otherwise add that
2634 * net to the global the_lnet.ln_nets */
2635 if (net_l != net && net_l != NULL) {
2637 * TODO - note. currently the tunables can not be updated
2643		 * restore tunables after it has been overwritten by the
2646 if (peer_timeout != -1)
2647 net->net_tunables.lct_peer_timeout = peer_timeout;
2648 if (maxtxcredits != -1)
2649 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2650 if (peerrtrcredits != -1)
2651 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2653 lnet_net_lock(LNET_LOCK_EX);
2654 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2655 lnet_net_unlock(LNET_LOCK_EX);
2662 * shutdown the new NIs that are being started up
2663 * free the NET being started
2665 while (!list_empty(&local_ni_list)) {
2666 ni = list_entry(local_ni_list.next, struct lnet_ni,
2669 lnet_shutdown_lndni(ni);
2679 lnet_startup_lndnets(struct list_head *netlist)
2681 struct lnet_net *net;
2686 * Change to running state before bringing up the LNDs. This
2687 * allows lnet_shutdown_lndnets() to assert that we've passed
2690 lnet_net_lock(LNET_LOCK_EX);
2691 the_lnet.ln_state = LNET_STATE_RUNNING;
2692 lnet_net_unlock(LNET_LOCK_EX);
2694 while (!list_empty(netlist)) {
2695 net = list_entry(netlist->next, struct lnet_net, net_list);
2696 list_del_init(&net->net_list);
2698 rc = lnet_startup_lndnet(net, NULL);
2708 lnet_shutdown_lndnets();
2713 static int lnet_genl_parse_list(struct sk_buff *msg,
2714 const struct ln_key_list *data[], u16 idx)
2716 const struct ln_key_list *list = data[idx];
2717 const struct ln_key_props *props;
2718 struct nlattr *node;
2724 if (!list->lkl_maxattr)
2727 props = list->lkl_list;
2731 node = nla_nest_start(msg, LN_SCALAR_ATTR_LIST);
2735 for (count = 1; count <= list->lkl_maxattr; count++) {
2736 struct nlattr *key = nla_nest_start(msg, count);
2739 nla_put_u16(msg, LN_SCALAR_ATTR_LIST_SIZE,
2742 nla_put_u16(msg, LN_SCALAR_ATTR_INDEX, count);
2743 if (props[count].lkp_values)
2744 nla_put_string(msg, LN_SCALAR_ATTR_VALUE,
2745 props[count].lkp_values);
2746 if (props[count].lkp_key_format)
2747 nla_put_u16(msg, LN_SCALAR_ATTR_KEY_FORMAT,
2748 props[count].lkp_key_format);
2749 nla_put_u16(msg, LN_SCALAR_ATTR_NLA_TYPE,
2750 props[count].lkp_data_type);
2751 if (props[count].lkp_data_type == NLA_NESTED) {
2754 rc = lnet_genl_parse_list(msg, data, ++idx);
2759 nla_nest_end(msg, key);
2762 nla_nest_end(msg, node);
2766 int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
2767 const struct genl_family *family, int flags,
2768 u8 cmd, const struct ln_key_list *data[])
2776 hdr = genlmsg_put(msg, portid, seq, family, flags, cmd);
2778 GOTO(canceled, rc = -EMSGSIZE);
2780 rc = lnet_genl_parse_list(msg, data, 0);
2784 genlmsg_end(msg, hdr);
2787 genlmsg_cancel(msg, hdr);
2790 EXPORT_SYMBOL(lnet_genl_send_scalar_list);
2793 * Initialize LNet library.
2795 * Automatically called at module loading time. Caller has to call
2796 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2797 * latter returned 0. It must be called exactly once.
2799 * \retval 0 on success
2800 * \retval -ve on failures.
2802 int lnet_lib_init(void)
2806 lnet_assert_wire_constants();
2808 /* refer to global cfs_cpt_table for now */
2809 the_lnet.ln_cpt_table = cfs_cpt_tab;
2810 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
2812 LASSERT(the_lnet.ln_cpt_number > 0);
2813 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2814		/* we are at risk of consuming all lh_cookie */
2815 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2816 "please change setting of CPT-table and retry\n",
2817 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2821 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2822 the_lnet.ln_cpt_bits++;
2824 rc = lnet_create_locks();
2826 CERROR("Can't create LNet global locks: %d\n", rc);
2830 the_lnet.ln_refcount = 0;
2831 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2832 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2834 /* The hash table size is the number of bits it takes to express the set
2835	 * ln_num_routes, minus 1 (better to underestimate than over so we
2836 * don't waste memory). */
2837 if (rnet_htable_size <= 0)
2838 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2839 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2840 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2841 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2842 order_base_2(rnet_htable_size) - 1);
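/*
 * Worked example (hypothetical setting): rnet_htable_size = 128 gives
 * order_base_2(128) = 7, so ln_remote_nets_hbits = 7 - 1 = 6 and the
 * remote-nets table gets 2^6 = 64 buckets, i.e. half the requested
 * size, matching the "better to underestimate" note above.  The
 * max_t(int, 1, ...) guard keeps the table at no fewer than 2 buckets
 * even for degenerate values such as rnet_htable_size = 1.
 */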
2844 /* All LNDs apart from the LOLND are in separate modules. They
2845 * register themselves when their module loads, and unregister
2846 * themselves when their module is unloaded. */
2847 lnet_register_lnd(&the_lolnd);
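/*
 * Illustrative sketch only: how a module init/exit pair is expected to
 * drive lnet_lib_init()/lnet_lib_exit().  The example_* names below are
 * hypothetical; the real callers live in the LNet module glue code.
 */
#if 0
static int __init example_module_init(void)
{
	int rc = lnet_lib_init();

	if (rc)		/* locks/CPT setup failed; nothing to undo */
		return rc;

	/* ... register ioctl handlers, debugfs entries, etc. ... */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* legal only because example_module_init() returned 0 */
	lnet_lib_exit();
}
#endif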
2852 * Finalize LNet library.
2854 * \pre lnet_lib_init() called with success.
2855 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2857 * As this happens at module-unload, all lnds must already be unloaded,
2858 * so they must already be unregistered.
2860 void lnet_lib_exit(void)
2864 LASSERT(the_lnet.ln_refcount == 0);
2865 lnet_unregister_lnd(&the_lolnd);
2866 for (i = 0; i < NUM_LNDS; i++)
2867 LASSERT(!the_lnet.ln_lnds[i]);
2868 lnet_destroy_locks();
2872 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2874 * Users must call this function at least once before any other functions.
2875 * For each successful call there must be a corresponding call to
2876 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is ignored.
2879 * The PID used by LNet may be different from the one requested.
2882 * \param requested_pid PID requested by the caller.
2884 * \return >= 0 on success, and < 0 error code on failures.
2887 LNetNIInit(lnet_pid_t requested_pid)
2889 int im_a_router = 0;
2892 struct lnet_ping_buffer *pbuf;
2893 struct lnet_handle_md ping_mdh;
2894 LIST_HEAD(net_head);
2895 struct lnet_net *net;
2897 mutex_lock(&the_lnet.ln_api_mutex);
2899 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2901 if (the_lnet.ln_refcount > 0) {
2902 rc = the_lnet.ln_refcount++;
2903 mutex_unlock(&the_lnet.ln_api_mutex);
2907 rc = lnet_prepare(requested_pid);
2909 mutex_unlock(&the_lnet.ln_api_mutex);
2913 /* create a network for Loopback network */
2914 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2917 goto err_empty_list;
2920 /* Add in the loopback NI */
2921 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2923 goto err_empty_list;
2926 if (use_tcp_bonding)
2927 CWARN("use_tcp_bonding has been removed. Use Multi-Rail and Dynamic Discovery instead, see LU-13641\n");
2929 /* If LNet is being initialized via DLC it is possible
2930 * that the user requests not to load module parameters (ones which
2931 * are supported by DLC) on initialization. Therefore, make sure not
2932 * to load networks, routes and forwarding from module parameters
2933 * in this case. On cleanup in case of failure only clean up
2934	 * routes if they have been loaded */
2935 if (!the_lnet.ln_nis_from_mod_params) {
2936 rc = lnet_parse_networks(&net_head, lnet_get_networks());
2938 goto err_empty_list;
2941 ni_count = lnet_startup_lndnets(&net_head);
2944 goto err_empty_list;
2947 if (!the_lnet.ln_nis_from_mod_params) {
2948 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2950 goto err_shutdown_lndnis;
2952 rc = lnet_rtrpools_alloc(im_a_router);
2954 goto err_destroy_routes;
2957 rc = lnet_acceptor_start();
2959 goto err_destroy_routes;
2961 the_lnet.ln_refcount = 1;
2962 /* Now I may use my own API functions... */
2964 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2966 goto err_acceptor_stop;
2968 lnet_ping_target_update(pbuf, ping_mdh);
2970 the_lnet.ln_mt_handler = lnet_mt_event_handler;
2972 rc = lnet_push_target_init();
2976 rc = lnet_peer_discovery_start();
2978 goto err_destroy_push_target;
2980 rc = lnet_monitor_thr_start();
2982 goto err_stop_discovery_thr;
2985 lnet_router_debugfs_init();
2987 mutex_unlock(&the_lnet.ln_api_mutex);
2989 complete_all(&the_lnet.ln_started);
2991 /* wait for all routers to start */
2992 lnet_wait_router_start();
2996 err_stop_discovery_thr:
2997 lnet_peer_discovery_stop();
2998 err_destroy_push_target:
2999 lnet_push_target_fini();
3001 lnet_ping_target_fini();
3003 the_lnet.ln_refcount = 0;
3004 lnet_acceptor_stop();
3006 if (!the_lnet.ln_nis_from_mod_params)
3007 lnet_destroy_routes();
3008 err_shutdown_lndnis:
3009 lnet_shutdown_lndnets();
3013 mutex_unlock(&the_lnet.ln_api_mutex);
3014 while (!list_empty(&net_head)) {
3015 struct lnet_net *net;
3017 net = list_entry(net_head.next, struct lnet_net, net_list);
3018 list_del_init(&net->net_list);
3023 EXPORT_SYMBOL(LNetNIInit);
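/*
 * Illustrative sketch only (hypothetical caller): each successful
 * LNetNIInit() takes a reference on the LNet stack and must be balanced
 * by exactly one LNetNIFini().
 */
#if 0
static int example_use_lnet(void)
{
	int rc = LNetNIInit(LNET_PID_LUSTRE);

	if (rc < 0)
		return rc;	/* interfaces failed to start */

	/* ... LNetGetId(), LNetGet()/LNetPut(), etc. ... */

	LNetNIFini();		/* drops the reference taken above */
	return 0;
}
#endif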
3026 * Stop LNet interfaces, routing, and forwarding.
3028 * Users must call this function once for each successful call to LNetNIInit().
3029 * Once the LNetNIFini() operation has been started, the results of pending
3030 * API operations are undefined.
3032 * \return always 0 for current implementation.
3037 mutex_lock(&the_lnet.ln_api_mutex);
3039 LASSERT(the_lnet.ln_refcount > 0);
3041 if (the_lnet.ln_refcount != 1) {
3042 the_lnet.ln_refcount--;
3044 LASSERT(!the_lnet.ln_niinit_self);
3048 lnet_router_debugfs_fini();
3049 lnet_monitor_thr_stop();
3050 lnet_peer_discovery_stop();
3051 lnet_push_target_fini();
3052 lnet_ping_target_fini();
3054 /* Teardown fns that use my own API functions BEFORE here */
3055 the_lnet.ln_refcount = 0;
3057 lnet_acceptor_stop();
3058 lnet_destroy_routes();
3059 lnet_shutdown_lndnets();
3063 mutex_unlock(&the_lnet.ln_api_mutex);
3066 EXPORT_SYMBOL(LNetNIFini);
3069 * Grabs the ni data from the ni structure and fills the out
3072 * \param[in] ni network interface structure
3073 * \param[out] cfg_ni NI config information
3074 * \param[out] tun network and LND tunables
3077 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
3078 struct lnet_ioctl_config_lnd_tunables *tun,
3079 struct lnet_ioctl_element_stats *stats,
3082 size_t min_size = 0;
3085 if (!ni || !cfg_ni || !tun || !nid_is_nid4(&ni->ni_nid))
3088 if (ni->ni_interface != NULL) {
3089 strncpy(cfg_ni->lic_ni_intf,
3091 sizeof(cfg_ni->lic_ni_intf));
3094 cfg_ni->lic_nid = lnet_nid_to_nid4(&ni->ni_nid);
3095 cfg_ni->lic_status = lnet_ni_get_status_locked(ni);
3096 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
3098 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
3101 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
3102 LNET_STATS_TYPE_SEND);
3103 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
3104 LNET_STATS_TYPE_RECV);
3105 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
3106 LNET_STATS_TYPE_DROP);
3110	 * tun->lt_tun will always be present, but to stay backwards
3111	 * compatible we need to handle the case where tun->lt_tun is
3112	 * smaller than what the kernel has, because it comes from an
3113	 * older version of a userspace program. In that case we copy
3114	 * only as much information as the available space allows.
3116 min_size = tun_size - sizeof(tun->lt_cmn);
3117 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
3119 /* copy over the cpts */
3120 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
3121 ni->ni_cpts == NULL) {
3122 for (i = 0; i < ni->ni_ncpts; i++)
3123 cfg_ni->lic_cpts[i] = i;
3126 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
3127 i < LNET_MAX_SHOW_NUM_CPT;
3129 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
3131 cfg_ni->lic_ncpts = ni->ni_ncpts;
3135 * NOTE: This is a legacy function left in the code to be backwards
3136 * compatible with older userspace programs. It should eventually be
3139 * Grabs the ni data from the ni structure and fills the out
3142 * \param[in] ni network interface structure
3143 * \param[out] config config information
3146 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
3147 struct lnet_ioctl_config_data *config)
3149 struct lnet_ioctl_net_config *net_config;
3150 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
3151 size_t min_size, tunable_size = 0;
3154 if (!ni || !config || !nid_is_nid4(&ni->ni_nid))
3157 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
3161 if (!ni->ni_interface)
3164 strncpy(net_config->ni_interface,
3166 sizeof(net_config->ni_interface));
3168 config->cfg_nid = lnet_nid_to_nid4(&ni->ni_nid);
3169 config->cfg_config_u.cfg_net.net_peer_timeout =
3170 ni->ni_net->net_tunables.lct_peer_timeout;
3171 config->cfg_config_u.cfg_net.net_max_tx_credits =
3172 ni->ni_net->net_tunables.lct_max_tx_credits;
3173 config->cfg_config_u.cfg_net.net_peer_tx_credits =
3174 ni->ni_net->net_tunables.lct_peer_tx_credits;
3175 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
3176 ni->ni_net->net_tunables.lct_peer_rtr_credits;
3178 net_config->ni_status = lnet_ni_get_status_locked(ni);
3181 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
3183 for (i = 0; i < num_cpts; i++)
3184 net_config->ni_cpts[i] = ni->ni_cpts[i];
3186 config->cfg_ncpts = num_cpts;
3190 * See if user land tools sent in a newer and larger version
3191 * of struct lnet_tunables than what the kernel uses.
3193 min_size = sizeof(*config) + sizeof(*net_config);
3195 if (config->cfg_hdr.ioc_len > min_size)
3196 tunable_size = config->cfg_hdr.ioc_len - min_size;
3198 /* Don't copy too much data to user space */
3199 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
3200 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
3202 if (lnd_cfg && min_size) {
3203 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
3204 config->cfg_config_u.cfg_net.net_interface_count = 1;
3206 /* Tell user land that kernel side has less data */
3207 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
3208 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
3209 config->cfg_hdr.ioc_len -= min_size;
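/*
 * Worked example (illustrative byte counts): if the tool's ioc_len
 * leaves room for 64 bytes of LND tunables but the kernel's
 * ni_lnd_tunables is only 48 bytes, min_size above is 48, only 48 bytes
 * are copied, and ioc_len is shrunk by the 16-byte difference so the
 * newer userspace can tell that the kernel returned the shorter, older
 * layout.
 */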
3215 lnet_get_ni_idx_locked(int idx)
3218 struct lnet_net *net;
3220 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3221 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3230 int lnet_get_net_healthv_locked(struct lnet_net *net)
3233 int best_healthv = 0;
3234 int healthv, ni_fatal;
3236 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3237 healthv = atomic_read(&ni->ni_healthv);
3238 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
3239 if (!ni_fatal && healthv > best_healthv)
3240 best_healthv = healthv;
3243 return best_healthv;
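/*
 * Example (illustrative values): a net with three NIs whose health
 * values are 900, 1000 and 700, where the NI at 1000 has
 * ni_fatal_error_on set, reports a net health of 900, the best value
 * among the NIs that are not in a fatal state.
 */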
3247 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
3250 struct lnet_net *net = mynet;
3253 * It is possible that the net has been cleaned out while there is
3254	 * a message being sent. This function accesses the net without
3255 * checking if the list is empty
3259 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
3261 if (list_empty(&net->net_ni_list))
3263 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
3269 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
3270 /* if you reached the end of the ni list and the net is
3271 * specified, then there are no more nis in that net */
3275 /* we reached the end of this net ni list. move to the
3277 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
3278 /* no more nets and no more NIs. */
3281 /* get the next net */
3282 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
3284 if (list_empty(&net->net_ni_list))
3286 /* get the ni on it */
3287 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
3293 if (list_empty(&prev->ni_netlist))
3296 /* there are more nis left */
3297 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
3303 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3308 int idx = config->cfg_count;
3310 cpt = lnet_net_lock_current();
3312 ni = lnet_get_ni_idx_locked(idx);
3317 lnet_fill_ni_info_legacy(ni, config);
3321 lnet_net_unlock(cpt);
3326 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3327 struct lnet_ioctl_config_lnd_tunables *tun,
3328 struct lnet_ioctl_element_stats *stats,
3335 if (!cfg_ni || !tun || !stats)
3338 cpt = lnet_net_lock_current();
3340 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3345 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3349 lnet_net_unlock(cpt);
3353 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3362 cpt = lnet_net_lock_current();
3364 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3367 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3371 lnet_net_unlock(cpt);
3376 static int lnet_add_net_common(struct lnet_net *net,
3377 struct lnet_ioctl_config_lnd_tunables *tun)
3379 struct lnet_handle_md ping_mdh;
3380 struct lnet_ping_buffer *pbuf;
3381 struct lnet_remotenet *rnet;
3387 lnet_net_lock(LNET_LOCK_EX);
3388 rnet = lnet_find_rnet_locked(net->net_id);
3389 lnet_net_unlock(LNET_LOCK_EX);
3391 * make sure that the net added doesn't invalidate the current
3392 * configuration LNet is keeping
3395 CERROR("Adding net %s will invalidate routing configuration\n",
3396 libcfs_net2str(net->net_id));
3402 * make sure you calculate the correct number of slots in the ping
3403 * buffer. Since the ping info is a flattened list of all the NIs,
3404	 * we should allocate enough slots to accommodate the number of NIs
3405 * which will be added.
3407 * since ni hasn't been configured yet, use
3408 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
3410 net_ni_count = lnet_get_net_ni_count_pre(net);
3412 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3413 net_ni_count + lnet_get_ni_count(),
3421 memcpy(&net->net_tunables,
3422 &tun->lt_cmn, sizeof(net->net_tunables));
3424 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3426 net_id = net->net_id;
3428 rc = lnet_startup_lndnet(net,
3429 (tun) ? &tun->lt_tun : NULL);
3433 lnet_net_lock(LNET_LOCK_EX);
3434 net = lnet_get_net_locked(net_id);
3437 /* apply the UDSPs */
3438 rc = lnet_udsp_apply_policies_on_net(net);
3440 CERROR("Failed to apply UDSPs on local net %s\n",
3441 libcfs_net2str(net->net_id));
3443 /* At this point we lost track of which NI was just added, so we
3444 * just re-apply the policies on all of the NIs on this net
3446 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3447 rc = lnet_udsp_apply_policies_on_ni(ni);
3449 CERROR("Failed to apply UDSPs on ni %s\n",
3450 libcfs_nidstr(&ni->ni_nid));
3452 lnet_net_unlock(LNET_LOCK_EX);
3455 * Start the acceptor thread if this is the first network
3456 * being added that requires the thread.
3458 if (net->net_lnd->lnd_accept) {
3459 rc = lnet_acceptor_start();
3461 /* shutdown the net that we just started */
3462 CERROR("Failed to start up acceptor thread\n");
3463 lnet_shutdown_lndnet(net);
3468 lnet_net_lock(LNET_LOCK_EX);
3469 lnet_peer_net_added(net);
3470 lnet_net_unlock(LNET_LOCK_EX);
3472 lnet_ping_target_update(pbuf, ping_mdh);
3477 lnet_ping_md_unlink(pbuf, &ping_mdh);
3478 lnet_ping_buffer_decref(pbuf);
3483 lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
3486 if (!tun->lt_cmn.lct_peer_timeout)
3487 tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
3488 if (!tun->lt_cmn.lct_peer_tx_credits)
3489 tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
3490 if (!tun->lt_cmn.lct_max_tx_credits)
3491 tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
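/*
 * Example: a tunables block arriving from userspace with the three
 * common fields left at 0 leaves this function with
 * lct_peer_timeout = DEFAULT_PEER_TIMEOUT,
 * lct_peer_tx_credits = DEFAULT_PEER_CREDITS and
 * lct_max_tx_credits = DEFAULT_CREDITS; fields that were already
 * non-zero are preserved as given.
 */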
3495 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3496 struct lnet_ioctl_config_lnd_tunables *tun)
3498 struct lnet_net *net;
3501 LIST_HEAD(net_head);
3503 rc = lnet_parse_ip2nets(&nets, ip2nets);
3507 rc = lnet_parse_networks(&net_head, nets);
3511 lnet_set_tune_defaults(tun);
3513 mutex_lock(&the_lnet.ln_api_mutex);
3514 while (!list_empty(&net_head)) {
3515 net = list_entry(net_head.next, struct lnet_net, net_list);
3516 list_del_init(&net->net_list);
3517 rc = lnet_add_net_common(net, tun);
3523 mutex_unlock(&the_lnet.ln_api_mutex);
3525 while (!list_empty(&net_head)) {
3526 net = list_entry(net_head.next, struct lnet_net, net_list);
3527 list_del_init(&net->net_list);
3533 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
3535 struct lnet_net *net;
3537 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3539 __u32 net_id, lnd_type;
3541 /* get the tunables if they are available */
3542 if (conf->lic_cfg_hdr.ioc_len >=
3543 sizeof(*conf) + sizeof(*tun))
3544 tun = (struct lnet_ioctl_config_lnd_tunables *)
3547 /* handle legacy ip2nets from DLC */
3548 if (conf->lic_legacy_ip2nets[0] != '\0')
3549 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3552 net_id = LNET_NIDNET(conf->lic_nid);
3553 lnd_type = LNET_NETTYP(net_id);
3555 if (!libcfs_isknown_lnd(lnd_type)) {
3556 CERROR("No valid net and lnd information provided\n");
3560 net = lnet_net_alloc(net_id, NULL);
3564 for (i = 0; i < conf->lic_ncpts; i++) {
3565 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
3569 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3574 lnet_set_tune_defaults(tun);
3576 mutex_lock(&the_lnet.ln_api_mutex);
3578 rc = lnet_add_net_common(net, tun);
3580 mutex_unlock(&the_lnet.ln_api_mutex);
3585 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
3587 struct lnet_net *net;
3589 __u32 net_id = LNET_NIDNET(conf->lic_nid);
3590 struct lnet_ping_buffer *pbuf;
3591 struct lnet_handle_md ping_mdh;
3596 /* don't allow userspace to shutdown the LOLND */
3597 if (LNET_NETTYP(net_id) == LOLND)
3600 mutex_lock(&the_lnet.ln_api_mutex);
3604 net = lnet_get_net_locked(net_id);
3606 CERROR("net %s not found\n",
3607 libcfs_net2str(net_id));
3612 addr = LNET_NIDADDR(conf->lic_nid);
3614 /* remove the entire net */
3615 net_count = lnet_get_net_ni_count_locked(net);
3619 /* create and link a new ping info, before removing the old one */
3620 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3621 lnet_get_ni_count() - net_count,
3624 goto unlock_api_mutex;
3626 lnet_shutdown_lndnet(net);
3628 lnet_acceptor_stop();
3630 lnet_ping_target_update(pbuf, ping_mdh);
3632 goto unlock_api_mutex;
3635 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
3637 CERROR("nid %s not found\n",
3638 libcfs_nid2str(conf->lic_nid));
3643 net_count = lnet_get_net_ni_count_locked(net);
3647 /* create and link a new ping info, before removing the old one */
3648 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3649 lnet_get_ni_count() - 1, false);
3651 goto unlock_api_mutex;
3653 lnet_shutdown_lndni(ni);
3655 lnet_acceptor_stop();
3657 lnet_ping_target_update(pbuf, ping_mdh);
3659 /* check if the net is empty and remove it if it is */
3661 lnet_shutdown_lndnet(net);
3663 goto unlock_api_mutex;
3668 mutex_unlock(&the_lnet.ln_api_mutex);
3674 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3675 * They are only expected to be called for unique networks.
3676 * That can happen as a result of older DLC library
3677 * calls. Multi-Rail DLC and beyond no longer uses these APIs.
3680 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3682 struct lnet_net *net;
3683 LIST_HEAD(net_head);
3685 struct lnet_ioctl_config_lnd_tunables tun;
3686 const char *nets = conf->cfg_config_u.cfg_net.net_intf;
3688 /* Create a net/ni structures for the network string */
3689 rc = lnet_parse_networks(&net_head, nets);
3691 return rc == 0 ? -EINVAL : rc;
3693 mutex_lock(&the_lnet.ln_api_mutex);
3696 rc = -EINVAL; /* only add one network per call */
3697 goto out_unlock_clean;
3700 net = list_entry(net_head.next, struct lnet_net, net_list);
3701 list_del_init(&net->net_list);
3703 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3705 memset(&tun, 0, sizeof(tun));
3707 tun.lt_cmn.lct_peer_timeout =
3708 (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
3709 conf->cfg_config_u.cfg_net.net_peer_timeout;
3710 tun.lt_cmn.lct_peer_tx_credits =
3711 (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
3712 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3713 tun.lt_cmn.lct_peer_rtr_credits =
3714 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3715 tun.lt_cmn.lct_max_tx_credits =
3716 (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
3717 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3719 rc = lnet_add_net_common(net, &tun);
3722 mutex_unlock(&the_lnet.ln_api_mutex);
3723 while (!list_empty(&net_head)) {
3724 /* net_head list is empty in success case */
3725 net = list_entry(net_head.next, struct lnet_net, net_list);
3726 list_del_init(&net->net_list);
3733 lnet_dyn_del_net(__u32 net_id)
3735 struct lnet_net *net;
3736 struct lnet_ping_buffer *pbuf;
3737 struct lnet_handle_md ping_mdh;
3741 /* don't allow userspace to shutdown the LOLND */
3742 if (LNET_NETTYP(net_id) == LOLND)
3745 mutex_lock(&the_lnet.ln_api_mutex);
3749 net = lnet_get_net_locked(net_id);
3756 net_ni_count = lnet_get_net_ni_count_locked(net);
3760 /* create and link a new ping info, before removing the old one */
3761 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3762 lnet_get_ni_count() - net_ni_count, false);
3766 lnet_shutdown_lndnet(net);
3768 lnet_acceptor_stop();
3770 lnet_ping_target_update(pbuf, ping_mdh);
3773 mutex_unlock(&the_lnet.ln_api_mutex);
3778 void lnet_incr_dlc_seq(void)
3780 atomic_inc(&lnet_dlc_seq_no);
3783 __u32 lnet_get_dlc_seq_locked(void)
3785 return atomic_read(&lnet_dlc_seq_no);
3789 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3791 struct lnet_net *net;
3794 lnet_net_lock(LNET_LOCK_EX);
3795 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3796 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3797 if (all || (nid_is_nid4(&ni->ni_nid) &&
3798 lnet_nid_to_nid4(&ni->ni_nid) == nid)) {
3799 atomic_set(&ni->ni_healthv, value);
3800 if (list_empty(&ni->ni_recovery) &&
3801 value < LNET_MAX_HEALTH_VALUE) {
3802 CERROR("manually adding local NI %s to recovery\n",
3803 libcfs_nidstr(&ni->ni_nid));
3804 list_add_tail(&ni->ni_recovery,
3805 &the_lnet.ln_mt_localNIRecovq);
3806 lnet_ni_addref_locked(ni, 0);
3809 lnet_net_unlock(LNET_LOCK_EX);
3815 lnet_net_unlock(LNET_LOCK_EX);
3819 lnet_ni_set_conns_per_peer(lnet_nid_t nid, int value, bool all)
3821 struct lnet_net *net;
3824 lnet_net_lock(LNET_LOCK_EX);
3825 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3826 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3827 if (lnet_nid_to_nid4(&ni->ni_nid) != nid && !all)
3829 if (LNET_NETTYP(net->net_id) == SOCKLND)
3830 ni->ni_lnd_tunables.lnd_tun_u.lnd_sock.lnd_conns_per_peer = value;
3831 else if (LNET_NETTYP(net->net_id) == O2IBLND)
3832 ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib.lnd_conns_per_peer = value;
3834 lnet_net_unlock(LNET_LOCK_EX);
3839 lnet_net_unlock(LNET_LOCK_EX);
3843 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3847 lnet_nid_t nid = stats->hlni_nid;
3849 cpt = lnet_net_lock_current();
3850 ni = lnet_nid2ni_locked(nid, cpt);
3857 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3858 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3859 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3860 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3861 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3862 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3863 stats->hlni_fatal_error = atomic_read(&ni->ni_fatal_error_on);
3864 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3865 stats->hlni_ping_count = ni->ni_ping_count;
3866 stats->hlni_next_ping = ni->ni_next_ping;
3869 lnet_net_unlock(cpt);
3875 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3880 lnet_net_lock(LNET_LOCK_EX);
3881 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
3882 if (!nid_is_nid4(&ni->ni_nid))
3884 list->rlst_nid_array[i] = lnet_nid_to_nid4(&ni->ni_nid);
3886 if (i >= LNET_MAX_SHOW_NUM_NID)
3889 lnet_net_unlock(LNET_LOCK_EX);
3890 list->rlst_num_nids = i;
3896 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3898 struct lnet_peer_ni *lpni;
3901 lnet_net_lock(LNET_LOCK_EX);
3902 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
3903 list->rlst_nid_array[i] = lnet_nid_to_nid4(&lpni->lpni_nid);
3905 if (i >= LNET_MAX_SHOW_NUM_NID)
3908 lnet_net_unlock(LNET_LOCK_EX);
3909 list->rlst_num_nids = i;
3915 * LNet ioctl handler.
3919 LNetCtl(unsigned int cmd, void *arg)
3921 struct libcfs_ioctl_data *data = arg;
3922 struct lnet_ioctl_config_data *config;
3923 struct lnet_process_id id = {0};
3927 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3928 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
3931 case IOC_LIBCFS_GET_NI:
3932 rc = LNetGetId(data->ioc_count, &id);
3933 data->ioc_nid = id.nid;
3936 case IOC_LIBCFS_FAIL_NID:
3937 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3939 case IOC_LIBCFS_ADD_ROUTE: {
3940 /* default router sensitivity to 1 */
3941 unsigned int sensitivity = 1;
3944 if (config->cfg_hdr.ioc_len < sizeof(*config))
3947 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
3949 config->cfg_config_u.cfg_route.rtr_sensitivity;
3952 mutex_lock(&the_lnet.ln_api_mutex);
3953 rc = lnet_add_route(config->cfg_net,
3954 config->cfg_config_u.cfg_route.rtr_hop,
3956 config->cfg_config_u.cfg_route.
3957 rtr_priority, sensitivity);
3958 mutex_unlock(&the_lnet.ln_api_mutex);
3962 case IOC_LIBCFS_DEL_ROUTE:
3965 if (config->cfg_hdr.ioc_len < sizeof(*config))
3968 mutex_lock(&the_lnet.ln_api_mutex);
3969 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3970 mutex_unlock(&the_lnet.ln_api_mutex);
3973 case IOC_LIBCFS_GET_ROUTE:
3976 if (config->cfg_hdr.ioc_len < sizeof(*config))
3979 mutex_lock(&the_lnet.ln_api_mutex);
3980 rc = lnet_get_route(config->cfg_count,
3982 &config->cfg_config_u.cfg_route.rtr_hop,
3984 &config->cfg_config_u.cfg_route.rtr_flags,
3985 &config->cfg_config_u.cfg_route.
3987 &config->cfg_config_u.cfg_route.
3989 mutex_unlock(&the_lnet.ln_api_mutex);
3992 case IOC_LIBCFS_GET_LOCAL_NI: {
3993 struct lnet_ioctl_config_ni *cfg_ni;
3994 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3995 struct lnet_ioctl_element_stats *stats;
4000 /* get the tunables if they are available */
4001 if (cfg_ni->lic_cfg_hdr.ioc_len <
4002 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
4005 stats = (struct lnet_ioctl_element_stats *)
4007 tun = (struct lnet_ioctl_config_lnd_tunables *)
4008 (cfg_ni->lic_bulk + sizeof(*stats));
4010 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
4013 mutex_lock(&the_lnet.ln_api_mutex);
4014 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
4015 mutex_unlock(&the_lnet.ln_api_mutex);
4019 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
4020 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
4022 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
4025 mutex_lock(&the_lnet.ln_api_mutex);
4026 rc = lnet_get_ni_stats(msg_stats);
4027 mutex_unlock(&the_lnet.ln_api_mutex);
4032 case IOC_LIBCFS_GET_NET: {
4033 size_t total = sizeof(*config) +
4034 sizeof(struct lnet_ioctl_net_config);
4037 if (config->cfg_hdr.ioc_len < total)
4040 mutex_lock(&the_lnet.ln_api_mutex);
4041 rc = lnet_get_net_config(config);
4042 mutex_unlock(&the_lnet.ln_api_mutex);
4046 case IOC_LIBCFS_GET_LNET_STATS:
4048 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
4050 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
4053 mutex_lock(&the_lnet.ln_api_mutex);
4054 rc = lnet_counters_get(&lnet_stats->st_cntrs);
4055 mutex_unlock(&the_lnet.ln_api_mutex);
4059 case IOC_LIBCFS_RESET_LNET_STATS:
4061 mutex_lock(&the_lnet.ln_api_mutex);
4062 lnet_counters_reset();
4063 mutex_unlock(&the_lnet.ln_api_mutex);
4067 case IOC_LIBCFS_CONFIG_RTR:
4070 if (config->cfg_hdr.ioc_len < sizeof(*config))
4073 mutex_lock(&the_lnet.ln_api_mutex);
4074 if (config->cfg_config_u.cfg_buffers.buf_enable) {
4075 rc = lnet_rtrpools_enable();
4076 mutex_unlock(&the_lnet.ln_api_mutex);
4079 lnet_rtrpools_disable();
4080 mutex_unlock(&the_lnet.ln_api_mutex);
4083 case IOC_LIBCFS_ADD_BUF:
4086 if (config->cfg_hdr.ioc_len < sizeof(*config))
4089 mutex_lock(&the_lnet.ln_api_mutex);
4090 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
4092 config->cfg_config_u.cfg_buffers.
4094 config->cfg_config_u.cfg_buffers.
4096 mutex_unlock(&the_lnet.ln_api_mutex);
4099 case IOC_LIBCFS_SET_NUMA_RANGE: {
4100 struct lnet_ioctl_set_value *numa;
4102 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4104 lnet_net_lock(LNET_LOCK_EX);
4105 lnet_numa_range = numa->sv_value;
4106 lnet_net_unlock(LNET_LOCK_EX);
4110 case IOC_LIBCFS_GET_NUMA_RANGE: {
4111 struct lnet_ioctl_set_value *numa;
4113 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4115 numa->sv_value = lnet_numa_range;
4119 case IOC_LIBCFS_GET_BUF: {
4120 struct lnet_ioctl_pool_cfg *pool_cfg;
4121 size_t total = sizeof(*config) + sizeof(*pool_cfg);
4125 if (config->cfg_hdr.ioc_len < total)
4128 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
4130 mutex_lock(&the_lnet.ln_api_mutex);
4131 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
4132 mutex_unlock(&the_lnet.ln_api_mutex);
4136 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
4137 struct lnet_ioctl_local_ni_hstats *stats = arg;
4139 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
4142 mutex_lock(&the_lnet.ln_api_mutex);
4143 rc = lnet_get_local_ni_hstats(stats);
4144 mutex_unlock(&the_lnet.ln_api_mutex);
4149 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
4150 struct lnet_ioctl_recovery_list *list = arg;
4151 if (list->rlst_hdr.ioc_len < sizeof(*list))
4154 mutex_lock(&the_lnet.ln_api_mutex);
4155 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
4156 rc = lnet_get_local_ni_recovery_list(list);
4158 rc = lnet_get_peer_ni_recovery_list(list);
4159 mutex_unlock(&the_lnet.ln_api_mutex);
4163 case IOC_LIBCFS_ADD_PEER_NI: {
4164 struct lnet_ioctl_peer_cfg *cfg = arg;
4166 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4169 mutex_lock(&the_lnet.ln_api_mutex);
4170 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
4172 cfg->prcfg_mr, false);
4173 mutex_unlock(&the_lnet.ln_api_mutex);
4177 case IOC_LIBCFS_DEL_PEER_NI: {
4178 struct lnet_ioctl_peer_cfg *cfg = arg;
4180 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4183 mutex_lock(&the_lnet.ln_api_mutex);
4184 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
4185 cfg->prcfg_cfg_nid);
4186 mutex_unlock(&the_lnet.ln_api_mutex);
4190 case IOC_LIBCFS_GET_PEER_INFO: {
4191 struct lnet_ioctl_peer *peer_info = arg;
4193 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
4196 mutex_lock(&the_lnet.ln_api_mutex);
4197 rc = lnet_get_peer_ni_info(
4198 peer_info->pr_count,
4200 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
4201 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
4202 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
4203 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
4204 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
4205 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
4206 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
4207 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
4208 mutex_unlock(&the_lnet.ln_api_mutex);
4212 case IOC_LIBCFS_GET_PEER_NI: {
4213 struct lnet_ioctl_peer_cfg *cfg = arg;
4215 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4218 mutex_lock(&the_lnet.ln_api_mutex);
4219 rc = lnet_get_peer_info(cfg,
4220 (void __user *)cfg->prcfg_bulk);
4221 mutex_unlock(&the_lnet.ln_api_mutex);
4225 case IOC_LIBCFS_GET_PEER_LIST: {
4226 struct lnet_ioctl_peer_cfg *cfg = arg;
4228 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4231 mutex_lock(&the_lnet.ln_api_mutex);
4232 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
4233 (struct lnet_process_id __user *)cfg->prcfg_bulk);
4234 mutex_unlock(&the_lnet.ln_api_mutex);
4238 case IOC_LIBCFS_SET_HEALHV: {
4239 struct lnet_ioctl_reset_health_cfg *cfg = arg;
4241 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
4243 if (cfg->rh_value < 0 ||
4244 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
4245 value = LNET_MAX_HEALTH_VALUE;
4247 value = cfg->rh_value;
4248 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
4249 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
4250 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
4251 mutex_lock(&the_lnet.ln_api_mutex);
4252 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
4253 lnet_ni_set_healthv(cfg->rh_nid, value,
4256 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
4258 mutex_unlock(&the_lnet.ln_api_mutex);
4262 case IOC_LIBCFS_SET_CONNS_PER_PEER: {
4263 struct lnet_ioctl_reset_conns_per_peer_cfg *cfg = arg;
4266 if (cfg->rcpp_hdr.ioc_len < sizeof(*cfg))
4268 if (cfg->rcpp_value < 0)
4271 value = cfg->rcpp_value;
4273 "Setting conns_per_peer to %d for %s. all = %d\n",
4274 value, libcfs_nid2str(cfg->rcpp_nid), cfg->rcpp_all);
4275 mutex_lock(&the_lnet.ln_api_mutex);
4276 lnet_ni_set_conns_per_peer(cfg->rcpp_nid, value, cfg->rcpp_all);
4277 mutex_unlock(&the_lnet.ln_api_mutex);
4281 case IOC_LIBCFS_NOTIFY_ROUTER: {
4282 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
4284 /* The deadline passed in by the user should be some time in
4285 * seconds in the future since the UNIX epoch. We have to map
4286 * that deadline to the wall clock.
4288 deadline += ktime_get_seconds();
4289 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, false,
4293 case IOC_LIBCFS_LNET_DIST:
4294 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
4295 if (rc < 0 && rc != -EHOSTUNREACH)
4298 data->ioc_u32[0] = rc;
4301 case IOC_LIBCFS_TESTPROTOCOMPAT:
4302 the_lnet.ln_testprotocompat = data->ioc_flags;
4305 case IOC_LIBCFS_LNET_FAULT:
4306 return lnet_fault_ctl(data->ioc_flags, data);
4308 case IOC_LIBCFS_PING: {
4309 signed long timeout;
4311 id.nid = data->ioc_nid;
4312 id.pid = data->ioc_u32[0];
4314		/* If timeout is zero, negative, or too large then use the default of 3 minutes */
4315 if (((s32)data->ioc_u32[1] <= 0) ||
4316 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4317 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4319 timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
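/*
 * Worked example (illustrative request): ioc_u32[1] = 5000 asks for a
 * 5000 ms timeout and is converted via nsecs_to_jiffies(5000 *
 * NSEC_PER_MSEC) to five seconds' worth of jiffies; a value of 0 or
 * anything above DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC falls back to
 * cfs_time_seconds(DEFAULT_PEER_TIMEOUT), i.e. the 3 minute default.
 */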
4321 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
4322 data->ioc_plen1 / sizeof(struct lnet_process_id));
4327 data->ioc_count = rc;
4331 case IOC_LIBCFS_PING_PEER: {
4332 struct lnet_ioctl_ping_data *ping = arg;
4333 struct lnet_peer *lp;
4334 signed long timeout;
4336		/* If timeout is zero, negative, or too large then use the default of 3 minutes */
4337 if (((s32)ping->op_param) <= 0 ||
4338 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4339 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4341 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
4343 rc = lnet_ping(ping->ping_id, timeout,
4349 mutex_lock(&the_lnet.ln_api_mutex);
4350 lp = lnet_find_peer(ping->ping_id.nid);
4353 lnet_nid_to_nid4(&lp->lp_primary_nid);
4354 ping->mr_info = lnet_peer_is_multi_rail(lp);
4355 lnet_peer_decref_locked(lp);
4357 mutex_unlock(&the_lnet.ln_api_mutex);
4359 ping->ping_count = rc;
4363 case IOC_LIBCFS_DISCOVER: {
4364 struct lnet_ioctl_ping_data *discover = arg;
4365 struct lnet_peer *lp;
4367 rc = lnet_discover(discover->ping_id, discover->op_param,
4369 discover->ping_count);
4373 mutex_lock(&the_lnet.ln_api_mutex);
4374 lp = lnet_find_peer(discover->ping_id.nid);
4376 discover->ping_id.nid =
4377 lnet_nid_to_nid4(&lp->lp_primary_nid);
4378 discover->mr_info = lnet_peer_is_multi_rail(lp);
4379 lnet_peer_decref_locked(lp);
4381 mutex_unlock(&the_lnet.ln_api_mutex);
4383 discover->ping_count = rc;
4387 case IOC_LIBCFS_ADD_UDSP: {
4388 struct lnet_ioctl_udsp *ioc_udsp = arg;
4389 __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
4391 mutex_lock(&the_lnet.ln_api_mutex);
4392 rc = lnet_udsp_demarshal_add(arg, bulk_size);
4394 rc = lnet_udsp_apply_policies(NULL, false);
4395 CDEBUG(D_NET, "policy application returned %d\n", rc);
4398 mutex_unlock(&the_lnet.ln_api_mutex);
4403 case IOC_LIBCFS_DEL_UDSP: {
4404 struct lnet_ioctl_udsp *ioc_udsp = arg;
4405 int idx = ioc_udsp->iou_idx;
4407 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4410 mutex_lock(&the_lnet.ln_api_mutex);
4411 rc = lnet_udsp_del_policy(idx);
4413 rc = lnet_udsp_apply_policies(NULL, false);
4414 CDEBUG(D_NET, "policy re-application returned %d\n",
4418 mutex_unlock(&the_lnet.ln_api_mutex);
4423 case IOC_LIBCFS_GET_UDSP_SIZE: {
4424 struct lnet_ioctl_udsp *ioc_udsp = arg;
4425 struct lnet_udsp *udsp;
4427 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4432 mutex_lock(&the_lnet.ln_api_mutex);
4433 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4437		/* On entry iou_idx holds the index of the UDSP whose
4438		 * size is being requested. On return iou_idx will
4439		 * hold the size of the UDSP found at the passed index. */
4442 ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
4443 if (ioc_udsp->iou_idx < 0)
4446 mutex_unlock(&the_lnet.ln_api_mutex);
4451 case IOC_LIBCFS_GET_UDSP: {
4452 struct lnet_ioctl_udsp *ioc_udsp = arg;
4453 struct lnet_udsp *udsp;
4455 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4460 mutex_lock(&the_lnet.ln_api_mutex);
4461 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4465 rc = lnet_udsp_marshal(udsp, ioc_udsp);
4466 mutex_unlock(&the_lnet.ln_api_mutex);
4471 case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
4472 struct lnet_ioctl_construct_udsp_info *info = arg;
4474 if (info->cud_hdr.ioc_len < sizeof(*info))
4477 CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
4478 libcfs_nid2str(info->cud_nid));
4480 mutex_lock(&the_lnet.ln_api_mutex);
4481 lnet_udsp_get_construct_info(info);
4482 mutex_unlock(&the_lnet.ln_api_mutex);
4488 ni = lnet_net2ni_addref(data->ioc_net);
4492 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
4495 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
4502 EXPORT_SYMBOL(LNetCtl);
4504 void LNetDebugPeer(struct lnet_process_id id)
4506 lnet_debug_peer(id.nid);
4508 EXPORT_SYMBOL(LNetDebugPeer);
4511 * Determine if the specified peer \a nid is on the local node.
4513 * \param nid peer nid to check
4515 * \retval true If peer NID is on the local node.
4516 * \retval false If peer NID is not on the local node.
4518 bool LNetIsPeerLocal(lnet_nid_t nid)
4520 struct lnet_net *net;
4524 cpt = lnet_net_lock_current();
4525 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4526 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4527 if (lnet_nid_to_nid4(&ni->ni_nid) == nid) {
4528 lnet_net_unlock(cpt);
4533 lnet_net_unlock(cpt);
4537 EXPORT_SYMBOL(LNetIsPeerLocal);
4540 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
4541 * Note that all interfaces share a same PID, as requested by LNetNIInit().
4543 * \param index Index of the interface to look up.
4544 * \param id On successful return, this location will hold the
4545 * struct lnet_process_id ID of the interface.
4547 * \retval 0 If an interface exists at \a index.
4548 * \retval -ENOENT If no interface has been found.
4551 LNetGetId(unsigned int index, struct lnet_process_id *id)
4554 struct lnet_net *net;
4558 LASSERT(the_lnet.ln_refcount > 0);
4560 cpt = lnet_net_lock_current();
4562 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4563 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4564 if (!nid_is_nid4(&ni->ni_nid))
4565 /* FIXME this needs to be handled */
4570 id->nid = lnet_nid_to_nid4(&ni->ni_nid);
4571 id->pid = the_lnet.ln_pid;
4577 lnet_net_unlock(cpt);
4580 EXPORT_SYMBOL(LNetGetId);
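/*
 * Illustrative sketch only (hypothetical helper): local interfaces can
 * be enumerated by calling LNetGetId() with increasing indices until it
 * returns -ENOENT.
 */
#if 0
static void example_dump_local_ids(void)
{
	struct lnet_process_id id;
	unsigned int i;

	for (i = 0; LNetGetId(i, &id) == 0; i++)
		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
}
#endif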
4585 struct lnet_handle_md mdh;
4586 struct completion completion;
4590 lnet_ping_event_handler(struct lnet_event *event)
4592 struct ping_data *pd = event->md_user_ptr;
4594 CDEBUG(D_NET, "ping event (%d %d)%s\n",
4595 event->type, event->status,
4596 event->unlinked ? " unlinked" : "");
4598 if (event->status) {
4600 pd->rc = event->status;
4601 } else if (event->type == LNET_EVENT_REPLY) {
4603 pd->rc = event->mlength;
4605 if (event->unlinked)
4606 complete(&pd->completion);
4609 static int lnet_ping(struct lnet_process_id id, signed long timeout,
4610 struct lnet_process_id __user *ids, int n_ids)
4612 struct lnet_md md = { NULL };
4613 struct ping_data pd = { 0 };
4614 struct lnet_ping_buffer *pbuf;
4615 struct lnet_process_id tmpid;
4621 /* n_ids limit is arbitrary */
4622 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
4626 * if the user buffer has more space than the lnet_interfaces_max
4627 * then only fill it up to lnet_interfaces_max
4629 if (n_ids > lnet_interfaces_max)
4630 n_ids = lnet_interfaces_max;
4632 if (id.pid == LNET_PID_ANY)
4633 id.pid = LNET_PID_LUSTRE;
4635 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
4639 /* initialize md content */
4640 md.start = &pbuf->pb_info;
4641 md.length = LNET_PING_INFO_SIZE(n_ids);
4642 md.threshold = 2; /* GET/REPLY */
4644 md.options = LNET_MD_TRUNCATE;
4646 md.handler = lnet_ping_event_handler;
4648 init_completion(&pd.completion);
4650 rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
4652 CERROR("Can't bind MD: %d\n", rc);
4653 goto fail_ping_buffer_decref;
4656 rc = LNetGet(LNET_NID_ANY, pd.mdh, id,
4657 LNET_RESERVED_PORTAL,
4658 LNET_PROTO_PING_MATCHBITS, 0, false);
4661 /* Don't CERROR; this could be deliberate! */
4662 rc2 = LNetMDUnlink(pd.mdh);
4665 /* NB must wait for the UNLINK event below... */
4668 if (wait_for_completion_timeout(&pd.completion, timeout) == 0) {
4669 /* Ensure completion in finite time... */
4670 LNetMDUnlink(pd.mdh);
4671 wait_for_completion(&pd.completion);
4675 goto fail_ping_buffer_decref;
4679 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
4681 rc = -EPROTO; /* if I can't parse... */
4684 CERROR("%s: ping info too short %d\n",
4685 libcfs_id2str(id), nob);
4686 goto fail_ping_buffer_decref;
4689 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
4690 lnet_swap_pinginfo(pbuf);
4691 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
4692 CERROR("%s: Unexpected magic %08x\n",
4693 libcfs_id2str(id), pbuf->pb_info.pi_magic);
4694 goto fail_ping_buffer_decref;
4697 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
4698 CERROR("%s: ping w/o NI status: 0x%x\n",
4699 libcfs_id2str(id), pbuf->pb_info.pi_features);
4700 goto fail_ping_buffer_decref;
4703 if (nob < LNET_PING_INFO_SIZE(0)) {
4704 CERROR("%s: Short reply %d(%d min)\n",
4706 nob, (int)LNET_PING_INFO_SIZE(0));
4707 goto fail_ping_buffer_decref;
4710 if (pbuf->pb_info.pi_nnis < n_ids)
4711 n_ids = pbuf->pb_info.pi_nnis;
4713 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
4714 CERROR("%s: Short reply %d(%d expected)\n",
4716 nob, (int)LNET_PING_INFO_SIZE(n_ids));
4717 goto fail_ping_buffer_decref;
4720 rc = -EFAULT; /* if I segv in copy_to_user()... */
4722 memset(&tmpid, 0, sizeof(tmpid));
4723 for (i = 0; i < n_ids; i++) {
4724 tmpid.pid = pbuf->pb_info.pi_pid;
4725 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
4726 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
4727 goto fail_ping_buffer_decref;
4729 rc = pbuf->pb_info.pi_nnis;
4731 fail_ping_buffer_decref:
4732 lnet_ping_buffer_decref(pbuf);
4737 lnet_discover(struct lnet_process_id id, __u32 force,
4738 struct lnet_process_id __user *ids, int n_ids)
4740 struct lnet_peer_ni *lpni;
4741 struct lnet_peer_ni *p;
4742 struct lnet_peer *lp;
4743 struct lnet_process_id *buf;
4749 id.nid == LNET_NID_ANY)
4752 if (id.pid == LNET_PID_ANY)
4753 id.pid = LNET_PID_LUSTRE;
4756 * If the user buffer has more space than the lnet_interfaces_max,
4757 * then only fill it up to lnet_interfaces_max.
4759 if (n_ids > lnet_interfaces_max)
4760 n_ids = lnet_interfaces_max;
4762 CFS_ALLOC_PTR_ARRAY(buf, n_ids);
4766 cpt = lnet_net_lock_current();
4767 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
4774 * Clearing the NIDS_UPTODATE flag ensures the peer will
4775 * be discovered, provided discovery has not been disabled.
4777 lp = lpni->lpni_peer_net->lpn_peer;
4778 spin_lock(&lp->lp_lock);
4779 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
4780 /* If the force flag is set, force a PING and PUSH as well. */
4782 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
4783 spin_unlock(&lp->lp_lock);
4784 rc = lnet_discover_peer_locked(lpni, cpt, true);
4788 /* The lpni (or lp) for this NID may have changed and our ref is
4789 * the only thing keeping the old one around. Release the ref
4790 * and lookup the lpni again
4792 lnet_peer_ni_decref_locked(lpni);
4793 lpni = lnet_find_peer_ni_locked(id.nid);
4798 lp = lpni->lpni_peer_net->lpn_peer;
4802 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
4803 buf[i].pid = id.pid;
4804 buf[i].nid = lnet_nid_to_nid4(&p->lpni_nid);
4811 lnet_peer_ni_decref_locked(lpni);
4813 lnet_net_unlock(cpt);
4816 if (copy_to_user(ids, buf, rc * sizeof(*buf)))
4818 CFS_FREE_PTR_ARRAY(buf, n_ids);
4824 * Retrieve peer discovery status.
4826 * \retval 1 if lnet_peer_discovery_disabled is 0
4827 * \retval 0 if lnet_peer_discovery_disabled is 1
4830 LNetGetPeerDiscoveryStatus(void)
4832 return !lnet_peer_discovery_disabled;
4834 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);