/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE
/*
 * Initialize ln_api_mutex statically, since it needs to be used in
 * the discovery_set callback. That module parameter callback can be
 * called before module init completes, so the mutex must be ready for
 * use by then.
 */
struct lnet the_lnet = {
    .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
};  /* THE state of the network */
EXPORT_SYMBOL(the_lnet);
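/*
 * Concretely (an illustrative scenario, not code from this excerpt):
 * booting with "lnet.lnet_peer_discovery_disabled=1" on the kernel
 * command line makes the kernel invoke discovery_set() while the module
 * is still loading, before any runtime mutex_init() could have run;
 * the static __MUTEX_INITIALIZER above keeps that callback safe.
 */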
static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");

static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding = false;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
         "use_tcp_bonding parameter has been deprecated");

unsigned int lnet_numa_range = 0;
module_param(lnet_numa_range, uint, 0444);
MODULE_PARM_DESC(lnet_numa_range,
         "NUMA range to consider during Multi-Rail selection");
/*
 * lnet_health_sensitivity determines by how much we decrement the health
 * value on a send error. The value defaults to 100, which means an
 * interface's health is decremented by 100 points on every failure.
 */
unsigned int lnet_health_sensitivity = 100;
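/*
 * Worked example (LNET_MAX_HEALTH_VALUE is not shown in this excerpt;
 * assuming it is 1000): at the default sensitivity of 100, ten
 * consecutive send failures take an interface from full health to 0,
 * at which point it becomes a candidate for the recovery handling
 * driven by lnet_recovery_interval below.
 */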
static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_health_sensitivity = {
    .set = sensitivity_set,
    .get = param_get_int,
};
#define param_check_health_sensitivity(name, p) \
        __param_check(name, p, int)
module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
          &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_health_sensitivity,
         "Value to decrement the health value by on error");
/*
 * lnet_recovery_interval determines how often we should perform recovery
 * on unhealthy interfaces.
 */
unsigned int lnet_recovery_interval = 1;
static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_recovery_interval = {
    .set = recovery_interval_set,
    .get = param_get_int,
};
#define param_check_recovery_interval(name, p) \
        __param_check(name, p, int)
module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
          &lnet_recovery_interval, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_recovery_interval,
         "Interval to recover unhealthy interfaces in seconds");
unsigned int lnet_recovery_limit;
module_param(lnet_recovery_limit, uint, 0644);
MODULE_PARM_DESC(lnet_recovery_limit,
         "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");
static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_interfaces_max = {
    .set = intf_max_set,
    .get = param_get_int,
};

#define param_check_interfaces_max(name, p) \
        __param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_interfaces_max, interfaces_max, 0644);
#else
module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
          &param_ops_interfaces_max, 0644);
#endif

MODULE_PARM_DESC(lnet_interfaces_max,
         "Maximum number of interfaces in a node.");
unsigned lnet_peer_discovery_disabled = 0;
static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_discovery_disabled = {
    .set = discovery_set,
    .get = param_get_int,
};

#define param_check_discovery_disabled(name, p) \
        __param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
#else
module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
          &param_ops_discovery_disabled, 0644);
#endif

MODULE_PARM_DESC(lnet_peer_discovery_disabled,
         "Set to 1 to disable peer discovery on this node.");
unsigned int lnet_drop_asym_route;
static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_drop_asym_route = {
    .set = drop_asym_route_set,
    .get = param_get_int,
};

#define param_check_drop_asym_route(name, p) \
    __param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_drop_asym_route, drop_asym_route, 0644);
#else
module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
          &param_ops_drop_asym_route, 0644);
#endif

MODULE_PARM_DESC(lnet_drop_asym_route,
         "Set to 1 to drop asymmetrical route messages.");
#define LNET_TRANSACTION_TIMEOUT_DEFAULT 50
unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_transaction_timeout = {
    .set = transaction_to_set,
    .get = param_get_int,
};
#define param_check_transaction_timeout(name, p) \
        __param_check(name, p, int)
module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
          &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_transaction_timeout,
         "Maximum number of seconds to wait for a peer response.");
#define LNET_RETRY_COUNT_DEFAULT 2
unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_retry_count = {
    .set = retry_count_set,
    .get = param_get_int,
};
#define param_check_retry_count(name, p) \
        __param_check(name, p, int)
module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_retry_count, retry_count_set, param_get_int,
          &lnet_retry_count, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_retry_count,
         "Maximum number of times to retry transmitting a message");
unsigned int lnet_response_tracking = 3;
static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);

#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_response_tracking = {
    .set = response_tracking_set,
    .get = param_get_int,
};
#define param_check_response_tracking(name, p) \
    __param_check(name, p, int)
module_param(lnet_response_tracking, response_tracking, 0644);
#else
module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
          &lnet_response_tracking, 0644);
#endif
MODULE_PARM_DESC(lnet_response_tracking,
         "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");
#define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
                  (LNET_RETRY_COUNT_DEFAULT + 1))
unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;
static void lnet_set_lnd_timeout(void)
{
    lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
               (lnet_retry_count + 1);
}
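/*
 * Worked example with the defaults above: lnet_transaction_timeout = 50
 * and lnet_retry_count = 2 give lnet_lnd_timeout = (50 - 1) / (2 + 1) = 16,
 * i.e. each of the up-to-three transmission attempts gets a 16-second
 * LND-level timeout within the 50-second transaction window.
 */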
unsigned int lnet_current_net_count;

/*
 * This sequence number keeps track of how many times DLC was used to
 * update the local NIs. It is incremented when a NI is added or
 * removed and checked when sending a message to determine if there is
 * a need to re-run the selection algorithm. See lnet_select_pathway()
 * for more details on its usage.
 */
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
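/*
 * Schematically (a sketch of the usage described above, not the actual
 * lnet_select_pathway() code): a sender samples
 * atomic_read(&lnet_dlc_seq_no) before selecting a pathway and re-runs
 * the selection if the value has changed by send time, since the set of
 * local NIs may have changed underneath it.
 */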
static int lnet_ping(struct lnet_process_id id, signed long timeout,
             struct lnet_process_id __user *ids, int n_ids);

static int lnet_discover(struct lnet_process_id id, __u32 force,
             struct lnet_process_id __user *ids, int n_ids);
static int
sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
    int rc;
    unsigned *sensitivity = (unsigned *)kp->arg;
    unsigned long value;

    rc = kstrtoul(val, 0, &value);
    if (rc) {
        CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
        return rc;
    }
    /* The purpose of locking the api_mutex here is to ensure that
     * the correct value ends up stored properly. */
    mutex_lock(&the_lnet.ln_api_mutex);
    if (value > LNET_MAX_HEALTH_VALUE) {
        mutex_unlock(&the_lnet.ln_api_mutex);
        CERROR("Invalid health value. Maximum: %d value = %lu\n",
               LNET_MAX_HEALTH_VALUE, value);
        return -EINVAL;
    }
    /* turning health off (sensitivity 0) also turns off retries */
    if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
        lnet_retry_count = 0;
        lnet_set_lnd_timeout();
    }
    *sensitivity = value;
    mutex_unlock(&the_lnet.ln_api_mutex);
    return 0;
}
313 recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
316 unsigned *interval = (unsigned *)kp->arg;
319 rc = kstrtoul(val, 0, &value);
321 CERROR("Invalid module parameter value for 'lnet_recovery_interval'\n");
326 CERROR("lnet_recovery_interval must be at least 1 second\n");
    /* The purpose of locking the api_mutex here is to ensure that
     * the correct value ends up stored properly. */
334 mutex_lock(&the_lnet.ln_api_mutex);
338 mutex_unlock(&the_lnet.ln_api_mutex);
344 discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
347 unsigned *discovery_off = (unsigned *)kp->arg;
349 struct lnet_ping_buffer *pbuf;
351 rc = kstrtoul(val, 0, &value);
353 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
357 value = (value) ? 1 : 0;
    /* The purpose of locking the api_mutex here is to ensure that
     * the correct value ends up stored properly. */
363 mutex_lock(&the_lnet.ln_api_mutex);
365 if (value == *discovery_off) {
366 mutex_unlock(&the_lnet.ln_api_mutex);
    /*
     * We still want to set the discovery value even when LNet is not
     * running. This is the case when LNet is being loaded and we want
     * the module parameters to take effect. Otherwise, when changing
     * the value dynamically, we want to set it only after updating the
     * peers (see below).
     */
377 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
378 *discovery_off = value;
379 mutex_unlock(&the_lnet.ln_api_mutex);
383 /* tell peers that discovery setting has changed */
384 lnet_net_lock(LNET_LOCK_EX);
385 pbuf = the_lnet.ln_ping_target;
    if (value)
        pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
    else
        pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
390 lnet_net_unlock(LNET_LOCK_EX);
392 /* only send a push when we're turning off discovery */
393 if (*discovery_off <= 0 && value > 0)
394 lnet_push_update_to_peers(1);
395 *discovery_off = value;
397 mutex_unlock(&the_lnet.ln_api_mutex);
403 drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
406 unsigned int *drop_asym_route = (unsigned int *)kp->arg;
409 rc = kstrtoul(val, 0, &value);
411 CERROR("Invalid module parameter value for "
412 "'lnet_drop_asym_route'\n");
    /* The purpose of locking the api_mutex here is to ensure that
     * the correct value ends up stored properly. */
420 mutex_lock(&the_lnet.ln_api_mutex);
422 if (value == *drop_asym_route) {
423 mutex_unlock(&the_lnet.ln_api_mutex);
427 *drop_asym_route = value;
429 mutex_unlock(&the_lnet.ln_api_mutex);
435 transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
438 unsigned *transaction_to = (unsigned *)kp->arg;
441 rc = kstrtoul(val, 0, &value);
443 CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
    /* The purpose of locking the api_mutex here is to ensure that
     * the correct value ends up stored properly. */
451 mutex_lock(&the_lnet.ln_api_mutex);
453 if (value <= lnet_retry_count || value == 0) {
454 mutex_unlock(&the_lnet.ln_api_mutex);
455 CERROR("Invalid value for lnet_transaction_timeout (%lu). "
456 "Has to be greater than lnet_retry_count (%u)\n",
457 value, lnet_retry_count);
461 if (value == *transaction_to) {
462 mutex_unlock(&the_lnet.ln_api_mutex);
466 *transaction_to = value;
    /* Update lnet_lnd_timeout now that we've modified the
     * transaction timeout */
470 lnet_set_lnd_timeout();
472 mutex_unlock(&the_lnet.ln_api_mutex);
478 retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
481 unsigned *retry_count = (unsigned *)kp->arg;
484 rc = kstrtoul(val, 0, &value);
486 CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
    /* The purpose of locking the api_mutex here is to ensure that
     * the correct value ends up stored properly. */
494 mutex_lock(&the_lnet.ln_api_mutex);
496 if (lnet_health_sensitivity == 0 && value > 0) {
497 mutex_unlock(&the_lnet.ln_api_mutex);
        CERROR("Cannot set lnet_retry_count when the health feature is turned off\n");
502 if (value > lnet_transaction_timeout) {
503 mutex_unlock(&the_lnet.ln_api_mutex);
504 CERROR("Invalid value for lnet_retry_count (%lu). "
505 "Has to be smaller than lnet_transaction_timeout (%u)\n",
506 value, lnet_transaction_timeout);
510 *retry_count = value;
    /* Update lnet_lnd_timeout now that we've modified the retry count */
515 lnet_set_lnd_timeout();
517 mutex_unlock(&the_lnet.ln_api_mutex);
523 intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
527 rc = kstrtoint(val, 0, &value);
529 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
533 if (value < LNET_INTERFACES_MIN) {
        CWARN("max interfaces value is too small, setting to %d\n",
              LNET_INTERFACES_MAX_DEFAULT);
536 value = LNET_INTERFACES_MAX_DEFAULT;
539 *(int *)kp->arg = value;
545 response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
548 unsigned long new_value;
550 rc = kstrtoul(val, 0, &new_value);
552 CERROR("Invalid value for 'lnet_response_tracking'\n");
    if (new_value > 3) {    /* new_value is unsigned, so < 0 is impossible */
557 CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
562 lnet_response_tracking = new_value;
568 lnet_get_routes(void)
574 lnet_get_networks(void)
579 if (*networks != 0 && *ip2nets != 0) {
580 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
581 "'ip2nets' but not both at once\n");
586 rc = lnet_parse_ip2nets(&nets, ip2nets);
587 return (rc == 0) ? nets : NULL;
597 lnet_init_locks(void)
599 spin_lock_init(&the_lnet.ln_eq_wait_lock);
600 spin_lock_init(&the_lnet.ln_msg_resend_lock);
601 init_completion(&the_lnet.ln_mt_wait_complete);
602 mutex_init(&the_lnet.ln_lnd_mutex);
605 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
                       * allocated from a kmem_cache */
608 struct kmem_cache *lnet_udsp_cachep; /* udsp cache */
609 struct kmem_cache *lnet_rspt_cachep; /* response tracker cache */
610 struct kmem_cache *lnet_msg_cachep;
613 lnet_slab_setup(void)
    /* create specific kmem_caches for MEs and small MDs (i.e., originally
     * allocated in <size-xxx> kmem_cache) */
618 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
620 if (!lnet_mes_cachep)
623 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
624 LNET_SMALL_MD_SIZE, 0, 0,
626 if (!lnet_small_mds_cachep)
629 lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
630 sizeof(struct lnet_udsp),
632 if (!lnet_udsp_cachep)
635 lnet_rspt_cachep = kmem_cache_create("lnet_rspt", sizeof(struct lnet_rsp_tracker),
637 if (!lnet_rspt_cachep)
640 lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
642 if (!lnet_msg_cachep)
649 lnet_slab_cleanup(void)
651 if (lnet_msg_cachep) {
652 kmem_cache_destroy(lnet_msg_cachep);
653 lnet_msg_cachep = NULL;
656 if (lnet_rspt_cachep) {
657 kmem_cache_destroy(lnet_rspt_cachep);
658 lnet_rspt_cachep = NULL;
661 if (lnet_udsp_cachep) {
662 kmem_cache_destroy(lnet_udsp_cachep);
663 lnet_udsp_cachep = NULL;
666 if (lnet_small_mds_cachep) {
667 kmem_cache_destroy(lnet_small_mds_cachep);
668 lnet_small_mds_cachep = NULL;
671 if (lnet_mes_cachep) {
672 kmem_cache_destroy(lnet_mes_cachep);
673 lnet_mes_cachep = NULL;
678 lnet_create_remote_nets_table(void)
681 struct list_head *hash;
683 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
684 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
685 CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
687 CERROR("Failed to create remote nets hash table\n");
691 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
692 INIT_LIST_HEAD(&hash[i]);
693 the_lnet.ln_remote_nets_hash = hash;
698 lnet_destroy_remote_nets_table(void)
702 if (the_lnet.ln_remote_nets_hash == NULL)
705 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
706 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
708 CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
709 LNET_REMOTE_NETS_HASH_SIZE);
710 the_lnet.ln_remote_nets_hash = NULL;
714 lnet_destroy_locks(void)
716 if (the_lnet.ln_res_lock != NULL) {
717 cfs_percpt_lock_free(the_lnet.ln_res_lock);
718 the_lnet.ln_res_lock = NULL;
721 if (the_lnet.ln_net_lock != NULL) {
722 cfs_percpt_lock_free(the_lnet.ln_net_lock);
723 the_lnet.ln_net_lock = NULL;
728 lnet_create_locks(void)
732 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
733 if (the_lnet.ln_res_lock == NULL)
736 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
737 if (the_lnet.ln_net_lock == NULL)
743 lnet_destroy_locks();
747 static void lnet_assert_wire_constants(void)
    /* Wire protocol assertions generated by 'wirecheck'
     * running on Linux robert.bartonsoftware.com 2.6.8-1.521
     * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
     * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
756 BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
757 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
758 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
759 BUILD_BUG_ON(LNET_MSG_ACK != 0);
760 BUILD_BUG_ON(LNET_MSG_PUT != 1);
761 BUILD_BUG_ON(LNET_MSG_GET != 2);
762 BUILD_BUG_ON(LNET_MSG_REPLY != 3);
763 BUILD_BUG_ON(LNET_MSG_HELLO != 4);
765 BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
766 BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);
768 /* Checks for struct lnet_process_id_packed */
769 BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
770 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
771 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
772 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
773 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);
775 /* Checks for struct lnet_handle_wire */
776 BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
777 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
778 wh_interface_cookie) != 0);
779 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
780 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
781 wh_object_cookie) != 8);
782 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
    /* Checks for struct lnet_magicversion */
785 BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
786 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
787 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
788 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
789 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
790 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
791 version_minor) != 6);
792 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
    /* Checks for struct lnet_hdr */
795 BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
796 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
797 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
798 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
799 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
800 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
801 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
802 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
803 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
804 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
805 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
806 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
807 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
808 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
809 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);
812 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
813 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
814 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
815 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
816 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
817 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);
820 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
821 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
822 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
823 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
824 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
825 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
826 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
827 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
828 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
829 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);
832 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
833 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
834 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
835 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
836 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
837 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
838 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
839 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
840 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
841 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);
844 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
845 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);
848 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
849 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
850 BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
851 BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);
853 /* Checks for struct lnet_ni_status and related constants */
854 BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
855 BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
856 BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);
858 /* Checks for struct lnet_ni_status */
859 BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
860 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
861 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
862 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
863 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
864 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_unused) != 12);
865 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) != 4);
867 /* Checks for struct lnet_ping_info and related constants */
868 BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
869 BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
870 BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
871 BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
872 BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
873 BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
874 BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
875 BUILD_BUG_ON(LNET_PING_FEAT_BITS != 31);
877 /* Checks for struct lnet_ping_info */
878 BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
879 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
880 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
881 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
882 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
883 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
884 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
885 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
886 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
887 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
888 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
890 /* Acceptor connection request */
891 BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
893 /* Checks for struct lnet_acceptor_connreq */
894 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
895 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
896 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
897 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
898 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
899 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
900 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);
902 /* Checks for struct lnet_counters_common */
903 BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
904 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
905 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
906 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
907 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
908 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
909 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
910 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
911 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
912 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
913 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
914 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
915 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
916 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
917 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
918 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
919 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
920 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
921 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
922 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
923 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
924 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
925 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
928 static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
930 const struct lnet_lnd *lnd;
932 /* holding lnd mutex */
933 if (type >= NUM_LNDS)
935 lnd = the_lnet.ln_lnds[type];
936 LASSERT(!lnd || lnd->lnd_type == type);
942 lnet_get_lnd_timeout(void)
944 return lnet_lnd_timeout;
946 EXPORT_SYMBOL(lnet_get_lnd_timeout);
949 lnet_register_lnd(const struct lnet_lnd *lnd)
951 mutex_lock(&the_lnet.ln_lnd_mutex);
953 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
954 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
956 the_lnet.ln_lnds[lnd->lnd_type] = lnd;
958 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
960 mutex_unlock(&the_lnet.ln_lnd_mutex);
962 EXPORT_SYMBOL(lnet_register_lnd);
965 lnet_unregister_lnd(const struct lnet_lnd *lnd)
967 mutex_lock(&the_lnet.ln_lnd_mutex);
969 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
971 the_lnet.ln_lnds[lnd->lnd_type] = NULL;
972 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
974 mutex_unlock(&the_lnet.ln_lnd_mutex);
976 EXPORT_SYMBOL(lnet_unregister_lnd);
979 lnet_counters_get_common_locked(struct lnet_counters_common *common)
981 struct lnet_counters *ctr;
    /* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
     * actually called under the protection of the lnet_net_lock */
987 memset(common, 0, sizeof(*common));
989 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
990 common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
991 common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
992 common->lcc_errors += ctr->lct_common.lcc_errors;
993 common->lcc_send_count += ctr->lct_common.lcc_send_count;
994 common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
995 common->lcc_route_count += ctr->lct_common.lcc_route_count;
996 common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
997 common->lcc_send_length += ctr->lct_common.lcc_send_length;
998 common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
999 common->lcc_route_length += ctr->lct_common.lcc_route_length;
1000 common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
1005 lnet_counters_get_common(struct lnet_counters_common *common)
1007 lnet_net_lock(LNET_LOCK_EX);
1008 lnet_counters_get_common_locked(common);
1009 lnet_net_unlock(LNET_LOCK_EX);
1011 EXPORT_SYMBOL(lnet_counters_get_common);
1014 lnet_counters_get(struct lnet_counters *counters)
1016 struct lnet_counters *ctr;
1017 struct lnet_counters_health *health = &counters->lct_health;
1020 memset(counters, 0, sizeof(*counters));
1022 lnet_net_lock(LNET_LOCK_EX);
1024 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1025 GOTO(out_unlock, rc = -ENODEV);
1027 lnet_counters_get_common_locked(&counters->lct_common);
1029 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
1030 health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
1031 health->lch_resend_count += ctr->lct_health.lch_resend_count;
1032 health->lch_response_timeout_count +=
1033 ctr->lct_health.lch_response_timeout_count;
1034 health->lch_local_interrupt_count +=
1035 ctr->lct_health.lch_local_interrupt_count;
1036 health->lch_local_dropped_count +=
1037 ctr->lct_health.lch_local_dropped_count;
1038 health->lch_local_aborted_count +=
1039 ctr->lct_health.lch_local_aborted_count;
1040 health->lch_local_no_route_count +=
1041 ctr->lct_health.lch_local_no_route_count;
1042 health->lch_local_timeout_count +=
1043 ctr->lct_health.lch_local_timeout_count;
1044 health->lch_local_error_count +=
1045 ctr->lct_health.lch_local_error_count;
1046 health->lch_remote_dropped_count +=
1047 ctr->lct_health.lch_remote_dropped_count;
1048 health->lch_remote_error_count +=
1049 ctr->lct_health.lch_remote_error_count;
1050 health->lch_remote_timeout_count +=
1051 ctr->lct_health.lch_remote_timeout_count;
1052 health->lch_network_timeout_count +=
1053 ctr->lct_health.lch_network_timeout_count;
1056 lnet_net_unlock(LNET_LOCK_EX);
1059 EXPORT_SYMBOL(lnet_counters_get);
1062 lnet_counters_reset(void)
1064 struct lnet_counters *counters;
1067 lnet_net_lock(LNET_LOCK_EX);
1069 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1072 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
1073 memset(counters, 0, sizeof(struct lnet_counters));
1075 lnet_net_unlock(LNET_LOCK_EX);
1079 lnet_res_type2str(int type)
1084 case LNET_COOKIE_TYPE_MD:
1086 case LNET_COOKIE_TYPE_ME:
1088 case LNET_COOKIE_TYPE_EQ:
1094 lnet_res_container_cleanup(struct lnet_res_container *rec)
1098 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
1101 while (!list_empty(&rec->rec_active)) {
1102 struct list_head *e = rec->rec_active.next;
1105 if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
1106 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
1108 } else { /* NB: Active MEs should be attached on portals */
        /* Found live MDs/MEs/EQs; the user really should unlink/free
         * all of them before finalizing LNet, but if someone didn't,
         * we have to recycle the garbage for them */
1118 CERROR("%d active elements on exit of %s container\n",
1119 count, lnet_res_type2str(rec->rec_type));
1122 if (rec->rec_lh_hash != NULL) {
1123 CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
1124 rec->rec_lh_hash = NULL;
1127 rec->rec_type = 0; /* mark it as finalized */
1131 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
1136 LASSERT(rec->rec_type == 0);
1138 rec->rec_type = type;
1139 INIT_LIST_HEAD(&rec->rec_active);
1141 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
1143 /* Arbitrary choice of hash table size */
1144 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
1145 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
1146 if (rec->rec_lh_hash == NULL) {
1151 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
1152 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
1157 CERROR("Failed to setup %s resource container\n",
1158 lnet_res_type2str(type));
1159 lnet_res_container_cleanup(rec);
1164 lnet_res_containers_destroy(struct lnet_res_container **recs)
1166 struct lnet_res_container *rec;
1169 cfs_percpt_for_each(rec, i, recs)
1170 lnet_res_container_cleanup(rec);
1172 cfs_percpt_free(recs);
1175 static struct lnet_res_container **
1176 lnet_res_containers_create(int type)
1178 struct lnet_res_container **recs;
1179 struct lnet_res_container *rec;
1183 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
1185 CERROR("Failed to allocate %s resource containers\n",
1186 lnet_res_type2str(type));
1190 cfs_percpt_for_each(rec, i, recs) {
1191 rc = lnet_res_container_setup(rec, i, type);
1193 lnet_res_containers_destroy(recs);
1201 struct lnet_libhandle *
1202 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
1204 /* ALWAYS called with lnet_res_lock held */
1205 struct list_head *head;
1206 struct lnet_libhandle *lh;
1209 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
1212 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
1213 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
1215 list_for_each_entry(lh, head, lh_hash_chain) {
1216 if (lh->lh_cookie == cookie)
1224 lnet_res_lh_initialize(struct lnet_res_container *rec,
1225 struct lnet_libhandle *lh)
1227 /* ALWAYS called with lnet_res_lock held */
1228 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
1231 lh->lh_cookie = rec->rec_lh_cookie;
1232 rec->rec_lh_cookie += 1 << ibits;
1234 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
1236 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
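/*
 * Illustration of the cookie layout implied by the code above (field
 * widths are the build-time constants; this is only a schematic):
 *
 *   | sequence count (high bits) | cpt (LNET_CPT_BITS) | type (LNET_COOKIE_TYPE_BITS) |
 *
 * lnet_res_lh_lookup() inverts it: the low bits must match the
 * container's rec_type, and the bits above the cpt+type fields select
 * one of the LNET_LH_HASH_SIZE chains in rec_lh_hash[].
 */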
1240 lnet_create_array_of_queues(void)
1242 struct list_head **qs;
1243 struct list_head *q;
1246 qs = cfs_percpt_alloc(lnet_cpt_table(),
1247 sizeof(struct list_head));
1249 CERROR("Failed to allocate queues\n");
1253 cfs_percpt_for_each(q, i, qs)
1259 static int lnet_unprepare(void);
1262 lnet_prepare(lnet_pid_t requested_pid)
1264 /* Prepare to bring up the network */
1265 struct lnet_res_container **recs;
1268 if (requested_pid == LNET_PID_ANY) {
1269 /* Don't instantiate LNET just for me */
1273 LASSERT(the_lnet.ln_refcount == 0);
1275 the_lnet.ln_routing = 0;
1277 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
1278 the_lnet.ln_pid = requested_pid;
1280 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
1281 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
1282 INIT_LIST_HEAD(&the_lnet.ln_nets);
1283 INIT_LIST_HEAD(&the_lnet.ln_routers);
1284 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
1285 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
1286 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
1287 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
1288 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
1289 INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
1290 INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
1291 INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
1292 init_waitqueue_head(&the_lnet.ln_dc_waitq);
1293 the_lnet.ln_mt_handler = NULL;
1294 init_completion(&the_lnet.ln_started);
1296 rc = lnet_slab_setup();
1300 rc = lnet_create_remote_nets_table();
    /*
     * NB the interface cookie in wire handles guards against delayed
     * replies and ACKs appearing valid after reboot.
     */
1308 the_lnet.ln_interface_cookie = ktime_get_real_ns();
1310 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
1311 sizeof(struct lnet_counters));
1312 if (the_lnet.ln_counters == NULL) {
1313 CERROR("Failed to allocate counters for LNet\n");
1318 rc = lnet_peer_tables_create();
1322 rc = lnet_msg_containers_create();
1326 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
1327 LNET_COOKIE_TYPE_EQ);
1331 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
1337 the_lnet.ln_md_containers = recs;
1339 rc = lnet_portals_create();
1341 CERROR("Failed to create portals for LNet: %d\n", rc);
1345 the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
1346 if (!the_lnet.ln_mt_zombie_rstqs) {
lnet_unprepare(void)
{
    /* NB no LNET_LOCK since this is the last reference. All LND instances
     * have shut down already, so it is safe to unlink and free all
     * descriptors, even those that appear committed to a network op
     * (e.g. an MD with a non-zero pending count) */
1366 lnet_fail_nid(LNET_NID_ANY, 0);
1368 LASSERT(the_lnet.ln_refcount == 0);
1369 LASSERT(list_empty(&the_lnet.ln_test_peers));
1370 LASSERT(list_empty(&the_lnet.ln_nets));
1372 if (the_lnet.ln_mt_zombie_rstqs) {
1373 lnet_clean_zombie_rstqs();
1374 the_lnet.ln_mt_zombie_rstqs = NULL;
1377 lnet_assert_handler_unused(the_lnet.ln_mt_handler);
1378 the_lnet.ln_mt_handler = NULL;
1380 lnet_portals_destroy();
1382 if (the_lnet.ln_md_containers != NULL) {
1383 lnet_res_containers_destroy(the_lnet.ln_md_containers);
1384 the_lnet.ln_md_containers = NULL;
1387 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
1389 lnet_msg_containers_destroy();
1391 lnet_rtrpools_free(0);
1393 if (the_lnet.ln_counters != NULL) {
1394 cfs_percpt_free(the_lnet.ln_counters);
1395 the_lnet.ln_counters = NULL;
1397 lnet_destroy_remote_nets_table();
1398 lnet_udsp_destroy(true);
1399 lnet_slab_cleanup();
1405 lnet_net2ni_locked(__u32 net_id, int cpt)
1408 struct lnet_net *net;
1410 LASSERT(cpt != LNET_LOCK_EX);
1412 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1413 if (net->net_id == net_id) {
1414 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
1424 lnet_net2ni_addref(__u32 net)
1429 ni = lnet_net2ni_locked(net, 0);
1431 lnet_ni_addref_locked(ni, 0);
1436 EXPORT_SYMBOL(lnet_net2ni_addref);
1439 lnet_get_net_locked(__u32 net_id)
1441 struct lnet_net *net;
1443 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1444 if (net->net_id == net_id)
1452 lnet_net_clr_pref_rtrs(struct lnet_net *net)
1454 struct list_head zombies;
1455 struct lnet_nid_list *ne;
1456 struct lnet_nid_list *tmp;
1458 INIT_LIST_HEAD(&zombies);
1460 lnet_net_lock(LNET_LOCK_EX);
1461 list_splice_init(&net->net_rtr_pref_nids, &zombies);
1462 lnet_net_unlock(LNET_LOCK_EX);
1464 list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1465 list_del_init(&ne->nl_list);
1466 LIBCFS_FREE(ne, sizeof(*ne));
1471 lnet_net_add_pref_rtr(struct lnet_net *net,
1473 __must_hold(&the_lnet.ln_api_mutex)
1475 struct lnet_nid_list *ne;
    /* This function is called with the api_mutex held. While the
     * api_mutex is held the list cannot be modified, as it is only
     * modified as a result of applying a UDSP, and that happens under
     * the api_mutex */
1482 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1483 if (ne->nl_nid == gw_nid)
1487 LIBCFS_ALLOC(ne, sizeof(*ne));
1491 ne->nl_nid = gw_nid;
    /* Lock the cpt to protect against addition and checks in the
     * selection algorithm */
1496 lnet_net_lock(LNET_LOCK_EX);
1497 list_add(&ne->nl_list, &net->net_rtr_pref_nids);
1498 lnet_net_unlock(LNET_LOCK_EX);
1504 lnet_net_is_pref_rtr_locked(struct lnet_net *net, lnet_nid_t rtr_nid)
1506 struct lnet_nid_list *ne;
    CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
1509 libcfs_net2str(net->net_id),
1510 list_empty(&net->net_rtr_pref_nids));
1512 if (list_empty(&net->net_rtr_pref_nids))
1515 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1516 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
1517 libcfs_nid2str(ne->nl_nid),
1518 libcfs_nid2str(rtr_nid));
1519 if (rtr_nid == ne->nl_nid)
1527 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
1532 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1537 val = hash_long(key, LNET_CPT_BITS);
    /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
1542 return (unsigned int)(key + val + (val >> 1)) % number;
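/*
 * Example (illustrative numbers): with LNET_CPT_NUMBER == 3, the final
 * "% number" above folds the mixed hash into the range [0, 2], which is
 * exactly why the CPT count does not need to be a power of 2 here.
 */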
1546 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
1548 struct lnet_net *net;
    /* must be called with lnet_net_lock held */
1551 if (LNET_CPT_NUMBER == 1)
1552 return 0; /* the only one */
    /*
     * If a NI is provided then use the CPT identified in the NI's cpt
     * list if one exists. If one doesn't exist, then that NI is
     * associated with all CPTs and it follows that the net it belongs
     * to is implicitly associated with all CPTs, so just hash the nid.
     */
1562 if (ni->ni_cpts != NULL)
1563 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
1566 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1569 /* no NI provided so look at the net */
1570 net = lnet_get_net_locked(LNET_NIDNET(nid));
1572 if (net != NULL && net->net_cpts != NULL) {
1573 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
1576 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1580 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
1585 if (LNET_CPT_NUMBER == 1)
1586 return 0; /* the only one */
1588 cpt = lnet_net_lock_current();
1590 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
1592 lnet_net_unlock(cpt);
1596 EXPORT_SYMBOL(lnet_cpt_of_nid);
1599 lnet_islocalnet_locked(__u32 net_id)
1601 struct lnet_net *net;
1604 net = lnet_get_net_locked(net_id);
1606 local = net != NULL;
1612 lnet_islocalnet(__u32 net_id)
1617 cpt = lnet_net_lock_current();
1619 local = lnet_islocalnet_locked(net_id);
1621 lnet_net_unlock(cpt);
1627 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
1629 struct lnet_net *net;
1632 LASSERT(cpt != LNET_LOCK_EX);
1634 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1635 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1636 if (ni->ni_nid == nid)
1645 lnet_nid2ni_addref(lnet_nid_t nid)
1650 ni = lnet_nid2ni_locked(nid, 0);
1652 lnet_ni_addref_locked(ni, 0);
1657 EXPORT_SYMBOL(lnet_nid2ni_addref);
1660 lnet_islocalnid(lnet_nid_t nid)
1665 cpt = lnet_net_lock_current();
1666 ni = lnet_nid2ni_locked(nid, cpt);
1667 lnet_net_unlock(cpt);
1673 lnet_count_acceptor_nets(void)
1675 /* Return the # of NIs that need the acceptor. */
1677 struct lnet_net *net;
1680 cpt = lnet_net_lock_current();
1681 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
        /* all socklnd-type networks should have the acceptor
         * running */
1684 if (net->net_lnd->lnd_accept != NULL)
1688 lnet_net_unlock(cpt);
1693 struct lnet_ping_buffer *
1694 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1696 struct lnet_ping_buffer *pbuf;
1698 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1700 pbuf->pb_nnis = nnis;
1701 pbuf->pb_needs_post = false;
1702 atomic_set(&pbuf->pb_refcnt, 1);
1709 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1711 LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
1712 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1715 static struct lnet_ping_buffer *
1716 lnet_ping_target_create(int nnis)
1718 struct lnet_ping_buffer *pbuf;
1720 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1722 CERROR("Can't allocate ping source [%d]\n", nnis);
1726 pbuf->pb_info.pi_nnis = nnis;
1727 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1728 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1729 pbuf->pb_info.pi_features =
1730 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1736 lnet_get_net_ni_count_locked(struct lnet_net *net)
1741 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1748 lnet_get_net_ni_count_pre(struct lnet_net *net)
1753 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1760 lnet_get_ni_count(void)
1763 struct lnet_net *net;
1768 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1769 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1779 lnet_get_net_count(void)
1781 struct lnet_net *net;
1786 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1796 lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
1798 struct lnet_ni_status *stat;
1802 __swab32s(&pbuf->pb_info.pi_magic);
1803 __swab32s(&pbuf->pb_info.pi_features);
1804 __swab32s(&pbuf->pb_info.pi_pid);
1805 __swab32s(&pbuf->pb_info.pi_nnis);
1806 nnis = pbuf->pb_info.pi_nnis;
1807 if (nnis > pbuf->pb_nnis)
1808 nnis = pbuf->pb_nnis;
1809 for (i = 0; i < nnis; i++) {
1810 stat = &pbuf->pb_info.pi_ni[i];
1811 __swab64s(&stat->ns_nid);
1812 __swab32s(&stat->ns_status);
1817 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1821 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1823 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1825 /* Loopback is guaranteed to be present */
1826 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1828 if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
1834 lnet_ping_target_destroy(void)
1836 struct lnet_net *net;
1839 lnet_net_lock(LNET_LOCK_EX);
1841 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1842 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1844 ni->ni_status = NULL;
1849 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1850 the_lnet.ln_ping_target = NULL;
1852 lnet_net_unlock(LNET_LOCK_EX);
1856 lnet_ping_target_event_handler(struct lnet_event *event)
1858 struct lnet_ping_buffer *pbuf = event->md_user_ptr;
1860 if (event->unlinked)
1861 lnet_ping_buffer_decref(pbuf);
1865 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1866 struct lnet_handle_md *ping_mdh,
1867 int ni_count, bool set_eq)
1869 struct lnet_process_id id = {
1870 .nid = LNET_NID_ANY,
1874 struct lnet_md md = { NULL };
1878 the_lnet.ln_ping_target_handler =
1879 lnet_ping_target_event_handler;
1881 *ppbuf = lnet_ping_target_create(ni_count);
1882 if (*ppbuf == NULL) {
1887 /* Ping target ME/MD */
1888 me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1889 LNET_PROTO_PING_MATCHBITS, 0,
1890 LNET_UNLINK, LNET_INS_AFTER);
1893 CERROR("Can't create ping target ME: %d\n", rc);
1894 goto fail_decref_ping_buffer;
1897 /* initialize md content */
1898 md.start = &(*ppbuf)->pb_info;
1899 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1900 md.threshold = LNET_MD_THRESH_INF;
1902 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1903 LNET_MD_MANAGE_REMOTE;
1904 md.handler = the_lnet.ln_ping_target_handler;
1905 md.user_ptr = *ppbuf;
1907 rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
1909 CERROR("Can't attach ping target MD: %d\n", rc);
1910 goto fail_decref_ping_buffer;
1912 lnet_ping_buffer_addref(*ppbuf);
1916 fail_decref_ping_buffer:
1917 LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
1918 lnet_ping_buffer_decref(*ppbuf);
1925 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1926 struct lnet_handle_md *ping_mdh)
1928 LNetMDUnlink(*ping_mdh);
1929 LNetInvalidateMDHandle(ping_mdh);
1931 /* NB the MD could be busy; this just starts the unlink */
1932 wait_var_event_warning(&pbuf->pb_refcnt,
1933 atomic_read(&pbuf->pb_refcnt) <= 1,
1934 "Still waiting for ping data MD to unlink\n");
1938 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1941 struct lnet_net *net;
1942 struct lnet_ni_status *ns;
1947 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1948 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1949 LASSERT(i < pbuf->pb_nnis);
1951 ns = &pbuf->pb_info.pi_ni[i];
1953 ns->ns_nid = ni->ni_nid;
1956 ns->ns_status = (ni->ni_status != NULL) ?
1957 ni->ni_status->ns_status :
    /*
     * We (ab)use the ns_status of the loopback interface to
     * transmit the sequence number. The first interface listed
     * must be the loopback interface.
     */
1970 rc = lnet_ping_info_validate(&pbuf->pb_info);
1972 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1975 LNET_PING_BUFFER_SEQNO(pbuf) =
1976 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
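/*
 * Sketch of the (ab)use described above, assuming LNET_PING_BUFFER_SEQNO()
 * aliases the loopback NI's status slot (its definition is not part of
 * this excerpt): peers reading the ping buffer find the sequence number
 * where pi_ni[0].ns_status would otherwise carry a real up/down status.
 */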
1980 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1981 struct lnet_handle_md ping_mdh)
1983 struct lnet_ping_buffer *old_pbuf = NULL;
1984 struct lnet_handle_md old_ping_md;
1986 /* switch the NIs to point to the new ping info created */
1987 lnet_net_lock(LNET_LOCK_EX);
1989 if (!the_lnet.ln_routing)
1990 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1991 if (!lnet_peer_discovery_disabled)
1992 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
1994 /* Ensure only known feature bits have been set. */
1995 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1996 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1998 lnet_ping_target_install_locked(pbuf);
2000 if (the_lnet.ln_ping_target) {
2001 old_pbuf = the_lnet.ln_ping_target;
2002 old_ping_md = the_lnet.ln_ping_target_md;
2004 the_lnet.ln_ping_target_md = ping_mdh;
2005 the_lnet.ln_ping_target = pbuf;
2007 lnet_net_unlock(LNET_LOCK_EX);
2010 /* unlink and free the old ping info */
2011 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
2012 lnet_ping_buffer_decref(old_pbuf);
2015 lnet_push_update_to_peers(0);
2019 lnet_ping_target_fini(void)
2021 lnet_ping_md_unlink(the_lnet.ln_ping_target,
2022 &the_lnet.ln_ping_target_md);
2024 lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
2025 lnet_ping_target_destroy();
2028 /* Resize the push target. */
2029 int lnet_push_target_resize(void)
2031 struct lnet_handle_md mdh;
2032 struct lnet_handle_md old_mdh;
2033 struct lnet_ping_buffer *pbuf;
2034 struct lnet_ping_buffer *old_pbuf;
2039 nnis = the_lnet.ln_push_target_nnis;
2041 CDEBUG(D_NET, "Invalid nnis %d\n", nnis);
    /* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
     * dropped when we need to resize again (see "old_pbuf" below) or when
     * LNet is shut down (see lnet_push_target_fini()) */
2049 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
2051 CDEBUG(D_NET, "Can't allocate pbuf for nnis %d\n", nnis);
2055 rc = lnet_push_target_post(pbuf, &mdh);
2057 CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
2058 lnet_ping_buffer_decref(pbuf);
2062 lnet_net_lock(LNET_LOCK_EX);
2063 old_pbuf = the_lnet.ln_push_target;
2064 old_mdh = the_lnet.ln_push_target_md;
2065 the_lnet.ln_push_target = pbuf;
2066 the_lnet.ln_push_target_md = mdh;
2067 lnet_net_unlock(LNET_LOCK_EX);
2070 LNetMDUnlink(old_mdh);
2071 /* Drop ref set by lnet_ping_buffer_alloc() */
2072 lnet_ping_buffer_decref(old_pbuf);
2075 /* Received another push or reply that requires a larger buffer */
2076 if (nnis < the_lnet.ln_push_target_nnis)
2079 CDEBUG(D_NET, "nnis %d success\n", nnis);
2083 int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
2084 struct lnet_handle_md *mdhp)
2086 struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
2087 struct lnet_md md = { NULL };
2091 me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
2092 LNET_PROTO_PING_MATCHBITS, 0,
2093 LNET_UNLINK, LNET_INS_AFTER);
2096 CERROR("Can't create push target ME: %d\n", rc);
2100 pbuf->pb_needs_post = false;
2102 /* This reference is dropped by lnet_push_target_event_handler() */
2103 lnet_ping_buffer_addref(pbuf);
2105 /* initialize md content */
2106 md.start = &pbuf->pb_info;
2107 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
2110 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
2112 md.handler = the_lnet.ln_push_target_handler;
2114 rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
2116 CERROR("Can't attach push MD: %d\n", rc);
2117 lnet_ping_buffer_decref(pbuf);
2118 pbuf->pb_needs_post = true;
2122 CDEBUG(D_NET, "posted push target %p\n", pbuf);
2127 static void lnet_push_target_event_handler(struct lnet_event *ev)
2129 struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
2131 CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
2134 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2135 lnet_swap_pinginfo(pbuf);
2137 if (ev->type == LNET_EVENT_UNLINK) {
2138 /* Drop ref added by lnet_push_target_post() */
2139 lnet_ping_buffer_decref(pbuf);
2143 lnet_peer_push_event(ev);
2145 /* Drop ref added by lnet_push_target_post */
2146 lnet_ping_buffer_decref(pbuf);
2149 /* Initialize the push target. */
2150 static int lnet_push_target_init(void)
2154 if (the_lnet.ln_push_target)
2157 the_lnet.ln_push_target_handler =
2158 lnet_push_target_event_handler;
2160 rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
    /* Start at the required minimum; we'll enlarge it if needed. */
2164 the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
2166 rc = lnet_push_target_resize();
2169 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2170 the_lnet.ln_push_target_handler = NULL;
2176 /* Clean up the push target. */
2177 static void lnet_push_target_fini(void)
2179 if (!the_lnet.ln_push_target)
2182 /* Unlink and invalidate to prevent new references. */
2183 LNetMDUnlink(the_lnet.ln_push_target_md);
2184 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
2186 /* Wait for the unlink to complete. */
2187 wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
2188 atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
2189 "Still waiting for ping data MD to unlink\n");
2191 /* Drop ref set by lnet_ping_buffer_alloc() */
2192 lnet_ping_buffer_decref(the_lnet.ln_push_target);
2193 the_lnet.ln_push_target = NULL;
2194 the_lnet.ln_push_target_nnis = 0;
2196 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2197 lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
2198 the_lnet.ln_push_target_handler = NULL;
int
lnet_ni_tq_credits(struct lnet_ni *ni)
{
    int credits;

    LASSERT(ni->ni_ncpts >= 1);
    if (ni->ni_ncpts == 1)
        return ni->ni_net->net_tunables.lct_max_tx_credits;
    credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
    credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
    credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
    return credits;
}
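/*
 * Worked example (illustrative tunables, not values from this file):
 * with lct_max_tx_credits = 256, lct_peer_tx_credits = 8 and 4 CPTs,
 * credits = 256 / 4 = 64, then max(64, 8 * 8) = 64, and the final min()
 * leaves 64 since it is below the 256 ceiling.
 */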
2219 lnet_ni_unlink_locked(struct lnet_ni *ni)
    /* move it to the zombie list where nobody can find it anymore */
2222 LASSERT(!list_empty(&ni->ni_netlist));
2223 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
2224 lnet_ni_decref_locked(ni, 0);
2228 lnet_clear_zombies_nis_locked(struct lnet_net *net)
2233 struct list_head *zombie_list = &net->net_ni_zombie;
    /*
     * Now wait for the NIs I just nuked to show up on the zombie
     * list and shut them down in guaranteed thread context
     */
2240 while (!list_empty(zombie_list)) {
2244 ni = list_entry(zombie_list->next,
2245 struct lnet_ni, ni_netlist);
2246 list_del_init(&ni->ni_netlist);
        /* the NI should be in the deleting state; if it's not, it's
         * a bug */
2249 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
2250 cfs_percpt_for_each(ref, j, ni->ni_refs) {
2253 /* still busy, add it back to zombie list */
2254 list_add(&ni->ni_netlist, zombie_list);
2258 if (!list_empty(&ni->ni_netlist)) {
            /* Unlock mutex while waiting to allow other
             * threads to read the LNet state and fall through */
2263 lnet_net_unlock(LNET_LOCK_EX);
2264 mutex_unlock(&the_lnet.ln_api_mutex);
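/* (i & (-i)) == i is true only when i is a power of two, so the
 * "Waiting for zombie LNI" message below is logged with exponential
 * backoff (iterations 1, 2, 4, 8, ...) rather than on every pass.
 */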
2267 if ((i & (-i)) == i) {
2269 "Waiting for zombie LNI %s\n",
2270 libcfs_nid2str(ni->ni_nid));
2272 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2274 mutex_lock(&the_lnet.ln_api_mutex);
2275 lnet_net_lock(LNET_LOCK_EX);
2279 lnet_net_unlock(LNET_LOCK_EX);
2281 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
2283 LASSERT(!in_interrupt());
2284 /* Holding the mutex makes it safe for lnd_shutdown
2285 * to call module_put(). Module unload cannot finish
2286 * until lnet_unregister_lnd() completes, and that
2287 * requires the mutex.
2289 mutex_lock(&the_lnet.ln_lnd_mutex);
2290 (net->net_lnd->lnd_shutdown)(ni);
2291 mutex_unlock(&the_lnet.ln_lnd_mutex);
2294 CDEBUG(D_LNI, "Removed LNI %s\n",
2295 libcfs_nid2str(ni->ni_nid));
2299 lnet_net_lock(LNET_LOCK_EX);
2303 /* shut down the NI and release the refcount */
2305 lnet_shutdown_lndni(struct lnet_ni *ni)
2308 struct lnet_net *net = ni->ni_net;
2310 lnet_net_lock(LNET_LOCK_EX);
2312 ni->ni_state = LNET_NI_STATE_DELETING;
2314 lnet_ni_unlink_locked(ni);
2315 lnet_incr_dlc_seq();
2316 lnet_net_unlock(LNET_LOCK_EX);
2318 /* clear messages for this NI on the lazy portal */
2319 for (i = 0; i < the_lnet.ln_nportals; i++)
2320 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
2322 lnet_net_lock(LNET_LOCK_EX);
2323 lnet_clear_zombies_nis_locked(net);
2324 lnet_net_unlock(LNET_LOCK_EX);
2328 lnet_shutdown_lndnet(struct lnet_net *net)
2332 lnet_net_lock(LNET_LOCK_EX);
2334 list_del_init(&net->net_list);
2336 while (!list_empty(&net->net_ni_list)) {
2337 ni = list_entry(net->net_ni_list.next,
2338 struct lnet_ni, ni_netlist);
2339 lnet_net_unlock(LNET_LOCK_EX);
2340 lnet_shutdown_lndni(ni);
2341 lnet_net_lock(LNET_LOCK_EX);
2344 lnet_net_unlock(LNET_LOCK_EX);
2346 /* Do peer table cleanup for this net */
2347 lnet_peer_tables_cleanup(net);
2353 lnet_shutdown_lndnets(void)
2355 struct lnet_net *net;
2357 struct lnet_msg *msg, *tmp;
2359 /* NB called holding the global mutex */
2361 /* All quiet on the API front */
2362 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
2363 LASSERT(the_lnet.ln_refcount == 0);
2365 lnet_net_lock(LNET_LOCK_EX);
2366 the_lnet.ln_state = LNET_STATE_STOPPING;
2369 * move the nets to the zombie list to avoid them being
2370 * picked up for new work. The loopback net (LONET) is also
2371 * included in the nets that will be moved to the zombie list
2373 list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
2375 /* Drop the cached loopback Net. */
2376 if (the_lnet.ln_loni != NULL) {
2377 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
2378 the_lnet.ln_loni = NULL;
2380 lnet_net_unlock(LNET_LOCK_EX);
2382 /* iterate through the net zombie list and delete each net */
2383 while (!list_empty(&the_lnet.ln_net_zombie)) {
2384 net = list_entry(the_lnet.ln_net_zombie.next,
2385 struct lnet_net, net_list);
2386 lnet_shutdown_lndnet(net);
2389 spin_lock(&the_lnet.ln_msg_resend_lock);
2390 list_splice(&the_lnet.ln_msg_resend, &resend);
2391 spin_unlock(&the_lnet.ln_msg_resend_lock);
2393 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
2394 list_del_init(&msg->msg_list);
2395 msg->msg_no_resend = true;
2396 lnet_finalize(msg, -ECANCELED);
2399 lnet_net_lock(LNET_LOCK_EX);
2400 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
2401 lnet_net_unlock(LNET_LOCK_EX);
2405 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2408 struct lnet_tx_queue *tq;
2410 struct lnet_net *net = ni->ni_net;
2412 mutex_lock(&the_lnet.ln_lnd_mutex);
2415 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2416 ni->ni_lnd_tunables_set = true;
2419 rc = (net->net_lnd->lnd_startup)(ni);
2421 mutex_unlock(&the_lnet.ln_lnd_mutex);
2424 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2425 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2430 ni->ni_state = LNET_NI_STATE_ACTIVE;
2433 /* We keep a reference on the loopback net through the loopback NI */
2434 if (net->net_lnd->lnd_type == LOLND) {
2436 LASSERT(the_lnet.ln_loni == NULL);
2437 the_lnet.ln_loni = ni;
2438 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2439 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2440 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2441 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2445 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2446 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2447 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2448 libcfs_lnd2str(net->net_lnd->lnd_type),
2449 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2451 /* shut down the NI, since if we get here it must've already been started */
2454 lnet_shutdown_lndni(ni);
2458 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2459 tq->tq_credits_min =
2460 tq->tq_credits_max =
2461 tq->tq_credits = lnet_ni_tq_credits(ni);
2464 atomic_set(&ni->ni_tx_credits,
2465 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2466 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2468 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2469 libcfs_nid2str(ni->ni_nid),
2470 ni->ni_net->net_tunables.lct_peer_tx_credits,
2471 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2472 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2473 ni->ni_net->net_tunables.lct_peer_timeout);
2482 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2485 struct lnet_net *net_l = NULL;
2486 LIST_HEAD(local_ni_list);
2490 const struct lnet_lnd *lnd;
2492 net->net_tunables.lct_peer_timeout;
2494 net->net_tunables.lct_max_tx_credits;
2495 int peerrtrcredits =
2496 net->net_tunables.lct_peer_rtr_credits;
2499 * make sure that this net is unique. If it isn't then
2500 * we are adding interfaces to an already existing network, and
2501 * 'net' is just a convenient way to pass in the list.
2502 * If it is unique, we need to find the LND and load it if necessary.
2505 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2506 lnd_type = LNET_NETTYP(net->net_id);
2508 mutex_lock(&the_lnet.ln_lnd_mutex);
2509 lnd = lnet_find_lnd_by_type(lnd_type);
2512 mutex_unlock(&the_lnet.ln_lnd_mutex);
2513 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2514 mutex_lock(&the_lnet.ln_lnd_mutex);
2516 lnd = lnet_find_lnd_by_type(lnd_type);
2518 mutex_unlock(&the_lnet.ln_lnd_mutex);
2519 CERROR("Can't load LND %s, module %s, rc=%d\n",
2520 libcfs_lnd2str(lnd_type),
2521 libcfs_lnd2modname(lnd_type), rc);
2522 #ifndef HAVE_MODULE_LOADING_SUPPORT
2523 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
2524 "compiled with kernel module "
2525 "loading support.");
2534 mutex_unlock(&the_lnet.ln_lnd_mutex);
2540 * net_l: if the network being added is unique then net_l
2541 * will point to that network
2542 * if the network being added is not unique then
2543 * net_l points to the existing network.
2545 * When we enter the loop below, we'll pick NIs off the
2546 * network being added and start them up, then add them to
2547 * a local ni list. Once we've successfully started all
2548 * the NIs then we join the local NI list (of started up
2549 * networks) with the net_l->net_ni_list, which should
2550 * point to the correct network to add the new ni list to
2552 * If any of the new NIs fail to start up, then we want to
2553 * iterate through the local ni list, which should include
2554 * any NIs which were successfully started up, and shut them down.
2557 * After that we want to delete the network being added,
2558 * to avoid a memory leak.
2562 * When a network uses TCP bonding then all its interfaces
2563 * must be specified when the network is first defined: the
2564 * TCP bonding code doesn't allow for interfaces to be added
2567 if (net_l != net && net_l != NULL && use_tcp_bonding &&
2568 LNET_NETTYP(net_l->net_id) == SOCKLND) {
2573 while (!list_empty(&net->net_ni_added)) {
2574 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
2576 list_del_init(&ni->ni_netlist);
2578 /* make sure that the NI we're about to start
2579 * up is actually unique. If it's not, fail. */
2580 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2581 ni->ni_interfaces[0])) {
2586 /* adjust the pointer to the parent network, just in
2587 * case the net is a duplicate */
2590 rc = lnet_startup_lndni(ni, tun);
2596 list_add_tail(&ni->ni_netlist, &local_ni_list);
2601 lnet_net_lock(LNET_LOCK_EX);
2602 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2603 lnet_incr_dlc_seq();
2604 lnet_net_unlock(LNET_LOCK_EX);
2606 /* if the network is not unique then we don't want to keep
2607 * it around after we're done. Free it. Otherwise add that
2608 * net to the global the_lnet.ln_nets */
2609 if (net_l != net && net_l != NULL) {
2611 * TODO - note. currently the tunables can not be updated
2617 * restore tunables after it has been overwritten by the
2620 if (peer_timeout != -1)
2621 net->net_tunables.lct_peer_timeout = peer_timeout;
2622 if (maxtxcredits != -1)
2623 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2624 if (peerrtrcredits != -1)
2625 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2627 lnet_net_lock(LNET_LOCK_EX);
2628 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2629 lnet_net_unlock(LNET_LOCK_EX);
2632 /* update net count */
2633 lnet_current_net_count = lnet_get_net_count();
2639 * shut down the new NIs that are being started up and
2640 * free the net being started
2642 while (!list_empty(&local_ni_list)) {
2643 ni = list_entry(local_ni_list.next, struct lnet_ni,
2646 lnet_shutdown_lndni(ni);
2656 lnet_startup_lndnets(struct list_head *netlist)
2658 struct lnet_net *net;
2663 * Change to running state before bringing up the LNDs. This
2664 * allows lnet_shutdown_lndnets() to assert that we've passed
2667 lnet_net_lock(LNET_LOCK_EX);
2668 the_lnet.ln_state = LNET_STATE_RUNNING;
2669 lnet_net_unlock(LNET_LOCK_EX);
2671 while (!list_empty(netlist)) {
2672 net = list_entry(netlist->next, struct lnet_net, net_list);
2673 list_del_init(&net->net_list);
2675 rc = lnet_startup_lndnet(net, NULL);
2685 lnet_shutdown_lndnets();
2691 * Initialize LNet library.
2693 * Automatically called at module loading time. Caller has to call
2694 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2695 * latter returned 0. It must be called exactly once.
2697 * \retval 0 on success
2698 * \retval -ve on failures.
2700 int lnet_lib_init(void)
2704 lnet_assert_wire_constants();
2706 /* refer to global cfs_cpt_table for now */
2707 the_lnet.ln_cpt_table = cfs_cpt_tab;
2708 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
2710 LASSERT(the_lnet.ln_cpt_number > 0);
2711 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2712 /* we are at risk of consuming all lh_cookie */
2713 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2714 "please change setting of CPT-table and retry\n",
2715 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2719 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2720 the_lnet.ln_cpt_bits++;
2722 rc = lnet_create_locks();
2724 CERROR("Can't create LNet global locks: %d\n", rc);
2728 the_lnet.ln_refcount = 0;
2729 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2730 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2732 /* The hash table size is the number of bits it takes to express the set
2733 * ln_num_routes, minus 1 (better to underestimate than overestimate so we
2734 * don't waste memory). */
2735 if (rnet_htable_size <= 0)
2736 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2737 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2738 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2739 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2740 order_base_2(rnet_htable_size) - 1);
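/* Example: rnet_htable_size = 128 gives order_base_2(128) = 7, so
 * ln_remote_nets_hbits = 6, i.e. a 64-bucket remote-nets hash table.
 */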
2742 /* All LNDs apart from the LOLND are in separate modules. They
2743 * register themselves when their module loads, and unregister
2744 * themselves when their module is unloaded. */
2745 lnet_register_lnd(&the_lolnd);
2750 * Finalize LNet library.
2752 * \pre lnet_lib_init() called with success.
2753 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2755 * As this happens at module-unload, all lnds must already be unloaded,
2756 * so they must already be unregistered.
2758 void lnet_lib_exit(void)
2762 LASSERT(the_lnet.ln_refcount == 0);
2763 lnet_unregister_lnd(&the_lolnd);
2764 for (i = 0; i < NUM_LNDS; i++)
2765 LASSERT(!the_lnet.ln_lnds[i]);
2766 lnet_destroy_locks();
2770 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2772 * Users must call this function at least once before any other functions.
2773 * For each successful call there must be a corresponding call to
2774 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is ignored.
2777 * The PID used by LNet may be different from the one requested.
2780 * \param requested_pid PID requested by the caller.
2782 * \return >= 0 on success, and < 0 error code on failures.
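 *
 * A minimal usage sketch (hypothetical caller):
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	... use LNet ...
 *	LNetNIFini();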
2785 LNetNIInit(lnet_pid_t requested_pid)
2787 int im_a_router = 0;
2790 struct lnet_ping_buffer *pbuf;
2791 struct lnet_handle_md ping_mdh;
2792 LIST_HEAD(net_head);
2793 struct lnet_net *net;
2795 mutex_lock(&the_lnet.ln_api_mutex);
2797 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2799 if (the_lnet.ln_refcount > 0) {
2800 rc = the_lnet.ln_refcount++;
2801 mutex_unlock(&the_lnet.ln_api_mutex);
2805 rc = lnet_prepare(requested_pid);
2807 mutex_unlock(&the_lnet.ln_api_mutex);
2811 /* create a net for the loopback network */
2812 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2815 goto err_empty_list;
2818 /* Add in the loopback NI */
2819 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2821 goto err_empty_list;
2824 if (use_tcp_bonding)
2825 CWARN("'use_tcp_bonding' option has been deprecated. See LU-13641\n");
2827 /* If LNet is being initialized via DLC it is possible
2828 * that the user requests not to load module parameters (ones which
2829 * are supported by DLC) on initialization. Therefore, make sure not
2830 * to load networks, routes and forwarding from module parameters
2831 * in this case. On cleanup after a failure, only clean up
2832 * routes if they have been loaded */
2833 if (!the_lnet.ln_nis_from_mod_params) {
2834 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2837 goto err_empty_list;
2840 ni_count = lnet_startup_lndnets(&net_head);
2843 goto err_empty_list;
2846 if (!the_lnet.ln_nis_from_mod_params) {
2847 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2849 goto err_shutdown_lndnis;
2851 rc = lnet_rtrpools_alloc(im_a_router);
2853 goto err_destroy_routes;
2856 rc = lnet_acceptor_start();
2858 goto err_destroy_routes;
2860 the_lnet.ln_refcount = 1;
2861 /* Now I may use my own API functions... */
2863 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2865 goto err_acceptor_stop;
2867 lnet_ping_target_update(pbuf, ping_mdh);
2869 the_lnet.ln_mt_handler = lnet_mt_event_handler;
2871 rc = lnet_push_target_init();
2875 rc = lnet_peer_discovery_start();
2877 goto err_destroy_push_target;
2879 rc = lnet_monitor_thr_start();
2881 goto err_stop_discovery_thr;
2884 lnet_router_debugfs_init();
2886 mutex_unlock(&the_lnet.ln_api_mutex);
2888 complete_all(&the_lnet.ln_started);
2890 /* wait for all routers to start */
2891 lnet_wait_router_start();
2895 err_stop_discovery_thr:
2896 lnet_peer_discovery_stop();
2897 err_destroy_push_target:
2898 lnet_push_target_fini();
2900 lnet_ping_target_fini();
2902 the_lnet.ln_refcount = 0;
2903 lnet_acceptor_stop();
2905 if (!the_lnet.ln_nis_from_mod_params)
2906 lnet_destroy_routes();
2907 err_shutdown_lndnis:
2908 lnet_shutdown_lndnets();
2912 mutex_unlock(&the_lnet.ln_api_mutex);
2913 while (!list_empty(&net_head)) {
2914 struct lnet_net *net;
2916 net = list_entry(net_head.next, struct lnet_net, net_list);
2917 list_del_init(&net->net_list);
2922 EXPORT_SYMBOL(LNetNIInit);
2925 * Stop LNet interfaces, routing, and forwarding.
2927 * Users must call this function once for each successful call to LNetNIInit().
2928 * Once the LNetNIFini() operation has been started, the results of pending
2929 * API operations are undefined.
2931 * \return always 0 for current implementation.
2936 mutex_lock(&the_lnet.ln_api_mutex);
2938 LASSERT(the_lnet.ln_refcount > 0);
2940 if (the_lnet.ln_refcount != 1) {
2941 the_lnet.ln_refcount--;
2943 LASSERT(!the_lnet.ln_niinit_self);
2947 lnet_router_debugfs_fini();
2948 lnet_monitor_thr_stop();
2949 lnet_peer_discovery_stop();
2950 lnet_push_target_fini();
2951 lnet_ping_target_fini();
2953 /* Teardown fns that use my own API functions BEFORE here */
2954 the_lnet.ln_refcount = 0;
2956 lnet_acceptor_stop();
2957 lnet_destroy_routes();
2958 lnet_shutdown_lndnets();
2962 mutex_unlock(&the_lnet.ln_api_mutex);
2965 EXPORT_SYMBOL(LNetNIFini);
2968 * Grabs the ni data from the ni structure and fills the output parameters.
2971 * \param[in] ni network interface structure
2972 * \param[out] cfg_ni NI config information
2973 * \param[out] tun network and LND tunables
2976 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2977 struct lnet_ioctl_config_lnd_tunables *tun,
2978 struct lnet_ioctl_element_stats *stats,
2981 size_t min_size = 0;
2984 if (!ni || !cfg_ni || !tun)
2987 if (ni->ni_interfaces[0] != NULL) {
2988 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2989 if (ni->ni_interfaces[i] != NULL) {
2990 strncpy(cfg_ni->lic_ni_intf[i],
2991 ni->ni_interfaces[i],
2992 sizeof(cfg_ni->lic_ni_intf[i]));
2997 cfg_ni->lic_nid = ni->ni_nid;
2998 if (ni->ni_nid == LNET_NID_LO_0)
2999 cfg_ni->lic_status = LNET_NI_STATUS_UP;
3001 cfg_ni->lic_status = ni->ni_status->ns_status;
3002 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
3003 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
3005 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
3008 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
3009 LNET_STATS_TYPE_SEND);
3010 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
3011 LNET_STATS_TYPE_RECV);
3012 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
3013 LNET_STATS_TYPE_DROP);
3017 * tun->lt_tun will always be present, but in order to be
3018 * backwards compatible, we need to handle the case where
3019 * tun->lt_tun is smaller than what the kernel has, because it
3020 * comes from an older version of a userspace program. In that
3021 * case we copy only as much information as there is space for.
3023 min_size = tun_size - sizeof(tun->lt_cmn);
3024 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
3026 /* copy over the cpts */
3027 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
3028 ni->ni_cpts == NULL) {
3029 for (i = 0; i < ni->ni_ncpts; i++)
3030 cfg_ni->lic_cpts[i] = i;
3033 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
3034 i < LNET_MAX_SHOW_NUM_CPT;
3036 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
3038 cfg_ni->lic_ncpts = ni->ni_ncpts;
3042 * NOTE: This is a legacy function left in the code to be backwards
3043 * compatible with older userspace programs. It should eventually be removed.
3046 * Grabs the ni data from the ni structure and fills the output parameters.
3049 * \param[in] ni network interface structure
3050 * \param[out] config config information
3053 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
3054 struct lnet_ioctl_config_data *config)
3056 struct lnet_ioctl_net_config *net_config;
3057 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
3058 size_t min_size, tunable_size = 0;
3064 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
3068 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
3069 ARRAY_SIZE(net_config->ni_interfaces));
3071 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
3072 if (!ni->ni_interfaces[i])
3075 strncpy(net_config->ni_interfaces[i],
3076 ni->ni_interfaces[i],
3077 sizeof(net_config->ni_interfaces[i]));
3080 config->cfg_nid = ni->ni_nid;
3081 config->cfg_config_u.cfg_net.net_peer_timeout =
3082 ni->ni_net->net_tunables.lct_peer_timeout;
3083 config->cfg_config_u.cfg_net.net_max_tx_credits =
3084 ni->ni_net->net_tunables.lct_max_tx_credits;
3085 config->cfg_config_u.cfg_net.net_peer_tx_credits =
3086 ni->ni_net->net_tunables.lct_peer_tx_credits;
3087 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
3088 ni->ni_net->net_tunables.lct_peer_rtr_credits;
3090 if (ni->ni_nid == LNET_NID_LO_0)
3091 net_config->ni_status = LNET_NI_STATUS_UP;
3093 net_config->ni_status = ni->ni_status->ns_status;
3096 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
3098 for (i = 0; i < num_cpts; i++)
3099 net_config->ni_cpts[i] = ni->ni_cpts[i];
3101 config->cfg_ncpts = num_cpts;
3105 * See if user land tools sent in a newer and larger version
3106 * of struct lnet_tunables than what the kernel uses.
3108 min_size = sizeof(*config) + sizeof(*net_config);
3110 if (config->cfg_hdr.ioc_len > min_size)
3111 tunable_size = config->cfg_hdr.ioc_len - min_size;
3113 /* Don't copy too much data to user space */
3114 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
3115 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
3117 if (lnd_cfg && min_size) {
3118 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
3119 config->cfg_config_u.cfg_net.net_interface_count = 1;
3121 /* Tell user land that kernel side has less data */
3122 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
3123 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
3124 config->cfg_hdr.ioc_len -= min_size;
3130 lnet_get_ni_idx_locked(int idx)
3133 struct lnet_net *net;
3135 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3136 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
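/* A net's health is the health of its healthiest local NI. */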
3145 int lnet_get_net_healthv_locked(struct lnet_net *net)
3148 int best_healthv = 0;
3151 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3152 healthv = atomic_read(&ni->ni_healthv);
3153 if (healthv > best_healthv)
3154 best_healthv = healthv;
3157 return best_healthv;
3161 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
3164 struct lnet_net *net = mynet;
3167 * It is possible that the net has been cleaned out while there is
3168 * a message being sent. This function accesses the net without
3169 * checking if the list is empty
3173 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
3175 if (list_empty(&net->net_ni_list))
3177 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
3183 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
3184 /* if you reached the end of the ni list and the net is
3185 * specified, then there are no more nis in that net */
3189 /* we reached the end of this net's NI list. Move to the next net */
3191 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
3192 /* no more nets and no more NIs. */
3195 /* get the next net */
3196 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
3198 if (list_empty(&net->net_ni_list))
3200 /* get the ni on it */
3201 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
3207 if (list_empty(&prev->ni_netlist))
3210 /* there are more nis left */
3211 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
3217 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3222 int idx = config->cfg_count;
3224 cpt = lnet_net_lock_current();
3226 ni = lnet_get_ni_idx_locked(idx);
3231 lnet_fill_ni_info_legacy(ni, config);
3235 lnet_net_unlock(cpt);
3240 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3241 struct lnet_ioctl_config_lnd_tunables *tun,
3242 struct lnet_ioctl_element_stats *stats,
3249 if (!cfg_ni || !tun || !stats)
3252 cpt = lnet_net_lock_current();
3254 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3259 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3263 lnet_net_unlock(cpt);
3267 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3276 cpt = lnet_net_lock_current();
3278 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3281 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3285 lnet_net_unlock(cpt);
3290 static int lnet_add_net_common(struct lnet_net *net,
3291 struct lnet_ioctl_config_lnd_tunables *tun)
3293 struct lnet_handle_md ping_mdh;
3294 struct lnet_ping_buffer *pbuf;
3295 struct lnet_remotenet *rnet;
3301 lnet_net_lock(LNET_LOCK_EX);
3302 rnet = lnet_find_rnet_locked(net->net_id);
3303 lnet_net_unlock(LNET_LOCK_EX);
3305 * make sure that the net added doesn't invalidate the current
3306 * configuration LNet is keeping
3309 CERROR("Adding net %s will invalidate routing configuration\n",
3310 libcfs_net2str(net->net_id));
3316 * make sure you calculate the correct number of slots in the ping
3317 * buffer. Since the ping info is a flattened list of all the NIs,
3318 * we should allocate enough slots to accommodate the number of NIs
3319 * which will be added.
3321 * since ni hasn't been configured yet, use
3322 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
3324 net_ni_count = lnet_get_net_ni_count_pre(net);
3326 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3327 net_ni_count + lnet_get_ni_count(),
3335 memcpy(&net->net_tunables,
3336 &tun->lt_cmn, sizeof(net->net_tunables));
3338 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3340 net_id = net->net_id;
3342 rc = lnet_startup_lndnet(net,
3343 (tun) ? &tun->lt_tun : NULL);
3347 lnet_net_lock(LNET_LOCK_EX);
3348 net = lnet_get_net_locked(net_id);
3351 /* apply the UDSPs */
3352 rc = lnet_udsp_apply_policies_on_net(net);
3354 CERROR("Failed to apply UDSPs on local net %s\n",
3355 libcfs_net2str(net->net_id));
3357 /* At this point we lost track of which NI was just added, so we
3358 * just re-apply the policies on all of the NIs on this net
3360 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3361 rc = lnet_udsp_apply_policies_on_ni(ni);
3363 CERROR("Failed to apply UDSPs on ni %s\n",
3364 libcfs_nid2str(ni->ni_nid));
3366 lnet_net_unlock(LNET_LOCK_EX);
3369 * Start the acceptor thread if this is the first network
3370 * being added that requires the thread.
3372 if (net->net_lnd->lnd_accept) {
3373 rc = lnet_acceptor_start();
3375 /* shut down the net that we just started */
3376 CERROR("Failed to start up acceptor thread\n");
3377 lnet_shutdown_lndnet(net);
3382 lnet_net_lock(LNET_LOCK_EX);
3383 lnet_peer_net_added(net);
3384 lnet_net_unlock(LNET_LOCK_EX);
3386 lnet_ping_target_update(pbuf, ping_mdh);
3391 lnet_ping_md_unlink(pbuf, &ping_mdh);
3392 lnet_ping_buffer_decref(pbuf);
3397 lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
3400 if (!tun->lt_cmn.lct_peer_timeout)
3401 tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
3402 if (!tun->lt_cmn.lct_peer_tx_credits)
3403 tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
3404 if (!tun->lt_cmn.lct_max_tx_credits)
3405 tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
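/* Only tunables the caller left at 0 are filled in with the
 * compile-time defaults; explicitly configured values are preserved.
 */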
3409 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3410 struct lnet_ioctl_config_lnd_tunables *tun)
3412 struct lnet_net *net;
3415 LIST_HEAD(net_head);
3417 rc = lnet_parse_ip2nets(&nets, ip2nets);
3421 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3425 lnet_set_tune_defaults(tun);
3427 mutex_lock(&the_lnet.ln_api_mutex);
3428 while (!list_empty(&net_head)) {
3429 net = list_entry(net_head.next, struct lnet_net, net_list);
3430 list_del_init(&net->net_list);
3431 rc = lnet_add_net_common(net, tun);
3437 mutex_unlock(&the_lnet.ln_api_mutex);
3439 while (!list_empty(&net_head)) {
3440 net = list_entry(net_head.next, struct lnet_net, net_list);
3441 list_del_init(&net->net_list);
3447 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
3449 struct lnet_net *net;
3451 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3453 __u32 net_id, lnd_type;
3455 /* get the tunables if they are available */
3456 if (conf->lic_cfg_hdr.ioc_len >=
3457 sizeof(*conf) + sizeof(*tun))
3458 tun = (struct lnet_ioctl_config_lnd_tunables *)
3461 /* handle legacy ip2nets from DLC */
3462 if (conf->lic_legacy_ip2nets[0] != '\0')
3463 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3466 net_id = LNET_NIDNET(conf->lic_nid);
3467 lnd_type = LNET_NETTYP(net_id);
3469 if (!libcfs_isknown_lnd(lnd_type)) {
3470 CERROR("No valid net and lnd information provided\n");
3474 net = lnet_net_alloc(net_id, NULL);
3478 for (i = 0; i < conf->lic_ncpts; i++) {
3479 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
3483 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3484 conf->lic_ni_intf[0]);
3488 lnet_set_tune_defaults(tun);
3490 mutex_lock(&the_lnet.ln_api_mutex);
3492 rc = lnet_add_net_common(net, tun);
3494 mutex_unlock(&the_lnet.ln_api_mutex);
3499 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
3501 struct lnet_net *net;
3503 __u32 net_id = LNET_NIDNET(conf->lic_nid);
3504 struct lnet_ping_buffer *pbuf;
3505 struct lnet_handle_md ping_mdh;
3510 /* don't allow userspace to shut down the LOLND */
3511 if (LNET_NETTYP(net_id) == LOLND)
3514 mutex_lock(&the_lnet.ln_api_mutex);
3518 net = lnet_get_net_locked(net_id);
3520 CERROR("net %s not found\n",
3521 libcfs_net2str(net_id));
3526 addr = LNET_NIDADDR(conf->lic_nid);
3528 /* remove the entire net */
3529 net_count = lnet_get_net_ni_count_locked(net);
3533 /* create and link a new ping info, before removing the old one */
3534 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3535 lnet_get_ni_count() - net_count,
3538 goto unlock_api_mutex;
3540 lnet_shutdown_lndnet(net);
3542 lnet_acceptor_stop();
3544 lnet_ping_target_update(pbuf, ping_mdh);
3546 goto unlock_api_mutex;
3549 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
3551 CERROR("nid %s not found\n",
3552 libcfs_nid2str(conf->lic_nid));
3557 net_count = lnet_get_net_ni_count_locked(net);
3561 /* create and link a new ping info, before removing the old one */
3562 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3563 lnet_get_ni_count() - 1, false);
3565 goto unlock_api_mutex;
3567 lnet_shutdown_lndni(ni);
3569 lnet_acceptor_stop();
3571 lnet_ping_target_update(pbuf, ping_mdh);
3573 /* check if the net is empty and remove it if it is */
3575 lnet_shutdown_lndnet(net);
3577 goto unlock_api_mutex;
3582 mutex_unlock(&the_lnet.ln_api_mutex);
3588 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3589 * They are only expected to be called for unique networks.
3590 * That can happen as a result of older DLC library
3591 * calls. Multi-Rail DLC and beyond no longer use these APIs.
3594 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3596 struct lnet_net *net;
3597 LIST_HEAD(net_head);
3599 struct lnet_ioctl_config_lnd_tunables tun;
3600 const char *nets = conf->cfg_config_u.cfg_net.net_intf;
3602 /* Create a net/ni structures for the network string */
3603 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3605 return rc == 0 ? -EINVAL : rc;
3607 mutex_lock(&the_lnet.ln_api_mutex);
3610 rc = -EINVAL; /* only add one network per call */
3611 goto out_unlock_clean;
3614 net = list_entry(net_head.next, struct lnet_net, net_list);
3615 list_del_init(&net->net_list);
3617 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3619 memset(&tun, 0, sizeof(tun));
3621 tun.lt_cmn.lct_peer_timeout =
3622 (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
3623 conf->cfg_config_u.cfg_net.net_peer_timeout;
3624 tun.lt_cmn.lct_peer_tx_credits =
3625 (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
3626 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3627 tun.lt_cmn.lct_peer_rtr_credits =
3628 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3629 tun.lt_cmn.lct_max_tx_credits =
3630 (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
3631 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3633 rc = lnet_add_net_common(net, &tun);
3636 mutex_unlock(&the_lnet.ln_api_mutex);
3637 while (!list_empty(&net_head)) {
3638 /* net_head list is empty in success case */
3639 net = list_entry(net_head.next, struct lnet_net, net_list);
3640 list_del_init(&net->net_list);
3647 lnet_dyn_del_net(__u32 net_id)
3649 struct lnet_net *net;
3650 struct lnet_ping_buffer *pbuf;
3651 struct lnet_handle_md ping_mdh;
3655 /* don't allow userspace to shut down the LOLND */
3656 if (LNET_NETTYP(net_id) == LOLND)
3659 mutex_lock(&the_lnet.ln_api_mutex);
3663 net = lnet_get_net_locked(net_id);
3670 net_ni_count = lnet_get_net_ni_count_locked(net);
3674 /* create and link a new ping info, before removing the old one */
3675 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3676 lnet_get_ni_count() - net_ni_count, false);
3680 lnet_shutdown_lndnet(net);
3682 lnet_acceptor_stop();
3684 lnet_ping_target_update(pbuf, ping_mdh);
3687 mutex_unlock(&the_lnet.ln_api_mutex);
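/* The DLC sequence number is bumped whenever local NIs are added or
 * removed (see lnet_shutdown_lndni() and lnet_startup_lndnet()), giving
 * configuration readers a cheap way to detect that cached state is stale.
 */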
3692 void lnet_incr_dlc_seq(void)
3694 atomic_inc(&lnet_dlc_seq_no);
3697 __u32 lnet_get_dlc_seq_locked(void)
3699 return atomic_read(&lnet_dlc_seq_no);
3703 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3705 struct lnet_net *net;
3708 lnet_net_lock(LNET_LOCK_EX);
3709 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3710 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3711 if (ni->ni_nid == nid || all) {
3712 atomic_set(&ni->ni_healthv, value);
3713 if (list_empty(&ni->ni_recovery) &&
3714 value < LNET_MAX_HEALTH_VALUE) {
3715 CERROR("manually adding local NI %s to recovery\n",
3716 libcfs_nid2str(ni->ni_nid));
3717 list_add_tail(&ni->ni_recovery,
3718 &the_lnet.ln_mt_localNIRecovq);
3719 lnet_ni_addref_locked(ni, 0);
3722 lnet_net_unlock(LNET_LOCK_EX);
3728 lnet_net_unlock(LNET_LOCK_EX);
3732 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3736 lnet_nid_t nid = stats->hlni_nid;
3738 cpt = lnet_net_lock_current();
3739 ni = lnet_nid2ni_locked(nid, cpt);
3746 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3747 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3748 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3749 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3750 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3751 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3752 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3755 lnet_net_unlock(cpt);
3761 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3766 lnet_net_lock(LNET_LOCK_EX);
3767 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
3768 list->rlst_nid_array[i] = ni->ni_nid;
3770 if (i >= LNET_MAX_SHOW_NUM_NID)
3773 lnet_net_unlock(LNET_LOCK_EX);
3774 list->rlst_num_nids = i;
3780 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3782 struct lnet_peer_ni *lpni;
3785 lnet_net_lock(LNET_LOCK_EX);
3786 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
3787 list->rlst_nid_array[i] = lpni->lpni_nid;
3789 if (i >= LNET_MAX_SHOW_NUM_NID)
3792 lnet_net_unlock(LNET_LOCK_EX);
3793 list->rlst_num_nids = i;
3799 * LNet ioctl handler.
3803 LNetCtl(unsigned int cmd, void *arg)
3805 struct libcfs_ioctl_data *data = arg;
3806 struct lnet_ioctl_config_data *config;
3807 struct lnet_process_id id = {0};
3811 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3812 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
3815 case IOC_LIBCFS_GET_NI:
3816 rc = LNetGetId(data->ioc_count, &id);
3817 data->ioc_nid = id.nid;
3820 case IOC_LIBCFS_FAIL_NID:
3821 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3823 case IOC_LIBCFS_ADD_ROUTE: {
3824 /* default router sensitivity to 1 */
3825 unsigned int sensitivity = 1;
3828 if (config->cfg_hdr.ioc_len < sizeof(*config))
3831 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
3833 config->cfg_config_u.cfg_route.rtr_sensitivity;
3836 mutex_lock(&the_lnet.ln_api_mutex);
3837 rc = lnet_add_route(config->cfg_net,
3838 config->cfg_config_u.cfg_route.rtr_hop,
3840 config->cfg_config_u.cfg_route.
3841 rtr_priority, sensitivity);
3842 mutex_unlock(&the_lnet.ln_api_mutex);
3846 case IOC_LIBCFS_DEL_ROUTE:
3849 if (config->cfg_hdr.ioc_len < sizeof(*config))
3852 mutex_lock(&the_lnet.ln_api_mutex);
3853 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3854 mutex_unlock(&the_lnet.ln_api_mutex);
3857 case IOC_LIBCFS_GET_ROUTE:
3860 if (config->cfg_hdr.ioc_len < sizeof(*config))
3863 mutex_lock(&the_lnet.ln_api_mutex);
3864 rc = lnet_get_route(config->cfg_count,
3866 &config->cfg_config_u.cfg_route.rtr_hop,
3868 &config->cfg_config_u.cfg_route.rtr_flags,
3869 &config->cfg_config_u.cfg_route.
3871 &config->cfg_config_u.cfg_route.
3873 mutex_unlock(&the_lnet.ln_api_mutex);
3876 case IOC_LIBCFS_GET_LOCAL_NI: {
3877 struct lnet_ioctl_config_ni *cfg_ni;
3878 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3879 struct lnet_ioctl_element_stats *stats;
3884 /* get the tunables if they are available */
3885 if (cfg_ni->lic_cfg_hdr.ioc_len <
3886 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3889 stats = (struct lnet_ioctl_element_stats *)
3891 tun = (struct lnet_ioctl_config_lnd_tunables *)
3892 (cfg_ni->lic_bulk + sizeof(*stats));
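/* lic_bulk layout here: a struct lnet_ioctl_element_stats immediately
 * followed by a struct lnet_ioctl_config_lnd_tunables.
 */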
3894 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3897 mutex_lock(&the_lnet.ln_api_mutex);
3898 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3899 mutex_unlock(&the_lnet.ln_api_mutex);
3903 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3904 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3906 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3909 mutex_lock(&the_lnet.ln_api_mutex);
3910 rc = lnet_get_ni_stats(msg_stats);
3911 mutex_unlock(&the_lnet.ln_api_mutex);
3916 case IOC_LIBCFS_GET_NET: {
3917 size_t total = sizeof(*config) +
3918 sizeof(struct lnet_ioctl_net_config);
3921 if (config->cfg_hdr.ioc_len < total)
3924 mutex_lock(&the_lnet.ln_api_mutex);
3925 rc = lnet_get_net_config(config);
3926 mutex_unlock(&the_lnet.ln_api_mutex);
3930 case IOC_LIBCFS_GET_LNET_STATS:
3932 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3934 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3937 mutex_lock(&the_lnet.ln_api_mutex);
3938 rc = lnet_counters_get(&lnet_stats->st_cntrs);
3939 mutex_unlock(&the_lnet.ln_api_mutex);
3943 case IOC_LIBCFS_CONFIG_RTR:
3946 if (config->cfg_hdr.ioc_len < sizeof(*config))
3949 mutex_lock(&the_lnet.ln_api_mutex);
3950 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3951 rc = lnet_rtrpools_enable();
3952 mutex_unlock(&the_lnet.ln_api_mutex);
3955 lnet_rtrpools_disable();
3956 mutex_unlock(&the_lnet.ln_api_mutex);
3959 case IOC_LIBCFS_ADD_BUF:
3962 if (config->cfg_hdr.ioc_len < sizeof(*config))
3965 mutex_lock(&the_lnet.ln_api_mutex);
3966 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3968 config->cfg_config_u.cfg_buffers.
3970 config->cfg_config_u.cfg_buffers.
3972 mutex_unlock(&the_lnet.ln_api_mutex);
3975 case IOC_LIBCFS_SET_NUMA_RANGE: {
3976 struct lnet_ioctl_set_value *numa;
3978 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3980 lnet_net_lock(LNET_LOCK_EX);
3981 lnet_numa_range = numa->sv_value;
3982 lnet_net_unlock(LNET_LOCK_EX);
3986 case IOC_LIBCFS_GET_NUMA_RANGE: {
3987 struct lnet_ioctl_set_value *numa;
3989 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3991 numa->sv_value = lnet_numa_range;
3995 case IOC_LIBCFS_GET_BUF: {
3996 struct lnet_ioctl_pool_cfg *pool_cfg;
3997 size_t total = sizeof(*config) + sizeof(*pool_cfg);
4001 if (config->cfg_hdr.ioc_len < total)
4004 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
4006 mutex_lock(&the_lnet.ln_api_mutex);
4007 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
4008 mutex_unlock(&the_lnet.ln_api_mutex);
4012 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
4013 struct lnet_ioctl_local_ni_hstats *stats = arg;
4015 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
4018 mutex_lock(&the_lnet.ln_api_mutex);
4019 rc = lnet_get_local_ni_hstats(stats);
4020 mutex_unlock(&the_lnet.ln_api_mutex);
4025 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
4026 struct lnet_ioctl_recovery_list *list = arg;
4027 if (list->rlst_hdr.ioc_len < sizeof(*list))
4030 mutex_lock(&the_lnet.ln_api_mutex);
4031 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
4032 rc = lnet_get_local_ni_recovery_list(list);
4034 rc = lnet_get_peer_ni_recovery_list(list);
4035 mutex_unlock(&the_lnet.ln_api_mutex);
4039 case IOC_LIBCFS_ADD_PEER_NI: {
4040 struct lnet_ioctl_peer_cfg *cfg = arg;
4042 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4045 mutex_lock(&the_lnet.ln_api_mutex);
4046 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
4049 mutex_unlock(&the_lnet.ln_api_mutex);
4053 case IOC_LIBCFS_DEL_PEER_NI: {
4054 struct lnet_ioctl_peer_cfg *cfg = arg;
4056 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4059 mutex_lock(&the_lnet.ln_api_mutex);
4060 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
4061 cfg->prcfg_cfg_nid);
4062 mutex_unlock(&the_lnet.ln_api_mutex);
4066 case IOC_LIBCFS_GET_PEER_INFO: {
4067 struct lnet_ioctl_peer *peer_info = arg;
4069 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
4072 mutex_lock(&the_lnet.ln_api_mutex);
4073 rc = lnet_get_peer_ni_info(
4074 peer_info->pr_count,
4076 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
4077 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
4078 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
4079 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
4080 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
4081 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
4082 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
4083 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
4084 mutex_unlock(&the_lnet.ln_api_mutex);
4088 case IOC_LIBCFS_GET_PEER_NI: {
4089 struct lnet_ioctl_peer_cfg *cfg = arg;
4091 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4094 mutex_lock(&the_lnet.ln_api_mutex);
4095 rc = lnet_get_peer_info(cfg,
4096 (void __user *)cfg->prcfg_bulk);
4097 mutex_unlock(&the_lnet.ln_api_mutex);
4101 case IOC_LIBCFS_GET_PEER_LIST: {
4102 struct lnet_ioctl_peer_cfg *cfg = arg;
4104 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4107 mutex_lock(&the_lnet.ln_api_mutex);
4108 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
4109 (struct lnet_process_id __user *)cfg->prcfg_bulk);
4110 mutex_unlock(&the_lnet.ln_api_mutex);
4114 case IOC_LIBCFS_SET_HEALHV: {
4115 struct lnet_ioctl_reset_health_cfg *cfg = arg;
4117 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
4119 if (cfg->rh_value < 0 ||
4120 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
4121 value = LNET_MAX_HEALTH_VALUE;
4123 value = cfg->rh_value;
4124 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
4125 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
4126 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
4127 mutex_lock(&the_lnet.ln_api_mutex);
4128 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
4129 lnet_ni_set_healthv(cfg->rh_nid, value,
4132 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
4134 mutex_unlock(&the_lnet.ln_api_mutex);
4138 case IOC_LIBCFS_NOTIFY_ROUTER: {
4139 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
4141 /* The deadline passed in by the user should be some time in
4142 * seconds in the future since the UNIX epoch. We have to map
4143 * that deadline to the monotonic clock.
4145 deadline += ktime_get_seconds();
4146 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, false,
4150 case IOC_LIBCFS_LNET_DIST:
4151 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
4152 if (rc < 0 && rc != -EHOSTUNREACH)
4155 data->ioc_u32[0] = rc;
4158 case IOC_LIBCFS_TESTPROTOCOMPAT:
4159 the_lnet.ln_testprotocompat = data->ioc_flags;
4162 case IOC_LIBCFS_LNET_FAULT:
4163 return lnet_fault_ctl(data->ioc_flags, data);
4165 case IOC_LIBCFS_PING: {
4166 signed long timeout;
4168 id.nid = data->ioc_nid;
4169 id.pid = data->ioc_u32[0];
4171 /* If timeout is <= 0 or out of range then default to 3 minutes */
4172 if (((s32)data->ioc_u32[1] <= 0) ||
4173 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4174 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4176 timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
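/* e.g. a user-supplied ioc_u32[1] of 1500 (milliseconds) becomes 1.5
 * seconds worth of jiffies; out-of-range values fall back to
 * DEFAULT_PEER_TIMEOUT seconds.
 */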
4178 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
4179 data->ioc_plen1 / sizeof(struct lnet_process_id));
4184 data->ioc_count = rc;
4188 case IOC_LIBCFS_PING_PEER: {
4189 struct lnet_ioctl_ping_data *ping = arg;
4190 struct lnet_peer *lp;
4191 signed long timeout;
4193 /* If timeout is <= 0 or out of range then default to 3 minutes */
4194 if (((s32)ping->op_param) <= 0 ||
4195 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4196 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4198 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
4200 rc = lnet_ping(ping->ping_id, timeout,
4206 mutex_lock(&the_lnet.ln_api_mutex);
4207 lp = lnet_find_peer(ping->ping_id.nid);
4209 ping->ping_id.nid = lp->lp_primary_nid;
4210 ping->mr_info = lnet_peer_is_multi_rail(lp);
4211 lnet_peer_decref_locked(lp);
4213 mutex_unlock(&the_lnet.ln_api_mutex);
4215 ping->ping_count = rc;
4219 case IOC_LIBCFS_DISCOVER: {
4220 struct lnet_ioctl_ping_data *discover = arg;
4221 struct lnet_peer *lp;
4223 rc = lnet_discover(discover->ping_id, discover->op_param,
4225 discover->ping_count);
4229 mutex_lock(&the_lnet.ln_api_mutex);
4230 lp = lnet_find_peer(discover->ping_id.nid);
4232 discover->ping_id.nid = lp->lp_primary_nid;
4233 discover->mr_info = lnet_peer_is_multi_rail(lp);
4234 lnet_peer_decref_locked(lp);
4236 mutex_unlock(&the_lnet.ln_api_mutex);
4238 discover->ping_count = rc;
4242 case IOC_LIBCFS_ADD_UDSP: {
4243 struct lnet_ioctl_udsp *ioc_udsp = arg;
4244 __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
4246 mutex_lock(&the_lnet.ln_api_mutex);
4247 rc = lnet_udsp_demarshal_add(arg, bulk_size);
4249 rc = lnet_udsp_apply_policies(NULL, false);
4250 CDEBUG(D_NET, "policy application returned %d\n", rc);
4253 mutex_unlock(&the_lnet.ln_api_mutex);
4258 case IOC_LIBCFS_DEL_UDSP: {
4259 struct lnet_ioctl_udsp *ioc_udsp = arg;
4260 int idx = ioc_udsp->iou_idx;
4262 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4265 mutex_lock(&the_lnet.ln_api_mutex);
4266 rc = lnet_udsp_del_policy(idx);
4268 rc = lnet_udsp_apply_policies(NULL, false);
4269 CDEBUG(D_NET, "policy re-application returned %d\n",
4273 mutex_unlock(&the_lnet.ln_api_mutex);
4278 case IOC_LIBCFS_GET_UDSP_SIZE: {
4279 struct lnet_ioctl_udsp *ioc_udsp = arg;
4280 struct lnet_udsp *udsp;
4282 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4287 mutex_lock(&the_lnet.ln_api_mutex);
4288 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4292 /* Coming in, iou_idx holds the index of the UDSP
4293 * to get the size of. Going out, iou_idx holds
4294 * the size of the UDSP found at the passed-in index. */
4297 ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
4298 if (ioc_udsp->iou_idx < 0)
4301 mutex_unlock(&the_lnet.ln_api_mutex);
4306 case IOC_LIBCFS_GET_UDSP: {
4307 struct lnet_ioctl_udsp *ioc_udsp = arg;
4308 struct lnet_udsp *udsp;
4310 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4315 mutex_lock(&the_lnet.ln_api_mutex);
4316 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4320 rc = lnet_udsp_marshal(udsp, ioc_udsp);
4321 mutex_unlock(&the_lnet.ln_api_mutex);
4326 case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
4327 struct lnet_ioctl_construct_udsp_info *info = arg;
4329 if (info->cud_hdr.ioc_len < sizeof(*info))
4332 CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
4333 libcfs_nid2str(info->cud_nid));
4335 mutex_lock(&the_lnet.ln_api_mutex);
4336 lnet_udsp_get_construct_info(info);
4337 mutex_unlock(&the_lnet.ln_api_mutex);
4343 ni = lnet_net2ni_addref(data->ioc_net);
4347 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
4350 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
4357 EXPORT_SYMBOL(LNetCtl);
4359 void LNetDebugPeer(struct lnet_process_id id)
4361 lnet_debug_peer(id.nid);
4363 EXPORT_SYMBOL(LNetDebugPeer);
4366 * Determine if the specified peer \a nid is on the local node.
4368 * \param nid peer nid to check
4370 * \retval true If peer NID is on the local node.
4371 * \retval false If peer NID is not on the local node.
4373 bool LNetIsPeerLocal(lnet_nid_t nid)
4375 struct lnet_net *net;
4379 cpt = lnet_net_lock_current();
4380 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4381 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4382 if (ni->ni_nid == nid) {
4383 lnet_net_unlock(cpt);
4388 lnet_net_unlock(cpt);
4392 EXPORT_SYMBOL(LNetIsPeerLocal);
4395 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
4396 * Note that all interfaces share the same PID, as requested by LNetNIInit().
4398 * \param index Index of the interface to look up.
4399 * \param id On successful return, this location will hold the
4400 * struct lnet_process_id ID of the interface.
4402 * \retval 0 If an interface exists at \a index.
4403 * \retval -ENOENT If no interface has been found.
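 *
 * A sketch of enumerating all local interfaces (hypothetical caller):
 *
 *	struct lnet_process_id id;
 *	unsigned int i;
 *
 *	for (i = 0; LNetGetId(i, &id) != -ENOENT; i++)
 *		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));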
4406 LNetGetId(unsigned int index, struct lnet_process_id *id)
4409 struct lnet_net *net;
4413 LASSERT(the_lnet.ln_refcount > 0);
4415 cpt = lnet_net_lock_current();
4417 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4418 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4422 id->nid = ni->ni_nid;
4423 id->pid = the_lnet.ln_pid;
4429 lnet_net_unlock(cpt);
4432 EXPORT_SYMBOL(LNetGetId);
4437 struct lnet_handle_md mdh;
4438 struct completion completion;
4442 lnet_ping_event_handler(struct lnet_event *event)
4444 struct ping_data *pd = event->md_user_ptr;
4446 CDEBUG(D_NET, "ping event (%d %d)%s\n",
4447 event->type, event->status,
4448 event->unlinked ? " unlinked" : "");
4450 if (event->status) {
4452 pd->rc = event->status;
4453 } else if (event->type == LNET_EVENT_REPLY) {
4455 pd->rc = event->mlength;
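/* Only signal completion once the MD is unlinked: after that no
 * further events can reference pd, so the waiter can safely return.
 */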
4457 if (event->unlinked)
4458 complete(&pd->completion);
4461 static int lnet_ping(struct lnet_process_id id, signed long timeout,
4462 struct lnet_process_id __user *ids, int n_ids)
4464 struct lnet_md md = { NULL };
4465 struct ping_data pd = { 0 };
4466 struct lnet_ping_buffer *pbuf;
4467 struct lnet_process_id tmpid;
4473 /* n_ids limit is arbitrary */
4474 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
4478 * if the user buffer has more space than the lnet_interfaces_max
4479 * then only fill it up to lnet_interfaces_max
4481 if (n_ids > lnet_interfaces_max)
4482 n_ids = lnet_interfaces_max;
4484 if (id.pid == LNET_PID_ANY)
4485 id.pid = LNET_PID_LUSTRE;
4487 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
4491 /* initialize md content */
4492 md.start = &pbuf->pb_info;
4493 md.length = LNET_PING_INFO_SIZE(n_ids);
4494 md.threshold = 2; /* GET/REPLY */
4496 md.options = LNET_MD_TRUNCATE;
4498 md.handler = lnet_ping_event_handler;
4500 init_completion(&pd.completion);
4502 rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
4504 CERROR("Can't bind MD: %d\n", rc);
4505 goto fail_ping_buffer_decref;
4508 rc = LNetGet(LNET_NID_ANY, pd.mdh, id,
4509 LNET_RESERVED_PORTAL,
4510 LNET_PROTO_PING_MATCHBITS, 0, false);
4513 /* Don't CERROR; this could be deliberate! */
4514 rc2 = LNetMDUnlink(pd.mdh);
4517 /* NB must wait for the UNLINK event below... */
4520 if (wait_for_completion_timeout(&pd.completion, timeout) == 0) {
4521 /* Ensure completion in finite time... */
4522 LNetMDUnlink(pd.mdh);
4523 wait_for_completion(&pd.completion);
4527 goto fail_ping_buffer_decref;
4531 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
4533 rc = -EPROTO; /* if I can't parse... */
4536 CERROR("%s: ping info too short %d\n",
4537 libcfs_id2str(id), nob);
4538 goto fail_ping_buffer_decref;
4541 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
4542 lnet_swap_pinginfo(pbuf);
4543 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
4544 CERROR("%s: Unexpected magic %08x\n",
4545 libcfs_id2str(id), pbuf->pb_info.pi_magic);
4546 goto fail_ping_buffer_decref;
4549 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
4550 CERROR("%s: ping w/o NI status: 0x%x\n",
4551 libcfs_id2str(id), pbuf->pb_info.pi_features);
4552 goto fail_ping_buffer_decref;
4555 if (nob < LNET_PING_INFO_SIZE(0)) {
4556 CERROR("%s: Short reply %d(%d min)\n",
4558 nob, (int)LNET_PING_INFO_SIZE(0));
4559 goto fail_ping_buffer_decref;
4562 if (pbuf->pb_info.pi_nnis < n_ids)
4563 n_ids = pbuf->pb_info.pi_nnis;
4565 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
4566 CERROR("%s: Short reply %d(%d expected)\n",
4568 nob, (int)LNET_PING_INFO_SIZE(n_ids));
4569 goto fail_ping_buffer_decref;
4572 rc = -EFAULT; /* if I segv in copy_to_user()... */
4574 memset(&tmpid, 0, sizeof(tmpid));
4575 for (i = 0; i < n_ids; i++) {
4576 tmpid.pid = pbuf->pb_info.pi_pid;
4577 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
4578 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
4579 goto fail_ping_buffer_decref;
4581 rc = pbuf->pb_info.pi_nnis;
4583 fail_ping_buffer_decref:
4584 lnet_ping_buffer_decref(pbuf);
4589 lnet_discover(struct lnet_process_id id, __u32 force,
4590 struct lnet_process_id __user *ids, int n_ids)
4592 struct lnet_peer_ni *lpni;
4593 struct lnet_peer_ni *p;
4594 struct lnet_peer *lp;
4595 struct lnet_process_id *buf;
4601 id.nid == LNET_NID_ANY)
4604 if (id.pid == LNET_PID_ANY)
4605 id.pid = LNET_PID_LUSTRE;
4608 * If the user buffer has more space than the lnet_interfaces_max,
4609 * then only fill it up to lnet_interfaces_max.
4611 if (n_ids > lnet_interfaces_max)
4612 n_ids = lnet_interfaces_max;
4614 CFS_ALLOC_PTR_ARRAY(buf, n_ids);
4618 cpt = lnet_net_lock_current();
4619 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
4626 * Clearing the NIDS_UPTODATE flag ensures the peer will
4627 * be discovered, provided discovery has not been disabled.
4629 lp = lpni->lpni_peer_net->lpn_peer;
4630 spin_lock(&lp->lp_lock);
4631 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
4632 /* If the force flag is set, force a PING and PUSH as well. */
4634 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
4635 spin_unlock(&lp->lp_lock);
4636 rc = lnet_discover_peer_locked(lpni, cpt, true);
4640 /* The lpni (or lp) for this NID may have changed and our ref is
4641 * the only thing keeping the old one around. Release the ref
4642 * and look up the lpni again */
4644 lnet_peer_ni_decref_locked(lpni);
4645 lpni = lnet_find_peer_ni_locked(id.nid);
4650 lp = lpni->lpni_peer_net->lpn_peer;
4654 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
4655 buf[i].pid = id.pid;
4656 buf[i].nid = p->lpni_nid;
4663 lnet_peer_ni_decref_locked(lpni);
4665 lnet_net_unlock(cpt);
4668 if (copy_to_user(ids, buf, rc * sizeof(*buf)))
4670 CFS_FREE_PTR_ARRAY(buf, n_ids);
4676 * Retrieve peer discovery status.
4678 * \retval 1 if lnet_peer_discovery_disabled is 0
4679 * \retval 0 if lnet_peer_discovery_disabled is 1
4682 LNetGetPeerDiscoveryStatus(void)
4684 return !lnet_peer_discovery_disabled;
4686 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);