/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE
/*
 * initialize ln_api_mutex statically, since it needs to be used in
 * discovery_set callback. That module parameter callback can be called
 * before module init completes. The mutex needs to be ready for use then.
 */
struct lnet the_lnet = {
	.ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
};	/* THE state of the network */
EXPORT_SYMBOL(the_lnet);
static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");

static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
		 "use_tcp_bonding parameter has been removed");

unsigned int lnet_numa_range = 0;
module_param(lnet_numa_range, uint, 0444);
MODULE_PARM_DESC(lnet_numa_range,
		 "NUMA range to consider during Multi-Rail selection");
/*
 * lnet_health_sensitivity determines by how much we decrement the health
 * value on sending error. The value defaults to 100, which means an
 * interface's health value is decremented by 100 points on every send
 * failure.
 */
unsigned int lnet_health_sensitivity = 100;
static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_health_sensitivity = {
	.set = sensitivity_set,
	.get = param_get_int,
};
#define param_check_health_sensitivity(name, p) \
		__param_check(name, p, int)
module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
		  &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_health_sensitivity,
		"Value to decrement the health value by on error");
/*
 * lnet_recovery_interval determines how often we should perform recovery
 * on unhealthy interfaces.
 */
unsigned int lnet_recovery_interval = 1;
static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_recovery_interval = {
	.set = recovery_interval_set,
	.get = param_get_int,
};
#define param_check_recovery_interval(name, p) \
		__param_check(name, p, int)
module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
		  &lnet_recovery_interval, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_recovery_interval,
		"DEPRECATED - Interval to recover unhealthy interfaces in seconds");
unsigned int lnet_recovery_limit;
module_param(lnet_recovery_limit, uint, 0644);
MODULE_PARM_DESC(lnet_recovery_limit,
		 "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");
static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_interfaces_max = {
	.set = intf_max_set,
	.get = param_get_int,
};

#define param_check_interfaces_max(name, p) \
		__param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_interfaces_max, interfaces_max, 0644);
#else
module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
		  &param_ops_interfaces_max, 0644);
#endif

MODULE_PARM_DESC(lnet_interfaces_max,
		"Maximum number of interfaces in a node.");
unsigned lnet_peer_discovery_disabled = 0;
static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_discovery_disabled = {
	.set = discovery_set,
	.get = param_get_int,
};

#define param_check_discovery_disabled(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
#else
module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
		  &param_ops_discovery_disabled, 0644);
#endif

MODULE_PARM_DESC(lnet_peer_discovery_disabled,
		"Set to 1 to disable peer discovery on this node.");
unsigned int lnet_drop_asym_route;
static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_drop_asym_route = {
	.set = drop_asym_route_set,
	.get = param_get_int,
};

#define param_check_drop_asym_route(name, p) \
	__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_drop_asym_route, drop_asym_route, 0644);
#else
module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
		  &param_ops_drop_asym_route, 0644);
#endif

MODULE_PARM_DESC(lnet_drop_asym_route,
		"Set to 1 to drop asymmetrical route messages.");
#define LNET_TRANSACTION_TIMEOUT_DEFAULT 50
unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_transaction_timeout = {
	.set = transaction_to_set,
	.get = param_get_int,
};

#define param_check_transaction_timeout(name, p) \
		__param_check(name, p, int)
module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
		  &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
#endif

MODULE_PARM_DESC(lnet_transaction_timeout,
		"Maximum number of seconds to wait for a peer response.");
#define LNET_RETRY_COUNT_DEFAULT 2
unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_retry_count = {
	.set = retry_count_set,
	.get = param_get_int,
};

#define param_check_retry_count(name, p) \
		__param_check(name, p, int)
module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_retry_count, retry_count_set, param_get_int,
		  &lnet_retry_count, S_IRUGO|S_IWUSR);
#endif

MODULE_PARM_DESC(lnet_retry_count,
		 "Maximum number of times to retry transmitting a message");
unsigned int lnet_response_tracking = 3;
static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);

#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_response_tracking = {
	.set = response_tracking_set,
	.get = param_get_int,
};

#define param_check_response_tracking(name, p) \
	__param_check(name, p, int)
module_param(lnet_response_tracking, response_tracking, 0644);
#else
module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
		  &lnet_response_tracking, 0644);
#endif
MODULE_PARM_DESC(lnet_response_tracking,
		 "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");
#define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
				  (LNET_RETRY_COUNT_DEFAULT + 1))
unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;
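/*
 * Worked example with the defaults above: lnet_lnd_timeout =
 * (50 - 1) / (2 + 1) = 16 seconds per send attempt, so the first
 * send plus lnet_retry_count retries all complete within a single
 * transaction timeout.
 */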
static void lnet_set_lnd_timeout(void)
{
	lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
			   (lnet_retry_count + 1);
}
unsigned int lnet_current_net_count;

/*
 * This sequence number keeps track of how many times DLC was used to
 * update the local NIs. It is incremented when a NI is added or
 * removed and checked when sending a message to determine if there is
 * a need to re-run the selection algorithm. See lnet_select_pathway()
 * for more details on its usage.
 */
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);

static int lnet_ping(struct lnet_process_id id, signed long timeout,
		     struct lnet_process_id __user *ids, int n_ids);

static int lnet_discover(struct lnet_process_id id, __u32 force,
			 struct lnet_process_id __user *ids, int n_ids);
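/*
 * Setter for lnet_health_sensitivity. Values above LNET_MAX_HEALTH_VALUE
 * are rejected, and writing 0 (disabling the health feature) also forces
 * lnet_retry_count back to 0, since resends are driven by health tracking.
 */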
static int
sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *sensitivity = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value > LNET_MAX_HEALTH_VALUE) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid health value. Maximum: %d value = %lu\n",
		       LNET_MAX_HEALTH_VALUE, value);
		return -EINVAL;
	}

	if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
		lnet_retry_count = 0;
		lnet_set_lnd_timeout();
	}

	*sensitivity = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
static int
recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	CWARN("'lnet_recovery_interval' has been deprecated\n");

	return 0;
}
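/*
 * Setter for lnet_peer_discovery_disabled. The value is normalized to
 * 0 or 1 and stored under ln_api_mutex. If LNet is running, the ping
 * target's feature bits are updated so peers can observe the change,
 * and a push is sent only when discovery is being turned off.
 */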
static int
discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *discovery_off = (unsigned *)kp->arg;
	unsigned long value;
	struct lnet_ping_buffer *pbuf;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
		return rc;
	}

	value = (value) ? 1 : 0;

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value == *discovery_off) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	/*
	 * We still want to set the discovery value even when LNet is not
	 * running. This is the case when LNet is being loaded and we want
	 * the module parameters to take effect. Otherwise if we're
	 * changing the value dynamically, we want to set it after
	 * updating the peers.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		*discovery_off = value;
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	/* tell peers that discovery setting has changed */
	lnet_net_lock(LNET_LOCK_EX);
	pbuf = the_lnet.ln_ping_target;
	if (value)
		pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
	else
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
	lnet_net_unlock(LNET_LOCK_EX);

	/* only send a push when we're turning off discovery */
	if (*discovery_off <= 0 && value > 0)
		lnet_push_update_to_peers(1);
	*discovery_off = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
static int
drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned int *drop_asym_route = (unsigned int *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_drop_asym_route'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value == *drop_asym_route) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*drop_asym_route = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
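/*
 * Setter for lnet_transaction_timeout. The timeout must stay strictly
 * greater than lnet_retry_count so that each send attempt receives a
 * positive LND timeout; on success lnet_lnd_timeout is recomputed.
 */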
static int
transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *transaction_to = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value <= lnet_retry_count || value == 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid value for lnet_transaction_timeout (%lu). "
		       "Has to be greater than lnet_retry_count (%u)\n",
		       value, lnet_retry_count);
		return -EINVAL;
	}

	if (value == *transaction_to) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*transaction_to = value;
	/* Update the lnet_lnd_timeout now that we've modified the
	 * transaction timeout
	 */
	lnet_set_lnd_timeout();

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
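/*
 * Setter for lnet_retry_count. Retries are only meaningful while the
 * health feature is enabled, and the count must stay below
 * lnet_transaction_timeout so the derived LND timeout remains positive.
 */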
static int
retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *retry_count = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (lnet_health_sensitivity == 0 && value > 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Can not set lnet_retry_count when health feature is turned off\n");
		return -EINVAL;
	}

	if (value > lnet_transaction_timeout) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid value for lnet_retry_count (%lu). "
		       "Has to be smaller than lnet_transaction_timeout (%u)\n",
		       value, lnet_transaction_timeout);
		return -EINVAL;
	}

	*retry_count = value;

	/* Update the lnet_lnd_timeout now that we've modified the
	 * retry count
	 */
	lnet_set_lnd_timeout();

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
static int
intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int value, rc;

	rc = kstrtoint(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
		return rc;
	}

	if (value < LNET_INTERFACES_MIN) {
		CWARN("max interfaces provided are too small, setting to %d\n",
		      LNET_INTERFACES_MAX_DEFAULT);
		value = LNET_INTERFACES_MAX_DEFAULT;
	}

	*(int *)kp->arg = value;

	return 0;
}
static int
response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned long new_value;

	rc = kstrtoul(val, 0, &new_value);
	if (rc) {
		CERROR("Invalid value for 'lnet_response_tracking'\n");
		return rc;
	}

	if (new_value > 3) {
		CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
		      new_value);
		return -EINVAL;
	}

	lnet_response_tracking = new_value;

	return 0;
}
static char *
lnet_get_routes(void)
{
	return routes;
}
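/*
 * At most one of the "networks" and "ip2nets" module parameters may be
 * used to configure the local networks; supplying both is rejected
 * below, and with neither set the configuration defaults to "tcp".
 */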
static char *
lnet_get_networks(void)
{
	char *nets;
	int rc;

	if (*networks != 0 && *ip2nets != 0) {
		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
				   "'ip2nets' but not both at once\n");
		return NULL;
	}

	if (*ip2nets != 0) {
		rc = lnet_parse_ip2nets(&nets, ip2nets);
		return (rc == 0) ? nets : NULL;
	}

	if (*networks != 0)
		return networks;

	return "tcp";
}
static void
lnet_init_locks(void)
{
	spin_lock_init(&the_lnet.ln_eq_wait_lock);
	spin_lock_init(&the_lnet.ln_msg_resend_lock);
	init_completion(&the_lnet.ln_mt_wait_complete);
	mutex_init(&the_lnet.ln_lnd_mutex);
}
struct kmem_cache *lnet_mes_cachep;	   /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
					    * MDs kmem_cache */
struct kmem_cache *lnet_udsp_cachep;	   /* udsp cache */
struct kmem_cache *lnet_rspt_cachep;	   /* response tracker cache */
struct kmem_cache *lnet_msg_cachep;
static int
lnet_slab_setup(void)
{
	/* create specific kmem_cache for MEs and small MDs (i.e., originally
	 * allocated in <size-xxx> kmem_cache).
	 */
	lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
					    0, 0, NULL);
	if (!lnet_mes_cachep)
		return -ENOMEM;

	lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
						  LNET_SMALL_MD_SIZE, 0, 0,
						  NULL);
	if (!lnet_small_mds_cachep)
		return -ENOMEM;

	lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
					     sizeof(struct lnet_udsp),
					     0, 0, NULL);
	if (!lnet_udsp_cachep)
		return -ENOMEM;

	lnet_rspt_cachep = kmem_cache_create("lnet_rspt", sizeof(struct lnet_rsp_tracker),
					     0, 0, NULL);
	if (!lnet_rspt_cachep)
		return -ENOMEM;

	lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
					    0, 0, NULL);
	if (!lnet_msg_cachep)
		return -ENOMEM;

	return 0;
}
static void
lnet_slab_cleanup(void)
{
	if (lnet_msg_cachep) {
		kmem_cache_destroy(lnet_msg_cachep);
		lnet_msg_cachep = NULL;
	}

	if (lnet_rspt_cachep) {
		kmem_cache_destroy(lnet_rspt_cachep);
		lnet_rspt_cachep = NULL;
	}

	if (lnet_udsp_cachep) {
		kmem_cache_destroy(lnet_udsp_cachep);
		lnet_udsp_cachep = NULL;
	}

	if (lnet_small_mds_cachep) {
		kmem_cache_destroy(lnet_small_mds_cachep);
		lnet_small_mds_cachep = NULL;
	}

	if (lnet_mes_cachep) {
		kmem_cache_destroy(lnet_mes_cachep);
		lnet_mes_cachep = NULL;
	}
}
static int
lnet_create_remote_nets_table(void)
{
	int i;
	struct list_head *hash;

	LASSERT(the_lnet.ln_remote_nets_hash == NULL);
	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
	CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
	if (hash == NULL) {
		CERROR("Failed to create remote nets hash table\n");
		return -ENOMEM;
	}

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		INIT_LIST_HEAD(&hash[i]);
	the_lnet.ln_remote_nets_hash = hash;
	return 0;
}
static void
lnet_destroy_remote_nets_table(void)
{
	int i;

	if (the_lnet.ln_remote_nets_hash == NULL)
		return;

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));

	CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
			   LNET_REMOTE_NETS_HASH_SIZE);
	the_lnet.ln_remote_nets_hash = NULL;
}
static void
lnet_destroy_locks(void)
{
	if (the_lnet.ln_res_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_res_lock);
		the_lnet.ln_res_lock = NULL;
	}

	if (the_lnet.ln_net_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_net_lock);
		the_lnet.ln_net_lock = NULL;
	}
}
static int
lnet_create_locks(void)
{
	lnet_init_locks();

	the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_res_lock == NULL)
		goto failed;

	the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_net_lock == NULL)
		goto failed;

	return 0;

failed:
	lnet_destroy_locks();
	return -ENOMEM;
}
static void lnet_assert_wire_constants(void)
{
	/* Wire protocol assertions generated by 'wirecheck'
	 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
	 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
	 */

	/* Constants... */
	BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
	BUILD_BUG_ON(LNET_MSG_ACK != 0);
	BUILD_BUG_ON(LNET_MSG_PUT != 1);
	BUILD_BUG_ON(LNET_MSG_GET != 2);
	BUILD_BUG_ON(LNET_MSG_REPLY != 3);
	BUILD_BUG_ON(LNET_MSG_HELLO != 4);

	BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
	BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);

	/* Checks for struct lnet_process_id_packed */
	BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
	BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);
	/* Checks for struct lnet_handle_wire */
	BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
				   wh_interface_cookie) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
				   wh_object_cookie) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);

	/* Checks for struct lnet_magicversion */
	BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
				   version_minor) != 6);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);

	/* Checks for struct lnet_hdr */
	BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);
	/* Ack */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);

	/* Put */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);

	/* Get */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);

	/* Reply */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);

	/* Hello */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);
	/* Checks for struct lnet_ni_status and related constants */
	BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
	BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
	BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);

	/* Checks for struct lnet_ni_status */
	BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_unused) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) != 4);

	/* Checks for struct lnet_ping_info and related constants */
	BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
	BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
	BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
	BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
	BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
	BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
	BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
	BUILD_BUG_ON(LNET_PING_FEAT_BITS != 31);

	/* Checks for struct lnet_ping_info */
	BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);

	/* Acceptor connection request */
	BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);

	/* Checks for struct lnet_acceptor_connreq */
	BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);

	/* Checks for struct lnet_counters_common */
	BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
}
static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
{
	const struct lnet_lnd *lnd;

	/* holding lnd mutex */
	if (type >= NUM_LNDS)
		return NULL;
	lnd = the_lnet.ln_lnds[type];
	LASSERT(!lnd || lnd->lnd_type == type);

	return lnd;
}
unsigned int
lnet_get_lnd_timeout(void)
{
	return lnet_lnd_timeout;
}
EXPORT_SYMBOL(lnet_get_lnd_timeout);
void
lnet_register_lnd(const struct lnet_lnd *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);

	the_lnet.ln_lnds[lnd->lnd_type] = lnd;

	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_register_lnd);
void
lnet_unregister_lnd(const struct lnet_lnd *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);

	the_lnet.ln_lnds[lnd->lnd_type] = NULL;
	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);
static void
lnet_counters_get_common_locked(struct lnet_counters_common *common)
{
	struct lnet_counters *ctr;
	int i;

	/* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
	 * actually called under the protection of the lnet_net_lock.
	 */
	memset(common, 0, sizeof(*common));

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
		common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
		common->lcc_errors += ctr->lct_common.lcc_errors;
		common->lcc_send_count += ctr->lct_common.lcc_send_count;
		common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
		common->lcc_route_count += ctr->lct_common.lcc_route_count;
		common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
		common->lcc_send_length += ctr->lct_common.lcc_send_length;
		common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
		common->lcc_route_length += ctr->lct_common.lcc_route_length;
		common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
	}
}
void
lnet_counters_get_common(struct lnet_counters_common *common)
{
	lnet_net_lock(LNET_LOCK_EX);
	lnet_counters_get_common_locked(common);
	lnet_net_unlock(LNET_LOCK_EX);
}
EXPORT_SYMBOL(lnet_counters_get_common);
int
lnet_counters_get(struct lnet_counters *counters)
{
	struct lnet_counters *ctr;
	struct lnet_counters_health *health = &counters->lct_health;
	int i;
	int rc = 0;

	memset(counters, 0, sizeof(*counters));

	lnet_net_lock(LNET_LOCK_EX);

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		GOTO(out_unlock, rc = -ENODEV);

	lnet_counters_get_common_locked(&counters->lct_common);

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
		health->lch_resend_count += ctr->lct_health.lch_resend_count;
		health->lch_response_timeout_count +=
				ctr->lct_health.lch_response_timeout_count;
		health->lch_local_interrupt_count +=
				ctr->lct_health.lch_local_interrupt_count;
		health->lch_local_dropped_count +=
				ctr->lct_health.lch_local_dropped_count;
		health->lch_local_aborted_count +=
				ctr->lct_health.lch_local_aborted_count;
		health->lch_local_no_route_count +=
				ctr->lct_health.lch_local_no_route_count;
		health->lch_local_timeout_count +=
				ctr->lct_health.lch_local_timeout_count;
		health->lch_local_error_count +=
				ctr->lct_health.lch_local_error_count;
		health->lch_remote_dropped_count +=
				ctr->lct_health.lch_remote_dropped_count;
		health->lch_remote_error_count +=
				ctr->lct_health.lch_remote_error_count;
		health->lch_remote_timeout_count +=
				ctr->lct_health.lch_remote_timeout_count;
		health->lch_network_timeout_count +=
				ctr->lct_health.lch_network_timeout_count;
	}
out_unlock:
	lnet_net_unlock(LNET_LOCK_EX);
	return rc;
}
EXPORT_SYMBOL(lnet_counters_get);
void
lnet_counters_reset(void)
{
	struct lnet_counters *counters;
	int i;

	lnet_net_lock(LNET_LOCK_EX);

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto avoid_reset;

	cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
		memset(counters, 0, sizeof(struct lnet_counters));
avoid_reset:
	lnet_net_unlock(LNET_LOCK_EX);
}
static char *
lnet_res_type2str(int type)
{
	switch (type) {
	default:
		return "unknown";

	case LNET_COOKIE_TYPE_MD:
		return "MD";

	case LNET_COOKIE_TYPE_ME:
		return "ME";

	case LNET_COOKIE_TYPE_EQ:
		return "EQ";
	}
}
static void
lnet_res_container_cleanup(struct lnet_res_container *rec)
{
	int count = 0;

	if (rec->rec_type == 0) /* not set yet, it's uninitialized */
		return;

	while (!list_empty(&rec->rec_active)) {
		struct list_head *e = rec->rec_active.next;

		list_del_init(e);
		if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
			lnet_md_free(list_entry(e, struct lnet_libmd, md_list));

		} else { /* NB: Active MEs should be attached on portals */
			LBUG();
		}
		count++;
	}

	if (count > 0) {
		/* Found alive MD/ME/EQ, user really should unlink/free
		 * all of them before finalize LNet, but if someone didn't,
		 * we have to recycle garbage for him */
		CERROR("%d active elements on exit of %s container\n",
		       count, lnet_res_type2str(rec->rec_type));
	}

	if (rec->rec_lh_hash != NULL) {
		CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
		rec->rec_lh_hash = NULL;
	}

	rec->rec_type = 0; /* mark it as finalized */
}
int
lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
{
	int rc = 0;
	int i;

	LASSERT(rec->rec_type == 0);

	rec->rec_type = type;
	INIT_LIST_HEAD(&rec->rec_active);

	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;

	/* Arbitrary choice of hash table size */
	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
	if (rec->rec_lh_hash == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < LNET_LH_HASH_SIZE; i++)
		INIT_LIST_HEAD(&rec->rec_lh_hash[i]);

	return 0;

out:
	CERROR("Failed to setup %s resource container\n",
	       lnet_res_type2str(type));
	lnet_res_container_cleanup(rec);
	return rc;
}
static void
lnet_res_containers_destroy(struct lnet_res_container **recs)
{
	struct lnet_res_container *rec;
	int i;

	cfs_percpt_for_each(rec, i, recs)
		lnet_res_container_cleanup(rec);

	cfs_percpt_free(recs);
}
static struct lnet_res_container **
lnet_res_containers_create(int type)
{
	struct lnet_res_container **recs;
	struct lnet_res_container *rec;
	int rc;
	int i;

	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
	if (recs == NULL) {
		CERROR("Failed to allocate %s resource containers\n",
		       lnet_res_type2str(type));
		return NULL;
	}

	cfs_percpt_for_each(rec, i, recs) {
		rc = lnet_res_container_setup(rec, i, type);
		if (rc != 0) {
			lnet_res_containers_destroy(recs);
			return NULL;
		}
	}

	return recs;
}
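/*
 * Cookie layout used by the lookup/initialize pair below: the low
 * LNET_COOKIE_TYPE_BITS encode the resource type (MD/ME/EQ), the next
 * LNET_CPT_BITS encode the owning CPT, and the remaining high bits are
 * a per-container counter advanced by lnet_res_lh_initialize().
 */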
struct lnet_libhandle *
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
	/* ALWAYS called with lnet_res_lock held */
	struct list_head *head;
	struct lnet_libhandle *lh;
	unsigned int hash;

	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
		return NULL;

	hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
	head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];

	list_for_each_entry(lh, head, lh_hash_chain) {
		if (lh->lh_cookie == cookie)
			return lh;
	}

	return NULL;
}
void
lnet_res_lh_initialize(struct lnet_res_container *rec,
		       struct lnet_libhandle *lh)
{
	/* ALWAYS called with lnet_res_lock held */
	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
	unsigned int hash;

	lh->lh_cookie = rec->rec_lh_cookie;
	rec->rec_lh_cookie += 1 << ibits;

	hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;

	list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
struct list_head **
lnet_create_array_of_queues(void)
{
	struct list_head **qs;
	struct list_head *q;
	int i;

	qs = cfs_percpt_alloc(lnet_cpt_table(),
			      sizeof(struct list_head));
	if (!qs) {
		CERROR("Failed to allocate queues\n");
		return NULL;
	}

	cfs_percpt_for_each(q, i, qs)
		INIT_LIST_HEAD(q);

	return qs;
}
static int lnet_unprepare(void);
static int
lnet_prepare(lnet_pid_t requested_pid)
{
	/* Prepare to bring up the network */
	struct lnet_res_container **recs;
	int rc = 0;

	if (requested_pid == LNET_PID_ANY) {
		/* Don't instantiate LNET just for me */
		return -ENETDOWN;
	}

	LASSERT(the_lnet.ln_refcount == 0);

	the_lnet.ln_routing = 0;

	LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
	the_lnet.ln_pid = requested_pid;

	INIT_LIST_HEAD(&the_lnet.ln_test_peers);
	INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
	INIT_LIST_HEAD(&the_lnet.ln_nets);
	INIT_LIST_HEAD(&the_lnet.ln_routers);
	INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
	INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
	INIT_LIST_HEAD(&the_lnet.ln_dc_request);
	INIT_LIST_HEAD(&the_lnet.ln_dc_working);
	INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
	INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
	INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
	INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
	init_waitqueue_head(&the_lnet.ln_dc_waitq);
	the_lnet.ln_mt_handler = NULL;
	init_completion(&the_lnet.ln_started);

	rc = lnet_slab_setup();
	if (rc != 0)
		goto failed;

	rc = lnet_create_remote_nets_table();
	if (rc != 0)
		goto failed;

	/*
	 * NB the interface cookie in wire handles guards against delayed
	 * replies and ACKs appearing valid after reboot.
	 */
	the_lnet.ln_interface_cookie = ktime_get_real_ns();

	the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
						sizeof(struct lnet_counters));
	if (the_lnet.ln_counters == NULL) {
		CERROR("Failed to allocate counters for LNet\n");
		rc = -ENOMEM;
		goto failed;
	}

	rc = lnet_peer_tables_create();
	if (rc != 0)
		goto failed;

	rc = lnet_msg_containers_create();
	if (rc != 0)
		goto failed;

	rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
				      LNET_COOKIE_TYPE_EQ);
	if (rc != 0)
		goto failed;

	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
	if (recs == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	the_lnet.ln_md_containers = recs;

	rc = lnet_portals_create();
	if (rc != 0) {
		CERROR("Failed to create portals for LNet: %d\n", rc);
		goto failed;
	}

	the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
	if (!the_lnet.ln_mt_zombie_rstqs) {
		rc = -ENOMEM;
		goto failed;
	}

	return 0;

failed:
	lnet_unprepare();
	return rc;
}
static int
lnet_unprepare(void)
{
	/* NB no LNET_LOCK since this is the last reference. All LND instances
	 * have shut down already, so it is safe to unlink and free all
	 * descriptors, even those that appear committed to a network op (eg MD
	 * with non-zero pending count) */

	lnet_fail_nid(LNET_NID_ANY, 0);

	LASSERT(the_lnet.ln_refcount == 0);
	LASSERT(list_empty(&the_lnet.ln_test_peers));
	LASSERT(list_empty(&the_lnet.ln_nets));

	if (the_lnet.ln_mt_zombie_rstqs) {
		lnet_clean_zombie_rstqs();
		the_lnet.ln_mt_zombie_rstqs = NULL;
	}

	lnet_assert_handler_unused(the_lnet.ln_mt_handler);
	the_lnet.ln_mt_handler = NULL;

	lnet_portals_destroy();

	if (the_lnet.ln_md_containers != NULL) {
		lnet_res_containers_destroy(the_lnet.ln_md_containers);
		the_lnet.ln_md_containers = NULL;
	}

	lnet_res_container_cleanup(&the_lnet.ln_eq_container);

	lnet_msg_containers_destroy();
	lnet_peer_uninit();
	lnet_rtrpools_free(0);

	if (the_lnet.ln_counters != NULL) {
		cfs_percpt_free(the_lnet.ln_counters);
		the_lnet.ln_counters = NULL;
	}
	lnet_destroy_remote_nets_table();
	lnet_udsp_destroy(true);
	lnet_slab_cleanup();

	return 0;
}
struct lnet_ni *
lnet_net2ni_locked(__u32 net_id, int cpt)
{
	struct lnet_ni *ni;
	struct lnet_net *net;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id) {
			ni = list_entry(net->net_ni_list.next, struct lnet_ni,
					ni_netlist);
			return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_net2ni_addref(__u32 net)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_net2ni_locked(net, 0);
	if (ni)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_net2ni_addref);
struct lnet_net *
lnet_get_net_locked(__u32 net_id)
{
	struct lnet_net *net;

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id)
			return net;
	}

	return NULL;
}
void
lnet_net_clr_pref_rtrs(struct lnet_net *net)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	list_splice_init(&net->net_rtr_pref_nids, &zombies);
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
int
lnet_net_add_pref_rtr(struct lnet_net *net,
		      lnet_nid_t gw_nid)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_nid_list *ne;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list can not be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
		if (ne->nl_nid == gw_nid)
			return -EEXIST;
	}

	LIBCFS_ALLOC(ne, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(LNET_LOCK_EX);
	list_add(&ne->nl_list, &net->net_rtr_pref_nids);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
bool
lnet_net_is_pref_rtr_locked(struct lnet_net *net, lnet_nid_t rtr_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_net2str(net->net_id),
	       list_empty(&net->net_rtr_pref_nids));

	if (list_empty(&net->net_rtr_pref_nids))
		return false;

	list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nid2str(ne->nl_nid),
		       libcfs_nid2str(rtr_nid));
		if (rtr_nid == ne->nl_nid)
			return true;
	}

	return false;
}
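/*
 * Hash a NID to a CPT index in [0, number). The hash_long() result is
 * used directly when it already falls inside the range; otherwise the
 * key is folded and reduced modulo "number", which does not need to be
 * a power of two.
 */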
static unsigned int
lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
{
	__u64 key = nid;
	unsigned int val;

	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);

	if (number == 1)
		return 0;

	val = hash_long(key, LNET_CPT_BITS);
	/* NB: LNET_CPT_NUMBER doesn't have to be PO2 */
	if (val < number)
		return val;

	return (unsigned int)(key + val + (val >> 1)) % number;
}
int
lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
{
	struct lnet_net *net;

	/* must be called while holding the lnet_net_lock */
	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	/*
	 * If NI is provided then use the CPT identified in the NI cpt
	 * list if one exists. If one doesn't exist, then that NI is
	 * associated with all CPTs and it follows that the net it belongs
	 * to is implicitly associated with all CPTs, so just hash the nid
	 * and return that.
	 */
	if (ni != NULL) {
		if (ni->ni_cpts != NULL)
			return ni->ni_cpts[lnet_nid_cpt_hash(nid,
							     ni->ni_ncpts)];
		else
			return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
	}

	/* no NI provided so look at the net */
	net = lnet_get_net_locked(LNET_NIDNET(nid));

	if (net != NULL && net->net_cpts != NULL)
		return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];

	return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
}
int
lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
{
	int cpt;
	int cpt2;

	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	cpt = lnet_net_lock_current();

	cpt2 = lnet_cpt_of_nid_locked(nid, ni);

	lnet_net_unlock(cpt);

	return cpt2;
}
EXPORT_SYMBOL(lnet_cpt_of_nid);
int
lnet_islocalnet_locked(__u32 net_id)
{
	struct lnet_net *net;
	bool local;

	net = lnet_get_net_locked(net_id);

	local = net != NULL;

	return local;
}

int
lnet_islocalnet(__u32 net_id)
{
	int cpt;
	bool local;

	cpt = lnet_net_lock_current();

	local = lnet_islocalnet_locked(net_id);

	lnet_net_unlock(cpt);

	return local;
}
struct lnet_ni *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
	struct lnet_net *net;
	struct lnet_ni *ni;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (ni->ni_nid == nid)
				return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_nid2ni_addref(lnet_nid_t nid)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_nid2ni_locked(nid, 0);
	if (ni)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_nid2ni_addref);
int
lnet_islocalnid(lnet_nid_t nid)
{
	struct lnet_ni *ni;
	int cpt;

	cpt = lnet_net_lock_current();
	ni = lnet_nid2ni_locked(nid, cpt);
	lnet_net_unlock(cpt);

	return ni != NULL;
}
int
lnet_count_acceptor_nets(void)
{
	/* Return the # of NIs that need the acceptor. */
	int count = 0;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		/* all socklnd type networks should have the acceptor
		 * thread started */
		if (net->net_lnd->lnd_accept != NULL)
			count++;
	}

	lnet_net_unlock(cpt);

	return count;
}
struct lnet_ping_buffer *
lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
{
	struct lnet_ping_buffer *pbuf;

	LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
	if (pbuf) {
		pbuf->pb_nnis = nnis;
		pbuf->pb_needs_post = false;
		atomic_set(&pbuf->pb_refcnt, 1);
	}

	return pbuf;
}

void
lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
{
	LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
	LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
}
static struct lnet_ping_buffer *
lnet_ping_target_create(int nnis)
{
	struct lnet_ping_buffer *pbuf;

	pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
	if (pbuf == NULL) {
		CERROR("Can't allocate ping source [%d]\n", nnis);
		return NULL;
	}

	pbuf->pb_info.pi_nnis = nnis;
	pbuf->pb_info.pi_pid = the_lnet.ln_pid;
	pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
	pbuf->pb_info.pi_features =
		LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;

	return pbuf;
}
static inline int
lnet_get_net_ni_count_locked(struct lnet_net *net)
{
	struct lnet_ni *ni;
	int count = 0;

	list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
		count++;

	return count;
}

static inline int
lnet_get_net_ni_count_pre(struct lnet_net *net)
{
	struct lnet_ni *ni;
	int count = 0;

	list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
		count++;

	return count;
}

static inline int
lnet_get_ni_count(void)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	int count = 0;

	lnet_net_lock(0);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
			count++;
	}

	lnet_net_unlock(0);

	return count;
}

static inline int
lnet_get_net_count(void)
{
	struct lnet_net *net;
	int count = 0;

	lnet_net_lock(0);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		count++;
	}

	lnet_net_unlock(0);

	return count;
}
void
lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni_status *stat;
	int nnis;
	int i;

	__swab32s(&pbuf->pb_info.pi_magic);
	__swab32s(&pbuf->pb_info.pi_features);
	__swab32s(&pbuf->pb_info.pi_pid);
	__swab32s(&pbuf->pb_info.pi_nnis);
	nnis = pbuf->pb_info.pi_nnis;
	if (nnis > pbuf->pb_nnis)
		nnis = pbuf->pb_nnis;
	for (i = 0; i < nnis; i++) {
		stat = &pbuf->pb_info.pi_ni[i];
		__swab64s(&stat->ns_nid);
		__swab32s(&stat->ns_status);
	}
}
int
lnet_ping_info_validate(struct lnet_ping_info *pinfo)
{
	if (!pinfo)
		return -EINVAL;
	if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
		return -EPROTO;
	if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
		return -EPROTO;
	/* Loopback is guaranteed to be present */
	if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
		return -ERANGE;
	if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
		return -EPROTO;
	return 0;
}
static void
lnet_ping_target_destroy(void)
{
	struct lnet_net *net;
	struct lnet_ni *ni;

	lnet_net_lock(LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			lnet_ni_lock(ni);
			ni->ni_status = NULL;
			lnet_ni_unlock(ni);
		}
	}

	lnet_ping_buffer_decref(the_lnet.ln_ping_target);
	the_lnet.ln_ping_target = NULL;

	lnet_net_unlock(LNET_LOCK_EX);
}
static void
lnet_ping_target_event_handler(struct lnet_event *event)
{
	struct lnet_ping_buffer *pbuf = event->md_user_ptr;

	if (event->unlinked)
		lnet_ping_buffer_decref(pbuf);
}
static int
lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
		       struct lnet_handle_md *ping_mdh,
		       int ni_count, bool set_eq)
{
	struct lnet_process_id id = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY
	};
	struct lnet_me *me;
	struct lnet_md md = { NULL };
	int rc;

	if (set_eq)
		the_lnet.ln_ping_target_handler =
			lnet_ping_target_event_handler;

	*ppbuf = lnet_ping_target_create(ni_count);
	if (*ppbuf == NULL) {
		rc = -ENOMEM;
		goto fail_free_eq;
	}

	/* Ping target ME/MD */
	me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
			  LNET_PROTO_PING_MATCHBITS, 0,
			  LNET_UNLINK, LNET_INS_AFTER);
	if (IS_ERR(me)) {
		rc = PTR_ERR(me);
		CERROR("Can't create ping target ME: %d\n", rc);
		goto fail_decref_ping_buffer;
	}

	/* initialize md content */
	md.start = &(*ppbuf)->pb_info;
	md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
	md.threshold = LNET_MD_THRESH_INF;
	md.max_size = 0;
	md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
		     LNET_MD_MANAGE_REMOTE;
	md.handler = the_lnet.ln_ping_target_handler;
	md.user_ptr = *ppbuf;

	rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
	if (rc != 0) {
		CERROR("Can't attach ping target MD: %d\n", rc);
		goto fail_decref_ping_buffer;
	}
	lnet_ping_buffer_addref(*ppbuf);

	return 0;

fail_decref_ping_buffer:
	LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
	lnet_ping_buffer_decref(*ppbuf);
	*ppbuf = NULL;
fail_free_eq:
	return rc;
}
static void
lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
		    struct lnet_handle_md *ping_mdh)
{
	LNetMDUnlink(*ping_mdh);
	LNetInvalidateMDHandle(ping_mdh);

	/* NB the MD could be busy; this just starts the unlink */
	wait_var_event_warning(&pbuf->pb_refcnt,
			       atomic_read(&pbuf->pb_refcnt) <= 1,
			       "Still waiting for ping data MD to unlink\n");
}
static void
lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	struct lnet_ni_status *ns;
	int i;
	int rc;

	i = 0;
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			LASSERT(i < pbuf->pb_nnis);

			ns = &pbuf->pb_info.pi_ni[i];

			ns->ns_nid = ni->ni_nid;

			lnet_ni_lock(ni);
			ns->ns_status = (ni->ni_status != NULL) ?
					 ni->ni_status->ns_status :
						LNET_NI_STATUS_UP;
			ni->ni_status = ns;
			lnet_ni_unlock(ni);

			i++;
		}
	}
	/*
	 * We (ab)use the ns_status of the loopback interface to
	 * transmit the sequence number. The first interface listed
	 * must be the loopback interface.
	 */
	rc = lnet_ping_info_validate(&pbuf->pb_info);
	if (rc) {
		LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
		LBUG();
	}
	LNET_PING_BUFFER_SEQNO(pbuf) =
		atomic_inc_return(&the_lnet.ln_ping_target_seqno);
}
static void
lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
			struct lnet_handle_md ping_mdh)
{
	struct lnet_ping_buffer *old_pbuf = NULL;
	struct lnet_handle_md old_ping_md;

	/* switch the NIs to point to the new ping info created */
	lnet_net_lock(LNET_LOCK_EX);

	if (!the_lnet.ln_routing)
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
	if (!lnet_peer_discovery_disabled)
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;

	/* Ensure only known feature bits have been set. */
	LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
	LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));

	lnet_ping_target_install_locked(pbuf);

	if (the_lnet.ln_ping_target) {
		old_pbuf = the_lnet.ln_ping_target;
		old_ping_md = the_lnet.ln_ping_target_md;
	}
	the_lnet.ln_ping_target_md = ping_mdh;
	the_lnet.ln_ping_target = pbuf;

	lnet_net_unlock(LNET_LOCK_EX);

	if (old_pbuf) {
		/* unlink and free the old ping info */
		lnet_ping_md_unlink(old_pbuf, &old_ping_md);
		lnet_ping_buffer_decref(old_pbuf);
	}

	lnet_push_update_to_peers(0);
}
static void
lnet_ping_target_fini(void)
{
	lnet_ping_md_unlink(the_lnet.ln_ping_target,
			    &the_lnet.ln_ping_target_md);
	lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
	lnet_ping_target_destroy();
}
/* Resize the push target. */
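/*
 * If a push or reply that arrived while the buffer was being swapped
 * shows that an even larger buffer is needed, the code below loops
 * back and resizes once more before returning.
 */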
int lnet_push_target_resize(void)
{
	struct lnet_handle_md mdh;
	struct lnet_handle_md old_mdh;
	struct lnet_ping_buffer *pbuf;
	struct lnet_ping_buffer *old_pbuf;
	int nnis;
	int rc;

again:
	nnis = the_lnet.ln_push_target_nnis;
	if (nnis <= 0) {
		CDEBUG(D_NET, "Invalid nnis %d\n", nnis);
		return -EINVAL;
	}

	/* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
	 * dropped when we need to resize again (see "old_pbuf" below) or when
	 * LNet is shutdown (see lnet_push_target_fini())
	 */
	pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
	if (!pbuf) {
		CDEBUG(D_NET, "Can't allocate pbuf for nnis %d\n", nnis);
		return -ENOMEM;
	}

	rc = lnet_push_target_post(pbuf, &mdh);
	if (rc) {
		CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
		lnet_ping_buffer_decref(pbuf);
		return rc;
	}

	lnet_net_lock(LNET_LOCK_EX);
	old_pbuf = the_lnet.ln_push_target;
	old_mdh = the_lnet.ln_push_target_md;
	the_lnet.ln_push_target = pbuf;
	the_lnet.ln_push_target_md = mdh;
	lnet_net_unlock(LNET_LOCK_EX);

	if (old_pbuf) {
		LNetMDUnlink(old_mdh);
		/* Drop ref set by lnet_ping_buffer_alloc() */
		lnet_ping_buffer_decref(old_pbuf);
	}

	/* Received another push or reply that requires a larger buffer */
	if (nnis < the_lnet.ln_push_target_nnis)
		goto again;

	CDEBUG(D_NET, "nnis %d success\n", nnis);
	return 0;
}
int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
			  struct lnet_handle_md *mdhp)
{
	struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
	struct lnet_md md = { NULL };
	struct lnet_me *me;
	int rc;

	me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
			  LNET_PROTO_PING_MATCHBITS, 0,
			  LNET_UNLINK, LNET_INS_AFTER);
	if (IS_ERR(me)) {
		rc = PTR_ERR(me);
		CERROR("Can't create push target ME: %d\n", rc);
		return rc;
	}

	pbuf->pb_needs_post = false;

	/* This reference is dropped by lnet_push_target_event_handler() */
	lnet_ping_buffer_addref(pbuf);

	/* initialize md content */
	md.start = &pbuf->pb_info;
	md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
	md.threshold = 1;
	md.max_size = 0;
	md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
	md.user_ptr = pbuf;
	md.handler = the_lnet.ln_push_target_handler;

	rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
	if (rc) {
		CERROR("Can't attach push MD: %d\n", rc);
		lnet_ping_buffer_decref(pbuf);
		pbuf->pb_needs_post = true;
		return rc;
	}

	CDEBUG(D_NET, "posted push target %p\n", pbuf);

	return 0;
}
static void lnet_push_target_event_handler(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf = ev->md_user_ptr;

	CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
	       ev->unlinked);

	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
		lnet_swap_pinginfo(pbuf);

	if (ev->type == LNET_EVENT_UNLINK) {
		/* Drop ref added by lnet_push_target_post() */
		lnet_ping_buffer_decref(pbuf);
		return;
	}

	lnet_peer_push_event(ev);
	if (ev->unlinked)
		/* Drop ref added by lnet_push_target_post */
		lnet_ping_buffer_decref(pbuf);
}
/* Initialize the push target. */
static int lnet_push_target_init(void)
{
	int rc;

	if (the_lnet.ln_push_target)
		return -EALREADY;

	the_lnet.ln_push_target_handler =
		lnet_push_target_event_handler;

	rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
	LASSERT(rc == 0);

	/* Start at the required minimum, we'll enlarge if required. */
	the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;

	rc = lnet_push_target_resize();
	if (rc) {
		LNetClearLazyPortal(LNET_RESERVED_PORTAL);
		the_lnet.ln_push_target_handler = NULL;
	}

	return rc;
}
/* Clean up the push target. */
static void lnet_push_target_fini(void)
{
	if (!the_lnet.ln_push_target)
		return;

	/* Unlink and invalidate to prevent new references. */
	LNetMDUnlink(the_lnet.ln_push_target_md);
	LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);

	/* Wait for the unlink to complete. */
	wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
			       atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
			       "Still waiting for ping data MD to unlink\n");

	/* Drop ref set by lnet_ping_buffer_alloc() */
	lnet_ping_buffer_decref(the_lnet.ln_push_target);
	the_lnet.ln_push_target = NULL;
	the_lnet.ln_push_target_nnis = 0;

	LNetClearLazyPortal(LNET_RESERVED_PORTAL);
	lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
	the_lnet.ln_push_target_handler = NULL;
}
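/*
 * Example: with lct_max_tx_credits = 256, lct_peer_tx_credits = 8 and
 * an NI spanning 4 CPTs, each per-CPT tx queue below receives
 * max(256 / 4, 8 * 8) = 64 credits, capped at the interface-wide
 * maximum of 256.
 */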
static int
lnet_ni_tq_credits(struct lnet_ni *ni)
{
	int credits;

	LASSERT(ni->ni_ncpts >= 1);

	if (ni->ni_ncpts == 1)
		return ni->ni_net->net_tunables.lct_max_tx_credits;

	credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
	credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
	credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);

	return credits;
}
static void
lnet_ni_unlink_locked(struct lnet_ni *ni)
{
	/* move it to zombie list and nobody can find it anymore */
	LASSERT(!list_empty(&ni->ni_netlist));
	list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
	lnet_ni_decref_locked(ni, 0);
}
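/*
 * Note on the wait loop below: "Waiting for zombie LNI" is only logged
 * on power-of-two iterations ((i & (-i)) == i) so that a long-lived
 * busy NI does not flood the console while its references drain.
 */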
static void
lnet_clear_zombies_nis_locked(struct lnet_net *net)
{
	int i;
	int islo;
	struct lnet_ni *ni;
	struct list_head *zombie_list = &net->net_ni_zombie;

	/*
	 * Now wait for the NIs I just nuked to show up on the zombie
	 * list and shut them down in guaranteed thread context
	 */
	i = 2;
	while (!list_empty(zombie_list)) {
		int *ref;
		int j;

		ni = list_entry(zombie_list->next,
				struct lnet_ni, ni_netlist);
		list_del_init(&ni->ni_netlist);
		/* the ni should be in deleting state. If it's not it's
		 * a bug */
		LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
		cfs_percpt_for_each(ref, j, ni->ni_refs) {
			if (*ref == 0)
				continue;
			/* still busy, add it back to zombie list */
			list_add(&ni->ni_netlist, zombie_list);
			break;
		}

		if (!list_empty(&ni->ni_netlist)) {
			/* Unlock mutex while waiting to allow other
			 * threads to read the LNet state and fall through
			 * to shutdown
			 */
			lnet_net_unlock(LNET_LOCK_EX);
			mutex_unlock(&the_lnet.ln_api_mutex);

			++i;
			if ((i & (-i)) == i) {
				CDEBUG(D_WARNING,
				       "Waiting for zombie LNI %s\n",
				       libcfs_nid2str(ni->ni_nid));
			}
			schedule_timeout_uninterruptible(cfs_time_seconds(1));

			mutex_lock(&the_lnet.ln_api_mutex);
			lnet_net_lock(LNET_LOCK_EX);
			continue;
		}

		lnet_net_unlock(LNET_LOCK_EX);

		islo = ni->ni_net->net_lnd->lnd_type == LOLND;

		LASSERT(!in_interrupt());
		/* Holding the mutex makes it safe for lnd_shutdown
		 * to call module_put(). Module unload cannot finish
		 * until lnet_unregister_lnd() completes, and that
		 * requires the mutex.
		 */
		mutex_lock(&the_lnet.ln_lnd_mutex);
		(net->net_lnd->lnd_shutdown)(ni);
		mutex_unlock(&the_lnet.ln_lnd_mutex);

		if (!islo)
			CDEBUG(D_LNI, "Removed LNI %s\n",
			       libcfs_nid2str(ni->ni_nid));

		lnet_ni_free(ni);

		lnet_net_lock(LNET_LOCK_EX);
	}
}
2279 /* shut down the NI and release the refcount */
2281 lnet_shutdown_lndni(struct lnet_ni *ni)
2284 struct lnet_net *net = ni->ni_net;
2286 lnet_net_lock(LNET_LOCK_EX);
2288 ni->ni_state = LNET_NI_STATE_DELETING;
2290 lnet_ni_unlink_locked(ni);
2291 lnet_incr_dlc_seq();
2292 lnet_net_unlock(LNET_LOCK_EX);
2294 /* clear messages for this NI on the lazy portals */
2295 for (i = 0; i < the_lnet.ln_nportals; i++)
2296 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
2298 lnet_net_lock(LNET_LOCK_EX);
2299 lnet_clear_zombies_nis_locked(net);
2300 lnet_net_unlock(LNET_LOCK_EX);
2304 lnet_shutdown_lndnet(struct lnet_net *net)
2308 lnet_net_lock(LNET_LOCK_EX);
2310 list_del_init(&net->net_list);
2312 while (!list_empty(&net->net_ni_list)) {
2313 ni = list_entry(net->net_ni_list.next,
2314 struct lnet_ni, ni_netlist);
2315 lnet_net_unlock(LNET_LOCK_EX);
2316 lnet_shutdown_lndni(ni);
2317 lnet_net_lock(LNET_LOCK_EX);
2320 lnet_net_unlock(LNET_LOCK_EX);
2322 /* Do peer table cleanup for this net */
2323 lnet_peer_tables_cleanup(net);
2329 lnet_shutdown_lndnets(void)
2331 struct lnet_net *net;
2333 struct lnet_msg *msg, *tmp;
2335 /* NB called holding the global mutex */
2337 /* All quiet on the API front */
2338 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
2339 LASSERT(the_lnet.ln_refcount == 0);
2341 lnet_net_lock(LNET_LOCK_EX);
2342 the_lnet.ln_state = LNET_STATE_STOPPING;
2345 * move the nets to the zombie list to avoid them being
2346 * picked up for new work. The loopback net is also included
2347 * in the nets that will be moved to the zombie list.
2349 list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
2351 /* Drop the cached loopback NI. */
2352 if (the_lnet.ln_loni != NULL) {
2353 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
2354 the_lnet.ln_loni = NULL;
2356 lnet_net_unlock(LNET_LOCK_EX);
2358 /* iterate through the net zombie list and delete each net */
2359 while (!list_empty(&the_lnet.ln_net_zombie)) {
2360 net = list_entry(the_lnet.ln_net_zombie.next,
2361 struct lnet_net, net_list);
2362 lnet_shutdown_lndnet(net);
2365 spin_lock(&the_lnet.ln_msg_resend_lock);
2366 list_splice(&the_lnet.ln_msg_resend, &resend);
2367 spin_unlock(&the_lnet.ln_msg_resend_lock);
2369 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
2370 list_del_init(&msg->msg_list);
2371 msg->msg_no_resend = true;
2372 lnet_finalize(msg, -ECANCELED);
2375 lnet_net_lock(LNET_LOCK_EX);
2376 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
2377 lnet_net_unlock(LNET_LOCK_EX);
2381 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2384 struct lnet_tx_queue *tq;
2386 struct lnet_net *net = ni->ni_net;
2388 mutex_lock(&the_lnet.ln_lnd_mutex);
2391 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2392 ni->ni_lnd_tunables_set = true;
2395 rc = (net->net_lnd->lnd_startup)(ni);
2397 mutex_unlock(&the_lnet.ln_lnd_mutex);
2400 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2401 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2406 ni->ni_state = LNET_NI_STATE_ACTIVE;
2409 /* We keep a reference on the loopback net through the loopback NI */
2410 if (net->net_lnd->lnd_type == LOLND) {
2412 LASSERT(the_lnet.ln_loni == NULL);
2413 the_lnet.ln_loni = ni;
2414 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2415 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2416 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2417 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2421 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2422 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2423 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2424 libcfs_lnd2str(net->net_lnd->lnd_type),
2425 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2427 /* shut down the NI; if we get here then it must've already been started */
2430 lnet_shutdown_lndni(ni);
2434 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2435 tq->tq_credits_min =
2436 tq->tq_credits_max =
2437 tq->tq_credits = lnet_ni_tq_credits(ni);
2440 atomic_set(&ni->ni_tx_credits,
2441 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2442 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2444 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2445 libcfs_nid2str(ni->ni_nid),
2446 ni->ni_net->net_tunables.lct_peer_tx_credits,
2447 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2448 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2449 ni->ni_net->net_tunables.lct_peer_timeout);
2458 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2461 struct lnet_net *net_l = NULL;
2462 LIST_HEAD(local_ni_list);
2466 const struct lnet_lnd *lnd;
2468 net->net_tunables.lct_peer_timeout;
2470 net->net_tunables.lct_max_tx_credits;
2471 int peerrtrcredits =
2472 net->net_tunables.lct_peer_rtr_credits;
2475 * make sure that this net is unique. If it isn't then
2476 * we are adding interfaces to an already existing network, and
2477 * 'net' is just a convenient way to pass in the list.
2478 * if it is unique we need to find the LND and load it if necessary.
2481 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2482 lnd_type = LNET_NETTYP(net->net_id);
2484 mutex_lock(&the_lnet.ln_lnd_mutex);
2485 lnd = lnet_find_lnd_by_type(lnd_type);
2488 mutex_unlock(&the_lnet.ln_lnd_mutex);
2489 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2490 mutex_lock(&the_lnet.ln_lnd_mutex);
2492 lnd = lnet_find_lnd_by_type(lnd_type);
2494 mutex_unlock(&the_lnet.ln_lnd_mutex);
2495 CERROR("Can't load LND %s, module %s, rc=%d\n",
2496 libcfs_lnd2str(lnd_type),
2497 libcfs_lnd2modname(lnd_type), rc);
2498 #ifndef HAVE_MODULE_LOADING_SUPPORT
2499 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
2500 "compiled with kernel module "
2501 "loading support.");
2510 mutex_unlock(&the_lnet.ln_lnd_mutex);
2516 * net_l: if the network being added is unique then net_l
2517 * will point to that network
2518 * if the network being added is not unique then
2519 * net_l points to the existing network.
2521 * When we enter the loop below, we'll pick NIs off the
2522 * network being added and start them up, then add them to
2523 * a local NI list. Once we've successfully started all
2524 * the NIs then we join the local NI list (of started-up
2525 * NIs) with the net_l->net_ni_list, which should
2526 * point to the correct network to add the new NI list to.
2528 * If any of the new NIs fail to start up, then we want to
2529 * iterate through the local NI list, which should include
2530 * any NIs which were successfully started up, and shut them down.
2533 * After that we want to delete the network being added,
2534 * to avoid a memory leak.
2536 while (!list_empty(&net->net_ni_added)) {
2537 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
2539 list_del_init(&ni->ni_netlist);
2541 /* make sure that the NI we're about to start
2542 * up is actually unique. If it's not, fail. */
2543 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2544 ni->ni_interface)) {
2549 /* adjust the pointer to the parent network, just in
2550 * case the net is a duplicate */
2553 rc = lnet_startup_lndni(ni, tun);
2559 list_add_tail(&ni->ni_netlist, &local_ni_list);
2564 lnet_net_lock(LNET_LOCK_EX);
2565 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2566 lnet_incr_dlc_seq();
2567 lnet_net_unlock(LNET_LOCK_EX);
2569 /* if the network is not unique then we don't want to keep
2570 * it around after we're done. Free it. Otherwise add that
2571 * net to the global the_lnet.ln_nets */
2572 if (net_l != net && net_l != NULL) {
2574 * TODO - note: currently the tunables cannot be updated
2580 * restore tunables after they have been overwritten by the
2583 if (peer_timeout != -1)
2584 net->net_tunables.lct_peer_timeout = peer_timeout;
2585 if (maxtxcredits != -1)
2586 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2587 if (peerrtrcredits != -1)
2588 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2590 lnet_net_lock(LNET_LOCK_EX);
2591 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2592 lnet_net_unlock(LNET_LOCK_EX);
2595 /* update net count */
2596 lnet_current_net_count = lnet_get_net_count();
2602 * shut down the new NIs that are being started up
2603 * free the net being started
2605 while (!list_empty(&local_ni_list)) {
2606 ni = list_entry(local_ni_list.next, struct lnet_ni,
2609 lnet_shutdown_lndni(ni);
2619 lnet_startup_lndnets(struct list_head *netlist)
2621 struct lnet_net *net;
2626 * Change to running state before bringing up the LNDs. This
2627 * allows lnet_shutdown_lndnets() to assert that we've passed through here.
2630 lnet_net_lock(LNET_LOCK_EX);
2631 the_lnet.ln_state = LNET_STATE_RUNNING;
2632 lnet_net_unlock(LNET_LOCK_EX);
2634 while (!list_empty(netlist)) {
2635 net = list_entry(netlist->next, struct lnet_net, net_list);
2636 list_del_init(&net->net_list);
2638 rc = lnet_startup_lndnet(net, NULL);
2648 lnet_shutdown_lndnets();
2654 * Initialize LNet library.
2656 * Automatically called at module loading time. Caller has to call
2657 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2658 * latter returned 0. It must be called exactly once.
2660 * \retval 0 on success
2661 * \retval -ve on failures.
2663 int lnet_lib_init(void)
2667 lnet_assert_wire_constants();
2669 /* refer to the global cfs_cpt_tab for now */
2670 the_lnet.ln_cpt_table = cfs_cpt_tab;
2671 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
2673 LASSERT(the_lnet.ln_cpt_number > 0);
2674 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2675 /* we risk exhausting the lh_cookie space */
2676 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2677 "please change setting of CPT-table and retry\n",
2678 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2682 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2683 the_lnet.ln_cpt_bits++;
2685 rc = lnet_create_locks();
2687 CERROR("Can't create LNet global locks: %d\n", rc);
2691 the_lnet.ln_refcount = 0;
2692 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2693 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2695 /* The hash table size is the number of bits it takes to express the set
2696 * ln_num_routes, minus 1 (better to underestimate than overestimate
2697 * so we don't waste memory). */
2698 if (rnet_htable_size <= 0)
2699 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2700 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2701 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2702 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2703 order_base_2(rnet_htable_size) - 1);
2705 /* All LNDs apart from the LOLND are in separate modules. They
2706 * register themselves when their module loads, and unregister
2707 * themselves when their module is unloaded. */
2708 lnet_register_lnd(&the_lolnd);
2713 * Finalize LNet library.
2715 * \pre lnet_lib_init() called with success.
2716 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2718 * As this happens at module-unload, all LNDs must already be unloaded,
2719 * so they must already be unregistered.
2721 void lnet_lib_exit(void)
2725 LASSERT(the_lnet.ln_refcount == 0);
2726 lnet_unregister_lnd(&the_lolnd);
2727 for (i = 0; i < NUM_LNDS; i++)
2728 LASSERT(!the_lnet.ln_lnds[i]);
2729 lnet_destroy_locks();
2733 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2735 * Users must call this function at least once before any other functions.
2736 * For each successful call there must be a corresponding call to
2737 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is ignored.
2740 * The PID used by LNet may be different from the one requested.
2743 * \param requested_pid PID requested by the caller.
2745 * \return >= 0 on success, and < 0 error code on failures.
2748 LNetNIInit(lnet_pid_t requested_pid)
2750 int im_a_router = 0;
2753 struct lnet_ping_buffer *pbuf;
2754 struct lnet_handle_md ping_mdh;
2755 LIST_HEAD(net_head);
2756 struct lnet_net *net;
2758 mutex_lock(&the_lnet.ln_api_mutex);
2760 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2762 if (the_lnet.ln_refcount > 0) {
2763 rc = the_lnet.ln_refcount++;
2764 mutex_unlock(&the_lnet.ln_api_mutex);
2768 rc = lnet_prepare(requested_pid);
2770 mutex_unlock(&the_lnet.ln_api_mutex);
2774 /* create the loopback network */
2775 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2778 goto err_empty_list;
2781 /* Add in the loopback NI */
2782 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2784 goto err_empty_list;
2787 if (use_tcp_bonding)
2788 CWARN("use_tcp_bonding has been removed. Use Multi-Rail and Dynamic Discovery instead, see LU-13641\n");
2790 /* If LNet is being initialized via DLC it is possible
2791 * that the user requests not to load module parameters (ones which
2792 * are supported by DLC) on initialization. Therefore, make sure not
2793 * to load networks, routes and forwarding from module parameters
2794 * in this case. On cleanup in case of failure, only clean up
2795 * routes if they have been loaded */
2796 if (!the_lnet.ln_nis_from_mod_params) {
2797 rc = lnet_parse_networks(&net_head, lnet_get_networks());
2799 goto err_empty_list;
2802 ni_count = lnet_startup_lndnets(&net_head);
2805 goto err_empty_list;
2808 if (!the_lnet.ln_nis_from_mod_params) {
2809 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2811 goto err_shutdown_lndnis;
2813 rc = lnet_rtrpools_alloc(im_a_router);
2815 goto err_destroy_routes;
2818 rc = lnet_acceptor_start();
2820 goto err_destroy_routes;
2822 the_lnet.ln_refcount = 1;
2823 /* Now I may use my own API functions... */
2825 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2827 goto err_acceptor_stop;
2829 lnet_ping_target_update(pbuf, ping_mdh);
2831 the_lnet.ln_mt_handler = lnet_mt_event_handler;
2833 rc = lnet_push_target_init();
2837 rc = lnet_peer_discovery_start();
2839 goto err_destroy_push_target;
2841 rc = lnet_monitor_thr_start();
2843 goto err_stop_discovery_thr;
2846 lnet_router_debugfs_init();
2848 mutex_unlock(&the_lnet.ln_api_mutex);
2850 complete_all(&the_lnet.ln_started);
2852 /* wait for all routers to start */
2853 lnet_wait_router_start();
2857 err_stop_discovery_thr:
2858 lnet_peer_discovery_stop();
2859 err_destroy_push_target:
2860 lnet_push_target_fini();
2862 lnet_ping_target_fini();
2864 the_lnet.ln_refcount = 0;
2865 lnet_acceptor_stop();
2867 if (!the_lnet.ln_nis_from_mod_params)
2868 lnet_destroy_routes();
2869 err_shutdown_lndnis:
2870 lnet_shutdown_lndnets();
2874 mutex_unlock(&the_lnet.ln_api_mutex);
2875 while (!list_empty(&net_head)) {
2876 struct lnet_net *net;
2878 net = list_entry(net_head.next, struct lnet_net, net_list);
2879 list_del_init(&net->net_list);
2884 EXPORT_SYMBOL(LNetNIInit);
2887 * Stop LNet interfaces, routing, and forwarding.
2889 * Users must call this function once for each successful call to LNetNIInit().
2890 * Once the LNetNIFini() operation has been started, the results of pending
2891 * API operations are undefined.
2893 * \return always 0 for current implementation.
2898 mutex_lock(&the_lnet.ln_api_mutex);
2900 LASSERT(the_lnet.ln_refcount > 0);
2902 if (the_lnet.ln_refcount != 1) {
2903 the_lnet.ln_refcount--;
2905 LASSERT(!the_lnet.ln_niinit_self);
2909 lnet_router_debugfs_fini();
2910 lnet_monitor_thr_stop();
2911 lnet_peer_discovery_stop();
2912 lnet_push_target_fini();
2913 lnet_ping_target_fini();
2915 /* Teardown fns that use my own API functions BEFORE here */
2916 the_lnet.ln_refcount = 0;
2918 lnet_acceptor_stop();
2919 lnet_destroy_routes();
2920 lnet_shutdown_lndnets();
2924 mutex_unlock(&the_lnet.ln_api_mutex);
2927 EXPORT_SYMBOL(LNetNIFini);
2930 * Grabs the NI data from the ni structure and fills the output parameters.
2933 * \param[in] ni network interface structure
2934 * \param[out] cfg_ni NI config information
2935 * \param[out] tun network and LND tunables
2938 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2939 struct lnet_ioctl_config_lnd_tunables *tun,
2940 struct lnet_ioctl_element_stats *stats,
2943 size_t min_size = 0;
2946 if (!ni || !cfg_ni || !tun)
2949 if (ni->ni_interface != NULL) {
2950 strncpy(cfg_ni->lic_ni_intf,
2952 sizeof(cfg_ni->lic_ni_intf));
2955 cfg_ni->lic_nid = ni->ni_nid;
2956 if (ni->ni_nid == LNET_NID_LO_0)
2957 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2959 cfg_ni->lic_status = ni->ni_status->ns_status;
2960 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2962 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2965 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
2966 LNET_STATS_TYPE_SEND);
2967 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
2968 LNET_STATS_TYPE_RECV);
2969 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
2970 LNET_STATS_TYPE_DROP);
2974 * tun->lt_tun will always be present, but to stay backwards
2975 * compatible we need to handle the case where the caller's
2976 * tun->lt_tun is smaller than what the kernel has (because it
2977 * comes from an older version of a userspace program). In that
2978 * case we copy only as much information as the caller has room for.
2980 min_size = tun_size - sizeof(tun->lt_cmn);
2981 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2983 /* copy over the cpts */
2984 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2985 ni->ni_cpts == NULL) {
2986 for (i = 0; i < ni->ni_ncpts; i++)
2987 cfg_ni->lic_cpts[i] = i;
2990 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2991 i < LNET_MAX_SHOW_NUM_CPT;
2993 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2995 cfg_ni->lic_ncpts = ni->ni_ncpts;
2999 * NOTE: This is a legacy function left in the code to be backwards
3000 * compatible with older userspace programs. It should eventually be removed.
3003 * Grabs the NI data from the ni structure and fills the output parameters.
3006 * \param[in] ni network interface structure
3007 * \param[out] config config information
3010 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
3011 struct lnet_ioctl_config_data *config)
3013 struct lnet_ioctl_net_config *net_config;
3014 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
3015 size_t min_size, tunable_size = 0;
3021 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
3025 if (!ni->ni_interface)
3028 strncpy(net_config->ni_interface,
3030 sizeof(net_config->ni_interface));
3032 config->cfg_nid = ni->ni_nid;
3033 config->cfg_config_u.cfg_net.net_peer_timeout =
3034 ni->ni_net->net_tunables.lct_peer_timeout;
3035 config->cfg_config_u.cfg_net.net_max_tx_credits =
3036 ni->ni_net->net_tunables.lct_max_tx_credits;
3037 config->cfg_config_u.cfg_net.net_peer_tx_credits =
3038 ni->ni_net->net_tunables.lct_peer_tx_credits;
3039 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
3040 ni->ni_net->net_tunables.lct_peer_rtr_credits;
3042 if (ni->ni_nid == LNET_NID_LO_0)
3043 net_config->ni_status = LNET_NI_STATUS_UP;
3045 net_config->ni_status = ni->ni_status->ns_status;
3048 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
3050 for (i = 0; i < num_cpts; i++)
3051 net_config->ni_cpts[i] = ni->ni_cpts[i];
3053 config->cfg_ncpts = num_cpts;
3057 * See if userland tools sent in a newer and larger version
3058 * of struct lnet_tunables than what the kernel uses.
3060 min_size = sizeof(*config) + sizeof(*net_config);
3062 if (config->cfg_hdr.ioc_len > min_size)
3063 tunable_size = config->cfg_hdr.ioc_len - min_size;
3065 /* Don't copy too much data to user space */
3066 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
3067 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
3069 if (lnd_cfg && min_size) {
3070 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
3071 config->cfg_config_u.cfg_net.net_interface_count = 1;
3073 /* Tell userland that the kernel side has less data */
3074 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
3075 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
3076 config->cfg_hdr.ioc_len -= min_size;
3082 lnet_get_ni_idx_locked(int idx)
3085 struct lnet_net *net;
3087 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3088 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3097 int lnet_get_net_healthv_locked(struct lnet_net *net)
3100 int best_healthv = 0;
3103 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3104 healthv = atomic_read(&ni->ni_healthv);
3105 if (healthv > best_healthv)
3106 best_healthv = healthv;
3109 return best_healthv;
3113 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
3116 struct lnet_net *net = mynet;
3119 * It is possible that the net has been cleaned out while there is
3120 * a message being sent. This function accesses the net without
3121 * checking if the list is empty.
3125 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
3127 if (list_empty(&net->net_ni_list))
3129 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
3135 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
3136 /* if you reached the end of the ni list and the net is
3137 * specified, then there are no more nis in that net */
3141 /* we reached the end of this net's NI list; move to the next net */
3143 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
3144 /* no more nets and no more NIs. */
3147 /* get the next net */
3148 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
3150 if (list_empty(&net->net_ni_list))
3152 /* get the ni on it */
3153 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
3159 if (list_empty(&prev->ni_netlist))
3162 /* there are more nis left */
3163 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
3169 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3174 int idx = config->cfg_count;
3176 cpt = lnet_net_lock_current();
3178 ni = lnet_get_ni_idx_locked(idx);
3183 lnet_fill_ni_info_legacy(ni, config);
3187 lnet_net_unlock(cpt);
3192 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3193 struct lnet_ioctl_config_lnd_tunables *tun,
3194 struct lnet_ioctl_element_stats *stats,
3201 if (!cfg_ni || !tun || !stats)
3204 cpt = lnet_net_lock_current();
3206 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3211 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3215 lnet_net_unlock(cpt);
3219 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3228 cpt = lnet_net_lock_current();
3230 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3233 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3237 lnet_net_unlock(cpt);
3242 static int lnet_add_net_common(struct lnet_net *net,
3243 struct lnet_ioctl_config_lnd_tunables *tun)
3245 struct lnet_handle_md ping_mdh;
3246 struct lnet_ping_buffer *pbuf;
3247 struct lnet_remotenet *rnet;
3253 lnet_net_lock(LNET_LOCK_EX);
3254 rnet = lnet_find_rnet_locked(net->net_id);
3255 lnet_net_unlock(LNET_LOCK_EX);
3257 * make sure that the net added doesn't invalidate the current
3258 * configuration LNet is keeping
3261 CERROR("Adding net %s will invalidate routing configuration\n",
3262 libcfs_net2str(net->net_id));
3268 * make sure you calculate the correct number of slots in the ping
3269 * buffer. Since the ping info is a flattened list of all the NIs,
3270 * we should allocate enough slots to accommodate the number of NIs
3271 * which will be added.
3273 * since ni hasn't been configured yet, use
3274 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
3276 net_ni_count = lnet_get_net_ni_count_pre(net);
3278 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3279 net_ni_count + lnet_get_ni_count(),
3287 memcpy(&net->net_tunables,
3288 &tun->lt_cmn, sizeof(net->net_tunables));
3290 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3292 net_id = net->net_id;
3294 rc = lnet_startup_lndnet(net,
3295 (tun) ? &tun->lt_tun : NULL);
3299 lnet_net_lock(LNET_LOCK_EX);
3300 net = lnet_get_net_locked(net_id);
3303 /* apply the UDSPs */
3304 rc = lnet_udsp_apply_policies_on_net(net);
3306 CERROR("Failed to apply UDSPs on local net %s\n",
3307 libcfs_net2str(net->net_id));
3309 /* At this point we lost track of which NI was just added, so we
3310 * just re-apply the policies on all of the NIs on this net
3312 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3313 rc = lnet_udsp_apply_policies_on_ni(ni);
3315 CERROR("Failed to apply UDSPs on ni %s\n",
3316 libcfs_nid2str(ni->ni_nid));
3318 lnet_net_unlock(LNET_LOCK_EX);
3321 * Start the acceptor thread if this is the first network
3322 * being added that requires the thread.
3324 if (net->net_lnd->lnd_accept) {
3325 rc = lnet_acceptor_start();
3327 /* shut down the net that we just started */
3328 CERROR("Failed to start up acceptor thread\n");
3329 lnet_shutdown_lndnet(net);
3334 lnet_net_lock(LNET_LOCK_EX);
3335 lnet_peer_net_added(net);
3336 lnet_net_unlock(LNET_LOCK_EX);
3338 lnet_ping_target_update(pbuf, ping_mdh);
3343 lnet_ping_md_unlink(pbuf, &ping_mdh);
3344 lnet_ping_buffer_decref(pbuf);
3349 lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
3352 if (!tun->lt_cmn.lct_peer_timeout)
3353 tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
3354 if (!tun->lt_cmn.lct_peer_tx_credits)
3355 tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
3356 if (!tun->lt_cmn.lct_max_tx_credits)
3357 tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
3361 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3362 struct lnet_ioctl_config_lnd_tunables *tun)
3364 struct lnet_net *net;
3367 LIST_HEAD(net_head);
3369 rc = lnet_parse_ip2nets(&nets, ip2nets);
3373 rc = lnet_parse_networks(&net_head, nets);
3377 lnet_set_tune_defaults(tun);
3379 mutex_lock(&the_lnet.ln_api_mutex);
3380 while (!list_empty(&net_head)) {
3381 net = list_entry(net_head.next, struct lnet_net, net_list);
3382 list_del_init(&net->net_list);
3383 rc = lnet_add_net_common(net, tun);
3389 mutex_unlock(&the_lnet.ln_api_mutex);
3391 while (!list_empty(&net_head)) {
3392 net = list_entry(net_head.next, struct lnet_net, net_list);
3393 list_del_init(&net->net_list);
3399 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
3401 struct lnet_net *net;
3403 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3405 __u32 net_id, lnd_type;
3407 /* get the tunables if they are available */
3408 if (conf->lic_cfg_hdr.ioc_len >=
3409 sizeof(*conf) + sizeof(*tun))
3410 tun = (struct lnet_ioctl_config_lnd_tunables *)
3413 /* handle legacy ip2nets from DLC */
3414 if (conf->lic_legacy_ip2nets[0] != '\0')
3415 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3418 net_id = LNET_NIDNET(conf->lic_nid);
3419 lnd_type = LNET_NETTYP(net_id);
3421 if (!libcfs_isknown_lnd(lnd_type)) {
3422 CERROR("No valid net and lnd information provided\n");
3426 net = lnet_net_alloc(net_id, NULL);
3430 for (i = 0; i < conf->lic_ncpts; i++) {
3431 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
3435 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3440 lnet_set_tune_defaults(tun);
3442 mutex_lock(&the_lnet.ln_api_mutex);
3444 rc = lnet_add_net_common(net, tun);
3446 mutex_unlock(&the_lnet.ln_api_mutex);
3451 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
3453 struct lnet_net *net;
3455 __u32 net_id = LNET_NIDNET(conf->lic_nid);
3456 struct lnet_ping_buffer *pbuf;
3457 struct lnet_handle_md ping_mdh;
3462 /* don't allow userspace to shut down the LOLND */
3463 if (LNET_NETTYP(net_id) == LOLND)
3466 mutex_lock(&the_lnet.ln_api_mutex);
3470 net = lnet_get_net_locked(net_id);
3472 CERROR("net %s not found\n",
3473 libcfs_net2str(net_id));
3478 addr = LNET_NIDADDR(conf->lic_nid);
3480 /* remove the entire net */
3481 net_count = lnet_get_net_ni_count_locked(net);
3485 /* create and link a new ping info, before removing the old one */
3486 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3487 lnet_get_ni_count() - net_count,
3490 goto unlock_api_mutex;
3492 lnet_shutdown_lndnet(net);
3494 lnet_acceptor_stop();
3496 lnet_ping_target_update(pbuf, ping_mdh);
3498 goto unlock_api_mutex;
3501 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
3503 CERROR("nid %s not found\n",
3504 libcfs_nid2str(conf->lic_nid));
3509 net_count = lnet_get_net_ni_count_locked(net);
3513 /* create and link a new ping info, before removing the old one */
3514 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3515 lnet_get_ni_count() - 1, false);
3517 goto unlock_api_mutex;
3519 lnet_shutdown_lndni(ni);
3521 lnet_acceptor_stop();
3523 lnet_ping_target_update(pbuf, ping_mdh);
3525 /* check if the net is empty and remove it if it is */
3527 lnet_shutdown_lndnet(net);
3529 goto unlock_api_mutex;
3534 mutex_unlock(&the_lnet.ln_api_mutex);
3540 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3541 * They are only expected to be called for unique networks.
3542 * That can happen as a result of older DLC library
3543 * calls. Multi-Rail DLC and beyond no longer use these APIs.
3546 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3548 struct lnet_net *net;
3549 LIST_HEAD(net_head);
3551 struct lnet_ioctl_config_lnd_tunables tun;
3552 const char *nets = conf->cfg_config_u.cfg_net.net_intf;
3554 /* Create a net/ni structures for the network string */
3555 rc = lnet_parse_networks(&net_head, nets);
3557 return rc == 0 ? -EINVAL : rc;
3559 mutex_lock(&the_lnet.ln_api_mutex);
3562 rc = -EINVAL; /* only add one network per call */
3563 goto out_unlock_clean;
3566 net = list_entry(net_head.next, struct lnet_net, net_list);
3567 list_del_init(&net->net_list);
3569 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3571 memset(&tun, 0, sizeof(tun));
3573 tun.lt_cmn.lct_peer_timeout =
3574 (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
3575 conf->cfg_config_u.cfg_net.net_peer_timeout;
3576 tun.lt_cmn.lct_peer_tx_credits =
3577 (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
3578 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3579 tun.lt_cmn.lct_peer_rtr_credits =
3580 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3581 tun.lt_cmn.lct_max_tx_credits =
3582 (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
3583 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3585 rc = lnet_add_net_common(net, &tun);
3588 mutex_unlock(&the_lnet.ln_api_mutex);
3589 while (!list_empty(&net_head)) {
3590 /* net_head list is empty in success case */
3591 net = list_entry(net_head.next, struct lnet_net, net_list);
3592 list_del_init(&net->net_list);
3599 lnet_dyn_del_net(__u32 net_id)
3601 struct lnet_net *net;
3602 struct lnet_ping_buffer *pbuf;
3603 struct lnet_handle_md ping_mdh;
3607 /* don't allow userspace to shut down the LOLND */
3608 if (LNET_NETTYP(net_id) == LOLND)
3611 mutex_lock(&the_lnet.ln_api_mutex);
3615 net = lnet_get_net_locked(net_id);
3622 net_ni_count = lnet_get_net_ni_count_locked(net);
3626 /* create and link a new ping info, before removing the old one */
3627 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3628 lnet_get_ni_count() - net_ni_count, false);
3632 lnet_shutdown_lndnet(net);
3634 lnet_acceptor_stop();
3636 lnet_ping_target_update(pbuf, ping_mdh);
3639 mutex_unlock(&the_lnet.ln_api_mutex);
3644 void lnet_incr_dlc_seq(void)
3646 atomic_inc(&lnet_dlc_seq_no);
3649 __u32 lnet_get_dlc_seq_locked(void)
3651 return atomic_read(&lnet_dlc_seq_no);
3655 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3657 struct lnet_net *net;
3660 lnet_net_lock(LNET_LOCK_EX);
3661 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3662 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3663 if (ni->ni_nid == nid || all) {
3664 atomic_set(&ni->ni_healthv, value);
3665 if (list_empty(&ni->ni_recovery) &&
3666 value < LNET_MAX_HEALTH_VALUE) {
3667 CERROR("manually adding local NI %s to recovery\n",
3668 libcfs_nid2str(ni->ni_nid));
3669 list_add_tail(&ni->ni_recovery,
3670 &the_lnet.ln_mt_localNIRecovq);
3671 lnet_ni_addref_locked(ni, 0);
3674 lnet_net_unlock(LNET_LOCK_EX);
3680 lnet_net_unlock(LNET_LOCK_EX);
3684 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3688 lnet_nid_t nid = stats->hlni_nid;
3690 cpt = lnet_net_lock_current();
3691 ni = lnet_nid2ni_locked(nid, cpt);
3698 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3699 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3700 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3701 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3702 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3703 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3704 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3705 stats->hlni_ping_count = ni->ni_ping_count;
3706 stats->hlni_next_ping = ni->ni_next_ping;
3709 lnet_net_unlock(cpt);
3715 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3720 lnet_net_lock(LNET_LOCK_EX);
3721 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
3722 list->rlst_nid_array[i] = ni->ni_nid;
3724 if (i >= LNET_MAX_SHOW_NUM_NID)
3727 lnet_net_unlock(LNET_LOCK_EX);
3728 list->rlst_num_nids = i;
3734 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3736 struct lnet_peer_ni *lpni;
3739 lnet_net_lock(LNET_LOCK_EX);
3740 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
3741 list->rlst_nid_array[i] = lpni->lpni_nid;
3743 if (i >= LNET_MAX_SHOW_NUM_NID)
3746 lnet_net_unlock(LNET_LOCK_EX);
3747 list->rlst_num_nids = i;
3753 * LNet ioctl handler.
3757 LNetCtl(unsigned int cmd, void *arg)
3759 struct libcfs_ioctl_data *data = arg;
3760 struct lnet_ioctl_config_data *config;
3761 struct lnet_process_id id = {0};
3765 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3766 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
3769 case IOC_LIBCFS_GET_NI:
3770 rc = LNetGetId(data->ioc_count, &id);
3771 data->ioc_nid = id.nid;
3774 case IOC_LIBCFS_FAIL_NID:
3775 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3777 case IOC_LIBCFS_ADD_ROUTE: {
3778 /* default router sensitivity to 1 */
3779 unsigned int sensitivity = 1;
3782 if (config->cfg_hdr.ioc_len < sizeof(*config))
3785 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
3787 config->cfg_config_u.cfg_route.rtr_sensitivity;
3790 mutex_lock(&the_lnet.ln_api_mutex);
3791 rc = lnet_add_route(config->cfg_net,
3792 config->cfg_config_u.cfg_route.rtr_hop,
3794 config->cfg_config_u.cfg_route.
3795 rtr_priority, sensitivity);
3796 mutex_unlock(&the_lnet.ln_api_mutex);
3800 case IOC_LIBCFS_DEL_ROUTE:
3803 if (config->cfg_hdr.ioc_len < sizeof(*config))
3806 mutex_lock(&the_lnet.ln_api_mutex);
3807 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3808 mutex_unlock(&the_lnet.ln_api_mutex);
3811 case IOC_LIBCFS_GET_ROUTE:
3814 if (config->cfg_hdr.ioc_len < sizeof(*config))
3817 mutex_lock(&the_lnet.ln_api_mutex);
3818 rc = lnet_get_route(config->cfg_count,
3820 &config->cfg_config_u.cfg_route.rtr_hop,
3822 &config->cfg_config_u.cfg_route.rtr_flags,
3823 &config->cfg_config_u.cfg_route.
3825 &config->cfg_config_u.cfg_route.
3827 mutex_unlock(&the_lnet.ln_api_mutex);
3830 case IOC_LIBCFS_GET_LOCAL_NI: {
3831 struct lnet_ioctl_config_ni *cfg_ni;
3832 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3833 struct lnet_ioctl_element_stats *stats;
3838 /* get the tunables if they are available */
3839 if (cfg_ni->lic_cfg_hdr.ioc_len <
3840 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3843 stats = (struct lnet_ioctl_element_stats *)
3845 tun = (struct lnet_ioctl_config_lnd_tunables *)
3846 (cfg_ni->lic_bulk + sizeof(*stats));
3848 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3851 mutex_lock(&the_lnet.ln_api_mutex);
3852 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3853 mutex_unlock(&the_lnet.ln_api_mutex);
3857 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3858 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3860 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3863 mutex_lock(&the_lnet.ln_api_mutex);
3864 rc = lnet_get_ni_stats(msg_stats);
3865 mutex_unlock(&the_lnet.ln_api_mutex);
3870 case IOC_LIBCFS_GET_NET: {
3871 size_t total = sizeof(*config) +
3872 sizeof(struct lnet_ioctl_net_config);
3875 if (config->cfg_hdr.ioc_len < total)
3878 mutex_lock(&the_lnet.ln_api_mutex);
3879 rc = lnet_get_net_config(config);
3880 mutex_unlock(&the_lnet.ln_api_mutex);
3884 case IOC_LIBCFS_GET_LNET_STATS:
3886 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3888 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3891 mutex_lock(&the_lnet.ln_api_mutex);
3892 rc = lnet_counters_get(&lnet_stats->st_cntrs);
3893 mutex_unlock(&the_lnet.ln_api_mutex);
3897 case IOC_LIBCFS_CONFIG_RTR:
3900 if (config->cfg_hdr.ioc_len < sizeof(*config))
3903 mutex_lock(&the_lnet.ln_api_mutex);
3904 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3905 rc = lnet_rtrpools_enable();
3906 mutex_unlock(&the_lnet.ln_api_mutex);
3909 lnet_rtrpools_disable();
3910 mutex_unlock(&the_lnet.ln_api_mutex);
3913 case IOC_LIBCFS_ADD_BUF:
3916 if (config->cfg_hdr.ioc_len < sizeof(*config))
3919 mutex_lock(&the_lnet.ln_api_mutex);
3920 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3922 config->cfg_config_u.cfg_buffers.
3924 config->cfg_config_u.cfg_buffers.
3926 mutex_unlock(&the_lnet.ln_api_mutex);
3929 case IOC_LIBCFS_SET_NUMA_RANGE: {
3930 struct lnet_ioctl_set_value *numa;
3932 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3934 lnet_net_lock(LNET_LOCK_EX);
3935 lnet_numa_range = numa->sv_value;
3936 lnet_net_unlock(LNET_LOCK_EX);
3940 case IOC_LIBCFS_GET_NUMA_RANGE: {
3941 struct lnet_ioctl_set_value *numa;
3943 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3945 numa->sv_value = lnet_numa_range;
3949 case IOC_LIBCFS_GET_BUF: {
3950 struct lnet_ioctl_pool_cfg *pool_cfg;
3951 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3955 if (config->cfg_hdr.ioc_len < total)
3958 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3960 mutex_lock(&the_lnet.ln_api_mutex);
3961 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3962 mutex_unlock(&the_lnet.ln_api_mutex);
3966 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
3967 struct lnet_ioctl_local_ni_hstats *stats = arg;
3969 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
3972 mutex_lock(&the_lnet.ln_api_mutex);
3973 rc = lnet_get_local_ni_hstats(stats);
3974 mutex_unlock(&the_lnet.ln_api_mutex);
3979 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
3980 struct lnet_ioctl_recovery_list *list = arg;
3981 if (list->rlst_hdr.ioc_len < sizeof(*list))
3984 mutex_lock(&the_lnet.ln_api_mutex);
3985 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
3986 rc = lnet_get_local_ni_recovery_list(list);
3988 rc = lnet_get_peer_ni_recovery_list(list);
3989 mutex_unlock(&the_lnet.ln_api_mutex);
3993 case IOC_LIBCFS_ADD_PEER_NI: {
3994 struct lnet_ioctl_peer_cfg *cfg = arg;
3996 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3999 mutex_lock(&the_lnet.ln_api_mutex);
4000 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
4003 mutex_unlock(&the_lnet.ln_api_mutex);
4007 case IOC_LIBCFS_DEL_PEER_NI: {
4008 struct lnet_ioctl_peer_cfg *cfg = arg;
4010 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4013 mutex_lock(&the_lnet.ln_api_mutex);
4014 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
4015 cfg->prcfg_cfg_nid);
4016 mutex_unlock(&the_lnet.ln_api_mutex);
4020 case IOC_LIBCFS_GET_PEER_INFO: {
4021 struct lnet_ioctl_peer *peer_info = arg;
4023 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
4026 mutex_lock(&the_lnet.ln_api_mutex);
4027 rc = lnet_get_peer_ni_info(
4028 peer_info->pr_count,
4030 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
4031 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
4032 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
4033 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
4034 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
4035 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
4036 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
4037 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
4038 mutex_unlock(&the_lnet.ln_api_mutex);
4042 case IOC_LIBCFS_GET_PEER_NI: {
4043 struct lnet_ioctl_peer_cfg *cfg = arg;
4045 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4048 mutex_lock(&the_lnet.ln_api_mutex);
4049 rc = lnet_get_peer_info(cfg,
4050 (void __user *)cfg->prcfg_bulk);
4051 mutex_unlock(&the_lnet.ln_api_mutex);
4055 case IOC_LIBCFS_GET_PEER_LIST: {
4056 struct lnet_ioctl_peer_cfg *cfg = arg;
4058 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4061 mutex_lock(&the_lnet.ln_api_mutex);
4062 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
4063 (struct lnet_process_id __user *)cfg->prcfg_bulk);
4064 mutex_unlock(&the_lnet.ln_api_mutex);
4068 case IOC_LIBCFS_SET_HEALHV: {
4069 struct lnet_ioctl_reset_health_cfg *cfg = arg;
4071 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
4073 if (cfg->rh_value < 0 ||
4074 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
4075 value = LNET_MAX_HEALTH_VALUE;
4077 value = cfg->rh_value;
4078 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
4079 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
4080 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
4081 mutex_lock(&the_lnet.ln_api_mutex);
4082 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
4083 lnet_ni_set_healthv(cfg->rh_nid, value,
4086 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
4088 mutex_unlock(&the_lnet.ln_api_mutex);
4092 case IOC_LIBCFS_NOTIFY_ROUTER: {
4093 time64_t deadline = data->ioc_u64[0] - ktime_get_real_seconds();
4095 /* The timestamp passed in by the user is a wall-clock time in
4096 * seconds since the UNIX epoch. Map it onto the monotonic clock
4097 * by adding its offset from wall-clock "now" to monotonic "now".
4099 deadline += ktime_get_seconds();
4100 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, false,
4104 case IOC_LIBCFS_LNET_DIST:
4105 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
4106 if (rc < 0 && rc != -EHOSTUNREACH)
4109 data->ioc_u32[0] = rc;
4112 case IOC_LIBCFS_TESTPROTOCOMPAT:
4113 the_lnet.ln_testprotocompat = data->ioc_flags;
4116 case IOC_LIBCFS_LNET_FAULT:
4117 return lnet_fault_ctl(data->ioc_flags, data);
4119 case IOC_LIBCFS_PING: {
4120 signed long timeout;
4122 id.nid = data->ioc_nid;
4123 id.pid = data->ioc_u32[0];
4125 /* If the timeout is not positive or is too large, use the default of 3 minutes */
4126 if (((s32)data->ioc_u32[1] <= 0) ||
4127 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4128 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4130 timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
4132 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
4133 data->ioc_plen1 / sizeof(struct lnet_process_id));
4138 data->ioc_count = rc;
4142 case IOC_LIBCFS_PING_PEER: {
4143 struct lnet_ioctl_ping_data *ping = arg;
4144 struct lnet_peer *lp;
4145 signed long timeout;
4147 /* If the timeout is not positive or is too large, use the default of 3 minutes */
4148 if (((s32)ping->op_param) <= 0 ||
4149 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4150 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4152 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
4154 rc = lnet_ping(ping->ping_id, timeout,
4160 mutex_lock(&the_lnet.ln_api_mutex);
4161 lp = lnet_find_peer(ping->ping_id.nid);
4163 ping->ping_id.nid = lp->lp_primary_nid;
4164 ping->mr_info = lnet_peer_is_multi_rail(lp);
4165 lnet_peer_decref_locked(lp);
4167 mutex_unlock(&the_lnet.ln_api_mutex);
4169 ping->ping_count = rc;
4173 case IOC_LIBCFS_DISCOVER: {
4174 struct lnet_ioctl_ping_data *discover = arg;
4175 struct lnet_peer *lp;
4177 rc = lnet_discover(discover->ping_id, discover->op_param,
4179 discover->ping_count);
4183 mutex_lock(&the_lnet.ln_api_mutex);
4184 lp = lnet_find_peer(discover->ping_id.nid);
4186 discover->ping_id.nid = lp->lp_primary_nid;
4187 discover->mr_info = lnet_peer_is_multi_rail(lp);
4188 lnet_peer_decref_locked(lp);
4190 mutex_unlock(&the_lnet.ln_api_mutex);
4192 discover->ping_count = rc;
4196 case IOC_LIBCFS_ADD_UDSP: {
4197 struct lnet_ioctl_udsp *ioc_udsp = arg;
4198 __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
4200 mutex_lock(&the_lnet.ln_api_mutex);
4201 rc = lnet_udsp_demarshal_add(arg, bulk_size);
4203 rc = lnet_udsp_apply_policies(NULL, false);
4204 CDEBUG(D_NET, "policy application returned %d\n", rc);
4207 mutex_unlock(&the_lnet.ln_api_mutex);
4212 case IOC_LIBCFS_DEL_UDSP: {
4213 struct lnet_ioctl_udsp *ioc_udsp = arg;
4214 int idx = ioc_udsp->iou_idx;
4216 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4219 mutex_lock(&the_lnet.ln_api_mutex);
4220 rc = lnet_udsp_del_policy(idx);
4222 rc = lnet_udsp_apply_policies(NULL, false);
4223 CDEBUG(D_NET, "policy re-application returned %d\n",
4227 mutex_unlock(&the_lnet.ln_api_mutex);
4232 case IOC_LIBCFS_GET_UDSP_SIZE: {
4233 struct lnet_ioctl_udsp *ioc_udsp = arg;
4234 struct lnet_udsp *udsp;
4236 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4241 mutex_lock(&the_lnet.ln_api_mutex);
4242 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4246 /* coming in, iou_idx holds the index of the UDSP
4247 * to get the size of; going out, it holds the size
4248 * of the UDSP found at the passed-in index. */
4251 ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
4252 if (ioc_udsp->iou_idx < 0)
4255 mutex_unlock(&the_lnet.ln_api_mutex);
4260 case IOC_LIBCFS_GET_UDSP: {
4261 struct lnet_ioctl_udsp *ioc_udsp = arg;
4262 struct lnet_udsp *udsp;
4264 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4269 mutex_lock(&the_lnet.ln_api_mutex);
4270 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4274 rc = lnet_udsp_marshal(udsp, ioc_udsp);
4275 mutex_unlock(&the_lnet.ln_api_mutex);
4280 case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
4281 struct lnet_ioctl_construct_udsp_info *info = arg;
4283 if (info->cud_hdr.ioc_len < sizeof(*info))
4286 CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
4287 libcfs_nid2str(info->cud_nid));
4289 mutex_lock(&the_lnet.ln_api_mutex);
4290 lnet_udsp_get_construct_info(info);
4291 mutex_unlock(&the_lnet.ln_api_mutex);
4297 ni = lnet_net2ni_addref(data->ioc_net);
4301 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
4304 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
4311 EXPORT_SYMBOL(LNetCtl);
4313 void LNetDebugPeer(struct lnet_process_id id)
4315 lnet_debug_peer(id.nid);
4317 EXPORT_SYMBOL(LNetDebugPeer);
4320 * Determine if the specified peer \a nid is on the local node.
4322 * \param nid peer nid to check
4324 * \retval true If peer NID is on the local node.
4325 * \retval false If peer NID is not on the local node.
4327 bool LNetIsPeerLocal(lnet_nid_t nid)
4329 struct lnet_net *net;
4333 cpt = lnet_net_lock_current();
4334 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4335 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4336 if (ni->ni_nid == nid) {
4337 lnet_net_unlock(cpt);
4342 lnet_net_unlock(cpt);
4346 EXPORT_SYMBOL(LNetIsPeerLocal);
4349 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
4350 * Note that all interfaces share a same PID, as requested by LNetNIInit().
4352 * \param index Index of the interface to look up.
4353 * \param id On successful return, this location will hold the
4354 * struct lnet_process_id ID of the interface.
4356 * \retval 0 If an interface exists at \a index.
4357 * \retval -ENOENT If no interface has been found.
4360 LNetGetId(unsigned int index, struct lnet_process_id *id)
4363 struct lnet_net *net;
4367 LASSERT(the_lnet.ln_refcount > 0);
4369 cpt = lnet_net_lock_current();
4371 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4372 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4376 id->nid = ni->ni_nid;
4377 id->pid = the_lnet.ln_pid;
4383 lnet_net_unlock(cpt);
4386 EXPORT_SYMBOL(LNetGetId);
4391 struct lnet_handle_md mdh;
4392 struct completion completion;
4396 lnet_ping_event_handler(struct lnet_event *event)
4398 struct ping_data *pd = event->md_user_ptr;
4400 CDEBUG(D_NET, "ping event (%d %d)%s\n",
4401 event->type, event->status,
4402 event->unlinked ? " unlinked" : "");
4404 if (event->status) {
4406 pd->rc = event->status;
4407 } else if (event->type == LNET_EVENT_REPLY) {
4409 pd->rc = event->mlength;
4411 if (event->unlinked)
4412 complete(&pd->completion);
4415 static int lnet_ping(struct lnet_process_id id, signed long timeout,
4416 struct lnet_process_id __user *ids, int n_ids)
4418 struct lnet_md md = { NULL };
4419 struct ping_data pd = { 0 };
4420 struct lnet_ping_buffer *pbuf;
4421 struct lnet_process_id tmpid;
4427 /* n_ids limit is arbitrary */
4428 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
4432 * if the user buffer has space for more entries than
4433 * lnet_interfaces_max, then only fill it up to lnet_interfaces_max
4435 if (n_ids > lnet_interfaces_max)
4436 n_ids = lnet_interfaces_max;
4438 if (id.pid == LNET_PID_ANY)
4439 id.pid = LNET_PID_LUSTRE;
4441 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
4445 /* initialize md content */
4446 md.start = &pbuf->pb_info;
4447 md.length = LNET_PING_INFO_SIZE(n_ids);
4448 md.threshold = 2; /* GET/REPLY */
4450 md.options = LNET_MD_TRUNCATE;
4452 md.handler = lnet_ping_event_handler;
4454 init_completion(&pd.completion);
4456 rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
4458 CERROR("Can't bind MD: %d\n", rc);
4459 goto fail_ping_buffer_decref;
4462 rc = LNetGet(LNET_NID_ANY, pd.mdh, id,
4463 LNET_RESERVED_PORTAL,
4464 LNET_PROTO_PING_MATCHBITS, 0, false);
4467 /* Don't CERROR; this could be deliberate! */
4468 rc2 = LNetMDUnlink(pd.mdh);
4471 /* NB must wait for the UNLINK event below... */
4474 if (wait_for_completion_timeout(&pd.completion, timeout) == 0) {
4475 /* Ensure completion in finite time... */
4476 LNetMDUnlink(pd.mdh);
4477 wait_for_completion(&pd.completion);
4481 goto fail_ping_buffer_decref;
4485 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
4487 rc = -EPROTO; /* if I can't parse... */
4490 CERROR("%s: ping info too short %d\n",
4491 libcfs_id2str(id), nob);
4492 goto fail_ping_buffer_decref;
4495 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
4496 lnet_swap_pinginfo(pbuf);
4497 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
4498 CERROR("%s: Unexpected magic %08x\n",
4499 libcfs_id2str(id), pbuf->pb_info.pi_magic);
4500 goto fail_ping_buffer_decref;
4503 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
4504 CERROR("%s: ping w/o NI status: 0x%x\n",
4505 libcfs_id2str(id), pbuf->pb_info.pi_features);
4506 goto fail_ping_buffer_decref;
4509 if (nob < LNET_PING_INFO_SIZE(0)) {
4510 CERROR("%s: Short reply %d(%d min)\n",
4512 nob, (int)LNET_PING_INFO_SIZE(0));
4513 goto fail_ping_buffer_decref;
4516 if (pbuf->pb_info.pi_nnis < n_ids)
4517 n_ids = pbuf->pb_info.pi_nnis;
4519 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
4520 CERROR("%s: Short reply %d(%d expected)\n",
4522 nob, (int)LNET_PING_INFO_SIZE(n_ids));
4523 goto fail_ping_buffer_decref;
4526 rc = -EFAULT; /* if I segv in copy_to_user()... */
4528 memset(&tmpid, 0, sizeof(tmpid));
4529 for (i = 0; i < n_ids; i++) {
4530 tmpid.pid = pbuf->pb_info.pi_pid;
4531 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
4532 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
4533 goto fail_ping_buffer_decref;
4535 rc = pbuf->pb_info.pi_nnis;
4537 fail_ping_buffer_decref:
4538 lnet_ping_buffer_decref(pbuf);
4543 lnet_discover(struct lnet_process_id id, __u32 force,
4544 struct lnet_process_id __user *ids, int n_ids)
4546 struct lnet_peer_ni *lpni;
4547 struct lnet_peer_ni *p;
4548 struct lnet_peer *lp;
4549 struct lnet_process_id *buf;
4555 id.nid == LNET_NID_ANY)
4558 if (id.pid == LNET_PID_ANY)
4559 id.pid = LNET_PID_LUSTRE;
4562 * If the user buffer has more space than the lnet_interfaces_max,
4563 * then only fill it up to lnet_interfaces_max.
4565 if (n_ids > lnet_interfaces_max)
4566 n_ids = lnet_interfaces_max;
4568 CFS_ALLOC_PTR_ARRAY(buf, n_ids);
4572 cpt = lnet_net_lock_current();
4573 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
4580 * Clearing the NIDS_UPTODATE flag ensures the peer will
4581 * be discovered, provided discovery has not been disabled.
4583 lp = lpni->lpni_peer_net->lpn_peer;
4584 spin_lock(&lp->lp_lock);
4585 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
4586 /* If the force flag is set, force a PING and PUSH as well. */
4588 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
4589 spin_unlock(&lp->lp_lock);
4590 rc = lnet_discover_peer_locked(lpni, cpt, true);
4594 /* The lpni (or lp) for this NID may have changed and our ref is
4595 * the only thing keeping the old one around. Release the ref
4596 * and look up the lpni again
4598 lnet_peer_ni_decref_locked(lpni);
4599 lpni = lnet_find_peer_ni_locked(id.nid);
4604 lp = lpni->lpni_peer_net->lpn_peer;
4608 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
4609 buf[i].pid = id.pid;
4610 buf[i].nid = p->lpni_nid;
4617 lnet_peer_ni_decref_locked(lpni);
4619 lnet_net_unlock(cpt);
4622 if (copy_to_user(ids, buf, rc * sizeof(*buf)))
4624 CFS_FREE_PTR_ARRAY(buf, n_ids);
4630 * Retrieve peer discovery status.
4632 * \retval 1 if lnet_peer_discovery_disabled is 0
4633 * \retval 0 if lnet_peer_discovery_disabled is 1
4636 LNetGetPeerDiscoveryStatus(void)
4638 return !lnet_peer_discovery_disabled;
4640 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);