/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif

#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE

/*
 * initialize ln_api_mutex statically, since it needs to be used in
 * discovery_set callback. That module parameter callback can be called
 * before module init completes. The mutex needs to be ready for use then.
 */
struct lnet the_lnet = {
	.ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
};		/* THE state of the network */
EXPORT_SYMBOL(the_lnet);

static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");

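/*
 * These strings are normally supplied via modprobe configuration rather
 * than set here. A hypothetical example (illustrative values only):
 *
 *   options lnet networks="tcp0(eth0)" routes="o2ib 10.10.0.1@tcp0"
 *
 * Note that 'networks' and 'ip2nets' are mutually exclusive; see
 * lnet_get_networks() below.
 */
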
static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding = false;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
		 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");

unsigned int lnet_numa_range = 0;
module_param(lnet_numa_range, uint, 0444);
MODULE_PARM_DESC(lnet_numa_range,
		 "NUMA range to consider during Multi-Rail selection");

/*
 * lnet_health_sensitivity determines by how much we decrement the health
 * value on a send error. The value defaults to 100, which means an
 * interface's health value is decremented by 100 points on every failure.
 */
unsigned int lnet_health_sensitivity = 100;
static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_health_sensitivity = {
	.set = sensitivity_set,
	.get = param_get_int,
};
#define param_check_health_sensitivity(name, p) \
		__param_check(name, p, int)
module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
		  &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_health_sensitivity,
		 "Value to decrement the health value by on error");

/*
 * lnet_recovery_interval determines how often we should perform recovery
 * on unhealthy interfaces.
 */
unsigned int lnet_recovery_interval = 1;
static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_recovery_interval = {
	.set = recovery_interval_set,
	.get = param_get_int,
};
#define param_check_recovery_interval(name, p) \
		__param_check(name, p, int)
module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
		  &lnet_recovery_interval, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_recovery_interval,
		 "Interval to recover unhealthy interfaces in seconds");

static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_interfaces_max = {
	.set = intf_max_set,
	.get = param_get_int,
};

#define param_check_interfaces_max(name, p) \
		__param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_interfaces_max, interfaces_max, 0644);
#else
module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
		  &param_ops_interfaces_max, 0644);
#endif
MODULE_PARM_DESC(lnet_interfaces_max,
		 "Maximum number of interfaces in a node.");

unsigned lnet_peer_discovery_disabled = 0;
static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_discovery_disabled = {
	.set = discovery_set,
	.get = param_get_int,
};

#define param_check_discovery_disabled(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
#else
module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
		  &param_ops_discovery_disabled, 0644);
#endif
MODULE_PARM_DESC(lnet_peer_discovery_disabled,
		 "Set to 1 to disable peer discovery on this node.");

unsigned int lnet_drop_asym_route;
static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_drop_asym_route = {
	.set = drop_asym_route_set,
	.get = param_get_int,
};

#define param_check_drop_asym_route(name, p) \
	__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_drop_asym_route, drop_asym_route, 0644);
#else
module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
		  &param_ops_drop_asym_route, 0644);
#endif
MODULE_PARM_DESC(lnet_drop_asym_route,
		 "Set to 1 to drop asymmetrical route messages.");

#define LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT 50
#define LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT 50

unsigned lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_transaction_timeout = {
	.set = transaction_to_set,
	.get = param_get_int,
};

#define param_check_transaction_timeout(name, p) \
		__param_check(name, p, int)
module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
		  &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_transaction_timeout,
		 "Maximum number of seconds to wait for a peer response.");

#define LNET_RETRY_COUNT_HEALTH_DEFAULT 2
unsigned lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_retry_count = {
	.set = retry_count_set,
	.get = param_get_int,
};

#define param_check_retry_count(name, p) \
		__param_check(name, p, int)
module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_retry_count, retry_count_set, param_get_int,
		  &lnet_retry_count, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_retry_count,
		 "Maximum number of times to retry transmitting a message");

unsigned int lnet_response_tracking = 3;
static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);

#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_response_tracking = {
	.set = response_tracking_set,
	.get = param_get_int,
};

#define param_check_response_tracking(name, p) \
	__param_check(name, p, int)
module_param(lnet_response_tracking, response_tracking, 0644);
#else
module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
		  &lnet_response_tracking, 0644);
#endif
MODULE_PARM_DESC(lnet_response_tracking,
		 "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");

#define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT - 1) / \
				  (LNET_RETRY_COUNT_HEALTH_DEFAULT + 1))
unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;

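/*
 * The LND-level timeout is derived from the transaction timeout and the
 * retry count: each of the (lnet_retry_count + 1) send attempts gets
 * (lnet_transaction_timeout - 1) / (lnet_retry_count + 1) seconds, so
 * all retries fit within the overall transaction deadline.
 */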
static void lnet_set_lnd_timeout(void)
{
	lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
			   (lnet_retry_count + 1);
}

unsigned int lnet_current_net_count;

/*
 * This sequence number keeps track of how many times DLC was used to
 * update the local NIs. It is incremented when a NI is added or
 * removed and checked when sending a message to determine if there is
 * a need to re-run the selection algorithm. See lnet_select_pathway()
 * for more details on its usage.
 */
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);

static int lnet_ping(struct lnet_process_id id, signed long timeout,
		     struct lnet_process_id __user *ids, int n_ids);

static int lnet_discover(struct lnet_process_id id, __u32 force,
			 struct lnet_process_id __user *ids, int n_ids);

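/*
 * The module parameter setters below share one pattern: parse the new
 * value, validate it, then store it while holding ln_api_mutex so the
 * update cannot race with LNet startup/shutdown or concurrent DLC
 * configuration changes.
 */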
static int
sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *sensitivity = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value > LNET_MAX_HEALTH_VALUE) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid health value. Maximum: %d value = %lu\n",
		       LNET_MAX_HEALTH_VALUE, value);
		return -EINVAL;
	}

	/*
	 * if we're turning on health then use the health timeout
	 * defaults.
	 */
	if (*sensitivity == 0 && value != 0) {
		lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
		lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
		lnet_set_lnd_timeout();
	/*
	 * if we're turning off health then use the no health timeout
	 * default.
	 */
	} else if (*sensitivity != 0 && value == 0) {
		lnet_transaction_timeout =
			LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT;
		lnet_retry_count = 0;
		lnet_set_lnd_timeout();
	}

	*sensitivity = value;
	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *interval = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_recovery_interval'\n");
		return rc;
	}

	if (value < 1) {
		CERROR("lnet_recovery_interval must be at least 1 second\n");
		return -EINVAL;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);
	*interval = value;
	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

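/*
 * Changing the discovery setting is more involved than storing a flag:
 * when LNet is running, discovery_set() below also flips the
 * LNET_PING_FEAT_DISCOVERY bit in the ping target and pushes an update
 * to peers when discovery is being turned off.
 */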
static int
discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *discovery_off = (unsigned *)kp->arg;
	unsigned long value;
	struct lnet_ping_buffer *pbuf;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
		return rc;
	}

	value = (value) ? 1 : 0;

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value == *discovery_off) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	/*
	 * We still want to set the discovery value even when LNet is not
	 * running. This is the case when LNet is being loaded and we want
	 * the module parameters to take effect. Otherwise if we're
	 * changing the value dynamically, we want to set it after
	 * updating the peers.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		*discovery_off = value;
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	/* tell peers that discovery setting has changed */
	lnet_net_lock(LNET_LOCK_EX);
	pbuf = the_lnet.ln_ping_target;
	if (value)
		pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
	else
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
	lnet_net_unlock(LNET_LOCK_EX);

	/* only send a push when we're turning off discovery */
	if (*discovery_off <= 0 && value > 0)
		lnet_push_update_to_peers(1);
	*discovery_off = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned int *drop_asym_route = (unsigned int *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_drop_asym_route'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value == *drop_asym_route) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*drop_asym_route = value;
	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *transaction_to = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value < lnet_retry_count || value == 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid value for lnet_transaction_timeout (%lu). Has to be greater than lnet_retry_count (%u)\n",
		       value, lnet_retry_count);
		return -EINVAL;
	}

	if (value == *transaction_to) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*transaction_to = value;
	/* Update the lnet_lnd_timeout now that we've modified the
	 * transaction timeout
	 */
	lnet_set_lnd_timeout();

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *retry_count = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (lnet_health_sensitivity == 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Can not set retry_count when health feature is turned off\n");
		return -EINVAL;
	}

	if (value > lnet_transaction_timeout) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid value for lnet_retry_count (%lu). Has to be smaller than lnet_transaction_timeout (%u)\n",
		       value, lnet_transaction_timeout);
		return -EINVAL;
	}

	*retry_count = value;

	/* Update the lnet_lnd_timeout now that we've modified the
	 * retry count
	 */
	lnet_set_lnd_timeout();

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int value, rc;

	rc = kstrtoint(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
		return rc;
	}

	if (value < LNET_INTERFACES_MIN) {
		CWARN("max interfaces provided are too small, setting to %d\n",
		      LNET_INTERFACES_MAX_DEFAULT);
		value = LNET_INTERFACES_MAX_DEFAULT;
	}

	*(int *)kp->arg = value;

	return 0;
}

static int
response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned long new_value;

	rc = kstrtoul(val, 0, &new_value);
	if (rc) {
		CERROR("Invalid value for 'lnet_response_tracking'\n");
		return rc;
	}

	/* new_value is unsigned, so only the upper bound needs checking */
	if (new_value > 3) {
		CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
		      new_value);
		return -EINVAL;
	}

	lnet_response_tracking = new_value;

	return 0;
}

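/*
 * Configuration string selection: 'routes' is returned verbatim, while
 * lnet_get_networks() arbitrates between 'networks' and 'ip2nets'
 * (which are mutually exclusive) and falls back to "tcp".
 */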
static char *
lnet_get_routes(void)
{
	return routes;
}

static char *
lnet_get_networks(void)
{
	char *nets;
	int rc;

	if (*networks != 0 && *ip2nets != 0) {
		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
				   "'ip2nets' but not both at once\n");
		return NULL;
	}

	if (*ip2nets != 0) {
		rc = lnet_parse_ip2nets(&nets, ip2nets);
		return (rc == 0) ? nets : NULL;
	}

	if (*networks != 0)
		return networks;

	return "tcp";
}

static void
lnet_init_locks(void)
{
	spin_lock_init(&the_lnet.ln_eq_wait_lock);
	spin_lock_init(&the_lnet.ln_msg_resend_lock);
	init_completion(&the_lnet.ln_mt_wait_complete);
	mutex_init(&the_lnet.ln_lnd_mutex);
}

struct kmem_cache *lnet_mes_cachep;	   /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
					    * MDs kmem_cache */
struct kmem_cache *lnet_rspt_cachep;	   /* response tracker cache */
struct kmem_cache *lnet_msg_cachep;

static int
lnet_slab_setup(void)
{
	/* create specific kmem_cache for MEs and small MDs (i.e., originally
	 * allocated in <size-xxx> kmem_cache).
	 */
	lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
					    0, 0, NULL);
	if (!lnet_mes_cachep)
		return -ENOMEM;

	lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
						  LNET_SMALL_MD_SIZE, 0, 0,
						  NULL);
	if (!lnet_small_mds_cachep)
		return -ENOMEM;

	lnet_rspt_cachep = kmem_cache_create("lnet_rspt", sizeof(struct lnet_rsp_tracker),
					     0, 0, NULL);
	if (!lnet_rspt_cachep)
		return -ENOMEM;

	lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
					    0, 0, NULL);
	if (!lnet_msg_cachep)
		return -ENOMEM;

	return 0;
}

static void
lnet_slab_cleanup(void)
{
	if (lnet_msg_cachep) {
		kmem_cache_destroy(lnet_msg_cachep);
		lnet_msg_cachep = NULL;
	}

	if (lnet_rspt_cachep) {
		kmem_cache_destroy(lnet_rspt_cachep);
		lnet_rspt_cachep = NULL;
	}

	if (lnet_small_mds_cachep) {
		kmem_cache_destroy(lnet_small_mds_cachep);
		lnet_small_mds_cachep = NULL;
	}

	if (lnet_mes_cachep) {
		kmem_cache_destroy(lnet_mes_cachep);
		lnet_mes_cachep = NULL;
	}
}

static int
lnet_create_remote_nets_table(void)
{
	int i;
	struct list_head *hash;

	LASSERT(the_lnet.ln_remote_nets_hash == NULL);
	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
	CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
	if (hash == NULL) {
		CERROR("Failed to create remote nets hash table\n");
		return -ENOMEM;
	}

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		INIT_LIST_HEAD(&hash[i]);
	the_lnet.ln_remote_nets_hash = hash;
	return 0;
}

static void
lnet_destroy_remote_nets_table(void)
{
	int i;

	if (the_lnet.ln_remote_nets_hash == NULL)
		return;

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));

	CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
			   LNET_REMOTE_NETS_HASH_SIZE);
	the_lnet.ln_remote_nets_hash = NULL;
}

static void
lnet_destroy_locks(void)
{
	if (the_lnet.ln_res_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_res_lock);
		the_lnet.ln_res_lock = NULL;
	}

	if (the_lnet.ln_net_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_net_lock);
		the_lnet.ln_net_lock = NULL;
	}
}

static int
lnet_create_locks(void)
{
	lnet_init_locks();

	the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_res_lock == NULL)
		goto failed;

	the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_net_lock == NULL)
		goto failed;

	return 0;

failed:
	lnet_destroy_locks();
	return -ENOMEM;
}

static void lnet_assert_wire_constants(void)
{
	/* Wire protocol assertions generated by 'wirecheck'
	 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
	 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */

	/* Constants... */
	BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
	BUILD_BUG_ON(LNET_MSG_ACK != 0);
	BUILD_BUG_ON(LNET_MSG_PUT != 1);
	BUILD_BUG_ON(LNET_MSG_GET != 2);
	BUILD_BUG_ON(LNET_MSG_REPLY != 3);
	BUILD_BUG_ON(LNET_MSG_HELLO != 4);

	/* Checks for struct lnet_handle_wire */
	BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
				   wh_interface_cookie) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
				   wh_object_cookie) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);

	/* Checks for struct lnet_magicversion */
	BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
				   version_minor) != 6);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);

	/* Checks for struct lnet_hdr */
	BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);

	/* Ack */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);

	/* Put */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);

	/* Get */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);

	/* Reply */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);

	/* Hello */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);

	/* Checks for struct lnet_ni_status and related constants */
	BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
	BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
	BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);

	/* Checks for struct lnet_ni_status */
	BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_unused) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) != 4);

	/* Checks for struct lnet_ping_info and related constants */
	BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
	BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
	BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
	BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
	BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
	BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
	BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
	BUILD_BUG_ON(LNET_PING_FEAT_BITS != 31);

	/* Checks for struct lnet_ping_info */
	BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
}

static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
{
	const struct lnet_lnd *lnd;

	/* holding lnd mutex */
	if (type >= NUM_LNDS)
		return NULL;
	lnd = the_lnet.ln_lnds[type];
	LASSERT(!lnd || lnd->lnd_type == type);

	return lnd;
}

unsigned int
lnet_get_lnd_timeout(void)
{
	return lnet_lnd_timeout;
}
EXPORT_SYMBOL(lnet_get_lnd_timeout);

void
lnet_register_lnd(const struct lnet_lnd *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);

	the_lnet.ln_lnds[lnd->lnd_type] = lnd;

	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_register_lnd);

void
lnet_unregister_lnd(const struct lnet_lnd *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);

	the_lnet.ln_lnds[lnd->lnd_type] = NULL;
	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);

static void
lnet_counters_get_common_locked(struct lnet_counters_common *common)
{
	struct lnet_counters *ctr;
	int i;

	/* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
	 * actually called under the protection of the lnet_net_lock.
	 */
	memset(common, 0, sizeof(*common));

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		common->lcc_msgs_max	 += ctr->lct_common.lcc_msgs_max;
		common->lcc_msgs_alloc	 += ctr->lct_common.lcc_msgs_alloc;
		common->lcc_errors	 += ctr->lct_common.lcc_errors;
		common->lcc_send_count	 += ctr->lct_common.lcc_send_count;
		common->lcc_recv_count	 += ctr->lct_common.lcc_recv_count;
		common->lcc_route_count	 += ctr->lct_common.lcc_route_count;
		common->lcc_drop_count	 += ctr->lct_common.lcc_drop_count;
		common->lcc_send_length	 += ctr->lct_common.lcc_send_length;
		common->lcc_recv_length	 += ctr->lct_common.lcc_recv_length;
		common->lcc_route_length += ctr->lct_common.lcc_route_length;
		common->lcc_drop_length	 += ctr->lct_common.lcc_drop_length;
	}
}

void
lnet_counters_get_common(struct lnet_counters_common *common)
{
	lnet_net_lock(LNET_LOCK_EX);
	lnet_counters_get_common_locked(common);
	lnet_net_unlock(LNET_LOCK_EX);
}
EXPORT_SYMBOL(lnet_counters_get_common);

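/*
 * lnet_counters_get() aggregates the per-CPT common and health counters
 * into a single struct lnet_counters; it fails with -ENODEV unless LNet
 * is in LNET_STATE_RUNNING.
 */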
int
lnet_counters_get(struct lnet_counters *counters)
{
	struct lnet_counters *ctr;
	struct lnet_counters_health *health = &counters->lct_health;
	int i, rc = 0;

	memset(counters, 0, sizeof(*counters));

	lnet_net_lock(LNET_LOCK_EX);

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		GOTO(out_unlock, rc = -ENODEV);

	lnet_counters_get_common_locked(&counters->lct_common);

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		health->lch_rst_alloc	 += ctr->lct_health.lch_rst_alloc;
		health->lch_resend_count += ctr->lct_health.lch_resend_count;
		health->lch_response_timeout_count +=
				ctr->lct_health.lch_response_timeout_count;
		health->lch_local_interrupt_count +=
				ctr->lct_health.lch_local_interrupt_count;
		health->lch_local_dropped_count +=
				ctr->lct_health.lch_local_dropped_count;
		health->lch_local_aborted_count +=
				ctr->lct_health.lch_local_aborted_count;
		health->lch_local_no_route_count +=
				ctr->lct_health.lch_local_no_route_count;
		health->lch_local_timeout_count +=
				ctr->lct_health.lch_local_timeout_count;
		health->lch_local_error_count +=
				ctr->lct_health.lch_local_error_count;
		health->lch_remote_dropped_count +=
				ctr->lct_health.lch_remote_dropped_count;
		health->lch_remote_error_count +=
				ctr->lct_health.lch_remote_error_count;
		health->lch_remote_timeout_count +=
				ctr->lct_health.lch_remote_timeout_count;
		health->lch_network_timeout_count +=
				ctr->lct_health.lch_network_timeout_count;
	}
out_unlock:
	lnet_net_unlock(LNET_LOCK_EX);
	return rc;
}
EXPORT_SYMBOL(lnet_counters_get);

void
lnet_counters_reset(void)
{
	struct lnet_counters *counters;
	int i;

	lnet_net_lock(LNET_LOCK_EX);

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto avoid_reset;

	cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
		memset(counters, 0, sizeof(struct lnet_counters));
avoid_reset:
	lnet_net_unlock(LNET_LOCK_EX);
}

static char *
lnet_res_type2str(int type)
{
	switch (type) {
	default:
		LBUG();
	case LNET_COOKIE_TYPE_MD:
		return "MD";
	case LNET_COOKIE_TYPE_ME:
		return "ME";
	case LNET_COOKIE_TYPE_EQ:
		return "EQ";
	}
}

static void
lnet_res_container_cleanup(struct lnet_res_container *rec)
{
	int count = 0;

	if (rec->rec_type == 0) /* not set yet, it's uninitialized */
		return;

	while (!list_empty(&rec->rec_active)) {
		struct list_head *e = rec->rec_active.next;

		list_del_init(e);
		if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
			lnet_md_free(list_entry(e, struct lnet_libmd, md_list));

		} else { /* NB: Active MEs should be attached on portals */
			LBUG();
		}
		count++;
	}

	if (count > 0) {
		/* Found alive MD/ME/EQ; users really should unlink/free
		 * all of them before finalizing LNet, but if someone didn't,
		 * we have to recycle the garbage for them */
		CERROR("%d active elements on exit of %s container\n",
		       count, lnet_res_type2str(rec->rec_type));
	}

	if (rec->rec_lh_hash != NULL) {
		CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
		rec->rec_lh_hash = NULL;
	}

	rec->rec_type = 0; /* mark it as finalized */
}

static int
lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
{
	int rc = 0;
	int i;

	LASSERT(rec->rec_type == 0);

	rec->rec_type = type;
	INIT_LIST_HEAD(&rec->rec_active);

	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;

	/* Arbitrary choice of hash table size */
	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
	if (rec->rec_lh_hash == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < LNET_LH_HASH_SIZE; i++)
		INIT_LIST_HEAD(&rec->rec_lh_hash[i]);

	return 0;

out:
	CERROR("Failed to setup %s resource container\n",
	       lnet_res_type2str(type));
	lnet_res_container_cleanup(rec);
	return rc;
}

static void
lnet_res_containers_destroy(struct lnet_res_container **recs)
{
	struct lnet_res_container *rec;
	int i;

	cfs_percpt_for_each(rec, i, recs)
		lnet_res_container_cleanup(rec);

	cfs_percpt_free(recs);
}

static struct lnet_res_container **
lnet_res_containers_create(int type)
{
	struct lnet_res_container **recs;
	struct lnet_res_container *rec;
	int rc;
	int i;

	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
	if (recs == NULL) {
		CERROR("Failed to allocate %s resource containers\n",
		       lnet_res_type2str(type));
		return NULL;
	}

	cfs_percpt_for_each(rec, i, recs) {
		rc = lnet_res_container_setup(rec, i, type);
		if (rc != 0) {
			lnet_res_containers_destroy(recs);
			return NULL;
		}
	}

	return recs;
}

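/*
 * Library handle cookies encode the resource type in the low
 * LNET_COOKIE_TYPE_BITS, the CPT number in the next LNET_CPT_BITS, and
 * a per-container counter above that; the counter bits select the
 * rec_lh_hash chain searched by the lookup below.
 */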
struct lnet_libhandle *
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
	/* ALWAYS called with lnet_res_lock held */
	struct list_head *head;
	struct lnet_libhandle *lh;
	unsigned int hash;

	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
		return NULL;

	hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
	head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];

	list_for_each_entry(lh, head, lh_hash_chain) {
		if (lh->lh_cookie == cookie)
			return lh;
	}

	return NULL;
}

void
lnet_res_lh_initialize(struct lnet_res_container *rec,
		       struct lnet_libhandle *lh)
{
	/* ALWAYS called with lnet_res_lock held */
	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
	unsigned int hash;

	lh->lh_cookie = rec->rec_lh_cookie;
	rec->rec_lh_cookie += 1 << ibits;

	hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;

	list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}

static struct list_head **
lnet_create_array_of_queues(void)
{
	struct list_head **qs;
	struct list_head *q;
	int i;

	qs = cfs_percpt_alloc(lnet_cpt_table(),
			      sizeof(struct list_head));
	if (!qs) {
		CERROR("Failed to allocate queues\n");
		return NULL;
	}

	cfs_percpt_for_each(q, i, qs)
		INIT_LIST_HEAD(q);

	return qs;
}

static int lnet_unprepare(void);

static int
lnet_prepare(lnet_pid_t requested_pid)
{
	/* Prepare to bring up the network */
	struct lnet_res_container **recs;
	int rc = 0;

	if (requested_pid == LNET_PID_ANY) {
		/* Don't instantiate LNET just for me */
		return -ENETDOWN;
	}

	LASSERT(the_lnet.ln_refcount == 0);

	the_lnet.ln_routing = 0;

	LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
	the_lnet.ln_pid = requested_pid;

	INIT_LIST_HEAD(&the_lnet.ln_test_peers);
	INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
	INIT_LIST_HEAD(&the_lnet.ln_nets);
	INIT_LIST_HEAD(&the_lnet.ln_routers);
	INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
	INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
	INIT_LIST_HEAD(&the_lnet.ln_dc_request);
	INIT_LIST_HEAD(&the_lnet.ln_dc_working);
	INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
	INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
	INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
	init_waitqueue_head(&the_lnet.ln_dc_waitq);
	the_lnet.ln_mt_handler = NULL;
	init_completion(&the_lnet.ln_started);

	rc = lnet_slab_setup();
	if (rc != 0)
		goto failed;

	rc = lnet_create_remote_nets_table();
	if (rc != 0)
		goto failed;

	/*
	 * NB the interface cookie in wire handles guards against delayed
	 * replies and ACKs appearing valid after reboot.
	 */
	the_lnet.ln_interface_cookie = ktime_get_real_ns();

	the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
						sizeof(struct lnet_counters));
	if (the_lnet.ln_counters == NULL) {
		CERROR("Failed to allocate counters for LNet\n");
		rc = -ENOMEM;
		goto failed;
	}

	rc = lnet_peer_tables_create();
	if (rc != 0)
		goto failed;

	rc = lnet_msg_containers_create();
	if (rc != 0)
		goto failed;

	rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
				      LNET_COOKIE_TYPE_EQ);
	if (rc != 0)
		goto failed;

	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
	if (recs == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	the_lnet.ln_md_containers = recs;

	rc = lnet_portals_create();
	if (rc != 0) {
		CERROR("Failed to create portals for LNet: %d\n", rc);
		goto failed;
	}

	the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
	if (!the_lnet.ln_mt_zombie_rstqs) {
		rc = -ENOMEM;
		goto failed;
	}

	return 0;

failed:
	lnet_unprepare();
	return rc;
}

static int
lnet_unprepare(void)
{
	/* NB no LNET_LOCK since this is the last reference.  All LND instances
	 * have shut down already, so it is safe to unlink and free all
	 * descriptors, even those that appear committed to a network op (eg MD
	 * with non-zero pending count) */

	lnet_fail_nid(LNET_NID_ANY, 0);

	LASSERT(the_lnet.ln_refcount == 0);
	LASSERT(list_empty(&the_lnet.ln_test_peers));
	LASSERT(list_empty(&the_lnet.ln_nets));

	if (the_lnet.ln_mt_zombie_rstqs) {
		lnet_clean_zombie_rstqs();
		the_lnet.ln_mt_zombie_rstqs = NULL;
	}

	lnet_assert_handler_unused(the_lnet.ln_mt_handler);
	the_lnet.ln_mt_handler = NULL;

	lnet_portals_destroy();

	if (the_lnet.ln_md_containers != NULL) {
		lnet_res_containers_destroy(the_lnet.ln_md_containers);
		the_lnet.ln_md_containers = NULL;
	}

	lnet_res_container_cleanup(&the_lnet.ln_eq_container);

	lnet_msg_containers_destroy();
	lnet_peer_uninit();
	lnet_rtrpools_free(0);

	if (the_lnet.ln_counters != NULL) {
		cfs_percpt_free(the_lnet.ln_counters);
		the_lnet.ln_counters = NULL;
	}
	lnet_destroy_remote_nets_table();
	lnet_slab_cleanup();

	return 0;
}

struct lnet_ni *
lnet_net2ni_locked(__u32 net_id, int cpt)
{
	struct lnet_ni *ni;
	struct lnet_net *net;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id) {
			ni = list_entry(net->net_ni_list.next, struct lnet_ni,
					ni_netlist);
			return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_net2ni_addref(__u32 net)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_net2ni_locked(net, 0);
	if (ni)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_net2ni_addref);

struct lnet_net *
lnet_get_net_locked(__u32 net_id)
{
	struct lnet_net *net;

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id)
			return net;
	}

	return NULL;
}

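/*
 * Hash a NID to a CPT index in [0, number). hash_long() alone is only
 * uniform for power-of-2 ranges, hence the modulo fallback for
 * arbitrary CPT counts.
 */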
unsigned int
lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
{
	__u64 key = nid;
	unsigned int val;

	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);

	if (number == 1)
		return 0;

	val = hash_long(key, LNET_CPT_BITS);
	/* NB: LNET_CPT_NUMBER doesn't have to be PO2 */
	if (val < number)
		return val;

	return (unsigned int)(key + val + (val >> 1)) % number;
}

int
lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
{
	struct lnet_net *net;

	/* must be called while holding lnet_net_lock */
	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	/*
	 * If NI is provided then use the CPT identified in the NI cpt
	 * list if one exists. If one doesn't exist, then that NI is
	 * associated with all CPTs and it follows that the net it belongs
	 * to is implicitly associated with all CPTs, so just hash the nid
	 * and return that.
	 */
	if (ni != NULL) {
		if (ni->ni_cpts != NULL)
			return ni->ni_cpts[lnet_nid_cpt_hash(nid,
							     ni->ni_ncpts)];
		else
			return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
	}

	/* no NI provided so look at the net */
	net = lnet_get_net_locked(LNET_NIDNET(nid));

	if (net != NULL && net->net_cpts != NULL) {
		return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
	}

	return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
}

int
lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
{
	int cpt;
	int cpt2;

	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	cpt = lnet_net_lock_current();

	cpt2 = lnet_cpt_of_nid_locked(nid, ni);

	lnet_net_unlock(cpt);

	return cpt2;
}
EXPORT_SYMBOL(lnet_cpt_of_nid);

int
lnet_islocalnet_locked(__u32 net_id)
{
	struct lnet_net *net;
	bool local;

	net = lnet_get_net_locked(net_id);

	local = net != NULL;

	return local;
}

int
lnet_islocalnet(__u32 net_id)
{
	int cpt;
	bool local;

	cpt = lnet_net_lock_current();

	local = lnet_islocalnet_locked(net_id);

	lnet_net_unlock(cpt);

	return local;
}

struct lnet_ni *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
	struct lnet_net *net;
	struct lnet_ni *ni;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (ni->ni_nid == nid)
				return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_nid2ni_addref(lnet_nid_t nid)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_nid2ni_locked(nid, 0);
	if (ni)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_nid2ni_addref);

int
lnet_islocalnid(lnet_nid_t nid)
{
	struct lnet_ni *ni;
	int cpt;

	cpt = lnet_net_lock_current();
	ni = lnet_nid2ni_locked(nid, cpt);
	lnet_net_unlock(cpt);

	return ni != NULL;
}

int
lnet_count_acceptor_nets(void)
{
	/* Return the # of NIs that need the acceptor. */
	int count = 0;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		/* all socklnd type networks should have the acceptor
		 * thread started */
		if (net->net_lnd->lnd_accept != NULL)
			count++;
	}

	lnet_net_unlock(cpt);

	return count;
}

struct lnet_ping_buffer *
lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
{
	struct lnet_ping_buffer *pbuf;

	LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
	if (pbuf) {
		pbuf->pb_nnis = nnis;
		pbuf->pb_needs_post = false;
		atomic_set(&pbuf->pb_refcnt, 1);
	}

	return pbuf;
}

void
lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
{
	LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
	LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
}

static struct lnet_ping_buffer *
lnet_ping_target_create(int nnis)
{
	struct lnet_ping_buffer *pbuf;

	pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
	if (pbuf == NULL) {
		CERROR("Can't allocate ping source [%d]\n", nnis);
		return NULL;
	}

	pbuf->pb_info.pi_nnis = nnis;
	pbuf->pb_info.pi_pid = the_lnet.ln_pid;
	pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
	pbuf->pb_info.pi_features =
		LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;

	return pbuf;
}

static inline int
lnet_get_net_ni_count_locked(struct lnet_net *net)
{
	struct lnet_ni *ni;
	int count = 0;

	list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
		count++;

	return count;
}

static inline int
lnet_get_net_ni_count_pre(struct lnet_net *net)
{
	struct lnet_ni *ni;
	int count = 0;

	list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
		count++;

	return count;
}

static inline int
lnet_get_ni_count(void)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	int count = 0;

	lnet_net_lock(0);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
			count++;
	}

	lnet_net_unlock(0);

	return count;
}

int
lnet_get_net_count(void)
{
	struct lnet_net *net;
	int count = 0;

	lnet_net_lock(0);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		count++;
	}

	lnet_net_unlock(0);

	return count;
}

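/*
 * Ping info travels in the sender's byte order; when the magic arrives
 * byte-swapped, every field of the buffer, including each per-NI status
 * entry, must be swabbed in place.
 */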
void
lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni_status *stat;
	int nnis;
	int i;

	__swab32s(&pbuf->pb_info.pi_magic);
	__swab32s(&pbuf->pb_info.pi_features);
	__swab32s(&pbuf->pb_info.pi_pid);
	__swab32s(&pbuf->pb_info.pi_nnis);
	nnis = pbuf->pb_info.pi_nnis;
	if (nnis > pbuf->pb_nnis)
		nnis = pbuf->pb_nnis;
	for (i = 0; i < nnis; i++) {
		stat = &pbuf->pb_info.pi_ni[i];
		__swab64s(&stat->ns_nid);
		__swab32s(&stat->ns_status);
	}
}

int
lnet_ping_info_validate(struct lnet_ping_info *pinfo)
{
	if (!pinfo)
		return -EINVAL;
	if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
		return -EPROTO;
	if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
		return -EPROTO;
	/* Loopback is guaranteed to be present */
	if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
		return -ERANGE;
	if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
		return -EPROTO;
	return 0;
}

static void
lnet_ping_target_destroy(void)
{
	struct lnet_net *net;
	struct lnet_ni *ni;

	lnet_net_lock(LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			lnet_ni_lock(ni);
			ni->ni_status = NULL;
			lnet_ni_unlock(ni);
		}
	}

	lnet_ping_buffer_decref(the_lnet.ln_ping_target);
	the_lnet.ln_ping_target = NULL;

	lnet_net_unlock(LNET_LOCK_EX);
}

static void
lnet_ping_target_event_handler(struct lnet_event *event)
{
	struct lnet_ping_buffer *pbuf = event->md_user_ptr;

	if (event->unlinked)
		lnet_ping_buffer_decref(pbuf);
}

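/*
 * The ping target is a GET-able MD posted on LNET_RESERVED_PORTAL under
 * LNET_PROTO_PING_MATCHBITS. It is remotely managed and retained across
 * matches, so a single buffer answers every incoming ping.
 */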
static int
lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
		       struct lnet_handle_md *ping_mdh,
		       int ni_count, bool set_eq)
{
	struct lnet_process_id id = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY
	};
	struct lnet_me *me;
	struct lnet_md md = { NULL };
	int rc;

	if (set_eq)
		the_lnet.ln_ping_target_handler =
			lnet_ping_target_event_handler;

	*ppbuf = lnet_ping_target_create(ni_count);
	if (*ppbuf == NULL) {
		rc = -ENOMEM;
		goto fail_free_eq;
	}

	/* Ping target ME/MD */
	me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
			  LNET_PROTO_PING_MATCHBITS, 0,
			  LNET_UNLINK, LNET_INS_AFTER);
	if (IS_ERR(me)) {
		rc = PTR_ERR(me);
		CERROR("Can't create ping target ME: %d\n", rc);
		goto fail_decref_ping_buffer;
	}

	/* initialize md content */
	md.start     = &(*ppbuf)->pb_info;
	md.length    = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
	md.threshold = LNET_MD_THRESH_INF;
	md.max_size  = 0;
	md.options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
		       LNET_MD_MANAGE_REMOTE;
	md.handler   = the_lnet.ln_ping_target_handler;
	md.user_ptr  = *ppbuf;

	rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
	if (rc != 0) {
		CERROR("Can't attach ping target MD: %d\n", rc);
		goto fail_decref_ping_buffer;
	}
	lnet_ping_buffer_addref(*ppbuf);

	return 0;

fail_decref_ping_buffer:
	LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
	lnet_ping_buffer_decref(*ppbuf);
	*ppbuf = NULL;
fail_free_eq:
	return rc;
}

static void
lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
		    struct lnet_handle_md *ping_mdh)
{
	LNetMDUnlink(*ping_mdh);
	LNetInvalidateMDHandle(ping_mdh);

	/* NB the MD could be busy; this just starts the unlink */
	wait_var_event_warning(&pbuf->pb_refcnt,
			       atomic_read(&pbuf->pb_refcnt) <= 1,
			       "Still waiting for ping data MD to unlink\n");
}

static void
lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	struct lnet_ni_status *ns;
	int i;
	int rc;

	i = 0;
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			LASSERT(i < pbuf->pb_nnis);

			ns = &pbuf->pb_info.pi_ni[i];

			ns->ns_nid = ni->ni_nid;

			lnet_ni_lock(ni);
			ns->ns_status = (ni->ni_status != NULL) ?
					ni->ni_status->ns_status :
						LNET_NI_STATUS_UP;
			ni->ni_status = ns;
			lnet_ni_unlock(ni);

			i++;
		}
	}
	/*
	 * We (ab)use the ns_status of the loopback interface to
	 * transmit the sequence number. The first interface listed
	 * must be the loopback interface.
	 */
	rc = lnet_ping_info_validate(&pbuf->pb_info);
	if (rc) {
		LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
		LBUG();
	}
	LNET_PING_BUFFER_SEQNO(pbuf) =
		atomic_inc_return(&the_lnet.ln_ping_target_seqno);
}

static void
lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
			struct lnet_handle_md ping_mdh)
{
	struct lnet_ping_buffer *old_pbuf = NULL;
	struct lnet_handle_md old_ping_md;

	/* switch the NIs to point to the new ping info created */
	lnet_net_lock(LNET_LOCK_EX);

	if (!the_lnet.ln_routing)
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
	if (!lnet_peer_discovery_disabled)
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;

	/* Ensure only known feature bits have been set. */
	LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
	LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));

	lnet_ping_target_install_locked(pbuf);

	if (the_lnet.ln_ping_target) {
		old_pbuf = the_lnet.ln_ping_target;
		old_ping_md = the_lnet.ln_ping_target_md;
	}
	the_lnet.ln_ping_target_md = ping_mdh;
	the_lnet.ln_ping_target = pbuf;

	lnet_net_unlock(LNET_LOCK_EX);

	if (old_pbuf) {
		/* unlink and free the old ping info */
		lnet_ping_md_unlink(old_pbuf, &old_ping_md);
		lnet_ping_buffer_decref(old_pbuf);
	}

	lnet_push_update_to_peers(0);
}

static void
lnet_ping_target_fini(void)
{
	lnet_ping_md_unlink(the_lnet.ln_ping_target,
			    &the_lnet.ln_ping_target_md);
	lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
	lnet_ping_target_destroy();
}

/* Resize the push target. */
int lnet_push_target_resize(void)
{
	struct lnet_handle_md mdh;
	struct lnet_handle_md old_mdh;
	struct lnet_ping_buffer *pbuf;
	struct lnet_ping_buffer *old_pbuf;
	int nnis;
	int rc;

again:
	nnis = the_lnet.ln_push_target_nnis;
	if (nnis <= 0) {
		CDEBUG(D_NET, "Invalid nnis %d\n", nnis);
		return -EINVAL;
	}

	/* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
	 * dropped when we need to resize again (see "old_pbuf" below) or when
	 * LNet is shutdown (see lnet_push_target_fini())
	 */
	pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
	if (!pbuf) {
		CDEBUG(D_NET, "Can't allocate pbuf for nnis %d\n", nnis);
		return -ENOMEM;
	}

	rc = lnet_push_target_post(pbuf, &mdh);
	if (rc) {
		CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
		lnet_ping_buffer_decref(pbuf);
		return rc;
	}

	lnet_net_lock(LNET_LOCK_EX);
	old_pbuf = the_lnet.ln_push_target;
	old_mdh = the_lnet.ln_push_target_md;
	the_lnet.ln_push_target = pbuf;
	the_lnet.ln_push_target_md = mdh;
	lnet_net_unlock(LNET_LOCK_EX);

	if (old_pbuf) {
		LNetMDUnlink(old_mdh);
		/* Drop ref set by lnet_ping_buffer_alloc() */
		lnet_ping_buffer_decref(old_pbuf);
	}

	/* Received another push or reply that requires a larger buffer */
	if (nnis < the_lnet.ln_push_target_nnis)
		goto again;

	CDEBUG(D_NET, "nnis %d success\n", nnis);
	return 0;
}

int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
			  struct lnet_handle_md *mdhp)
{
	struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
	struct lnet_md md = { NULL };
	struct lnet_me *me;
	int rc;

	me = LNetMEAttach(LNET_RESERVED_PORTAL, id,
			  LNET_PROTO_PING_MATCHBITS, 0,
			  LNET_UNLINK, LNET_INS_AFTER);
	if (IS_ERR(me)) {
		rc = PTR_ERR(me);
		CERROR("Can't create push target ME: %d\n", rc);
		return rc;
	}

	pbuf->pb_needs_post = false;

	/* This reference is dropped by lnet_push_target_event_handler() */
	lnet_ping_buffer_addref(pbuf);

	/* initialize md content */
	md.start     = &pbuf->pb_info;
	md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
	md.threshold = 1;
	md.max_size  = 0;
	md.options   = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
	md.user_ptr  = pbuf;
	md.handler   = the_lnet.ln_push_target_handler;

	rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
	if (rc) {
		CERROR("Can't attach push MD: %d\n", rc);
		lnet_ping_buffer_decref(pbuf);
		pbuf->pb_needs_post = true;
		return rc;
	}

	CDEBUG(D_NET, "posted push target %p\n", pbuf);

	return 0;
}

static void lnet_push_target_event_handler(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf = ev->md_user_ptr;

	CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
	       ev->unlinked);

	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
		lnet_swap_pinginfo(pbuf);

	if (ev->type == LNET_EVENT_UNLINK) {
		/* Drop ref added by lnet_push_target_post() */
		lnet_ping_buffer_decref(pbuf);
		return;
	}

	lnet_peer_push_event(ev);
	if (ev->unlinked)
		/* Drop ref added by lnet_push_target_post */
		lnet_ping_buffer_decref(pbuf);
}

/* Initialize the push target. */
static int lnet_push_target_init(void)
{
	int rc;

	if (the_lnet.ln_push_target)
		return -EALREADY;

	the_lnet.ln_push_target_handler =
		lnet_push_target_event_handler;

	rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
	LASSERT(rc == 0);

	/* Start at the required minimum, we'll enlarge if required. */
	the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;

	rc = lnet_push_target_resize();
	if (rc) {
		LNetClearLazyPortal(LNET_RESERVED_PORTAL);
		the_lnet.ln_push_target_handler = NULL;
	}

	return rc;
}

/* Clean up the push target. */
static void lnet_push_target_fini(void)
{
	if (!the_lnet.ln_push_target)
		return;

	/* Unlink and invalidate to prevent new references. */
	LNetMDUnlink(the_lnet.ln_push_target_md);
	LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);

	/* Wait for the unlink to complete. */
	wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
			       atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
			       "Still waiting for ping data MD to unlink\n");

	/* Drop ref set by lnet_ping_buffer_alloc() */
	lnet_ping_buffer_decref(the_lnet.ln_push_target);
	the_lnet.ln_push_target = NULL;
	the_lnet.ln_push_target_nnis = 0;

	LNetClearLazyPortal(LNET_RESERVED_PORTAL);
	lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
	the_lnet.ln_push_target_handler = NULL;
}

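/*
 * Split the network's transmit credits evenly across the NI's CPTs,
 * granting each queue at least 8x the per-peer credits but never more
 * than the network-wide maximum.
 */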
static int
lnet_ni_tq_credits(struct lnet_ni *ni)
{
	int credits;

	LASSERT(ni->ni_ncpts >= 1);

	if (ni->ni_ncpts == 1)
		return ni->ni_net->net_tunables.lct_max_tx_credits;

	credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
	credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
	credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);

	return credits;
}

static void
lnet_ni_unlink_locked(struct lnet_ni *ni)
{
	/* move it to zombie list and nobody can find it anymore */
	LASSERT(!list_empty(&ni->ni_netlist));
	list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
	lnet_ni_decref_locked(ni, 0);
}

static void
lnet_clear_zombies_nis_locked(struct lnet_net *net)
{
	int i;
	int islo;
	struct lnet_ni *ni;
	struct list_head *zombie_list = &net->net_ni_zombie;

	/*
	 * Now wait for the NIs I just nuked to show up on the zombie
	 * list and shut them down in guaranteed thread context
	 */
	i = 2;
	while (!list_empty(zombie_list)) {
		int *ref;
		int j;

		ni = list_entry(zombie_list->next,
				struct lnet_ni, ni_netlist);
		list_del_init(&ni->ni_netlist);
		/* the ni should be in deleting state. If it's not it's
		 * a bug */
		LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
		cfs_percpt_for_each(ref, j, ni->ni_refs) {
			if (*ref == 0)
				continue;
			/* still busy, add it back to zombie list */
			list_add(&ni->ni_netlist, zombie_list);
			break;
		}

		if (!list_empty(&ni->ni_netlist)) {
			lnet_net_unlock(LNET_LOCK_EX);
			++i;
			if ((i & (-i)) == i) {
				CDEBUG(D_WARNING,
				       "Waiting for zombie LNI %s\n",
				       libcfs_nid2str(ni->ni_nid));
			}
			schedule_timeout_uninterruptible(cfs_time_seconds(1));
			lnet_net_lock(LNET_LOCK_EX);
			continue;
		}

		lnet_net_unlock(LNET_LOCK_EX);

		islo = ni->ni_net->net_lnd->lnd_type == LOLND;

		LASSERT(!in_interrupt());
		/* Holding the mutex makes it safe for lnd_shutdown
		 * to call module_put(). Module unload cannot finish
		 * until lnet_unregister_lnd() completes, and that
		 * requires the mutex.
		 */
		mutex_lock(&the_lnet.ln_lnd_mutex);
		(net->net_lnd->lnd_shutdown)(ni);
		mutex_unlock(&the_lnet.ln_lnd_mutex);

		if (!islo)
			CDEBUG(D_LNI, "Removed LNI %s\n",
			       libcfs_nid2str(ni->ni_nid));

		lnet_ni_free(ni);
		i = 2;
		lnet_net_lock(LNET_LOCK_EX);
	}
}

/* shut down the NI and release refcount */
static void
lnet_shutdown_lndni(struct lnet_ni *ni)
{
	int i;
	struct lnet_net *net = ni->ni_net;

	lnet_net_lock(LNET_LOCK_EX);
	lnet_ni_lock(ni);
	ni->ni_state = LNET_NI_STATE_DELETING;
	lnet_ni_unlock(ni);
	lnet_ni_unlink_locked(ni);
	lnet_incr_dlc_seq();
	lnet_net_unlock(LNET_LOCK_EX);

	/* clear messages for this NI on the lazy portal */
	for (i = 0; i < the_lnet.ln_nportals; i++)
		lnet_clear_lazy_portal(ni, i, "Shutting down NI");

	lnet_net_lock(LNET_LOCK_EX);
	lnet_clear_zombies_nis_locked(net);
	lnet_net_unlock(LNET_LOCK_EX);
}

static void
lnet_shutdown_lndnet(struct lnet_net *net)
{
	struct lnet_ni *ni;

	lnet_net_lock(LNET_LOCK_EX);

	list_del_init(&net->net_list);

	while (!list_empty(&net->net_ni_list)) {
		ni = list_entry(net->net_ni_list.next,
				struct lnet_ni, ni_netlist);
		lnet_net_unlock(LNET_LOCK_EX);
		lnet_shutdown_lndni(ni);
		lnet_net_lock(LNET_LOCK_EX);
	}

	lnet_net_unlock(LNET_LOCK_EX);

	/* Do peer table cleanup for this net */
	lnet_peer_tables_cleanup(net);

	lnet_net_free(net);
}

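/*
 * Teardown ordering: nets are first moved to a zombie list so no new
 * work can find them, each net is then shut down in turn, and finally
 * any queued resend messages are failed with -ECANCELED.
 */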
static void
lnet_shutdown_lndnets(void)
{
	struct lnet_net *net;
	LIST_HEAD(resend);
	struct lnet_msg *msg, *tmp;

	/* NB called holding the global mutex */

	/* All quiet on the API front */
	LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
	LASSERT(the_lnet.ln_refcount == 0);

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_state = LNET_STATE_STOPPING;

	/*
	 * move the nets to the zombie list to avoid them being
	 * picked up for new work. LONET is also included in the
	 * Nets that will be moved to the zombie list
	 */
	list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);

	/* Drop the cached loopback Net. */
	if (the_lnet.ln_loni != NULL) {
		lnet_ni_decref_locked(the_lnet.ln_loni, 0);
		the_lnet.ln_loni = NULL;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through the net zombie list and delete each net */
	while (!list_empty(&the_lnet.ln_net_zombie)) {
		net = list_entry(the_lnet.ln_net_zombie.next,
				 struct lnet_net, net_list);
		lnet_shutdown_lndnet(net);
	}

	spin_lock(&the_lnet.ln_msg_resend_lock);
	list_splice(&the_lnet.ln_msg_resend, &resend);
	spin_unlock(&the_lnet.ln_msg_resend_lock);

	list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
		list_del_init(&msg->msg_list);
		msg->msg_no_resend = true;
		lnet_finalize(msg, -ECANCELED);
	}

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_state = LNET_STATE_SHUTDOWN;
	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
{
	int rc = -EINVAL;
	struct lnet_tx_queue *tq;
	int i;
	struct lnet_net *net = ni->ni_net;

	mutex_lock(&the_lnet.ln_lnd_mutex);

	if (tun) {
		memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
		ni->ni_lnd_tunables_set = true;
	}

	rc = (net->net_lnd->lnd_startup)(ni);

	mutex_unlock(&the_lnet.ln_lnd_mutex);

	if (rc != 0) {
		LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
				   rc, libcfs_lnd2str(net->net_lnd->lnd_type));
		goto failed0;
	}

	lnet_ni_lock(ni);
	ni->ni_state = LNET_NI_STATE_ACTIVE;
	lnet_ni_unlock(ni);

	/* We keep a reference on the loopback net through the loopback NI */
	if (net->net_lnd->lnd_type == LOLND) {
		lnet_ni_addref(ni);
		LASSERT(the_lnet.ln_loni == NULL);
		the_lnet.ln_loni = ni;
		ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
		ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
		ni->ni_net->net_tunables.lct_max_tx_credits = 0;
		ni->ni_net->net_tunables.lct_peer_timeout = 0;
		return 0;
	}

	if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
	    ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
		LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
				   libcfs_lnd2str(net->net_lnd->lnd_type),
				   ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
					"" : "per-peer ");
		/* shutdown the NI since if we get here then it must've already
		 * been started
		 */
		lnet_shutdown_lndni(ni);
		return -EINVAL;
	}

	cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
		tq->tq_credits_min =
		tq->tq_credits_max =
		tq->tq_credits = lnet_ni_tq_credits(ni);
	}

	atomic_set(&ni->ni_tx_credits,
		   lnet_ni_tq_credits(ni) * ni->ni_ncpts);
	atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);

	CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
	       libcfs_nid2str(ni->ni_nid),
	       ni->ni_net->net_tunables.lct_peer_tx_credits,
	       lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
	       ni->ni_net->net_tunables.lct_peer_rtr_credits,
	       ni->ni_net->net_tunables.lct_peer_timeout);

	return 0;
failed0:
	lnet_ni_free(ni);
	return rc;
}

2348 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2351 struct lnet_net *net_l = NULL;
2352 LIST_HEAD(local_ni_list);
2356 const struct lnet_lnd *lnd;
2357 int peer_timeout =
2358 net->net_tunables.lct_peer_timeout;
2359 int maxtxcredits =
2360 net->net_tunables.lct_max_tx_credits;
2361 int peerrtrcredits =
2362 net->net_tunables.lct_peer_rtr_credits;
2365 * make sure that this net is unique. If it isn't then
2366 * we are adding interfaces to an already existing network, and
2367 * 'net' is just a convenient way to pass in the list.
2368 * if it is unique we need to find the LND and load it if
2371 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2372 lnd_type = LNET_NETTYP(net->net_id);
2374 mutex_lock(&the_lnet.ln_lnd_mutex);
2375 lnd = lnet_find_lnd_by_type(lnd_type);
2378 mutex_unlock(&the_lnet.ln_lnd_mutex);
2379 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2380 mutex_lock(&the_lnet.ln_lnd_mutex);
2382 lnd = lnet_find_lnd_by_type(lnd_type);
2384 mutex_unlock(&the_lnet.ln_lnd_mutex);
2385 CERROR("Can't load LND %s, module %s, rc=%d\n",
2386 libcfs_lnd2str(lnd_type),
2387 libcfs_lnd2modname(lnd_type), rc);
2388 #ifndef HAVE_MODULE_LOADING_SUPPORT
2389 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
2390 "compiled with kernel module "
2391 "loading support.");
2400 mutex_unlock(&the_lnet.ln_lnd_mutex);
2406 * net_l: if the network being added is unique then net_l
2407 * will point to that network
2408 * if the network being added is not unique then
2409 * net_l points to the existing network.
2411 * When we enter the loop below, we'll pick NIs off the
2412 * network being added and start them up, then add them to
2413 * a local ni list. Once we've successfully started all
2414 * the NIs then we join the local NI list (of started up
2415 * networks) with the net_l->net_ni_list, which should
2416 * point to the correct network to add the new ni list to
2418 * If any of the new NIs fail to start up, then we want to
2419 * iterate through the local ni list, which should include
2420 * any NIs which were successfully started up, and shut
2423 * After that we want to delete the network being added,
2424 * to avoid a memory leak.
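 *
 * Editorial sketch of that flow (illustrative pseudocode, not part of
 * the original source):
 *
 *	for each ni on net->net_ni_added:
 *		fail if ni duplicates one on net_l->net_ni_list
 *		lnet_startup_lndni(ni, tun) or fail
 *		move ni to local_ni_list
 *	splice local_ni_list onto net_l->net_ni_list
 *	on failure: shut down every ni on local_ni_list and free 'net'
 *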
2428 * When a network uses TCP bonding then all its interfaces
2429 * must be specified when the network is first defined: the
2430 * TCP bonding code doesn't allow for interfaces to be added
2433 if (net_l != net && net_l != NULL && use_tcp_bonding &&
2434 LNET_NETTYP(net_l->net_id) == SOCKLND) {
2439 while (!list_empty(&net->net_ni_added)) {
2440 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
2442 list_del_init(&ni->ni_netlist);
2444 /* make sure that the NI we're about to start
2445 * up is actually unique. If it's not, fail. */
2446 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2447 ni->ni_interfaces[0])) {
2452 /* adjust the pointer to the parent network, just in case
2453 * the net is a duplicate */
2456 rc = lnet_startup_lndni(ni, tun);
2462 list_add_tail(&ni->ni_netlist, &local_ni_list);
2467 lnet_net_lock(LNET_LOCK_EX);
2468 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2469 lnet_incr_dlc_seq();
2470 lnet_net_unlock(LNET_LOCK_EX);
2472 /* if the network is not unique then we don't want to keep
2473 * it around after we're done. Free it. Otherwise add that
2474 * net to the global the_lnet.ln_nets */
2475 if (net_l != net && net_l != NULL) {
2477 * TODO - note: currently the tunables cannot be updated
2483 * restore tunables after they have been overwritten by the
2486 if (peer_timeout != -1)
2487 net->net_tunables.lct_peer_timeout = peer_timeout;
2488 if (maxtxcredits != -1)
2489 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2490 if (peerrtrcredits != -1)
2491 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2493 lnet_net_lock(LNET_LOCK_EX);
2494 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2495 lnet_net_unlock(LNET_LOCK_EX);
2498 /* update net count */
2499 lnet_current_net_count = lnet_get_net_count();
2505 * shutdown the new NIs that are being started up
2506 * free the NET being started
2508 while (!list_empty(&local_ni_list)) {
2509 ni = list_entry(local_ni_list.next, struct lnet_ni,
2512 lnet_shutdown_lndni(ni);
2522 lnet_startup_lndnets(struct list_head *netlist)
2524 struct lnet_net *net;
2529 * Change to running state before bringing up the LNDs. This
2530 * allows lnet_shutdown_lndnets() to assert that we've passed
2533 lnet_net_lock(LNET_LOCK_EX);
2534 the_lnet.ln_state = LNET_STATE_RUNNING;
2535 lnet_net_unlock(LNET_LOCK_EX);
2537 while (!list_empty(netlist)) {
2538 net = list_entry(netlist->next, struct lnet_net, net_list);
2539 list_del_init(&net->net_list);
2541 rc = lnet_startup_lndnet(net, NULL);
2551 lnet_shutdown_lndnets();
2557 * Initialize LNet library.
2559 * Automatically called at module loading time. Caller has to call
2560 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2561 * latter returned 0. It must be called exactly once.
2563 * \retval 0 on success
2564 * \retval -ve on failures.
2566 int lnet_lib_init(void)
2570 lnet_assert_wire_constants();
2572 /* refer to the global cfs_cpt_tab for now */
2573 the_lnet.ln_cpt_table = cfs_cpt_tab;
2574 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
2576 LASSERT(the_lnet.ln_cpt_number > 0);
2577 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2578 /* we are at risk of consuming all lh_cookie values */
2579 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2580 "please change setting of CPT-table and retry\n",
2581 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2585 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2586 the_lnet.ln_cpt_bits++;
2588 rc = lnet_create_locks();
2590 CERROR("Can't create LNet global locks: %d\n", rc);
2594 the_lnet.ln_refcount = 0;
2595 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2596 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2598 /* The hash table size is the number of bits it takes to express the set
2599 * ln_num_routes, minus 1 (better to underestimate than overestimate so we
2600 * don't waste memory). */
2601 if (rnet_htable_size <= 0)
2602 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2603 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2604 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2605 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2606 order_base_2(rnet_htable_size) - 1);
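/*
 * Editorial worked example (the value 128 is illustrative, not the
 * actual default): with rnet_htable_size = 128, order_base_2(128) == 7,
 * so ln_remote_nets_hbits == 6 and the table gets 2^6 == 64 buckets.
 * Rounding down is deliberate: a slightly undersized table beats
 * wasted memory.
 */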
2608 /* All LNDs apart from the LOLND are in separate modules. They
2609 * register themselves when their module loads, and unregister
2610 * themselves when their module is unloaded. */
2611 lnet_register_lnd(&the_lolnd);
2616 * Finalize LNet library.
2618 * \pre lnet_lib_init() called with success.
2619 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2621 * As this happens at module unload, all LNDs must already be unloaded,
2622 * so they must already be unregistered.
2624 void lnet_lib_exit(void)
2628 LASSERT(the_lnet.ln_refcount == 0);
2629 lnet_unregister_lnd(&the_lolnd);
2630 for (i = 0; i < NUM_LNDS; i++)
2631 LASSERT(!the_lnet.ln_lnds[i]);
2632 lnet_destroy_locks();
2636 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2638 * Users must call this function at least once before any other functions.
2639 * For each successful call there must be a corresponding call to
2640 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2643 * The PID used by LNet may be different from the one requested.
2646 * \param requested_pid PID requested by the caller.
2648 * \return >= 0 on success, and < 0 error code on failures.
2651 LNetNIInit(lnet_pid_t requested_pid)
2653 int im_a_router = 0;
2656 struct lnet_ping_buffer *pbuf;
2657 struct lnet_handle_md ping_mdh;
2658 LIST_HEAD(net_head);
2659 struct lnet_net *net;
2661 mutex_lock(&the_lnet.ln_api_mutex);
2663 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2665 if (the_lnet.ln_refcount > 0) {
2666 rc = the_lnet.ln_refcount++;
2667 mutex_unlock(&the_lnet.ln_api_mutex);
2671 rc = lnet_prepare(requested_pid);
2673 mutex_unlock(&the_lnet.ln_api_mutex);
2677 /* create a network for Loopback network */
2678 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2681 goto err_empty_list;
2684 /* Add in the loopback NI */
2685 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2687 goto err_empty_list;
2690 /* If LNet is being initialized via DLC it is possible
2691 * that the user requests not to load module parameters (ones which
2692 * are supported by DLC) on initialization. Therefore, make sure not
2693 * to load networks, routes and forwarding from module parameters
2694 * in this case. On cleanup, in case of failure, only clean up the
2695 * routes if they have been loaded. */
2696 if (!the_lnet.ln_nis_from_mod_params) {
2697 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2700 goto err_empty_list;
2703 ni_count = lnet_startup_lndnets(&net_head);
2706 goto err_empty_list;
2709 if (!the_lnet.ln_nis_from_mod_params) {
2710 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2712 goto err_shutdown_lndnis;
2714 rc = lnet_rtrpools_alloc(im_a_router);
2716 goto err_destroy_routes;
2719 rc = lnet_acceptor_start();
2721 goto err_destroy_routes;
2723 the_lnet.ln_refcount = 1;
2724 /* Now I may use my own API functions... */
2726 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2728 goto err_acceptor_stop;
2730 lnet_ping_target_update(pbuf, ping_mdh);
2732 the_lnet.ln_mt_handler = lnet_mt_event_handler;
2734 rc = lnet_push_target_init();
2738 rc = lnet_peer_discovery_start();
2740 goto err_destroy_push_target;
2742 rc = lnet_monitor_thr_start();
2744 goto err_stop_discovery_thr;
2747 lnet_router_debugfs_init();
2749 mutex_unlock(&the_lnet.ln_api_mutex);
2751 complete_all(&the_lnet.ln_started);
2753 /* wait for all routers to start */
2754 lnet_wait_router_start();
2758 err_stop_discovery_thr:
2759 lnet_peer_discovery_stop();
2760 err_destroy_push_target:
2761 lnet_push_target_fini();
2763 lnet_ping_target_fini();
2765 the_lnet.ln_refcount = 0;
2766 lnet_acceptor_stop();
2768 if (!the_lnet.ln_nis_from_mod_params)
2769 lnet_destroy_routes();
2770 err_shutdown_lndnis:
2771 lnet_shutdown_lndnets();
2775 mutex_unlock(&the_lnet.ln_api_mutex);
2776 while (!list_empty(&net_head)) {
2777 struct lnet_net *net;
2779 net = list_entry(net_head.next, struct lnet_net, net_list);
2780 list_del_init(&net->net_list);
2785 EXPORT_SYMBOL(LNetNIInit);
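/*
 * Editorial usage sketch (hedged; error handling trimmed). LNetNIInit()
 * and LNetNIFini() are reference counted, so a typical LNet user does:
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	... LNetGetId(), LNetGet(), LNetPut(), ...
 *	LNetNIFini();	// exactly one per successful LNetNIInit()
 *
 * A second concurrent initializer just bumps ln_refcount and reuses
 * the interfaces that are already up.
 */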
2788 * Stop LNet interfaces, routing, and forwarding.
2790 * Users must call this function once for each successful call to LNetNIInit().
2791 * Once the LNetNIFini() operation has been started, the results of pending
2792 * API operations are undefined.
2794 * \return always 0 for current implementation.
2799 mutex_lock(&the_lnet.ln_api_mutex);
2801 LASSERT(the_lnet.ln_refcount > 0);
2803 if (the_lnet.ln_refcount != 1) {
2804 the_lnet.ln_refcount--;
2806 LASSERT(!the_lnet.ln_niinit_self);
2810 lnet_router_debugfs_fini();
2811 lnet_monitor_thr_stop();
2812 lnet_peer_discovery_stop();
2813 lnet_push_target_fini();
2814 lnet_ping_target_fini();
2816 /* Teardown fns that use my own API functions BEFORE here */
2817 the_lnet.ln_refcount = 0;
2819 lnet_acceptor_stop();
2820 lnet_destroy_routes();
2821 lnet_shutdown_lndnets();
2825 mutex_unlock(&the_lnet.ln_api_mutex);
2828 EXPORT_SYMBOL(LNetNIFini);
2831 * Grabs the ni data from the ni structure and fills the out parameters.
2834 * \param[in] ni network interface structure
2835 * \param[out] cfg_ni NI config information
2836 * \param[out] tun network and LND tunables
2839 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2840 struct lnet_ioctl_config_lnd_tunables *tun,
2841 struct lnet_ioctl_element_stats *stats,
2844 size_t min_size = 0;
2847 if (!ni || !cfg_ni || !tun)
2850 if (ni->ni_interfaces[0] != NULL) {
2851 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2852 if (ni->ni_interfaces[i] != NULL) {
2853 strncpy(cfg_ni->lic_ni_intf[i],
2854 ni->ni_interfaces[i],
2855 sizeof(cfg_ni->lic_ni_intf[i]));
2860 cfg_ni->lic_nid = ni->ni_nid;
2861 if (ni->ni_nid == LNET_NID_LO_0)
2862 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2864 cfg_ni->lic_status = ni->ni_status->ns_status;
2865 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2866 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2868 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2871 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
2872 LNET_STATS_TYPE_SEND);
2873 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
2874 LNET_STATS_TYPE_RECV);
2875 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
2876 LNET_STATS_TYPE_DROP);
2880 * tun->lt_tun will always be present, but in order to be
2881 * backwards compatible we need to deal with the case where
2882 * tun->lt_tun is smaller than what the kernel has, because it
2883 * comes from an older version of a userspace program; in that case
2884 * we copy only as much information as the available space allows.
2886 min_size = tun_size - sizeof(tun->lt_cmn);
2887 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
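/*
 * Editorial worked example (sizes illustrative): if an older userspace
 * tool was built with an lt_tun of 16 bytes while the kernel's
 * ni_lnd_tunables is 24 bytes, then min_size == 16 and only the first
 * 16 bytes are copied, so the old tool never has data written past the
 * end of its own structure.
 */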
2889 /* copy over the cpts */
2890 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2891 ni->ni_cpts == NULL) {
2892 for (i = 0; i < ni->ni_ncpts; i++)
2893 cfg_ni->lic_cpts[i] = i;
2896 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2897 i < LNET_MAX_SHOW_NUM_CPT;
2899 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2901 cfg_ni->lic_ncpts = ni->ni_ncpts;
2905 * NOTE: This is a legacy function left in the code to be backwards
2906 * compatible with older userspace programs. It should eventually be removed.
2909 * Grabs the ni data from the ni structure and fills the out parameters.
2912 * \param[in] ni network interface structure
2913 * \param[out] config config information
2916 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2917 struct lnet_ioctl_config_data *config)
2919 struct lnet_ioctl_net_config *net_config;
2920 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2921 size_t min_size, tunable_size = 0;
2927 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2931 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2932 ARRAY_SIZE(net_config->ni_interfaces));
2934 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2935 if (!ni->ni_interfaces[i])
2938 strncpy(net_config->ni_interfaces[i],
2939 ni->ni_interfaces[i],
2940 sizeof(net_config->ni_interfaces[i]));
2943 config->cfg_nid = ni->ni_nid;
2944 config->cfg_config_u.cfg_net.net_peer_timeout =
2945 ni->ni_net->net_tunables.lct_peer_timeout;
2946 config->cfg_config_u.cfg_net.net_max_tx_credits =
2947 ni->ni_net->net_tunables.lct_max_tx_credits;
2948 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2949 ni->ni_net->net_tunables.lct_peer_tx_credits;
2950 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2951 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2953 if (ni->ni_nid == LNET_NID_LO_0)
2954 net_config->ni_status = LNET_NI_STATUS_UP;
2956 net_config->ni_status = ni->ni_status->ns_status;
2959 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2961 for (i = 0; i < num_cpts; i++)
2962 net_config->ni_cpts[i] = ni->ni_cpts[i];
2964 config->cfg_ncpts = num_cpts;
2968 * See if userland tools sent in a newer and larger version
2969 * of struct lnet_tunables than what the kernel uses.
2971 min_size = sizeof(*config) + sizeof(*net_config);
2973 if (config->cfg_hdr.ioc_len > min_size)
2974 tunable_size = config->cfg_hdr.ioc_len - min_size;
2976 /* Don't copy too much data to user space */
2977 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2978 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2980 if (lnd_cfg && min_size) {
2981 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2982 config->cfg_config_u.cfg_net.net_interface_count = 1;
2984 /* Tell userland that the kernel side has less data */
2985 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2986 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2987 config->cfg_hdr.ioc_len -= min_size;
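/*
 * Editorial worked example (sizes illustrative): if ioc_len exceeds
 * sizeof(*config) + sizeof(*net_config) by 64 bytes while the kernel's
 * ni_lnd_tunables is only 40 bytes, then 40 bytes are copied and
 * ioc_len is trimmed by 24, telling userspace the kernel returned
 * less tunable data than it asked for.
 */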
2993 lnet_get_ni_idx_locked(int idx)
2996 struct lnet_net *net;
2998 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2999 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3009 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
3012 struct lnet_net *net = mynet;
3015 * It is possible that the net has been cleaned out while there is
3016 * a message being sent. This function accesses the net without
3017 * checking if the list is empty.
3021 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
3023 if (list_empty(&net->net_ni_list))
3025 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
3031 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
3032 /* if we've reached the end of the ni list and the net is
3033 * specified, then there are no more NIs in that net */
3037 /* we reached the end of this net ni list. move to the
3039 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
3040 /* no more nets and no more NIs. */
3043 /* get the next net */
3044 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
3046 if (list_empty(&net->net_ni_list))
3048 /* get the ni on it */
3049 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
3055 if (list_empty(&prev->ni_netlist))
3058 /* there are more nis left */
3059 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
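/*
 * Editorial usage sketch (hedged): callers typically walk every local
 * NI by seeding the iterator with NULL and feeding each result back,
 * while holding the net lock:
 *
 *	struct lnet_ni *ni = NULL;
 *
 *	while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL)
 *		... inspect ni ...
 *
 * Passing a non-NULL 'mynet' confines the walk to that net's
 * net_ni_list instead of crossing into the next net.
 */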
3065 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3070 int idx = config->cfg_count;
3072 cpt = lnet_net_lock_current();
3074 ni = lnet_get_ni_idx_locked(idx);
3079 lnet_fill_ni_info_legacy(ni, config);
3083 lnet_net_unlock(cpt);
3088 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3089 struct lnet_ioctl_config_lnd_tunables *tun,
3090 struct lnet_ioctl_element_stats *stats,
3097 if (!cfg_ni || !tun || !stats)
3100 cpt = lnet_net_lock_current();
3102 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3107 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3111 lnet_net_unlock(cpt);
3115 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3124 cpt = lnet_net_lock_current();
3126 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3129 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3133 lnet_net_unlock(cpt);
3138 static int lnet_add_net_common(struct lnet_net *net,
3139 struct lnet_ioctl_config_lnd_tunables *tun)
3142 struct lnet_ping_buffer *pbuf;
3143 struct lnet_handle_md ping_mdh;
3145 struct lnet_remotenet *rnet;
3148 lnet_net_lock(LNET_LOCK_EX);
3149 rnet = lnet_find_rnet_locked(net->net_id);
3150 lnet_net_unlock(LNET_LOCK_EX);
3152 * make sure that the net added doesn't invalidate the current
3153 * configuration LNet is keeping
3156 CERROR("Adding net %s will invalidate routing configuration\n",
3157 libcfs_net2str(net->net_id));
3163 * make sure to calculate the correct number of slots in the ping
3164 * buffer. Since the ping info is a flattened list of all the NIs,
3165 * we should allocate enough slots to accommodate the number of NIs
3166 * which will be added.
3168 * since ni hasn't been configured yet, use
3169 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
3171 net_ni_count = lnet_get_net_ni_count_pre(net);
3173 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3174 net_ni_count + lnet_get_ni_count(),
3182 memcpy(&net->net_tunables,
3183 &tun->lt_cmn, sizeof(net->net_tunables));
3185 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3187 net_id = net->net_id;
3189 rc = lnet_startup_lndnet(net,
3190 (tun) ? &tun->lt_tun : NULL);
3194 lnet_net_lock(LNET_LOCK_EX);
3195 net = lnet_get_net_locked(net_id);
3196 lnet_net_unlock(LNET_LOCK_EX);
3201 * Start the acceptor thread if this is the first network
3202 * being added that requires the thread.
3204 if (net->net_lnd->lnd_accept) {
3205 rc = lnet_acceptor_start();
3207 /* shutdown the net that we just started */
3208 CERROR("Failed to start up acceptor thread\n");
3209 lnet_shutdown_lndnet(net);
3214 lnet_net_lock(LNET_LOCK_EX);
3215 lnet_peer_net_added(net);
3216 lnet_net_unlock(LNET_LOCK_EX);
3218 lnet_ping_target_update(pbuf, ping_mdh);
3223 lnet_ping_md_unlink(pbuf, &ping_mdh);
3224 lnet_ping_buffer_decref(pbuf);
3229 lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
3232 if (!tun->lt_cmn.lct_peer_timeout)
3233 tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
3234 if (!tun->lt_cmn.lct_peer_tx_credits)
3235 tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
3236 if (!tun->lt_cmn.lct_max_tx_credits)
3237 tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
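/*
 * Editorial note: only zeroed fields are replaced above, so explicit
 * user settings survive. E.g. a config with lct_peer_tx_credits = 32
 * and lct_peer_timeout = 0 comes out with 32 credits and
 * DEFAULT_PEER_TIMEOUT. Contrast the memset(-1) in
 * lnet_add_net_common(), where -1 marks "not configured at all".
 */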
3241 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3242 struct lnet_ioctl_config_lnd_tunables *tun)
3244 struct lnet_net *net;
3247 LIST_HEAD(net_head);
3249 rc = lnet_parse_ip2nets(&nets, ip2nets);
3253 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3257 lnet_set_tune_defaults(tun);
3259 mutex_lock(&the_lnet.ln_api_mutex);
3260 while (!list_empty(&net_head)) {
3261 net = list_entry(net_head.next, struct lnet_net, net_list);
3262 list_del_init(&net->net_list);
3263 rc = lnet_add_net_common(net, tun);
3269 mutex_unlock(&the_lnet.ln_api_mutex);
3271 while (!list_empty(&net_head)) {
3272 net = list_entry(net_head.next, struct lnet_net, net_list);
3273 list_del_init(&net->net_list);
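/*
 * Editorial example (hedged; the addresses are made up): a legacy DLC
 * ip2nets string such as
 *
 *	"tcp0(eth0) 192.168.1.*; o2ib0 10.10.*.*"
 *
 * is first matched by lnet_parse_ip2nets() against the node's own IP
 * addresses to produce a networks string, which is then parsed and
 * started net by net through lnet_add_net_common() above.
 */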
3279 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
3281 struct lnet_net *net;
3283 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3285 __u32 net_id, lnd_type;
3287 /* get the tunables if they are available */
3288 if (conf->lic_cfg_hdr.ioc_len >=
3289 sizeof(*conf) + sizeof(*tun))
3290 tun = (struct lnet_ioctl_config_lnd_tunables *)
3293 /* handle legacy ip2nets from DLC */
3294 if (conf->lic_legacy_ip2nets[0] != '\0')
3295 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3298 net_id = LNET_NIDNET(conf->lic_nid);
3299 lnd_type = LNET_NETTYP(net_id);
3301 if (!libcfs_isknown_lnd(lnd_type)) {
3302 CERROR("No valid net and lnd information provided\n");
3306 net = lnet_net_alloc(net_id, NULL);
3310 for (i = 0; i < conf->lic_ncpts; i++) {
3311 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
3315 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3316 conf->lic_ni_intf[0]);
3320 lnet_set_tune_defaults(tun);
3322 mutex_lock(&the_lnet.ln_api_mutex);
3324 rc = lnet_add_net_common(net, tun);
3326 mutex_unlock(&the_lnet.ln_api_mutex);
3331 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
3333 struct lnet_net *net;
3335 __u32 net_id = LNET_NIDNET(conf->lic_nid);
3336 struct lnet_ping_buffer *pbuf;
3337 struct lnet_handle_md ping_mdh;
3342 /* don't allow userspace to shutdown the LOLND */
3343 if (LNET_NETTYP(net_id) == LOLND)
3346 mutex_lock(&the_lnet.ln_api_mutex);
3350 net = lnet_get_net_locked(net_id);
3352 CERROR("net %s not found\n",
3353 libcfs_net2str(net_id));
3358 addr = LNET_NIDADDR(conf->lic_nid);
3360 /* remove the entire net */
3361 net_count = lnet_get_net_ni_count_locked(net);
3365 /* create and link a new ping info, before removing the old one */
3366 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3367 lnet_get_ni_count() - net_count,
3370 goto unlock_api_mutex;
3372 lnet_shutdown_lndnet(net);
3374 lnet_acceptor_stop();
3376 lnet_ping_target_update(pbuf, ping_mdh);
3378 goto unlock_api_mutex;
3381 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
3383 CERROR("nid %s not found\n",
3384 libcfs_nid2str(conf->lic_nid));
3389 net_count = lnet_get_net_ni_count_locked(net);
3393 /* create and link a new ping info, before removing the old one */
3394 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3395 lnet_get_ni_count() - 1, false);
3397 goto unlock_api_mutex;
3399 lnet_shutdown_lndni(ni);
3401 lnet_acceptor_stop();
3403 lnet_ping_target_update(pbuf, ping_mdh);
3405 /* check if the net is empty and remove it if it is */
3407 lnet_shutdown_lndnet(net);
3409 goto unlock_api_mutex;
3414 mutex_unlock(&the_lnet.ln_api_mutex);
3420 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3421 * They are only expected to be called for unique networks.
3422 * That can be as a result of older DLC library
3423 * calls. Multi-Rail DLC and beyond no longer uses these APIs.
3426 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3428 struct lnet_net *net;
3429 LIST_HEAD(net_head);
3431 struct lnet_ioctl_config_lnd_tunables tun;
3432 const char *nets = conf->cfg_config_u.cfg_net.net_intf;
3434 /* Create net/ni structures for the network string */
3435 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3437 return rc == 0 ? -EINVAL : rc;
3439 mutex_lock(&the_lnet.ln_api_mutex);
3442 rc = -EINVAL; /* only add one network per call */
3443 goto out_unlock_clean;
3446 net = list_entry(net_head.next, struct lnet_net, net_list);
3447 list_del_init(&net->net_list);
3449 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3451 memset(&tun, 0, sizeof(tun));
3453 tun.lt_cmn.lct_peer_timeout =
3454 (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
3455 conf->cfg_config_u.cfg_net.net_peer_timeout;
3456 tun.lt_cmn.lct_peer_tx_credits =
3457 (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
3458 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3459 tun.lt_cmn.lct_peer_rtr_credits =
3460 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3461 tun.lt_cmn.lct_max_tx_credits =
3462 (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
3463 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3465 rc = lnet_add_net_common(net, &tun);
3468 mutex_unlock(&the_lnet.ln_api_mutex);
3469 while (!list_empty(&net_head)) {
3470 /* net_head list is empty in the success case */
3471 net = list_entry(net_head.next, struct lnet_net, net_list);
3472 list_del_init(&net->net_list);
3479 lnet_dyn_del_net(__u32 net_id)
3481 struct lnet_net *net;
3482 struct lnet_ping_buffer *pbuf;
3483 struct lnet_handle_md ping_mdh;
3487 /* don't allow userspace to shutdown the LOLND */
3488 if (LNET_NETTYP(net_id) == LOLND)
3491 mutex_lock(&the_lnet.ln_api_mutex);
3495 net = lnet_get_net_locked(net_id);
3502 net_ni_count = lnet_get_net_ni_count_locked(net);
3506 /* create and link a new ping info, before removing the old one */
3507 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3508 lnet_get_ni_count() - net_ni_count, false);
3512 lnet_shutdown_lndnet(net);
3514 lnet_acceptor_stop();
3516 lnet_ping_target_update(pbuf, ping_mdh);
3519 mutex_unlock(&the_lnet.ln_api_mutex);
3524 void lnet_incr_dlc_seq(void)
3526 atomic_inc(&lnet_dlc_seq_no);
3529 __u32 lnet_get_dlc_seq_locked(void)
3531 return atomic_read(&lnet_dlc_seq_no);
3535 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3537 struct lnet_net *net;
3540 lnet_net_lock(LNET_LOCK_EX);
3541 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3542 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3543 if (ni->ni_nid == nid || all) {
3544 atomic_set(&ni->ni_healthv, value);
3545 if (list_empty(&ni->ni_recovery) &&
3546 value < LNET_MAX_HEALTH_VALUE) {
3547 CERROR("manually adding local NI %s to recovery\n",
3548 libcfs_nid2str(ni->ni_nid));
3549 list_add_tail(&ni->ni_recovery,
3550 &the_lnet.ln_mt_localNIRecovq);
3551 lnet_ni_addref_locked(ni, 0);
3554 lnet_net_unlock(LNET_LOCK_EX);
3560 lnet_net_unlock(LNET_LOCK_EX);
3564 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3568 lnet_nid_t nid = stats->hlni_nid;
3570 cpt = lnet_net_lock_current();
3571 ni = lnet_nid2ni_locked(nid, cpt);
3578 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3579 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3580 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3581 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3582 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3583 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3584 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3587 lnet_net_unlock(cpt);
3593 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3598 lnet_net_lock(LNET_LOCK_EX);
3599 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
3600 list->rlst_nid_array[i] = ni->ni_nid;
3602 if (i >= LNET_MAX_SHOW_NUM_NID)
3605 lnet_net_unlock(LNET_LOCK_EX);
3606 list->rlst_num_nids = i;
3612 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3614 struct lnet_peer_ni *lpni;
3617 lnet_net_lock(LNET_LOCK_EX);
3618 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
3619 list->rlst_nid_array[i] = lpni->lpni_nid;
3621 if (i >= LNET_MAX_SHOW_NUM_NID)
3624 lnet_net_unlock(LNET_LOCK_EX);
3625 list->rlst_num_nids = i;
3631 * LNet ioctl handler.
3635 LNetCtl(unsigned int cmd, void *arg)
3637 struct libcfs_ioctl_data *data = arg;
3638 struct lnet_ioctl_config_data *config;
3639 struct lnet_process_id id = {0};
3643 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3644 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
3647 case IOC_LIBCFS_GET_NI:
3648 rc = LNetGetId(data->ioc_count, &id);
3649 data->ioc_nid = id.nid;
3652 case IOC_LIBCFS_FAIL_NID:
3653 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3655 case IOC_LIBCFS_ADD_ROUTE: {
3656 /* default router sensitivity to 1 */
3657 unsigned int sensitivity = 1;
3660 if (config->cfg_hdr.ioc_len < sizeof(*config))
3663 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
3665 config->cfg_config_u.cfg_route.rtr_sensitivity;
3668 mutex_lock(&the_lnet.ln_api_mutex);
3669 rc = lnet_add_route(config->cfg_net,
3670 config->cfg_config_u.cfg_route.rtr_hop,
3672 config->cfg_config_u.cfg_route.
3673 rtr_priority, sensitivity);
3674 mutex_unlock(&the_lnet.ln_api_mutex);
3678 case IOC_LIBCFS_DEL_ROUTE:
3681 if (config->cfg_hdr.ioc_len < sizeof(*config))
3684 mutex_lock(&the_lnet.ln_api_mutex);
3685 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3686 mutex_unlock(&the_lnet.ln_api_mutex);
3689 case IOC_LIBCFS_GET_ROUTE:
3692 if (config->cfg_hdr.ioc_len < sizeof(*config))
3695 mutex_lock(&the_lnet.ln_api_mutex);
3696 rc = lnet_get_route(config->cfg_count,
3698 &config->cfg_config_u.cfg_route.rtr_hop,
3700 &config->cfg_config_u.cfg_route.rtr_flags,
3701 &config->cfg_config_u.cfg_route.
3703 &config->cfg_config_u.cfg_route.
3705 mutex_unlock(&the_lnet.ln_api_mutex);
3708 case IOC_LIBCFS_GET_LOCAL_NI: {
3709 struct lnet_ioctl_config_ni *cfg_ni;
3710 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3711 struct lnet_ioctl_element_stats *stats;
3716 /* get the tunables if they are available */
3717 if (cfg_ni->lic_cfg_hdr.ioc_len <
3718 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3721 stats = (struct lnet_ioctl_element_stats *)
3723 tun = (struct lnet_ioctl_config_lnd_tunables *)
3724 (cfg_ni->lic_bulk + sizeof(*stats));
3726 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3729 mutex_lock(&the_lnet.ln_api_mutex);
3730 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3731 mutex_unlock(&the_lnet.ln_api_mutex);
3735 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3736 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3738 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3741 mutex_lock(&the_lnet.ln_api_mutex);
3742 rc = lnet_get_ni_stats(msg_stats);
3743 mutex_unlock(&the_lnet.ln_api_mutex);
3748 case IOC_LIBCFS_GET_NET: {
3749 size_t total = sizeof(*config) +
3750 sizeof(struct lnet_ioctl_net_config);
3753 if (config->cfg_hdr.ioc_len < total)
3756 mutex_lock(&the_lnet.ln_api_mutex);
3757 rc = lnet_get_net_config(config);
3758 mutex_unlock(&the_lnet.ln_api_mutex);
3762 case IOC_LIBCFS_GET_LNET_STATS:
3764 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3766 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3769 mutex_lock(&the_lnet.ln_api_mutex);
3770 rc = lnet_counters_get(&lnet_stats->st_cntrs);
3771 mutex_unlock(&the_lnet.ln_api_mutex);
3775 case IOC_LIBCFS_CONFIG_RTR:
3778 if (config->cfg_hdr.ioc_len < sizeof(*config))
3781 mutex_lock(&the_lnet.ln_api_mutex);
3782 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3783 rc = lnet_rtrpools_enable();
3784 mutex_unlock(&the_lnet.ln_api_mutex);
3787 lnet_rtrpools_disable();
3788 mutex_unlock(&the_lnet.ln_api_mutex);
3791 case IOC_LIBCFS_ADD_BUF:
3794 if (config->cfg_hdr.ioc_len < sizeof(*config))
3797 mutex_lock(&the_lnet.ln_api_mutex);
3798 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3800 config->cfg_config_u.cfg_buffers.
3802 config->cfg_config_u.cfg_buffers.
3804 mutex_unlock(&the_lnet.ln_api_mutex);
3807 case IOC_LIBCFS_SET_NUMA_RANGE: {
3808 struct lnet_ioctl_set_value *numa;
3810 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3812 lnet_net_lock(LNET_LOCK_EX);
3813 lnet_numa_range = numa->sv_value;
3814 lnet_net_unlock(LNET_LOCK_EX);
3818 case IOC_LIBCFS_GET_NUMA_RANGE: {
3819 struct lnet_ioctl_set_value *numa;
3821 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3823 numa->sv_value = lnet_numa_range;
3827 case IOC_LIBCFS_GET_BUF: {
3828 struct lnet_ioctl_pool_cfg *pool_cfg;
3829 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3833 if (config->cfg_hdr.ioc_len < total)
3836 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3838 mutex_lock(&the_lnet.ln_api_mutex);
3839 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3840 mutex_unlock(&the_lnet.ln_api_mutex);
3844 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
3845 struct lnet_ioctl_local_ni_hstats *stats = arg;
3847 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
3850 mutex_lock(&the_lnet.ln_api_mutex);
3851 rc = lnet_get_local_ni_hstats(stats);
3852 mutex_unlock(&the_lnet.ln_api_mutex);
3857 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
3858 struct lnet_ioctl_recovery_list *list = arg;
3859 if (list->rlst_hdr.ioc_len < sizeof(*list))
3862 mutex_lock(&the_lnet.ln_api_mutex);
3863 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
3864 rc = lnet_get_local_ni_recovery_list(list);
3866 rc = lnet_get_peer_ni_recovery_list(list);
3867 mutex_unlock(&the_lnet.ln_api_mutex);
3871 case IOC_LIBCFS_ADD_PEER_NI: {
3872 struct lnet_ioctl_peer_cfg *cfg = arg;
3874 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3877 mutex_lock(&the_lnet.ln_api_mutex);
3878 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
3881 mutex_unlock(&the_lnet.ln_api_mutex);
3885 case IOC_LIBCFS_DEL_PEER_NI: {
3886 struct lnet_ioctl_peer_cfg *cfg = arg;
3888 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3891 mutex_lock(&the_lnet.ln_api_mutex);
3892 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3893 cfg->prcfg_cfg_nid);
3894 mutex_unlock(&the_lnet.ln_api_mutex);
3898 case IOC_LIBCFS_GET_PEER_INFO: {
3899 struct lnet_ioctl_peer *peer_info = arg;
3901 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3904 mutex_lock(&the_lnet.ln_api_mutex);
3905 rc = lnet_get_peer_ni_info(
3906 peer_info->pr_count,
3908 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3909 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3910 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3911 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3912 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3913 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3914 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3915 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3916 mutex_unlock(&the_lnet.ln_api_mutex);
3920 case IOC_LIBCFS_GET_PEER_NI: {
3921 struct lnet_ioctl_peer_cfg *cfg = arg;
3923 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3926 mutex_lock(&the_lnet.ln_api_mutex);
3927 rc = lnet_get_peer_info(cfg,
3928 (void __user *)cfg->prcfg_bulk);
3929 mutex_unlock(&the_lnet.ln_api_mutex);
3933 case IOC_LIBCFS_GET_PEER_LIST: {
3934 struct lnet_ioctl_peer_cfg *cfg = arg;
3936 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3939 mutex_lock(&the_lnet.ln_api_mutex);
3940 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
3941 (struct lnet_process_id __user *)cfg->prcfg_bulk);
3942 mutex_unlock(&the_lnet.ln_api_mutex);
3946 case IOC_LIBCFS_SET_HEALHV: {
3947 struct lnet_ioctl_reset_health_cfg *cfg = arg;
3949 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
3951 if (cfg->rh_value < 0 ||
3952 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
3953 value = LNET_MAX_HEALTH_VALUE;
3955 value = cfg->rh_value;
3956 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
3957 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
3958 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
3959 mutex_lock(&the_lnet.ln_api_mutex);
3960 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
3961 lnet_ni_set_healthv(cfg->rh_nid, value,
3964 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
3966 mutex_unlock(&the_lnet.ln_api_mutex);
3970 case IOC_LIBCFS_NOTIFY_ROUTER: {
3971 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
3973 /* The deadline passed in by the user should be some time in
3974 * seconds in the future since the UNIX epoch. We have to map
3975 * that deadline to the monotonic clock.
3977 deadline += ktime_get_seconds();
3978 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, false,
3982 case IOC_LIBCFS_LNET_DIST:
3983 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
3984 if (rc < 0 && rc != -EHOSTUNREACH)
3987 data->ioc_u32[0] = rc;
3990 case IOC_LIBCFS_TESTPROTOCOMPAT:
3991 the_lnet.ln_testprotocompat = data->ioc_flags;
3994 case IOC_LIBCFS_LNET_FAULT:
3995 return lnet_fault_ctl(data->ioc_flags, data);
3997 case IOC_LIBCFS_PING: {
3998 signed long timeout;
4000 id.nid = data->ioc_nid;
4001 id.pid = data->ioc_u32[0];
4003 /* If the timeout is non-positive or too large then use the default of 3 minutes */
4004 if (((s32)data->ioc_u32[1] <= 0) ||
4005 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4006 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4008 timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
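/*
 * Editorial worked example: ioc_u32[1] is in milliseconds, so a
 * request of 5000 becomes nsecs_to_jiffies(5000 * NSEC_PER_MSEC).
 * Zero, negative (as s32), or anything beyond DEFAULT_PEER_TIMEOUT
 * seconds' worth of milliseconds falls back to the 3 minute default.
 */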
4010 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
4011 data->ioc_plen1 / sizeof(struct lnet_process_id));
4016 data->ioc_count = rc;
4020 case IOC_LIBCFS_PING_PEER: {
4021 struct lnet_ioctl_ping_data *ping = arg;
4022 struct lnet_peer *lp;
4023 signed long timeout;
4025 /* If the timeout is non-positive or too large then use the default of 3 minutes */
4026 if (((s32)ping->op_param) <= 0 ||
4027 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4028 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4030 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
4032 rc = lnet_ping(ping->ping_id, timeout,
4038 mutex_lock(&the_lnet.ln_api_mutex);
4039 lp = lnet_find_peer(ping->ping_id.nid);
4041 ping->ping_id.nid = lp->lp_primary_nid;
4042 ping->mr_info = lnet_peer_is_multi_rail(lp);
4043 lnet_peer_decref_locked(lp);
4045 mutex_unlock(&the_lnet.ln_api_mutex);
4047 ping->ping_count = rc;
4051 case IOC_LIBCFS_DISCOVER: {
4052 struct lnet_ioctl_ping_data *discover = arg;
4053 struct lnet_peer *lp;
4055 rc = lnet_discover(discover->ping_id, discover->op_param,
4057 discover->ping_count);
4061 mutex_lock(&the_lnet.ln_api_mutex);
4062 lp = lnet_find_peer(discover->ping_id.nid);
4064 discover->ping_id.nid = lp->lp_primary_nid;
4065 discover->mr_info = lnet_peer_is_multi_rail(lp);
4066 lnet_peer_decref_locked(lp);
4068 mutex_unlock(&the_lnet.ln_api_mutex);
4070 discover->ping_count = rc;
4075 ni = lnet_net2ni_addref(data->ioc_net);
4079 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
4082 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
4089 EXPORT_SYMBOL(LNetCtl);
4091 void LNetDebugPeer(struct lnet_process_id id)
4093 lnet_debug_peer(id.nid);
4095 EXPORT_SYMBOL(LNetDebugPeer);
4098 * Determine if the specified peer \a nid is on the local node.
4100 * \param nid peer nid to check
4102 * \retval true If peer NID is on the local node.
4103 * \retval false If peer NID is not on the local node.
4105 bool LNetIsPeerLocal(lnet_nid_t nid)
4107 struct lnet_net *net;
4111 cpt = lnet_net_lock_current();
4112 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4113 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4114 if (ni->ni_nid == nid) {
4115 lnet_net_unlock(cpt);
4120 lnet_net_unlock(cpt);
4124 EXPORT_SYMBOL(LNetIsPeerLocal);
4127 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
4128 * Note that all interfaces share the same PID, as requested by LNetNIInit().
4130 * \param index Index of the interface to look up.
4131 * \param id On successful return, this location will hold the
4132 * struct lnet_process_id ID of the interface.
4134 * \retval 0 If an interface exists at \a index.
4135 * \retval -ENOENT If no interface has been found.
4138 LNetGetId(unsigned int index, struct lnet_process_id *id)
4141 struct lnet_net *net;
4145 LASSERT(the_lnet.ln_refcount > 0);
4147 cpt = lnet_net_lock_current();
4149 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4150 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4154 id->nid = ni->ni_nid;
4155 id->pid = the_lnet.ln_pid;
4161 lnet_net_unlock(cpt);
4164 EXPORT_SYMBOL(LNetGetId);
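/*
 * Editorial usage sketch (hedged; error handling trimmed): interfaces
 * can be enumerated by increasing index until -ENOENT:
 *
 *	struct lnet_process_id id;
 *	unsigned int i;
 *
 *	for (i = 0; LNetGetId(i, &id) == 0; i++)
 *		CDEBUG(D_NET, "NI[%u] = %s\n", i, libcfs_id2str(id));
 *
 * Every ID returned carries the same pid, the_lnet.ln_pid.
 */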
4169 struct lnet_handle_md mdh;
4170 struct completion completion;
4174 lnet_ping_event_handler(struct lnet_event *event)
4176 struct ping_data *pd = event->md_user_ptr;
4178 CDEBUG(D_NET, "ping event (%d %d)%s\n",
4179 event->type, event->status,
4180 event->unlinked ? " unlinked" : "");
4182 if (event->status) {
4184 pd->rc = event->status;
4185 } else if (event->type == LNET_EVENT_REPLY) {
4187 pd->rc = event->mlength;
4189 if (event->unlinked)
4190 complete(&pd->completion);
4193 static int lnet_ping(struct lnet_process_id id, signed long timeout,
4194 struct lnet_process_id __user *ids, int n_ids)
4196 struct lnet_md md = { NULL };
4197 struct ping_data pd = { 0 };
4198 struct lnet_ping_buffer *pbuf;
4199 struct lnet_process_id tmpid;
4205 /* n_ids limit is arbitrary */
4206 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
4210 * if the user buffer has more space than lnet_interfaces_max,
4211 * then only fill it up to lnet_interfaces_max.
4213 if (n_ids > lnet_interfaces_max)
4214 n_ids = lnet_interfaces_max;
4216 if (id.pid == LNET_PID_ANY)
4217 id.pid = LNET_PID_LUSTRE;
4219 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
4223 /* initialize md content */
4224 md.start = &pbuf->pb_info;
4225 md.length = LNET_PING_INFO_SIZE(n_ids);
4226 md.threshold = 2; /* GET/REPLY */
4228 md.options = LNET_MD_TRUNCATE;
4230 md.handler = lnet_ping_event_handler;
4232 init_completion(&pd.completion);
4234 rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
4236 CERROR("Can't bind MD: %d\n", rc);
4237 goto fail_ping_buffer_decref;
4240 rc = LNetGet(LNET_NID_ANY, pd.mdh, id,
4241 LNET_RESERVED_PORTAL,
4242 LNET_PROTO_PING_MATCHBITS, 0, false);
4245 /* Don't CERROR; this could be deliberate! */
4246 rc2 = LNetMDUnlink(pd.mdh);
4249 /* NB must wait for the UNLINK event below... */
4252 if (wait_for_completion_timeout(&pd.completion, timeout) == 0) {
4253 /* Ensure completion in finite time... */
4254 LNetMDUnlink(pd.mdh);
4255 wait_for_completion(&pd.completion);
4259 goto fail_ping_buffer_decref;
4263 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
4265 rc = -EPROTO; /* if I can't parse... */
4268 CERROR("%s: ping info too short %d\n",
4269 libcfs_id2str(id), nob);
4270 goto fail_ping_buffer_decref;
4273 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
4274 lnet_swap_pinginfo(pbuf);
4275 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
4276 CERROR("%s: Unexpected magic %08x\n",
4277 libcfs_id2str(id), pbuf->pb_info.pi_magic);
4278 goto fail_ping_buffer_decref;
4281 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
4282 CERROR("%s: ping w/o NI status: 0x%x\n",
4283 libcfs_id2str(id), pbuf->pb_info.pi_features);
4284 goto fail_ping_buffer_decref;
4287 if (nob < LNET_PING_INFO_SIZE(0)) {
4288 CERROR("%s: Short reply %d(%d min)\n",
4290 nob, (int)LNET_PING_INFO_SIZE(0));
4291 goto fail_ping_buffer_decref;
4294 if (pbuf->pb_info.pi_nnis < n_ids)
4295 n_ids = pbuf->pb_info.pi_nnis;
4297 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
4298 CERROR("%s: Short reply %d(%d expected)\n",
4300 nob, (int)LNET_PING_INFO_SIZE(n_ids));
4301 goto fail_ping_buffer_decref;
4304 rc = -EFAULT; /* if I segv in copy_to_user()... */
4306 memset(&tmpid, 0, sizeof(tmpid));
4307 for (i = 0; i < n_ids; i++) {
4308 tmpid.pid = pbuf->pb_info.pi_pid;
4309 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
4310 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
4311 goto fail_ping_buffer_decref;
4313 rc = pbuf->pb_info.pi_nnis;
4315 fail_ping_buffer_decref:
4316 lnet_ping_buffer_decref(pbuf);
4321 lnet_discover(struct lnet_process_id id, __u32 force,
4322 struct lnet_process_id __user *ids, int n_ids)
4324 struct lnet_peer_ni *lpni;
4325 struct lnet_peer_ni *p;
4326 struct lnet_peer *lp;
4327 struct lnet_process_id *buf;
4333 id.nid == LNET_NID_ANY)
4336 if (id.pid == LNET_PID_ANY)
4337 id.pid = LNET_PID_LUSTRE;
4340 * If the user buffer has more space than the lnet_interfaces_max,
4341 * then only fill it up to lnet_interfaces_max.
4343 if (n_ids > lnet_interfaces_max)
4344 n_ids = lnet_interfaces_max;
4346 CFS_ALLOC_PTR_ARRAY(buf, n_ids);
4350 cpt = lnet_net_lock_current();
4351 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
4358 * Clearing the NIDS_UPTODATE flag ensures the peer will
4359 * be discovered, provided discovery has not been disabled.
4361 lp = lpni->lpni_peer_net->lpn_peer;
4362 spin_lock(&lp->lp_lock);
4363 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
4364 /* If the force flag is set, force a PING and PUSH as well. */
4366 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
4367 spin_unlock(&lp->lp_lock);
4368 rc = lnet_discover_peer_locked(lpni, cpt, true);
4374 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
4375 buf[i].pid = id.pid;
4376 buf[i].nid = p->lpni_nid;
4383 lnet_peer_ni_decref_locked(lpni);
4385 lnet_net_unlock(cpt);
4388 if (copy_to_user(ids, buf, rc * sizeof(*buf)))
4390 CFS_FREE_PTR_ARRAY(buf, n_ids);
4396 * Retrieve peer discovery status.
4398 * \retval 1 if lnet_peer_discovery_disabled is 0
4399 * \retval 0 if lnet_peer_discovery_disabled is 1
4402 LNetGetPeerDiscoveryStatus(void)
4404 return !lnet_peer_discovery_disabled;
4406 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);