/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/ctype.h>
#include <linux/generic-radix-tree.h>
#include <linux/log2.h>
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <net/genetlink.h>

#include <libcfs/linux/linux-net.h>
#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE

/*
 * initialize ln_api_mutex statically, since it needs to be used in
 * discovery_set callback. That module parameter callback can be called
 * before module init completes. The mutex needs to be ready for use then.
 */
struct lnet the_lnet = {
        .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
};      /* THE state of the network */
EXPORT_SYMBOL(the_lnet);

static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");
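
/*
 * Illustrative example (not part of the original source): these strings are
 * normally provided as module options, e.g. in /etc/modprobe.d/lustre.conf:
 *   options lnet networks="tcp0(eth0)"
 *   options lnet routes="o2ib0 192.168.10.1@tcp0"
 * Note that 'networks' and 'ip2nets' are mutually exclusive; see
 * lnet_get_networks() below.
 */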

static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
                 "use_tcp_bonding parameter has been removed");

unsigned int lnet_numa_range = 0;
module_param(lnet_numa_range, uint, 0444);
MODULE_PARM_DESC(lnet_numa_range,
                 "NUMA range to consider during Multi-Rail selection");

/*
 * lnet_health_sensitivity determines by how much we decrement the health
 * value on a send error. The value defaults to 100, which means an
 * interface's health is decremented by 100 points on every failure.
 */
unsigned int lnet_health_sensitivity = 100;
static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_health_sensitivity = {
        .set = sensitivity_set,
        .get = param_get_int,
};
#define param_check_health_sensitivity(name, p) \
                __param_check(name, p, int)
module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
                  &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_health_sensitivity,
                "Value to decrement the health value by on error");

/*
 * lnet_recovery_interval determines how often we should perform recovery
 * on unhealthy interfaces.
 */
unsigned int lnet_recovery_interval = 1;
static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_recovery_interval = {
        .set = recovery_interval_set,
        .get = param_get_int,
};
#define param_check_recovery_interval(name, p) \
                __param_check(name, p, int)
module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
                  &lnet_recovery_interval, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_recovery_interval,
                "DEPRECATED - Interval to recover unhealthy interfaces in seconds");

unsigned int lnet_recovery_limit;
module_param(lnet_recovery_limit, uint, 0644);
MODULE_PARM_DESC(lnet_recovery_limit,
                 "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");

unsigned int lnet_max_recovery_ping_interval = 900;
unsigned int lnet_max_recovery_ping_count = 9;
static int max_recovery_ping_interval_set(const char *val,
                                          cfs_kernel_param_arg_t *kp);

#define param_check_max_recovery_ping_interval(name, p) \
                __param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_max_recovery_ping_interval = {
        .set = max_recovery_ping_interval_set,
        .get = param_get_int,
};
module_param(lnet_max_recovery_ping_interval, max_recovery_ping_interval, 0644);
#else
module_param_call(lnet_max_recovery_ping_interval, max_recovery_ping_interval,
                  param_get_int, &lnet_max_recovery_ping_interval, 0644);
#endif
MODULE_PARM_DESC(lnet_max_recovery_ping_interval,
                 "The max interval between LNet recovery pings, in seconds");

static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_interfaces_max = {
        .set = intf_max_set,
        .get = param_get_int,
};

#define param_check_interfaces_max(name, p) \
                __param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_interfaces_max, interfaces_max, 0644);
#else
module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
                  &param_ops_interfaces_max, 0644);
#endif
MODULE_PARM_DESC(lnet_interfaces_max,
                "Maximum number of interfaces in a node.");

unsigned lnet_peer_discovery_disabled = 0;
static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_discovery_disabled = {
        .set = discovery_set,
        .get = param_get_int,
};

#define param_check_discovery_disabled(name, p) \
                __param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
#else
module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
                  &param_ops_discovery_disabled, 0644);
#endif
MODULE_PARM_DESC(lnet_peer_discovery_disabled,
                "Set to 1 to disable peer discovery on this node.");

unsigned int lnet_drop_asym_route;
static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_drop_asym_route = {
        .set = drop_asym_route_set,
        .get = param_get_int,
};

#define param_check_drop_asym_route(name, p) \
        __param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_drop_asym_route, drop_asym_route, 0644);
#else
module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
                  &param_ops_drop_asym_route, 0644);
#endif
MODULE_PARM_DESC(lnet_drop_asym_route,
                "Set to 1 to drop asymmetrical route messages.");

#define LNET_TRANSACTION_TIMEOUT_DEFAULT 150
unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_transaction_timeout = {
        .set = transaction_to_set,
        .get = param_get_int,
};

#define param_check_transaction_timeout(name, p) \
                __param_check(name, p, int)
module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
                  &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_transaction_timeout,
                "Maximum number of seconds to wait for a peer response.");

#define LNET_RETRY_COUNT_DEFAULT 2
unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_retry_count = {
        .set = retry_count_set,
        .get = param_get_int,
};

#define param_check_retry_count(name, p) \
                __param_check(name, p, int)
module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_retry_count, retry_count_set, param_get_int,
                  &lnet_retry_count, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_retry_count,
                "Maximum number of times to retry transmitting a message");

unsigned int lnet_response_tracking = 3;
static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);

#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_response_tracking = {
        .set = response_tracking_set,
        .get = param_get_int,
};

#define param_check_response_tracking(name, p) \
        __param_check(name, p, int)
module_param(lnet_response_tracking, response_tracking, 0644);
#else
module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
                   &lnet_response_tracking, 0644);
#endif
MODULE_PARM_DESC(lnet_response_tracking,
                 "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");

int lock_prim_nid = 1;
module_param(lock_prim_nid, int, 0444);
MODULE_PARM_DESC(lock_prim_nid,
                 "Whether nid passed down by Lustre is locked as primary");

#define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
                                  (LNET_RETRY_COUNT_DEFAULT + 1))
unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;
static void lnet_set_lnd_timeout(void)
{
        lnet_lnd_timeout = max((lnet_transaction_timeout - 1) /
                               (lnet_retry_count + 1), 1U);
}
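
/*
 * Worked example (not in the original source): with the defaults above,
 * lnet_transaction_timeout = 150 and lnet_retry_count = 2, so
 * lnet_lnd_timeout = (150 - 1) / (2 + 1) = 49 seconds, leaving room for the
 * original send plus two retries within a single transaction timeout.
 */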

/*
 * This sequence number keeps track of how many times DLC was used to
 * update the local NIs. It is incremented when a NI is added or
 * removed and checked when sending a message to determine if there is
 * a need to re-run the selection algorithm. See lnet_select_pathway()
 * for more details on its usage.
 */
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);

struct lnet_fail_ping {
        struct lnet_processid           lfp_id;
        int                             lfp_errno;
};

struct lnet_genl_ping_list {
        unsigned int                    lgpl_index;
        unsigned int                    lgpl_list_count;
        unsigned int                    lgpl_failed_count;
        signed long                     lgpl_timeout;
        struct lnet_nid                 lgpl_src_nid;
        GENRADIX(struct lnet_fail_ping) lgpl_failed;
        GENRADIX(struct lnet_processid) lgpl_list;
};

static int lnet_ping(struct lnet_processid *id, struct lnet_nid *src_nid,
                     signed long timeout, struct lnet_genl_ping_list *plist,
                     int n_ids);

static int lnet_discover(struct lnet_process_id id, __u32 force,
                         struct lnet_process_id __user *ids, int n_ids);

static int
sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        unsigned *sensitivity = (unsigned *)kp->arg;
        unsigned long value;

        rc = kstrtoul(val, 0, &value);
        if (rc) {
                CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
                return rc;
        }

        /*
         * The purpose of locking the api_mutex here is to ensure that
         * the correct value ends up stored properly.
         */
        mutex_lock(&the_lnet.ln_api_mutex);

        if (value > LNET_MAX_HEALTH_VALUE) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                CERROR("Invalid health value. Maximum: %d value = %lu\n",
                       LNET_MAX_HEALTH_VALUE, value);
                return -EINVAL;
        }

        if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
                lnet_retry_count = 0;
                lnet_set_lnd_timeout();
        }

        *sensitivity = value;

        mutex_unlock(&the_lnet.ln_api_mutex);

        return 0;
}

static int
recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        CWARN("'lnet_recovery_interval' has been deprecated\n");

        return 0;
}

static int
max_recovery_ping_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        unsigned long value;

        rc = kstrtoul(val, 0, &value);
        if (rc) {
                CERROR("Invalid module parameter value for 'lnet_max_recovery_ping_interval'\n");
                return rc;
        }

        if (!value) {
                CERROR("Invalid max ping timeout. Must be strictly positive\n");
                return -EINVAL;
        }

        /* The purpose of locking the api_mutex here is to ensure that
         * the correct value ends up stored properly.
         */
        mutex_lock(&the_lnet.ln_api_mutex);
        lnet_max_recovery_ping_interval = value;
        lnet_max_recovery_ping_count = 0;
        value >>= 1;
        while (value) {
                lnet_max_recovery_ping_count++;
                value >>= 1;
        }
        mutex_unlock(&the_lnet.ln_api_mutex);

        return 0;
}

static int
discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        unsigned *discovery_off = (unsigned *)kp->arg;
        unsigned long value;
        struct lnet_ping_buffer *pbuf;

        rc = kstrtoul(val, 0, &value);
        if (rc) {
                CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
                return rc;
        }

        value = (value) ? 1 : 0;

        /*
         * The purpose of locking the api_mutex here is to ensure that
         * the correct value ends up stored properly.
         */
        mutex_lock(&the_lnet.ln_api_mutex);

        if (value == *discovery_off) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }

        /*
         * We still want to set the discovery value even when LNet is not
         * running. This is the case when LNet is being loaded and we want
         * the module parameters to take effect. Otherwise if we're
         * changing the value dynamically, we want to set it after
         * updating the peers.
         */
        if (the_lnet.ln_state != LNET_STATE_RUNNING) {
                *discovery_off = value;
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }

        /* tell peers that discovery setting has changed */
        lnet_net_lock(LNET_LOCK_EX);
        pbuf = the_lnet.ln_ping_target;
        if (value)
                pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
        else
                pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
        lnet_net_unlock(LNET_LOCK_EX);

        /* only send a push when we're turning off discovery */
        if (*discovery_off <= 0 && value > 0)
                lnet_push_update_to_peers(1);
        *discovery_off = value;

        mutex_unlock(&the_lnet.ln_api_mutex);

        return 0;
}

static int
drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        unsigned int *drop_asym_route = (unsigned int *)kp->arg;
        unsigned long value;

        rc = kstrtoul(val, 0, &value);
        if (rc) {
                CERROR("Invalid module parameter value for "
                       "'lnet_drop_asym_route'\n");
                return rc;
        }

        /*
         * The purpose of locking the api_mutex here is to ensure that
         * the correct value ends up stored properly.
         */
        mutex_lock(&the_lnet.ln_api_mutex);

        if (value == *drop_asym_route) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }

        *drop_asym_route = value;

        mutex_unlock(&the_lnet.ln_api_mutex);

        return 0;
}

static int
transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        unsigned *transaction_to = (unsigned *)kp->arg;
        unsigned long value;

        rc = kstrtoul(val, 0, &value);
        if (rc) {
                CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
                return rc;
        }

        /*
         * The purpose of locking the api_mutex here is to ensure that
         * the correct value ends up stored properly.
         */
        mutex_lock(&the_lnet.ln_api_mutex);

        if (value <= lnet_retry_count || value == 0) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                CERROR("Invalid value for lnet_transaction_timeout (%lu). "
                       "Has to be greater than lnet_retry_count (%u)\n",
                       value, lnet_retry_count);
                return -EINVAL;
        }

        if (value == *transaction_to) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }

        *transaction_to = value;
        /* Update the lnet_lnd_timeout now that we've modified the
         * transaction timeout
         */
        lnet_set_lnd_timeout();

        mutex_unlock(&the_lnet.ln_api_mutex);

        return 0;
}

static int
retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        unsigned *retry_count = (unsigned *)kp->arg;
        unsigned long value;

        rc = kstrtoul(val, 0, &value);
        if (rc) {
                CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
                return rc;
        }

        /*
         * The purpose of locking the api_mutex here is to ensure that
         * the correct value ends up stored properly.
         */
        mutex_lock(&the_lnet.ln_api_mutex);

        if (lnet_health_sensitivity == 0 && value > 0) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                CERROR("Can not set lnet_retry_count when health feature is turned off\n");
                return -EINVAL;
        }

        if (value > lnet_transaction_timeout) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                CERROR("Invalid value for lnet_retry_count (%lu). "
                       "Has to be smaller than lnet_transaction_timeout (%u)\n",
                       value, lnet_transaction_timeout);
                return -EINVAL;
        }

        *retry_count = value;

        /* Update the lnet_lnd_timeout now that we've modified the
         * retry count
         */
        lnet_set_lnd_timeout();

        mutex_unlock(&the_lnet.ln_api_mutex);

        return 0;
}

static int
intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int value;
        int rc;

        rc = kstrtoint(val, 0, &value);
        if (rc) {
                CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
                return rc;
        }

        if (value < LNET_INTERFACES_MIN) {
                CWARN("max interfaces provided are too small, setting to %d\n",
                      LNET_INTERFACES_MAX_DEFAULT);
                value = LNET_INTERFACES_MAX_DEFAULT;
        }

        *(int *)kp->arg = value;

        return 0;
}

static int
response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        unsigned long new_value;

        rc = kstrtoul(val, 0, &new_value);
        if (rc) {
                CERROR("Invalid value for 'lnet_response_tracking'\n");
                return rc;
        }

        if (new_value < 0 || new_value > 3) {
                CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
                      new_value);
                return -EINVAL;
        }

        lnet_response_tracking = new_value;

        return 0;
}

static char *
lnet_get_routes(void)
{
        return routes;
}

static char *
lnet_get_networks(void)
{
        char *nets;
        int rc;

        if (*networks != 0 && *ip2nets != 0) {
                LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
                                   "'ip2nets' but not both at once\n");
                return NULL;
        }

        if (*ip2nets != 0) {
                rc = lnet_parse_ip2nets(&nets, ip2nets);
                return (rc == 0) ? nets : NULL;
        }

        if (*networks != 0)
                return networks;

        return "tcp";
}

static void
lnet_init_locks(void)
{
        spin_lock_init(&the_lnet.ln_eq_wait_lock);
        spin_lock_init(&the_lnet.ln_msg_resend_lock);
        init_completion(&the_lnet.ln_mt_wait_complete);
        mutex_init(&the_lnet.ln_lnd_mutex);
}
657 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
658 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
660 struct kmem_cache *lnet_udsp_cachep; /* udsp cache */
661 struct kmem_cache *lnet_rspt_cachep; /* response tracker cache */
662 struct kmem_cache *lnet_msg_cachep;
665 lnet_slab_setup(void)
667 /* create specific kmem_cache for MEs and small MDs (i.e., originally
668 * allocated in <size-xxx> kmem_cache).
670 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
672 if (!lnet_mes_cachep)
675 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
676 LNET_SMALL_MD_SIZE, 0, 0,
678 if (!lnet_small_mds_cachep)
681 lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
682 sizeof(struct lnet_udsp),
684 if (!lnet_udsp_cachep)
687 lnet_rspt_cachep = kmem_cache_create("lnet_rspt", sizeof(struct lnet_rsp_tracker),
689 if (!lnet_rspt_cachep)
692 lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
694 if (!lnet_msg_cachep)
701 lnet_slab_cleanup(void)
703 if (lnet_msg_cachep) {
704 kmem_cache_destroy(lnet_msg_cachep);
705 lnet_msg_cachep = NULL;
708 if (lnet_rspt_cachep) {
709 kmem_cache_destroy(lnet_rspt_cachep);
710 lnet_rspt_cachep = NULL;
713 if (lnet_udsp_cachep) {
714 kmem_cache_destroy(lnet_udsp_cachep);
715 lnet_udsp_cachep = NULL;
718 if (lnet_small_mds_cachep) {
719 kmem_cache_destroy(lnet_small_mds_cachep);
720 lnet_small_mds_cachep = NULL;
723 if (lnet_mes_cachep) {
724 kmem_cache_destroy(lnet_mes_cachep);
725 lnet_mes_cachep = NULL;
730 lnet_create_remote_nets_table(void)
733 struct list_head *hash;
735 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
736 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
737 CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
739 CERROR("Failed to create remote nets hash table\n");
743 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
744 INIT_LIST_HEAD(&hash[i]);
745 the_lnet.ln_remote_nets_hash = hash;
750 lnet_destroy_remote_nets_table(void)
754 if (the_lnet.ln_remote_nets_hash == NULL)
757 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
758 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
760 CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
761 LNET_REMOTE_NETS_HASH_SIZE);
762 the_lnet.ln_remote_nets_hash = NULL;
766 lnet_destroy_locks(void)
768 if (the_lnet.ln_res_lock != NULL) {
769 cfs_percpt_lock_free(the_lnet.ln_res_lock);
770 the_lnet.ln_res_lock = NULL;
773 if (the_lnet.ln_net_lock != NULL) {
774 cfs_percpt_lock_free(the_lnet.ln_net_lock);
775 the_lnet.ln_net_lock = NULL;
780 lnet_create_locks(void)
784 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
785 if (the_lnet.ln_res_lock == NULL)
788 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
789 if (the_lnet.ln_net_lock == NULL)
795 lnet_destroy_locks();
799 static void lnet_assert_wire_constants(void)
801 /* Wire protocol assertions generated by 'wirecheck'
802 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
803 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
804 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
808 BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
809 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
810 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
811 BUILD_BUG_ON(LNET_MSG_ACK != 0);
812 BUILD_BUG_ON(LNET_MSG_PUT != 1);
813 BUILD_BUG_ON(LNET_MSG_GET != 2);
814 BUILD_BUG_ON(LNET_MSG_REPLY != 3);
815 BUILD_BUG_ON(LNET_MSG_HELLO != 4);
817 BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
818 BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);
820 /* Checks for struct lnet_nid */
821 BUILD_BUG_ON((int)sizeof(struct lnet_nid) != 20);
822 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_size) != 0);
823 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_size) != 1);
824 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_type) != 1);
825 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_type) != 1);
826 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_num) != 2);
827 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_num) != 2);
828 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_addr) != 4);
829 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_addr) != 16);
831 /* Checks for struct lnet_process_id_packed */
832 BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
833 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
834 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
835 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
836 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);
838 /* Checks for struct lnet_handle_wire */
839 BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
840 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
841 wh_interface_cookie) != 0);
842 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
843 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
844 wh_object_cookie) != 8);
845 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
847 /* Checks for struct struct lnet_magicversion */
848 BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
849 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
850 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
851 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
852 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
853 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
854 version_minor) != 6);
855 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
857 /* Checks for struct _lnet_hdr_nid4 */
858 BUILD_BUG_ON((int)sizeof(struct _lnet_hdr_nid4) != 72);
859 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_nid) != 0);
860 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_nid) != 8);
861 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_nid) != 8);
862 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_nid) != 8);
863 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_pid) != 16);
864 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_pid) != 4);
865 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_pid) != 20);
866 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_pid) != 4);
867 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, type) != 24);
868 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->type) != 4);
869 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, payload_length) != 28);
870 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->payload_length) != 4);
871 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg) != 32);
872 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg) != 40);
875 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.dst_wmd) != 32);
876 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.dst_wmd) != 16);
877 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.match_bits) != 48);
878 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.match_bits) != 8);
879 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.mlength) != 56);
880 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.mlength) != 4);
883 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ack_wmd) != 32);
884 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ack_wmd) != 16);
885 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.match_bits) != 48);
886 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.match_bits) != 8);
887 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.hdr_data) != 56);
888 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.hdr_data) != 8);
889 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ptl_index) != 64);
890 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ptl_index) != 4);
891 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.offset) != 68);
892 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.offset) != 4);
895 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.return_wmd) != 32);
896 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.return_wmd) != 16);
897 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.match_bits) != 48);
898 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.match_bits) != 8);
899 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.ptl_index) != 56);
900 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.ptl_index) != 4);
901 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.src_offset) != 60);
902 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.src_offset) != 4);
903 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.sink_length) != 64);
904 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.sink_length) != 4);
907 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.reply.dst_wmd) != 32);
908 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.reply.dst_wmd) != 16);
911 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.incarnation) != 32);
912 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.incarnation) != 8);
913 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.type) != 40);
914 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.type) != 4);
916 /* Checks for struct lnet_ni_status and related constants */
917 BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
918 BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
919 BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);
921 /* Checks for struct lnet_ni_status */
922 BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
923 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
924 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
925 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
926 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
927 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_msg_size) != 12);
928 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_msg_size) != 4);
930 /* Checks for struct lnet_ni_large_status */
931 BUILD_BUG_ON((int)sizeof(struct lnet_ni_large_status) != 24);
932 BUILD_BUG_ON((int)offsetof(struct lnet_ni_large_status, ns_status) != 0);
933 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_large_status *)0)->ns_status) != 4);
934 BUILD_BUG_ON((int)offsetof(struct lnet_ni_large_status, ns_nid) != 4);
935 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_large_status *)0)->ns_nid) != 20);
937 /* Checks for struct lnet_ping_info and related constants */
938 BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
939 BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
940 BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
941 BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
942 BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
943 BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
944 BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
945 BUILD_BUG_ON(LNET_PING_FEAT_LARGE_ADDR != 32);
946 BUILD_BUG_ON(LNET_PING_FEAT_PRIMARY_LARGE != 64);
947 BUILD_BUG_ON(LNET_PING_FEAT_BITS != 127);
949 /* Checks for struct lnet_ping_info */
950 BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
951 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
952 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
953 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
954 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
955 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
956 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
957 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
958 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
959 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
960 BUILD_BUG_ON(offsetof(struct lnet_ping_info, pi_ni) != sizeof(struct lnet_ping_info));
962 /* Acceptor connection request */
963 BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
965 /* Checks for struct lnet_acceptor_connreq */
966 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
967 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
968 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
969 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
970 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
971 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
972 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);
974 /* Checks for struct lnet_acceptor_connreq_v2 */
975 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq_v2) != 28);
976 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_magic) != 0);
977 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_magic) != 4);
978 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_version) != 4);
979 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_version) != 4);
980 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_nid) != 8);
981 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_nid) != 20);
983 /* Checks for struct lnet_counters_common */
984 BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
985 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
986 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
987 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
988 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
989 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
990 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
991 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
992 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
993 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
994 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
995 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
996 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
997 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
998 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
999 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
1000 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
1001 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
1002 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
1003 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
1004 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
1005 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
1006 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
1009 static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
1011 const struct lnet_lnd *lnd;
1013 /* holding lnd mutex */
1014 if (type >= NUM_LNDS)
1016 lnd = the_lnet.ln_lnds[type];
1017 LASSERT(!lnd || lnd->lnd_type == type);
1023 lnet_get_lnd_timeout(void)
1025 return lnet_lnd_timeout;
1027 EXPORT_SYMBOL(lnet_get_lnd_timeout);
1030 lnet_register_lnd(const struct lnet_lnd *lnd)
1032 mutex_lock(&the_lnet.ln_lnd_mutex);
1034 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
1035 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
1037 the_lnet.ln_lnds[lnd->lnd_type] = lnd;
1039 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
1041 mutex_unlock(&the_lnet.ln_lnd_mutex);
1043 EXPORT_SYMBOL(lnet_register_lnd);
1046 lnet_unregister_lnd(const struct lnet_lnd *lnd)
1048 mutex_lock(&the_lnet.ln_lnd_mutex);
1050 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
1052 the_lnet.ln_lnds[lnd->lnd_type] = NULL;
1053 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
1055 mutex_unlock(&the_lnet.ln_lnd_mutex);
1057 EXPORT_SYMBOL(lnet_unregister_lnd);
1060 lnet_counters_get_common_locked(struct lnet_counters_common *common)
1062 struct lnet_counters *ctr;
        /* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
         * actually called under the protection of the lnet_net_lock.
         */
1068 memset(common, 0, sizeof(*common));
1070 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
1071 common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
1072 common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
1073 common->lcc_errors += ctr->lct_common.lcc_errors;
1074 common->lcc_send_count += ctr->lct_common.lcc_send_count;
1075 common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
1076 common->lcc_route_count += ctr->lct_common.lcc_route_count;
1077 common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
1078 common->lcc_send_length += ctr->lct_common.lcc_send_length;
1079 common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
1080 common->lcc_route_length += ctr->lct_common.lcc_route_length;
1081 common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
1086 lnet_counters_get_common(struct lnet_counters_common *common)
1088 lnet_net_lock(LNET_LOCK_EX);
1089 lnet_counters_get_common_locked(common);
1090 lnet_net_unlock(LNET_LOCK_EX);
1092 EXPORT_SYMBOL(lnet_counters_get_common);
1095 lnet_counters_get(struct lnet_counters *counters)
1097 struct lnet_counters *ctr;
1098 struct lnet_counters_health *health = &counters->lct_health;
1101 memset(counters, 0, sizeof(*counters));
1103 lnet_net_lock(LNET_LOCK_EX);
1105 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1106 GOTO(out_unlock, rc = -ENODEV);
1108 lnet_counters_get_common_locked(&counters->lct_common);
1110 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
1111 health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
1112 health->lch_resend_count += ctr->lct_health.lch_resend_count;
1113 health->lch_response_timeout_count +=
1114 ctr->lct_health.lch_response_timeout_count;
1115 health->lch_local_interrupt_count +=
1116 ctr->lct_health.lch_local_interrupt_count;
1117 health->lch_local_dropped_count +=
1118 ctr->lct_health.lch_local_dropped_count;
1119 health->lch_local_aborted_count +=
1120 ctr->lct_health.lch_local_aborted_count;
1121 health->lch_local_no_route_count +=
1122 ctr->lct_health.lch_local_no_route_count;
1123 health->lch_local_timeout_count +=
1124 ctr->lct_health.lch_local_timeout_count;
1125 health->lch_local_error_count +=
1126 ctr->lct_health.lch_local_error_count;
1127 health->lch_remote_dropped_count +=
1128 ctr->lct_health.lch_remote_dropped_count;
1129 health->lch_remote_error_count +=
1130 ctr->lct_health.lch_remote_error_count;
1131 health->lch_remote_timeout_count +=
1132 ctr->lct_health.lch_remote_timeout_count;
1133 health->lch_network_timeout_count +=
1134 ctr->lct_health.lch_network_timeout_count;
1137 lnet_net_unlock(LNET_LOCK_EX);
1140 EXPORT_SYMBOL(lnet_counters_get);
1143 lnet_counters_reset(void)
1145 struct lnet_counters *counters;
1148 lnet_net_lock(LNET_LOCK_EX);
1150 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1153 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
1154 memset(counters, 0, sizeof(struct lnet_counters));
1156 lnet_net_unlock(LNET_LOCK_EX);
1160 lnet_res_type2str(int type)
1165 case LNET_COOKIE_TYPE_MD:
1167 case LNET_COOKIE_TYPE_ME:
1169 case LNET_COOKIE_TYPE_EQ:
1175 lnet_res_container_cleanup(struct lnet_res_container *rec)
1179 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
1182 while (!list_empty(&rec->rec_active)) {
1183 struct list_head *e = rec->rec_active.next;
1186 if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
1187 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
1189 } else { /* NB: Active MEs should be attached on portals */
                /* Found alive MD/ME/EQ: the user really should unlink/free
                 * all of them before finalizing LNet, but if someone didn't,
                 * we have to recycle the garbage for them */
1199 CERROR("%d active elements on exit of %s container\n",
1200 count, lnet_res_type2str(rec->rec_type));
1203 if (rec->rec_lh_hash != NULL) {
1204 CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
1205 rec->rec_lh_hash = NULL;
1208 rec->rec_type = 0; /* mark it as finalized */
1212 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
1217 LASSERT(rec->rec_type == 0);
1219 rec->rec_type = type;
1220 INIT_LIST_HEAD(&rec->rec_active);
1222 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
1224 /* Arbitrary choice of hash table size */
1225 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
1226 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
1227 if (rec->rec_lh_hash == NULL) {
1232 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
1233 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
1238 CERROR("Failed to setup %s resource container\n",
1239 lnet_res_type2str(type));
1240 lnet_res_container_cleanup(rec);
1245 lnet_res_containers_destroy(struct lnet_res_container **recs)
1247 struct lnet_res_container *rec;
1250 cfs_percpt_for_each(rec, i, recs)
1251 lnet_res_container_cleanup(rec);
1253 cfs_percpt_free(recs);
1256 static struct lnet_res_container **
1257 lnet_res_containers_create(int type)
1259 struct lnet_res_container **recs;
1260 struct lnet_res_container *rec;
1264 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
1266 CERROR("Failed to allocate %s resource containers\n",
1267 lnet_res_type2str(type));
1271 cfs_percpt_for_each(rec, i, recs) {
1272 rc = lnet_res_container_setup(rec, i, type);
1274 lnet_res_containers_destroy(recs);
1282 struct lnet_libhandle *
1283 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
1285 /* ALWAYS called with lnet_res_lock held */
1286 struct list_head *head;
1287 struct lnet_libhandle *lh;
1290 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
1293 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
1294 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
1296 list_for_each_entry(lh, head, lh_hash_chain) {
1297 if (lh->lh_cookie == cookie)
1305 lnet_res_lh_initialize(struct lnet_res_container *rec,
1306 struct lnet_libhandle *lh)
1308 /* ALWAYS called with lnet_res_lock held */
1309 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
1312 lh->lh_cookie = rec->rec_lh_cookie;
1313 rec->rec_lh_cookie += 1 << ibits;
1315 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
1317 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
1321 lnet_create_array_of_queues(void)
1323 struct list_head **qs;
1324 struct list_head *q;
1327 qs = cfs_percpt_alloc(lnet_cpt_table(),
1328 sizeof(struct list_head));
1330 CERROR("Failed to allocate queues\n");
1334 cfs_percpt_for_each(q, i, qs)
1340 static int lnet_unprepare(void);
1343 lnet_prepare(lnet_pid_t requested_pid)
1345 /* Prepare to bring up the network */
1346 struct lnet_res_container **recs;
1349 if (requested_pid == LNET_PID_ANY) {
1350 /* Don't instantiate LNET just for me */
1354 LASSERT(the_lnet.ln_refcount == 0);
1356 the_lnet.ln_routing = 0;
1358 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
1359 the_lnet.ln_pid = requested_pid;
1361 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
1362 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
1363 INIT_LIST_HEAD(&the_lnet.ln_nets);
1364 INIT_LIST_HEAD(&the_lnet.ln_routers);
1365 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
1366 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
1367 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
1368 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
1369 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
1370 INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
1371 INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
1372 INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
1373 init_waitqueue_head(&the_lnet.ln_dc_waitq);
1374 the_lnet.ln_mt_handler = NULL;
1375 init_completion(&the_lnet.ln_started);
1376 atomic_set(&the_lnet.ln_late_msg_count, 0);
1377 atomic64_set(&the_lnet.ln_late_msg_nsecs, 0);
1379 rc = lnet_slab_setup();
1383 rc = lnet_create_remote_nets_table();
1388 * NB the interface cookie in wire handles guards against delayed
1389 * replies and ACKs appearing valid after reboot.
1391 the_lnet.ln_interface_cookie = ktime_get_real_ns();
1393 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
1394 sizeof(struct lnet_counters));
1395 if (the_lnet.ln_counters == NULL) {
1396 CERROR("Failed to allocate counters for LNet\n");
1401 rc = lnet_peer_tables_create();
1405 rc = lnet_msg_containers_create();
1409 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
1410 LNET_COOKIE_TYPE_EQ);
1414 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
1420 the_lnet.ln_md_containers = recs;
1422 rc = lnet_portals_create();
1424 CERROR("Failed to create portals for LNet: %d\n", rc);
1428 the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
1429 if (!the_lnet.ln_mt_zombie_rstqs) {
1442 lnet_unprepare(void)
1444 /* NB no LNET_LOCK since this is the last reference. All LND instances
1445 * have shut down already, so it is safe to unlink and free all
1446 * descriptors, even those that appear committed to a network op (eg MD
1447 * with non-zero pending count) */
1449 lnet_fail_nid(LNET_NID_ANY, 0);
1451 LASSERT(the_lnet.ln_refcount == 0);
1452 LASSERT(list_empty(&the_lnet.ln_test_peers));
1453 LASSERT(list_empty(&the_lnet.ln_nets));
1455 if (the_lnet.ln_mt_zombie_rstqs) {
1456 lnet_clean_zombie_rstqs();
1457 the_lnet.ln_mt_zombie_rstqs = NULL;
1460 lnet_assert_handler_unused(the_lnet.ln_mt_handler);
1461 the_lnet.ln_mt_handler = NULL;
1463 lnet_portals_destroy();
1465 if (the_lnet.ln_md_containers != NULL) {
1466 lnet_res_containers_destroy(the_lnet.ln_md_containers);
1467 the_lnet.ln_md_containers = NULL;
1470 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
1472 lnet_msg_containers_destroy();
1474 lnet_rtrpools_free(0);
1476 if (the_lnet.ln_counters != NULL) {
1477 cfs_percpt_free(the_lnet.ln_counters);
1478 the_lnet.ln_counters = NULL;
1480 lnet_destroy_remote_nets_table();
1481 lnet_udsp_destroy(true);
1482 lnet_slab_cleanup();
1488 lnet_net2ni_locked(__u32 net_id, int cpt)
1491 struct lnet_net *net;
1493 LASSERT(cpt != LNET_LOCK_EX);
1495 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1496 if (net->net_id == net_id) {
1497 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
1507 lnet_net2ni_addref(__u32 net)
1512 ni = lnet_net2ni_locked(net, 0);
1514 lnet_ni_addref_locked(ni, 0);
1519 EXPORT_SYMBOL(lnet_net2ni_addref);
1522 lnet_get_net_locked(__u32 net_id)
1524 struct lnet_net *net;
1526 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1527 if (net->net_id == net_id)
1535 lnet_net_clr_pref_rtrs(struct lnet_net *net)
1537 struct list_head zombies;
1538 struct lnet_nid_list *ne;
1539 struct lnet_nid_list *tmp;
1541 INIT_LIST_HEAD(&zombies);
1543 lnet_net_lock(LNET_LOCK_EX);
1544 list_splice_init(&net->net_rtr_pref_nids, &zombies);
1545 lnet_net_unlock(LNET_LOCK_EX);
1547 list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1548 list_del_init(&ne->nl_list);
1549 LIBCFS_FREE(ne, sizeof(*ne));
1554 lnet_net_add_pref_rtr(struct lnet_net *net,
1555 struct lnet_nid *gw_nid)
1556 __must_hold(&the_lnet.ln_api_mutex)
1558 struct lnet_nid_list *ne;
1560 /* This function is called with api_mutex held. When the api_mutex
1561 * is held the list can not be modified, as it is only modified as
1562 * a result of applying a UDSP and that happens under api_mutex
1565 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1566 if (nid_same(&ne->nl_nid, gw_nid))
1570 LIBCFS_ALLOC(ne, sizeof(*ne));
1574 ne->nl_nid = *gw_nid;
1576 /* Lock the cpt to protect against addition and checks in the
1577 * selection algorithm
1579 lnet_net_lock(LNET_LOCK_EX);
1580 list_add(&ne->nl_list, &net->net_rtr_pref_nids);
1581 lnet_net_unlock(LNET_LOCK_EX);
static unsigned int
lnet_nid4_cpt_hash(lnet_nid_t nid, unsigned int number)
{
        __u64 key = nid;
        __u16 lnd = LNET_NETTYP(LNET_NIDNET(nid));
        unsigned int cpt;

        if (lnd == KFILND || lnd == GNILND) {
                cpt = hash_long(key, LNET_CPT_BITS);
                /* NB: The number of CPTs needn't be a power of 2 */
                if (cpt >= number)
                        cpt = (key + cpt + (cpt >> 1)) % number;
        } else {
                __u64 pair_bits = 0x0001000100010001LLU;
                __u64 mask = pair_bits * 0xFF;
                __u64 pair_sum;

                /* For ipv4 NIDs, use (sum-by-multiplication of nid bytes) mod
                 * (number of CPTs) to match nid to a CPT.
                 */
                pair_sum = (key & mask) + ((key >> 8) & mask);
                pair_sum = (pair_sum * pair_bits) >> 48;
                cpt = (unsigned int)(pair_sum) % number;
        }

        CDEBUG(D_NET, "Match nid %s to cpt %u\n",
               libcfs_nid2str(nid), cpt);

        return cpt;
}
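
/* Illustrative note (not in the original source): in the else branch above,
 * mask = 0x00FF00FF00FF00FF, so the two masked adds produce four 16-bit
 * lanes each holding the sum of one pair of NID bytes; multiplying by
 * pair_bits and shifting right by 48 accumulates all four lanes, i.e.
 * pair_sum ends up holding the sum of all eight NID bytes before the final
 * "% number".
 */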
1618 lnet_nid_cpt_hash(struct lnet_nid *nid, unsigned int number)
1624 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1629 if (nid_is_nid4(nid))
1630 return lnet_nid4_cpt_hash(lnet_nid_to_nid4(nid), number);
1632 for (i = 0; i < 4; i++)
1633 h = cfs_hash_32(nid->nid_addr[i]^h, 32);
1634 val = cfs_hash_32(LNET_NID_NET(nid) ^ h, LNET_CPT_BITS);
1637 return (unsigned int)(h + val + (val >> 1)) % number;
1641 lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni)
1643 struct lnet_net *net;
        /* must be called with lnet_net_lock held */
1646 if (LNET_CPT_NUMBER == 1)
1647 return 0; /* the only one */
1650 * If NI is provided then use the CPT identified in the NI cpt
1651 * list if one exists. If one doesn't exist, then that NI is
1652 * associated with all CPTs and it follows that the net it belongs
1653 * to is implicitly associated with all CPTs, so just hash the nid
1657 if (ni->ni_cpts != NULL)
1658 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
1661 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1664 /* no NI provided so look at the net */
1665 net = lnet_get_net_locked(LNET_NID_NET(nid));
1667 if (net != NULL && net->net_cpts != NULL) {
1668 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
1671 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1675 lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni)
1680 if (LNET_CPT_NUMBER == 1)
1681 return 0; /* the only one */
1683 cpt = lnet_net_lock_current();
1685 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
1687 lnet_net_unlock(cpt);
1691 EXPORT_SYMBOL(lnet_nid2cpt);
1694 lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni)
1696 struct lnet_nid nid;
1698 if (LNET_CPT_NUMBER == 1)
1699 return 0; /* the only one */
1701 lnet_nid4_to_nid(nid4, &nid);
1702 return lnet_nid2cpt(&nid, ni);
1704 EXPORT_SYMBOL(lnet_cpt_of_nid);
1707 lnet_islocalnet_locked(__u32 net_id)
1709 struct lnet_net *net;
1712 net = lnet_get_net_locked(net_id);
1714 local = net != NULL;
1720 lnet_islocalnet(__u32 net_id)
1725 cpt = lnet_net_lock_current();
1727 local = lnet_islocalnet_locked(net_id);
1729 lnet_net_unlock(cpt);
1735 lnet_nid_to_ni_locked(struct lnet_nid *nid, int cpt)
1737 struct lnet_net *net;
1740 LASSERT(cpt != LNET_LOCK_EX);
1742 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1743 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1744 if (nid_same(&ni->ni_nid, nid))
1753 lnet_nid_to_ni_addref(struct lnet_nid *nid)
1758 ni = lnet_nid_to_ni_locked(nid, 0);
1760 lnet_ni_addref_locked(ni, 0);
1765 EXPORT_SYMBOL(lnet_nid_to_ni_addref);
1768 lnet_islocalnid(struct lnet_nid *nid)
1773 cpt = lnet_net_lock_current();
1774 ni = lnet_nid_to_ni_locked(nid, cpt);
1775 lnet_net_unlock(cpt);
1781 lnet_count_acceptor_nets(void)
1783 /* Return the # of NIs that need the acceptor. */
1785 struct lnet_net *net;
1788 cpt = lnet_net_lock_current();
1789 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1790 /* all socklnd type networks should have the acceptor
1792 if (net->net_lnd->lnd_accept != NULL)
1796 lnet_net_unlock(cpt);
1801 struct lnet_ping_buffer *
1802 lnet_ping_buffer_alloc(int nbytes, gfp_t gfp)
1804 struct lnet_ping_buffer *pbuf;
1806 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nbytes), gfp);
                pbuf->pb_nbytes = nbytes;       /* size of pb_info */
1809 pbuf->pb_needs_post = false;
1810 atomic_set(&pbuf->pb_refcnt, 1);
1817 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1819 LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
1820 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nbytes));
1823 static struct lnet_ping_buffer *
1824 lnet_ping_target_create(int nbytes)
1826 struct lnet_ping_buffer *pbuf;
1828 pbuf = lnet_ping_buffer_alloc(nbytes, GFP_NOFS);
1830 CERROR("Can't allocate ping source [%d]\n", nbytes);
1834 pbuf->pb_info.pi_nnis = 0;
1835 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1836 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1837 pbuf->pb_info.pi_features =
1838 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1844 lnet_get_net_ni_bytes_locked(struct lnet_net *net)
1849 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1850 bytes += lnet_ping_sts_size(&ni->ni_nid);
1856 lnet_get_ni_bytes(void)
1859 struct lnet_net *net;
1864 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1865 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1866 bytes += lnet_ping_sts_size(&ni->ni_nid);
1875 lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
1877 struct lnet_ni_large_status *lstat, *lend;
1878 struct lnet_ni_status *stat, *end;
1882 __swab32s(&pbuf->pb_info.pi_magic);
1883 __swab32s(&pbuf->pb_info.pi_features);
1884 __swab32s(&pbuf->pb_info.pi_pid);
1885 __swab32s(&pbuf->pb_info.pi_nnis);
1886 nnis = pbuf->pb_info.pi_nnis;
1887 stat = &pbuf->pb_info.pi_ni[0];
1888 end = (void *)&pbuf->pb_info + pbuf->pb_nbytes;
1889 for (i = 0; i < nnis && stat + 1 <= end; i++, stat++) {
1890 __swab64s(&stat->ns_nid);
1891 __swab32s(&stat->ns_status);
1893 /* Might be total size */
1894 __swab32s(&stat->ns_msg_size);
1896 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_LARGE_ADDR))
1899 lstat = (struct lnet_ni_large_status *)stat;
1901 while (lstat + 1 <= lend) {
1902 __swab32s(&lstat->ns_status);
                /* struct lnet_nid never needs to be swabbed */
1904 lstat = lnet_ping_sts_next(lstat);
1909 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1913 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1915 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1917 /* Loopback is guaranteed to be present */
1918 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1920 if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
1926 lnet_ping_target_destroy(void)
1928 struct lnet_net *net;
1931 lnet_net_lock(LNET_LOCK_EX);
1933 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1934 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1936 ni->ni_status = NULL;
1941 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1942 the_lnet.ln_ping_target = NULL;
1944 lnet_net_unlock(LNET_LOCK_EX);
1948 lnet_ping_target_event_handler(struct lnet_event *event)
1950 struct lnet_ping_buffer *pbuf = event->md_user_ptr;
1952 if (event->unlinked)
1953 lnet_ping_buffer_decref(pbuf);
1957 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1958 struct lnet_handle_md *ping_mdh,
1959 int ni_bytes, bool set_eq)
1961 struct lnet_processid id = {
1962 .nid = LNET_ANY_NID,
1966 struct lnet_md md = { NULL };
1970 the_lnet.ln_ping_target_handler =
1971 lnet_ping_target_event_handler;
1973 *ppbuf = lnet_ping_target_create(ni_bytes);
1974 if (*ppbuf == NULL) {
1979 /* Ping target ME/MD */
1980 me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
1981 LNET_PROTO_PING_MATCHBITS, 0,
1982 LNET_UNLINK, LNET_INS_AFTER);
1985 CERROR("Can't create ping target ME: %d\n", rc);
1986 goto fail_decref_ping_buffer;
1989 /* initialize md content */
1990 md.start = &(*ppbuf)->pb_info;
1991 md.length = (*ppbuf)->pb_nbytes;
1992 md.threshold = LNET_MD_THRESH_INF;
1994 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1995 LNET_MD_MANAGE_REMOTE;
1996 md.handler = the_lnet.ln_ping_target_handler;
1997 md.user_ptr = *ppbuf;
1999 rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
2001 CERROR("Can't attach ping target MD: %d\n", rc);
2002 goto fail_decref_ping_buffer;
2004 lnet_ping_buffer_addref(*ppbuf);
2008 fail_decref_ping_buffer:
2009 LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
2010 lnet_ping_buffer_decref(*ppbuf);
2017 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
2018 struct lnet_handle_md *ping_mdh)
2020 LNetMDUnlink(*ping_mdh);
2021 LNetInvalidateMDHandle(ping_mdh);
2023 /* NB the MD could be busy; this just starts the unlink */
2024 wait_var_event_warning(&pbuf->pb_refcnt,
2025 atomic_read(&pbuf->pb_refcnt) <= 1,
2026 "Still waiting for ping data MD to unlink\n");
2030 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
2033 struct lnet_net *net;
2034 struct lnet_ni_status *ns, *end;
2035 struct lnet_ni_large_status *lns, *lend;
2038 pbuf->pb_info.pi_nnis = 0;
2039 ns = &pbuf->pb_info.pi_ni[0];
2040 end = (void *)&pbuf->pb_info + pbuf->pb_nbytes;
2041 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2042 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2043 if (!nid_is_nid4(&ni->ni_nid)) {
2044 if (ns == &pbuf->pb_info.pi_ni[1]) {
2045 /* This is primary, and it is long */
2046 pbuf->pb_info.pi_features |=
2047 LNET_PING_FEAT_PRIMARY_LARGE;
2051 LASSERT(ns + 1 <= end);
2052 ns->ns_nid = lnet_nid_to_nid4(&ni->ni_nid);
2055 ns->ns_status = lnet_ni_get_status_locked(ni);
2056 ni->ni_status = &ns->ns_status;
2059 pbuf->pb_info.pi_nnis++;
2066 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2067 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2068 if (nid_is_nid4(&ni->ni_nid))
2070 LASSERT(lns + 1 <= lend);
2072 lns->ns_nid = ni->ni_nid;
2075 ns->ns_status = lnet_ni_get_status_locked(ni);
2076 ni->ni_status = &lns->ns_status;
2079 lns = lnet_ping_sts_next(lns);
2082 if ((void *)lns > (void *)ns) {
2083 /* Record total info size */
2084 pbuf->pb_info.pi_ni[0].ns_msg_size =
2085 (void *)lns - (void *)&pbuf->pb_info;
2086 pbuf->pb_info.pi_features |= LNET_PING_FEAT_LARGE_ADDR;
2089 /* We (ab)use the ns_status of the loopback interface to
2090 * transmit the sequence number. The first interface listed
2091 * must be the loopback interface.
2093 rc = lnet_ping_info_validate(&pbuf->pb_info);
2095 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
2098 LNET_PING_BUFFER_SEQNO(pbuf) =
2099 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
2103 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
2104 struct lnet_handle_md ping_mdh)
2106 struct lnet_ping_buffer *old_pbuf = NULL;
2107 struct lnet_handle_md old_ping_md;
2109 /* switch the NIs to point to the new ping info created */
2110 lnet_net_lock(LNET_LOCK_EX);
2112 if (!the_lnet.ln_routing)
2113 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
2114 if (!lnet_peer_discovery_disabled)
2115 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
2117 /* Ensure only known feature bits have been set. */
2118 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
2119 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
2121 lnet_ping_target_install_locked(pbuf);
2123 if (the_lnet.ln_ping_target) {
2124 old_pbuf = the_lnet.ln_ping_target;
2125 old_ping_md = the_lnet.ln_ping_target_md;
2127 the_lnet.ln_ping_target_md = ping_mdh;
2128 the_lnet.ln_ping_target = pbuf;
2130 lnet_net_unlock(LNET_LOCK_EX);
2133 /* unlink and free the old ping info */
2134 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
2135 lnet_ping_buffer_decref(old_pbuf);
2138 lnet_push_update_to_peers(0);
2142 lnet_ping_target_fini(void)
2144 lnet_ping_md_unlink(the_lnet.ln_ping_target,
2145 &the_lnet.ln_ping_target_md);
2147 lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
2148 lnet_ping_target_destroy();
2151 /* Resize the push target. */
2152 int lnet_push_target_resize(void)
2154 struct lnet_handle_md mdh;
2155 struct lnet_handle_md old_mdh;
2156 struct lnet_ping_buffer *pbuf;
2157 struct lnet_ping_buffer *old_pbuf;
2162 nbytes = the_lnet.ln_push_target_nbytes;
2164 CDEBUG(D_NET, "Invalid nbytes %d\n", nbytes);
2168 /* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
2169 * dropped when we need to resize again (see "old_pbuf" below) or when
2170 * LNet is shutdown (see lnet_push_target_fini())
2172 pbuf = lnet_ping_buffer_alloc(nbytes, GFP_NOFS);
2174 CDEBUG(D_NET, "Can't allocate pbuf for nbytes %d\n", nbytes);
2178 rc = lnet_push_target_post(pbuf, &mdh);
2180 CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
2181 lnet_ping_buffer_decref(pbuf);
2185 lnet_net_lock(LNET_LOCK_EX);
2186 old_pbuf = the_lnet.ln_push_target;
2187 old_mdh = the_lnet.ln_push_target_md;
2188 the_lnet.ln_push_target = pbuf;
2189 the_lnet.ln_push_target_md = mdh;
2190 lnet_net_unlock(LNET_LOCK_EX);
2193 LNetMDUnlink(old_mdh);
2194 /* Drop ref set by lnet_ping_buffer_alloc() */
2195 lnet_ping_buffer_decref(old_pbuf);
2198 /* Received another push or reply that requires a larger buffer */
2199 if (nbytes < the_lnet.ln_push_target_nbytes)
2202 CDEBUG(D_NET, "nbytes %d success\n", nbytes);
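/* (Re)post the push target buffer on the reserved portal. The ME matches
 * any peer using the ping match bits, and the MD accepts (and truncates)
 * PUTs larger than the local buffer. */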
2206 int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
2207 struct lnet_handle_md *mdhp)
2209 struct lnet_processid id = { LNET_ANY_NID, LNET_PID_ANY };
2210 struct lnet_md md = { NULL };
2214 me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
2215 LNET_PROTO_PING_MATCHBITS, 0,
2216 LNET_UNLINK, LNET_INS_AFTER);
2219 CERROR("Can't create push target ME: %d\n", rc);
2223 pbuf->pb_needs_post = false;
2225 /* This reference is dropped by lnet_push_target_event_handler() */
2226 lnet_ping_buffer_addref(pbuf);
2228 /* initialize md content */
2229 md.start = &pbuf->pb_info;
2230 md.length = pbuf->pb_nbytes;
2233 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
2235 md.handler = the_lnet.ln_push_target_handler;
2237 rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
2239 CERROR("Can't attach push MD: %d\n", rc);
2240 lnet_ping_buffer_decref(pbuf);
2241 pbuf->pb_needs_post = true;
2245 CDEBUG(D_NET, "posted push target %p\n", pbuf);
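/* Event handler for the push target MD: byte-swap the received ping info
 * if the peer runs the opposite endianness, hand PUT events to
 * lnet_peer_push_event(), and drop the buffer reference that was taken
 * when the MD was posted. */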
2250 static void lnet_push_target_event_handler(struct lnet_event *ev)
2252 struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
2254 CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
2257 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2258 lnet_swap_pinginfo(pbuf);
2260 if (ev->type == LNET_EVENT_UNLINK) {
2261 /* Drop ref added by lnet_push_target_post() */
2262 lnet_ping_buffer_decref(pbuf);
2266 lnet_peer_push_event(ev);
2268 /* Drop ref added by lnet_push_target_post */
2269 lnet_ping_buffer_decref(pbuf);
2272 /* Initialize the push target. */
2273 static int lnet_push_target_init(void)
2277 if (the_lnet.ln_push_target)
2280 the_lnet.ln_push_target_handler =
2281 lnet_push_target_event_handler;
2283 rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
2286 /* Start at the required minimum, we'll enlarge if required. */
2287 the_lnet.ln_push_target_nbytes = LNET_PING_INFO_MIN_SIZE;
2289 rc = lnet_push_target_resize();
2291 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2292 the_lnet.ln_push_target_handler = NULL;
2298 /* Clean up the push target. */
2299 static void lnet_push_target_fini(void)
2301 if (!the_lnet.ln_push_target)
2304 /* Unlink and invalidate to prevent new references. */
2305 LNetMDUnlink(the_lnet.ln_push_target_md);
2306 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
2308 /* Wait for the unlink to complete. */
2309 wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
2310 atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
2311 "Still waiting for ping data MD to unlink\n");
2313 /* Drop ref set by lnet_ping_buffer_alloc() */
2314 lnet_ping_buffer_decref(the_lnet.ln_push_target);
2315 the_lnet.ln_push_target = NULL;
2316 the_lnet.ln_push_target_nbytes = 0;
2318 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2319 lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
2320 the_lnet.ln_push_target_handler = NULL;
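/* Per-CPT TX queue credits for an NI: the net's max_tx_credits are split
 * evenly across the NI's CPTs, raised to at least 8x peer_tx_credits and
 * then clamped back to max_tx_credits.  E.g. max_tx_credits=64,
 * peer_tx_credits=16 and 8 CPTs: 64/8 = 8, raised to 8*16 = 128, then
 * clamped back to 64. */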
2324 lnet_ni_tq_credits(struct lnet_ni *ni)
2328 LASSERT(ni->ni_ncpts >= 1);
2330 if (ni->ni_ncpts == 1)
2331 return ni->ni_net->net_tunables.lct_max_tx_credits;
2333 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
2334 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
2335 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
2341 lnet_ni_unlink_locked(struct lnet_ni *ni)
2343 /* move it to the zombie list so nobody can find it anymore */
2344 LASSERT(!list_empty(&ni->ni_netlist));
2345 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
2346 lnet_ni_decref_locked(ni, 0);
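/* Drain the net's zombie NI list.  For each NI, wait until its per-CPT
 * refcounts drop (releasing the net lock and API mutex while sleeping so
 * other threads can make progress), then call the LND's lnd_shutdown()
 * under the LND mutex. */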
2350 lnet_clear_zombies_nis_locked(struct lnet_net *net)
2355 struct list_head *zombie_list = &net->net_ni_zombie;
2358 * Now wait for the NIs I just nuked to show up on the zombie
2359 * list and shut them down in guaranteed thread context
2362 while ((ni = list_first_entry_or_null(zombie_list,
2364 ni_netlist)) != NULL) {
2368 list_del_init(&ni->ni_netlist);
2369 /* the ni should be in deleting state. If it's not, it's a bug */
2371 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
2372 cfs_percpt_for_each(ref, j, ni->ni_refs) {
2375 /* still busy, add it back to zombie list */
2376 list_add(&ni->ni_netlist, zombie_list);
2380 if (!list_empty(&ni->ni_netlist)) {
2381 /* Unlock mutex while waiting to allow other
2382 * threads to read the LNet state and fall through
2385 lnet_net_unlock(LNET_LOCK_EX);
2386 mutex_unlock(&the_lnet.ln_api_mutex);
2389 if ((i & (-i)) == i) {
2391 "Waiting for zombie LNI %s\n",
2392 libcfs_nidstr(&ni->ni_nid));
2394 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2396 mutex_lock(&the_lnet.ln_api_mutex);
2397 lnet_net_lock(LNET_LOCK_EX);
2401 lnet_net_unlock(LNET_LOCK_EX);
2403 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
2405 LASSERT(!in_interrupt());
2406 /* Holding the LND mutex makes it safe for lnd_shutdown
2407 * to call module_put(). Module unload cannot finish
2408 * until lnet_unregister_lnd() completes, and that
2409 * requires the LND mutex.
2411 mutex_unlock(&the_lnet.ln_api_mutex);
2412 mutex_lock(&the_lnet.ln_lnd_mutex);
2413 (net->net_lnd->lnd_shutdown)(ni);
2414 mutex_unlock(&the_lnet.ln_lnd_mutex);
2415 mutex_lock(&the_lnet.ln_api_mutex);
2418 CDEBUG(D_LNI, "Removed LNI %s\n",
2419 libcfs_nidstr(&ni->ni_nid));
2423 lnet_net_lock(LNET_LOCK_EX);
2427 /* shut down the NI and release its refcount */
2429 lnet_shutdown_lndni(struct lnet_ni *ni)
2432 struct lnet_net *net = ni->ni_net;
2434 lnet_net_lock(LNET_LOCK_EX);
2436 ni->ni_state = LNET_NI_STATE_DELETING;
2438 lnet_ni_unlink_locked(ni);
2439 lnet_incr_dlc_seq();
2440 lnet_net_unlock(LNET_LOCK_EX);
2442 /* clear messages for this NI on the lazy portal */
2443 for (i = 0; i < the_lnet.ln_nportals; i++)
2444 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
2446 lnet_net_lock(LNET_LOCK_EX);
2447 lnet_clear_zombies_nis_locked(net);
2448 lnet_net_unlock(LNET_LOCK_EX);
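/* Remove @net from the global list, shut down every NI it still holds and
 * clean up the peer table entries that reference this net. */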
2452 lnet_shutdown_lndnet(struct lnet_net *net)
2456 lnet_net_lock(LNET_LOCK_EX);
2458 list_del_init(&net->net_list);
2460 while ((ni = list_first_entry_or_null(&net->net_ni_list,
2462 ni_netlist)) != NULL) {
2463 lnet_net_unlock(LNET_LOCK_EX);
2464 lnet_shutdown_lndni(ni);
2465 lnet_net_lock(LNET_LOCK_EX);
2468 lnet_net_unlock(LNET_LOCK_EX);
2470 /* Do peer table cleanup for this net */
2471 lnet_peer_tables_cleanup(net);
2477 lnet_shutdown_lndnets(void)
2479 struct lnet_net *net;
2481 struct lnet_msg *msg, *tmp;
2483 /* NB called holding the global mutex */
2485 /* All quiet on the API front */
2486 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING ||
2487 the_lnet.ln_state == LNET_STATE_STOPPING);
2488 LASSERT(the_lnet.ln_refcount == 0);
2490 lnet_net_lock(LNET_LOCK_EX);
2491 the_lnet.ln_state = LNET_STATE_STOPPING;
2494 * move the nets to the zombie list to avoid them being
2495 * picked up for new work. LONET is also included in the
2496 * nets that will be moved to the zombie list
2498 list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
2500 /* Drop the cached loopback Net. */
2501 if (the_lnet.ln_loni != NULL) {
2502 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
2503 the_lnet.ln_loni = NULL;
2505 lnet_net_unlock(LNET_LOCK_EX);
2507 /* iterate through the net zombie list and delete each net */
2508 while ((net = list_first_entry_or_null(&the_lnet.ln_net_zombie,
2511 lnet_shutdown_lndnet(net);
2513 spin_lock(&the_lnet.ln_msg_resend_lock);
2514 list_splice(&the_lnet.ln_msg_resend, &resend);
2515 spin_unlock(&the_lnet.ln_msg_resend_lock);
2517 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
2518 list_del_init(&msg->msg_list);
2519 msg->msg_no_resend = true;
2520 lnet_finalize(msg, -ECANCELED);
2523 lnet_net_lock(LNET_LOCK_EX);
2524 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
2525 lnet_net_unlock(LNET_LOCK_EX);
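/* Start a single NI: record any LND tunables, call the LND's lnd_startup()
 * under the LND mutex, special-case the loopback NI (which carries no
 * credits), verify the net advertises TX credits, and finally size the
 * per-CPT TX queues and initialize the health value. */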
2529 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2532 struct lnet_tx_queue *tq;
2534 struct lnet_net *net = ni->ni_net;
2536 mutex_lock(&the_lnet.ln_lnd_mutex);
2539 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2540 ni->ni_lnd_tunables_set = true;
2543 rc = (net->net_lnd->lnd_startup)(ni);
2545 mutex_unlock(&the_lnet.ln_lnd_mutex);
2548 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2549 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2554 ni->ni_state = LNET_NI_STATE_ACTIVE;
2557 /* We keep a reference on the loopback net through the loopback NI */
2558 if (net->net_lnd->lnd_type == LOLND) {
2560 LASSERT(the_lnet.ln_loni == NULL);
2561 the_lnet.ln_loni = ni;
2562 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2563 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2564 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2565 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2569 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2570 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2571 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2572 libcfs_lnd2str(net->net_lnd->lnd_type),
2573 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2575 /* shutdown the NI since if we get here then it must've already been started
2578 lnet_shutdown_lndni(ni);
2582 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2583 tq->tq_credits_min =
2584 tq->tq_credits_max =
2585 tq->tq_credits = lnet_ni_tq_credits(ni);
2588 atomic_set(&ni->ni_tx_credits,
2589 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2590 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2592 /* Nodes with small feet have little entropy. The NID for this
2593 * node gives the most entropy in the low bits.
2595 add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));
2597 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2598 libcfs_nidstr(&ni->ni_nid),
2599 ni->ni_net->net_tunables.lct_peer_tx_credits,
2600 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2601 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2602 ni->ni_net->net_tunables.lct_peer_timeout);
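/* Find the LND for @lnd_type, using request_module() to load the LND
 * module on demand if it has not registered itself yet. */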
2610 static const struct lnet_lnd *lnet_load_lnd(u32 lnd_type)
2612 const struct lnet_lnd *lnd;
2615 mutex_lock(&the_lnet.ln_lnd_mutex);
2616 lnd = lnet_find_lnd_by_type(lnd_type);
2618 mutex_unlock(&the_lnet.ln_lnd_mutex);
2619 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2620 mutex_lock(&the_lnet.ln_lnd_mutex);
2622 lnd = lnet_find_lnd_by_type(lnd_type);
2624 mutex_unlock(&the_lnet.ln_lnd_mutex);
2625 CERROR("Can't load LND %s, module %s, rc=%d\n",
2626 libcfs_lnd2str(lnd_type),
2627 libcfs_lnd2modname(lnd_type), rc);
2628 #ifndef HAVE_MODULE_LOADING_SUPPORT
2629 LCONSOLE_ERROR_MSG(0x104,
2630 "Your kernel must be compiled with kernel module loading support.");
2632 return ERR_PTR(-EINVAL);
2635 mutex_unlock(&the_lnet.ln_lnd_mutex);
2641 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2644 struct lnet_net *net_l = NULL;
2645 LIST_HEAD(local_ni_list);
2649 const struct lnet_lnd *lnd;
2651 net->net_tunables.lct_peer_timeout;
2653 net->net_tunables.lct_max_tx_credits;
2654 int peerrtrcredits =
2655 net->net_tunables.lct_peer_rtr_credits;
2658 * make sure that this net is unique. If it isn't then
2659 * we are adding interfaces to an already existing network, and
2660 * 'net' is just a convenient way to pass in the list.
2661 * If it is unique we need to find the LND and load it if necessary
2664 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2665 lnd_type = LNET_NETTYP(net->net_id);
2667 lnd = lnet_load_lnd(lnd_type);
2673 mutex_lock(&the_lnet.ln_lnd_mutex);
2675 mutex_unlock(&the_lnet.ln_lnd_mutex);
2681 * net_l: if the network being added is unique then net_l
2682 * will point to that network
2683 * if the network being added is not unique then
2684 * net_l points to the existing network.
2686 * When we enter the loop below, we'll pick NIs off the
2687 * network being added and start them up, then add them to
2688 * a local ni list. Once we've successfully started all
2689 * the NIs then we join the local NI list (of started up
2690 * networks) with the net_l->net_ni_list, which should
2691 * point to the correct network to add the new ni list to
2693 * If any of the new NIs fail to start up, then we want to
2694 * iterate through the local ni list, which should include
2695 * any NIs which were successfully started up, and shut
2698 * After that we want to delete the network being added,
2699 * to avoid a memory leak.
2701 while ((ni = list_first_entry_or_null(&net->net_ni_added,
2703 ni_netlist)) != NULL) {
2704 list_del_init(&ni->ni_netlist);
2706 /* make sure that the NI we're about to start
2707 * up is actually unique. If it's not, fail. */
2708 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2709 ni->ni_interface)) {
2714 /* adjust the pointer to the parent network, just in case
2715 * the net is a duplicate */
2718 rc = lnet_startup_lndni(ni, tun);
2724 list_add_tail(&ni->ni_netlist, &local_ni_list);
2729 lnet_net_lock(LNET_LOCK_EX);
2730 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2731 lnet_incr_dlc_seq();
2732 lnet_net_unlock(LNET_LOCK_EX);
2734 /* if the network is not unique then we don't want to keep
2735 * it around after we're done. Free it. Otherwise add that
2736 * net to the global the_lnet.ln_nets */
2737 if (net_l != net && net_l != NULL) {
2739 * TODO - note. currently the tunables cannot be updated
2745 * restore tunables after it has been overwritten by the
2748 if (peer_timeout != -1)
2749 net->net_tunables.lct_peer_timeout = peer_timeout;
2750 if (maxtxcredits != -1)
2751 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2752 if (peerrtrcredits != -1)
2753 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2755 lnet_net_lock(LNET_LOCK_EX);
2756 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2757 lnet_net_unlock(LNET_LOCK_EX);
2764 * shutdown the new NIs that are being started up
2765 * free the NET being started
2767 while ((ni = list_first_entry_or_null(&local_ni_list,
2769 ni_netlist)) != NULL)
2770 lnet_shutdown_lndni(ni);
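/* Bring up every net on @netlist in turn; if any net fails to start, shut
 * down all the nets (and NIs) that were already started. */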
2779 lnet_startup_lndnets(struct list_head *netlist)
2781 struct lnet_net *net;
2786 * Change to running state before bringing up the LNDs. This
2787 * allows lnet_shutdown_lndnets() to assert that we've passed
2790 lnet_net_lock(LNET_LOCK_EX);
2791 the_lnet.ln_state = LNET_STATE_RUNNING;
2792 lnet_net_unlock(LNET_LOCK_EX);
2794 while ((net = list_first_entry_or_null(netlist,
2796 net_list)) != NULL) {
2797 list_del_init(&net->net_list);
2799 rc = lnet_startup_lndnet(net, NULL);
2809 lnet_shutdown_lndnets();
2814 static int lnet_genl_parse_list(struct sk_buff *msg,
2815 const struct ln_key_list *data[], u16 idx)
2817 const struct ln_key_list *list = data[idx];
2818 const struct ln_key_props *props;
2819 struct nlattr *node;
2825 if (!list->lkl_maxattr)
2828 props = list->lkl_list;
2832 node = nla_nest_start(msg, LN_SCALAR_ATTR_LIST);
2836 for (count = 1; count <= list->lkl_maxattr; count++) {
2837 struct nlattr *key = nla_nest_start(msg, count);
2840 nla_put_u16(msg, LN_SCALAR_ATTR_LIST_SIZE,
2843 nla_put_u16(msg, LN_SCALAR_ATTR_INDEX, count);
2844 if (props[count].lkp_value)
2845 nla_put_string(msg, LN_SCALAR_ATTR_VALUE,
2846 props[count].lkp_value);
2847 if (props[count].lkp_key_format)
2848 nla_put_u16(msg, LN_SCALAR_ATTR_KEY_FORMAT,
2849 props[count].lkp_key_format);
2850 nla_put_u16(msg, LN_SCALAR_ATTR_NLA_TYPE,
2851 props[count].lkp_data_type);
2852 if (props[count].lkp_data_type == NLA_NESTED) {
2855 rc = lnet_genl_parse_list(msg, data, ++idx);
2861 nla_nest_end(msg, key);
2864 nla_nest_end(msg, node);
2868 int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
2869 const struct genl_family *family, int flags,
2870 u8 cmd, const struct ln_key_list *data[])
2878 hdr = genlmsg_put(msg, portid, seq, family, flags, cmd);
2880 GOTO(canceled, rc = -EMSGSIZE);
2882 rc = lnet_genl_parse_list(msg, data, 0);
2886 genlmsg_end(msg, hdr);
2889 genlmsg_cancel(msg, hdr);
2890 return rc > 0 ? 0 : rc;
2892 EXPORT_SYMBOL(lnet_genl_send_scalar_list);
2894 static struct genl_family lnet_family;
2897 * Initialize LNet library.
2899 * Automatically called at module loading time. Caller has to call
2900 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2901 * latter returned 0. It must be called exactly once.
2903 * \retval 0 on success
2904 * \retval -ve on failures.
2906 int lnet_lib_init(void)
2910 lnet_assert_wire_constants();
2912 /* refer to global cfs_cpt_table for now */
2913 the_lnet.ln_cpt_table = cfs_cpt_tab;
2914 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
2916 LASSERT(the_lnet.ln_cpt_number > 0);
2917 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2918 /* we are under risk of consuming all lh_cookie */
2919 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2920 "please change setting of CPT-table and retry\n",
2921 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2925 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2926 the_lnet.ln_cpt_bits++;
2928 rc = lnet_create_locks();
2930 CERROR("Can't create LNet global locks: %d\n", rc);
2934 rc = genl_register_family(&lnet_family);
2936 lnet_destroy_locks();
2937 CERROR("Can't register LNet netlink family: %d\n", rc);
2941 the_lnet.ln_refcount = 0;
2942 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2943 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2945 /* The hash table size is the number of bits it takes to express the set
2946 * ln_num_routes, minus 1 (better to under-estimate than over so we
2947 * don't waste memory). */
2948 if (rnet_htable_size <= 0)
2949 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2950 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2951 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2952 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2953 order_base_2(rnet_htable_size) - 1);
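/* e.g. rnet_htable_size = 128: order_base_2(128) = 7, so 6 hash bits
 * and 64 remote-net hash buckets. */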
2955 /* All LNDs apart from the LOLND are in separate modules. They
2956 * register themselves when their module loads, and unregister
2957 * themselves when their module is unloaded. */
2958 lnet_register_lnd(&the_lolnd);
2963 * Finalize LNet library.
2965 * \pre lnet_lib_init() called with success.
2966 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2968 * As this happens at module-unload, all lnds must already be unloaded,
2969 * so they must already be unregistered.
2971 void lnet_lib_exit(void)
2975 LASSERT(the_lnet.ln_refcount == 0);
2976 lnet_unregister_lnd(&the_lolnd);
2977 for (i = 0; i < NUM_LNDS; i++)
2978 LASSERT(!the_lnet.ln_lnds[i]);
2979 lnet_destroy_locks();
2980 genl_unregister_family(&lnet_family);
2984 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2986 * Users must call this function at least once before any other functions.
2987 * For each successful call there must be a corresponding call to
2988 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2991 * The PID used by LNet may be different from the one requested.
2994 * \param requested_pid PID requested by the caller.
2996 * \return >= 0 on success, and < 0 error code on failures.
2999 LNetNIInit(lnet_pid_t requested_pid)
3001 int im_a_router = 0;
3004 struct lnet_ping_buffer *pbuf;
3005 struct lnet_handle_md ping_mdh;
3006 LIST_HEAD(net_head);
3007 struct lnet_net *net;
3009 mutex_lock(&the_lnet.ln_api_mutex);
3011 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
3013 if (the_lnet.ln_state == LNET_STATE_STOPPING) {
3014 mutex_unlock(&the_lnet.ln_api_mutex);
3018 if (the_lnet.ln_refcount > 0) {
3019 rc = the_lnet.ln_refcount++;
3020 mutex_unlock(&the_lnet.ln_api_mutex);
3024 rc = lnet_prepare(requested_pid);
3026 mutex_unlock(&the_lnet.ln_api_mutex);
3030 /* create a network for Loopback network */
3031 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
3034 goto err_empty_list;
3037 /* Add in the loopback NI */
3038 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
3040 goto err_empty_list;
3043 if (use_tcp_bonding)
3044 CWARN("use_tcp_bonding has been removed. Use Multi-Rail and Dynamic Discovery instead, see LU-13641\n");
3046 /* If LNet is being initialized via DLC it is possible
3047 * that the user requests not to load module parameters (ones which
3048 * are supported by DLC) on initialization. Therefore, make sure not
3049 * to load networks, routes and forwarding from module parameters
3050 * in this case. On cleanup in case of failure only clean up
3051 * routes if it has been loaded */
3052 if (!the_lnet.ln_nis_from_mod_params) {
3053 rc = lnet_parse_networks(&net_head, lnet_get_networks());
3055 goto err_empty_list;
3058 rc = lnet_startup_lndnets(&net_head);
3060 goto err_empty_list;
3062 if (!the_lnet.ln_nis_from_mod_params) {
3063 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
3065 goto err_shutdown_lndnis;
3067 rc = lnet_rtrpools_alloc(im_a_router);
3069 goto err_destroy_routes;
3072 rc = lnet_acceptor_start();
3074 goto err_destroy_routes;
3076 the_lnet.ln_refcount = 1;
3077 /* Now I may use my own API functions... */
3079 ni_bytes = LNET_PING_INFO_HDR_SIZE;
3080 list_for_each_entry(net, &the_lnet.ln_nets, net_list)
3081 ni_bytes += lnet_get_net_ni_bytes_locked(net);
3083 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_bytes, true);
3085 goto err_acceptor_stop;
3087 lnet_ping_target_update(pbuf, ping_mdh);
3089 the_lnet.ln_mt_handler = lnet_mt_event_handler;
3091 rc = lnet_push_target_init();
3095 rc = lnet_peer_discovery_start();
3097 goto err_destroy_push_target;
3099 rc = lnet_monitor_thr_start();
3101 goto err_stop_discovery_thr;
3104 lnet_router_debugfs_init();
3106 mutex_unlock(&the_lnet.ln_api_mutex);
3108 complete_all(&the_lnet.ln_started);
3110 /* wait for all routers to start */
3111 lnet_wait_router_start();
3115 err_stop_discovery_thr:
3116 lnet_peer_discovery_stop();
3117 err_destroy_push_target:
3118 lnet_push_target_fini();
3120 lnet_ping_target_fini();
3122 the_lnet.ln_refcount = 0;
3123 lnet_acceptor_stop();
3125 if (!the_lnet.ln_nis_from_mod_params)
3126 lnet_destroy_routes();
3127 err_shutdown_lndnis:
3128 lnet_shutdown_lndnets();
3132 mutex_unlock(&the_lnet.ln_api_mutex);
3133 while ((net = list_first_entry_or_null(&net_head,
3135 net_list)) != NULL) {
3136 list_del_init(&net->net_list);
3141 EXPORT_SYMBOL(LNetNIInit);
3144 * Stop LNet interfaces, routing, and forwarding.
3146 * Users must call this function once for each successful call to LNetNIInit().
3147 * Once the LNetNIFini() operation has been started, the results of pending
3148 * API operations are undefined.
3150 * \return always 0 for current implementation.
3155 mutex_lock(&the_lnet.ln_api_mutex);
3157 LASSERT(the_lnet.ln_refcount > 0);
3159 if (the_lnet.ln_refcount != 1) {
3160 the_lnet.ln_refcount--;
3162 LASSERT(!the_lnet.ln_niinit_self);
3164 lnet_net_lock(LNET_LOCK_EX);
3165 the_lnet.ln_state = LNET_STATE_STOPPING;
3166 lnet_net_unlock(LNET_LOCK_EX);
3170 lnet_router_debugfs_fini();
3171 lnet_monitor_thr_stop();
3172 lnet_peer_discovery_stop();
3173 lnet_push_target_fini();
3174 lnet_ping_target_fini();
3176 /* Teardown fns that use my own API functions BEFORE here */
3177 the_lnet.ln_refcount = 0;
3179 lnet_acceptor_stop();
3180 lnet_destroy_routes();
3181 lnet_shutdown_lndnets();
3185 mutex_unlock(&the_lnet.ln_api_mutex);
3188 EXPORT_SYMBOL(LNetNIFini);
3191 * Grabs the ni data from the ni structure and fills in the out parameters
3194 * \param[in] ni network interface structure
3195 * \param[out] cfg_ni NI config information
3196 * \param[out] tun network and LND tunables
3199 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
3200 struct lnet_ioctl_config_lnd_tunables *tun,
3201 struct lnet_ioctl_element_stats *stats,
3204 size_t min_size = 0;
3207 if (!ni || !cfg_ni || !tun || !nid_is_nid4(&ni->ni_nid))
3210 if (ni->ni_interface != NULL) {
3211 strncpy(cfg_ni->lic_ni_intf,
3213 sizeof(cfg_ni->lic_ni_intf));
3216 cfg_ni->lic_nid = lnet_nid_to_nid4(&ni->ni_nid);
3217 cfg_ni->lic_status = lnet_ni_get_status_locked(ni);
3218 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
3220 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
3223 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
3224 LNET_STATS_TYPE_SEND);
3225 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
3226 LNET_STATS_TYPE_RECV);
3227 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
3228 LNET_STATS_TYPE_DROP);
3232 * tun->lt_tun will always be present, but in order to be
3233 * backwards compatible, we need to deal with the cases when
3234 * tun->lt_tun is smaller than what the kernel has, because it
3235 * comes from an older version of a userspace program. In that case
3236 * we copy only as much information as fits in the available space.
3238 min_size = tun_size - sizeof(tun->lt_cmn);
3239 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
3241 /* copy over the cpts */
3242 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
3243 ni->ni_cpts == NULL) {
3244 for (i = 0; i < ni->ni_ncpts; i++)
3245 cfg_ni->lic_cpts[i] = i;
3248 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
3249 i < LNET_MAX_SHOW_NUM_CPT;
3251 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
3253 cfg_ni->lic_ncpts = ni->ni_ncpts;
3257 * NOTE: This is a legacy function left in the code to be backwards
3258 * compatible with older userspace programs. It should eventually be removed.
3261 * Grabs the ni data from the ni structure and fills in the out parameters
3264 * \param[in] ni network interface structure
3265 * \param[out] config config information
3268 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
3269 struct lnet_ioctl_config_data *config)
3271 struct lnet_ioctl_net_config *net_config;
3272 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
3273 size_t min_size, tunable_size = 0;
3276 if (!ni || !config || !nid_is_nid4(&ni->ni_nid))
3279 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
3283 if (!ni->ni_interface)
3286 strncpy(net_config->ni_interface,
3288 sizeof(net_config->ni_interface));
3290 config->cfg_nid = lnet_nid_to_nid4(&ni->ni_nid);
3291 config->cfg_config_u.cfg_net.net_peer_timeout =
3292 ni->ni_net->net_tunables.lct_peer_timeout;
3293 config->cfg_config_u.cfg_net.net_max_tx_credits =
3294 ni->ni_net->net_tunables.lct_max_tx_credits;
3295 config->cfg_config_u.cfg_net.net_peer_tx_credits =
3296 ni->ni_net->net_tunables.lct_peer_tx_credits;
3297 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
3298 ni->ni_net->net_tunables.lct_peer_rtr_credits;
3300 net_config->ni_status = lnet_ni_get_status_locked(ni);
3303 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
3305 for (i = 0; i < num_cpts; i++)
3306 net_config->ni_cpts[i] = ni->ni_cpts[i];
3308 config->cfg_ncpts = num_cpts;
3312 * See if user land tools sent in a newer and larger version
3313 * of struct lnet_tunables than what the kernel uses.
3315 min_size = sizeof(*config) + sizeof(*net_config);
3317 if (config->cfg_hdr.ioc_len > min_size)
3318 tunable_size = config->cfg_hdr.ioc_len - min_size;
3320 /* Don't copy too much data to user space */
3321 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
3322 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
3324 if (lnd_cfg && min_size) {
3325 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
3326 config->cfg_config_u.cfg_net.net_interface_count = 1;
3328 /* Tell user land that kernel side has less data */
3329 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
3330 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
3331 config->cfg_hdr.ioc_len -= min_size;
3337 lnet_get_ni_idx_locked(int idx)
3340 struct lnet_net *net;
3342 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3343 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3352 int lnet_get_net_healthv_locked(struct lnet_net *net)
3355 int best_healthv = 0;
3356 int healthv, ni_fatal;
3358 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3359 healthv = atomic_read(&ni->ni_healthv);
3360 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
3361 if (!ni_fatal && healthv > best_healthv)
3362 best_healthv = healthv;
3365 return best_healthv;
3369 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
3372 struct lnet_net *net = mynet;
3375 * It is possible that the net has been cleaned out while there is
3376 * a message being sent. This function accesses the net without
3377 * checking if the list is empty
3381 net = list_first_entry(&the_lnet.ln_nets,
3384 if (list_empty(&net->net_ni_list))
3386 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
3392 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
3393 /* if you reached the end of the ni list and the net is
3394 * specified, then there are no more nis in that net */
3398 /* we reached the end of this net ni list. move to the
3400 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
3401 /* no more nets and no more NIs. */
3404 /* get the next net */
3405 net = list_first_entry(&prev->ni_net->net_list, struct lnet_net,
3407 if (list_empty(&net->net_ni_list))
3409 /* get the ni on it */
3410 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
3416 if (list_empty(&prev->ni_netlist))
3419 /* there are more nis left */
3420 ni = list_first_entry(&prev->ni_netlist, struct lnet_ni, ni_netlist);
3426 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3431 int idx = config->cfg_count;
3433 cpt = lnet_net_lock_current();
3435 ni = lnet_get_ni_idx_locked(idx);
3440 lnet_fill_ni_info_legacy(ni, config);
3444 lnet_net_unlock(cpt);
3449 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3450 struct lnet_ioctl_config_lnd_tunables *tun,
3451 struct lnet_ioctl_element_stats *stats,
3458 if (!cfg_ni || !tun || !stats)
3461 cpt = lnet_net_lock_current();
3463 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3468 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3472 lnet_net_unlock(cpt);
3476 static int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3485 cpt = lnet_net_lock_current();
3487 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3490 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3494 lnet_net_unlock(cpt);
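/* Common path for dynamically adding a net: reject the add if a remote
 * net with the same ID is already configured (it would invalidate the
 * routing configuration), apply or default the tunables, start the net,
 * grow the ping target to cover the new NIs, apply UDSPs, start the
 * acceptor if the LND needs one, and publish the new ping buffer. */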
3499 static int lnet_add_net_common(struct lnet_net *net,
3500 struct lnet_ioctl_config_lnd_tunables *tun)
3502 struct lnet_handle_md ping_mdh;
3503 struct lnet_ping_buffer *pbuf;
3504 struct lnet_remotenet *rnet;
3509 lnet_net_lock(LNET_LOCK_EX);
3510 rnet = lnet_find_rnet_locked(net->net_id);
3511 lnet_net_unlock(LNET_LOCK_EX);
3513 * make sure that the net added doesn't invalidate the current
3514 * configuration LNet is keeping
3517 CERROR("Adding net %s will invalidate routing configuration\n",
3518 libcfs_net2str(net->net_id));
3524 memcpy(&net->net_tunables,
3525 &tun->lt_cmn, sizeof(net->net_tunables));
3527 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3529 net_id = net->net_id;
3531 rc = lnet_startup_lndnet(net,
3532 (tun) ? &tun->lt_tun : NULL);
3536 /* make sure you calculate the correct number of slots in the ping
3537 * buffer. Since the ping info is a flattened list of all the NIs,
3538 * we should allocate enough slots to accommodate the number of NIs
3539 * which will be added.
3541 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3542 LNET_PING_INFO_HDR_SIZE +
3543 lnet_get_ni_bytes(),
3546 lnet_shutdown_lndnet(net);
3550 lnet_net_lock(LNET_LOCK_EX);
3551 net = lnet_get_net_locked(net_id);
3554 /* apply the UDSPs */
3555 rc = lnet_udsp_apply_policies_on_net(net);
3557 CERROR("Failed to apply UDSPs on local net %s\n",
3558 libcfs_net2str(net->net_id));
3560 /* At this point we lost track of which NI was just added, so we
3561 * just re-apply the policies on all of the NIs on this net
3563 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3564 rc = lnet_udsp_apply_policies_on_ni(ni);
3566 CERROR("Failed to apply UDSPs on ni %s\n",
3567 libcfs_nidstr(&ni->ni_nid));
3569 lnet_net_unlock(LNET_LOCK_EX);
3572 * Start the acceptor thread if this is the first network
3573 * being added that requires the thread.
3575 if (net->net_lnd->lnd_accept) {
3576 rc = lnet_acceptor_start();
3578 /* shutdown the net that we just started */
3579 CERROR("Failed to start up acceptor thread\n");
3580 lnet_shutdown_lndnet(net);
3585 lnet_net_lock(LNET_LOCK_EX);
3586 lnet_peer_net_added(net);
3587 lnet_net_unlock(LNET_LOCK_EX);
3589 lnet_ping_target_update(pbuf, ping_mdh);
3594 lnet_ping_md_unlink(pbuf, &ping_mdh);
3595 lnet_ping_buffer_decref(pbuf);
3600 lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
3603 if (tun->lt_cmn.lct_peer_timeout < 0)
3604 tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
3605 if (!tun->lt_cmn.lct_peer_tx_credits)
3606 tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
3607 if (!tun->lt_cmn.lct_max_tx_credits)
3608 tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
3612 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3613 struct lnet_ioctl_config_lnd_tunables *tun)
3615 struct lnet_net *net;
3618 LIST_HEAD(net_head);
3620 rc = lnet_parse_ip2nets(&nets, ip2nets);
3624 rc = lnet_parse_networks(&net_head, nets);
3628 lnet_set_tune_defaults(tun);
3630 mutex_lock(&the_lnet.ln_api_mutex);
3631 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3636 while ((net = list_first_entry_or_null(&net_head,
3638 net_list)) != NULL) {
3639 list_del_init(&net->net_list);
3640 rc = lnet_add_net_common(net, tun);
3646 mutex_unlock(&the_lnet.ln_api_mutex);
3648 while ((net = list_first_entry_or_null(&net_head,
3650 net_list)) != NULL) {
3651 list_del_init(&net->net_list);
3657 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf, u32 net_id,
3658 struct lnet_ioctl_config_lnd_tunables *tun)
3660 struct lnet_net *net;
3665 /* handle legacy ip2nets from DLC */
3666 if (conf->lic_legacy_ip2nets[0] != '\0')
3667 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3670 lnd_type = LNET_NETTYP(net_id);
3672 if (!libcfs_isknown_lnd(lnd_type)) {
3673 CERROR("No valid net and lnd information provided\n");
3677 net = lnet_net_alloc(net_id, NULL);
3681 for (i = 0; i < conf->lic_ncpts; i++) {
3682 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER) {
3688 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3695 lnet_set_tune_defaults(tun);
3697 mutex_lock(&the_lnet.ln_api_mutex);
3698 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3702 rc = lnet_add_net_common(net, tun);
3705 mutex_unlock(&the_lnet.ln_api_mutex);
3707 /* If the NI already exists, delete this new unused copy */
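/* Dynamically remove a single NI, or the whole net when @nid carries no
 * address part: a smaller ping target is set up first, then the NI (or
 * net) is shut down, the acceptor is stopped, and the new ping buffer is
 * published. */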
3714 int lnet_dyn_del_ni(struct lnet_nid *nid)
3716 struct lnet_net *net;
3718 u32 net_id = LNET_NID_NET(nid);
3719 struct lnet_ping_buffer *pbuf;
3720 struct lnet_handle_md ping_mdh;
3724 /* don't allow userspace to shutdown the LOLND */
3725 if (LNET_NETTYP(net_id) == LOLND)
3728 mutex_lock(&the_lnet.ln_api_mutex);
3729 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3731 goto unlock_api_mutex;
3736 net = lnet_get_net_locked(net_id);
3738 CERROR("net %s not found\n",
3739 libcfs_net2str(net_id));
3744 if (!nid_addr_is_set(nid)) {
3745 /* remove the entire net */
3746 net_bytes = lnet_get_net_ni_bytes_locked(net);
3750 /* create and link a new ping info, before removing the old one */
3751 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3752 LNET_PING_INFO_HDR_SIZE +
3753 lnet_get_ni_bytes() - net_bytes,
3756 goto unlock_api_mutex;
3758 lnet_shutdown_lndnet(net);
3760 lnet_acceptor_stop();
3762 lnet_ping_target_update(pbuf, ping_mdh);
3764 goto unlock_api_mutex;
3767 ni = lnet_nid_to_ni_locked(nid, 0);
3769 CERROR("nid %s not found\n", libcfs_nidstr(nid));
3774 net_bytes = lnet_get_net_ni_bytes_locked(net);
3775 net_empty = list_is_singular(&net->net_ni_list);
3779 /* create and link a new ping info, before removing the old one */
3780 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3781 (LNET_PING_INFO_HDR_SIZE +
3782 lnet_get_ni_bytes() -
3783 lnet_ping_sts_size(&ni->ni_nid)),
3786 goto unlock_api_mutex;
3788 lnet_shutdown_lndni(ni);
3790 lnet_acceptor_stop();
3792 lnet_ping_target_update(pbuf, ping_mdh);
3794 /* check if the net is empty and remove it if it is */
3796 lnet_shutdown_lndnet(net);
3798 goto unlock_api_mutex;
3803 mutex_unlock(&the_lnet.ln_api_mutex);
3809 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3810 * They are only expected to be called for unique networks.
3811 * That can be as a result of older DLC library
3812 * calls. Multi-Rail DLC and beyond no longer uses these APIs.
3815 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3817 struct lnet_net *net;
3818 LIST_HEAD(net_head);
3820 struct lnet_ioctl_config_lnd_tunables tun;
3821 const char *nets = conf->cfg_config_u.cfg_net.net_intf;
3823 /* Create a net/ni structures for the network string */
3824 rc = lnet_parse_networks(&net_head, nets);
3826 return rc == 0 ? -EINVAL : rc;
3828 mutex_lock(&the_lnet.ln_api_mutex);
3829 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3831 goto out_unlock_clean;
3835 rc = -EINVAL; /* only add one network per call */
3836 goto out_unlock_clean;
3839 net = list_first_entry(&net_head, struct lnet_net, net_list);
3840 list_del_init(&net->net_list);
3842 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3844 memset(&tun, 0, sizeof(tun));
3846 tun.lt_cmn.lct_peer_timeout =
3847 (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
3848 conf->cfg_config_u.cfg_net.net_peer_timeout;
3849 tun.lt_cmn.lct_peer_tx_credits =
3850 (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
3851 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3852 tun.lt_cmn.lct_peer_rtr_credits =
3853 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3854 tun.lt_cmn.lct_max_tx_credits =
3855 (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
3856 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3858 rc = lnet_add_net_common(net, &tun);
3861 mutex_unlock(&the_lnet.ln_api_mutex);
3862 /* net_head list is empty in success case */
3863 while ((net = list_first_entry_or_null(&net_head,
3865 net_list)) != NULL) {
3866 list_del_init(&net->net_list);
3873 lnet_dyn_del_net(u32 net_id)
3875 struct lnet_net *net;
3876 struct lnet_ping_buffer *pbuf;
3877 struct lnet_handle_md ping_mdh;
3878 int net_ni_bytes, rc;
3880 /* don't allow userspace to shutdown the LOLND */
3881 if (LNET_NETTYP(net_id) == LOLND)
3884 mutex_lock(&the_lnet.ln_api_mutex);
3885 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3892 net = lnet_get_net_locked(net_id);
3899 net_ni_bytes = lnet_get_net_ni_bytes_locked(net);
3903 /* create and link a new ping info, before removing the old one */
3904 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3905 LNET_PING_INFO_HDR_SIZE +
3906 lnet_get_ni_bytes() - net_ni_bytes,
3911 lnet_shutdown_lndnet(net);
3913 lnet_acceptor_stop();
3915 lnet_ping_target_update(pbuf, ping_mdh);
3918 mutex_unlock(&the_lnet.ln_api_mutex);
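/* Ask the monitor thread to rebuild the ping target: set the update flag
 * and wake the thread, which calls lnet_update_ping_buffer().  Skipped on
 * routers (ln_routing set). */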
3923 void lnet_mark_ping_buffer_for_update(void)
3925 if (the_lnet.ln_routing)
3928 atomic_set(&the_lnet.ln_update_ping_buf, 1);
3929 complete(&the_lnet.ln_mt_wait_complete);
3931 EXPORT_SYMBOL(lnet_mark_ping_buffer_for_update);
3933 void lnet_update_ping_buffer(void)
3935 struct lnet_ping_buffer *pbuf;
3936 struct lnet_handle_md ping_mdh;
3938 if (atomic_dec_if_positive(&the_lnet.ln_update_ping_buf) < 0)
3941 mutex_lock(&the_lnet.ln_api_mutex);
3943 if (!lnet_ping_target_setup(&pbuf, &ping_mdh,
3944 LNET_PING_INFO_HDR_SIZE +
3945 lnet_get_ni_bytes(),
3947 lnet_ping_target_update(pbuf, ping_mdh);
3949 mutex_unlock(&the_lnet.ln_api_mutex);
3952 void lnet_incr_dlc_seq(void)
3954 atomic_inc(&lnet_dlc_seq_no);
3957 __u32 lnet_get_dlc_seq_locked(void)
3959 return atomic_read(&lnet_dlc_seq_no);
3963 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3965 struct lnet_net *net;
3968 lnet_net_lock(LNET_LOCK_EX);
3969 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3970 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3971 if (all || (nid_is_nid4(&ni->ni_nid) &&
3972 lnet_nid_to_nid4(&ni->ni_nid) == nid)) {
3973 atomic_set(&ni->ni_healthv, value);
3974 if (list_empty(&ni->ni_recovery) &&
3975 value < LNET_MAX_HEALTH_VALUE) {
3976 CERROR("manually adding local NI %s to recovery\n",
3977 libcfs_nidstr(&ni->ni_nid));
3978 list_add_tail(&ni->ni_recovery,
3979 &the_lnet.ln_mt_localNIRecovq);
3980 lnet_ni_addref_locked(ni, 0);
3983 lnet_net_unlock(LNET_LOCK_EX);
3989 lnet_net_unlock(LNET_LOCK_EX);
3993 lnet_ni_set_conns_per_peer(lnet_nid_t nid, int value, bool all)
3995 struct lnet_net *net;
3998 lnet_net_lock(LNET_LOCK_EX);
3999 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4000 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4001 if (lnet_nid_to_nid4(&ni->ni_nid) != nid && !all)
4003 if (LNET_NETTYP(net->net_id) == SOCKLND)
4004 ni->ni_lnd_tunables.lnd_tun_u.lnd_sock.lnd_conns_per_peer = value;
4005 else if (LNET_NETTYP(net->net_id) == O2IBLND)
4006 ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib.lnd_conns_per_peer = value;
4008 lnet_net_unlock(LNET_LOCK_EX);
4013 lnet_net_unlock(LNET_LOCK_EX);
4017 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
4021 struct lnet_nid nid;
4023 lnet_nid4_to_nid(stats->hlni_nid, &nid);
4024 cpt = lnet_net_lock_current();
4025 ni = lnet_nid_to_ni_locked(&nid, cpt);
4031 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
4032 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
4033 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
4034 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
4035 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
4036 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
4037 stats->hlni_fatal_error = atomic_read(&ni->ni_fatal_error_on);
4038 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
4039 stats->hlni_ping_count = ni->ni_ping_count;
4040 stats->hlni_next_ping = ni->ni_next_ping;
4043 lnet_net_unlock(cpt);
4049 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
4054 lnet_net_lock(LNET_LOCK_EX);
4055 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
4056 if (!nid_is_nid4(&ni->ni_nid))
4058 list->rlst_nid_array[i] = lnet_nid_to_nid4(&ni->ni_nid);
4060 if (i >= LNET_MAX_SHOW_NUM_NID)
4063 lnet_net_unlock(LNET_LOCK_EX);
4064 list->rlst_num_nids = i;
4070 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
4072 struct lnet_peer_ni *lpni;
4075 lnet_net_lock(LNET_LOCK_EX);
4076 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
4077 list->rlst_nid_array[i] = lnet_nid_to_nid4(&lpni->lpni_nid);
4079 if (i >= LNET_MAX_SHOW_NUM_NID)
4082 lnet_net_unlock(LNET_LOCK_EX);
4083 list->rlst_num_nids = i;
4089 * LNet ioctl handler.
4093 LNetCtl(unsigned int cmd, void *arg)
4095 struct libcfs_ioctl_data *data = arg;
4096 struct lnet_ioctl_config_data *config;
4098 struct lnet_nid nid;
4101 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
4102 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
4105 case IOC_LIBCFS_GET_NI: {
4106 struct lnet_processid id = {};
4108 rc = LNetGetId(data->ioc_count, &id, false);
4109 data->ioc_nid = lnet_nid_to_nid4(&id.nid);
4112 case IOC_LIBCFS_FAIL_NID:
4113 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
4115 case IOC_LIBCFS_ADD_ROUTE: {
4116 /* default router sensitivity to 1 */
4117 unsigned int sensitivity = 1;
4120 if (config->cfg_hdr.ioc_len < sizeof(*config))
4123 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
4125 config->cfg_config_u.cfg_route.rtr_sensitivity;
4128 lnet_nid4_to_nid(config->cfg_nid, &nid);
4129 mutex_lock(&the_lnet.ln_api_mutex);
4130 rc = lnet_add_route(config->cfg_net,
4131 config->cfg_config_u.cfg_route.rtr_hop,
4133 config->cfg_config_u.cfg_route.
4134 rtr_priority, sensitivity);
4135 mutex_unlock(&the_lnet.ln_api_mutex);
4139 case IOC_LIBCFS_DEL_ROUTE:
4142 if (config->cfg_hdr.ioc_len < sizeof(*config))
4145 lnet_nid4_to_nid(config->cfg_nid, &nid);
4146 mutex_lock(&the_lnet.ln_api_mutex);
4147 rc = lnet_del_route(config->cfg_net, &nid);
4148 mutex_unlock(&the_lnet.ln_api_mutex);
4151 case IOC_LIBCFS_GET_ROUTE:
4154 if (config->cfg_hdr.ioc_len < sizeof(*config))
4157 mutex_lock(&the_lnet.ln_api_mutex);
4158 rc = lnet_get_route(config->cfg_count,
4160 &config->cfg_config_u.cfg_route.rtr_hop,
4162 &config->cfg_config_u.cfg_route.rtr_flags,
4163 &config->cfg_config_u.cfg_route.
4165 &config->cfg_config_u.cfg_route.
4167 mutex_unlock(&the_lnet.ln_api_mutex);
4170 case IOC_LIBCFS_GET_LOCAL_NI: {
4171 struct lnet_ioctl_config_ni *cfg_ni;
4172 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
4173 struct lnet_ioctl_element_stats *stats;
4178 /* get the tunables if they are available */
4179 if (cfg_ni->lic_cfg_hdr.ioc_len <
4180 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
4183 stats = (struct lnet_ioctl_element_stats *)
4185 tun = (struct lnet_ioctl_config_lnd_tunables *)
4186 (cfg_ni->lic_bulk + sizeof(*stats));
4188 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
4191 mutex_lock(&the_lnet.ln_api_mutex);
4192 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
4193 mutex_unlock(&the_lnet.ln_api_mutex);
4197 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
4198 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
4200 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
4203 mutex_lock(&the_lnet.ln_api_mutex);
4204 rc = lnet_get_ni_stats(msg_stats);
4205 mutex_unlock(&the_lnet.ln_api_mutex);
4210 case IOC_LIBCFS_GET_NET: {
4211 size_t total = sizeof(*config) +
4212 sizeof(struct lnet_ioctl_net_config);
4215 if (config->cfg_hdr.ioc_len < total)
4218 mutex_lock(&the_lnet.ln_api_mutex);
4219 rc = lnet_get_net_config(config);
4220 mutex_unlock(&the_lnet.ln_api_mutex);
4224 case IOC_LIBCFS_GET_LNET_STATS:
4226 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
4228 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
4231 mutex_lock(&the_lnet.ln_api_mutex);
4232 rc = lnet_counters_get(&lnet_stats->st_cntrs);
4233 mutex_unlock(&the_lnet.ln_api_mutex);
4237 case IOC_LIBCFS_RESET_LNET_STATS:
4239 mutex_lock(&the_lnet.ln_api_mutex);
4240 lnet_counters_reset();
4241 mutex_unlock(&the_lnet.ln_api_mutex);
4245 case IOC_LIBCFS_CONFIG_RTR:
4248 if (config->cfg_hdr.ioc_len < sizeof(*config))
4251 mutex_lock(&the_lnet.ln_api_mutex);
4252 if (config->cfg_config_u.cfg_buffers.buf_enable) {
4253 rc = lnet_rtrpools_enable();
4254 mutex_unlock(&the_lnet.ln_api_mutex);
4257 lnet_rtrpools_disable();
4258 mutex_unlock(&the_lnet.ln_api_mutex);
4261 case IOC_LIBCFS_ADD_BUF:
4264 if (config->cfg_hdr.ioc_len < sizeof(*config))
4267 mutex_lock(&the_lnet.ln_api_mutex);
4268 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
4270 config->cfg_config_u.cfg_buffers.
4272 config->cfg_config_u.cfg_buffers.
4274 mutex_unlock(&the_lnet.ln_api_mutex);
4277 case IOC_LIBCFS_SET_NUMA_RANGE: {
4278 struct lnet_ioctl_set_value *numa;
4280 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4282 lnet_net_lock(LNET_LOCK_EX);
4283 lnet_numa_range = numa->sv_value;
4284 lnet_net_unlock(LNET_LOCK_EX);
4288 case IOC_LIBCFS_GET_NUMA_RANGE: {
4289 struct lnet_ioctl_set_value *numa;
4291 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4293 numa->sv_value = lnet_numa_range;
4297 case IOC_LIBCFS_GET_BUF: {
4298 struct lnet_ioctl_pool_cfg *pool_cfg;
4299 size_t total = sizeof(*config) + sizeof(*pool_cfg);
4303 if (config->cfg_hdr.ioc_len < total)
4306 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
4308 mutex_lock(&the_lnet.ln_api_mutex);
4309 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
4310 mutex_unlock(&the_lnet.ln_api_mutex);
4314 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
4315 struct lnet_ioctl_local_ni_hstats *stats = arg;
4317 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
4320 mutex_lock(&the_lnet.ln_api_mutex);
4321 rc = lnet_get_local_ni_hstats(stats);
4322 mutex_unlock(&the_lnet.ln_api_mutex);
4327 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
4328 struct lnet_ioctl_recovery_list *list = arg;
4329 if (list->rlst_hdr.ioc_len < sizeof(*list))
4332 mutex_lock(&the_lnet.ln_api_mutex);
4333 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
4334 rc = lnet_get_local_ni_recovery_list(list);
4336 rc = lnet_get_peer_ni_recovery_list(list);
4337 mutex_unlock(&the_lnet.ln_api_mutex);
4341 case IOC_LIBCFS_ADD_PEER_NI: {
4342 struct lnet_ioctl_peer_cfg *cfg = arg;
4343 struct lnet_nid prim_nid;
4345 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4348 mutex_lock(&the_lnet.ln_api_mutex);
4349 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid);
4350 lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid);
4351 rc = lnet_user_add_peer_ni(&prim_nid, &nid, cfg->prcfg_mr,
4352 cfg->prcfg_count == 1);
4353 mutex_unlock(&the_lnet.ln_api_mutex);
4357 case IOC_LIBCFS_DEL_PEER_NI: {
4358 struct lnet_ioctl_peer_cfg *cfg = arg;
4359 struct lnet_nid prim_nid;
4361 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4364 mutex_lock(&the_lnet.ln_api_mutex);
4365 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid);
4366 lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid);
4367 rc = lnet_del_peer_ni(&prim_nid,
4370 mutex_unlock(&the_lnet.ln_api_mutex);
4374 case IOC_LIBCFS_GET_PEER_INFO: {
4375 struct lnet_ioctl_peer *peer_info = arg;
4377 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
4380 mutex_lock(&the_lnet.ln_api_mutex);
4381 rc = lnet_get_peer_ni_info(
4382 peer_info->pr_count,
4384 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
4385 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
4386 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
4387 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
4388 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
4389 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
4390 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
4391 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
4392 mutex_unlock(&the_lnet.ln_api_mutex);
4396 case IOC_LIBCFS_GET_PEER_NI: {
4397 struct lnet_ioctl_peer_cfg *cfg = arg;
4399 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4402 mutex_lock(&the_lnet.ln_api_mutex);
4403 rc = lnet_get_peer_info(cfg,
4404 (void __user *)cfg->prcfg_bulk);
4405 mutex_unlock(&the_lnet.ln_api_mutex);
4409 case IOC_LIBCFS_GET_PEER_LIST: {
4410 struct lnet_ioctl_peer_cfg *cfg = arg;
4412 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4415 mutex_lock(&the_lnet.ln_api_mutex);
4416 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
4417 (struct lnet_process_id __user *)cfg->prcfg_bulk);
4418 mutex_unlock(&the_lnet.ln_api_mutex);
4422 case IOC_LIBCFS_SET_HEALHV: {
4423 struct lnet_ioctl_reset_health_cfg *cfg = arg;
4425 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
4427 if (cfg->rh_value < 0 ||
4428 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
4429 value = LNET_MAX_HEALTH_VALUE;
4431 value = cfg->rh_value;
4432 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
4433 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
4434 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
4435 mutex_lock(&the_lnet.ln_api_mutex);
4436 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
4437 lnet_ni_set_healthv(cfg->rh_nid, value,
4440 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
4442 mutex_unlock(&the_lnet.ln_api_mutex);
4446 case IOC_LIBCFS_SET_PEER: {
4447 struct lnet_ioctl_peer_cfg *cfg = arg;
4448 struct lnet_peer *lp;
4450 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4453 mutex_lock(&the_lnet.ln_api_mutex);
4454 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4455 lp = lnet_find_peer(&nid);
4457 mutex_unlock(&the_lnet.ln_api_mutex);
4460 spin_lock(&lp->lp_lock);
4461 lp->lp_state = cfg->prcfg_state;
4462 spin_unlock(&lp->lp_lock);
4463 lnet_peer_decref_locked(lp);
4464 mutex_unlock(&the_lnet.ln_api_mutex);
4465 CDEBUG(D_NET, "Set peer %s state to %u\n",
4466 libcfs_nid2str(cfg->prcfg_prim_nid), cfg->prcfg_state);
4470 case IOC_LIBCFS_SET_CONNS_PER_PEER: {
4471 struct lnet_ioctl_reset_conns_per_peer_cfg *cfg = arg;
4474 if (cfg->rcpp_hdr.ioc_len < sizeof(*cfg))
4476 if (cfg->rcpp_value < 0)
4479 value = cfg->rcpp_value;
4481 "Setting conns_per_peer to %d for %s. all = %d\n",
4482 value, libcfs_nid2str(cfg->rcpp_nid), cfg->rcpp_all);
4483 mutex_lock(&the_lnet.ln_api_mutex);
4484 lnet_ni_set_conns_per_peer(cfg->rcpp_nid, value, cfg->rcpp_all);
4485 mutex_unlock(&the_lnet.ln_api_mutex);
4489 case IOC_LIBCFS_NOTIFY_ROUTER: {
4490 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
4492 /* The deadline passed in by the user should be some time in
4493 * seconds in the future since the UNIX epoch. We have to map
4494 * that deadline to the wall clock.
4496 deadline += ktime_get_seconds();
4497 lnet_nid4_to_nid(data->ioc_nid, &nid);
4498 return lnet_notify(NULL, &nid, data->ioc_flags, false,
4502 case IOC_LIBCFS_LNET_DIST:
4503 lnet_nid4_to_nid(data->ioc_nid, &nid);
4504 rc = LNetDist(&nid, &nid, &data->ioc_u32[1]);
4505 if (rc < 0 && rc != -EHOSTUNREACH)
4508 data->ioc_nid = lnet_nid_to_nid4(&nid);
4509 data->ioc_u32[0] = rc;
4512 case IOC_LIBCFS_TESTPROTOCOMPAT:
4513 the_lnet.ln_testprotocompat = data->ioc_flags;
4516 case IOC_LIBCFS_LNET_FAULT:
4517 return lnet_fault_ctl(data->ioc_flags, data);
4519 case IOC_LIBCFS_PING_PEER: {
4520 struct lnet_ioctl_ping_data *ping = arg;
4521 struct lnet_process_id __user *ids = ping->ping_buf;
4522 struct lnet_nid src_nid = LNET_ANY_NID;
4523 struct lnet_genl_ping_list plist;
4524 struct lnet_processid id;
4525 struct lnet_peer *lp;
4526 signed long timeout;
4529 /* Check if the supplied ping data supports source nid
4530 * NB: This check is sufficient if lnet_ioctl_ping_data has
4531 * additional fields added, but if they are re-ordered or
4532 * fields removed then this will break. It is expected that
4533 * these ioctls will be replaced with netlink implementation, so
4534 * it is probably not worth coming up with a more robust version
4535 * compatibility scheme.
4537 if (ping->ping_hdr.ioc_len >= sizeof(struct lnet_ioctl_ping_data))
4538 lnet_nid4_to_nid(ping->ping_src, &src_nid);
4540 /* If timeout is negative then set default of 3 minutes */
4541 if (((s32)ping->op_param) <= 0 ||
4542 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4543 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4545 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
4547 id.pid = ping->ping_id.pid;
4548 lnet_nid4_to_nid(ping->ping_id.nid, &id.nid);
4549 rc = lnet_ping(&id, &src_nid, timeout, &plist,
4552 goto report_ping_err;
4556 for (i = 0; i < count; i++) {
4557 struct lnet_processid *result;
4558 struct lnet_process_id tmpid;
4560 result = genradix_ptr(&plist.lgpl_list, i);
4561 memset(&tmpid, 0, sizeof(tmpid));
4562 tmpid.pid = result->pid;
4563 tmpid.nid = lnet_nid_to_nid4(&result->nid);
4564 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid))) {
4566 goto report_ping_err;
4570 mutex_lock(&the_lnet.ln_api_mutex);
4571 lp = lnet_find_peer(&id.nid);
4574 lnet_nid_to_nid4(&lp->lp_primary_nid);
4575 ping->mr_info = lnet_peer_is_multi_rail(lp);
4576 lnet_peer_decref_locked(lp);
4578 mutex_unlock(&the_lnet.ln_api_mutex);
4580 ping->ping_count = count;
4582 genradix_free(&plist.lgpl_list);
4586 case IOC_LIBCFS_DISCOVER: {
4587 struct lnet_ioctl_ping_data *discover = arg;
4588 struct lnet_peer *lp;
4590 rc = lnet_discover(discover->ping_id, discover->op_param,
4592 discover->ping_count);
4596 mutex_lock(&the_lnet.ln_api_mutex);
4597 lnet_nid4_to_nid(discover->ping_id.nid, &nid);
4598 lp = lnet_find_peer(&nid);
4600 discover->ping_id.nid =
4601 lnet_nid_to_nid4(&lp->lp_primary_nid);
4602 discover->mr_info = lnet_peer_is_multi_rail(lp);
4603 lnet_peer_decref_locked(lp);
4605 mutex_unlock(&the_lnet.ln_api_mutex);
4607 discover->ping_count = rc;
4611 case IOC_LIBCFS_ADD_UDSP: {
4612 struct lnet_ioctl_udsp *ioc_udsp = arg;
4613 __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
4615 mutex_lock(&the_lnet.ln_api_mutex);
4616 rc = lnet_udsp_demarshal_add(arg, bulk_size);
4618 rc = lnet_udsp_apply_policies(NULL, false);
4619 CDEBUG(D_NET, "policy application returned %d\n", rc);
4622 mutex_unlock(&the_lnet.ln_api_mutex);
4627 case IOC_LIBCFS_DEL_UDSP: {
4628 struct lnet_ioctl_udsp *ioc_udsp = arg;
4629 int idx = ioc_udsp->iou_idx;
4631 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4634 mutex_lock(&the_lnet.ln_api_mutex);
4635 rc = lnet_udsp_del_policy(idx);
4636 mutex_unlock(&the_lnet.ln_api_mutex);
4641 case IOC_LIBCFS_GET_UDSP_SIZE: {
4642 struct lnet_ioctl_udsp *ioc_udsp = arg;
4643 struct lnet_udsp *udsp;
4645 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4650 mutex_lock(&the_lnet.ln_api_mutex);
4651 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4655 /* Coming in, iou_idx holds the index of the UDSP to get the
4656 * size of. Going out, iou_idx holds the size of the UDSP
4657 * found at the passed-in index.
4658 */
4660 ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
4661 if (ioc_udsp->iou_idx < 0)
4664 mutex_unlock(&the_lnet.ln_api_mutex);
4669 case IOC_LIBCFS_GET_UDSP: {
4670 struct lnet_ioctl_udsp *ioc_udsp = arg;
4671 struct lnet_udsp *udsp;
4673 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4678 mutex_lock(&the_lnet.ln_api_mutex);
4679 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4683 rc = lnet_udsp_marshal(udsp, ioc_udsp);
4684 mutex_unlock(&the_lnet.ln_api_mutex);
4689 case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
4690 struct lnet_ioctl_construct_udsp_info *info = arg;
4692 if (info->cud_hdr.ioc_len < sizeof(*info))
4695 CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
4696 libcfs_nid2str(info->cud_nid));
4698 mutex_lock(&the_lnet.ln_api_mutex);
4699 lnet_udsp_get_construct_info(info);
4700 mutex_unlock(&the_lnet.ln_api_mutex);
4706 ni = lnet_net2ni_addref(data->ioc_net);
4710 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
4713 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
4716 return rc <= 0 ? rc : 0;
4720 EXPORT_SYMBOL(LNetCtl);
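/* Key tables describing the attributes carried in LNET_CMD_NETS
* netlink replies: the net type, its local NI(s) and each NI's
* interfaces. lnet_net_show_dump() sends these tables to userspace
* ahead of the values so the nested attributes can be decoded.
*/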
4722 static const struct ln_key_list net_props_list = {
4723 .lkl_maxattr = LNET_NET_ATTR_MAX,
4725 [LNET_NET_ATTR_HDR] = {
4727 .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
4728 .lkp_data_type = NLA_NUL_STRING,
4730 [LNET_NET_ATTR_TYPE] = {
4731 .lkp_value = "net type",
4732 .lkp_data_type = NLA_STRING
4734 [LNET_NET_ATTR_LOCAL] = {
4735 .lkp_value = "local NI(s)",
4736 .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
4737 .lkp_data_type = NLA_NESTED
4742 static struct ln_key_list local_ni_list = {
4743 .lkl_maxattr = LNET_NET_LOCAL_NI_ATTR_MAX,
4745 [LNET_NET_LOCAL_NI_ATTR_NID] = {
4747 .lkp_data_type = NLA_STRING
4749 [LNET_NET_LOCAL_NI_ATTR_STATUS] = {
4750 .lkp_value = "status",
4751 .lkp_data_type = NLA_STRING
4753 [LNET_NET_LOCAL_NI_ATTR_INTERFACE] = {
4754 .lkp_value = "interfaces",
4755 .lkp_key_format = LNKF_MAPPING,
4756 .lkp_data_type = NLA_NESTED
4761 static const struct ln_key_list local_ni_interfaces_list = {
4762 .lkl_maxattr = LNET_NET_LOCAL_NI_INTF_ATTR_MAX,
4764 [LNET_NET_LOCAL_NI_INTF_ATTR_TYPE] = {
4766 .lkp_data_type = NLA_STRING
4771 /* Use an index since the traversal is across LNet nets and ni collections */
4772 struct lnet_genl_net_list {
4773 unsigned int lngl_net_id;
4774 unsigned int lngl_idx;
4777 static inline struct lnet_genl_net_list *
4778 lnet_net_dump_ctx(struct netlink_callback *cb)
4780 return (struct lnet_genl_net_list *)cb->args[0];
4783 static int lnet_net_show_done(struct netlink_callback *cb)
4785 struct lnet_genl_net_list *nlist = lnet_net_dump_ctx(cb);
4788 LIBCFS_FREE(nlist, sizeof(*nlist));
4795 /* LNet net ->start() handler for GET requests */
4796 static int lnet_net_show_start(struct netlink_callback *cb)
4798 struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
4799 #ifdef HAVE_NL_PARSE_WITH_EXT_ACK
4800 struct netlink_ext_ack *extack = NULL;
4802 struct lnet_genl_net_list *nlist;
4803 int msg_len = genlmsg_len(gnlh);
4804 struct nlattr *params, *top;
4807 #ifdef HAVE_NL_DUMP_WITH_EXT_ACK
4808 extack = cb->extack;
4810 if (the_lnet.ln_refcount == 0) {
4811 NL_SET_ERR_MSG(extack, "LNet stack down");
4815 LIBCFS_ALLOC(nlist, sizeof(*nlist));
4819 nlist->lngl_net_id = LNET_NET_ANY;
4820 nlist->lngl_idx = 0;
4821 cb->args[0] = (long)nlist;
4826 params = genlmsg_data(gnlh);
4827 if (!(nla_type(params) & LN_SCALAR_ATTR_LIST)) {
4828 NL_SET_ERR_MSG(extack, "invalid configuration");
4832 nla_for_each_nested(top, params, rem) {
4836 nla_for_each_nested(net, top, rem2) {
4837 char filter[LNET_NIDSTR_SIZE];
4839 if (nla_type(net) != LN_SCALAR_ATTR_VALUE ||
4840 nla_strcmp(net, "net type") != 0)
4843 net = nla_next(net, &rem2);
4844 if (nla_type(net) != LN_SCALAR_ATTR_VALUE) {
4845 NL_SET_ERR_MSG(extack, "invalid config param");
4846 GOTO(report_err, rc = -EINVAL);
4849 rc = nla_strscpy(filter, net, sizeof(filter));
4851 NL_SET_ERR_MSG(extack, "failed to get param");
4852 GOTO(report_err, rc);
4856 nlist->lngl_net_id = libcfs_str2net(filter);
4857 if (nlist->lngl_net_id == LNET_NET_ANY) {
4858 NL_SET_ERR_MSG(extack, "cannot parse net");
4859 GOTO(report_err, rc = -ENOENT);
4865 lnet_net_show_done(cb);
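/* ->dumpit() handler for LNET_CMD_NETS GET requests. The key table
* is sent on the first pass, then one message per matching local NI;
* lngl_idx records how far the dump has progressed between calls.
*/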
4870 static int lnet_net_show_dump(struct sk_buff *msg,
4871 struct netlink_callback *cb)
4873 struct lnet_genl_net_list *nlist = lnet_net_dump_ctx(cb);
4874 #ifdef HAVE_NL_PARSE_WITH_EXT_ACK
4875 struct netlink_ext_ack *extack = NULL;
4877 int portid = NETLINK_CB(cb->skb).portid;
4878 int seq = cb->nlh->nlmsg_seq;
4879 struct lnet_net *net;
4880 int idx = 0, rc = 0;
4884 #ifdef HAVE_NL_DUMP_WITH_EXT_ACK
4885 extack = cb->extack;
4887 if (!nlist->lngl_idx) {
4888 const struct ln_key_list *all[] = {
4889 &net_props_list, &local_ni_list,
4890 &local_ni_interfaces_list,
4894 rc = lnet_genl_send_scalar_list(msg, portid, seq,
4896 NLM_F_CREATE | NLM_F_MULTI,
4897 LNET_CMD_NETS, all);
4899 NL_SET_ERR_MSG(extack, "failed to send key table");
4900 GOTO(send_error, rc);
4904 lnet_net_lock(LNET_LOCK_EX);
4906 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4909 if (nlist->lngl_net_id != LNET_NET_ANY &&
4910 nlist->lngl_net_id != net->net_id)
4913 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4914 struct nlattr *local_ni, *ni_attr;
4915 char *status = "up";
4917 if (idx++ < nlist->lngl_idx)
4920 hdr = genlmsg_put(msg, portid, seq, &lnet_family,
4921 NLM_F_MULTI, LNET_CMD_NETS);
4923 NL_SET_ERR_MSG(extack, "failed to send values");
4924 GOTO(net_unlock, rc = -EMSGSIZE);
4928 nla_put_string(msg, LNET_NET_ATTR_HDR, "");
4930 nla_put_string(msg, LNET_NET_ATTR_TYPE,
4931 libcfs_net2str(net->net_id));
4934 local_ni = nla_nest_start(msg, LNET_NET_ATTR_LOCAL);
4935 ni_attr = nla_nest_start(msg, idx - 1);
4938 nla_put_string(msg, LNET_NET_LOCAL_NI_ATTR_NID,
4939 libcfs_nidstr(&ni->ni_nid));
4940 if (nid_is_lo0(&ni->ni_nid) &&
4941 *ni->ni_status != LNET_NI_STATUS_UP)
4942 status = "down";
4943 nla_put_string(msg, LNET_NET_LOCAL_NI_ATTR_STATUS, status);
4945 if (!nid_is_lo0(&ni->ni_nid) && ni->ni_interface) {
4946 struct nlattr *intf_nest, *intf_attr;
4948 intf_nest = nla_nest_start(msg,
4949 LNET_NET_LOCAL_NI_ATTR_INTERFACE);
4950 intf_attr = nla_nest_start(msg, 0);
4952 LNET_NET_LOCAL_NI_INTF_ATTR_TYPE,
4954 nla_nest_end(msg, intf_attr);
4955 nla_nest_end(msg, intf_nest);
4959 nla_nest_end(msg, ni_attr);
4960 nla_nest_end(msg, local_ni);
4962 genlmsg_end(msg, hdr);
4967 struct nlmsghdr *nlh = nlmsg_hdr(msg);
4969 nlmsg_cancel(msg, nlh);
4970 NL_SET_ERR_MSG(extack, "Network is down");
4974 lnet_net_unlock(LNET_LOCK_EX);
4976 nlist->lngl_idx = idx;
4978 return lnet_nl_send_error(cb->skb, portid, seq, rc);
4981 #ifndef HAVE_NETLINK_CALLBACK_START
4982 static int lnet_old_net_show_dump(struct sk_buff *msg,
4983 struct netlink_callback *cb)
4986 int rc = lnet_net_show_start(cb);
4992 return lnet_net_show_dump(msg, cb);
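/* Parse the common NI tunables (peer_timeout, peer_credits,
* peer_buffer_credits, credits) from a netlink scalar list into the
* common portion of struct lnet_ioctl_config_lnd_tunables.
*/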
4996 static int lnet_genl_parse_tunables(struct nlattr *settings,
4997 struct lnet_ioctl_config_lnd_tunables *tun)
4999 struct nlattr *param;
5002 nla_for_each_nested(param, settings, rem) {
5003 int type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_UNSPEC;
5006 if (nla_type(param) != LN_SCALAR_ATTR_VALUE)
5009 if (nla_strcmp(param, "peer_timeout") == 0)
5010 type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_TIMEOUT;
5011 else if (nla_strcmp(param, "peer_credits") == 0)
5012 type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_CREDITS;
5013 else if (nla_strcmp(param, "peer_buffer_credits") == 0)
5014 type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_BUFFER_CREDITS;
5015 else if (nla_strcmp(param, "credits") == 0)
5016 type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_CREDITS;
5018 param = nla_next(param, &rem);
5019 if (nla_type(param) != LN_SCALAR_ATTR_INT_VALUE)
5022 num = nla_get_s64(param);
5024 case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_TIMEOUT:
5026 tun->lt_cmn.lct_peer_timeout = num;
5028 case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_CREDITS:
5030 tun->lt_cmn.lct_peer_tx_credits = num;
5032 case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_BUFFER_CREDITS:
5034 tun->lt_cmn.lct_peer_rtr_credits = num;
5036 case LNET_NET_LOCAL_NI_TUNABLES_ATTR_CREDITS:
5038 tun->lt_cmn.lct_max_tx_credits = num;
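/* Match each LND-specific tunable name against the driver's own key
* table and hand the value to its lnd_nl_set() callback. LNDs that
* expose no netlink tunables are skipped.
*/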
5048 static int lnet_genl_parse_lnd_tunables(struct nlattr *settings,
5049 struct lnet_lnd_tunables *tun,
5050 const struct lnet_lnd *lnd)
5052 const struct ln_key_list *list = lnd->lnd_keys;
5053 struct nlattr *param;
5057 /* Silently ignore these settings if the LND driver doesn't
5058 * support any LND tunables.
5059 */
5060 if (!list || !lnd->lnd_nl_set || !list->lkl_maxattr)
5063 nla_for_each_nested(param, settings, rem) {
5064 if (nla_type(param) != LN_SCALAR_ATTR_VALUE)
5067 for (i = 1; i <= list->lkl_maxattr; i++) {
5068 if (!list->lkl_list[i].lkp_value ||
5069 nla_strcmp(param, list->lkl_list[i].lkp_value) != 0)
5072 param = nla_next(param, &rem);
5073 rc = lnd->lnd_nl_set(LNET_CMD_NETS, param, i, tun);
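/* Parse one local NI description (interfaces, tunables, LND
* tunables, CPT list) and, depending on NLM_F_CREATE, dynamically
* add the NI or delete the matching existing one.
*/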
5083 lnet_genl_parse_local_ni(struct nlattr *entry, struct genl_info *info,
5084 int net_id, struct lnet_ioctl_config_ni *conf,
5087 bool create = info->nlhdr->nlmsg_flags & NLM_F_CREATE;
5088 struct lnet_ioctl_config_lnd_tunables *tun;
5089 struct nlattr *settings;
5092 LIBCFS_ALLOC(tun, sizeof(struct lnet_ioctl_config_lnd_tunables));
5094 GENL_SET_ERR_MSG(info, "cannot allocate memory for tunables");
5095 GOTO(out, rc = -ENOMEM);
5098 /* Use LND defaults */
5099 tun->lt_cmn.lct_peer_timeout = -1;
5100 tun->lt_cmn.lct_peer_tx_credits = -1;
5101 tun->lt_cmn.lct_peer_rtr_credits = -1;
5102 tun->lt_cmn.lct_max_tx_credits = -1;
5103 conf->lic_ncpts = 0;
5105 nla_for_each_nested(settings, entry, rem3) {
5106 if (nla_type(settings) != LN_SCALAR_ATTR_VALUE)
5109 if (nla_strcmp(settings, "interfaces") == 0) {
5110 struct nlattr *intf;
5113 settings = nla_next(settings, &rem3);
5114 if (nla_type(settings) !=
5115 LN_SCALAR_ATTR_LIST) {
5116 GENL_SET_ERR_MSG(info,
5117 "invalid interfaces");
5118 GOTO(out, rc = -EINVAL);
5121 nla_for_each_nested(intf, settings, rem4) {
5122 intf = nla_next(intf, &rem4);
5123 if (nla_type(intf) !=
5124 LN_SCALAR_ATTR_VALUE) {
5125 GENL_SET_ERR_MSG(info,
5126 "0 key is invalid");
5127 GOTO(out, rc = -EINVAL);
5130 rc = nla_strscpy(conf->lic_ni_intf, intf,
5131 sizeof(conf->lic_ni_intf));
5133 GENL_SET_ERR_MSG(info,
5134 "failed to parse interfaces");
5139 } else if (nla_strcmp(settings, "tunables") == 0) {
5140 settings = nla_next(settings, &rem3);
5141 if (nla_type(settings) !=
5142 LN_SCALAR_ATTR_LIST) {
5143 GENL_SET_ERR_MSG(info,
5144 "invalid tunables");
5145 GOTO(out, rc = -EINVAL);
5148 rc = lnet_genl_parse_tunables(settings, tun);
5150 GENL_SET_ERR_MSG(info,
5151 "failed to parse tunables");
5154 } else if ((nla_strcmp(settings, "lnd tunables") == 0)) {
5155 const struct lnet_lnd *lnd;
5157 lnd = lnet_load_lnd(LNET_NETTYP(net_id));
5159 GENL_SET_ERR_MSG(info,
5160 "LND type not supported");
5161 GOTO(out, rc = PTR_ERR(lnd));
5164 settings = nla_next(settings, &rem3);
5165 if (nla_type(settings) !=
5166 LN_SCALAR_ATTR_LIST) {
5167 GENL_SET_ERR_MSG(info,
5168 "lnd tunables should be list\n");
5169 GOTO(out, rc = -EINVAL);
5172 rc = lnet_genl_parse_lnd_tunables(settings,
5175 GENL_SET_ERR_MSG(info,
5176 "failed to parse lnd tunables");
5179 } else if (nla_strcmp(settings, "CPT") == 0) {
5183 settings = nla_next(settings, &rem3);
5184 if (nla_type(settings) != LN_SCALAR_ATTR_LIST) {
5185 GENL_SET_ERR_MSG(info,
5186 "CPT should be list");
5187 GOTO(out, rc = -EINVAL);
5190 nla_for_each_nested(cpt, settings, rem4) {
5193 if (nla_type(cpt) !=
5194 LN_SCALAR_ATTR_INT_VALUE) {
5195 GENL_SET_ERR_MSG(info,
5196 "invalid CPT config");
5197 GOTO(out, rc = -EINVAL);
5200 core = nla_get_s64(cpt);
5201 if (core >= LNET_CPT_NUMBER) {
5202 GENL_SET_ERR_MSG(info,
5203 "invalid CPT value");
5204 GOTO(out, rc = -ERANGE);
5207 conf->lic_cpts[conf->lic_ncpts] = core;
5214 struct lnet_net *net;
5218 if (!strlen(conf->lic_ni_intf)) {
5219 GENL_SET_ERR_MSG(info,
5220 "interface is missing");
5224 lnet_net_lock(LNET_LOCK_EX);
5225 net = lnet_get_net_locked(net_id);
5227 GENL_SET_ERR_MSG(info,
5228 "LNet net doesn't exist");
5229 lnet_net_unlock(LNET_LOCK_EX);
5233 list_for_each_entry(ni, &net->net_ni_list,
5235 if (!ni->ni_interface ||
5236 strcmp(ni->ni_interface,
5237 conf->lic_ni_intf) != 0)
5240 lnet_net_unlock(LNET_LOCK_EX);
5241 rc = lnet_dyn_del_ni(&ni->ni_nid);
5243 GENL_SET_ERR_MSG(info,
5244 "cannot del LNet NI");
5250 if (rc < 0) { /* will be -ENODEV */
5251 GENL_SET_ERR_MSG(info,
5252 "interface invalid for deleting LNet NI");
5253 lnet_net_unlock(LNET_LOCK_EX);
5256 if (!strlen(conf->lic_ni_intf)) {
5257 GENL_SET_ERR_MSG(info,
5258 "interface is missing");
5262 rc = lnet_dyn_add_ni(conf, net_id, tun);
5265 GENL_SET_ERR_MSG(info,
5266 "cannot parse net");
5269 GENL_SET_ERR_MSG(info,
5273 GENL_SET_ERR_MSG(info,
5274 "cannot add LNet NI");
5281 LIBCFS_FREE(tun, sizeof(struct lnet_ioctl_config_lnd_tunables));
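/* ->doit() handler for LNET_CMD_NETS: walk the nested scalar
* attributes (ip2net, net type, NI list) sent by userspace and add
* or delete networks and NIs accordingly.
*/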
5286 static int lnet_net_cmd(struct sk_buff *skb, struct genl_info *info)
5288 struct nlmsghdr *nlh = nlmsg_hdr(skb);
5289 struct genlmsghdr *gnlh = nlmsg_data(nlh);
5290 struct nlattr *params = genlmsg_data(gnlh);
5291 int msg_len, rem, rc = 0;
5292 struct nlattr *attr;
5294 msg_len = genlmsg_len(gnlh);
5296 GENL_SET_ERR_MSG(info, "no configuration");
5300 if (!(nla_type(params) & LN_SCALAR_ATTR_LIST)) {
5301 GENL_SET_ERR_MSG(info, "invalid configuration");
5305 nla_for_each_nested(attr, params, rem) {
5306 struct lnet_ioctl_config_ni conf;
5307 u32 net_id = LNET_NET_ANY;
5308 struct nlattr *entry;
5309 bool ni_list = false;
5312 if (nla_type(attr) != LN_SCALAR_ATTR_LIST)
5315 nla_for_each_nested(entry, attr, rem2) {
5316 switch (nla_type(entry)) {
5317 case LN_SCALAR_ATTR_VALUE: {
5320 memset(&conf, 0, sizeof(conf));
5321 if (nla_strcmp(entry, "ip2net") == 0) {
5322 entry = nla_next(entry, &rem2);
5323 if (nla_type(entry) !=
5324 LN_SCALAR_ATTR_VALUE) {
5325 GENL_SET_ERR_MSG(info,
5326 "ip2net has invalid key");
5327 GOTO(out, rc = -EINVAL);
5330 len = nla_strscpy(conf.lic_legacy_ip2nets,
5332 sizeof(conf.lic_legacy_ip2nets));
5334 GENL_SET_ERR_MSG(info,
5335 "ip2net key string is invalid");
5336 GOTO(out, rc = len);
5339 } else if (nla_strcmp(entry, "net type") == 0) {
5340 char tmp[LNET_NIDSTR_SIZE];
5342 entry = nla_next(entry, &rem2);
5343 if (nla_type(entry) !=
5344 LN_SCALAR_ATTR_VALUE) {
5345 GENL_SET_ERR_MSG(info,
5346 "net type has invalid key");
5347 GOTO(out, rc = -EINVAL);
5350 len = nla_strscpy(tmp, entry,
5353 GENL_SET_ERR_MSG(info,
5354 "net type key string is invalid");
5355 GOTO(out, rc = len);
5358 net_id = libcfs_str2net(tmp);
5360 GENL_SET_ERR_MSG(info,
5361 "cannot parse net");
5362 GOTO(out, rc = -ENODEV);
5364 if (LNET_NETTYP(net_id) == LOLND) {
5365 GENL_SET_ERR_MSG(info,
5366 "setting @lo not allowed");
5367 GOTO(out, rc = -ENODEV);
5369 conf.lic_legacy_ip2nets[0] = '\0';
5370 conf.lic_ni_intf[0] = '\0';
5377 case LN_SCALAR_ATTR_LIST: {
5378 struct nlattr *interface;
5381 nla_for_each_nested(interface, entry, rem3) {
5382 rc = lnet_genl_parse_local_ni(interface, info,
5390 /* It is possible that a newer version of userland sends
5391 * values that older kernels don't handle, so silently
5392 * ignore such values.
5393 */
5399 /* Handle the case where only the NET was sent, with no list of NIDs */
5400 if (!(info->nlhdr->nlmsg_flags & NLM_F_CREATE) && !ni_list) {
5401 rc = lnet_dyn_del_net(net_id);
5403 GENL_SET_ERR_MSG(info,
5404 "cannot del network");
5412 /** LNet route handling */
5414 /* We can't use struct lnet_ioctl_config_data since it lacks
5415 * support for large NIDs.
5416 */
5417 struct lnet_route_properties {
5418 struct lnet_nid lrp_gateway;
5423 u32 lrp_sensitivity;
5426 struct lnet_genl_route_list {
5427 unsigned int lgrl_index;
5428 unsigned int lgrl_count;
5429 GENRADIX(struct lnet_route_properties) lgrl_list;
5432 static inline struct lnet_genl_route_list *
5433 lnet_route_dump_ctx(struct netlink_callback *cb)
5435 return (struct lnet_genl_route_list *)cb->args[0];
5438 static int lnet_route_show_done(struct netlink_callback *cb)
5440 struct lnet_genl_route_list *rlist = lnet_route_dump_ctx(cb);
5443 genradix_free(&rlist->lgrl_list);
5444 CFS_FREE_PTR(rlist);
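/* Collect every configured route matching the given filter (net,
* gateway, hops, priority, health sensitivity) from the remote-nets
* hash table into rlist for later dumping.
*/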
5451 static int lnet_scan_route(struct lnet_genl_route_list *rlist,
5452 struct lnet_route_properties *settings)
5454 struct lnet_remotenet *rnet;
5455 struct list_head *rn_list;
5456 struct lnet_route *route;
5459 cpt = lnet_net_lock_current();
5461 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
5462 rn_list = &the_lnet.ln_remote_nets_hash[i];
5463 list_for_each_entry(rnet, rn_list, lrn_list) {
5464 if (settings->lrp_net != LNET_NET_ANY &&
5465 settings->lrp_net != rnet->lrn_net)
5468 list_for_each_entry(route, &rnet->lrn_routes,
5470 struct lnet_route_properties *prop;
5472 if (!LNET_NID_IS_ANY(&settings->lrp_gateway) &&
5473 !nid_same(&settings->lrp_gateway,
5478 if (settings->lrp_hop != -1 &&
5479 settings->lrp_hop != route->lr_hops)
5482 if (settings->lrp_priority != -1 &&
5483 settings->lrp_priority != route->lr_priority)
5486 if (settings->lrp_sensitivity != -1 &&
5487 settings->lrp_sensitivity !=
5488 route->lr_gateway->lp_health_sensitivity)
5491 prop = genradix_ptr_alloc(&rlist->lgrl_list,
5492 rlist->lgrl_count++,
5495 GOTO(failed_alloc, rc = -ENOMEM);
5497 prop->lrp_net = rnet->lrn_net;
5498 prop->lrp_gateway = route->lr_nid;
5499 prop->lrp_hop = route->lr_hops;
5500 prop->lrp_priority = route->lr_priority;
5501 prop->lrp_sensitivity =
5502 route->lr_gateway->lp_health_sensitivity;
5503 if (lnet_is_route_alive(route))
5504 prop->lrp_flags |= LNET_RT_ALIVE;
5506 prop->lrp_flags &= ~LNET_RT_ALIVE;
5507 if (route->lr_single_hop)
5508 prop->lrp_flags &= ~LNET_RT_MULTI_HOP;
5510 prop->lrp_flags |= LNET_RT_MULTI_HOP;
5516 lnet_net_unlock(cpt);
5520 /* LNet route ->start() handler for GET requests */
5521 static int lnet_route_show_start(struct netlink_callback *cb)
5523 struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
5524 #ifdef HAVE_NL_PARSE_WITH_EXT_ACK
5525 struct netlink_ext_ack *extack = NULL;
5527 struct lnet_genl_route_list *rlist;
5528 int msg_len = genlmsg_len(gnlh);
5531 #ifdef HAVE_NL_DUMP_WITH_EXT_ACK
5532 extack = cb->extack;
5534 if (the_lnet.ln_refcount == 0 ||
5535 the_lnet.ln_state != LNET_STATE_RUNNING) {
5536 NL_SET_ERR_MSG(extack, "Network is down");
5540 CFS_ALLOC_PTR(rlist);
5542 NL_SET_ERR_MSG(extack, "No memory for route list");
5546 genradix_init(&rlist->lgrl_list);
5547 rlist->lgrl_count = 0;
5548 rlist->lgrl_index = 0;
5549 cb->args[0] = (long)rlist;
5551 mutex_lock(&the_lnet.ln_api_mutex);
5553 struct lnet_route_properties tmp = {
5554 .lrp_gateway = LNET_ANY_NID,
5555 .lrp_net = LNET_NET_ANY,
5558 .lrp_sensitivity = -1,
5561 rc = lnet_scan_route(rlist, &tmp);
5563 NL_SET_ERR_MSG(extack,
5564 "failed to allocate router data");
5565 GOTO(report_err, rc);
5568 struct nlattr *params = genlmsg_data(gnlh);
5569 struct nlattr *attr;
5572 nla_for_each_nested(attr, params, rem) {
5573 struct lnet_route_properties tmp = {
5574 .lrp_gateway = LNET_ANY_NID,
5575 .lrp_net = LNET_NET_ANY,
5578 .lrp_sensitivity = -1,
5580 struct nlattr *route;
5583 if (nla_type(attr) != LN_SCALAR_ATTR_LIST)
5586 nla_for_each_nested(route, attr, rem2) {
5587 if (nla_type(route) != LN_SCALAR_ATTR_VALUE)
5590 if (nla_strcmp(route, "net") == 0) {
5591 char nw[LNET_NIDSTR_SIZE];
5593 route = nla_next(route, &rem2);
5594 if (nla_type(route) !=
5595 LN_SCALAR_ATTR_VALUE) {
5596 NL_SET_ERR_MSG(extack,
5597 "invalid net param");
5598 GOTO(report_err, rc = -EINVAL);
5601 rc = nla_strscpy(nw, route, sizeof(nw));
5603 NL_SET_ERR_MSG(extack,
5604 "failed to get route param");
5605 GOTO(report_err, rc);
5608 tmp.lrp_net = libcfs_str2net(strim(nw));
5609 } else if (nla_strcmp(route, "gateway") == 0) {
5610 char gw[LNET_NIDSTR_SIZE];
5612 route = nla_next(route, &rem2);
5613 if (nla_type(route) !=
5614 LN_SCALAR_ATTR_VALUE) {
5615 NL_SET_ERR_MSG(extack,
5616 "invalid gateway param");
5617 GOTO(report_err, rc = -EINVAL);
5620 rc = nla_strscpy(gw, route, sizeof(gw));
5622 NL_SET_ERR_MSG(extack,
5623 "failed to get route param");
5624 GOTO(report_err, rc);
5627 libcfs_strnid(&tmp.lrp_gateway, strim(gw));
5628 } else if (nla_strcmp(route, "hop") == 0) {
5629 route = nla_next(route, &rem2);
5630 if (nla_type(route) !=
5631 LN_SCALAR_ATTR_INT_VALUE) {
5632 NL_SET_ERR_MSG(extack,
5633 "invalid hop param");
5634 GOTO(report_err, rc = -EINVAL);
5637 tmp.lrp_hop = nla_get_s64(route);
5638 if (tmp.lrp_hop != -1)
5639 tmp.lrp_hop = clamp_t(s32, tmp.lrp_hop, 1, 127);
5640 } else if (nla_strcmp(route, "priority") == 0) {
5641 route = nla_next(route, &rem2);
5642 if (nla_type(route) !=
5643 LN_SCALAR_ATTR_INT_VALUE) {
5644 NL_SET_ERR_MSG(extack,
5645 "invalid priority param");
5646 GOTO(report_err, rc = -EINVAL);
5649 tmp.lrp_priority = nla_get_s64(route);
5653 rc = lnet_scan_route(rlist, &tmp);
5655 NL_SET_ERR_MSG(extack,
5656 "failed to allocate router data");
5657 GOTO(report_err, rc);
5662 mutex_unlock(&the_lnet.ln_api_mutex);
5665 lnet_route_show_done(cb);
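/* Key table describing the attributes reported for each route in
* LNET_CMD_ROUTES replies.
*/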
5670 static const struct ln_key_list route_props_list = {
5671 .lkl_maxattr = LNET_ROUTE_ATTR_MAX,
5673 [LNET_ROUTE_ATTR_HDR] = {
5674 .lkp_value = "route",
5675 .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
5676 .lkp_data_type = NLA_NUL_STRING,
5678 [LNET_ROUTE_ATTR_NET] = {
5680 .lkp_data_type = NLA_STRING
5682 [LNET_ROUTE_ATTR_GATEWAY] = {
5683 .lkp_value = "gateway",
5684 .lkp_data_type = NLA_STRING
5686 [LNET_ROUTE_ATTR_HOP] = {
5688 .lkp_data_type = NLA_S32
5690 [LNET_ROUTE_ATTR_PRIORITY] = {
5691 .lkp_value = "priority",
5692 .lkp_data_type = NLA_U32
5694 [LNET_ROUTE_ATTR_HEALTH_SENSITIVITY] = {
5695 .lkp_value = "health_sensitivity",
5696 .lkp_data_type = NLA_U32
5698 [LNET_ROUTE_ATTR_STATE] = {
5699 .lkp_value = "state",
5700 .lkp_data_type = NLA_STRING,
5702 [LNET_ROUTE_ATTR_TYPE] = {
5703 .lkp_value = "type",
5704 .lkp_data_type = NLA_STRING,
5710 static int lnet_route_show_dump(struct sk_buff *msg,
5711 struct netlink_callback *cb)
5713 struct lnet_genl_route_list *rlist = lnet_route_dump_ctx(cb);
5714 struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
5715 #ifdef HAVE_NL_PARSE_WITH_EXT_ACK
5716 struct netlink_ext_ack *extack = NULL;
5718 int portid = NETLINK_CB(cb->skb).portid;
5719 int seq = cb->nlh->nlmsg_seq;
5720 int idx = rlist->lgrl_index;
5723 #ifdef HAVE_NL_DUMP_WITH_EXT_ACK
5724 extack = cb->extack;
5726 if (!rlist->lgrl_count) {
5727 NL_SET_ERR_MSG(extack, "No routes found");
5728 GOTO(send_error, rc = -ENOENT);
5732 const struct ln_key_list *all[] = {
5733 &route_props_list, NULL
5736 rc = lnet_genl_send_scalar_list(msg, portid, seq,
5738 NLM_F_CREATE | NLM_F_MULTI,
5739 LNET_CMD_ROUTES, all);
5741 NL_SET_ERR_MSG(extack, "failed to send key table");
5742 GOTO(send_error, rc);
5746 /* If no routes were found, send an empty message rather than an error */
5747 if (!rlist->lgrl_count) {
5750 hdr = genlmsg_put(msg, portid, seq, &lnet_family,
5751 NLM_F_MULTI, LNET_CMD_ROUTES);
5753 NL_SET_ERR_MSG(extack, "failed to send values");
5754 genlmsg_cancel(msg, hdr);
5755 GOTO(send_error, rc = -EMSGSIZE);
5757 genlmsg_end(msg, hdr);
5762 while (idx < rlist->lgrl_count) {
5763 struct lnet_route_properties *prop;
5766 prop = genradix_ptr(&rlist->lgrl_list, idx++);
5768 hdr = genlmsg_put(msg, portid, seq, &lnet_family,
5769 NLM_F_MULTI, LNET_CMD_ROUTES);
5771 NL_SET_ERR_MSG(extack, "failed to send values");
5772 genlmsg_cancel(msg, hdr);
5773 GOTO(send_error, rc = -EMSGSIZE);
5777 nla_put_string(msg, LNET_ROUTE_ATTR_HDR, "");
5779 nla_put_string(msg, LNET_ROUTE_ATTR_NET,
5780 libcfs_net2str(prop->lrp_net));
5781 nla_put_string(msg, LNET_ROUTE_ATTR_GATEWAY,
5782 libcfs_nidstr(&prop->lrp_gateway));
5783 if (gnlh->version) {
5784 nla_put_s32(msg, LNET_ROUTE_ATTR_HOP, prop->lrp_hop);
5785 nla_put_u32(msg, LNET_ROUTE_ATTR_PRIORITY, prop->lrp_priority);
5786 nla_put_u32(msg, LNET_ROUTE_ATTR_HEALTH_SENSITIVITY,
5787 prop->lrp_sensitivity);
5789 nla_put_string(msg, LNET_ROUTE_ATTR_STATE,
5790 prop->lrp_flags & LNET_RT_ALIVE ?
5792 nla_put_string(msg, LNET_ROUTE_ATTR_TYPE,
5793 prop->lrp_flags & LNET_RT_MULTI_HOP ?
5794 "multi-hop" : "single-hop");
5796 genlmsg_end(msg, hdr);
5798 rlist->lgrl_index = idx;
5800 return lnet_nl_send_error(cb->skb, portid, seq, rc);
5803 #ifndef HAVE_NETLINK_CALLBACK_START
5804 static int lnet_old_route_show_dump(struct sk_buff *msg,
5805 struct netlink_callback *cb)
5808 int rc = lnet_route_show_start(cb);
5814 return lnet_route_show_dump(msg, cb);
5816 #endif /* !HAVE_NETLINK_CALLBACK_START */
5818 static inline struct lnet_genl_ping_list *
5819 lnet_ping_dump_ctx(struct netlink_callback *cb)
5821 return (struct lnet_genl_ping_list *)cb->args[0];
5824 static int lnet_ping_show_done(struct netlink_callback *cb)
5826 struct lnet_genl_ping_list *plist = lnet_ping_dump_ctx(cb);
5829 genradix_free(&plist->lgpl_failed);
5830 genradix_free(&plist->lgpl_list);
5831 LIBCFS_FREE(plist, sizeof(*plist));
5838 /* LNet ping ->start() handler for GET requests */
5839 static int lnet_ping_show_start(struct netlink_callback *cb)
5841 struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
5842 #ifdef HAVE_NL_PARSE_WITH_EXT_ACK
5843 struct netlink_ext_ack *extack = NULL;
5845 struct lnet_genl_ping_list *plist;
5846 int msg_len = genlmsg_len(gnlh);
5847 struct nlattr *params, *top;
5850 #ifdef HAVE_NL_DUMP_WITH_EXT_ACK
5851 extack = cb->extack;
5853 if (the_lnet.ln_refcount == 0) {
5854 NL_SET_ERR_MSG(extack, "Network is down");
5859 NL_SET_ERR_MSG(extack, "Ping needs NID targets");
5863 LIBCFS_ALLOC(plist, sizeof(*plist));
5865 NL_SET_ERR_MSG(extack, "failed to setup ping list");
5868 genradix_init(&plist->lgpl_list);
5869 plist->lgpl_timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
5870 plist->lgpl_src_nid = LNET_ANY_NID;
5871 plist->lgpl_index = 0;
5872 plist->lgpl_list_count = 0;
5873 cb->args[0] = (long)plist;
5875 params = genlmsg_data(gnlh);
5876 nla_for_each_attr(top, params, msg_len, rem) {
5877 struct nlattr *nids;
5880 switch (nla_type(top)) {
5881 case LN_SCALAR_ATTR_VALUE:
5882 if (nla_strcmp(top, "timeout") == 0) {
5885 top = nla_next(top, &rem);
5886 if (nla_type(top) != LN_SCALAR_ATTR_INT_VALUE) {
5887 NL_SET_ERR_MSG(extack,
5888 "invalid timeout param");
5889 GOTO(report_err, rc = -EINVAL);
5892 /* If the timeout is negative or too large then keep the
5893 * default of 3 minutes.
5894 */
5895 timeout = nla_get_s64(top);
5897 timeout < (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
5898 plist->lgpl_timeout =
5899 nsecs_to_jiffies(timeout * NSEC_PER_MSEC);
5900 } else if (nla_strcmp(top, "source") == 0) {
5901 char nidstr[LNET_NIDSTR_SIZE + 1];
5903 top = nla_next(top, &rem);
5904 if (nla_type(top) != LN_SCALAR_ATTR_VALUE) {
5905 NL_SET_ERR_MSG(extack,
5906 "invalid source param");
5907 GOTO(report_err, rc = -EINVAL);
5910 rc = nla_strscpy(nidstr, top, sizeof(nidstr));
5912 NL_SET_ERR_MSG(extack,
5913 "failed to parse source nid");
5914 GOTO(report_err, rc);
5917 rc = libcfs_strnid(&plist->lgpl_src_nid,
5920 NL_SET_ERR_MSG(extack,
5921 "invalid source nid");
5922 GOTO(report_err, rc);
5927 case LN_SCALAR_ATTR_LIST:
5928 nla_for_each_nested(nids, top, rem2) {
5929 char nid[LNET_NIDSTR_SIZE + 1];
5930 struct lnet_processid *id;
5932 if (nla_type(nids) != LN_SCALAR_ATTR_VALUE)
5935 memset(nid, 0, sizeof(nid));
5936 rc = nla_strscpy(nid, nids, sizeof(nid));
5938 NL_SET_ERR_MSG(extack,
5939 "failed to get NID");
5940 GOTO(report_err, rc);
5943 id = genradix_ptr_alloc(&plist->lgpl_list,
5944 plist->lgpl_list_count++,
5947 NL_SET_ERR_MSG(extack,
5948 "failed to allocate NID");
5949 GOTO(report_err, rc = -ENOMEM);
5952 rc = libcfs_strid(id, strim(nid));
5954 NL_SET_ERR_MSG(extack, "invalid NID");
5955 GOTO(report_err, rc);
5966 lnet_ping_show_done(cb);
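/* Key tables for LNET_CMD_PING replies: per-target primary NID,
* errno, Multi-Rail flag and the list of peer NIs that responded.
*/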
5971 static const struct ln_key_list ping_props_list = {
5972 .lkl_maxattr = LNET_PING_ATTR_MAX,
5974 [LNET_PING_ATTR_HDR] = {
5975 .lkp_value = "ping",
5976 .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
5977 .lkp_data_type = NLA_NUL_STRING,
5979 [LNET_PING_ATTR_PRIMARY_NID] = {
5980 .lkp_value = "primary nid",
5981 .lkp_data_type = NLA_STRING
5983 [LNET_PING_ATTR_ERRNO] = {
5984 .lkp_value = "errno",
5985 .lkp_data_type = NLA_S16
5987 [LNET_PING_ATTR_MULTIRAIL] = {
5988 .lkp_value = "Multi-Rail",
5989 .lkp_data_type = NLA_FLAG
5991 [LNET_PING_ATTR_PEER_NI_LIST] = {
5992 .lkp_value = "peer_ni",
5993 .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
5994 .lkp_data_type = NLA_NESTED
5999 static struct ln_key_list ping_peer_ni_list = {
6000 .lkl_maxattr = LNET_PING_PEER_NI_ATTR_MAX,
6002 [LNET_PING_PEER_NI_ATTR_NID] = {
6004 .lkp_data_type = NLA_STRING
6009 static int lnet_ping_show_dump(struct sk_buff *msg,
6010 struct netlink_callback *cb)
6012 struct lnet_genl_ping_list *plist = lnet_ping_dump_ctx(cb);
6013 struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
6014 #ifdef HAVE_NL_PARSE_WITH_EXT_ACK
6015 struct netlink_ext_ack *extack = NULL;
6017 int portid = NETLINK_CB(cb->skb).portid;
6018 int seq = cb->nlh->nlmsg_seq;
6019 int idx = plist->lgpl_index;
6022 #ifdef HAVE_NL_DUMP_WITH_EXT_ACK
6023 extack = cb->extack;
6025 if (!plist->lgpl_index) {
6026 const struct ln_key_list *all[] = {
6027 &ping_props_list, &ping_peer_ni_list, NULL
6030 rc = lnet_genl_send_scalar_list(msg, portid, seq,
6032 NLM_F_CREATE | NLM_F_MULTI,
6033 LNET_CMD_PING, all);
6035 NL_SET_ERR_MSG(extack, "failed to send key table");
6036 GOTO(send_error, rc);
6039 genradix_init(&plist->lgpl_failed);
6042 while (idx < plist->lgpl_list_count) {
6043 struct lnet_nid primary_nid = LNET_ANY_NID;
6044 struct lnet_genl_ping_list peers;
6045 struct lnet_processid *id;
6046 struct nlattr *nid_list;
6047 struct lnet_peer *lp;
6048 bool mr_flag = false;
6052 id = genradix_ptr(&plist->lgpl_list, idx++);
6053 if (nid_is_lo0(&id->nid))
6056 rc = lnet_ping(id, &plist->lgpl_src_nid, plist->lgpl_timeout,
6057 &peers, lnet_interfaces_max);
6059 struct lnet_fail_ping *fail;
6061 fail = genradix_ptr_alloc(&plist->lgpl_failed,
6062 plist->lgpl_failed_count++,
6065 NL_SET_ERR_MSG(extack,
6066 "failed to allocate failed NID");
6067 GOTO(send_error, rc);
6070 fail->lfp_errno = rc;
6074 mutex_lock(&the_lnet.ln_api_mutex);
6075 lp = lnet_find_peer(&id->nid);
6077 primary_nid = lp->lp_primary_nid;
6078 mr_flag = lnet_peer_is_multi_rail(lp);
6079 lnet_peer_decref_locked(lp);
6081 mutex_unlock(&the_lnet.ln_api_mutex);
6083 hdr = genlmsg_put(msg, portid, seq, &lnet_family,
6084 NLM_F_MULTI, LNET_CMD_PING);
6086 NL_SET_ERR_MSG(extack, "failed to send values");
6087 genlmsg_cancel(msg, hdr);
6088 GOTO(send_error, rc = -EMSGSIZE);
6092 nla_put_string(msg, LNET_PING_ATTR_HDR, "");
6094 nla_put_string(msg, LNET_PING_ATTR_PRIMARY_NID,
6095 libcfs_nidstr(&primary_nid));
6097 nla_put_flag(msg, LNET_PING_ATTR_MULTIRAIL);
6099 nid_list = nla_nest_start(msg, LNET_PING_ATTR_PEER_NI_LIST);
6100 for (count = 0; count < rc; count++) {
6101 struct lnet_processid *result;
6102 struct nlattr *nid_attr;
6105 result = genradix_ptr(&peers.lgpl_list, count);
6106 if (nid_is_lo0(&result->nid))
6109 nid_attr = nla_nest_start(msg, count + 1);
6110 if (gnlh->version == 1)
6111 idstr = libcfs_nidstr(&result->nid);
6113 idstr = libcfs_idstr(result);
6114 nla_put_string(msg, LNET_PING_PEER_NI_ATTR_NID, idstr);
6115 nla_nest_end(msg, nid_attr);
6117 nla_nest_end(msg, nid_list);
6118 genlmsg_end(msg, hdr);
6120 genradix_free(&peers.lgpl_list);
6123 for (i = 0; i < plist->lgpl_failed_count; i++) {
6124 struct lnet_fail_ping *fail;
6127 fail = genradix_ptr(&plist->lgpl_failed, i);
6129 hdr = genlmsg_put(msg, portid, seq, &lnet_family,
6130 NLM_F_MULTI, LNET_CMD_PING);
6132 NL_SET_ERR_MSG(extack, "failed to send failed values");
6133 genlmsg_cancel(msg, hdr);
6134 GOTO(send_error, rc = -EMSGSIZE);
6138 nla_put_string(msg, LNET_PING_ATTR_HDR, "");
6140 nla_put_string(msg, LNET_PING_ATTR_PRIMARY_NID,
6141 libcfs_nidstr(&fail->lfp_id.nid));
6142 nla_put_s16(msg, LNET_PING_ATTR_ERRNO, fail->lfp_errno);
6143 genlmsg_end(msg, hdr);
6145 rc = 0; /* don't treat it as an error */
6147 plist->lgpl_index = idx;
6149 return lnet_nl_send_error(cb->skb, portid, seq, rc);
6152 #ifndef HAVE_NETLINK_CALLBACK_START
6153 static int lnet_old_ping_show_dump(struct sk_buff *msg,
6154 struct netlink_callback *cb)
6157 int rc = lnet_ping_show_start(cb);
6163 return lnet_ping_show_dump(msg, cb);
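/* Multicast groups and operations registered with the LNet generic
* netlink family.
*/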
6167 static const struct genl_multicast_group lnet_mcast_grps[] = {
6168 { .name = "ip2net", },
6170 { .name = "route", },
6171 { .name = "ping", },
6174 static const struct genl_ops lnet_genl_ops[] = {
6176 .cmd = LNET_CMD_NETS,
6177 .flags = GENL_ADMIN_PERM,
6178 #ifdef HAVE_NETLINK_CALLBACK_START
6179 .start = lnet_net_show_start,
6180 .dumpit = lnet_net_show_dump,
6182 .dumpit = lnet_old_net_show_dump,
6184 .done = lnet_net_show_done,
6185 .doit = lnet_net_cmd,
6188 .cmd = LNET_CMD_ROUTES,
6189 #ifdef HAVE_NETLINK_CALLBACK_START
6190 .start = lnet_route_show_start,
6191 .dumpit = lnet_route_show_dump,
6193 .dumpit = lnet_old_route_show_dump,
6195 .done = lnet_route_show_done,
6198 .cmd = LNET_CMD_PING,
6199 #ifdef HAVE_NETLINK_CALLBACK_START
6200 .start = lnet_ping_show_start,
6201 .dumpit = lnet_ping_show_dump,
6203 .dumpit = lnet_old_ping_show_dump,
6205 .done = lnet_ping_show_done,
6209 static struct genl_family lnet_family = {
6210 .name = LNET_GENL_NAME,
6211 .version = LNET_GENL_VERSION,
6212 .module = THIS_MODULE,
6214 .ops = lnet_genl_ops,
6215 .n_ops = ARRAY_SIZE(lnet_genl_ops),
6216 .mcgrps = lnet_mcast_grps,
6217 .n_mcgrps = ARRAY_SIZE(lnet_mcast_grps),
6218 #ifdef GENL_FAMILY_HAS_RESV_START_OP
6219 .resv_start_op = __LNET_CMD_MAX_PLUS_ONE,
6223 void LNetDebugPeer(struct lnet_processid *id)
6225 lnet_debug_peer(&id->nid);
6227 EXPORT_SYMBOL(LNetDebugPeer);
6229 /**
6230 * Determine if the specified peer \a nid is on the local node.
6231 *
6232 * \param nid peer nid to check
6233 *
6234 * \retval true If peer NID is on the local node.
6235 * \retval false If peer NID is not on the local node.
6236 */
6237 bool LNetIsPeerLocal(struct lnet_nid *nid)
6239 struct lnet_net *net;
6243 cpt = lnet_net_lock_current();
6244 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
6245 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
6246 if (nid_same(&ni->ni_nid, nid)) {
6247 lnet_net_unlock(cpt);
6252 lnet_net_unlock(cpt);
6256 EXPORT_SYMBOL(LNetIsPeerLocal);
6258 /**
6259 * Retrieve the struct lnet_process_id ID of the LNet interface at \a index.
6260 * Note that all interfaces share the same PID, as requested by LNetNIInit().
6261 *
6262 * \param index      Index of the interface to look up.
6263 * \param id         On successful return, this location will hold the
6264 *                   struct lnet_process_id ID of the interface.
6265 * \param large_nids Report large NIDs if this is true.
6266 *
6267 * \retval 0       If an interface exists at \a index.
6268 * \retval -ENOENT If no interface has been found.
6269 */
6271 LNetGetId(unsigned int index, struct lnet_processid *id, bool large_nids)
6274 struct lnet_net *net;
6278 LASSERT(the_lnet.ln_refcount > 0);
6280 cpt = lnet_net_lock_current();
6282 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
6283 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
6284 if (!large_nids && !nid_is_nid4(&ni->ni_nid))
6290 id->nid = ni->ni_nid;
6291 id->pid = the_lnet.ln_pid;
6297 lnet_net_unlock(cpt);
6300 EXPORT_SYMBOL(LNetGetId);
6306 struct lnet_handle_md mdh;
6307 struct completion completion;
6311 lnet_ping_event_handler(struct lnet_event *event)
6313 struct ping_data *pd = event->md_user_ptr;
6315 CDEBUG(D_NET, "ping event (%d %d)%s\n",
6316 event->type, event->status,
6317 event->unlinked ? " unlinked" : "");
6319 if (event->status) {
6321 pd->rc = event->status;
6322 } else if (event->type == LNET_EVENT_REPLY) {
6324 pd->rc = event->mlength;
6327 if (event->unlinked)
6328 pd->pd_unlinked = 1;
6330 if (event->unlinked ||
6331 (event->type == LNET_EVENT_SEND && event->status))
6332 complete(&pd->completion);
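/* Ping a peer: bind an MD over a ping buffer, issue an LNetGet() on
* the reserved portal with the ping match bits, wait for the REPLY
* (or the timeout), validate the returned ping info and record each
* reported process ID in plist.
*/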
6335 static int lnet_ping(struct lnet_processid *id, struct lnet_nid *src_nid,
6336 signed long timeout, struct lnet_genl_ping_list *plist,
6339 int id_bytes = sizeof(struct lnet_ni_status); /* For 0@lo */
6340 struct lnet_md md = { NULL };
6341 struct ping_data pd = { 0 };
6342 struct lnet_ping_buffer *pbuf;
6343 struct lnet_processid pid;
6344 struct lnet_ping_iter pi;
6351 genradix_init(&plist->lgpl_list);
6353 /* n_ids limit is arbitrary */
6354 if (n_ids <= 0 || LNET_NID_IS_ANY(&id->nid))
6357 /* If the user buffer has more space than lnet_interfaces_max,
6358 * then only fill it up to lnet_interfaces_max.
6359 */
6360 if (n_ids > lnet_interfaces_max)
6361 n_ids = lnet_interfaces_max;
6363 if (id->pid == LNET_PID_ANY)
6364 id->pid = LNET_PID_LUSTRE;
6366 id_bytes += lnet_ping_sts_size(&id->nid) * n_ids;
6367 pbuf = lnet_ping_buffer_alloc(id_bytes, GFP_NOFS);
6371 /* initialize md content */
6372 md.start = &pbuf->pb_info;
6373 md.length = id_bytes;
6374 md.threshold = 2; /* GET/REPLY */
6376 md.options = LNET_MD_TRUNCATE;
6378 md.handler = lnet_ping_event_handler;
6380 init_completion(&pd.completion);
6382 rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
6384 CERROR("Can't bind MD: %d\n", rc);
6385 goto fail_ping_buffer_decref;
6388 rc = LNetGet(src_nid, pd.mdh, id, LNET_RESERVED_PORTAL,
6389 LNET_PROTO_PING_MATCHBITS, 0, false);
6391 /* Don't CERROR; this could be deliberate! */
6392 rc2 = LNetMDUnlink(pd.mdh);
6395 /* NB must wait for the UNLINK event below... */
6398 /* Ensure completion in finite time... */
6399 wait_for_completion_timeout(&pd.completion, timeout);
6400 if (!pd.pd_unlinked) {
6401 LNetMDUnlink(pd.mdh);
6402 wait_for_completion(&pd.completion);
6407 goto fail_ping_buffer_decref;
6411 LASSERT(nob >= 0 && nob <= id_bytes);
6413 rc = -EPROTO; /* if I can't parse... */
6415 if (nob < LNET_PING_INFO_HDR_SIZE) {
6416 CERROR("%s: ping info too short %d\n",
6417 libcfs_idstr(id), nob);
6418 goto fail_ping_buffer_decref;
6421 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
6422 lnet_swap_pinginfo(pbuf);
6423 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
6424 CERROR("%s: Unexpected magic %08x\n",
6425 libcfs_idstr(id), pbuf->pb_info.pi_magic);
6426 goto fail_ping_buffer_decref;
6429 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
6430 CERROR("%s: ping w/o NI status: 0x%x\n",
6431 libcfs_idstr(id), pbuf->pb_info.pi_features);
6432 goto fail_ping_buffer_decref;
6435 /* Test if smaller than lnet_pinginfo with just one pi_ni status info.
6436 * That entry might contain a size field when large NIDs are used.
6437 */
6438 if (nob < offsetof(struct lnet_ping_info, pi_ni[1])) {
6439 CERROR("%s: Short reply %d(%lu min)\n",
6440 libcfs_idstr(id), nob,
6441 offsetof(struct lnet_ping_info, pi_ni[1]));
6442 goto fail_ping_buffer_decref;
6445 if (ping_info_count_entries(pbuf) < n_ids) {
6446 n_ids = ping_info_count_entries(pbuf);
6447 id_bytes = lnet_ping_info_size(&pbuf->pb_info);
6450 if (nob < id_bytes) {
6451 CERROR("%s: Short reply %d(%d expected)\n",
6452 libcfs_idstr(id), nob, id_bytes);
6453 goto fail_ping_buffer_decref;
6456 for (st = ping_iter_first(&pi, pbuf, &pid.nid);
6458 st = ping_iter_next(&pi, &pid.nid)) {
6459 id = genradix_ptr_alloc(&plist->lgpl_list, i++, GFP_ATOMIC);
6462 goto fail_ping_buffer_decref;
6465 id->pid = pbuf->pb_info.pi_pid;
6469 fail_ping_buffer_decref:
6470 lnet_ping_buffer_decref(pbuf);
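/* Discover a peer: clear its NIDS_UPTODATE state (optionally forcing
* a PING and PUSH), run discovery, then copy the peer's constituent
* NIDs to the user-supplied buffer.
*/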
6475 lnet_discover(struct lnet_process_id id4, __u32 force,
6476 struct lnet_process_id __user *ids, int n_ids)
6478 struct lnet_peer_ni *lpni;
6479 struct lnet_peer_ni *p;
6480 struct lnet_peer *lp;
6481 struct lnet_process_id *buf;
6482 struct lnet_processid id;
6488 id4.nid == LNET_NID_ANY)
6491 lnet_pid4_to_pid(id4, &id);
6492 if (id.pid == LNET_PID_ANY)
6493 id.pid = LNET_PID_LUSTRE;
6495 /*
6496 * If the user buffer has more space than lnet_interfaces_max,
6497 * then only fill it up to lnet_interfaces_max.
6498 */
6499 if (n_ids > lnet_interfaces_max)
6500 n_ids = lnet_interfaces_max;
6502 CFS_ALLOC_PTR_ARRAY(buf, n_ids);
6506 cpt = lnet_net_lock_current();
6507 lpni = lnet_peerni_by_nid_locked(&id.nid, NULL, cpt);
6513 /*
6514 * Clearing the NIDS_UPTODATE flag ensures the peer will
6515 * be discovered, provided discovery has not been disabled.
6516 */
6517 lp = lpni->lpni_peer_net->lpn_peer;
6518 spin_lock(&lp->lp_lock);
6519 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
6520 /* If the force flag is set, force a PING and PUSH as well. */
6522 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
6523 spin_unlock(&lp->lp_lock);
6524 rc = lnet_discover_peer_locked(lpni, cpt, true);
6528 /* The lpni (or lp) for this NID may have changed and our ref is
6529 * the only thing keeping the old one around. Release the ref
6530 * and lookup the lpni again
6532 lnet_peer_ni_decref_locked(lpni);
6533 lpni = lnet_peer_ni_find_locked(&id.nid);
6538 lp = lpni->lpni_peer_net->lpn_peer;
6542 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
6543 buf[i].pid = id.pid;
6544 buf[i].nid = lnet_nid_to_nid4(&p->lpni_nid);
6551 lnet_peer_ni_decref_locked(lpni);
6553 lnet_net_unlock(cpt);
6556 if (copy_to_user(ids, buf, rc * sizeof(*buf)))
6558 CFS_FREE_PTR_ARRAY(buf, n_ids);
6563 /**
6564 * Retrieve peer discovery status.
6565 *
6566 * \retval 1 if lnet_peer_discovery_disabled is 0
6567 * \retval 0 if lnet_peer_discovery_disabled is 1
6568 */
6570 LNetGetPeerDiscoveryStatus(void)
6572 return !lnet_peer_discovery_disabled;
6574 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);