/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/ctype.h>
#include <linux/generic-radix-tree.h>
#include <linux/log2.h>
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <net/genetlink.h>

#include <libcfs/linux/linux-net.h>
#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE
/*
 * Initialize ln_api_mutex statically, since it needs to be used in the
 * discovery_set callback. That module parameter callback can be called
 * before module init completes. The mutex needs to be ready for use then.
 */
struct lnet the_lnet = {
	.ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
};				/* THE state of the network */
EXPORT_SYMBOL(the_lnet);
static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");

static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
		 "use_tcp_bonding parameter has been removed");

unsigned int lnet_numa_range = 0;
module_param(lnet_numa_range, uint, 0444);
MODULE_PARM_DESC(lnet_numa_range,
		 "NUMA range to consider during Multi-Rail selection");
/*
 * lnet_health_sensitivity determines by how much we decrement the health
 * value on a send error. The value defaults to 100, which means an
 * interface's health is decremented by 100 points on every failure.
 */
unsigned int lnet_health_sensitivity = 100;
static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_health_sensitivity = {
	.set = sensitivity_set,
	.get = param_get_int,
};
#define param_check_health_sensitivity(name, p) \
		__param_check(name, p, int)
module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
		  &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_health_sensitivity,
		 "Value to decrement the health value by on error");

/*
 * lnet_recovery_interval determines how often we should perform recovery
 * on unhealthy interfaces.
 */
unsigned int lnet_recovery_interval = 1;
static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_recovery_interval = {
	.set = recovery_interval_set,
	.get = param_get_int,
};
#define param_check_recovery_interval(name, p) \
		__param_check(name, p, int)
module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
		  &lnet_recovery_interval, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_recovery_interval,
		 "DEPRECATED - Interval to recover unhealthy interfaces in seconds");

unsigned int lnet_recovery_limit;
module_param(lnet_recovery_limit, uint, 0644);
MODULE_PARM_DESC(lnet_recovery_limit,
		 "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");
unsigned int lnet_max_recovery_ping_interval = 900;
unsigned int lnet_max_recovery_ping_count = 9;
static int max_recovery_ping_interval_set(const char *val,
					  cfs_kernel_param_arg_t *kp);
#define param_check_max_recovery_ping_interval(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_max_recovery_ping_interval = {
	.set = max_recovery_ping_interval_set,
	.get = param_get_int,
};
module_param(lnet_max_recovery_ping_interval, max_recovery_ping_interval, 0644);
#else
module_param_call(lnet_max_recovery_ping_interval,
		  max_recovery_ping_interval_set,
		  param_get_int, &lnet_max_recovery_ping_interval, 0644);
#endif
MODULE_PARM_DESC(lnet_max_recovery_ping_interval,
		 "The max interval between LNet recovery pings, in seconds");
static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_interfaces_max = {
	.set = intf_max_set,
	.get = param_get_int,
};

#define param_check_interfaces_max(name, p) \
		__param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_interfaces_max, interfaces_max, 0644);
#else
module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
		  &param_ops_interfaces_max, 0644);
#endif
MODULE_PARM_DESC(lnet_interfaces_max,
		 "Maximum number of interfaces in a node.");
unsigned lnet_peer_discovery_disabled = 0;
static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_discovery_disabled = {
	.set = discovery_set,
	.get = param_get_int,
};

#define param_check_discovery_disabled(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
#else
module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
		  &param_ops_discovery_disabled, 0644);
#endif
MODULE_PARM_DESC(lnet_peer_discovery_disabled,
		 "Set to 1 to disable peer discovery on this node.");

unsigned int lnet_drop_asym_route;
static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_drop_asym_route = {
	.set = drop_asym_route_set,
	.get = param_get_int,
};

#define param_check_drop_asym_route(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_drop_asym_route, drop_asym_route, 0644);
#else
module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
		  &param_ops_drop_asym_route, 0644);
#endif
MODULE_PARM_DESC(lnet_drop_asym_route,
		 "Set to 1 to drop asymmetrical route messages.");
#define LNET_TRANSACTION_TIMEOUT_DEFAULT 150
unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_transaction_timeout = {
	.set = transaction_to_set,
	.get = param_get_int,
};
#define param_check_transaction_timeout(name, p) \
		__param_check(name, p, int)
module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
		  &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_transaction_timeout,
		 "Maximum number of seconds to wait for a peer response.");

#define LNET_RETRY_COUNT_DEFAULT 2
unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_retry_count = {
	.set = retry_count_set,
	.get = param_get_int,
};
#define param_check_retry_count(name, p) \
		__param_check(name, p, int)
module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_retry_count, retry_count_set, param_get_int,
		  &lnet_retry_count, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_retry_count,
		 "Maximum number of times to retry transmitting a message");

unsigned int lnet_response_tracking = 3;
static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);

#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_response_tracking = {
	.set = response_tracking_set,
	.get = param_get_int,
};
#define param_check_response_tracking(name, p) \
		__param_check(name, p, int)
module_param(lnet_response_tracking, response_tracking, 0644);
#else
module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
		  &lnet_response_tracking, 0644);
#endif
MODULE_PARM_DESC(lnet_response_tracking,
		 "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");

int lock_prim_nid = 1;
module_param(lock_prim_nid, int, 0444);
MODULE_PARM_DESC(lock_prim_nid,
		 "Whether nid passed down by Lustre is locked as primary");
#define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
				  (LNET_RETRY_COUNT_DEFAULT + 1))
unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;
static void lnet_set_lnd_timeout(void)
{
	lnet_lnd_timeout = max((lnet_transaction_timeout - 1) /
			       (lnet_retry_count + 1), 1U);
}
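/*
 * Worked example (from the defaults above): lnet_transaction_timeout = 150
 * and lnet_retry_count = 2 give lnet_lnd_timeout = (150 - 1) / (2 + 1) = 49
 * seconds, i.e. the transaction timeout is divided across the original send
 * plus each retry, and the max() keeps it at least one second.
 */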
/*
 * This sequence number keeps track of how many times DLC was used to
 * update the local NIs. It is incremented when a NI is added or
 * removed and checked when sending a message to determine if there is
 * a need to re-run the selection algorithm. See lnet_select_pathway()
 * for more details on its usage.
 */
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);

struct lnet_fail_ping {
	struct lnet_processid		lfp_id;
	int				lfp_errno;
};

struct lnet_genl_ping_list {
	unsigned int			lgpl_index;
	unsigned int			lgpl_list_count;
	unsigned int			lgpl_failed_count;
	signed long			lgpl_timeout;
	struct lnet_nid			lgpl_src_nid;
	GENRADIX(struct lnet_fail_ping)	lgpl_failed;
	GENRADIX(struct lnet_processid)	lgpl_list;
};

static int lnet_ping(struct lnet_processid *id, struct lnet_nid *src_nid,
		     signed long timeout, struct lnet_genl_ping_list *plist,
		     int n_ids);

static int lnet_discover(struct lnet_process_id id, __u32 force,
			 struct lnet_process_id __user *ids, int n_ids);
static int
sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *sensitivity = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value > LNET_MAX_HEALTH_VALUE) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid health value. Maximum: %d value = %lu\n",
		       LNET_MAX_HEALTH_VALUE, value);
		return -EINVAL;
	}

	/* if health is being turned off, disable message retries as well */
	if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
		lnet_retry_count = 0;
		lnet_set_lnd_timeout();
	}

	*sensitivity = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	CWARN("'lnet_recovery_interval' has been deprecated\n");

	return 0;
}

static int
max_recovery_ping_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_max_recovery_ping_interval'\n");
		return rc;
	}

	if (!value) {
		CERROR("Invalid max ping timeout. Must be strictly positive\n");
		return -EINVAL;
	}

	/* The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);
	lnet_max_recovery_ping_interval = value;
	lnet_max_recovery_ping_count = 0;
	value >>= 1;
	while (value) {
		lnet_max_recovery_ping_count++;
		value >>= 1;
	}
	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
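/*
 * Worked note: the loop above computes floor(log2(value)), so the default
 * interval of 900 seconds yields lnet_max_recovery_ping_count = 9, matching
 * the static defaults above. Recovery ping intervals double on each failed
 * ping, so this count bounds how many doublings fit under the maximum.
 */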
static int
discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *discovery_off = (unsigned *)kp->arg;
	unsigned long value;
	struct lnet_ping_buffer *pbuf;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
		return rc;
	}

	value = (value) ? 1 : 0;

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value == *discovery_off) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	/*
	 * We still want to set the discovery value even when LNet is not
	 * running. This is the case when LNet is being loaded and we want
	 * the module parameters to take effect. Otherwise if we're
	 * changing the value dynamically, we want to set it after
	 * updating the peers.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		*discovery_off = value;
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	/* tell peers that discovery setting has changed */
	lnet_net_lock(LNET_LOCK_EX);
	pbuf = the_lnet.ln_ping_target;
	if (value)
		pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
	else
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
	lnet_net_unlock(LNET_LOCK_EX);

	/* only send a push when we're turning off discovery */
	if (*discovery_off <= 0 && value > 0)
		lnet_push_update_to_peers(1);
	*discovery_off = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
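/*
 * Design note: peers learn about this node's discovery setting via the
 * LNET_PING_FEAT_DISCOVERY bit in the ping buffer, which is why the bit is
 * flipped under lnet_net_lock(LNET_LOCK_EX) before any push is sent.
 */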
static int
drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned int *drop_asym_route = (unsigned int *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_drop_asym_route'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value == *drop_asym_route) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*drop_asym_route = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *transaction_to = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value <= lnet_retry_count || value == 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid value for lnet_transaction_timeout (%lu). Has to be greater than lnet_retry_count (%u)\n",
		       value, lnet_retry_count);
		return -EINVAL;
	}

	if (value == *transaction_to) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*transaction_to = value;
	/* Update the lnet_lnd_timeout now that we've modified the
	 * transaction timeout
	 */
	lnet_set_lnd_timeout();

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
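/*
 * Invariant maintained by transaction_to_set() and retry_count_set():
 * lnet_retry_count < lnet_transaction_timeout, so lnet_set_lnd_timeout()
 * always derives a positive per-attempt LND timeout.
 */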
static int
retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *retry_count = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (lnet_health_sensitivity == 0 && value > 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Can not set lnet_retry_count when health feature is turned off\n");
		return -EINVAL;
	}

	if (value > lnet_transaction_timeout) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid value for lnet_retry_count (%lu). Has to be smaller than lnet_transaction_timeout (%u)\n",
		       value, lnet_transaction_timeout);
		return -EINVAL;
	}

	*retry_count = value;

	/* Update the lnet_lnd_timeout now that we've modified the
	 * retry count
	 */
	lnet_set_lnd_timeout();

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int value, rc;

	rc = kstrtoint(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
		return rc;
	}

	if (value < LNET_INTERFACES_MIN) {
		CWARN("max interfaces provided are too small, setting to %d\n",
		      LNET_INTERFACES_MAX_DEFAULT);
		value = LNET_INTERFACES_MAX_DEFAULT;
	}

	*(int *)kp->arg = value;

	return 0;
}
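/*
 * Note: a value below LNET_INTERFACES_MIN is clamped to the default rather
 * than rejected, so a bad setting still leaves a usable interface limit.
 */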
static int
response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned long new_value;

	rc = kstrtoul(val, 0, &new_value);
	if (rc) {
		CERROR("Invalid value for 'lnet_response_tracking'\n");
		return -EINVAL;
	}

	if (new_value > 3) {
		CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
		      new_value);
		return -EINVAL;
	}

	lnet_response_tracking = new_value;

	return 0;
}
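/*
 * Settings, per the MODULE_PARM_DESC above: 0 tracks LNet-internal responses
 * only, 1 tracks GET replies, 2 tracks PUT ACKs, and the default 3 tracks
 * both, letting stuck REPLY/ACK messages be timed out and cleaned up.
 */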
static const char *
lnet_get_routes(void)
{
	return routes;
}

static const char *
lnet_get_networks(void)
{
	const char *nets;
	int rc;

	if (*networks != 0 && *ip2nets != 0) {
		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
				   "'ip2nets' but not both at once\n");
		return NULL;
	}

	if (*ip2nets != 0) {
		rc = lnet_parse_ip2nets(&nets, ip2nets);
		return (rc == 0) ? nets : NULL;
	}

	if (*networks != 0)
		return networks;

	return "tcp";
}
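/*
 * Usage sketch (values illustrative, not from this file): loading the module
 * with networks="tcp0(eth0),o2ib0(ib0)" configures two local networks, while
 * ip2nets="tcp0 10.10.*.*; o2ib0 172.16.*.*" selects networks by the node's
 * IP address. The two options are mutually exclusive, and plain "tcp" is the
 * fallback when neither is given.
 */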
static void
lnet_init_locks(void)
{
	spin_lock_init(&the_lnet.ln_eq_wait_lock);
	spin_lock_init(&the_lnet.ln_msg_resend_lock);
	init_completion(&the_lnet.ln_mt_wait_complete);
	mutex_init(&the_lnet.ln_lnd_mutex);
}

struct kmem_cache *lnet_mes_cachep;	   /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
					    * MDs kmem_cache */
struct kmem_cache *lnet_udsp_cachep;	   /* udsp cache */
struct kmem_cache *lnet_rspt_cachep;	   /* response tracker cache */
struct kmem_cache *lnet_msg_cachep;

static int
lnet_slab_setup(void)
{
	/* create specific kmem_cache for MEs and small MDs (i.e., originally
	 * allocated in <size-xxx> kmem_cache).
	 */
	lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
					    0, 0, NULL);
	if (!lnet_mes_cachep)
		return -ENOMEM;

	lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
						  LNET_SMALL_MD_SIZE, 0, 0,
						  NULL);
	if (!lnet_small_mds_cachep)
		return -ENOMEM;

	lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
					     sizeof(struct lnet_udsp),
					     0, 0, NULL);
	if (!lnet_udsp_cachep)
		return -ENOMEM;

	lnet_rspt_cachep = kmem_cache_create("lnet_rspt",
					     sizeof(struct lnet_rsp_tracker),
					     0, 0, NULL);
	if (!lnet_rspt_cachep)
		return -ENOMEM;

	lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
					    0, 0, NULL);
	if (!lnet_msg_cachep)
		return -ENOMEM;

	return 0;
}

static void
lnet_slab_cleanup(void)
{
	if (lnet_msg_cachep) {
		kmem_cache_destroy(lnet_msg_cachep);
		lnet_msg_cachep = NULL;
	}

	if (lnet_rspt_cachep) {
		kmem_cache_destroy(lnet_rspt_cachep);
		lnet_rspt_cachep = NULL;
	}

	if (lnet_udsp_cachep) {
		kmem_cache_destroy(lnet_udsp_cachep);
		lnet_udsp_cachep = NULL;
	}

	if (lnet_small_mds_cachep) {
		kmem_cache_destroy(lnet_small_mds_cachep);
		lnet_small_mds_cachep = NULL;
	}

	if (lnet_mes_cachep) {
		kmem_cache_destroy(lnet_mes_cachep);
		lnet_mes_cachep = NULL;
	}
}

static int
lnet_create_remote_nets_table(void)
{
	int i;
	struct list_head *hash;

	LASSERT(the_lnet.ln_remote_nets_hash == NULL);
	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
	CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
	if (hash == NULL) {
		CERROR("Failed to create remote nets hash table\n");
		return -ENOMEM;
	}

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		INIT_LIST_HEAD(&hash[i]);
	the_lnet.ln_remote_nets_hash = hash;
	return 0;
}

static void
lnet_destroy_remote_nets_table(void)
{
	int i;

	if (the_lnet.ln_remote_nets_hash == NULL)
		return;

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));

	CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
			   LNET_REMOTE_NETS_HASH_SIZE);
	the_lnet.ln_remote_nets_hash = NULL;
}

static void
lnet_destroy_locks(void)
{
	if (the_lnet.ln_res_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_res_lock);
		the_lnet.ln_res_lock = NULL;
	}

	if (the_lnet.ln_net_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_net_lock);
		the_lnet.ln_net_lock = NULL;
	}
}

static int
lnet_create_locks(void)
{
	lnet_init_locks();

	the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_res_lock == NULL)
		goto failed;

	the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_net_lock == NULL)
		goto failed;

	return 0;

failed:
	lnet_destroy_locks();
	return -ENOMEM;
}
static void lnet_assert_wire_constants(void)
{
	/* Wire protocol assertions generated by 'wirecheck'
	 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
	 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
	 */

	/* Constants... */
	BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
	BUILD_BUG_ON(LNET_MSG_ACK != 0);
	BUILD_BUG_ON(LNET_MSG_PUT != 1);
	BUILD_BUG_ON(LNET_MSG_GET != 2);
	BUILD_BUG_ON(LNET_MSG_REPLY != 3);
	BUILD_BUG_ON(LNET_MSG_HELLO != 4);

	BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
	BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);

	/* Checks for struct lnet_nid */
	BUILD_BUG_ON((int)sizeof(struct lnet_nid) != 20);
	BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_size) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_size) != 1);
	BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_type) != 1);
	BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_type) != 1);
	BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_num) != 2);
	BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_num) != 2);
	BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_addr) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_addr) != 16);

	/* Checks for struct lnet_process_id_packed */
	BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
	BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);

	/* Checks for struct lnet_handle_wire */
	BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
				   wh_interface_cookie) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
				   wh_object_cookie) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);

	/* Checks for struct lnet_magicversion */
	BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
				   version_minor) != 6);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);

	/* Checks for struct _lnet_hdr_nid4 */
	BUILD_BUG_ON((int)sizeof(struct _lnet_hdr_nid4) != 72);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_nid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_pid) != 16);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_pid) != 20);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, type) != 24);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->type) != 4);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, payload_length) != 28);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->payload_length) != 4);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg) != 32);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg) != 40);

	/* Ack */
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.dst_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.dst_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.mlength) != 56);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.mlength) != 4);

	/* Put */
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ack_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ack_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.hdr_data) != 56);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.hdr_data) != 8);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ptl_index) != 64);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ptl_index) != 4);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.offset) != 68);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.offset) != 4);

	/* Get */
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.return_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.return_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.ptl_index) != 56);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.ptl_index) != 4);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.src_offset) != 60);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.src_offset) != 4);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.sink_length) != 64);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.sink_length) != 4);

	/* Reply */
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.reply.dst_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.reply.dst_wmd) != 16);

	/* Hello */
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.incarnation) != 32);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.incarnation) != 8);
	BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.type) != 40);
	BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.type) != 4);

	/* Checks for struct lnet_ni_status and related constants */
	BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
	BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
	BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);

	/* Checks for struct lnet_ni_status */
	BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_msg_size) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_msg_size) != 4);

	/* Checks for struct lnet_ni_large_status */
	BUILD_BUG_ON((int)sizeof(struct lnet_ni_large_status) != 24);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_large_status, ns_status) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_large_status *)0)->ns_status) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_large_status, ns_nid) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_large_status *)0)->ns_nid) != 20);

	/* Checks for struct lnet_ping_info and related constants */
	BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
	BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
	BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
	BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
	BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
	BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
	BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
	BUILD_BUG_ON(LNET_PING_FEAT_LARGE_ADDR != 32);
	BUILD_BUG_ON(LNET_PING_FEAT_PRIMARY_LARGE != 64);
	BUILD_BUG_ON(LNET_PING_FEAT_BITS != 127);

	/* Checks for struct lnet_ping_info */
	BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
	BUILD_BUG_ON(offsetof(struct lnet_ping_info, pi_ni) != sizeof(struct lnet_ping_info));

	/* Acceptor connection request */
	BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);

	/* Checks for struct lnet_acceptor_connreq */
	BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);

	/* Checks for struct lnet_acceptor_connreq_v2 */
	BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq_v2) != 28);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_version) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_version) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_nid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_nid) != 20);

	/* Checks for struct lnet_counters_common */
	BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
	BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
}
static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
{
	const struct lnet_lnd *lnd;

	/* holding lnd mutex */
	if (type >= NUM_LNDS)
		return NULL;
	lnd = the_lnet.ln_lnds[type];
	LASSERT(!lnd || lnd->lnd_type == type);

	return lnd;
}

unsigned int
lnet_get_lnd_timeout(void)
{
	return lnet_lnd_timeout;
}
EXPORT_SYMBOL(lnet_get_lnd_timeout);

void
lnet_register_lnd(const struct lnet_lnd *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);

	the_lnet.ln_lnds[lnd->lnd_type] = lnd;

	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_register_lnd);

void
lnet_unregister_lnd(const struct lnet_lnd *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);

	the_lnet.ln_lnds[lnd->lnd_type] = NULL;
	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);
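/*
 * Usage sketch (hypothetical LND shown for illustration only): a network
 * driver fills in a struct lnet_lnd and registers it from its module hooks:
 *
 *	static const struct lnet_lnd the_mylnd = {
 *		.lnd_type = SOCKLND,	// or another known LND type
 *		...
 *	};
 *	lnet_register_lnd(&the_mylnd);		// module_init path
 *	lnet_unregister_lnd(&the_mylnd);	// module_exit path
 */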
void
lnet_counters_get_common_locked(struct lnet_counters_common *common)
{
	struct lnet_counters *ctr;
	int i;

	/* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
	 * actually called under the protection of the lnet_net_lock.
	 */
	memset(common, 0, sizeof(*common));

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
		common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
		common->lcc_errors += ctr->lct_common.lcc_errors;
		common->lcc_send_count += ctr->lct_common.lcc_send_count;
		common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
		common->lcc_route_count += ctr->lct_common.lcc_route_count;
		common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
		common->lcc_send_length += ctr->lct_common.lcc_send_length;
		common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
		common->lcc_route_length += ctr->lct_common.lcc_route_length;
		common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
	}
}

void
lnet_counters_get_common(struct lnet_counters_common *common)
{
	lnet_net_lock(LNET_LOCK_EX);
	lnet_counters_get_common_locked(common);
	lnet_net_unlock(LNET_LOCK_EX);
}
EXPORT_SYMBOL(lnet_counters_get_common);

int
lnet_counters_get(struct lnet_counters *counters)
{
	struct lnet_counters *ctr;
	struct lnet_counters_health *health = &counters->lct_health;
	int i, rc = 0;

	memset(counters, 0, sizeof(*counters));

	lnet_net_lock(LNET_LOCK_EX);

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		GOTO(out_unlock, rc = -ENODEV);

	lnet_counters_get_common_locked(&counters->lct_common);

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
		health->lch_resend_count += ctr->lct_health.lch_resend_count;
		health->lch_response_timeout_count +=
				ctr->lct_health.lch_response_timeout_count;
		health->lch_local_interrupt_count +=
				ctr->lct_health.lch_local_interrupt_count;
		health->lch_local_dropped_count +=
				ctr->lct_health.lch_local_dropped_count;
		health->lch_local_aborted_count +=
				ctr->lct_health.lch_local_aborted_count;
		health->lch_local_no_route_count +=
				ctr->lct_health.lch_local_no_route_count;
		health->lch_local_timeout_count +=
				ctr->lct_health.lch_local_timeout_count;
		health->lch_local_error_count +=
				ctr->lct_health.lch_local_error_count;
		health->lch_remote_dropped_count +=
				ctr->lct_health.lch_remote_dropped_count;
		health->lch_remote_error_count +=
				ctr->lct_health.lch_remote_error_count;
		health->lch_remote_timeout_count +=
				ctr->lct_health.lch_remote_timeout_count;
		health->lch_network_timeout_count +=
				ctr->lct_health.lch_network_timeout_count;
	}
out_unlock:
	lnet_net_unlock(LNET_LOCK_EX);
	return rc;
}
EXPORT_SYMBOL(lnet_counters_get);

void
lnet_counters_reset(void)
{
	struct lnet_counters *counters;
	int i;

	lnet_net_lock(LNET_LOCK_EX);

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto avoid_reset;

	cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
		memset(counters, 0, sizeof(struct lnet_counters));
avoid_reset:
	lnet_net_unlock(LNET_LOCK_EX);
}
static char *
lnet_res_type2str(int type)
{
	switch (type) {
	default:
		LBUG();
	case LNET_COOKIE_TYPE_MD:
		return "MD";
	case LNET_COOKIE_TYPE_ME:
		return "ME";
	case LNET_COOKIE_TYPE_EQ:
		return "EQ";
	}
}

static void
lnet_res_container_cleanup(struct lnet_res_container *rec)
{
	int count = 0;

	if (rec->rec_type == 0) /* not set yet, it's an uninitialized container */
		return;

	while (!list_empty(&rec->rec_active)) {
		struct list_head *e = rec->rec_active.next;

		list_del_init(e);
		if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
			lnet_md_free(list_entry(e, struct lnet_libmd, md_list));

		} else { /* NB: Active MEs should be attached on portals */
			LBUG();
		}
		count++;
	}

	if (count > 0) {
		/* Found alive MD/ME/EQ, user really should unlink/free
		 * all of them before finalizing LNet, but if someone didn't,
		 * we have to recycle garbage for them */
		CERROR("%d active elements on exit of %s container\n",
		       count, lnet_res_type2str(rec->rec_type));
	}

	if (rec->rec_lh_hash != NULL) {
		CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
		rec->rec_lh_hash = NULL;
	}

	rec->rec_type = 0; /* mark it as finalized */
}

static int
lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
{
	int rc = 0;
	int i;

	LASSERT(rec->rec_type == 0);

	rec->rec_type = type;
	INIT_LIST_HEAD(&rec->rec_active);

	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;

	/* Arbitrary choice of hash table size */
	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
	if (rec->rec_lh_hash == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < LNET_LH_HASH_SIZE; i++)
		INIT_LIST_HEAD(&rec->rec_lh_hash[i]);

	return 0;

out:
	CERROR("Failed to setup %s resource container\n",
	       lnet_res_type2str(type));
	lnet_res_container_cleanup(rec);
	return rc;
}

static void
lnet_res_containers_destroy(struct lnet_res_container **recs)
{
	struct lnet_res_container *rec;
	int i;

	cfs_percpt_for_each(rec, i, recs)
		lnet_res_container_cleanup(rec);

	cfs_percpt_free(recs);
}

static struct lnet_res_container **
lnet_res_containers_create(int type)
{
	struct lnet_res_container **recs;
	struct lnet_res_container *rec;
	int rc;
	int i;

	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
	if (recs == NULL) {
		CERROR("Failed to allocate %s resource containers\n",
		       lnet_res_type2str(type));
		return NULL;
	}

	cfs_percpt_for_each(rec, i, recs) {
		rc = lnet_res_container_setup(rec, i, type);
		if (rc != 0) {
			lnet_res_containers_destroy(recs);
			return NULL;
		}
	}

	return recs;
}

struct lnet_libhandle *
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
	/* ALWAYS called with lnet_res_lock held */
	struct list_head *head;
	struct lnet_libhandle *lh;
	unsigned int hash;

	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
		return NULL;

	hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
	head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];

	list_for_each_entry(lh, head, lh_hash_chain) {
		if (lh->lh_cookie == cookie)
			return lh;
	}

	return NULL;
}

void
lnet_res_lh_initialize(struct lnet_res_container *rec,
		       struct lnet_libhandle *lh)
{
	/* ALWAYS called with lnet_res_lock held */
	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
	unsigned int hash;

	lh->lh_cookie = rec->rec_lh_cookie;
	rec->rec_lh_cookie += 1 << ibits;

	hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;

	list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
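/*
 * Cookie layout: the low LNET_COOKIE_TYPE_BITS hold the resource type
 * (MD/ME/EQ), the next LNET_CPT_BITS hold the CPT, and the remaining high
 * bits form a per-container counter advanced by 1 << ibits per allocation,
 * so a stale handle can never match a recycled cookie of another type/CPT.
 */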
struct list_head **
lnet_create_array_of_queues(void)
{
	struct list_head **qs;
	struct list_head *q;
	int i;

	qs = cfs_percpt_alloc(lnet_cpt_table(),
			      sizeof(struct list_head));
	if (!qs) {
		CERROR("Failed to allocate queues\n");
		return NULL;
	}

	cfs_percpt_for_each(q, i, qs)
		INIT_LIST_HEAD(q);

	return qs;
}

static int lnet_unprepare(void);

static int
lnet_prepare(lnet_pid_t requested_pid)
{
	/* Prepare to bring up the network */
	struct lnet_res_container **recs;
	int rc = 0;

	if (requested_pid == LNET_PID_ANY) {
		/* Don't instantiate LNET just for me */
		return -ENETDOWN;
	}

	LASSERT(the_lnet.ln_refcount == 0);

	the_lnet.ln_routing = 0;

	LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
	the_lnet.ln_pid = requested_pid;

	INIT_LIST_HEAD(&the_lnet.ln_test_peers);
	INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
	INIT_LIST_HEAD(&the_lnet.ln_nets);
	INIT_LIST_HEAD(&the_lnet.ln_routers);
	INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
	INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
	INIT_LIST_HEAD(&the_lnet.ln_dc_request);
	INIT_LIST_HEAD(&the_lnet.ln_dc_working);
	INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
	INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
	INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
	INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
	init_waitqueue_head(&the_lnet.ln_dc_waitq);
	the_lnet.ln_mt_handler = NULL;
	init_completion(&the_lnet.ln_started);
	atomic_set(&the_lnet.ln_late_msg_count, 0);
	atomic64_set(&the_lnet.ln_late_msg_nsecs, 0);

	rc = lnet_slab_setup();
	if (rc != 0)
		goto failed;

	rc = lnet_create_remote_nets_table();
	if (rc != 0)
		goto failed;

	/*
	 * NB the interface cookie in wire handles guards against delayed
	 * replies and ACKs appearing valid after reboot.
	 */
	the_lnet.ln_interface_cookie = ktime_get_real_ns();

	the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
						sizeof(struct lnet_counters));
	if (the_lnet.ln_counters == NULL) {
		CERROR("Failed to allocate counters for LNet\n");
		rc = -ENOMEM;
		goto failed;
	}

	rc = lnet_peer_tables_create();
	if (rc != 0)
		goto failed;

	rc = lnet_msg_containers_create();
	if (rc != 0)
		goto failed;

	rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
				      LNET_COOKIE_TYPE_EQ);
	if (rc != 0)
		goto failed;

	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
	if (recs == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	the_lnet.ln_md_containers = recs;

	rc = lnet_portals_create();
	if (rc != 0) {
		CERROR("Failed to create portals for LNet: %d\n", rc);
		goto failed;
	}

	the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
	if (!the_lnet.ln_mt_zombie_rstqs) {
		rc = -ENOMEM;
		goto failed;
	}

	return 0;

failed:
	lnet_unprepare();
	return rc;
}

static int
lnet_unprepare(void)
{
	/* NB no LNET_LOCK since this is the last reference. All LND instances
	 * have shut down already, so it is safe to unlink and free all
	 * descriptors, even those that appear committed to a network op (eg MD
	 * with non-zero pending count)
	 */
	lnet_fail_nid(LNET_NID_ANY, 0);

	LASSERT(the_lnet.ln_refcount == 0);
	LASSERT(list_empty(&the_lnet.ln_test_peers));
	LASSERT(list_empty(&the_lnet.ln_nets));

	if (the_lnet.ln_mt_zombie_rstqs) {
		lnet_clean_zombie_rstqs();
		the_lnet.ln_mt_zombie_rstqs = NULL;
	}

	lnet_assert_handler_unused(the_lnet.ln_mt_handler);
	the_lnet.ln_mt_handler = NULL;

	lnet_portals_destroy();

	if (the_lnet.ln_md_containers != NULL) {
		lnet_res_containers_destroy(the_lnet.ln_md_containers);
		the_lnet.ln_md_containers = NULL;
	}

	lnet_res_container_cleanup(&the_lnet.ln_eq_container);

	lnet_msg_containers_destroy();
	lnet_peer_uninit();
	lnet_rtrpools_free(0);

	if (the_lnet.ln_counters != NULL) {
		cfs_percpt_free(the_lnet.ln_counters);
		the_lnet.ln_counters = NULL;
	}
	lnet_destroy_remote_nets_table();
	lnet_udsp_destroy(true);
	lnet_slab_cleanup();

	return 0;
}
struct lnet_ni *
lnet_net2ni_locked(__u32 net_id, int cpt)
{
	struct lnet_ni *ni;
	struct lnet_net *net;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id) {
			ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
					      ni_netlist);
			return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_net2ni_addref(__u32 net)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_net2ni_locked(net, 0);
	if (ni)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_net2ni_addref);

struct lnet_net *
lnet_get_net_locked(__u32 net_id)
{
	struct lnet_net *net;

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id)
			return net;
	}

	return NULL;
}

void
lnet_net_clr_pref_rtrs(struct lnet_net *net)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	list_splice_init(&net->net_rtr_pref_nids, &zombies);
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

int
lnet_net_add_pref_rtr(struct lnet_net *net,
		      struct lnet_nid *gw_nid)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_nid_list *ne;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list can not be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
		if (nid_same(&ne->nl_nid, gw_nid))
			return -EEXIST;
	}

	LIBCFS_ALLOC(ne, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = *gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(LNET_LOCK_EX);
	list_add(&ne->nl_list, &net->net_rtr_pref_nids);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}

bool
lnet_net_is_pref_rtr_locked(struct lnet_net *net, struct lnet_nid *rtr_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_net2str(net->net_id),
	       list_empty(&net->net_rtr_pref_nids));

	if (list_empty(&net->net_rtr_pref_nids))
		return false;

	list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nidstr(&ne->nl_nid),
		       libcfs_nidstr(rtr_nid));
		if (nid_same(rtr_nid, &ne->nl_nid))
			return true;
	}

	return false;
}

static unsigned int
lnet_nid4_cpt_hash(lnet_nid_t nid, unsigned int number)
{
	__u64 key = nid;
	__u16 lnd = LNET_NETTYP(LNET_NIDNET(nid));
	unsigned int cpt;

	if (lnd == KFILND || lnd == GNILND) {
		cpt = hash_long(key, LNET_CPT_BITS);

		/* NB: The number of CPTs needn't be a power of 2 */
		if (cpt >= number)
			cpt = (key + cpt + (cpt >> 1)) % number;
	} else {
		__u64 pair_bits = 0x0001000100010001LLU;
		__u64 mask = pair_bits * 0xFF;
		__u64 pair_sum;

		/* For ipv4 NIDs, use (sum-by-multiplication of nid bytes) mod
		 * (number of CPTs) to match nid to a CPT.
		 */
		pair_sum = (key & mask) + ((key >> 8) & mask);
		pair_sum = (pair_sum * pair_bits) >> 48;
		cpt = (unsigned int)(pair_sum) % number;
	}

	CDEBUG(D_NET, "Match nid %s to cpt %u\n",
	       libcfs_nid2str(nid), cpt);

	return cpt;
}

unsigned int
lnet_nid_cpt_hash(struct lnet_nid *nid, unsigned int number)
{
	unsigned int val;
	unsigned int h = 0;
	int i;

	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);

	if (number == 1)
		return 0;

	if (nid_is_nid4(nid))
		return lnet_nid4_cpt_hash(lnet_nid_to_nid4(nid), number);

	for (i = 0; i < 4; i++)
		h = cfs_hash_32(nid->nid_addr[i] ^ h, 32);
	val = cfs_hash_32(LNET_NID_NET(nid) ^ h, LNET_CPT_BITS);
	if (val < number)
		return val;
	return (unsigned int)(h + val + (val >> 1)) % number;
}
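/*
 * Worked example of the nid4 pair-sum hash: mask is 0x00FF00FF00FF00FF, so
 * (key & mask) + ((key >> 8) & mask) adds adjacent bytes of the NID into
 * four 16-bit lanes; multiplying by pair_bits and shifting by 48 sums those
 * lanes into one value, which is reduced modulo the CPT count. Adjacent IP
 * addresses therefore spread across different CPTs.
 */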
int
lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni)
{
	struct lnet_net *net;

	/* must be called with hold of lnet_net_lock */
	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	/*
	 * If NI is provided then use the CPT identified in the NI cpt
	 * list if one exists. If one doesn't exist, then that NI is
	 * associated with all CPTs and it follows that the net it belongs
	 * to is implicitly associated with all CPTs, so just hash the nid
	 * and return that.
	 */
	if (ni != NULL) {
		if (ni->ni_cpts != NULL)
			return ni->ni_cpts[lnet_nid_cpt_hash(nid,
							     ni->ni_ncpts)];
		else
			return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
	}

	/* no NI provided so look at the net */
	net = lnet_get_net_locked(LNET_NID_NET(nid));

	if (net != NULL && net->net_cpts != NULL) {
		return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
	}

	return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
}

int
lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni)
{
	int cpt;
	int cpt2;

	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	cpt = lnet_net_lock_current();

	cpt2 = lnet_cpt_of_nid_locked(nid, ni);

	lnet_net_unlock(cpt);

	return cpt2;
}
EXPORT_SYMBOL(lnet_nid2cpt);

int
lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni)
{
	struct lnet_nid nid;

	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	lnet_nid4_to_nid(nid4, &nid);
	return lnet_nid2cpt(&nid, ni);
}
EXPORT_SYMBOL(lnet_cpt_of_nid);

int
lnet_islocalnet_locked(__u32 net_id)
{
	struct lnet_net *net;
	bool local;

	net = lnet_get_net_locked(net_id);
	local = net != NULL;

	return local;
}

int
lnet_islocalnet(__u32 net_id)
{
	int cpt;
	bool local;

	cpt = lnet_net_lock_current();

	local = lnet_islocalnet_locked(net_id);

	lnet_net_unlock(cpt);

	return local;
}

struct lnet_ni *
lnet_nid_to_ni_locked(struct lnet_nid *nid, int cpt)
{
	struct lnet_net *net;
	struct lnet_ni *ni;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (nid_same(&ni->ni_nid, nid))
				return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_nid_to_ni_addref(struct lnet_nid *nid)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_nid_to_ni_locked(nid, 0);
	if (ni)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_nid_to_ni_addref);

int
lnet_islocalnid(struct lnet_nid *nid)
{
	struct lnet_ni *ni;
	int cpt;

	cpt = lnet_net_lock_current();
	ni = lnet_nid_to_ni_locked(nid, cpt);
	lnet_net_unlock(cpt);

	return ni != NULL;
}

int
lnet_count_acceptor_nets(void)
{
	/* Return the # of NIs that need the acceptor. */
	int count = 0;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		/* all socklnd type networks should have the acceptor
		 * thread started */
		if (net->net_lnd->lnd_accept != NULL)
			count++;
	}

	lnet_net_unlock(cpt);

	return count;
}
struct lnet_ping_buffer *
lnet_ping_buffer_alloc(int nbytes, gfp_t gfp)
{
	struct lnet_ping_buffer *pbuf;

	LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nbytes), gfp);
	if (pbuf) {
		pbuf->pb_nbytes = nbytes;	/* sizeof of pb_info */
		pbuf->pb_needs_post = false;
		atomic_set(&pbuf->pb_refcnt, 1);
	}

	return pbuf;
}

void
lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
{
	LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
	LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nbytes));
}

static struct lnet_ping_buffer *
lnet_ping_target_create(int nbytes)
{
	struct lnet_ping_buffer *pbuf;

	pbuf = lnet_ping_buffer_alloc(nbytes, GFP_NOFS);
	if (pbuf == NULL) {
		CERROR("Can't allocate ping source [%d]\n", nbytes);
		return NULL;
	}

	pbuf->pb_info.pi_nnis = 0;
	pbuf->pb_info.pi_pid = the_lnet.ln_pid;
	pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
	pbuf->pb_info.pi_features =
		LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;

	return pbuf;
}

static int
lnet_get_net_ni_bytes_locked(struct lnet_net *net)
{
	struct lnet_ni *ni;
	int bytes = 0;

	list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
		bytes += lnet_ping_sts_size(&ni->ni_nid);

	return bytes;
}

static int
lnet_get_ni_bytes(void)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	int bytes = 0;

	lnet_net_lock(0);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
			bytes += lnet_ping_sts_size(&ni->ni_nid);
	}

	lnet_net_unlock(0);

	return bytes;
}

void
lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni_large_status *lstat, *lend;
	struct lnet_ni_status *stat, *end;
	int nnis;
	int i;

	__swab32s(&pbuf->pb_info.pi_magic);
	__swab32s(&pbuf->pb_info.pi_features);
	__swab32s(&pbuf->pb_info.pi_pid);
	__swab32s(&pbuf->pb_info.pi_nnis);
	nnis = pbuf->pb_info.pi_nnis;
	stat = &pbuf->pb_info.pi_ni[0];
	end = (void *)&pbuf->pb_info + pbuf->pb_nbytes;
	for (i = 0; i < nnis && stat + 1 <= end; i++, stat++) {
		__swab64s(&stat->ns_nid);
		__swab32s(&stat->ns_status);
		if (i == 0)
			/* Might be total size */
			__swab32s(&stat->ns_msg_size);
	}

	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_LARGE_ADDR))
		return;

	lstat = (struct lnet_ni_large_status *)stat;
	lend = (void *)end;
	while (lstat + 1 <= lend) {
		__swab32s(&lstat->ns_status);
		/* struct lnet_nid never needs to be swabbed */
		lstat = lnet_ping_sts_next(lstat);
	}
}
int
lnet_ping_info_validate(struct lnet_ping_info *pinfo)
{
	if (!pinfo)
		return -EINVAL;
	if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
		return -EPROTO;
	if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
		return -EPROTO;
	/* Loopback is guaranteed to be present */
	if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
		return -ERANGE;
	if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
		return -EPROTO;
	return 0;
}

static void
lnet_ping_target_destroy(void)
{
	struct lnet_net *net;
	struct lnet_ni *ni;

	lnet_net_lock(LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			lnet_ni_lock(ni);
			ni->ni_status = NULL;
			lnet_ni_unlock(ni);
		}
	}

	lnet_ping_buffer_decref(the_lnet.ln_ping_target);
	the_lnet.ln_ping_target = NULL;

	lnet_net_unlock(LNET_LOCK_EX);
}

static void
lnet_ping_target_event_handler(struct lnet_event *event)
{
	struct lnet_ping_buffer *pbuf = event->md_user_ptr;

	if (event->unlinked)
		lnet_ping_buffer_decref(pbuf);
}

static int
lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
		       struct lnet_handle_md *ping_mdh,
		       int ni_bytes, bool set_eq)
{
	struct lnet_processid id = {
		.nid = LNET_ANY_NID,
		.pid = LNET_PID_ANY
	};
	struct lnet_me *me;
	struct lnet_md md = { NULL };
	int rc;

	if (set_eq)
		the_lnet.ln_ping_target_handler =
			lnet_ping_target_event_handler;

	*ppbuf = lnet_ping_target_create(ni_bytes);
	if (*ppbuf == NULL) {
		rc = -ENOMEM;
		goto fail_free_eq;
	}

	/* Ping target ME/MD */
	me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
			  LNET_PROTO_PING_MATCHBITS, 0,
			  LNET_UNLINK, LNET_INS_AFTER);
	if (IS_ERR(me)) {
		rc = PTR_ERR(me);
		CERROR("Can't create ping target ME: %d\n", rc);
		goto fail_decref_ping_buffer;
	}

	/* initialize md content */
	md.start     = &(*ppbuf)->pb_info;
	md.length    = (*ppbuf)->pb_nbytes;
	md.threshold = LNET_MD_THRESH_INF;
	md.max_size  = 0;
	md.options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
		       LNET_MD_MANAGE_REMOTE;
	md.handler   = the_lnet.ln_ping_target_handler;
	md.user_ptr  = *ppbuf;

	rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
	if (rc != 0) {
		CERROR("Can't attach ping target MD: %d\n", rc);
		goto fail_decref_ping_buffer;
	}
	lnet_ping_buffer_addref(*ppbuf);

	return 0;

fail_decref_ping_buffer:
	LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
	lnet_ping_buffer_decref(*ppbuf);
	*ppbuf = NULL;
fail_free_eq:
	return rc;
}

static void
lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
		    struct lnet_handle_md *ping_mdh)
{
	LNetMDUnlink(*ping_mdh);
	LNetInvalidateMDHandle(ping_mdh);

	/* NB the MD could be busy; this just starts the unlink */
	wait_var_event_warning(&pbuf->pb_refcnt,
			       atomic_read(&pbuf->pb_refcnt) <= 1,
			       "Still waiting for ping data MD to unlink\n");
}

static void
lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	struct lnet_ni_status *ns, *end;
	struct lnet_ni_large_status *lns, *lend;
	int rc;

	pbuf->pb_info.pi_nnis = 0;
	ns = &pbuf->pb_info.pi_ni[0];
	end = (void *)&pbuf->pb_info + pbuf->pb_nbytes;
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (!nid_is_nid4(&ni->ni_nid)) {
				if (ns == &pbuf->pb_info.pi_ni[1]) {
					/* This is primary, and it is long */
					pbuf->pb_info.pi_features |=
						LNET_PING_FEAT_PRIMARY_LARGE;
				}
				continue;
			}
			LASSERT(ns + 1 <= end);
			ns->ns_nid = lnet_nid_to_nid4(&ni->ni_nid);

			lnet_ni_lock(ni);
			ns->ns_status = lnet_ni_get_status_locked(ni);
			ni->ni_status = &ns->ns_status;
			lnet_ni_unlock(ni);

			pbuf->pb_info.pi_nnis++;
			ns++;
		}
	}

	/* Large NIDs are recorded after the nid4 entries, as
	 * struct lnet_ni_large_status.
	 */
	lns = (void *)ns;
	lend = (void *)end;
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (nid_is_nid4(&ni->ni_nid))
				continue;
			LASSERT(lns + 1 <= lend);

			lns->ns_nid = ni->ni_nid;

			lnet_ni_lock(ni);
			lns->ns_status = lnet_ni_get_status_locked(ni);
			ni->ni_status = &lns->ns_status;
			lnet_ni_unlock(ni);

			lns = lnet_ping_sts_next(lns);
		}
	}

	if ((void *)lns > (void *)ns) {
		/* Record total info size */
		pbuf->pb_info.pi_ni[0].ns_msg_size =
			(void *)lns - (void *)&pbuf->pb_info;
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_LARGE_ADDR;
	}

	/* We (ab)use the ns_status of the loopback interface to
	 * transmit the sequence number. The first interface listed
	 * must be the loopback interface.
	 */
	rc = lnet_ping_info_validate(&pbuf->pb_info);
	if (rc) {
		LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
		LBUG();
	}

	LNET_PING_BUFFER_SEQNO(pbuf) =
		atomic_inc_return(&the_lnet.ln_ping_target_seqno);
}
void
lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
			struct lnet_handle_md ping_mdh)
{
	struct lnet_ping_buffer *old_pbuf = NULL;
	struct lnet_handle_md old_ping_md;

	/* switch the NIs to point to the new ping info created */
	lnet_net_lock(LNET_LOCK_EX);

	if (!the_lnet.ln_routing)
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
	if (!lnet_peer_discovery_disabled)
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;

	/* Ensure only known feature bits have been set. */
	LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
	LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));

	lnet_ping_target_install_locked(pbuf);

	if (the_lnet.ln_ping_target) {
		old_pbuf = the_lnet.ln_ping_target;
		old_ping_md = the_lnet.ln_ping_target_md;
	}
	the_lnet.ln_ping_target_md = ping_mdh;
	the_lnet.ln_ping_target = pbuf;

	lnet_net_unlock(LNET_LOCK_EX);

	if (old_pbuf) {
		/* unlink and free the old ping info */
		lnet_ping_md_unlink(old_pbuf, &old_ping_md);
		lnet_ping_buffer_decref(old_pbuf);
	}

	lnet_push_update_to_peers(0);
}
2165 lnet_ping_target_fini(void)
2167 lnet_ping_md_unlink(the_lnet.ln_ping_target,
2168 &the_lnet.ln_ping_target_md);
2170 lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
2171 lnet_ping_target_destroy();
2174 /* Resize the push target. */
2175 int lnet_push_target_resize(void)
2177 struct lnet_handle_md mdh;
2178 struct lnet_handle_md old_mdh;
2179 struct lnet_ping_buffer *pbuf;
2180 struct lnet_ping_buffer *old_pbuf;
2185 nbytes = the_lnet.ln_push_target_nbytes;
2187 CDEBUG(D_NET, "Invalid nbytes %d\n", nbytes);
2191 /* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
2192 * dropped when we need to resize again (see "old_pbuf" below) or when
2193 * LNet is shut down (see lnet_push_target_fini())
2195 pbuf = lnet_ping_buffer_alloc(nbytes, GFP_NOFS);
2197 CDEBUG(D_NET, "Can't allocate pbuf for nbytes %d\n", nbytes);
2201 rc = lnet_push_target_post(pbuf, &mdh);
2203 CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
2204 lnet_ping_buffer_decref(pbuf);
2208 lnet_net_lock(LNET_LOCK_EX);
2209 old_pbuf = the_lnet.ln_push_target;
2210 old_mdh = the_lnet.ln_push_target_md;
2211 the_lnet.ln_push_target = pbuf;
2212 the_lnet.ln_push_target_md = mdh;
2213 lnet_net_unlock(LNET_LOCK_EX);
2216 LNetMDUnlink(old_mdh);
2217 /* Drop ref set by lnet_ping_buffer_alloc() */
2218 lnet_ping_buffer_decref(old_pbuf);
2221 /* Received another push or reply that requires a larger buffer */
2222 if (nbytes < the_lnet.ln_push_target_nbytes)
2225 CDEBUG(D_NET, "nbytes %d success\n", nbytes);
2229 int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
2230 struct lnet_handle_md *mdhp)
2232 struct lnet_processid id = { LNET_ANY_NID, LNET_PID_ANY };
2233 struct lnet_md md = { NULL };
2237 me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
2238 LNET_PROTO_PING_MATCHBITS, 0,
2239 LNET_UNLINK, LNET_INS_AFTER);
2242 CERROR("Can't create push target ME: %d\n", rc);
2246 pbuf->pb_needs_post = false;
2248 /* This reference is dropped by lnet_push_target_event_handler() */
2249 lnet_ping_buffer_addref(pbuf);
2251 /* initialize md content */
2252 md.start = &pbuf->pb_info;
2253 md.length = pbuf->pb_nbytes;
2256 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
2258 md.handler = the_lnet.ln_push_target_handler;
2260 rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
2262 CERROR("Can't attach push MD: %d\n", rc);
2263 lnet_ping_buffer_decref(pbuf);
2264 pbuf->pb_needs_post = true;
2268 CDEBUG(D_NET, "posted push target %p\n", pbuf);
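/* Reference lifecycle note: the ref taken by lnet_push_target_post() above
 * is dropped in lnet_push_target_event_handler(), either on
 * LNET_EVENT_UNLINK or once lnet_peer_push_event() has consumed the
 * incoming push.
 */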
2273 static void lnet_push_target_event_handler(struct lnet_event *ev)
2275 struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
2277 CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
2280 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2281 lnet_swap_pinginfo(pbuf);
2283 if (ev->type == LNET_EVENT_UNLINK) {
2284 /* Drop ref added by lnet_push_target_post() */
2285 lnet_ping_buffer_decref(pbuf);
2289 lnet_peer_push_event(ev);
2291 /* Drop ref added by lnet_push_target_post */
2292 lnet_ping_buffer_decref(pbuf);
2295 /* Initialize the push target. */
2296 static int lnet_push_target_init(void)
2300 if (the_lnet.ln_push_target)
2303 the_lnet.ln_push_target_handler =
2304 lnet_push_target_event_handler;
2306 rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
2309 /* Start at the required minimum, we'll enlarge if needed. */
2310 the_lnet.ln_push_target_nbytes = LNET_PING_INFO_MIN_SIZE;
2312 rc = lnet_push_target_resize();
2314 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2315 the_lnet.ln_push_target_handler = NULL;
2321 /* Clean up the push target. */
2322 static void lnet_push_target_fini(void)
2324 if (!the_lnet.ln_push_target)
2327 /* Unlink and invalidate to prevent new references. */
2328 LNetMDUnlink(the_lnet.ln_push_target_md);
2329 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
2331 /* Wait for the unlink to complete. */
2332 wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
2333 atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
2334 "Still waiting for ping data MD to unlink\n");
2336 /* Drop ref set by lnet_ping_buffer_alloc() */
2337 lnet_ping_buffer_decref(the_lnet.ln_push_target);
2338 the_lnet.ln_push_target = NULL;
2339 the_lnet.ln_push_target_nbytes = 0;
2341 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2342 lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
2343 the_lnet.ln_push_target_handler = NULL;
2347 lnet_ni_tq_credits(struct lnet_ni *ni)
2351 LASSERT(ni->ni_ncpts >= 1);
2353 if (ni->ni_ncpts == 1)
2354 return ni->ni_net->net_tunables.lct_max_tx_credits;
2356 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
2357 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
2358 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
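/* Worked example with hypothetical tunables: lct_max_tx_credits = 64,
 * lct_peer_tx_credits = 16 and ni_ncpts = 8 gives
 *
 *	64 / 8 = 8; max(8, 8 * 16) = 128; min(128, 64) = 64
 *
 * i.e. the per-CPT share is raised to at least 8x the per-peer credits,
 * but never above the net's total max_tx_credits.
 */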
2364 lnet_ni_unlink_locked(struct lnet_ni *ni)
2366 /* move it to the zombie list so nobody can find it anymore */
2367 LASSERT(!list_empty(&ni->ni_netlist));
2368 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
2369 lnet_ni_decref_locked(ni, 0);
2373 lnet_clear_zombies_nis_locked(struct lnet_net *net)
2378 struct list_head *zombie_list = &net->net_ni_zombie;
2381 * Now wait for the NIs I just nuked to show up on the zombie
2382 * list and shut them down in guaranteed thread context
2385 while ((ni = list_first_entry_or_null(zombie_list,
2387 ni_netlist)) != NULL) {
2391 list_del_init(&ni->ni_netlist);
2392 /* the ni should be in deleting state. If it's not, it's a bug */
2394 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
2395 cfs_percpt_for_each(ref, j, ni->ni_refs) {
2398 /* still busy, add it back to zombie list */
2399 list_add(&ni->ni_netlist, zombie_list);
2403 if (!list_empty(&ni->ni_netlist)) {
2404 /* Unlock mutex while waiting to allow other
2405 * threads to read the LNet state and fall through
2408 lnet_net_unlock(LNET_LOCK_EX);
2409 mutex_unlock(&the_lnet.ln_api_mutex);
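/* (i & -i) == i holds only when i is a power of two, so the message
 * below is emitted with exponential backoff instead of once per second.
 */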
2412 if ((i & (-i)) == i) {
2414 "Waiting for zombie LNI %s\n",
2415 libcfs_nidstr(&ni->ni_nid));
2417 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2419 mutex_lock(&the_lnet.ln_api_mutex);
2420 lnet_net_lock(LNET_LOCK_EX);
2424 lnet_net_unlock(LNET_LOCK_EX);
2426 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
2428 LASSERT(!in_interrupt());
2429 /* Holding the LND mutex makes it safe for lnd_shutdown
2430 * to call module_put(). Module unload cannot finish
2431 * until lnet_unregister_lnd() completes, and that
2432 * requires the LND mutex.
2434 mutex_unlock(&the_lnet.ln_api_mutex);
2435 mutex_lock(&the_lnet.ln_lnd_mutex);
2436 (net->net_lnd->lnd_shutdown)(ni);
2437 mutex_unlock(&the_lnet.ln_lnd_mutex);
2438 mutex_lock(&the_lnet.ln_api_mutex);
2441 CDEBUG(D_LNI, "Removed LNI %s\n",
2442 libcfs_nidstr(&ni->ni_nid));
2446 lnet_net_lock(LNET_LOCK_EX);
2450 /* shut down the NI and release refcount */
2452 lnet_shutdown_lndni(struct lnet_ni *ni)
2455 struct lnet_net *net = ni->ni_net;
2457 lnet_net_lock(LNET_LOCK_EX);
2459 ni->ni_state = LNET_NI_STATE_DELETING;
2461 lnet_ni_unlink_locked(ni);
2462 lnet_incr_dlc_seq();
2463 lnet_net_unlock(LNET_LOCK_EX);
2465 /* clear messages for this NI on the lazy portal */
2466 for (i = 0; i < the_lnet.ln_nportals; i++)
2467 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
2469 lnet_net_lock(LNET_LOCK_EX);
2470 lnet_clear_zombies_nis_locked(net);
2471 lnet_net_unlock(LNET_LOCK_EX);
2475 lnet_shutdown_lndnet(struct lnet_net *net)
2479 lnet_net_lock(LNET_LOCK_EX);
2481 list_del_init(&net->net_list);
2483 while ((ni = list_first_entry_or_null(&net->net_ni_list,
2485 ni_netlist)) != NULL) {
2486 lnet_net_unlock(LNET_LOCK_EX);
2487 lnet_shutdown_lndni(ni);
2488 lnet_net_lock(LNET_LOCK_EX);
2491 lnet_net_unlock(LNET_LOCK_EX);
2493 /* Do peer table cleanup for this net */
2494 lnet_peer_tables_cleanup(net);
2500 lnet_shutdown_lndnets(void)
2502 struct lnet_net *net;
2504 struct lnet_msg *msg, *tmp;
2506 /* NB called holding the global mutex */
2508 /* All quiet on the API front */
2509 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING ||
2510 the_lnet.ln_state == LNET_STATE_STOPPING);
2511 LASSERT(the_lnet.ln_refcount == 0);
2513 lnet_net_lock(LNET_LOCK_EX);
2514 the_lnet.ln_state = LNET_STATE_STOPPING;
2517 * move the nets to the zombie list to avoid them being
2518 * picked up for new work. LONET is also included in the
2519 * Nets that will be moved to the zombie list
2521 list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
2523 /* Drop the cached loopback Net. */
2524 if (the_lnet.ln_loni != NULL) {
2525 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
2526 the_lnet.ln_loni = NULL;
2528 lnet_net_unlock(LNET_LOCK_EX);
2530 /* iterate through the net zombie list and delete each net */
2531 while ((net = list_first_entry_or_null(&the_lnet.ln_net_zombie,
2534 lnet_shutdown_lndnet(net);
2536 spin_lock(&the_lnet.ln_msg_resend_lock);
2537 list_splice(&the_lnet.ln_msg_resend, &resend);
2538 spin_unlock(&the_lnet.ln_msg_resend_lock);
2540 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
2541 list_del_init(&msg->msg_list);
2542 msg->msg_no_resend = true;
2543 lnet_finalize(msg, -ECANCELED);
2546 lnet_net_lock(LNET_LOCK_EX);
2547 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
2548 lnet_net_unlock(LNET_LOCK_EX);
2552 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2555 struct lnet_tx_queue *tq;
2557 struct lnet_net *net = ni->ni_net;
2559 mutex_lock(&the_lnet.ln_lnd_mutex);
2562 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2563 ni->ni_lnd_tunables_set = true;
2566 rc = (net->net_lnd->lnd_startup)(ni);
2568 mutex_unlock(&the_lnet.ln_lnd_mutex);
2571 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2572 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2577 ni->ni_state = LNET_NI_STATE_ACTIVE;
2580 /* We keep a reference on the loopback net through the loopback NI */
2581 if (net->net_lnd->lnd_type == LOLND) {
2583 LASSERT(the_lnet.ln_loni == NULL);
2584 the_lnet.ln_loni = ni;
2585 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2586 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2587 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2588 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2592 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2593 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2594 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2595 libcfs_lnd2str(net->net_lnd->lnd_type),
2596 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2598 /* shut down the NI since if we get here then it must've already been started. */
2601 lnet_shutdown_lndni(ni);
2605 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2606 tq->tq_credits_min =
2607 tq->tq_credits_max =
2608 tq->tq_credits = lnet_ni_tq_credits(ni);
2611 atomic_set(&ni->ni_tx_credits,
2612 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2613 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2615 /* Nodes with small feet have little entropy. The NID for this
2616 * node gives the most entropy in the low bits.
2618 add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));
2620 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2621 libcfs_nidstr(&ni->ni_nid),
2622 ni->ni_net->net_tunables.lct_peer_tx_credits,
2623 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2624 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2625 ni->ni_net->net_tunables.lct_peer_timeout);
2633 static const struct lnet_lnd *lnet_load_lnd(u32 lnd_type)
2635 const struct lnet_lnd *lnd;
2638 mutex_lock(&the_lnet.ln_lnd_mutex);
2639 lnd = lnet_find_lnd_by_type(lnd_type);
2641 mutex_unlock(&the_lnet.ln_lnd_mutex);
2642 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2643 mutex_lock(&the_lnet.ln_lnd_mutex);
2645 lnd = lnet_find_lnd_by_type(lnd_type);
2647 mutex_unlock(&the_lnet.ln_lnd_mutex);
2648 CERROR("Can't load LND %s, module %s, rc=%d\n",
2649 libcfs_lnd2str(lnd_type),
2650 libcfs_lnd2modname(lnd_type), rc);
2651 #ifndef HAVE_MODULE_LOADING_SUPPORT
2652 LCONSOLE_ERROR_MSG(0x104,
2653 "Your kernel must be compiled with kernel module loading support.");
2655 return ERR_PTR(-EINVAL);
2658 mutex_unlock(&the_lnet.ln_lnd_mutex);
2664 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2667 struct lnet_net *net_l = NULL;
2668 LIST_HEAD(local_ni_list);
2672 const struct lnet_lnd *lnd;
2674 net->net_tunables.lct_peer_timeout;
2676 net->net_tunables.lct_max_tx_credits;
2677 int peerrtrcredits =
2678 net->net_tunables.lct_peer_rtr_credits;
2681 * make sure that this net is unique. If it isn't then
2682 * we are adding interfaces to an already existing network, and
2683 * 'net' is just a convenient way to pass in the list.
2684 * if it is unique we need to find the LND and load it if necessary.
2687 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2688 lnd_type = LNET_NETTYP(net->net_id);
2690 lnd = lnet_load_lnd(lnd_type);
2696 mutex_lock(&the_lnet.ln_lnd_mutex);
2698 mutex_unlock(&the_lnet.ln_lnd_mutex);
2704 * net_l: if the network being added is unique then net_l
2705 *        will point to that network
2706 *        if the network being added is not unique then
2707 *        net_l points to the existing network.
2709 * When we enter the loop below, we'll pick NIs off the
2710 * network being added and start them up, then add them to
2711 * a local ni list. Once we've successfully started all
2712 * the NIs then we join the local NI list (of started-up
2713 * networks) with the net_l->net_ni_list, which should
2714 * point to the correct network to add the new ni list to.
2716 * If any of the new NIs fail to start up, then we want to
2717 * iterate through the local ni list, which should include
2718 * any NIs which were successfully started up, and shut them down.
2721 * After that we want to delete the network being added,
2722 * to avoid a memory leak.
2724 while ((ni = list_first_entry_or_null(&net->net_ni_added,
2726 ni_netlist)) != NULL) {
2727 list_del_init(&ni->ni_netlist);
2729 /* make sure that the NI we're about to start
2730 * up is actually unique. If it's not, fail. */
2731 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2732 ni->ni_interface)) {
2737 /* adjust the pointer to the parent network, just in case
2738 * the net is a duplicate */
2741 rc = lnet_startup_lndni(ni, tun);
2747 list_add_tail(&ni->ni_netlist, &local_ni_list);
2752 lnet_net_lock(LNET_LOCK_EX);
2753 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2754 lnet_incr_dlc_seq();
2755 lnet_net_unlock(LNET_LOCK_EX);
2757 /* if the network is not unique then we don't want to keep
2758 * it around after we're done. Free it. Otherwise add that
2759 * net to the global the_lnet.ln_nets */
2760 if (net_l != net && net_l != NULL) {
2762 * TODO - note. currently the tunables cannot be updated
2768 * restore tunables after it has been overwritten by the
2771 if (peer_timeout != -1)
2772 net->net_tunables.lct_peer_timeout = peer_timeout;
2773 if (maxtxcredits != -1)
2774 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2775 if (peerrtrcredits != -1)
2776 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2778 lnet_net_lock(LNET_LOCK_EX);
2779 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2780 lnet_net_unlock(LNET_LOCK_EX);
2787 * shut down the new NIs that are being started up
2788 * free the NET being started
2790 while ((ni = list_first_entry_or_null(&local_ni_list,
2792 ni_netlist)) != NULL)
2793 lnet_shutdown_lndni(ni);
2802 lnet_startup_lndnets(struct list_head *netlist)
2804 struct lnet_net *net;
2809 * Change to running state before bringing up the LNDs. This
2810 * allows lnet_shutdown_lndnets() to assert that we've passed through here.
2813 lnet_net_lock(LNET_LOCK_EX);
2814 the_lnet.ln_state = LNET_STATE_RUNNING;
2815 lnet_net_unlock(LNET_LOCK_EX);
2817 while ((net = list_first_entry_or_null(netlist,
2819 net_list)) != NULL) {
2820 list_del_init(&net->net_list);
2822 rc = lnet_startup_lndnet(net, NULL);
2832 lnet_shutdown_lndnets();
2837 static int lnet_genl_parse_list(struct sk_buff *msg,
2838 const struct ln_key_list *data[], u16 idx)
2840 const struct ln_key_list *list = data[idx];
2841 const struct ln_key_props *props;
2842 struct nlattr *node;
2848 if (!list->lkl_maxattr)
2851 props = list->lkl_list;
2855 node = nla_nest_start(msg, LN_SCALAR_ATTR_LIST);
2859 for (count = 1; count <= list->lkl_maxattr; count++) {
2860 struct nlattr *key = nla_nest_start(msg, count);
2863 nla_put_u16(msg, LN_SCALAR_ATTR_LIST_SIZE,
2866 nla_put_u16(msg, LN_SCALAR_ATTR_INDEX, count);
2867 if (props[count].lkp_value)
2868 nla_put_string(msg, LN_SCALAR_ATTR_VALUE,
2869 props[count].lkp_value);
2870 if (props[count].lkp_key_format)
2871 nla_put_u16(msg, LN_SCALAR_ATTR_KEY_FORMAT,
2872 props[count].lkp_key_format);
2873 nla_put_u16(msg, LN_SCALAR_ATTR_NLA_TYPE,
2874 props[count].lkp_data_type);
2875 if (props[count].lkp_data_type == NLA_NESTED) {
2878 rc = lnet_genl_parse_list(msg, data, ++idx);
2884 nla_nest_end(msg, key);
2887 nla_nest_end(msg, node);
2891 int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
2892 const struct genl_family *family, int flags,
2893 u8 cmd, const struct ln_key_list *data[])
2901 hdr = genlmsg_put(msg, portid, seq, family, flags, cmd);
2903 GOTO(canceled, rc = -EMSGSIZE);
2905 rc = lnet_genl_parse_list(msg, data, 0);
2909 genlmsg_end(msg, hdr);
2912 genlmsg_cancel(msg, hdr);
2913 return rc > 0 ? 0 : rc;
2915 EXPORT_SYMBOL(lnet_genl_send_scalar_list);
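/* Usage sketch for lnet_genl_send_scalar_list() (hypothetical key list;
 * the real ones, e.g. net_props_list below, have the same shape):
 *
 *	static const struct ln_key_list example_list = {
 *		.lkl_maxattr = 1,
 *		.lkl_list = {
 *			[1] = {
 *				.lkp_value = "example",
 *				.lkp_data_type = NLA_STRING,
 *			},
 *		},
 *	};
 *	static const struct ln_key_list *all[] = { &example_list, NULL };
 *
 *	rc = lnet_genl_send_scalar_list(msg, portid, seq, &lnet_family,
 *					NLM_F_MULTI, cmd, all);
 *
 * Each NLA_NESTED property makes lnet_genl_parse_list() recurse into the
 * next list in the array, so nested lists must follow their parent.
 */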
2917 static struct genl_family lnet_family;
2920 * Initialize LNet library.
2922 * Automatically called at module loading time. Caller has to call
2923 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2924 * latter returned 0. It must be called exactly once.
2926 * \retval 0 on success
2927 * \retval -ve on failures.
2929 int lnet_lib_init(void)
2933 lnet_assert_wire_constants();
2935 /* refer to global cfs_cpt_table for now */
2936 the_lnet.ln_cpt_table = cfs_cpt_tab;
2937 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
2939 LASSERT(the_lnet.ln_cpt_number > 0);
2940 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2941 /* we are under risk of consuming all lh_cookie */
2942 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2943 "please change setting of CPT-table and retry\n",
2944 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2948 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2949 the_lnet.ln_cpt_bits++;
2951 rc = lnet_create_locks();
2953 CERROR("Can't create LNet global locks: %d\n", rc);
2957 rc = genl_register_family(&lnet_family);
2959 lnet_destroy_locks();
2960 CERROR("Can't register LNet netlink family: %d\n", rc);
2964 the_lnet.ln_refcount = 0;
2965 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2966 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2968 /* The hash table size is the number of bits it takes to express the set
2969 * ln_num_routes, minus 1 (better to underestimate than over so we
2970 * don't waste memory). */
2971 if (rnet_htable_size <= 0)
2972 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2973 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2974 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2975 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2976 order_base_2(rnet_htable_size) - 1);
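/* e.g. a (hypothetical) rnet_htable_size of 128 yields
 * order_base_2(128) - 1 = 6 bits, i.e. a 64-bucket remote-net hash table;
 * the max_t() guarantees at least 1 bit for very small values.
 */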
2978 /* All LNDs apart from the LOLND are in separate modules. They
2979 * register themselves when their module loads, and unregister
2980 * themselves when their module is unloaded. */
2981 lnet_register_lnd(&the_lolnd);
2986 * Finalize LNet library.
2988 * \pre lnet_lib_init() called with success.
2989 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2991 * As this happens at module-unload, all lnds must already be unloaded,
2992 * so they must already be unregistered.
2994 void lnet_lib_exit(void)
2998 LASSERT(the_lnet.ln_refcount == 0);
2999 lnet_unregister_lnd(&the_lolnd);
3000 for (i = 0; i < NUM_LNDS; i++)
3001 LASSERT(!the_lnet.ln_lnds[i]);
3002 lnet_destroy_locks();
3003 genl_unregister_family(&lnet_family);
3007 * Set LNet PID and start LNet interfaces, routing, and forwarding.
3009 * Users must call this function at least once before any other functions.
3010 * For each successful call there must be a corresponding call to
3011 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is ignored.
3014 * The PID used by LNet may be different from the one requested.
3017 * \param requested_pid PID requested by the caller.
3019 * \return >= 0 on success, and < 0 error code on failures.
3022 LNetNIInit(lnet_pid_t requested_pid)
3024 int im_a_router = 0;
3027 struct lnet_ping_buffer *pbuf;
3028 struct lnet_handle_md ping_mdh;
3029 LIST_HEAD(net_head);
3030 struct lnet_net *net;
3032 mutex_lock(&the_lnet.ln_api_mutex);
3034 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
3036 if (the_lnet.ln_state == LNET_STATE_STOPPING) {
3037 mutex_unlock(&the_lnet.ln_api_mutex);
3041 if (the_lnet.ln_refcount > 0) {
3042 rc = the_lnet.ln_refcount++;
3043 mutex_unlock(&the_lnet.ln_api_mutex);
3047 rc = lnet_prepare(requested_pid);
3049 mutex_unlock(&the_lnet.ln_api_mutex);
3053 /* create a net for the loopback network */
3054 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
3057 goto err_empty_list;
3060 /* Add in the loopback NI */
3061 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
3063 goto err_empty_list;
3066 if (use_tcp_bonding)
3067 CWARN("use_tcp_bonding has been removed. Use Multi-Rail and Dynamic Discovery instead, see LU-13641\n");
3069 /* If LNet is being initialized via DLC it is possible
3070 * that the user requests not to load module parameters (ones which
3071 * are supported by DLC) on initialization. Therefore, make sure not
3072 * to load networks, routes and forwarding from module parameters
3073 * in this case. On cleanup in case of failure only clean up
3074 * routes if they have been loaded */
3075 if (!the_lnet.ln_nis_from_mod_params) {
3076 rc = lnet_parse_networks(&net_head, lnet_get_networks());
3078 goto err_empty_list;
3081 rc = lnet_startup_lndnets(&net_head);
3083 goto err_empty_list;
3085 if (!the_lnet.ln_nis_from_mod_params) {
3086 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
3088 goto err_shutdown_lndnis;
3090 rc = lnet_rtrpools_alloc(im_a_router);
3092 goto err_destroy_routes;
3095 rc = lnet_acceptor_start();
3097 goto err_destroy_routes;
3099 the_lnet.ln_refcount = 1;
3100 /* Now I may use my own API functions... */
3102 ni_bytes = LNET_PING_INFO_HDR_SIZE;
3103 list_for_each_entry(net, &the_lnet.ln_nets, net_list)
3104 ni_bytes += lnet_get_net_ni_bytes_locked(net);
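/* Sizing sketch: with only nid4 NIs this comes to LNET_PING_INFO_HDR_SIZE
 * plus one struct lnet_ni_status per NI (loopback included); large NIDs
 * contribute variable-size struct lnet_ni_large_status entries instead.
 */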
3106 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_bytes, true);
3108 goto err_acceptor_stop;
3110 lnet_ping_target_update(pbuf, ping_mdh);
3112 the_lnet.ln_mt_handler = lnet_mt_event_handler;
3114 rc = lnet_push_target_init();
3118 rc = lnet_peer_discovery_start();
3120 goto err_destroy_push_target;
3122 rc = lnet_monitor_thr_start();
3124 goto err_stop_discovery_thr;
3127 lnet_router_debugfs_init();
3129 mutex_unlock(&the_lnet.ln_api_mutex);
3131 complete_all(&the_lnet.ln_started);
3133 /* wait for all routers to start */
3134 lnet_wait_router_start();
3138 err_stop_discovery_thr:
3139 lnet_peer_discovery_stop();
3140 err_destroy_push_target:
3141 lnet_push_target_fini();
3143 lnet_ping_target_fini();
3145 the_lnet.ln_refcount = 0;
3146 lnet_acceptor_stop();
3148 if (!the_lnet.ln_nis_from_mod_params)
3149 lnet_destroy_routes();
3150 err_shutdown_lndnis:
3151 lnet_shutdown_lndnets();
3155 mutex_unlock(&the_lnet.ln_api_mutex);
3156 while ((net = list_first_entry_or_null(&net_head,
3158 net_list)) != NULL) {
3159 list_del_init(&net->net_list);
3164 EXPORT_SYMBOL(LNetNIInit);
3167 * Stop LNet interfaces, routing, and forwarding.
3169 * Users must call this function once for each successful call to LNetNIInit().
3170 * Once the LNetNIFini() operation has been started, the results of pending
3171 * API operations are undefined.
3173 * \return always 0 for current implementation.
3178 mutex_lock(&the_lnet.ln_api_mutex);
3180 LASSERT(the_lnet.ln_refcount > 0);
3182 if (the_lnet.ln_refcount != 1) {
3183 the_lnet.ln_refcount--;
3185 LASSERT(!the_lnet.ln_niinit_self);
3187 lnet_net_lock(LNET_LOCK_EX);
3188 the_lnet.ln_state = LNET_STATE_STOPPING;
3189 lnet_net_unlock(LNET_LOCK_EX);
3193 lnet_router_debugfs_fini();
3194 lnet_monitor_thr_stop();
3195 lnet_peer_discovery_stop();
3196 lnet_push_target_fini();
3197 lnet_ping_target_fini();
3199 /* Teardown fns that use my own API functions BEFORE here */
3200 the_lnet.ln_refcount = 0;
3202 lnet_acceptor_stop();
3203 lnet_destroy_routes();
3204 lnet_shutdown_lndnets();
3208 mutex_unlock(&the_lnet.ln_api_mutex);
3211 EXPORT_SYMBOL(LNetNIFini);
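/* Typical usage from a kernel module (illustrative sketch):
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	...
 *	LNetNIFini();
 *
 * Calls nest: only the LNetNIFini() matching the first LNetNIInit()
 * actually tears the stack down; inner pairs just adjust ln_refcount.
 */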
3214 * Grabs the ni data from the ni structure and fills the out parameters.
3217 * \param[in] ni network interface structure
3218 * \param[out] cfg_ni NI config information
3219 * \param[out] tun network and LND tunables
3222 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
3223 struct lnet_ioctl_config_lnd_tunables *tun,
3224 struct lnet_ioctl_element_stats *stats,
3227 size_t min_size = 0;
3230 if (!ni || !cfg_ni || !tun || !nid_is_nid4(&ni->ni_nid))
3233 if (ni->ni_interface != NULL) {
3234 strncpy(cfg_ni->lic_ni_intf,
3236 sizeof(cfg_ni->lic_ni_intf));
3239 cfg_ni->lic_nid = lnet_nid_to_nid4(&ni->ni_nid);
3240 cfg_ni->lic_status = lnet_ni_get_status_locked(ni);
3241 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
3243 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
3246 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
3247 LNET_STATS_TYPE_SEND);
3248 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
3249 LNET_STATS_TYPE_RECV);
3250 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
3251 LNET_STATS_TYPE_DROP);
3255 * tun->lt_tun will always be present, but in order to be
3256 * backwards compatible, we need to deal with the case where
3257 * tun->lt_tun is smaller than what the kernel has, because it
3258 * comes from an older version of a userspace program. In that
3259 * case we copy only as much information as we have available space for.
3261 min_size = tun_size - sizeof(tun->lt_cmn);
3262 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
3264 /* copy over the cpts */
3265 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
3266 ni->ni_cpts == NULL) {
3267 for (i = 0; i < ni->ni_ncpts; i++)
3268 cfg_ni->lic_cpts[i] = i;
3271 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
3272 i < LNET_MAX_SHOW_NUM_CPT;
3274 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
3276 cfg_ni->lic_ncpts = ni->ni_ncpts;
3280 * NOTE: This is a legacy function left in the code to be backwards
3281 * compatible with older userspace programs. It should eventually be removed.
3284 * Grabs the ni data from the ni structure and fills the out parameters.
3287 * \param[in] ni network interface structure
3288 * \param[out] config config information
3291 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
3292 struct lnet_ioctl_config_data *config)
3294 struct lnet_ioctl_net_config *net_config;
3295 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
3296 size_t min_size, tunable_size = 0;
3299 if (!ni || !config || !nid_is_nid4(&ni->ni_nid))
3302 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
3306 if (!ni->ni_interface)
3309 strncpy(net_config->ni_interface,
3311 sizeof(net_config->ni_interface));
3313 config->cfg_nid = lnet_nid_to_nid4(&ni->ni_nid);
3314 config->cfg_config_u.cfg_net.net_peer_timeout =
3315 ni->ni_net->net_tunables.lct_peer_timeout;
3316 config->cfg_config_u.cfg_net.net_max_tx_credits =
3317 ni->ni_net->net_tunables.lct_max_tx_credits;
3318 config->cfg_config_u.cfg_net.net_peer_tx_credits =
3319 ni->ni_net->net_tunables.lct_peer_tx_credits;
3320 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
3321 ni->ni_net->net_tunables.lct_peer_rtr_credits;
3323 net_config->ni_status = lnet_ni_get_status_locked(ni);
3326 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
3328 for (i = 0; i < num_cpts; i++)
3329 net_config->ni_cpts[i] = ni->ni_cpts[i];
3331 config->cfg_ncpts = num_cpts;
3335 * See if user land tools sent in a newer and larger version
3336 * of struct lnet_tunables than what the kernel uses.
3338 min_size = sizeof(*config) + sizeof(*net_config);
3340 if (config->cfg_hdr.ioc_len > min_size)
3341 tunable_size = config->cfg_hdr.ioc_len - min_size;
3343 /* Don't copy too much data to user space */
3344 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
3345 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
3347 if (lnd_cfg && min_size) {
3348 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
3349 config->cfg_config_u.cfg_net.net_interface_count = 1;
3351 /* Tell user land that kernel side has less data */
3352 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
3353 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
3354 config->cfg_hdr.ioc_len -= min_size;
3360 lnet_get_ni_idx_locked(int idx)
3363 struct lnet_net *net;
3365 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3366 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3375 int lnet_get_net_healthv_locked(struct lnet_net *net)
3378 int best_healthv = 0;
3379 int healthv, ni_fatal;
3381 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3382 healthv = atomic_read(&ni->ni_healthv);
3383 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
3384 if (!ni_fatal && healthv > best_healthv)
3385 best_healthv = healthv;
3388 return best_healthv;
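/* Iteration sketch for the helper below (caller holds the net lock;
 * visit() is a placeholder):
 *
 *	struct lnet_ni *ni = NULL;
 *
 *	while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL)
 *		visit(ni);
 *
 * Passing a specific net as 'mynet' restricts the walk to that net's
 * ni list.
 */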
3392 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
3395 struct lnet_net *net = mynet;
3398 * It is possible that the net has been cleaned out while there is
3399 * a message being sent. This function accesses the net without
3400 * checking if the list is empty
3404 net = list_first_entry(&the_lnet.ln_nets,
3407 if (list_empty(&net->net_ni_list))
3409 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
3415 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
3416 /* if you reached the end of the ni list and the net is
3417 * specified, then there are no more nis in that net */
3421 /* we reached the end of this net ni list. move to the next net */
3423 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
3424 /* no more nets and no more NIs. */
3427 /* get the next net */
3428 net = list_first_entry(&prev->ni_net->net_list, struct lnet_net,
3430 if (list_empty(&net->net_ni_list))
3432 /* get the ni on it */
3433 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
3439 if (list_empty(&prev->ni_netlist))
3442 /* there are more nis left */
3443 ni = list_first_entry(&prev->ni_netlist, struct lnet_ni, ni_netlist);
3449 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3454 int idx = config->cfg_count;
3456 cpt = lnet_net_lock_current();
3458 ni = lnet_get_ni_idx_locked(idx);
3463 lnet_fill_ni_info_legacy(ni, config);
3467 lnet_net_unlock(cpt);
3472 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3473 struct lnet_ioctl_config_lnd_tunables *tun,
3474 struct lnet_ioctl_element_stats *stats,
3481 if (!cfg_ni || !tun || !stats)
3484 cpt = lnet_net_lock_current();
3486 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3491 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3495 lnet_net_unlock(cpt);
3499 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3508 cpt = lnet_net_lock_current();
3510 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3513 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3517 lnet_net_unlock(cpt);
3522 static int lnet_add_net_common(struct lnet_net *net,
3523 struct lnet_ioctl_config_lnd_tunables *tun)
3525 struct lnet_handle_md ping_mdh;
3526 struct lnet_ping_buffer *pbuf;
3527 struct lnet_remotenet *rnet;
3532 lnet_net_lock(LNET_LOCK_EX);
3533 rnet = lnet_find_rnet_locked(net->net_id);
3534 lnet_net_unlock(LNET_LOCK_EX);
3536 * make sure that the net added doesn't invalidate the current
3537 * configuration LNet is keeping
3540 CERROR("Adding net %s will invalidate routing configuration\n",
3541 libcfs_net2str(net->net_id));
3547 memcpy(&net->net_tunables,
3548 &tun->lt_cmn, sizeof(net->net_tunables));
3550 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3552 net_id = net->net_id;
3554 rc = lnet_startup_lndnet(net,
3555 (tun) ? &tun->lt_tun : NULL);
3559 /* make sure you calculate the correct number of slots in the ping
3560 * buffer. Since the ping info is a flattened list of all the NIs,
3561 * we should allocate enough slots to accommodate the number of NIs
3562 * which will be added.
3564 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3565 LNET_PING_INFO_HDR_SIZE +
3566 lnet_get_ni_bytes(),
3569 lnet_shutdown_lndnet(net);
3573 lnet_net_lock(LNET_LOCK_EX);
3574 net = lnet_get_net_locked(net_id);
3577 /* apply the UDSPs */
3578 rc = lnet_udsp_apply_policies_on_net(net);
3580 CERROR("Failed to apply UDSPs on local net %s\n",
3581 libcfs_net2str(net->net_id));
3583 /* At this point we lost track of which NI was just added, so we
3584 * just re-apply the policies on all of the NIs on this net
3586 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3587 rc = lnet_udsp_apply_policies_on_ni(ni);
3589 CERROR("Failed to apply UDSPs on ni %s\n",
3590 libcfs_nidstr(&ni->ni_nid));
3592 lnet_net_unlock(LNET_LOCK_EX);
3595 * Start the acceptor thread if this is the first network
3596 * being added that requires the thread.
3598 if (net->net_lnd->lnd_accept) {
3599 rc = lnet_acceptor_start();
3601 /* shut down the net that we just started */
3602 CERROR("Failed to start up acceptor thread\n");
3603 lnet_shutdown_lndnet(net);
3608 lnet_net_lock(LNET_LOCK_EX);
3609 lnet_peer_net_added(net);
3610 lnet_net_unlock(LNET_LOCK_EX);
3612 lnet_ping_target_update(pbuf, ping_mdh);
3617 lnet_ping_md_unlink(pbuf, &ping_mdh);
3618 lnet_ping_buffer_decref(pbuf);
3623 lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
3626 if (tun->lt_cmn.lct_peer_timeout < 0)
3627 tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
3628 if (!tun->lt_cmn.lct_peer_tx_credits)
3629 tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
3630 if (!tun->lt_cmn.lct_max_tx_credits)
3631 tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
3635 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3636 struct lnet_ioctl_config_lnd_tunables *tun)
3638 struct lnet_net *net;
3641 LIST_HEAD(net_head);
3643 rc = lnet_parse_ip2nets(&nets, ip2nets);
3647 rc = lnet_parse_networks(&net_head, nets);
3651 lnet_set_tune_defaults(tun);
3653 mutex_lock(&the_lnet.ln_api_mutex);
3654 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3659 while ((net = list_first_entry_or_null(&net_head,
3661 net_list)) != NULL) {
3662 list_del_init(&net->net_list);
3663 rc = lnet_add_net_common(net, tun);
3669 mutex_unlock(&the_lnet.ln_api_mutex);
3671 while ((net = list_first_entry_or_null(&net_head,
3673 net_list)) != NULL) {
3674 list_del_init(&net->net_list);
3680 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf, u32 net_id,
3681 struct lnet_ioctl_config_lnd_tunables *tun)
3683 struct lnet_net *net;
3688 /* handle legacy ip2nets from DLC */
3689 if (conf->lic_legacy_ip2nets[0] != '\0')
3690 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3693 lnd_type = LNET_NETTYP(net_id);
3695 if (!libcfs_isknown_lnd(lnd_type)) {
3696 CERROR("No valid net and lnd information provided\n");
3700 net = lnet_net_alloc(net_id, NULL);
3704 for (i = 0; i < conf->lic_ncpts; i++) {
3705 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER) {
3711 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3718 lnet_set_tune_defaults(tun);
3720 mutex_lock(&the_lnet.ln_api_mutex);
3721 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3725 rc = lnet_add_net_common(net, tun);
3728 mutex_unlock(&the_lnet.ln_api_mutex);
3730 /* If the NI already exists, delete this new unused copy */
3737 int lnet_dyn_del_ni(struct lnet_nid *nid)
3739 struct lnet_net *net;
3741 u32 net_id = LNET_NID_NET(nid);
3742 struct lnet_ping_buffer *pbuf;
3743 struct lnet_handle_md ping_mdh;
3747 /* don't allow userspace to shut down the LOLND */
3748 if (LNET_NETTYP(net_id) == LOLND)
3751 mutex_lock(&the_lnet.ln_api_mutex);
3752 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3754 goto unlock_api_mutex;
3759 net = lnet_get_net_locked(net_id);
3761 CERROR("net %s not found\n",
3762 libcfs_net2str(net_id));
3767 if (!nid_addr_is_set(nid)) {
3768 /* remove the entire net */
3769 net_bytes = lnet_get_net_ni_bytes_locked(net);
3773 /* create and link a new ping info, before removing the old one */
3774 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3775 LNET_PING_INFO_HDR_SIZE +
3776 lnet_get_ni_bytes() - net_bytes,
3779 goto unlock_api_mutex;
3781 lnet_shutdown_lndnet(net);
3783 lnet_acceptor_stop();
3785 lnet_ping_target_update(pbuf, ping_mdh);
3787 goto unlock_api_mutex;
3790 ni = lnet_nid_to_ni_locked(nid, 0);
3792 CERROR("nid %s not found\n", libcfs_nidstr(nid));
3797 net_bytes = lnet_get_net_ni_bytes_locked(net);
3798 net_empty = list_is_singular(&net->net_ni_list);
3802 /* create and link a new ping info, before removing the old one */
3803 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3804 (LNET_PING_INFO_HDR_SIZE +
3805 lnet_get_ni_bytes() -
3806 lnet_ping_sts_size(&ni->ni_nid)),
3809 goto unlock_api_mutex;
3811 lnet_shutdown_lndni(ni);
3813 lnet_acceptor_stop();
3815 lnet_ping_target_update(pbuf, ping_mdh);
3817 /* check if the net is empty and remove it if it is */
3819 lnet_shutdown_lndnet(net);
3821 goto unlock_api_mutex;
3826 mutex_unlock(&the_lnet.ln_api_mutex);
3832 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3833 * They are only expected to be called for unique networks.
3834 * That can happen as a result of older DLC library
3835 * calls. Multi-Rail DLC and beyond no longer use these APIs.
3838 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3840 struct lnet_net *net;
3841 LIST_HEAD(net_head);
3843 struct lnet_ioctl_config_lnd_tunables tun;
3844 const char *nets = conf->cfg_config_u.cfg_net.net_intf;
3846 /* Create net/ni structures for the network string */
3847 rc = lnet_parse_networks(&net_head, nets);
3849 return rc == 0 ? -EINVAL : rc;
3851 mutex_lock(&the_lnet.ln_api_mutex);
3852 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3854 goto out_unlock_clean;
3858 rc = -EINVAL; /* only add one network per call */
3859 goto out_unlock_clean;
3862 net = list_first_entry(&net_head, struct lnet_net, net_list);
3863 list_del_init(&net->net_list);
3865 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3867 memset(&tun, 0, sizeof(tun));
3869 tun.lt_cmn.lct_peer_timeout =
3870 (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
3871 conf->cfg_config_u.cfg_net.net_peer_timeout;
3872 tun.lt_cmn.lct_peer_tx_credits =
3873 (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
3874 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3875 tun.lt_cmn.lct_peer_rtr_credits =
3876 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3877 tun.lt_cmn.lct_max_tx_credits =
3878 (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
3879 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3881 rc = lnet_add_net_common(net, &tun);
3884 mutex_unlock(&the_lnet.ln_api_mutex);
3885 /* net_head list is empty in success case */
3886 while ((net = list_first_entry_or_null(&net_head,
3888 net_list)) != NULL) {
3889 list_del_init(&net->net_list);
3896 lnet_dyn_del_net(u32 net_id)
3898 struct lnet_net *net;
3899 struct lnet_ping_buffer *pbuf;
3900 struct lnet_handle_md ping_mdh;
3901 int net_ni_bytes, rc;
3903 /* don't allow userspace to shut down the LOLND */
3904 if (LNET_NETTYP(net_id) == LOLND)
3907 mutex_lock(&the_lnet.ln_api_mutex);
3908 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3915 net = lnet_get_net_locked(net_id);
3922 net_ni_bytes = lnet_get_net_ni_bytes_locked(net);
3926 /* create and link a new ping info, before removing the old one */
3927 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3928 LNET_PING_INFO_HDR_SIZE +
3929 lnet_get_ni_bytes() - net_ni_bytes,
3934 lnet_shutdown_lndnet(net);
3936 lnet_acceptor_stop();
3938 lnet_ping_target_update(pbuf, ping_mdh);
3941 mutex_unlock(&the_lnet.ln_api_mutex);
3946 void lnet_update_ping_buffer(void)
3948 struct lnet_ping_buffer *pbuf;
3949 struct lnet_handle_md ping_mdh;
3951 if (the_lnet.ln_routing)
3954 mutex_lock(&the_lnet.ln_api_mutex);
3956 if (!lnet_ping_target_setup(&pbuf, &ping_mdh,
3957 LNET_PING_INFO_HDR_SIZE +
3958 lnet_get_ni_bytes(),
3960 lnet_ping_target_update(pbuf, ping_mdh);
3962 mutex_unlock(&the_lnet.ln_api_mutex);
3964 EXPORT_SYMBOL(lnet_update_ping_buffer);
3966 void lnet_incr_dlc_seq(void)
3968 atomic_inc(&lnet_dlc_seq_no);
3971 __u32 lnet_get_dlc_seq_locked(void)
3973 return atomic_read(&lnet_dlc_seq_no);
3977 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3979 struct lnet_net *net;
3982 lnet_net_lock(LNET_LOCK_EX);
3983 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3984 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3985 if (all || (nid_is_nid4(&ni->ni_nid) &&
3986 lnet_nid_to_nid4(&ni->ni_nid) == nid)) {
3987 atomic_set(&ni->ni_healthv, value);
3988 if (list_empty(&ni->ni_recovery) &&
3989 value < LNET_MAX_HEALTH_VALUE) {
3990 CERROR("manually adding local NI %s to recovery\n",
3991 libcfs_nidstr(&ni->ni_nid));
3992 list_add_tail(&ni->ni_recovery,
3993 &the_lnet.ln_mt_localNIRecovq);
3994 lnet_ni_addref_locked(ni, 0);
3997 lnet_net_unlock(LNET_LOCK_EX);
4003 lnet_net_unlock(LNET_LOCK_EX);
4007 lnet_ni_set_conns_per_peer(lnet_nid_t nid, int value, bool all)
4009 struct lnet_net *net;
4012 lnet_net_lock(LNET_LOCK_EX);
4013 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4014 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4015 if (lnet_nid_to_nid4(&ni->ni_nid) != nid && !all)
4017 if (LNET_NETTYP(net->net_id) == SOCKLND)
4018 ni->ni_lnd_tunables.lnd_tun_u.lnd_sock.lnd_conns_per_peer = value;
4019 else if (LNET_NETTYP(net->net_id) == O2IBLND)
4020 ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib.lnd_conns_per_peer = value;
4022 lnet_net_unlock(LNET_LOCK_EX);
4027 lnet_net_unlock(LNET_LOCK_EX);
4031 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
4035 struct lnet_nid nid;
4037 lnet_nid4_to_nid(stats->hlni_nid, &nid);
4038 cpt = lnet_net_lock_current();
4039 ni = lnet_nid_to_ni_locked(&nid, cpt);
4045 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
4046 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
4047 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
4048 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
4049 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
4050 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
4051 stats->hlni_fatal_error = atomic_read(&ni->ni_fatal_error_on);
4052 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
4053 stats->hlni_ping_count = ni->ni_ping_count;
4054 stats->hlni_next_ping = ni->ni_next_ping;
4057 lnet_net_unlock(cpt);
4063 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
4068 lnet_net_lock(LNET_LOCK_EX);
4069 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
4070 if (!nid_is_nid4(&ni->ni_nid))
4072 list->rlst_nid_array[i] = lnet_nid_to_nid4(&ni->ni_nid);
4074 if (i >= LNET_MAX_SHOW_NUM_NID)
4077 lnet_net_unlock(LNET_LOCK_EX);
4078 list->rlst_num_nids = i;
4084 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
4086 struct lnet_peer_ni *lpni;
4089 lnet_net_lock(LNET_LOCK_EX);
4090 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
4091 list->rlst_nid_array[i] = lnet_nid_to_nid4(&lpni->lpni_nid);
4093 if (i >= LNET_MAX_SHOW_NUM_NID)
4096 lnet_net_unlock(LNET_LOCK_EX);
4097 list->rlst_num_nids = i;
4103 * LNet ioctl handler.
4107 LNetCtl(unsigned int cmd, void *arg)
4109 struct libcfs_ioctl_data *data = arg;
4110 struct lnet_ioctl_config_data *config;
4112 struct lnet_nid nid;
4115 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
4116 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
4119 case IOC_LIBCFS_GET_NI: {
4120 struct lnet_processid id = {};
4122 rc = LNetGetId(data->ioc_count, &id);
4123 data->ioc_nid = lnet_nid_to_nid4(&id.nid);
4126 case IOC_LIBCFS_FAIL_NID:
4127 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
4129 case IOC_LIBCFS_ADD_ROUTE: {
4130 /* default router sensitivity to 1 */
4131 unsigned int sensitivity = 1;
4134 if (config->cfg_hdr.ioc_len < sizeof(*config))
4137 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
4139 config->cfg_config_u.cfg_route.rtr_sensitivity;
4142 lnet_nid4_to_nid(config->cfg_nid, &nid);
4143 mutex_lock(&the_lnet.ln_api_mutex);
4144 rc = lnet_add_route(config->cfg_net,
4145 config->cfg_config_u.cfg_route.rtr_hop,
4147 config->cfg_config_u.cfg_route.
4148 rtr_priority, sensitivity);
4149 mutex_unlock(&the_lnet.ln_api_mutex);
4153 case IOC_LIBCFS_DEL_ROUTE:
4156 if (config->cfg_hdr.ioc_len < sizeof(*config))
4159 lnet_nid4_to_nid(config->cfg_nid, &nid);
4160 mutex_lock(&the_lnet.ln_api_mutex);
4161 rc = lnet_del_route(config->cfg_net, &nid);
4162 mutex_unlock(&the_lnet.ln_api_mutex);
4165 case IOC_LIBCFS_GET_ROUTE:
4168 if (config->cfg_hdr.ioc_len < sizeof(*config))
4171 mutex_lock(&the_lnet.ln_api_mutex);
4172 rc = lnet_get_route(config->cfg_count,
4174 &config->cfg_config_u.cfg_route.rtr_hop,
4176 &config->cfg_config_u.cfg_route.rtr_flags,
4177 &config->cfg_config_u.cfg_route.
4179 &config->cfg_config_u.cfg_route.
4181 mutex_unlock(&the_lnet.ln_api_mutex);
4184 case IOC_LIBCFS_GET_LOCAL_NI: {
4185 struct lnet_ioctl_config_ni *cfg_ni;
4186 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
4187 struct lnet_ioctl_element_stats *stats;
4192 /* get the tunables if they are available */
4193 if (cfg_ni->lic_cfg_hdr.ioc_len <
4194 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
4197 stats = (struct lnet_ioctl_element_stats *)
4199 tun = (struct lnet_ioctl_config_lnd_tunables *)
4200 (cfg_ni->lic_bulk + sizeof(*stats));
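/* lic_bulk layout assumed here: a struct lnet_ioctl_element_stats first,
 * immediately followed by the struct lnet_ioctl_config_lnd_tunables.
 */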
4202 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) - sizeof(*stats);
4205 mutex_lock(&the_lnet.ln_api_mutex);
4206 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
4207 mutex_unlock(&the_lnet.ln_api_mutex);
4211 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
4212 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
4214 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
4217 mutex_lock(&the_lnet.ln_api_mutex);
4218 rc = lnet_get_ni_stats(msg_stats);
4219 mutex_unlock(&the_lnet.ln_api_mutex);
4224 case IOC_LIBCFS_GET_NET: {
4225 size_t total = sizeof(*config) +
4226 sizeof(struct lnet_ioctl_net_config);
4229 if (config->cfg_hdr.ioc_len < total)
4232 mutex_lock(&the_lnet.ln_api_mutex);
4233 rc = lnet_get_net_config(config);
4234 mutex_unlock(&the_lnet.ln_api_mutex);
4238 case IOC_LIBCFS_GET_LNET_STATS:
4240 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
4242 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
4245 mutex_lock(&the_lnet.ln_api_mutex);
4246 rc = lnet_counters_get(&lnet_stats->st_cntrs);
4247 mutex_unlock(&the_lnet.ln_api_mutex);
4251 case IOC_LIBCFS_RESET_LNET_STATS:
4253 mutex_lock(&the_lnet.ln_api_mutex);
4254 lnet_counters_reset();
4255 mutex_unlock(&the_lnet.ln_api_mutex);
4259 case IOC_LIBCFS_CONFIG_RTR:
4262 if (config->cfg_hdr.ioc_len < sizeof(*config))
4265 mutex_lock(&the_lnet.ln_api_mutex);
4266 if (config->cfg_config_u.cfg_buffers.buf_enable) {
4267 rc = lnet_rtrpools_enable();
4268 mutex_unlock(&the_lnet.ln_api_mutex);
4271 lnet_rtrpools_disable();
4272 mutex_unlock(&the_lnet.ln_api_mutex);
4275 case IOC_LIBCFS_ADD_BUF:
4278 if (config->cfg_hdr.ioc_len < sizeof(*config))
4281 mutex_lock(&the_lnet.ln_api_mutex);
4282 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
4284 config->cfg_config_u.cfg_buffers.
4286 config->cfg_config_u.cfg_buffers.
4288 mutex_unlock(&the_lnet.ln_api_mutex);
4291 case IOC_LIBCFS_SET_NUMA_RANGE: {
4292 struct lnet_ioctl_set_value *numa;
4294 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4296 lnet_net_lock(LNET_LOCK_EX);
4297 lnet_numa_range = numa->sv_value;
4298 lnet_net_unlock(LNET_LOCK_EX);
4302 case IOC_LIBCFS_GET_NUMA_RANGE: {
4303 struct lnet_ioctl_set_value *numa;
4305 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4307 numa->sv_value = lnet_numa_range;
4311 case IOC_LIBCFS_GET_BUF: {
4312 struct lnet_ioctl_pool_cfg *pool_cfg;
4313 size_t total = sizeof(*config) + sizeof(*pool_cfg);
4317 if (config->cfg_hdr.ioc_len < total)
4320 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
4322 mutex_lock(&the_lnet.ln_api_mutex);
4323 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
4324 mutex_unlock(&the_lnet.ln_api_mutex);
4328 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
4329 struct lnet_ioctl_local_ni_hstats *stats = arg;
4331 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
4334 mutex_lock(&the_lnet.ln_api_mutex);
4335 rc = lnet_get_local_ni_hstats(stats);
4336 mutex_unlock(&the_lnet.ln_api_mutex);
4341 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
4342 struct lnet_ioctl_recovery_list *list = arg;
4343 if (list->rlst_hdr.ioc_len < sizeof(*list))
4346 mutex_lock(&the_lnet.ln_api_mutex);
4347 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
4348 rc = lnet_get_local_ni_recovery_list(list);
4350 rc = lnet_get_peer_ni_recovery_list(list);
4351 mutex_unlock(&the_lnet.ln_api_mutex);
4355 case IOC_LIBCFS_ADD_PEER_NI: {
4356 struct lnet_ioctl_peer_cfg *cfg = arg;
4357 struct lnet_nid prim_nid;
4359 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4362 mutex_lock(&the_lnet.ln_api_mutex);
4363 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid);
4364 lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid);
4365 rc = lnet_user_add_peer_ni(&prim_nid, &nid, cfg->prcfg_mr,
4366 cfg->prcfg_count == 1);
4367 mutex_unlock(&the_lnet.ln_api_mutex);
4371 case IOC_LIBCFS_DEL_PEER_NI: {
4372 struct lnet_ioctl_peer_cfg *cfg = arg;
4373 struct lnet_nid prim_nid;
4375 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4378 mutex_lock(&the_lnet.ln_api_mutex);
4379 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid);
4380 lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid);
4381 rc = lnet_del_peer_ni(&prim_nid,
4384 mutex_unlock(&the_lnet.ln_api_mutex);
4388 case IOC_LIBCFS_GET_PEER_INFO: {
4389 struct lnet_ioctl_peer *peer_info = arg;
4391 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
4394 mutex_lock(&the_lnet.ln_api_mutex);
4395 rc = lnet_get_peer_ni_info(
4396 peer_info->pr_count,
4398 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
4399 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
4400 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
4401 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
4402 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
4403 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
4404 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
4405 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
4406 mutex_unlock(&the_lnet.ln_api_mutex);
4410 case IOC_LIBCFS_GET_PEER_NI: {
4411 struct lnet_ioctl_peer_cfg *cfg = arg;
4413 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4416 mutex_lock(&the_lnet.ln_api_mutex);
4417 rc = lnet_get_peer_info(cfg,
4418 (void __user *)cfg->prcfg_bulk);
4419 mutex_unlock(&the_lnet.ln_api_mutex);
4423 case IOC_LIBCFS_GET_PEER_LIST: {
4424 struct lnet_ioctl_peer_cfg *cfg = arg;
4426 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4429 mutex_lock(&the_lnet.ln_api_mutex);
4430 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
4431 (struct lnet_process_id __user *)cfg->prcfg_bulk);
4432 mutex_unlock(&the_lnet.ln_api_mutex);
4436 case IOC_LIBCFS_SET_HEALHV: {
4437 struct lnet_ioctl_reset_health_cfg *cfg = arg;
4439 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
4441 if (cfg->rh_value < 0 ||
4442 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
4443 value = LNET_MAX_HEALTH_VALUE;
4445 value = cfg->rh_value;
4446 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
4447 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
4448 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
4449 mutex_lock(&the_lnet.ln_api_mutex);
4450 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
4451 lnet_ni_set_healthv(cfg->rh_nid, value,
4454 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
4456 mutex_unlock(&the_lnet.ln_api_mutex);
4460 case IOC_LIBCFS_SET_CONNS_PER_PEER: {
4461 struct lnet_ioctl_reset_conns_per_peer_cfg *cfg = arg;
4464 if (cfg->rcpp_hdr.ioc_len < sizeof(*cfg))
4466 if (cfg->rcpp_value < 0)
4469 value = cfg->rcpp_value;
4471 "Setting conns_per_peer to %d for %s. all = %d\n",
4472 value, libcfs_nid2str(cfg->rcpp_nid), cfg->rcpp_all);
4473 mutex_lock(&the_lnet.ln_api_mutex);
4474 lnet_ni_set_conns_per_peer(cfg->rcpp_nid, value, cfg->rcpp_all);
4475 mutex_unlock(&the_lnet.ln_api_mutex);
4479 case IOC_LIBCFS_NOTIFY_ROUTER: {
4480 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
4482 /* The deadline passed in by the user should be some time in
4483 * seconds in the future since the UNIX epoch. We have to map
4484 * that deadline to the wall clock.
4486 deadline += ktime_get_seconds();
4487 lnet_nid4_to_nid(data->ioc_nid, &nid);
4488 return lnet_notify(NULL, &nid, data->ioc_flags, false,
4492 case IOC_LIBCFS_LNET_DIST:
4493 lnet_nid4_to_nid(data->ioc_nid, &nid);
4494 rc = LNetDist(&nid, &nid, &data->ioc_u32[1]);
4495 if (rc < 0 && rc != -EHOSTUNREACH)
4498 data->ioc_nid = lnet_nid_to_nid4(&nid);
4499 data->ioc_u32[0] = rc;
4502 case IOC_LIBCFS_TESTPROTOCOMPAT:
4503 the_lnet.ln_testprotocompat = data->ioc_flags;
4506 case IOC_LIBCFS_LNET_FAULT:
4507 return lnet_fault_ctl(data->ioc_flags, data);
4509 case IOC_LIBCFS_PING_PEER: {
4510 struct lnet_ioctl_ping_data *ping = arg;
4511 struct lnet_process_id __user *ids = ping->ping_buf;
4512 struct lnet_nid src_nid = LNET_ANY_NID;
4513 struct lnet_genl_ping_list plist;
4514 struct lnet_processid id;
4515 struct lnet_peer *lp;
4516 signed long timeout;
4519 /* Check if the supplied ping data supports source nid
4520 * NB: This check is sufficient if lnet_ioctl_ping_data has
4521 * additional fields added, but if they are re-ordered or
4522 * fields removed then this will break. It is expected that
4523 * these ioctls will be replaced with a netlink implementation, so
4524 * it is probably not worth coming up with a more robust version
4525 * compatibility scheme.
4527 if (ping->ping_hdr.ioc_len >= sizeof(struct lnet_ioctl_ping_data))
4528 lnet_nid4_to_nid(ping->ping_src, &src_nid);
4530 /* If timeout is zero or negative then use the default of 3 minutes */
4531 if (((s32)ping->op_param) <= 0 ||
4532 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4533 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4535 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
4537 id.pid = ping->ping_id.pid;
4538 lnet_nid4_to_nid(ping->ping_id.nid, &id.nid);
4539 rc = lnet_ping(&id, &src_nid, timeout, &plist,
4542 goto report_ping_err;
4545 for (i = 0; i < count; i++) {
4546 struct lnet_processid *result;
4547 struct lnet_process_id tmpid;
4549 result = genradix_ptr(&plist.lgpl_list, i);
4550 memset(&tmpid, 0, sizeof(tmpid));
4551 tmpid.pid = result->pid;
4552 tmpid.nid = lnet_nid_to_nid4(&result->nid);
4553 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid))) {
4555 goto report_ping_err;
4559 mutex_lock(&the_lnet.ln_api_mutex);
4560 lp = lnet_find_peer(&id.nid);
4563 lnet_nid_to_nid4(&lp->lp_primary_nid);
4564 ping->mr_info = lnet_peer_is_multi_rail(lp);
4565 lnet_peer_decref_locked(lp);
4567 mutex_unlock(&the_lnet.ln_api_mutex);
4569 ping->ping_count = count;
4571 genradix_free(&plist.lgpl_list);
4575 case IOC_LIBCFS_DISCOVER: {
4576 struct lnet_ioctl_ping_data *discover = arg;
4577 struct lnet_peer *lp;
4579 rc = lnet_discover(discover->ping_id, discover->op_param,
4581 discover->ping_count);
4585 mutex_lock(&the_lnet.ln_api_mutex);
4586 lnet_nid4_to_nid(discover->ping_id.nid, &nid);
4587 lp = lnet_find_peer(&nid);
4589 discover->ping_id.nid =
4590 lnet_nid_to_nid4(&lp->lp_primary_nid);
4591 discover->mr_info = lnet_peer_is_multi_rail(lp);
4592 lnet_peer_decref_locked(lp);
4594 mutex_unlock(&the_lnet.ln_api_mutex);
4596 discover->ping_count = rc;
4600 case IOC_LIBCFS_ADD_UDSP: {
4601 struct lnet_ioctl_udsp *ioc_udsp = arg;
4602 __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
4604 mutex_lock(&the_lnet.ln_api_mutex);
4605 rc = lnet_udsp_demarshal_add(arg, bulk_size);
4607 rc = lnet_udsp_apply_policies(NULL, false);
4608 CDEBUG(D_NET, "policy application returned %d\n", rc);
4611 mutex_unlock(&the_lnet.ln_api_mutex);
4616 case IOC_LIBCFS_DEL_UDSP: {
4617 struct lnet_ioctl_udsp *ioc_udsp = arg;
4618 int idx = ioc_udsp->iou_idx;
4620 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4623 mutex_lock(&the_lnet.ln_api_mutex);
4624 rc = lnet_udsp_del_policy(idx);
4625 mutex_unlock(&the_lnet.ln_api_mutex);
	case IOC_LIBCFS_GET_UDSP_SIZE: {
		struct lnet_ioctl_udsp *ioc_udsp = arg;
		struct lnet_udsp *udsp;

		if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
			return -EINVAL;

		rc = 0;

		mutex_lock(&the_lnet.ln_api_mutex);
		udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
		if (!udsp) {
			rc = -ENOENT;
		} else {
			/* coming in iou_idx will hold the idx of the udsp
			 * to get the size of. going out the iou_idx will
			 * hold the size of the UDSP found at the passed
			 * in idx.
			 */
			ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
			if (ioc_udsp->iou_idx < 0)
				rc = -EINVAL;
		}
		mutex_unlock(&the_lnet.ln_api_mutex);

		return rc;
	}
	case IOC_LIBCFS_GET_UDSP: {
		struct lnet_ioctl_udsp *ioc_udsp = arg;
		struct lnet_udsp *udsp;

		if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
		if (!udsp)
			rc = -ENOENT;
		else
			rc = lnet_udsp_marshal(udsp, ioc_udsp);
		mutex_unlock(&the_lnet.ln_api_mutex);

		return rc;
	}
	case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
		struct lnet_ioctl_construct_udsp_info *info = arg;

		if (info->cud_hdr.ioc_len < sizeof(*info))
			return -EINVAL;

		CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
		       libcfs_nid2str(info->cud_nid));

		mutex_lock(&the_lnet.ln_api_mutex);
		lnet_udsp_get_construct_info(info);
		mutex_unlock(&the_lnet.ln_api_mutex);

		return 0;
	}
	default:
		ni = lnet_net2ni_addref(data->ioc_net);
		if (ni == NULL)
			return -EINVAL;

		if (ni->ni_net->net_lnd->lnd_ctl == NULL)
			rc = -EINVAL;
		else
			rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);

		lnet_ni_decref(ni);
		return rc;
	}
}
EXPORT_SYMBOL(LNetCtl);
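
/* Generic-netlink (LNET_GENL_NAME) support below.  Each GET command first
 * sends a table of ln_key_list entries describing the attribute names and
 * types, then streams the values.  Userland reassembles the two into YAML.
 * An illustrative fragment for LNET_CMD_NETS (hedged: for orientation
 * only, the authoritative key names are the tables that follow):
 *
 *	net:
 *	  - net type: tcp
 *	    local NI(s):
 *	      - nid: 192.168.1.10@tcp
 *	        status: up
 *	        interfaces:
 *	            0: eth0
 */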
static const struct ln_key_list net_props_list = {
	.lkl_maxattr			= LNET_NET_ATTR_MAX,
	.lkl_list			= {
		[LNET_NET_ATTR_HDR]	= {
			.lkp_value	= "net",
			.lkp_key_format	= LNKF_SEQUENCE | LNKF_MAPPING,
			.lkp_data_type	= NLA_NUL_STRING,
		},
		[LNET_NET_ATTR_TYPE]	= {
			.lkp_value	= "net type",
			.lkp_data_type	= NLA_STRING
		},
		[LNET_NET_ATTR_LOCAL]	= {
			.lkp_value	= "local NI(s)",
			.lkp_key_format	= LNKF_SEQUENCE | LNKF_MAPPING,
			.lkp_data_type	= NLA_NESTED
		},
	},
};
static struct ln_key_list local_ni_list = {
	.lkl_maxattr			= LNET_NET_LOCAL_NI_ATTR_MAX,
	.lkl_list			= {
		[LNET_NET_LOCAL_NI_ATTR_NID] = {
			.lkp_value	= "nid",
			.lkp_data_type	= NLA_STRING
		},
		[LNET_NET_LOCAL_NI_ATTR_STATUS] = {
			.lkp_value	= "status",
			.lkp_data_type	= NLA_STRING
		},
		[LNET_NET_LOCAL_NI_ATTR_INTERFACE] = {
			.lkp_value	= "interfaces",
			.lkp_key_format	= LNKF_MAPPING,
			.lkp_data_type	= NLA_NESTED
		},
	},
};
static const struct ln_key_list local_ni_interfaces_list = {
	.lkl_maxattr			= LNET_NET_LOCAL_NI_INTF_ATTR_MAX,
	.lkl_list			= {
		[LNET_NET_LOCAL_NI_INTF_ATTR_TYPE] = {
			.lkp_value	= "0",
			.lkp_data_type	= NLA_STRING
		},
	},
};
/* Use an index since the traversal is across LNet nets and ni collections */
struct lnet_genl_net_list {
	unsigned int	lngl_net_id;
	unsigned int	lngl_idx;
};

static inline struct lnet_genl_net_list *
lnet_net_dump_ctx(struct netlink_callback *cb)
{
	return (struct lnet_genl_net_list *)cb->args[0];
}

static int lnet_net_show_done(struct netlink_callback *cb)
{
	struct lnet_genl_net_list *nlist = lnet_net_dump_ctx(cb);

	if (nlist) {
		LIBCFS_FREE(nlist, sizeof(*nlist));
		cb->args[0] = 0;
	}

	return 0;
}
/* LNet net ->start() handler for GET requests */
static int lnet_net_show_start(struct netlink_callback *cb)
{
	struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
	struct netlink_ext_ack *extack = NULL;
#endif
	struct lnet_genl_net_list *nlist;
	int msg_len = genlmsg_len(gnlh);
	struct nlattr *params, *top;
	int rem, rc = 0;

#ifdef HAVE_NL_DUMP_WITH_EXT_ACK
	extack = cb->extack;
#endif
	if (the_lnet.ln_refcount == 0) {
		NL_SET_ERR_MSG(extack, "LNet stack down");
		return -ENETDOWN;
	}

	LIBCFS_ALLOC(nlist, sizeof(*nlist));
	if (!nlist)
		return -ENOMEM;

	nlist->lngl_net_id = LNET_NET_ANY;
	nlist->lngl_idx = 0;
	cb->args[0] = (long)nlist;

	if (!msg_len)
		return 0;

	params = genlmsg_data(gnlh);
	if (!(nla_type(params) & LN_SCALAR_ATTR_LIST)) {
		NL_SET_ERR_MSG(extack, "invalid configuration");
		GOTO(report_err, rc = -EINVAL);
	}

	nla_for_each_nested(top, params, rem) {
		struct nlattr *net;
		int rem2;

		nla_for_each_nested(net, top, rem2) {
			char filter[LNET_NIDSTR_SIZE];

			if (nla_type(net) != LN_SCALAR_ATTR_VALUE ||
			    nla_strcmp(net, "net type") != 0)
				continue;

			net = nla_next(net, &rem2);
			if (nla_type(net) != LN_SCALAR_ATTR_VALUE) {
				NL_SET_ERR_MSG(extack, "invalid config param");
				GOTO(report_err, rc = -EINVAL);
			}

			rc = nla_strscpy(filter, net, sizeof(filter));
			if (rc < 0) {
				NL_SET_ERR_MSG(extack, "failed to get param");
				GOTO(report_err, rc);
			}
			rc = 0;

			nlist->lngl_net_id = libcfs_str2net(filter);
			if (nlist->lngl_net_id == LNET_NET_ANY) {
				NL_SET_ERR_MSG(extack, "cannot parse net");
				GOTO(report_err, rc = -ENOENT);
			}
		}
	}
report_err:
	if (rc < 0)
		lnet_net_show_done(cb);

	return rc;
}
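
/* LNet net ->dumpit() handler.  Dumps are resumable: lngl_idx records how
 * many NIs have already been emitted, so when the skb fills up netlink
 * calls us again and we skip ahead to the first NI not yet sent.
 */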
static int lnet_net_show_dump(struct sk_buff *msg,
			      struct netlink_callback *cb)
{
	struct lnet_genl_net_list *nlist = lnet_net_dump_ctx(cb);
#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
	struct netlink_ext_ack *extack = NULL;
#endif
	int portid = NETLINK_CB(cb->skb).portid;
	int seq = cb->nlh->nlmsg_seq;
	struct lnet_net *net;
	int idx = 0, rc = 0;
	bool found = false;
	void *hdr = NULL;

#ifdef HAVE_NL_DUMP_WITH_EXT_ACK
	extack = cb->extack;
#endif
	if (!nlist->lngl_idx) {
		const struct ln_key_list *all[] = {
			&net_props_list, &local_ni_list,
			&local_ni_interfaces_list,
			NULL
		};

		rc = lnet_genl_send_scalar_list(msg, portid, seq,
						&lnet_family,
						NLM_F_CREATE | NLM_F_MULTI,
						LNET_CMD_NETS, all);
		if (rc < 0) {
			NL_SET_ERR_MSG(extack, "failed to send key table");
			GOTO(send_error, rc);
		}
	}

	lnet_net_lock(LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		struct lnet_ni *ni;

		if (nlist->lngl_net_id != LNET_NET_ANY &&
		    nlist->lngl_net_id != net->net_id)
			continue;

		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			struct nlattr *local_ni, *ni_attr;
			char *status = "up";

			if (idx++ < nlist->lngl_idx)
				continue;

			hdr = genlmsg_put(msg, portid, seq, &lnet_family,
					  NLM_F_MULTI, LNET_CMD_NETS);
			if (!hdr) {
				NL_SET_ERR_MSG(extack, "failed to send values");
				GOTO(net_unlock, rc = -EMSGSIZE);
			}

			if (idx == 1)
				nla_put_string(msg, LNET_NET_ATTR_HDR, "");

			nla_put_string(msg, LNET_NET_ATTR_TYPE,
				       libcfs_net2str(net->net_id));
			found = true;

			local_ni = nla_nest_start(msg, LNET_NET_ATTR_LOCAL);
			ni_attr = nla_nest_start(msg, idx - 1);

			lnet_ni_lock(ni);
			nla_put_string(msg, LNET_NET_LOCAL_NI_ATTR_NID,
				       libcfs_nidstr(&ni->ni_nid));
			if (nid_is_lo0(&ni->ni_nid) &&
			    *ni->ni_status != LNET_NI_STATUS_UP)
				status = "down";
			nla_put_string(msg, LNET_NET_LOCAL_NI_ATTR_STATUS,
				       status);

			if (!nid_is_lo0(&ni->ni_nid) && ni->ni_interface) {
				struct nlattr *intf_nest, *intf_attr;

				intf_nest = nla_nest_start(msg,
							   LNET_NET_LOCAL_NI_ATTR_INTERFACE);
				intf_attr = nla_nest_start(msg, 0);
				nla_put_string(msg,
					       LNET_NET_LOCAL_NI_INTF_ATTR_TYPE,
					       ni->ni_interface);
				nla_nest_end(msg, intf_attr);
				nla_nest_end(msg, intf_nest);
			}
			lnet_ni_unlock(ni);

			nla_nest_end(msg, ni_attr);
			nla_nest_end(msg, local_ni);

			genlmsg_end(msg, hdr);
		}
	}

	if (!found) {
		struct nlmsghdr *nlh = nlmsg_hdr(msg);

		nlmsg_cancel(msg, nlh);
		NL_SET_ERR_MSG(extack, "Network is down");
		rc = -ESRCH;
	}
net_unlock:
	lnet_net_unlock(LNET_LOCK_EX);

	nlist->lngl_idx = idx;
send_error:
	return lnet_nl_send_error(cb->skb, portid, seq, rc);
}
#ifndef HAVE_NETLINK_CALLBACK_START
static int lnet_old_net_show_dump(struct sk_buff *msg,
				  struct netlink_callback *cb)
{
	if (!cb->args[0]) {
		int rc = lnet_net_show_start(cb);

		if (rc < 0)
			return lnet_nl_send_error(cb->skb,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  rc);
	}

	return lnet_net_show_dump(msg, cb);
}
#endif
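
/* Tunables arrive as a "name" string attribute followed by an integer
 * value attribute.  Walk the nested list, map each recognized name to its
 * LNET_NET_LOCAL_NI_TUNABLES_ATTR_* type and store the value; unknown
 * names are silently skipped for forward compatibility.
 */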
static int lnet_genl_parse_tunables(struct nlattr *settings,
				    struct lnet_ioctl_config_lnd_tunables *tun)
{
	struct nlattr *param;
	int rem, rc = 0;

	nla_for_each_nested(param, settings, rem) {
		int type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_UNSPEC;
		s64 num;

		if (nla_type(param) != LN_SCALAR_ATTR_VALUE)
			continue;

		if (nla_strcmp(param, "peer_timeout") == 0)
			type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_TIMEOUT;
		else if (nla_strcmp(param, "peer_credits") == 0)
			type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_CREDITS;
		else if (nla_strcmp(param, "peer_buffer_credits") == 0)
			type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_BUFFER_CREDITS;
		else if (nla_strcmp(param, "credits") == 0)
			type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_CREDITS;

		param = nla_next(param, &rem);
		if (nla_type(param) != LN_SCALAR_ATTR_INT_VALUE)
			return -EINVAL;

		num = nla_get_s64(param);
		switch (type) {
		case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_TIMEOUT:
			if (num >= 0)
				tun->lt_cmn.lct_peer_timeout = num;
			break;
		case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_CREDITS:
			if (num > 0)
				tun->lt_cmn.lct_peer_tx_credits = num;
			break;
		case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_BUFFER_CREDITS:
			if (num > 0)
				tun->lt_cmn.lct_peer_rtr_credits = num;
			break;
		case LNET_NET_LOCAL_NI_TUNABLES_ATTR_CREDITS:
			if (num > 0)
				tun->lt_cmn.lct_max_tx_credits = num;
			break;
		default:
			break;
		}
	}

	return rc;
}
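
/* LND-specific tunables are matched against the key table the LND
 * registered in lnd_keys, and each value is handed to the driver's
 * lnd_nl_set() callback, keeping this file free of per-LND knowledge.
 */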
static int lnet_genl_parse_lnd_tunables(struct nlattr *settings,
					struct lnet_lnd_tunables *tun,
					const struct lnet_lnd *lnd)
{
	const struct ln_key_list *list = lnd->lnd_keys;
	struct nlattr *param;
	int rem, rc = 0;
	int i = 0;

	/* silently ignore these settings if the LND driver doesn't
	 * support any LND tunables
	 */
	if (!list || !lnd->lnd_nl_set || !list->lkl_maxattr)
		return 0;

	nla_for_each_nested(param, settings, rem) {
		if (nla_type(param) != LN_SCALAR_ATTR_VALUE)
			continue;

		for (i = 1; i <= list->lkl_maxattr; i++) {
			if (!list->lkl_list[i].lkp_value ||
			    nla_strcmp(param, list->lkl_list[i].lkp_value) != 0)
				continue;

			param = nla_next(param, &rem);
			rc = lnd->lnd_nl_set(LNET_CMD_NETS, param, i, tun);
			if (rc < 0)
				return rc;
		}
	}

	return rc;
}
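
/* Parse one local NI description ("interfaces", "tunables",
 * "lnd tunables", "CPT") into struct lnet_ioctl_config_ni and the
 * tunables block, then add or delete the NI depending on NLM_F_CREATE.
 */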
static int
lnet_genl_parse_local_ni(struct nlattr *entry, struct genl_info *info,
			 int net_id, struct lnet_ioctl_config_ni *conf,
			 bool *ni_list)
{
	bool create = info->nlhdr->nlmsg_flags & NLM_F_CREATE;
	struct lnet_ioctl_config_lnd_tunables *tun;
	struct nlattr *settings;
	int rem3, rc = 0;

	LIBCFS_ALLOC(tun, sizeof(struct lnet_ioctl_config_lnd_tunables));
	if (!tun) {
		GENL_SET_ERR_MSG(info, "cannot allocate memory for tunables");
		GOTO(out, rc = -ENOMEM);
	}

	/* Use LND defaults */
	tun->lt_cmn.lct_peer_timeout = -1;
	tun->lt_cmn.lct_peer_tx_credits = -1;
	tun->lt_cmn.lct_peer_rtr_credits = -1;
	tun->lt_cmn.lct_max_tx_credits = -1;
	conf->lic_ncpts = 0;

	nla_for_each_nested(settings, entry, rem3) {
		if (nla_type(settings) != LN_SCALAR_ATTR_VALUE)
			continue;

		if (nla_strcmp(settings, "interfaces") == 0) {
			struct nlattr *intf;
			int rem4;

			settings = nla_next(settings, &rem3);
			if (nla_type(settings) != LN_SCALAR_ATTR_LIST) {
				GENL_SET_ERR_MSG(info,
						 "invalid interfaces");
				GOTO(out, rc = -EINVAL);
			}

			nla_for_each_nested(intf, settings, rem4) {
				intf = nla_next(intf, &rem4);
				if (nla_type(intf) !=
				    LN_SCALAR_ATTR_VALUE) {
					GENL_SET_ERR_MSG(info,
							 "0 key is invalid");
					GOTO(out, rc = -EINVAL);
				}

				rc = nla_strscpy(conf->lic_ni_intf, intf,
						 sizeof(conf->lic_ni_intf));
				if (rc < 0) {
					GENL_SET_ERR_MSG(info,
							 "failed to parse interfaces");
					GOTO(out, rc);
				}
			}
			*ni_list = true;
		} else if (nla_strcmp(settings, "tunables") == 0) {
			settings = nla_next(settings, &rem3);
			if (nla_type(settings) != LN_SCALAR_ATTR_LIST) {
				GENL_SET_ERR_MSG(info,
						 "invalid tunables");
				GOTO(out, rc = -EINVAL);
			}

			rc = lnet_genl_parse_tunables(settings, tun);
			if (rc < 0) {
				GENL_SET_ERR_MSG(info,
						 "failed to parse tunables");
				GOTO(out, rc);
			}
		} else if ((nla_strcmp(settings, "lnd tunables") == 0)) {
			const struct lnet_lnd *lnd;

			lnd = lnet_load_lnd(LNET_NETTYP(net_id));
			if (IS_ERR(lnd)) {
				GENL_SET_ERR_MSG(info,
						 "LND type not supported");
				GOTO(out, rc = PTR_ERR(lnd));
			}

			settings = nla_next(settings, &rem3);
			if (nla_type(settings) != LN_SCALAR_ATTR_LIST) {
				GENL_SET_ERR_MSG(info,
						 "lnd tunables should be list");
				GOTO(out, rc = -EINVAL);
			}

			rc = lnet_genl_parse_lnd_tunables(settings,
							  &tun->lt_tun, lnd);
			if (rc < 0) {
				GENL_SET_ERR_MSG(info,
						 "failed to parse lnd tunables");
				GOTO(out, rc);
			}
		} else if (nla_strcmp(settings, "CPT") == 0) {
			struct nlattr *cpt;
			int rem4;

			settings = nla_next(settings, &rem3);
			if (nla_type(settings) != LN_SCALAR_ATTR_LIST) {
				GENL_SET_ERR_MSG(info,
						 "CPT should be list");
				GOTO(out, rc = -EINVAL);
			}

			nla_for_each_nested(cpt, settings, rem4) {
				s64 core;

				if (nla_type(cpt) !=
				    LN_SCALAR_ATTR_INT_VALUE) {
					GENL_SET_ERR_MSG(info,
							 "invalid CPT config");
					GOTO(out, rc = -EINVAL);
				}

				core = nla_get_s64(cpt);
				if (core >= LNET_CPT_NUMBER) {
					GENL_SET_ERR_MSG(info,
							 "invalid CPT value");
					GOTO(out, rc = -ERANGE);
				}

				conf->lic_cpts[conf->lic_ncpts] = core;
				conf->lic_ncpts++;
			}
		}
	}

	if (!create) {
		struct lnet_net *net;
		struct lnet_ni *ni;

		rc = -ENODEV;
		if (!strlen(conf->lic_ni_intf)) {
			GENL_SET_ERR_MSG(info,
					 "interface is missing");
			GOTO(out, rc);
		}

		lnet_net_lock(LNET_LOCK_EX);
		net = lnet_get_net_locked(net_id);
		if (!net) {
			GENL_SET_ERR_MSG(info,
					 "LNet net doesn't exist");
			lnet_net_unlock(LNET_LOCK_EX);
			GOTO(out, rc);
		}

		list_for_each_entry(ni, &net->net_ni_list,
				    ni_netlist) {
			if (!ni->ni_interface ||
			    strcmp(ni->ni_interface,
				   conf->lic_ni_intf) != 0)
				continue;

			lnet_net_unlock(LNET_LOCK_EX);
			rc = lnet_dyn_del_ni(&ni->ni_nid);
			if (rc < 0) {
				GENL_SET_ERR_MSG(info,
						 "cannot del LNet NI");
				GOTO(out, rc);
			}
			break;
		}

		if (rc < 0) { /* will be -ENODEV */
			GENL_SET_ERR_MSG(info,
					 "interface invalid for deleting LNet NI");
			lnet_net_unlock(LNET_LOCK_EX);
		}
	} else {
		if (!strlen(conf->lic_ni_intf)) {
			GENL_SET_ERR_MSG(info,
					 "interface is missing");
			GOTO(out, rc = -ENODEV);
		}

		rc = lnet_dyn_add_ni(conf, net_id, tun);
		switch (rc) {
		case -ENOENT:
			GENL_SET_ERR_MSG(info,
					 "cannot parse net");
			break;
		case -ERANGE:
			GENL_SET_ERR_MSG(info,
					 "invalid CPT configuration");
			break;
		default:
			if (rc < 0)
				GENL_SET_ERR_MSG(info,
						 "cannot add LNet NI");
			break;
		}
	}
out:
	if (tun)
		LIBCFS_FREE(tun, sizeof(struct lnet_ioctl_config_lnd_tunables));

	return rc;
}
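
/* LNET_CMD_NETS ->doit() handler.  The payload is a list of net blocks;
 * each block may carry an "ip2net" rule, a "net type" with nested local
 * NI definitions, or (on delete with no NI list) just the net to remove.
 */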
static int lnet_net_cmd(struct sk_buff *skb, struct genl_info *info)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);
	struct genlmsghdr *gnlh = nlmsg_data(nlh);
	struct nlattr *params = genlmsg_data(gnlh);
	int msg_len, rem, rc = 0;
	struct nlattr *attr;

	msg_len = genlmsg_len(gnlh);
	if (!msg_len) {
		GENL_SET_ERR_MSG(info, "no configuration");
		return -ENOMSG;
	}

	if (!(nla_type(params) & LN_SCALAR_ATTR_LIST)) {
		GENL_SET_ERR_MSG(info, "invalid configuration");
		return -EINVAL;
	}

	nla_for_each_nested(attr, params, rem) {
		struct lnet_ioctl_config_ni conf;
		u32 net_id = LNET_NET_ANY;
		struct nlattr *entry;
		bool ni_list = false;
		int rem2;

		if (nla_type(attr) != LN_SCALAR_ATTR_LIST)
			continue;

		nla_for_each_nested(entry, attr, rem2) {
			switch (nla_type(entry)) {
			case LN_SCALAR_ATTR_VALUE: {
				ssize_t len;

				memset(&conf, 0, sizeof(conf));
				if (nla_strcmp(entry, "ip2net") == 0) {
					entry = nla_next(entry, &rem2);
					if (nla_type(entry) !=
					    LN_SCALAR_ATTR_VALUE) {
						GENL_SET_ERR_MSG(info,
								 "ip2net has invalid key");
						GOTO(out, rc = -EINVAL);
					}

					len = nla_strscpy(conf.lic_legacy_ip2nets,
							  entry,
							  sizeof(conf.lic_legacy_ip2nets));
					if (len < 0) {
						GENL_SET_ERR_MSG(info,
								 "ip2net key string is invalid");
						GOTO(out, rc = len);
					}
					ni_list = true;
				} else if (nla_strcmp(entry, "net type") == 0) {
					char tmp[LNET_NIDSTR_SIZE];

					entry = nla_next(entry, &rem2);
					if (nla_type(entry) !=
					    LN_SCALAR_ATTR_VALUE) {
						GENL_SET_ERR_MSG(info,
								 "net type has invalid key");
						GOTO(out, rc = -EINVAL);
					}

					len = nla_strscpy(tmp, entry,
							  sizeof(tmp));
					if (len < 0) {
						GENL_SET_ERR_MSG(info,
								 "net type key string is invalid");
						GOTO(out, rc = len);
					}

					net_id = libcfs_str2net(tmp);
					if (net_id == LNET_NET_ANY) {
						GENL_SET_ERR_MSG(info,
								 "cannot parse net");
						GOTO(out, rc = -ENODEV);
					}
					if (LNET_NETTYP(net_id) == LOLND) {
						GENL_SET_ERR_MSG(info,
								 "setting @lo not allowed");
						GOTO(out, rc = -ENODEV);
					}
					conf.lic_legacy_ip2nets[0] = '\0';
					conf.lic_ni_intf[0] = '\0';
					ni_list = false;
				}
				break;
			}
			case LN_SCALAR_ATTR_LIST: {
				struct nlattr *interface;
				int rem3;

				nla_for_each_nested(interface, entry, rem3) {
					rc = lnet_genl_parse_local_ni(interface, info,
								      net_id, &conf,
								      &ni_list);
					if (rc < 0)
						GOTO(out, rc);
				}
				break;
			}
			/* it is possible that a newer version of userland
			 * sent values an older kernel doesn't handle, so
			 * silently ignore these values
			 */
			default:
				break;
			}
		}

		/* Handle case of just sent NET with no list of NIDs */
		if (!(info->nlhdr->nlmsg_flags & NLM_F_CREATE) && !ni_list) {
			rc = lnet_dyn_del_net(net_id);
			if (rc < 0)
				GENL_SET_ERR_MSG(info,
						 "cannot del network");
		}
	}
out:
	return rc;
}
/** LNet route handling */

/* We can't use struct lnet_ioctl_config_data since it lacks
 * support for large NIDs
 */
struct lnet_route_properties {
	struct lnet_nid		lrp_gateway;
	u32			lrp_net;
	s32			lrp_hop;
	u32			lrp_flags;
	u32			lrp_priority;
	u32			lrp_sensitivity;
};

struct lnet_genl_route_list {
	unsigned int				lgrl_index;
	unsigned int				lgrl_count;
	GENRADIX(struct lnet_route_properties)	lgrl_list;
};

static inline struct lnet_genl_route_list *
lnet_route_dump_ctx(struct netlink_callback *cb)
{
	return (struct lnet_genl_route_list *)cb->args[0];
}

static int lnet_route_show_done(struct netlink_callback *cb)
{
	struct lnet_genl_route_list *rlist = lnet_route_dump_ctx(cb);

	if (rlist) {
		genradix_free(&rlist->lgrl_list);
		CFS_FREE_PTR(rlist);
		cb->args[0] = 0;
	}

	return 0;
}
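
/* Walk the remote net hash table and append every route matching
 * @settings to @rlist.  A field left at its wildcard value (LNET_NET_ANY,
 * LNET_ANY_NID or -1) matches everything, so a zero-filter scan collects
 * all routes.
 */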
int lnet_scan_route(struct lnet_genl_route_list *rlist,
		    struct lnet_route_properties *settings)
{
	struct lnet_remotenet *rnet;
	struct list_head *rn_list;
	struct lnet_route *route;
	int cpt, i, rc = 0;

	cpt = lnet_net_lock_current();

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
		rn_list = &the_lnet.ln_remote_nets_hash[i];
		list_for_each_entry(rnet, rn_list, lrn_list) {
			if (settings->lrp_net != LNET_NET_ANY &&
			    settings->lrp_net != rnet->lrn_net)
				continue;

			list_for_each_entry(route, &rnet->lrn_routes,
					    lr_list) {
				struct lnet_route_properties *prop;

				if (!LNET_NID_IS_ANY(&settings->lrp_gateway) &&
				    !nid_same(&settings->lrp_gateway,
					      &route->lr_nid))
					continue;

				if (settings->lrp_hop != -1 &&
				    settings->lrp_hop != route->lr_hops)
					continue;

				if (settings->lrp_priority != -1 &&
				    settings->lrp_priority != route->lr_priority)
					continue;

				if (settings->lrp_sensitivity != -1 &&
				    settings->lrp_sensitivity !=
				    route->lr_gateway->lp_health_sensitivity)
					continue;

				prop = genradix_ptr_alloc(&rlist->lgrl_list,
							  rlist->lgrl_count++,
							  GFP_ATOMIC);
				if (!prop)
					GOTO(failed_alloc, rc = -ENOMEM);

				prop->lrp_net = rnet->lrn_net;
				prop->lrp_gateway = route->lr_nid;
				prop->lrp_hop = route->lr_hops;
				prop->lrp_priority = route->lr_priority;
				prop->lrp_sensitivity =
					route->lr_gateway->lp_health_sensitivity;
				if (lnet_is_route_alive(route))
					prop->lrp_flags |= LNET_RT_ALIVE;
				else
					prop->lrp_flags &= ~LNET_RT_ALIVE;
				if (route->lr_single_hop)
					prop->lrp_flags &= ~LNET_RT_MULTI_HOP;
				else
					prop->lrp_flags |= LNET_RT_MULTI_HOP;
			}
		}
	}
failed_alloc:
	lnet_net_unlock(cpt);

	return rc;
}
/* LNet route ->start() handler for GET requests */
static int lnet_route_show_start(struct netlink_callback *cb)
{
	struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
	struct netlink_ext_ack *extack = NULL;
#endif
	struct lnet_genl_route_list *rlist;
	int msg_len = genlmsg_len(gnlh);
	int rc = 0;

#ifdef HAVE_NL_DUMP_WITH_EXT_ACK
	extack = cb->extack;
#endif
	if (the_lnet.ln_refcount == 0 ||
	    the_lnet.ln_state != LNET_STATE_RUNNING) {
		NL_SET_ERR_MSG(extack, "Network is down");
		return -ENETDOWN;
	}

	CFS_ALLOC_PTR(rlist);
	if (!rlist) {
		NL_SET_ERR_MSG(extack, "No memory for route list");
		return -ENOMEM;
	}

	genradix_init(&rlist->lgrl_list);
	rlist->lgrl_count = 0;
	rlist->lgrl_index = 0;
	cb->args[0] = (long)rlist;

	mutex_lock(&the_lnet.ln_api_mutex);
	if (!msg_len) {
		struct lnet_route_properties tmp = {
			.lrp_gateway		= LNET_ANY_NID,
			.lrp_net		= LNET_NET_ANY,
			.lrp_hop		= -1,
			.lrp_priority		= -1,
			.lrp_sensitivity	= -1,
		};

		rc = lnet_scan_route(rlist, &tmp);
		if (rc < 0) {
			NL_SET_ERR_MSG(extack,
				       "failed to allocate router data");
			GOTO(report_err, rc);
		}
	} else {
		struct nlattr *params = genlmsg_data(gnlh);
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, params, rem) {
			struct lnet_route_properties tmp = {
				.lrp_gateway		= LNET_ANY_NID,
				.lrp_net		= LNET_NET_ANY,
				.lrp_hop		= -1,
				.lrp_priority		= -1,
				.lrp_sensitivity	= -1,
			};
			struct nlattr *route;
			int rem2;

			if (nla_type(attr) != LN_SCALAR_ATTR_LIST)
				continue;

			nla_for_each_nested(route, attr, rem2) {
				if (nla_type(route) != LN_SCALAR_ATTR_VALUE)
					continue;

				if (nla_strcmp(route, "net") == 0) {
					char nw[LNET_NIDSTR_SIZE];

					route = nla_next(route, &rem2);
					if (nla_type(route) !=
					    LN_SCALAR_ATTR_VALUE) {
						NL_SET_ERR_MSG(extack,
							       "invalid net param");
						GOTO(report_err, rc = -EINVAL);
					}

					rc = nla_strscpy(nw, route, sizeof(nw));
					if (rc < 0) {
						NL_SET_ERR_MSG(extack,
							       "failed to get route param");
						GOTO(report_err, rc);
					}
					tmp.lrp_net = libcfs_str2net(strim(nw));
				} else if (nla_strcmp(route, "gateway") == 0) {
					char gw[LNET_NIDSTR_SIZE];

					route = nla_next(route, &rem2);
					if (nla_type(route) !=
					    LN_SCALAR_ATTR_VALUE) {
						NL_SET_ERR_MSG(extack,
							       "invalid gateway param");
						GOTO(report_err, rc = -EINVAL);
					}

					rc = nla_strscpy(gw, route, sizeof(gw));
					if (rc < 0) {
						NL_SET_ERR_MSG(extack,
							       "failed to get route param");
						GOTO(report_err, rc);
					}
					libcfs_strnid(&tmp.lrp_gateway, strim(gw));
				} else if (nla_strcmp(route, "hop") == 0) {
					route = nla_next(route, &rem2);
					if (nla_type(route) !=
					    LN_SCALAR_ATTR_INT_VALUE) {
						NL_SET_ERR_MSG(extack,
							       "invalid hop param");
						GOTO(report_err, rc = -EINVAL);
					}

					tmp.lrp_hop = nla_get_s64(route);
					if (tmp.lrp_hop != -1)
						tmp.lrp_hop = clamp_t(s32,
								      tmp.lrp_hop,
								      1, 127);
				} else if (nla_strcmp(route, "priority") == 0) {
					route = nla_next(route, &rem2);
					if (nla_type(route) !=
					    LN_SCALAR_ATTR_INT_VALUE) {
						NL_SET_ERR_MSG(extack,
							       "invalid priority param");
						GOTO(report_err, rc = -EINVAL);
					}

					tmp.lrp_priority = nla_get_s64(route);
				}
			}

			rc = lnet_scan_route(rlist, &tmp);
			if (rc < 0) {
				NL_SET_ERR_MSG(extack,
					       "failed to allocate router data");
				GOTO(report_err, rc);
			}
		}
	}
report_err:
	mutex_unlock(&the_lnet.ln_api_mutex);

	if (rc < 0)
		lnet_route_show_done(cb);

	return rc;
}
static const struct ln_key_list route_props_list = {
	.lkl_maxattr			= LNET_ROUTE_ATTR_MAX,
	.lkl_list			= {
		[LNET_ROUTE_ATTR_HDR]	= {
			.lkp_value	= "route",
			.lkp_key_format	= LNKF_SEQUENCE | LNKF_MAPPING,
			.lkp_data_type	= NLA_NUL_STRING,
		},
		[LNET_ROUTE_ATTR_NET]	= {
			.lkp_value	= "net",
			.lkp_data_type	= NLA_STRING
		},
		[LNET_ROUTE_ATTR_GATEWAY] = {
			.lkp_value	= "gateway",
			.lkp_data_type	= NLA_STRING
		},
		[LNET_ROUTE_ATTR_HOP]	= {
			.lkp_value	= "hop",
			.lkp_data_type	= NLA_S32
		},
		[LNET_ROUTE_ATTR_PRIORITY] = {
			.lkp_value	= "priority",
			.lkp_data_type	= NLA_U32
		},
		[LNET_ROUTE_ATTR_HEALTH_SENSITIVITY] = {
			.lkp_value	= "health_sensitivity",
			.lkp_data_type	= NLA_U32
		},
		[LNET_ROUTE_ATTR_STATE] = {
			.lkp_value	= "state",
			.lkp_data_type	= NLA_STRING,
		},
		[LNET_ROUTE_ATTR_TYPE]	= {
			.lkp_value	= "type",
			.lkp_data_type	= NLA_STRING,
		},
	},
};
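
/* LNet route ->dumpit() handler.  Like the net dump this is resumable via
 * lgrl_index; hop, priority and health sensitivity are only emitted for
 * userland that negotiated a non-zero genl version.
 */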
static int lnet_route_show_dump(struct sk_buff *msg,
				struct netlink_callback *cb)
{
	struct lnet_genl_route_list *rlist = lnet_route_dump_ctx(cb);
	struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
	struct netlink_ext_ack *extack = NULL;
#endif
	int portid = NETLINK_CB(cb->skb).portid;
	int seq = cb->nlh->nlmsg_seq;
	int idx = rlist->lgrl_index;
	int rc = 0;

#ifdef HAVE_NL_DUMP_WITH_EXT_ACK
	extack = cb->extack;
#endif
	if (!rlist->lgrl_count) {
		NL_SET_ERR_MSG(extack, "No routes found");
		GOTO(send_error, rc = -ENOENT);
	}

	if (!idx) {
		const struct ln_key_list *all[] = {
			&route_props_list, NULL
		};

		rc = lnet_genl_send_scalar_list(msg, portid, seq,
						&lnet_family,
						NLM_F_CREATE | NLM_F_MULTI,
						LNET_CMD_ROUTES, all);
		if (rc < 0) {
			NL_SET_ERR_MSG(extack, "failed to send key table");
			GOTO(send_error, rc);
		}
	}

	/* If no routes were found send an empty message and not an error */
	if (!rlist->lgrl_count) {
		void *hdr;

		hdr = genlmsg_put(msg, portid, seq, &lnet_family,
				  NLM_F_MULTI, LNET_CMD_ROUTES);
		if (!hdr) {
			NL_SET_ERR_MSG(extack, "failed to send values");
			genlmsg_cancel(msg, hdr);
			GOTO(send_error, rc = -EMSGSIZE);
		}
		genlmsg_end(msg, hdr);
		GOTO(send_error, rc);
	}

	while (idx < rlist->lgrl_count) {
		struct lnet_route_properties *prop;
		void *hdr;

		prop = genradix_ptr(&rlist->lgrl_list, idx++);

		hdr = genlmsg_put(msg, portid, seq, &lnet_family,
				  NLM_F_MULTI, LNET_CMD_ROUTES);
		if (!hdr) {
			NL_SET_ERR_MSG(extack, "failed to send values");
			genlmsg_cancel(msg, hdr);
			GOTO(send_error, rc = -EMSGSIZE);
		}

		if (idx == 1)
			nla_put_string(msg, LNET_ROUTE_ATTR_HDR, "");

		nla_put_string(msg, LNET_ROUTE_ATTR_NET,
			       libcfs_net2str(prop->lrp_net));
		nla_put_string(msg, LNET_ROUTE_ATTR_GATEWAY,
			       libcfs_nidstr(&prop->lrp_gateway));
		if (gnlh->version) {
			nla_put_s32(msg, LNET_ROUTE_ATTR_HOP, prop->lrp_hop);
			nla_put_u32(msg, LNET_ROUTE_ATTR_PRIORITY,
				    prop->lrp_priority);
			nla_put_u32(msg, LNET_ROUTE_ATTR_HEALTH_SENSITIVITY,
				    prop->lrp_sensitivity);
		}
		nla_put_string(msg, LNET_ROUTE_ATTR_STATE,
			       prop->lrp_flags & LNET_RT_ALIVE ?
			       "up" : "down");
		nla_put_string(msg, LNET_ROUTE_ATTR_TYPE,
			       prop->lrp_flags & LNET_RT_MULTI_HOP ?
			       "multi-hop" : "single-hop");

		genlmsg_end(msg, hdr);
	}

	rlist->lgrl_index = idx;
send_error:
	return lnet_nl_send_error(cb->skb, portid, seq, rc);
}
#ifndef HAVE_NETLINK_CALLBACK_START
static int lnet_old_route_show_dump(struct sk_buff *msg,
				    struct netlink_callback *cb)
{
	if (!cb->args[0]) {
		int rc = lnet_route_show_start(cb);

		if (rc < 0)
			return lnet_nl_send_error(cb->skb,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  rc);
	}

	return lnet_route_show_dump(msg, cb);
}
#endif /* !HAVE_NETLINK_CALLBACK_START */
static inline struct lnet_genl_ping_list *
lnet_ping_dump_ctx(struct netlink_callback *cb)
{
	return (struct lnet_genl_ping_list *)cb->args[0];
}

static int lnet_ping_show_done(struct netlink_callback *cb)
{
	struct lnet_genl_ping_list *plist = lnet_ping_dump_ctx(cb);

	if (plist) {
		genradix_free(&plist->lgpl_failed);
		genradix_free(&plist->lgpl_list);
		LIBCFS_FREE(plist, sizeof(*plist));
		cb->args[0] = 0;
	}

	return 0;
}
/* LNet ping ->start() handler for GET requests */
static int lnet_ping_show_start(struct netlink_callback *cb)
{
	struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
	struct netlink_ext_ack *extack = NULL;
#endif
	struct lnet_genl_ping_list *plist;
	int msg_len = genlmsg_len(gnlh);
	struct nlattr *params, *top;
	int rem, rc = 0;

#ifdef HAVE_NL_DUMP_WITH_EXT_ACK
	extack = cb->extack;
#endif
	if (the_lnet.ln_refcount == 0) {
		NL_SET_ERR_MSG(extack, "Network is down");
		return -ENETDOWN;
	}

	if (!msg_len) {
		NL_SET_ERR_MSG(extack, "Ping needs NID targets");
		return -ENOENT;
	}

	LIBCFS_ALLOC(plist, sizeof(*plist));
	if (!plist) {
		NL_SET_ERR_MSG(extack, "failed to setup ping list");
		return -ENOMEM;
	}
	genradix_init(&plist->lgpl_list);
	plist->lgpl_timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
	plist->lgpl_src_nid = LNET_ANY_NID;
	plist->lgpl_index = 0;
	plist->lgpl_list_count = 0;
	cb->args[0] = (long)plist;

	params = genlmsg_data(gnlh);
	nla_for_each_attr(top, params, msg_len, rem) {
		struct nlattr *nids;
		int rem2;

		switch (nla_type(top)) {
		case LN_SCALAR_ATTR_VALUE:
			if (nla_strcmp(top, "timeout") == 0) {
				s64 timeout;

				top = nla_next(top, &rem);
				if (nla_type(top) != LN_SCALAR_ATTR_INT_VALUE) {
					NL_SET_ERR_MSG(extack,
						       "invalid timeout param");
					GOTO(report_err, rc = -EINVAL);
				}

				/* If timeout is negative then set default of
				 * 3 minutes
				 */
				timeout = nla_get_s64(top);
				if (timeout > 0 &&
				    timeout < (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
					plist->lgpl_timeout =
						nsecs_to_jiffies(timeout * NSEC_PER_MSEC);
			} else if (nla_strcmp(top, "source") == 0) {
				char nidstr[LNET_NIDSTR_SIZE + 1];

				top = nla_next(top, &rem);
				if (nla_type(top) != LN_SCALAR_ATTR_VALUE) {
					NL_SET_ERR_MSG(extack,
						       "invalid source param");
					GOTO(report_err, rc = -EINVAL);
				}

				rc = nla_strscpy(nidstr, top, sizeof(nidstr));
				if (rc < 0) {
					NL_SET_ERR_MSG(extack,
						       "failed to parse source nid");
					GOTO(report_err, rc);
				}

				rc = libcfs_strnid(&plist->lgpl_src_nid,
						   strim(nidstr));
				if (rc < 0) {
					NL_SET_ERR_MSG(extack,
						       "invalid source nid");
					GOTO(report_err, rc);
				}
			}
			break;
		case LN_SCALAR_ATTR_LIST:
			nla_for_each_nested(nids, top, rem2) {
				char nid[LNET_NIDSTR_SIZE + 1];
				struct lnet_processid *id;

				if (nla_type(nids) != LN_SCALAR_ATTR_VALUE)
					continue;

				memset(nid, 0, sizeof(nid));
				rc = nla_strscpy(nid, nids, sizeof(nid));
				if (rc < 0) {
					NL_SET_ERR_MSG(extack,
						       "failed to get NID");
					GOTO(report_err, rc);
				}

				id = genradix_ptr_alloc(&plist->lgpl_list,
							plist->lgpl_list_count++,
							GFP_KERNEL);
				if (!id) {
					NL_SET_ERR_MSG(extack,
						       "failed to allocate NID");
					GOTO(report_err, rc = -ENOMEM);
				}

				rc = libcfs_strid(id, strim(nid));
				if (rc < 0) {
					NL_SET_ERR_MSG(extack, "invalid NID");
					GOTO(report_err, rc);
				}
			}
			break;
		default:
			break;
		}
	}

	return 0;
report_err:
	lnet_ping_show_done(cb);

	return rc;
}
static const struct ln_key_list ping_props_list = {
	.lkl_maxattr			= LNET_PING_ATTR_MAX,
	.lkl_list			= {
		[LNET_PING_ATTR_HDR]	= {
			.lkp_value	= "ping",
			.lkp_key_format	= LNKF_SEQUENCE | LNKF_MAPPING,
			.lkp_data_type	= NLA_NUL_STRING,
		},
		[LNET_PING_ATTR_PRIMARY_NID] = {
			.lkp_value	= "primary nid",
			.lkp_data_type	= NLA_STRING
		},
		[LNET_PING_ATTR_ERRNO]	= {
			.lkp_value	= "errno",
			.lkp_data_type	= NLA_S16
		},
		[LNET_PING_ATTR_MULTIRAIL] = {
			.lkp_value	= "Multi-Rail",
			.lkp_data_type	= NLA_FLAG
		},
		[LNET_PING_ATTR_PEER_NI_LIST] = {
			.lkp_value	= "peer_ni",
			.lkp_key_format	= LNKF_SEQUENCE | LNKF_MAPPING,
			.lkp_data_type	= NLA_NESTED
		},
	},
};

static struct ln_key_list ping_peer_ni_list = {
	.lkl_maxattr			= LNET_PING_PEER_NI_ATTR_MAX,
	.lkl_list			= {
		[LNET_PING_PEER_NI_ATTR_NID] = {
			.lkp_value	= "nid",
			.lkp_data_type	= NLA_STRING
		},
	},
};
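
/* LNet ping ->dumpit() handler.  Each target NID collected by
 * lnet_ping_show_start() is pinged in turn; successful replies are
 * streamed as peer_ni lists, while failures are accumulated in
 * lgpl_failed and reported at the end with their errno instead of
 * aborting the whole dump.
 */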
static int lnet_ping_show_dump(struct sk_buff *msg,
			       struct netlink_callback *cb)
{
	struct lnet_genl_ping_list *plist = lnet_ping_dump_ctx(cb);
	struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
	struct netlink_ext_ack *extack = NULL;
#endif
	int portid = NETLINK_CB(cb->skb).portid;
	int seq = cb->nlh->nlmsg_seq;
	int idx = plist->lgpl_index;
	int i, rc = 0;

#ifdef HAVE_NL_DUMP_WITH_EXT_ACK
	extack = cb->extack;
#endif
	if (!plist->lgpl_index) {
		const struct ln_key_list *all[] = {
			&ping_props_list, &ping_peer_ni_list, NULL
		};

		rc = lnet_genl_send_scalar_list(msg, portid, seq,
						&lnet_family,
						NLM_F_CREATE | NLM_F_MULTI,
						LNET_CMD_PING, all);
		if (rc < 0) {
			NL_SET_ERR_MSG(extack, "failed to send key table");
			GOTO(send_error, rc);
		}

		genradix_init(&plist->lgpl_failed);
	}

	while (idx < plist->lgpl_list_count) {
		struct lnet_nid primary_nid = LNET_ANY_NID;
		struct lnet_genl_ping_list peers;
		struct lnet_processid *id;
		struct nlattr *nid_list;
		struct lnet_peer *lp;
		bool mr_flag = false;
		int count;
		void *hdr;

		id = genradix_ptr(&plist->lgpl_list, idx++);
		if (nid_is_lo0(&id->nid))
			continue;

		rc = lnet_ping(id, &plist->lgpl_src_nid, plist->lgpl_timeout,
			       &peers, lnet_interfaces_max);
		if (rc < 0) {
			struct lnet_fail_ping *fail;

			fail = genradix_ptr_alloc(&plist->lgpl_failed,
						  plist->lgpl_failed_count++,
						  GFP_ATOMIC);
			if (!fail) {
				NL_SET_ERR_MSG(extack,
					       "failed to allocate failed NID");
				GOTO(send_error, rc = -ENOMEM);
			}
			fail->lfp_id = *id;
			fail->lfp_errno = rc;
			continue;
		}

		mutex_lock(&the_lnet.ln_api_mutex);
		lp = lnet_find_peer(&id->nid);
		if (lp) {
			primary_nid = lp->lp_primary_nid;
			mr_flag = lnet_peer_is_multi_rail(lp);
			lnet_peer_decref_locked(lp);
		}
		mutex_unlock(&the_lnet.ln_api_mutex);

		hdr = genlmsg_put(msg, portid, seq, &lnet_family,
				  NLM_F_MULTI, LNET_CMD_PING);
		if (!hdr) {
			NL_SET_ERR_MSG(extack, "failed to send values");
			genlmsg_cancel(msg, hdr);
			GOTO(send_error, rc = -EMSGSIZE);
		}

		if (idx == 1)
			nla_put_string(msg, LNET_PING_ATTR_HDR, "");

		nla_put_string(msg, LNET_PING_ATTR_PRIMARY_NID,
			       libcfs_nidstr(&primary_nid));
		if (mr_flag)
			nla_put_flag(msg, LNET_PING_ATTR_MULTIRAIL);

		nid_list = nla_nest_start(msg, LNET_PING_ATTR_PEER_NI_LIST);
		for (count = 0; count < rc; count++) {
			struct lnet_processid *result;
			struct nlattr *nid_attr;
			char *idstr;

			result = genradix_ptr(&peers.lgpl_list, count);
			if (nid_is_lo0(&result->nid))
				continue;

			nid_attr = nla_nest_start(msg, count + 1);
			if (gnlh->version == 1)
				idstr = libcfs_nidstr(&result->nid);
			else
				idstr = libcfs_idstr(result);
			nla_put_string(msg, LNET_PING_PEER_NI_ATTR_NID, idstr);
			nla_nest_end(msg, nid_attr);
		}
		nla_nest_end(msg, nid_list);
		genlmsg_end(msg, hdr);

		genradix_free(&peers.lgpl_list);
	}

	for (i = 0; i < plist->lgpl_failed_count; i++) {
		struct lnet_fail_ping *fail;
		void *hdr;

		fail = genradix_ptr(&plist->lgpl_failed, i);

		hdr = genlmsg_put(msg, portid, seq, &lnet_family,
				  NLM_F_MULTI, LNET_CMD_PING);
		if (!hdr) {
			NL_SET_ERR_MSG(extack, "failed to send failed values");
			genlmsg_cancel(msg, hdr);
			GOTO(send_error, rc = -EMSGSIZE);
		}

		nla_put_string(msg, LNET_PING_ATTR_HDR, "");
		nla_put_string(msg, LNET_PING_ATTR_PRIMARY_NID,
			       libcfs_nidstr(&fail->lfp_id.nid));
		nla_put_s16(msg, LNET_PING_ATTR_ERRNO, fail->lfp_errno);
		genlmsg_end(msg, hdr);
	}
	rc = 0; /* don't treat it as an error */

	plist->lgpl_index = idx;
send_error:
	return lnet_nl_send_error(cb->skb, portid, seq, rc);
}
#ifndef HAVE_NETLINK_CALLBACK_START
static int lnet_old_ping_show_dump(struct sk_buff *msg,
				   struct netlink_callback *cb)
{
	if (!cb->args[0]) {
		int rc = lnet_ping_show_start(cb);

		if (rc < 0)
			return lnet_nl_send_error(cb->skb,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  rc);
	}

	return lnet_ping_show_dump(msg, cb);
}
#endif
static const struct genl_multicast_group lnet_mcast_grps[] = {
	{ .name	= "ip2net",	},
	{ .name	= "net",	},
	{ .name	= "route",	},
	{ .name	= "ping",	},
};

static const struct genl_ops lnet_genl_ops[] = {
	{
		.cmd		= LNET_CMD_NETS,
		.flags		= GENL_ADMIN_PERM,
#ifdef HAVE_NETLINK_CALLBACK_START
		.start		= lnet_net_show_start,
		.dumpit		= lnet_net_show_dump,
#else
		.dumpit		= lnet_old_net_show_dump,
#endif
		.done		= lnet_net_show_done,
		.doit		= lnet_net_cmd,
	},
	{
		.cmd		= LNET_CMD_ROUTES,
#ifdef HAVE_NETLINK_CALLBACK_START
		.start		= lnet_route_show_start,
		.dumpit		= lnet_route_show_dump,
#else
		.dumpit		= lnet_old_route_show_dump,
#endif
		.done		= lnet_route_show_done,
	},
	{
		.cmd		= LNET_CMD_PING,
#ifdef HAVE_NETLINK_CALLBACK_START
		.start		= lnet_ping_show_start,
		.dumpit		= lnet_ping_show_dump,
#else
		.dumpit		= lnet_old_ping_show_dump,
#endif
		.done		= lnet_ping_show_done,
	},
};

static struct genl_family lnet_family = {
	.name		= LNET_GENL_NAME,
	.version	= LNET_GENL_VERSION,
	.module		= THIS_MODULE,
	.netnsok	= true,
	.ops		= lnet_genl_ops,
	.n_ops		= ARRAY_SIZE(lnet_genl_ops),
	.mcgrps		= lnet_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(lnet_mcast_grps),
#ifdef GENL_FAMILY_HAS_RESV_START_OP
	.resv_start_op	= __LNET_CMD_MAX_PLUS_ONE,
#endif
};
void LNetDebugPeer(struct lnet_processid *id)
{
	lnet_debug_peer(&id->nid);
}
EXPORT_SYMBOL(LNetDebugPeer);
/**
 * Determine if the specified peer \a nid is on the local node.
 *
 * \param nid	peer nid to check
 *
 * \retval true		If peer NID is on the local node.
 * \retval false	If peer NID is not on the local node.
 */
bool LNetIsPeerLocal(struct lnet_nid *nid)
{
	struct lnet_net *net;
	struct lnet_ni *ni;
	int cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (nid_same(&ni->ni_nid, nid)) {
				lnet_net_unlock(cpt);
				return true;
			}
		}
	}
	lnet_net_unlock(cpt);

	return false;
}
EXPORT_SYMBOL(LNetIsPeerLocal);
/**
 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
 * Note that all interfaces share the same PID, as requested by LNetNIInit().
 *
 * \param index	Index of the interface to look up.
 * \param id	On successful return, this location will hold the
 *		struct lnet_process_id ID of the interface.
 *
 * \retval 0		If an interface exists at \a index.
 * \retval -ENOENT	If no interface has been found.
 */
int
LNetGetId(unsigned int index, struct lnet_processid *id)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	int cpt;
	int rc = -ENOENT;

	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (!nid_is_nid4(&ni->ni_nid))
				/* FIXME this needs to be handled */
				continue;
			if (index-- != 0)
				continue;

			id->nid = ni->ni_nid;
			id->pid = the_lnet.ln_pid;
			rc = 0;
			break;
		}
	}

	lnet_net_unlock(cpt);
	return rc;
}
EXPORT_SYMBOL(LNetGetId);
struct ping_data {
	int rc;
	int replied;
	int pd_unlinked;
	struct lnet_handle_md mdh;
	struct completion completion;
};

static void
lnet_ping_event_handler(struct lnet_event *event)
{
	struct ping_data *pd = event->md_user_ptr;

	CDEBUG(D_NET, "ping event (%d %d)%s\n",
	       event->type, event->status,
	       event->unlinked ? " unlinked" : "");

	if (event->status) {
		if (!pd->rc)
			pd->rc = event->status;
	} else if (event->type == LNET_EVENT_REPLY) {
		pd->replied = 1;
		pd->rc = event->mlength;
	}

	if (event->unlinked)
		pd->pd_unlinked = 1;

	if (event->unlinked ||
	    (event->type == LNET_EVENT_SEND && event->status))
		complete(&pd->completion);
}
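
/* Ping works by issuing an LNetGet() to the peer's reserved portal with
 * the well-known ping match bits; the REPLY carries the peer's
 * lnet_ping_info, which lists its NIDs and their status.  The event
 * handler above records the reply length and completes @pd.completion
 * once the MD is unlinked, bounding the wait below.
 */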
static int lnet_ping(struct lnet_processid *id, struct lnet_nid *src_nid,
		     signed long timeout, struct lnet_genl_ping_list *plist,
		     int n_ids)
{
	int id_bytes = sizeof(struct lnet_ni_status); /* For 0@lo */
	struct lnet_md md = { NULL };
	struct ping_data pd = { 0 };
	struct lnet_ping_buffer *pbuf;
	struct lnet_processid pid;
	struct lnet_ping_iter pi;
	u32 *st;
	int nob;
	int rc;
	int rc2;
	int i = 0;

	genradix_init(&plist->lgpl_list);

	/* n_ids limit is arbitrary */
	if (n_ids <= 0 || LNET_NID_IS_ANY(&id->nid))
		return -EINVAL;

	/* if the user buffer has more space than the lnet_interfaces_max
	 * then only fill it up to lnet_interfaces_max
	 */
	if (n_ids > lnet_interfaces_max)
		n_ids = lnet_interfaces_max;

	if (id->pid == LNET_PID_ANY)
		id->pid = LNET_PID_LUSTRE;

	id_bytes += lnet_ping_sts_size(&id->nid) * n_ids;
	pbuf = lnet_ping_buffer_alloc(id_bytes, GFP_NOFS);
	if (!pbuf)
		return -ENOMEM;

	/* initialize md content */
	md.start     = &pbuf->pb_info;
	md.length    = id_bytes;
	md.threshold = 2; /* GET/REPLY */
	md.max_size  = 0;
	md.options   = LNET_MD_TRUNCATE;
	md.user_ptr  = &pd;
	md.handler   = lnet_ping_event_handler;

	init_completion(&pd.completion);

	rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
	if (rc != 0) {
		CERROR("Can't bind MD: %d\n", rc);
		goto fail_ping_buffer_decref;
	}

	rc = LNetGet(src_nid, pd.mdh, id, LNET_RESERVED_PORTAL,
		     LNET_PROTO_PING_MATCHBITS, 0, false);
	if (rc != 0) {
		/* Don't CERROR; this could be deliberate! */
		rc2 = LNetMDUnlink(pd.mdh);
		LASSERT(rc2 == 0);

		/* NB must wait for the UNLINK event below... */
	}

	/* Ensure completion in finite time... */
	wait_for_completion_timeout(&pd.completion, timeout);
	if (!pd.pd_unlinked) {
		LNetMDUnlink(pd.mdh);
		wait_for_completion(&pd.completion);
	}

	if (!pd.replied) {
		rc = pd.rc ?: -EIO;
		goto fail_ping_buffer_decref;
	}

	nob = pd.rc;
	LASSERT(nob >= 0 && nob <= id_bytes);

	rc = -EPROTO; /* if I can't parse... */

	if (nob < LNET_PING_INFO_HDR_SIZE) {
		CERROR("%s: ping info too short %d\n",
		       libcfs_idstr(id), nob);
		goto fail_ping_buffer_decref;
	}

	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
		lnet_swap_pinginfo(pbuf);
	} else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
		CERROR("%s: Unexpected magic %08x\n",
		       libcfs_idstr(id), pbuf->pb_info.pi_magic);
		goto fail_ping_buffer_decref;
	}

	if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
		CERROR("%s: ping w/o NI status: 0x%x\n",
		       libcfs_idstr(id), pbuf->pb_info.pi_features);
		goto fail_ping_buffer_decref;
	}

	/* Test if smaller than lnet_pinginfo with just one pi_ni status info.
	 * That one might contain size when large nids are used.
	 */
	if (nob < offsetof(struct lnet_ping_info, pi_ni[1])) {
		CERROR("%s: Short reply %d(%lu min)\n",
		       libcfs_idstr(id), nob,
		       offsetof(struct lnet_ping_info, pi_ni[1]));
		goto fail_ping_buffer_decref;
	}

	if (ping_info_count_entries(pbuf) < n_ids) {
		n_ids = ping_info_count_entries(pbuf);
		id_bytes = lnet_ping_info_size(&pbuf->pb_info);
	}

	if (nob < id_bytes) {
		CERROR("%s: Short reply %d(%d expected)\n",
		       libcfs_idstr(id), nob, id_bytes);
		goto fail_ping_buffer_decref;
	}

	for (st = ping_iter_first(&pi, pbuf, &pid.nid);
	     st;
	     st = ping_iter_next(&pi, &pid.nid)) {
		id = genradix_ptr_alloc(&plist->lgpl_list, i++, GFP_ATOMIC);
		if (!id) {
			rc = -ENOMEM;
			goto fail_ping_buffer_decref;
		}

		id->pid = pbuf->pb_info.pi_pid;
		id->nid = pid.nid;
	}
	rc = i;

fail_ping_buffer_decref:
	lnet_ping_buffer_decref(pbuf);
	return rc;
}
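
/* Discover the NIDs of a peer: clear its NIDS_UPTODATE state so the
 * discovery state machine re-pings (and, with @force, re-pushes) the
 * peer, then copy the resulting peer NI list to userland.  This is the
 * legacy ioctl path, hence the nid4-sized lnet_process_id buffers.
 */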
static int
lnet_discover(struct lnet_process_id id4, __u32 force,
	      struct lnet_process_id __user *ids, int n_ids)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_ni *p;
	struct lnet_peer *lp;
	struct lnet_process_id *buf;
	struct lnet_processid id;
	int cpt;
	int i;
	int rc;

	if (n_ids <= 0 ||
	    id4.nid == LNET_NID_ANY)
		return -EINVAL;

	lnet_pid4_to_pid(id4, &id);
	if (id.pid == LNET_PID_ANY)
		id.pid = LNET_PID_LUSTRE;

	/*
	 * If the user buffer has more space than the lnet_interfaces_max,
	 * then only fill it up to lnet_interfaces_max.
	 */
	if (n_ids > lnet_interfaces_max)
		n_ids = lnet_interfaces_max;

	CFS_ALLOC_PTR_ARRAY(buf, n_ids);
	if (!buf)
		return -ENOMEM;

	cpt = lnet_net_lock_current();
	lpni = lnet_peerni_by_nid_locked(&id.nid, NULL, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out;
	}

	/*
	 * Clearing the NIDS_UPTODATE flag ensures the peer will
	 * be discovered, provided discovery has not been disabled.
	 */
	lp = lpni->lpni_peer_net->lpn_peer;
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
	/* If the force flag is set, force a PING and PUSH as well. */
	if (force)
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
	spin_unlock(&lp->lp_lock);
	rc = lnet_discover_peer_locked(lpni, cpt, true);
	if (rc)
		goto out_decref;

	/* The lpni (or lp) for this NID may have changed and our ref is
	 * the only thing keeping the old one around. Release the ref
	 * and lookup the lpni again
	 */
	lnet_peer_ni_decref_locked(lpni);
	lpni = lnet_peer_ni_find_locked(&id.nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	i = 0;
	p = NULL;
	while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
		buf[i].pid = id.pid;
		buf[i].nid = lnet_nid_to_nid4(&p->lpni_nid);
		if (++i >= n_ids)
			break;
	}
	rc = i;

out_decref:
	lnet_peer_ni_decref_locked(lpni);
out:
	lnet_net_unlock(cpt);

	if (rc >= 0)
		if (copy_to_user(ids, buf, rc * sizeof(*buf)))
			rc = -EFAULT;
	CFS_FREE_PTR_ARRAY(buf, n_ids);

	return rc;
}
/**
 * Retrieve peer discovery status.
 *
 * \retval 1	if lnet_peer_discovery_disabled is 0
 * \retval 0	if lnet_peer_discovery_disabled is 1
 */
int
LNetGetPeerDiscoveryStatus(void)
{
	return !lnet_peer_discovery_disabled;
}
EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);