/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */
32 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/ctype.h>
35 #include <linux/log2.h>
36 #include <linux/ktime.h>
37 #include <linux/moduleparam.h>
38 #include <linux/uaccess.h>
39 #ifdef HAVE_SCHED_HEADERS
40 #include <linux/sched/signal.h>
42 #include <net/genetlink.h>
44 #include <libcfs/linux/linux-net.h>
45 #include <lnet/udsp.h>
46 #include <lnet/lib-lnet.h>
48 #define D_LNI D_CONSOLE
/* initialize ln_api_mutex statically, since it needs to be used in
 * the discovery_set callback. That module parameter callback can be
 * called before module init completes, so the mutex must be ready
 * for use by then.
 */
55 struct lnet the_lnet = {
56 .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
57 }; /* THE state of the network */
58 EXPORT_SYMBOL(the_lnet);
60 static char *ip2nets = "";
61 module_param(ip2nets, charp, 0444);
62 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
64 static char *networks = "";
65 module_param(networks, charp, 0444);
66 MODULE_PARM_DESC(networks, "local networks");
68 static char *routes = "";
69 module_param(routes, charp, 0444);
70 MODULE_PARM_DESC(routes, "routes to non-local networks");
72 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
73 module_param(rnet_htable_size, int, 0444);
74 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
76 static int use_tcp_bonding;
77 module_param(use_tcp_bonding, int, 0444);
78 MODULE_PARM_DESC(use_tcp_bonding,
79 "use_tcp_bonding parameter has been removed");
81 unsigned int lnet_numa_range = 0;
82 module_param(lnet_numa_range, uint, 0444);
83 MODULE_PARM_DESC(lnet_numa_range,
84 "NUMA range to consider during Multi-Rail selection");
/*
 * lnet_health_sensitivity determines by how much we decrement the health
 * value of an interface on a send error. The value defaults to 100, which
 * means an interface's health is decremented by 100 points on every failure.
 */
91 unsigned int lnet_health_sensitivity = 100;
92 static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
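/* Note on the pattern used for the tunables below: on kernels that provide
 * struct kernel_param_ops (HAVE_KERNEL_PARAM_OPS) we define a
 * param_ops_<name>/param_check_<name> pair so that module_param() can be
 * used with a custom setter that validates the new value. On older kernels
 * the same setter is wired up with module_param_call() instead. The getter
 * is always the stock param_get_int().
 */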
93 #ifdef HAVE_KERNEL_PARAM_OPS
94 static struct kernel_param_ops param_ops_health_sensitivity = {
95 .set = sensitivity_set,
98 #define param_check_health_sensitivity(name, p) \
99 __param_check(name, p, int)
100 module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
102 module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
103 &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
105 MODULE_PARM_DESC(lnet_health_sensitivity,
106 "Value to decrement the health value by on error");
109 * lnet_recovery_interval determines how often we should perform recovery
110 * on unhealthy interfaces.
112 unsigned int lnet_recovery_interval = 1;
113 static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
114 #ifdef HAVE_KERNEL_PARAM_OPS
115 static struct kernel_param_ops param_ops_recovery_interval = {
116 .set = recovery_interval_set,
117 .get = param_get_int,
119 #define param_check_recovery_interval(name, p) \
120 __param_check(name, p, int)
121 module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
123 module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
124 &lnet_recovery_interval, S_IRUGO|S_IWUSR);
126 MODULE_PARM_DESC(lnet_recovery_interval,
127 "DEPRECATED - Interval to recover unhealthy interfaces in seconds");
129 unsigned int lnet_recovery_limit;
130 module_param(lnet_recovery_limit, uint, 0644);
131 MODULE_PARM_DESC(lnet_recovery_limit,
132 "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");
134 unsigned int lnet_max_recovery_ping_interval = 900;
135 unsigned int lnet_max_recovery_ping_count = 9;
136 static int max_recovery_ping_interval_set(const char *val,
137 cfs_kernel_param_arg_t *kp);
139 #define param_check_max_recovery_ping_interval(name, p) \
140 __param_check(name, p, int)
142 #ifdef HAVE_KERNEL_PARAM_OPS
143 static struct kernel_param_ops param_ops_max_recovery_ping_interval = {
144 .set = max_recovery_ping_interval_set,
145 .get = param_get_int,
147 module_param(lnet_max_recovery_ping_interval, max_recovery_ping_interval, 0644);
149 module_param_call(lnet_max_recovery_ping_interval, max_recovery_ping_interval,
150 param_get_int, &lnet_max_recovery_ping_interval, 0644);
152 MODULE_PARM_DESC(lnet_max_recovery_ping_interval,
153 "The max interval between LNet recovery pings, in seconds");
155 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
156 static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
158 static struct kernel_param_ops param_ops_interfaces_max = {
160 .get = param_get_int,
163 #define param_check_interfaces_max(name, p) \
164 __param_check(name, p, int)
166 #ifdef HAVE_KERNEL_PARAM_OPS
167 module_param(lnet_interfaces_max, interfaces_max, 0644);
169 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
		  &param_ops_interfaces_max, 0644);
172 MODULE_PARM_DESC(lnet_interfaces_max,
173 "Maximum number of interfaces in a node.");
175 unsigned lnet_peer_discovery_disabled = 0;
176 static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);
178 static struct kernel_param_ops param_ops_discovery_disabled = {
179 .set = discovery_set,
180 .get = param_get_int,
183 #define param_check_discovery_disabled(name, p) \
184 __param_check(name, p, int)
185 #ifdef HAVE_KERNEL_PARAM_OPS
186 module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
188 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
		  &param_ops_discovery_disabled, 0644);
191 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
192 "Set to 1 to disable peer discovery on this node.");
194 unsigned int lnet_drop_asym_route;
195 static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);
197 static struct kernel_param_ops param_ops_drop_asym_route = {
198 .set = drop_asym_route_set,
199 .get = param_get_int,
202 #define param_check_drop_asym_route(name, p) \
203 __param_check(name, p, int)
204 #ifdef HAVE_KERNEL_PARAM_OPS
205 module_param(lnet_drop_asym_route, drop_asym_route, 0644);
207 module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
		  &param_ops_drop_asym_route, 0644);
210 MODULE_PARM_DESC(lnet_drop_asym_route,
211 "Set to 1 to drop asymmetrical route messages.");
213 #define LNET_TRANSACTION_TIMEOUT_DEFAULT 50
214 unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
215 static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
216 #ifdef HAVE_KERNEL_PARAM_OPS
217 static struct kernel_param_ops param_ops_transaction_timeout = {
218 .set = transaction_to_set,
219 .get = param_get_int,
222 #define param_check_transaction_timeout(name, p) \
223 __param_check(name, p, int)
224 module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
226 module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
227 &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
229 MODULE_PARM_DESC(lnet_transaction_timeout,
230 "Maximum number of seconds to wait for a peer response.");
232 #define LNET_RETRY_COUNT_DEFAULT 2
233 unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
234 static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
235 #ifdef HAVE_KERNEL_PARAM_OPS
236 static struct kernel_param_ops param_ops_retry_count = {
237 .set = retry_count_set,
238 .get = param_get_int,
241 #define param_check_retry_count(name, p) \
242 __param_check(name, p, int)
243 module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
245 module_param_call(lnet_retry_count, retry_count_set, param_get_int,
246 &lnet_retry_count, S_IRUGO|S_IWUSR);
248 MODULE_PARM_DESC(lnet_retry_count,
249 "Maximum number of times to retry transmitting a message");
251 unsigned int lnet_response_tracking = 3;
252 static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);
254 #ifdef HAVE_KERNEL_PARAM_OPS
255 static struct kernel_param_ops param_ops_response_tracking = {
256 .set = response_tracking_set,
257 .get = param_get_int,
260 #define param_check_response_tracking(name, p) \
261 __param_check(name, p, int)
262 module_param(lnet_response_tracking, response_tracking, 0644);
264 module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
265 &lnet_response_tracking, 0644);
267 MODULE_PARM_DESC(lnet_response_tracking,
268 "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");
270 #define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
271 (LNET_RETRY_COUNT_DEFAULT + 1))
272 unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;
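/* With the defaults above this works out to (50 - 1) / (2 + 1) = 16 seconds:
 * the overall transaction timeout is divided among the original send plus
 * lnet_retry_count retransmits, so each attempt gets its own LND-level
 * timeout budget.
 */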
273 static void lnet_set_lnd_timeout(void)
275 lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
276 (lnet_retry_count + 1);
280 * This sequence number keeps track of how many times DLC was used to
281 * update the local NIs. It is incremented when a NI is added or
282 * removed and checked when sending a message to determine if there is
283 * a need to re-run the selection algorithm. See lnet_select_pathway()
284 * for more details on its usage.
286 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
288 static int lnet_ping(struct lnet_process_id id4, struct lnet_nid *src_nid,
289 signed long timeout, struct lnet_process_id __user *ids,
292 static int lnet_discover(struct lnet_process_id id, __u32 force,
293 struct lnet_process_id __user *ids, int n_ids);
296 sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
299 unsigned *sensitivity = (unsigned *)kp->arg;
302 rc = kstrtoul(val, 0, &value);
304 CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
309 * The purpose of locking the api_mutex here is to ensure that
310 * the correct value ends up stored properly.
312 mutex_lock(&the_lnet.ln_api_mutex);
314 if (value > LNET_MAX_HEALTH_VALUE) {
315 mutex_unlock(&the_lnet.ln_api_mutex);
316 CERROR("Invalid health value. Maximum: %d value = %lu\n",
317 LNET_MAX_HEALTH_VALUE, value);
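	/* Setting the sensitivity to 0 turns the health feature off, and
	 * retries only make sense with health enabled, so implicitly clear
	 * lnet_retry_count and recompute the LND timeout. retry_count_set()
	 * likewise refuses a non-zero retry count while sensitivity is 0.
	 */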
321 if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
322 lnet_retry_count = 0;
323 lnet_set_lnd_timeout();
326 *sensitivity = value;
328 mutex_unlock(&the_lnet.ln_api_mutex);
334 recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
336 CWARN("'lnet_recovery_interval' has been deprecated\n");
342 max_recovery_ping_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
347 rc = kstrtoul(val, 0, &value);
349 CERROR("Invalid module parameter value for 'lnet_max_recovery_ping_interval'\n");
354 CERROR("Invalid max ping timeout. Must be strictly positive\n");
358 /* The purpose of locking the api_mutex here is to ensure that
359 * the correct value ends up stored properly.
361 mutex_lock(&the_lnet.ln_api_mutex);
362 lnet_max_recovery_ping_interval = value;
363 lnet_max_recovery_ping_count = 0;
366 lnet_max_recovery_ping_count++;
369 mutex_unlock(&the_lnet.ln_api_mutex);
375 discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
378 unsigned *discovery_off = (unsigned *)kp->arg;
380 struct lnet_ping_buffer *pbuf;
382 rc = kstrtoul(val, 0, &value);
384 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
388 value = (value) ? 1 : 0;
391 * The purpose of locking the api_mutex here is to ensure that
392 * the correct value ends up stored properly.
394 mutex_lock(&the_lnet.ln_api_mutex);
396 if (value == *discovery_off) {
397 mutex_unlock(&the_lnet.ln_api_mutex);
	/*
	 * We still want to set the discovery value even when LNet is not
	 * running. This is the case when LNet is being loaded and we want
	 * the module parameter to take effect. Otherwise, if we're changing
	 * the value dynamically, we only want to set it after the ping
	 * target has been updated and our peers notified, as is done below.
	 */
408 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
409 *discovery_off = value;
410 mutex_unlock(&the_lnet.ln_api_mutex);
414 /* tell peers that discovery setting has changed */
415 lnet_net_lock(LNET_LOCK_EX);
416 pbuf = the_lnet.ln_ping_target;
418 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
420 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
421 lnet_net_unlock(LNET_LOCK_EX);
423 /* only send a push when we're turning off discovery */
424 if (*discovery_off <= 0 && value > 0)
425 lnet_push_update_to_peers(1);
426 *discovery_off = value;
428 mutex_unlock(&the_lnet.ln_api_mutex);
434 drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
437 unsigned int *drop_asym_route = (unsigned int *)kp->arg;
440 rc = kstrtoul(val, 0, &value);
		CERROR("Invalid module parameter value for 'lnet_drop_asym_route'\n");
448 * The purpose of locking the api_mutex here is to ensure that
449 * the correct value ends up stored properly.
451 mutex_lock(&the_lnet.ln_api_mutex);
453 if (value == *drop_asym_route) {
454 mutex_unlock(&the_lnet.ln_api_mutex);
458 *drop_asym_route = value;
460 mutex_unlock(&the_lnet.ln_api_mutex);
466 transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
469 unsigned *transaction_to = (unsigned *)kp->arg;
472 rc = kstrtoul(val, 0, &value);
474 CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
479 * The purpose of locking the api_mutex here is to ensure that
480 * the correct value ends up stored properly.
482 mutex_lock(&the_lnet.ln_api_mutex);
484 if (value <= lnet_retry_count || value == 0) {
485 mutex_unlock(&the_lnet.ln_api_mutex);
486 CERROR("Invalid value for lnet_transaction_timeout (%lu). "
487 "Has to be greater than lnet_retry_count (%u)\n",
488 value, lnet_retry_count);
492 if (value == *transaction_to) {
493 mutex_unlock(&the_lnet.ln_api_mutex);
497 *transaction_to = value;
498 /* Update the lnet_lnd_timeout now that we've modified the
499 * transaction timeout
501 lnet_set_lnd_timeout();
503 mutex_unlock(&the_lnet.ln_api_mutex);
509 retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
512 unsigned *retry_count = (unsigned *)kp->arg;
515 rc = kstrtoul(val, 0, &value);
517 CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
522 * The purpose of locking the api_mutex here is to ensure that
523 * the correct value ends up stored properly.
525 mutex_lock(&the_lnet.ln_api_mutex);
527 if (lnet_health_sensitivity == 0 && value > 0) {
528 mutex_unlock(&the_lnet.ln_api_mutex);
529 CERROR("Can not set lnet_retry_count when health feature is turned off\n");
533 if (value > lnet_transaction_timeout) {
534 mutex_unlock(&the_lnet.ln_api_mutex);
535 CERROR("Invalid value for lnet_retry_count (%lu). "
536 "Has to be smaller than lnet_transaction_timeout (%u)\n",
537 value, lnet_transaction_timeout);
541 *retry_count = value;
	/* Update the lnet_lnd_timeout now that we've modified the
	 * retry count
	 */
546 lnet_set_lnd_timeout();
548 mutex_unlock(&the_lnet.ln_api_mutex);
554 intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
558 rc = kstrtoint(val, 0, &value);
560 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
564 if (value < LNET_INTERFACES_MIN) {
		CWARN("max interfaces provided is too small, setting to %d\n",
		      LNET_INTERFACES_MAX_DEFAULT);
567 value = LNET_INTERFACES_MAX_DEFAULT;
570 *(int *)kp->arg = value;
576 response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
579 unsigned long new_value;
581 rc = kstrtoul(val, 0, &new_value);
583 CERROR("Invalid value for 'lnet_response_tracking'\n");
	if (new_value > 3) {
588 CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
593 lnet_response_tracking = new_value;
599 lnet_get_routes(void)
605 lnet_get_networks(void)
610 if (*networks != 0 && *ip2nets != 0) {
611 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
612 "'ip2nets' but not both at once\n");
617 rc = lnet_parse_ip2nets(&nets, ip2nets);
618 return (rc == 0) ? nets : NULL;
628 lnet_init_locks(void)
630 spin_lock_init(&the_lnet.ln_eq_wait_lock);
631 spin_lock_init(&the_lnet.ln_msg_resend_lock);
632 init_completion(&the_lnet.ln_mt_wait_complete);
633 mutex_init(&the_lnet.ln_lnd_mutex);
636 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep; /* MDs <= LNET_SMALL_MD_SIZE bytes */
639 struct kmem_cache *lnet_udsp_cachep; /* udsp cache */
640 struct kmem_cache *lnet_rspt_cachep; /* response tracker cache */
641 struct kmem_cache *lnet_msg_cachep;
644 lnet_slab_setup(void)
646 /* create specific kmem_cache for MEs and small MDs (i.e., originally
647 * allocated in <size-xxx> kmem_cache).
649 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
651 if (!lnet_mes_cachep)
654 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
655 LNET_SMALL_MD_SIZE, 0, 0,
657 if (!lnet_small_mds_cachep)
660 lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
661 sizeof(struct lnet_udsp),
663 if (!lnet_udsp_cachep)
666 lnet_rspt_cachep = kmem_cache_create("lnet_rspt", sizeof(struct lnet_rsp_tracker),
668 if (!lnet_rspt_cachep)
671 lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
673 if (!lnet_msg_cachep)
680 lnet_slab_cleanup(void)
682 if (lnet_msg_cachep) {
683 kmem_cache_destroy(lnet_msg_cachep);
684 lnet_msg_cachep = NULL;
687 if (lnet_rspt_cachep) {
688 kmem_cache_destroy(lnet_rspt_cachep);
689 lnet_rspt_cachep = NULL;
692 if (lnet_udsp_cachep) {
693 kmem_cache_destroy(lnet_udsp_cachep);
694 lnet_udsp_cachep = NULL;
697 if (lnet_small_mds_cachep) {
698 kmem_cache_destroy(lnet_small_mds_cachep);
699 lnet_small_mds_cachep = NULL;
702 if (lnet_mes_cachep) {
703 kmem_cache_destroy(lnet_mes_cachep);
704 lnet_mes_cachep = NULL;
709 lnet_create_remote_nets_table(void)
712 struct list_head *hash;
714 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
715 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
716 CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
718 CERROR("Failed to create remote nets hash table\n");
722 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
723 INIT_LIST_HEAD(&hash[i]);
724 the_lnet.ln_remote_nets_hash = hash;
729 lnet_destroy_remote_nets_table(void)
733 if (the_lnet.ln_remote_nets_hash == NULL)
736 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
737 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
739 CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
740 LNET_REMOTE_NETS_HASH_SIZE);
741 the_lnet.ln_remote_nets_hash = NULL;
745 lnet_destroy_locks(void)
747 if (the_lnet.ln_res_lock != NULL) {
748 cfs_percpt_lock_free(the_lnet.ln_res_lock);
749 the_lnet.ln_res_lock = NULL;
752 if (the_lnet.ln_net_lock != NULL) {
753 cfs_percpt_lock_free(the_lnet.ln_net_lock);
754 the_lnet.ln_net_lock = NULL;
759 lnet_create_locks(void)
763 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
764 if (the_lnet.ln_res_lock == NULL)
767 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
768 if (the_lnet.ln_net_lock == NULL)
774 lnet_destroy_locks();
778 static void lnet_assert_wire_constants(void)
780 /* Wire protocol assertions generated by 'wirecheck'
781 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
782 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
783 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
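 *
 * These BUILD_BUG_ON() checks are compile-time only: if any structure size,
 * field offset or protocol constant drifts from the values recorded below,
 * the build fails rather than silently breaking wire compatibility.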
787 BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
788 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
789 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
790 BUILD_BUG_ON(LNET_MSG_ACK != 0);
791 BUILD_BUG_ON(LNET_MSG_PUT != 1);
792 BUILD_BUG_ON(LNET_MSG_GET != 2);
793 BUILD_BUG_ON(LNET_MSG_REPLY != 3);
794 BUILD_BUG_ON(LNET_MSG_HELLO != 4);
796 BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
797 BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);
799 /* Checks for struct lnet_nid */
800 BUILD_BUG_ON((int)sizeof(struct lnet_nid) != 20);
801 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_size) != 0);
802 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_size) != 1);
803 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_type) != 1);
804 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_type) != 1);
805 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_num) != 2);
806 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_num) != 2);
807 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_addr) != 4);
808 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_addr) != 16);
810 /* Checks for struct lnet_process_id_packed */
811 BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
812 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
813 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
814 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
815 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);
817 /* Checks for struct lnet_handle_wire */
818 BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
819 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
820 wh_interface_cookie) != 0);
821 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
822 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
823 wh_object_cookie) != 8);
824 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
826 /* Checks for struct struct lnet_magicversion */
827 BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
828 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
829 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
830 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
831 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
832 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
833 version_minor) != 6);
834 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
836 /* Checks for struct _lnet_hdr_nid4 */
837 BUILD_BUG_ON((int)sizeof(struct _lnet_hdr_nid4) != 72);
838 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_nid) != 0);
839 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_nid) != 8);
840 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_nid) != 8);
841 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_nid) != 8);
842 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_pid) != 16);
843 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_pid) != 4);
844 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_pid) != 20);
845 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_pid) != 4);
846 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, type) != 24);
847 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->type) != 4);
848 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, payload_length) != 28);
849 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->payload_length) != 4);
850 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg) != 32);
851 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg) != 40);
854 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.dst_wmd) != 32);
855 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.dst_wmd) != 16);
856 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.match_bits) != 48);
857 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.match_bits) != 8);
858 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.mlength) != 56);
859 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.mlength) != 4);
862 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ack_wmd) != 32);
863 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ack_wmd) != 16);
864 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.match_bits) != 48);
865 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.match_bits) != 8);
866 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.hdr_data) != 56);
867 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.hdr_data) != 8);
868 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ptl_index) != 64);
869 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ptl_index) != 4);
870 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.offset) != 68);
871 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.offset) != 4);
874 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.return_wmd) != 32);
875 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.return_wmd) != 16);
876 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.match_bits) != 48);
877 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.match_bits) != 8);
878 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.ptl_index) != 56);
879 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.ptl_index) != 4);
880 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.src_offset) != 60);
881 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.src_offset) != 4);
882 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.sink_length) != 64);
883 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.sink_length) != 4);
886 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.reply.dst_wmd) != 32);
887 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.reply.dst_wmd) != 16);
890 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.incarnation) != 32);
891 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.incarnation) != 8);
892 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.type) != 40);
893 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.type) != 4);
895 /* Checks for struct lnet_ni_status and related constants */
896 BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
897 BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
898 BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);
900 /* Checks for struct lnet_ni_status */
901 BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
902 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
903 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
904 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
905 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
906 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_msg_size) != 12);
907 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_msg_size) != 4);
909 /* Checks for struct lnet_ni_large_status */
910 BUILD_BUG_ON((int)sizeof(struct lnet_ni_large_status) != 24);
911 BUILD_BUG_ON((int)offsetof(struct lnet_ni_large_status, ns_status) != 0);
912 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_large_status *)0)->ns_status) != 4);
913 BUILD_BUG_ON((int)offsetof(struct lnet_ni_large_status, ns_nid) != 4);
914 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_large_status *)0)->ns_nid) != 20);
916 /* Checks for struct lnet_ping_info and related constants */
917 BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
918 BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
919 BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
920 BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
921 BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
922 BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
923 BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
924 BUILD_BUG_ON(LNET_PING_FEAT_LARGE_ADDR != 32);
925 BUILD_BUG_ON(LNET_PING_FEAT_PRIMARY_LARGE != 64);
926 BUILD_BUG_ON(LNET_PING_FEAT_BITS != 127);
928 /* Checks for struct lnet_ping_info */
929 BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
930 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
931 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
932 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
933 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
934 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
935 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
936 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
937 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
938 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
939 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
941 /* Acceptor connection request */
942 BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
944 /* Checks for struct lnet_acceptor_connreq */
945 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
946 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
947 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
948 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
949 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
950 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
951 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);
953 /* Checks for struct lnet_acceptor_connreq_v2 */
954 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq_v2) != 28);
955 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_magic) != 0);
956 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_magic) != 4);
957 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_version) != 4);
958 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_version) != 4);
959 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_nid) != 8);
960 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_nid) != 20);
962 /* Checks for struct lnet_counters_common */
963 BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
964 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
965 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
966 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
967 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
968 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
969 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
970 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
971 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
972 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
973 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
974 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
975 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
976 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
977 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
978 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
979 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
980 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
981 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
982 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
983 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
984 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
985 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
988 static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
990 const struct lnet_lnd *lnd;
992 /* holding lnd mutex */
993 if (type >= NUM_LNDS)
995 lnd = the_lnet.ln_lnds[type];
996 LASSERT(!lnd || lnd->lnd_type == type);
1002 lnet_get_lnd_timeout(void)
1004 return lnet_lnd_timeout;
1006 EXPORT_SYMBOL(lnet_get_lnd_timeout);
1009 lnet_register_lnd(const struct lnet_lnd *lnd)
1011 mutex_lock(&the_lnet.ln_lnd_mutex);
1013 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
1014 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
1016 the_lnet.ln_lnds[lnd->lnd_type] = lnd;
1018 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
1020 mutex_unlock(&the_lnet.ln_lnd_mutex);
1022 EXPORT_SYMBOL(lnet_register_lnd);
1025 lnet_unregister_lnd(const struct lnet_lnd *lnd)
1027 mutex_lock(&the_lnet.ln_lnd_mutex);
1029 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
1031 the_lnet.ln_lnds[lnd->lnd_type] = NULL;
1032 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
1034 mutex_unlock(&the_lnet.ln_lnd_mutex);
1036 EXPORT_SYMBOL(lnet_unregister_lnd);
1039 lnet_counters_get_common_locked(struct lnet_counters_common *common)
1041 struct lnet_counters *ctr;
	/* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
	 * actually called under the protection of the lnet_net_lock.
	 */
1047 memset(common, 0, sizeof(*common));
1049 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
1050 common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
1051 common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
1052 common->lcc_errors += ctr->lct_common.lcc_errors;
1053 common->lcc_send_count += ctr->lct_common.lcc_send_count;
1054 common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
1055 common->lcc_route_count += ctr->lct_common.lcc_route_count;
1056 common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
1057 common->lcc_send_length += ctr->lct_common.lcc_send_length;
1058 common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
1059 common->lcc_route_length += ctr->lct_common.lcc_route_length;
1060 common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
1065 lnet_counters_get_common(struct lnet_counters_common *common)
1067 lnet_net_lock(LNET_LOCK_EX);
1068 lnet_counters_get_common_locked(common);
1069 lnet_net_unlock(LNET_LOCK_EX);
1071 EXPORT_SYMBOL(lnet_counters_get_common);
1074 lnet_counters_get(struct lnet_counters *counters)
1076 struct lnet_counters *ctr;
1077 struct lnet_counters_health *health = &counters->lct_health;
1080 memset(counters, 0, sizeof(*counters));
1082 lnet_net_lock(LNET_LOCK_EX);
1084 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1085 GOTO(out_unlock, rc = -ENODEV);
1087 lnet_counters_get_common_locked(&counters->lct_common);
1089 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
1090 health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
1091 health->lch_resend_count += ctr->lct_health.lch_resend_count;
1092 health->lch_response_timeout_count +=
1093 ctr->lct_health.lch_response_timeout_count;
1094 health->lch_local_interrupt_count +=
1095 ctr->lct_health.lch_local_interrupt_count;
1096 health->lch_local_dropped_count +=
1097 ctr->lct_health.lch_local_dropped_count;
1098 health->lch_local_aborted_count +=
1099 ctr->lct_health.lch_local_aborted_count;
1100 health->lch_local_no_route_count +=
1101 ctr->lct_health.lch_local_no_route_count;
1102 health->lch_local_timeout_count +=
1103 ctr->lct_health.lch_local_timeout_count;
1104 health->lch_local_error_count +=
1105 ctr->lct_health.lch_local_error_count;
1106 health->lch_remote_dropped_count +=
1107 ctr->lct_health.lch_remote_dropped_count;
1108 health->lch_remote_error_count +=
1109 ctr->lct_health.lch_remote_error_count;
1110 health->lch_remote_timeout_count +=
1111 ctr->lct_health.lch_remote_timeout_count;
1112 health->lch_network_timeout_count +=
1113 ctr->lct_health.lch_network_timeout_count;
1116 lnet_net_unlock(LNET_LOCK_EX);
1119 EXPORT_SYMBOL(lnet_counters_get);
1122 lnet_counters_reset(void)
1124 struct lnet_counters *counters;
1127 lnet_net_lock(LNET_LOCK_EX);
1129 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1132 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
1133 memset(counters, 0, sizeof(struct lnet_counters));
1135 lnet_net_unlock(LNET_LOCK_EX);
1139 lnet_res_type2str(int type)
1144 case LNET_COOKIE_TYPE_MD:
1146 case LNET_COOKIE_TYPE_ME:
1148 case LNET_COOKIE_TYPE_EQ:
1154 lnet_res_container_cleanup(struct lnet_res_container *rec)
1158 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
1161 while (!list_empty(&rec->rec_active)) {
1162 struct list_head *e = rec->rec_active.next;
1165 if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
1166 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
1168 } else { /* NB: Active MEs should be attached on portals */
		/* Found live MDs/MEs/EQs: the user really should unlink/free
		 * all of them before finalizing LNet, but if someone didn't,
		 * we have to recycle the garbage for them */
1178 CERROR("%d active elements on exit of %s container\n",
1179 count, lnet_res_type2str(rec->rec_type));
1182 if (rec->rec_lh_hash != NULL) {
1183 CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
1184 rec->rec_lh_hash = NULL;
1187 rec->rec_type = 0; /* mark it as finalized */
1191 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
1196 LASSERT(rec->rec_type == 0);
1198 rec->rec_type = type;
1199 INIT_LIST_HEAD(&rec->rec_active);
1201 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
1203 /* Arbitrary choice of hash table size */
1204 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
1205 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
1206 if (rec->rec_lh_hash == NULL) {
1211 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
1212 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
1217 CERROR("Failed to setup %s resource container\n",
1218 lnet_res_type2str(type));
1219 lnet_res_container_cleanup(rec);
1224 lnet_res_containers_destroy(struct lnet_res_container **recs)
1226 struct lnet_res_container *rec;
1229 cfs_percpt_for_each(rec, i, recs)
1230 lnet_res_container_cleanup(rec);
1232 cfs_percpt_free(recs);
1235 static struct lnet_res_container **
1236 lnet_res_containers_create(int type)
1238 struct lnet_res_container **recs;
1239 struct lnet_res_container *rec;
1243 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
1245 CERROR("Failed to allocate %s resource containers\n",
1246 lnet_res_type2str(type));
1250 cfs_percpt_for_each(rec, i, recs) {
1251 rc = lnet_res_container_setup(rec, i, type);
1253 lnet_res_containers_destroy(recs);
1261 struct lnet_libhandle *
1262 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
1264 /* ALWAYS called with lnet_res_lock held */
1265 struct list_head *head;
1266 struct lnet_libhandle *lh;
1269 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
1272 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
1273 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
1275 list_for_each_entry(lh, head, lh_hash_chain) {
1276 if (lh->lh_cookie == cookie)
1284 lnet_res_lh_initialize(struct lnet_res_container *rec,
1285 struct lnet_libhandle *lh)
1287 /* ALWAYS called with lnet_res_lock held */
1288 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
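	/* Cookie layout: the low LNET_COOKIE_TYPE_BITS encode the resource
	 * type and the next LNET_CPT_BITS the CPT (both seeded in
	 * lnet_res_container_setup()); the remaining high bits form a
	 * per-container counter advanced below, and those counter bits are
	 * what the hash chains are indexed by.
	 */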
1291 lh->lh_cookie = rec->rec_lh_cookie;
1292 rec->rec_lh_cookie += 1 << ibits;
1294 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
1296 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
1300 lnet_create_array_of_queues(void)
1302 struct list_head **qs;
1303 struct list_head *q;
1306 qs = cfs_percpt_alloc(lnet_cpt_table(),
1307 sizeof(struct list_head));
1309 CERROR("Failed to allocate queues\n");
1313 cfs_percpt_for_each(q, i, qs)
1319 static int lnet_unprepare(void);
1322 lnet_prepare(lnet_pid_t requested_pid)
1324 /* Prepare to bring up the network */
1325 struct lnet_res_container **recs;
1328 if (requested_pid == LNET_PID_ANY) {
1329 /* Don't instantiate LNET just for me */
1333 LASSERT(the_lnet.ln_refcount == 0);
1335 the_lnet.ln_routing = 0;
1337 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
1338 the_lnet.ln_pid = requested_pid;
1340 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
1341 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
1342 INIT_LIST_HEAD(&the_lnet.ln_nets);
1343 INIT_LIST_HEAD(&the_lnet.ln_routers);
1344 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
1345 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
1346 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
1347 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
1348 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
1349 INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
1350 INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
1351 INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
1352 init_waitqueue_head(&the_lnet.ln_dc_waitq);
1353 the_lnet.ln_mt_handler = NULL;
1354 init_completion(&the_lnet.ln_started);
1356 rc = lnet_slab_setup();
1360 rc = lnet_create_remote_nets_table();
1365 * NB the interface cookie in wire handles guards against delayed
1366 * replies and ACKs appearing valid after reboot.
1368 the_lnet.ln_interface_cookie = ktime_get_real_ns();
1370 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
1371 sizeof(struct lnet_counters));
1372 if (the_lnet.ln_counters == NULL) {
1373 CERROR("Failed to allocate counters for LNet\n");
1378 rc = lnet_peer_tables_create();
1382 rc = lnet_msg_containers_create();
1386 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
1387 LNET_COOKIE_TYPE_EQ);
1391 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
1397 the_lnet.ln_md_containers = recs;
1399 rc = lnet_portals_create();
1401 CERROR("Failed to create portals for LNet: %d\n", rc);
1405 the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
1406 if (!the_lnet.ln_mt_zombie_rstqs) {
1419 lnet_unprepare(void)
1421 /* NB no LNET_LOCK since this is the last reference. All LND instances
1422 * have shut down already, so it is safe to unlink and free all
1423 * descriptors, even those that appear committed to a network op (eg MD
1424 * with non-zero pending count) */
1426 lnet_fail_nid(LNET_NID_ANY, 0);
1428 LASSERT(the_lnet.ln_refcount == 0);
1429 LASSERT(list_empty(&the_lnet.ln_test_peers));
1430 LASSERT(list_empty(&the_lnet.ln_nets));
1432 if (the_lnet.ln_mt_zombie_rstqs) {
1433 lnet_clean_zombie_rstqs();
1434 the_lnet.ln_mt_zombie_rstqs = NULL;
1437 lnet_assert_handler_unused(the_lnet.ln_mt_handler);
1438 the_lnet.ln_mt_handler = NULL;
1440 lnet_portals_destroy();
1442 if (the_lnet.ln_md_containers != NULL) {
1443 lnet_res_containers_destroy(the_lnet.ln_md_containers);
1444 the_lnet.ln_md_containers = NULL;
1447 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
1449 lnet_msg_containers_destroy();
1451 lnet_rtrpools_free(0);
1453 if (the_lnet.ln_counters != NULL) {
1454 cfs_percpt_free(the_lnet.ln_counters);
1455 the_lnet.ln_counters = NULL;
1457 lnet_destroy_remote_nets_table();
1458 lnet_udsp_destroy(true);
1459 lnet_slab_cleanup();
1465 lnet_net2ni_locked(__u32 net_id, int cpt)
1468 struct lnet_net *net;
1470 LASSERT(cpt != LNET_LOCK_EX);
1472 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1473 if (net->net_id == net_id) {
1474 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
1484 lnet_net2ni_addref(__u32 net)
1489 ni = lnet_net2ni_locked(net, 0);
1491 lnet_ni_addref_locked(ni, 0);
1496 EXPORT_SYMBOL(lnet_net2ni_addref);
1499 lnet_get_net_locked(__u32 net_id)
1501 struct lnet_net *net;
1503 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1504 if (net->net_id == net_id)
1512 lnet_net_clr_pref_rtrs(struct lnet_net *net)
1514 struct list_head zombies;
1515 struct lnet_nid_list *ne;
1516 struct lnet_nid_list *tmp;
1518 INIT_LIST_HEAD(&zombies);
1520 lnet_net_lock(LNET_LOCK_EX);
1521 list_splice_init(&net->net_rtr_pref_nids, &zombies);
1522 lnet_net_unlock(LNET_LOCK_EX);
1524 list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1525 list_del_init(&ne->nl_list);
1526 LIBCFS_FREE(ne, sizeof(*ne));
1531 lnet_net_add_pref_rtr(struct lnet_net *net,
1532 struct lnet_nid *gw_nid)
1533 __must_hold(&the_lnet.ln_api_mutex)
1535 struct lnet_nid_list *ne;
	/* This function is called with the api_mutex held. While the
	 * api_mutex is held the list cannot be modified, since it is only
	 * modified as a result of applying a UDSP, and that also happens
	 * under the api_mutex
	 */
1542 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1543 if (nid_same(&ne->nl_nid, gw_nid))
1547 LIBCFS_ALLOC(ne, sizeof(*ne));
1551 ne->nl_nid = *gw_nid;
1553 /* Lock the cpt to protect against addition and checks in the
1554 * selection algorithm
1556 lnet_net_lock(LNET_LOCK_EX);
1557 list_add(&ne->nl_list, &net->net_rtr_pref_nids);
1558 lnet_net_unlock(LNET_LOCK_EX);
1564 lnet_net_is_pref_rtr_locked(struct lnet_net *net, struct lnet_nid *rtr_nid)
1566 struct lnet_nid_list *ne;
1568 CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
1569 libcfs_net2str(net->net_id),
1570 list_empty(&net->net_rtr_pref_nids));
1572 if (list_empty(&net->net_rtr_pref_nids))
1575 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1576 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
1577 libcfs_nidstr(&ne->nl_nid),
1578 libcfs_nidstr(rtr_nid));
1579 if (nid_same(rtr_nid, &ne->nl_nid))
1587 lnet_nid4_cpt_hash(lnet_nid_t nid, unsigned int number)
1590 __u64 pair_bits = 0x0001000100010001LLU;
1591 __u64 mask = pair_bits * 0xFF;
1594 /* Use (sum-by-multiplication of nid bytes) mod (number of CPTs)
1595 * to match nid to a CPT.
1597 pair_sum = (key & mask) + ((key >> 8) & mask);
1598 pair_sum = (pair_sum * pair_bits) >> 48;
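	/* key & mask picks up the even-numbered nid bytes and
	 * (key >> 8) & mask the odd-numbered ones, leaving four 16-bit
	 * byte-pair sums packed into pair_sum. Multiplying by pair_bits
	 * accumulates those lanes so that the top 16 bits (extracted by
	 * the >> 48) hold the sum of all eight nid bytes, which is then
	 * taken modulo the CPT count below.
	 */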
1600 CDEBUG(D_NET, "Match nid %s to cpt %u\n",
1601 libcfs_nid2str(nid), (unsigned int)(pair_sum) % number);
1603 return (unsigned int)(pair_sum) % number;
1607 lnet_nid_cpt_hash(struct lnet_nid *nid, unsigned int number)
1613 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1618 if (nid_is_nid4(nid))
1619 return lnet_nid4_cpt_hash(lnet_nid_to_nid4(nid), number);
1621 for (i = 0; i < 4; i++)
1622 h = hash_32(nid->nid_addr[i]^h, 32);
1623 val = hash_32(LNET_NID_NET(nid) ^ h, LNET_CPT_BITS);
1626 return (unsigned int)(h + val + (val >> 1)) % number;
1630 lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni)
1632 struct lnet_net *net;
	/* must be called while holding lnet_net_lock */
1635 if (LNET_CPT_NUMBER == 1)
1636 return 0; /* the only one */
	/*
	 * If an NI is provided then use a CPT from the NI's cpt list if
	 * one exists. If one doesn't exist, then that NI is associated
	 * with all CPTs, and it follows that the net it belongs to is
	 * implicitly associated with all CPTs as well, so just hash the
	 * nid across all CPTs.
	 */
1646 if (ni->ni_cpts != NULL)
1647 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
1650 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1653 /* no NI provided so look at the net */
1654 net = lnet_get_net_locked(LNET_NID_NET(nid));
1656 if (net != NULL && net->net_cpts != NULL) {
1657 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
1660 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1664 lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni)
1669 if (LNET_CPT_NUMBER == 1)
1670 return 0; /* the only one */
1672 cpt = lnet_net_lock_current();
1674 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
1676 lnet_net_unlock(cpt);
1680 EXPORT_SYMBOL(lnet_nid2cpt);
1683 lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni)
1685 struct lnet_nid nid;
1687 if (LNET_CPT_NUMBER == 1)
1688 return 0; /* the only one */
1690 lnet_nid4_to_nid(nid4, &nid);
1691 return lnet_nid2cpt(&nid, ni);
1693 EXPORT_SYMBOL(lnet_cpt_of_nid);
1696 lnet_islocalnet_locked(__u32 net_id)
1698 struct lnet_net *net;
1701 net = lnet_get_net_locked(net_id);
1703 local = net != NULL;
1709 lnet_islocalnet(__u32 net_id)
1714 cpt = lnet_net_lock_current();
1716 local = lnet_islocalnet_locked(net_id);
1718 lnet_net_unlock(cpt);
1724 lnet_nid_to_ni_locked(struct lnet_nid *nid, int cpt)
1726 struct lnet_net *net;
1729 LASSERT(cpt != LNET_LOCK_EX);
1731 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1732 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1733 if (nid_same(&ni->ni_nid, nid))
1742 lnet_nid_to_ni_addref(struct lnet_nid *nid)
1747 ni = lnet_nid_to_ni_locked(nid, 0);
1749 lnet_ni_addref_locked(ni, 0);
1754 EXPORT_SYMBOL(lnet_nid_to_ni_addref);
1757 lnet_islocalnid(struct lnet_nid *nid)
1762 cpt = lnet_net_lock_current();
1763 ni = lnet_nid_to_ni_locked(nid, cpt);
1764 lnet_net_unlock(cpt);
1770 lnet_count_acceptor_nets(void)
1772 /* Return the # of NIs that need the acceptor. */
1774 struct lnet_net *net;
1777 cpt = lnet_net_lock_current();
1778 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		/* all socklnd-type networks (i.e. LNDs that provide an
		 * accept callback) need the acceptor */
1781 if (net->net_lnd->lnd_accept != NULL)
1785 lnet_net_unlock(cpt);
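/* Allocate a ping buffer large enough for 'nbytes' of pi_info payload.
 * The buffer is returned with its refcount set to 1; the caller owns that
 * reference and releases it with lnet_ping_buffer_decref().
 */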
1790 struct lnet_ping_buffer *
1791 lnet_ping_buffer_alloc(int nbytes, gfp_t gfp)
1793 struct lnet_ping_buffer *pbuf;
1795 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nbytes), gfp);
		pbuf->pb_nbytes = nbytes;	/* size of pb_info */
1798 pbuf->pb_needs_post = false;
1799 atomic_set(&pbuf->pb_refcnt, 1);
1806 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1808 LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
1809 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nbytes));
1812 static struct lnet_ping_buffer *
1813 lnet_ping_target_create(int nbytes)
1815 struct lnet_ping_buffer *pbuf;
1817 pbuf = lnet_ping_buffer_alloc(nbytes, GFP_NOFS);
1819 CERROR("Can't allocate ping source [%d]\n", nbytes);
1823 pbuf->pb_info.pi_nnis = 0;
1824 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1825 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1826 pbuf->pb_info.pi_features =
1827 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1833 lnet_get_net_ni_bytes_locked(struct lnet_net *net)
1838 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1839 bytes += lnet_ping_sts_size(&ni->ni_nid);
1845 lnet_get_ni_bytes(void)
1848 struct lnet_net *net;
1853 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1854 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1855 bytes += lnet_ping_sts_size(&ni->ni_nid);
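/* Byte-swap a ping buffer that arrived from a peer of the opposite
 * endianness (detected by a swabbed pi_magic). The fixed header and any
 * nid4 status entries are swabbed in place; large-address entries only
 * need their ns_status swabbed, since struct lnet_nid is stored in a form
 * that never needs swabbing.
 */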
1864 lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
1866 struct lnet_ni_large_status *lstat, *lend;
1867 struct lnet_ni_status *stat, *end;
1871 __swab32s(&pbuf->pb_info.pi_magic);
1872 __swab32s(&pbuf->pb_info.pi_features);
1873 __swab32s(&pbuf->pb_info.pi_pid);
1874 __swab32s(&pbuf->pb_info.pi_nnis);
1875 nnis = pbuf->pb_info.pi_nnis;
1876 stat = &pbuf->pb_info.pi_ni[0];
1877 end = (void *)&pbuf->pb_info + pbuf->pb_nbytes;
1878 for (i = 0; i < nnis && stat + 1 <= end; i++, stat++) {
1879 __swab64s(&stat->ns_nid);
1880 __swab32s(&stat->ns_status);
1882 /* Might be total size */
1883 __swab32s(&stat->ns_msg_size);
1885 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_LARGE_ADDR))
1888 lstat = (struct lnet_ni_large_status *)stat;
1890 while (lstat + 1 <= lend) {
1891 __swab32s(&lstat->ns_status);
		/* struct lnet_nid never needs to be swabbed */
1893 lstat = lnet_ping_sts_next(lstat);
1898 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1902 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1904 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1906 /* Loopback is guaranteed to be present */
1907 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1909 if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
1915 lnet_ping_target_destroy(void)
1917 struct lnet_net *net;
1920 lnet_net_lock(LNET_LOCK_EX);
1922 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1923 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1925 ni->ni_status = NULL;
1930 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1931 the_lnet.ln_ping_target = NULL;
1933 lnet_net_unlock(LNET_LOCK_EX);
1937 lnet_ping_target_event_handler(struct lnet_event *event)
1939 struct lnet_ping_buffer *pbuf = event->md_user_ptr;
1941 if (event->unlinked)
1942 lnet_ping_buffer_decref(pbuf);
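/* Set up the ping target: allocate a ping buffer sized for ni_bytes of
 * interface status, then attach it on LNET_RESERVED_PORTAL under
 * LNET_PROTO_PING_MATCHBITS as a persistent, remotely-managed GET-able MD
 * so that any peer can read this node's interface status.
 */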
1946 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1947 struct lnet_handle_md *ping_mdh,
1948 int ni_bytes, bool set_eq)
1950 struct lnet_processid id = {
1951 .nid = LNET_ANY_NID,
1955 struct lnet_md md = { NULL };
1959 the_lnet.ln_ping_target_handler =
1960 lnet_ping_target_event_handler;
1962 *ppbuf = lnet_ping_target_create(ni_bytes);
1963 if (*ppbuf == NULL) {
1968 /* Ping target ME/MD */
1969 me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
1970 LNET_PROTO_PING_MATCHBITS, 0,
1971 LNET_UNLINK, LNET_INS_AFTER);
1974 CERROR("Can't create ping target ME: %d\n", rc);
1975 goto fail_decref_ping_buffer;
1978 /* initialize md content */
1979 md.start = &(*ppbuf)->pb_info;
1980 md.length = (*ppbuf)->pb_nbytes;
1981 md.threshold = LNET_MD_THRESH_INF;
1983 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1984 LNET_MD_MANAGE_REMOTE;
1985 md.handler = the_lnet.ln_ping_target_handler;
1986 md.user_ptr = *ppbuf;
1988 rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
1990 CERROR("Can't attach ping target MD: %d\n", rc);
1991 goto fail_decref_ping_buffer;
1993 lnet_ping_buffer_addref(*ppbuf);
1997 fail_decref_ping_buffer:
1998 LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
1999 lnet_ping_buffer_decref(*ppbuf);
2006 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
2007 struct lnet_handle_md *ping_mdh)
2009 LNetMDUnlink(*ping_mdh);
2010 LNetInvalidateMDHandle(ping_mdh);
2012 /* NB the MD could be busy; this just starts the unlink */
2013 wait_var_event_warning(&pbuf->pb_refcnt,
2014 atomic_read(&pbuf->pb_refcnt) <= 1,
2015 "Still waiting for ping data MD to unlink\n");
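/* Populate the ping target buffer from the current list of local NIs.
 * All nid4 interfaces are written first as struct lnet_ni_status entries
 * (counted in pi_nnis); any large-address NIDs are appended afterwards as
 * struct lnet_ni_large_status entries, in which case
 * LNET_PING_FEAT_LARGE_ADDR is set and pi_ni[0].ns_msg_size records the
 * total buffer size.
 */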
2019 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
2022 struct lnet_net *net;
2023 struct lnet_ni_status *ns, *end;
2024 struct lnet_ni_large_status *lns, *lend;
2027 pbuf->pb_info.pi_nnis = 0;
2028 ns = &pbuf->pb_info.pi_ni[0];
2029 end = (void *)&pbuf->pb_info + pbuf->pb_nbytes;
2030 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2031 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2032 if (!nid_is_nid4(&ni->ni_nid)) {
2033 if (ns == &pbuf->pb_info.pi_ni[1]) {
2034 /* This is primary, and it is long */
2035 pbuf->pb_info.pi_features |=
2036 LNET_PING_FEAT_PRIMARY_LARGE;
2040 LASSERT(ns + 1 <= end);
2041 ns->ns_nid = lnet_nid_to_nid4(&ni->ni_nid);
2044 ns->ns_status = lnet_ni_get_status_locked(ni);
2045 ni->ni_status = &ns->ns_status;
2048 pbuf->pb_info.pi_nnis++;
2055 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2056 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2057 if (nid_is_nid4(&ni->ni_nid))
2059 LASSERT(lns + 1 <= lend);
2061 lns->ns_nid = ni->ni_nid;
2064 ns->ns_status = lnet_ni_get_status_locked(ni);
2065 ni->ni_status = &lns->ns_status;
2068 lns = lnet_ping_sts_next(lns);
2071 if ((void *)lns > (void *)ns) {
2072 /* Record total info size */
2073 pbuf->pb_info.pi_ni[0].ns_msg_size =
2074 (void *)lns - (void *)&pbuf->pb_info;
2075 pbuf->pb_info.pi_features |= LNET_PING_FEAT_LARGE_ADDR;
2078 /* We (ab)use the ns_status of the loopback interface to
2079 * transmit the sequence number. The first interface listed
2080 * must be the loopback interface.
2082 rc = lnet_ping_info_validate(&pbuf->pb_info);
2084 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
2087 LNET_PING_BUFFER_SEQNO(pbuf) =
2088 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
2092 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
2093 struct lnet_handle_md ping_mdh)
2095 struct lnet_ping_buffer *old_pbuf = NULL;
2096 struct lnet_handle_md old_ping_md;
2098 /* switch the NIs to point to the new ping info created */
2099 lnet_net_lock(LNET_LOCK_EX);
2101 if (!the_lnet.ln_routing)
2102 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
2103 if (!lnet_peer_discovery_disabled)
2104 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
2106 /* Ensure only known feature bits have been set. */
2107 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
2108 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
2110 lnet_ping_target_install_locked(pbuf);
2112 if (the_lnet.ln_ping_target) {
2113 old_pbuf = the_lnet.ln_ping_target;
2114 old_ping_md = the_lnet.ln_ping_target_md;
2116 the_lnet.ln_ping_target_md = ping_mdh;
2117 the_lnet.ln_ping_target = pbuf;
2119 lnet_net_unlock(LNET_LOCK_EX);
2122 /* unlink and free the old ping info */
2123 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
2124 lnet_ping_buffer_decref(old_pbuf);
2127 lnet_push_update_to_peers(0);
2131 lnet_ping_target_fini(void)
2133 lnet_ping_md_unlink(the_lnet.ln_ping_target,
2134 &the_lnet.ln_ping_target_md);
2136 lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
2137 lnet_ping_target_destroy();
2140 /* Resize the push target. */
2141 int lnet_push_target_resize(void)
2143 struct lnet_handle_md mdh;
2144 struct lnet_handle_md old_mdh;
2145 struct lnet_ping_buffer *pbuf;
2146 struct lnet_ping_buffer *old_pbuf;
2151 nbytes = the_lnet.ln_push_target_nbytes;
2153 CDEBUG(D_NET, "Invalid nbytes %d\n", nbytes);
2157 /* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
2158 * dropped when we need to resize again (see "old_pbuf" below) or when
2159 * LNet is shutdown (see lnet_push_target_fini())
2161 pbuf = lnet_ping_buffer_alloc(nbytes, GFP_NOFS);
2163 CDEBUG(D_NET, "Can't allocate pbuf for nbytes %d\n", nbytes);
2167 rc = lnet_push_target_post(pbuf, &mdh);
2169 CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
2170 lnet_ping_buffer_decref(pbuf);
2174 lnet_net_lock(LNET_LOCK_EX);
2175 old_pbuf = the_lnet.ln_push_target;
2176 old_mdh = the_lnet.ln_push_target_md;
2177 the_lnet.ln_push_target = pbuf;
2178 the_lnet.ln_push_target_md = mdh;
2179 lnet_net_unlock(LNET_LOCK_EX);
2182 LNetMDUnlink(old_mdh);
2183 /* Drop ref set by lnet_ping_buffer_alloc() */
2184 lnet_ping_buffer_decref(old_pbuf);
2187 /* Received another push or reply that requires a larger buffer */
2188 if (nbytes < the_lnet.ln_push_target_nbytes)
2191 CDEBUG(D_NET, "nbytes %d success\n", nbytes);
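/* Post the push target: attach an ME on the reserved portal matching the
 * ping match bits from any peer, then attach an MD over the ping buffer
 * that accepts (and truncates) incoming PUTs. */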
2195 int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
2196 struct lnet_handle_md *mdhp)
2198 struct lnet_processid id = { LNET_ANY_NID, LNET_PID_ANY };
2199 struct lnet_md md = { NULL };
2203 me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
2204 LNET_PROTO_PING_MATCHBITS, 0,
2205 LNET_UNLINK, LNET_INS_AFTER);
2208 CERROR("Can't create push target ME: %d\n", rc);
2212 pbuf->pb_needs_post = false;
2214 /* This reference is dropped by lnet_push_target_event_handler() */
2215 lnet_ping_buffer_addref(pbuf);
2217 /* initialize md content */
2218 md.start = &pbuf->pb_info;
2219 md.length = pbuf->pb_nbytes;
2222 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
2224 md.handler = the_lnet.ln_push_target_handler;
2226 rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
2228 CERROR("Can't attach push MD: %d\n", rc);
2229 lnet_ping_buffer_decref(pbuf);
2230 pbuf->pb_needs_post = true;
2234 CDEBUG(D_NET, "posted push target %p\n", pbuf);
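/* Event handler for the push target MD: byte-swap the ping info if it
 * arrived with foreign endianness, hand PUT events to the peer push
 * processing code, and drop the buffer reference taken by
 * lnet_push_target_post() once the event (or the UNLINK) is handled. */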
2239 static void lnet_push_target_event_handler(struct lnet_event *ev)
2241 struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
2243 CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
2246 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2247 lnet_swap_pinginfo(pbuf);
2249 if (ev->type == LNET_EVENT_UNLINK) {
2250 /* Drop ref added by lnet_push_target_post() */
2251 lnet_ping_buffer_decref(pbuf);
2255 lnet_peer_push_event(ev);
2257 /* Drop ref added by lnet_push_target_post */
2258 lnet_ping_buffer_decref(pbuf);
2261 /* Initialize the push target. */
2262 static int lnet_push_target_init(void)
2266 if (the_lnet.ln_push_target)
2269 the_lnet.ln_push_target_handler =
2270 lnet_push_target_event_handler;
2272 rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
2275 /* Start at the required minimum; we'll enlarge it if needed. */
2276 the_lnet.ln_push_target_nbytes = LNET_PING_INFO_MIN_SIZE;
2278 rc = lnet_push_target_resize();
2280 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2281 the_lnet.ln_push_target_handler = NULL;
2287 /* Clean up the push target. */
2288 static void lnet_push_target_fini(void)
2290 if (!the_lnet.ln_push_target)
2293 /* Unlink and invalidate to prevent new references. */
2294 LNetMDUnlink(the_lnet.ln_push_target_md);
2295 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
2297 /* Wait for the unlink to complete. */
2298 wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
2299 atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
2300 "Still waiting for ping data MD to unlink\n");
2302 /* Drop ref set by lnet_ping_buffer_alloc() */
2303 lnet_ping_buffer_decref(the_lnet.ln_push_target);
2304 the_lnet.ln_push_target = NULL;
2305 the_lnet.ln_push_target_nbytes = 0;
2307 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2308 lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
2309 the_lnet.ln_push_target_handler = NULL;
2313 lnet_ni_tq_credits(struct lnet_ni *ni)
2317 LASSERT(ni->ni_ncpts >= 1);
2319 if (ni->ni_ncpts == 1)
2320 return ni->ni_net->net_tunables.lct_max_tx_credits;
2322 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
2323 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
2324 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
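/* Worked example with hypothetical values: lct_max_tx_credits = 256,
 * ni_ncpts = 32 and lct_peer_tx_credits = 8 give 256 / 32 = 8 credits per
 * TX queue, which the max() lower bound (8 * 8 = 64) raises to 64 and the
 * min() upper bound (256) leaves unchanged. */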
2330 lnet_ni_unlink_locked(struct lnet_ni *ni)
2332 /* move it to zombie list and nobody can find it anymore */
2333 LASSERT(!list_empty(&ni->ni_netlist));
2334 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
2335 lnet_ni_decref_locked(ni, 0);
2339 lnet_clear_zombies_nis_locked(struct lnet_net *net)
2344 struct list_head *zombie_list = &net->net_ni_zombie;
2347 * Now wait for the NIs I just nuked to show up on the zombie
2348 * list and shut them down in guaranteed thread context
2351 while ((ni = list_first_entry_or_null(zombie_list,
2353 ni_netlist)) != NULL) {
2357 list_del_init(&ni->ni_netlist);
2358 /* the ni should be in deleting state. If it's not, it's a bug */
2360 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
2361 cfs_percpt_for_each(ref, j, ni->ni_refs) {
2364 /* still busy, add it back to zombie list */
2365 list_add(&ni->ni_netlist, zombie_list);
2369 if (!list_empty(&ni->ni_netlist)) {
2370 /* Unlock mutex while waiting to allow other
2371 * threads to read the LNet state and fall through
2374 lnet_net_unlock(LNET_LOCK_EX);
2375 mutex_unlock(&the_lnet.ln_api_mutex);
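/* (i & -i) == i only when i is a power of two, so the "Waiting for
 * zombie LNI" message below is rate-limited to power-of-two iteration
 * counts instead of being printed on every pass. */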
2378 if ((i & (-i)) == i) {
2380 "Waiting for zombie LNI %s\n",
2381 libcfs_nidstr(&ni->ni_nid));
2383 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2385 mutex_lock(&the_lnet.ln_api_mutex);
2386 lnet_net_lock(LNET_LOCK_EX);
2390 lnet_net_unlock(LNET_LOCK_EX);
2392 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
2394 LASSERT(!in_interrupt());
2395 /* Holding the LND mutex makes it safe for lnd_shutdown
2396 * to call module_put(). Module unload cannot finish
2397 * until lnet_unregister_lnd() completes, and that
2398 * requires the LND mutex.
2400 mutex_unlock(&the_lnet.ln_api_mutex);
2401 mutex_lock(&the_lnet.ln_lnd_mutex);
2402 (net->net_lnd->lnd_shutdown)(ni);
2403 mutex_unlock(&the_lnet.ln_lnd_mutex);
2404 mutex_lock(&the_lnet.ln_api_mutex);
2407 CDEBUG(D_LNI, "Removed LNI %s\n",
2408 libcfs_nidstr(&ni->ni_nid));
2412 lnet_net_lock(LNET_LOCK_EX);
2416 /* shut down the NI and release its refcount */
2418 lnet_shutdown_lndni(struct lnet_ni *ni)
2421 struct lnet_net *net = ni->ni_net;
2423 lnet_net_lock(LNET_LOCK_EX);
2425 ni->ni_state = LNET_NI_STATE_DELETING;
2427 lnet_ni_unlink_locked(ni);
2428 lnet_incr_dlc_seq();
2429 lnet_net_unlock(LNET_LOCK_EX);
2431 /* clear messages for this NI on the lazy portal */
2432 for (i = 0; i < the_lnet.ln_nportals; i++)
2433 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
2435 lnet_net_lock(LNET_LOCK_EX);
2436 lnet_clear_zombies_nis_locked(net);
2437 lnet_net_unlock(LNET_LOCK_EX);
2441 lnet_shutdown_lndnet(struct lnet_net *net)
2445 lnet_net_lock(LNET_LOCK_EX);
2447 list_del_init(&net->net_list);
2449 while ((ni = list_first_entry_or_null(&net->net_ni_list,
2451 ni_netlist)) != NULL) {
2452 lnet_net_unlock(LNET_LOCK_EX);
2453 lnet_shutdown_lndni(ni);
2454 lnet_net_lock(LNET_LOCK_EX);
2457 lnet_net_unlock(LNET_LOCK_EX);
2459 /* Do peer table cleanup for this net */
2460 lnet_peer_tables_cleanup(net);
2466 lnet_shutdown_lndnets(void)
2468 struct lnet_net *net;
2470 struct lnet_msg *msg, *tmp;
2472 /* NB called holding the global mutex */
2474 /* All quiet on the API front */
2475 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING ||
2476 the_lnet.ln_state == LNET_STATE_STOPPING);
2477 LASSERT(the_lnet.ln_refcount == 0);
2479 lnet_net_lock(LNET_LOCK_EX);
2480 the_lnet.ln_state = LNET_STATE_STOPPING;
2483 * move the nets to the zombie list to avoid them being
2484 * picked up for new work. LONET is also included in the
2485 * Nets that will be moved to the zombie list
2487 list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
2489 /* Drop the cached loopback Net. */
2490 if (the_lnet.ln_loni != NULL) {
2491 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
2492 the_lnet.ln_loni = NULL;
2494 lnet_net_unlock(LNET_LOCK_EX);
2496 /* iterate through the net zombie list and delete each net */
2497 while ((net = list_first_entry_or_null(&the_lnet.ln_net_zombie,
2500 lnet_shutdown_lndnet(net);
2502 spin_lock(&the_lnet.ln_msg_resend_lock);
2503 list_splice(&the_lnet.ln_msg_resend, &resend);
2504 spin_unlock(&the_lnet.ln_msg_resend_lock);
2506 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
2507 list_del_init(&msg->msg_list);
2508 msg->msg_no_resend = true;
2509 lnet_finalize(msg, -ECANCELED);
2512 lnet_net_lock(LNET_LOCK_EX);
2513 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
2514 lnet_net_unlock(LNET_LOCK_EX);
2518 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2521 struct lnet_tx_queue *tq;
2523 struct lnet_net *net = ni->ni_net;
2525 mutex_lock(&the_lnet.ln_lnd_mutex);
2528 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2529 ni->ni_lnd_tunables_set = true;
2532 rc = (net->net_lnd->lnd_startup)(ni);
2534 mutex_unlock(&the_lnet.ln_lnd_mutex);
2537 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2538 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2543 ni->ni_state = LNET_NI_STATE_ACTIVE;
2546 /* We keep a reference on the loopback net through the loopback NI */
2547 if (net->net_lnd->lnd_type == LOLND) {
2549 LASSERT(the_lnet.ln_loni == NULL);
2550 the_lnet.ln_loni = ni;
2551 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2552 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2553 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2554 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2558 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2559 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2560 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2561 libcfs_lnd2str(net->net_lnd->lnd_type),
2562 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2564 /* shutdown the NI since if we get here then it must've already
2567 lnet_shutdown_lndni(ni);
2571 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2572 tq->tq_credits_min =
2573 tq->tq_credits_max =
2574 tq->tq_credits = lnet_ni_tq_credits(ni);
2577 atomic_set(&ni->ni_tx_credits,
2578 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2579 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2581 /* Nodes with small feet have little entropy. The NID for this
2582 * node gives the most entropy in the low bits.
2584 add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));
2586 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2587 libcfs_nidstr(&ni->ni_nid),
2588 ni->ni_net->net_tunables.lct_peer_tx_credits,
2589 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2590 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2591 ni->ni_net->net_tunables.lct_peer_timeout);
2599 static const struct lnet_lnd *lnet_load_lnd(u32 lnd_type)
2601 const struct lnet_lnd *lnd;
2604 mutex_lock(&the_lnet.ln_lnd_mutex);
2605 lnd = lnet_find_lnd_by_type(lnd_type);
2607 mutex_unlock(&the_lnet.ln_lnd_mutex);
2608 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2609 mutex_lock(&the_lnet.ln_lnd_mutex);
2611 lnd = lnet_find_lnd_by_type(lnd_type);
2613 mutex_unlock(&the_lnet.ln_lnd_mutex);
2614 CERROR("Can't load LND %s, module %s, rc=%d\n",
2615 libcfs_lnd2str(lnd_type),
2616 libcfs_lnd2modname(lnd_type), rc);
2617 #ifndef HAVE_MODULE_LOADING_SUPPORT
2618 LCONSOLE_ERROR_MSG(0x104,
2619 "Your kernel must be compiled with kernel module loading support.");
2621 return ERR_PTR(-EINVAL);
2624 mutex_unlock(&the_lnet.ln_lnd_mutex);
2630 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2633 struct lnet_net *net_l = NULL;
2634 LIST_HEAD(local_ni_list);
2638 const struct lnet_lnd *lnd;
2640 net->net_tunables.lct_peer_timeout;
2642 net->net_tunables.lct_max_tx_credits;
2643 int peerrtrcredits =
2644 net->net_tunables.lct_peer_rtr_credits;
2647 * make sure that this net is unique. If it isn't then
2648 * we are adding interfaces to an already existing network, and
2649 * 'net' is just a convenient way to pass in the list.
2650 * If it is unique we need to find the LND and load it if necessary.
2653 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2654 lnd_type = LNET_NETTYP(net->net_id);
2656 lnd = lnet_load_lnd(lnd_type);
2662 mutex_lock(&the_lnet.ln_lnd_mutex);
2664 mutex_unlock(&the_lnet.ln_lnd_mutex);
2670 * net_l: if the network being added is unique then net_l
2671 * will point to that network
2672 * if the network being added is not unique then
2673 * net_l points to the existing network.
2675 * When we enter the loop below, we'll pick NIs off the
2676 * network being added and start them up, then add them to
2677 * a local ni list. Once we've successfully started all
2678 * the NIs then we join the local NI list (of started up
2679 * networks) with the net_l->net_ni_list, which should
2680 * point to the correct network to add the new ni list to
2682 * If any of the new NIs fail to start up, then we want to
2683 * iterate through the local ni list, which should include
2684 * any NIs which were successfully started up, and shut
2687 * After that we want to delete the network being added,
2688 * to avoid a memory leak.
2690 while ((ni = list_first_entry_or_null(&net->net_ni_added,
2692 ni_netlist)) != NULL) {
2693 list_del_init(&ni->ni_netlist);
2695 /* make sure that the NI we're about to start
2696 * up is actually unique. If it's not, fail. */
2697 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2698 ni->ni_interface)) {
2703 /* adjust the pointer to the parent network, just in case
2704 * the net is a duplicate */
2707 rc = lnet_startup_lndni(ni, tun);
2713 list_add_tail(&ni->ni_netlist, &local_ni_list);
2718 lnet_net_lock(LNET_LOCK_EX);
2719 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2720 lnet_incr_dlc_seq();
2721 lnet_net_unlock(LNET_LOCK_EX);
2723 /* if the network is not unique then we don't want to keep
2724 * it around after we're done. Free it. Otherwise add that
2725 * net to the global the_lnet.ln_nets */
2726 if (net_l != net && net_l != NULL) {
2728 * TODO - note. currently the tunables can not be updated
2734 * restore tunables after they have been overwritten by the LND
2737 if (peer_timeout != -1)
2738 net->net_tunables.lct_peer_timeout = peer_timeout;
2739 if (maxtxcredits != -1)
2740 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2741 if (peerrtrcredits != -1)
2742 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2744 lnet_net_lock(LNET_LOCK_EX);
2745 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2746 lnet_net_unlock(LNET_LOCK_EX);
2753 * shutdown the new NIs that are being started up
2754 * free the NET being started
2756 while ((ni = list_first_entry_or_null(&local_ni_list,
2758 ni_netlist)) != NULL)
2759 lnet_shutdown_lndni(ni);
2768 lnet_startup_lndnets(struct list_head *netlist)
2770 struct lnet_net *net;
2775 * Change to running state before bringing up the LNDs. This
2776 * allows lnet_shutdown_lndnets() to assert that we've passed
2779 lnet_net_lock(LNET_LOCK_EX);
2780 the_lnet.ln_state = LNET_STATE_RUNNING;
2781 lnet_net_unlock(LNET_LOCK_EX);
2783 while ((net = list_first_entry_or_null(netlist,
2785 net_list)) != NULL) {
2786 list_del_init(&net->net_list);
2788 rc = lnet_startup_lndnet(net, NULL);
2798 lnet_shutdown_lndnets();
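/* Recursively emit the ln_key_list at data[idx] as nested netlink
 * attributes: for each key slot write its index, name string, key format
 * and NLA type, and recurse into the next list in data[] for NLA_NESTED
 * entries. */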
2803 static int lnet_genl_parse_list(struct sk_buff *msg,
2804 const struct ln_key_list *data[], u16 idx)
2806 const struct ln_key_list *list = data[idx];
2807 const struct ln_key_props *props;
2808 struct nlattr *node;
2814 if (!list->lkl_maxattr)
2817 props = list->lkl_list;
2821 node = nla_nest_start(msg, LN_SCALAR_ATTR_LIST);
2825 for (count = 1; count <= list->lkl_maxattr; count++) {
2826 struct nlattr *key = nla_nest_start(msg, count);
2829 nla_put_u16(msg, LN_SCALAR_ATTR_LIST_SIZE,
2832 nla_put_u16(msg, LN_SCALAR_ATTR_INDEX, count);
2833 if (props[count].lkp_value)
2834 nla_put_string(msg, LN_SCALAR_ATTR_VALUE,
2835 props[count].lkp_value);
2836 if (props[count].lkp_key_format)
2837 nla_put_u16(msg, LN_SCALAR_ATTR_KEY_FORMAT,
2838 props[count].lkp_key_format);
2839 nla_put_u16(msg, LN_SCALAR_ATTR_NLA_TYPE,
2840 props[count].lkp_data_type);
2841 if (props[count].lkp_data_type == NLA_NESTED) {
2844 rc = lnet_genl_parse_list(msg, data, ++idx);
2850 nla_nest_end(msg, key);
2853 nla_nest_end(msg, node);
2857 int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
2858 const struct genl_family *family, int flags,
2859 u8 cmd, const struct ln_key_list *data[])
2867 hdr = genlmsg_put(msg, portid, seq, family, flags, cmd);
2869 GOTO(canceled, rc = -EMSGSIZE);
2871 rc = lnet_genl_parse_list(msg, data, 0);
2875 genlmsg_end(msg, hdr);
2878 genlmsg_cancel(msg, hdr);
2879 return rc > 0 ? 0 : rc;
2881 EXPORT_SYMBOL(lnet_genl_send_scalar_list);
2883 static struct genl_family lnet_family;
2886 * Initialize LNet library.
2888 * Automatically called at module loading time. Caller has to call
2889 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2890 * latter returned 0. It must be called exactly once.
2892 * \retval 0 on success
2893 * \retval -ve on failures.
2895 int lnet_lib_init(void)
2899 lnet_assert_wire_constants();
2901 /* refer to global cfs_cpt_table for now */
2902 the_lnet.ln_cpt_table = cfs_cpt_tab;
2903 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
2905 LASSERT(the_lnet.ln_cpt_number > 0);
2906 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2907 /* we are under risk of consuming all lh_cookie */
2908 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2909 "please change setting of CPT-table and retry\n",
2910 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2914 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2915 the_lnet.ln_cpt_bits++;
2917 rc = lnet_create_locks();
2919 CERROR("Can't create LNet global locks: %d\n", rc);
2923 rc = genl_register_family(&lnet_family);
2925 lnet_destroy_locks();
2926 CERROR("Can't register LNet netlink family: %d\n", rc);
2930 the_lnet.ln_refcount = 0;
2931 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2932 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2934 /* The hash table size is the number of bits it takes to express the set
2935 * ln_num_routes, minus 1 (better to underestimate than overestimate, so we
2936 * don't waste memory). */
2937 if (rnet_htable_size <= 0)
2938 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2939 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2940 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2941 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2942 order_base_2(rnet_htable_size) - 1);
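/* For example, with rnet_htable_size = 128, order_base_2(128) = 7, so
 * ln_remote_nets_hbits = 6; assuming the table is allocated with
 * 1 << ln_remote_nets_hbits buckets, that is a 64-bucket table. */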
2944 /* All LNDs apart from the LOLND are in separate modules. They
2945 * register themselves when their module loads, and unregister
2946 * themselves when their module is unloaded. */
2947 lnet_register_lnd(&the_lolnd);
2952 * Finalize LNet library.
2954 * \pre lnet_lib_init() called with success.
2955 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2957 * As this happens at module-unload, all lnds must already be unloaded,
2958 * so they must already be unregistered.
2960 void lnet_lib_exit(void)
2964 LASSERT(the_lnet.ln_refcount == 0);
2965 lnet_unregister_lnd(&the_lolnd);
2966 for (i = 0; i < NUM_LNDS; i++)
2967 LASSERT(!the_lnet.ln_lnds[i]);
2968 lnet_destroy_locks();
2969 genl_unregister_family(&lnet_family);
2973 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2975 * Users must call this function at least once before any other functions.
2976 * For each successful call there must be a corresponding call to
2977 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is ignored.
2980 * The PID used by LNet may be different from the one requested.
2983 * \param requested_pid PID requested by the caller.
2985 * \return >= 0 on success, and < 0 error code on failures.
2988 LNetNIInit(lnet_pid_t requested_pid)
2990 int im_a_router = 0;
2993 struct lnet_ping_buffer *pbuf;
2994 struct lnet_handle_md ping_mdh;
2995 LIST_HEAD(net_head);
2996 struct lnet_net *net;
2998 mutex_lock(&the_lnet.ln_api_mutex);
3000 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
3002 if (the_lnet.ln_state == LNET_STATE_STOPPING) {
3003 mutex_unlock(&the_lnet.ln_api_mutex);
3007 if (the_lnet.ln_refcount > 0) {
3008 rc = the_lnet.ln_refcount++;
3009 mutex_unlock(&the_lnet.ln_api_mutex);
3013 rc = lnet_prepare(requested_pid);
3015 mutex_unlock(&the_lnet.ln_api_mutex);
3019 /* create a network for Loopback network */
3020 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
3023 goto err_empty_list;
3026 /* Add in the loopback NI */
3027 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
3029 goto err_empty_list;
3032 if (use_tcp_bonding)
3033 CWARN("use_tcp_bonding has been removed. Use Multi-Rail and Dynamic Discovery instead, see LU-13641\n");
3035 /* If LNet is being initialized via DLC it is possible
3036 * that the user requests not to load module parameters (ones which
3037 * are supported by DLC) on initialization. Therefore, make sure not
3038 * to load networks, routes and forwarding from module parameters
3039 * in this case. On cleanup in case of failure only clean up
3040 * routes if it has been loaded */
3041 if (!the_lnet.ln_nis_from_mod_params) {
3042 rc = lnet_parse_networks(&net_head, lnet_get_networks());
3044 goto err_empty_list;
3047 rc = lnet_startup_lndnets(&net_head);
3049 goto err_empty_list;
3051 if (!the_lnet.ln_nis_from_mod_params) {
3052 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
3054 goto err_shutdown_lndnis;
3056 rc = lnet_rtrpools_alloc(im_a_router);
3058 goto err_destroy_routes;
3061 rc = lnet_acceptor_start();
3063 goto err_destroy_routes;
3065 the_lnet.ln_refcount = 1;
3066 /* Now I may use my own API functions... */
3068 ni_bytes = LNET_PING_INFO_HDR_SIZE;
3069 list_for_each_entry(net, &the_lnet.ln_nets, net_list)
3070 ni_bytes += lnet_get_net_ni_bytes_locked(net);
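/* The ping target is sized as the ping info header plus the status bytes
 * of every local NI across all nets, so a single buffer can report the
 * state of every interface on this node. */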
3072 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_bytes, true);
3074 goto err_acceptor_stop;
3076 lnet_ping_target_update(pbuf, ping_mdh);
3078 the_lnet.ln_mt_handler = lnet_mt_event_handler;
3080 rc = lnet_push_target_init();
3084 rc = lnet_peer_discovery_start();
3086 goto err_destroy_push_target;
3088 rc = lnet_monitor_thr_start();
3090 goto err_stop_discovery_thr;
3093 lnet_router_debugfs_init();
3095 mutex_unlock(&the_lnet.ln_api_mutex);
3097 complete_all(&the_lnet.ln_started);
3099 /* wait for all routers to start */
3100 lnet_wait_router_start();
3104 err_stop_discovery_thr:
3105 lnet_peer_discovery_stop();
3106 err_destroy_push_target:
3107 lnet_push_target_fini();
3109 lnet_ping_target_fini();
3111 the_lnet.ln_refcount = 0;
3112 lnet_acceptor_stop();
3114 if (!the_lnet.ln_nis_from_mod_params)
3115 lnet_destroy_routes();
3116 err_shutdown_lndnis:
3117 lnet_shutdown_lndnets();
3121 mutex_unlock(&the_lnet.ln_api_mutex);
3122 while ((net = list_first_entry_or_null(&net_head,
3124 net_list)) != NULL) {
3125 list_del_init(&net->net_list);
3130 EXPORT_SYMBOL(LNetNIInit);
3133 * Stop LNet interfaces, routing, and forwarding.
3135 * Users must call this function once for each successful call to LNetNIInit().
3136 * Once the LNetNIFini() operation has been started, the results of pending
3137 * API operations are undefined.
3139 * \return always 0 for current implementation.
3144 mutex_lock(&the_lnet.ln_api_mutex);
3146 LASSERT(the_lnet.ln_refcount > 0);
3148 if (the_lnet.ln_refcount != 1) {
3149 the_lnet.ln_refcount--;
3151 LASSERT(!the_lnet.ln_niinit_self);
3153 lnet_net_lock(LNET_LOCK_EX);
3154 the_lnet.ln_state = LNET_STATE_STOPPING;
3155 lnet_net_unlock(LNET_LOCK_EX);
3159 lnet_router_debugfs_fini();
3160 lnet_monitor_thr_stop();
3161 lnet_peer_discovery_stop();
3162 lnet_push_target_fini();
3163 lnet_ping_target_fini();
3165 /* Teardown fns that use my own API functions BEFORE here */
3166 the_lnet.ln_refcount = 0;
3168 lnet_acceptor_stop();
3169 lnet_destroy_routes();
3170 lnet_shutdown_lndnets();
3174 mutex_unlock(&the_lnet.ln_api_mutex);
3177 EXPORT_SYMBOL(LNetNIFini);
3180 * Grabs the ni data from the ni structure and fills the out parameters.
3183 * \param[in] ni network interface structure
3184 * \param[out] cfg_ni NI config information
3185 * \param[out] tun network and LND tunables
3188 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
3189 struct lnet_ioctl_config_lnd_tunables *tun,
3190 struct lnet_ioctl_element_stats *stats,
3193 size_t min_size = 0;
3196 if (!ni || !cfg_ni || !tun || !nid_is_nid4(&ni->ni_nid))
3199 if (ni->ni_interface != NULL) {
3200 strncpy(cfg_ni->lic_ni_intf,
3202 sizeof(cfg_ni->lic_ni_intf));
3205 cfg_ni->lic_nid = lnet_nid_to_nid4(&ni->ni_nid);
3206 cfg_ni->lic_status = lnet_ni_get_status_locked(ni);
3207 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
3209 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
3212 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
3213 LNET_STATS_TYPE_SEND);
3214 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
3215 LNET_STATS_TYPE_RECV);
3216 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
3217 LNET_STATS_TYPE_DROP);
3221 * tun->lt_tun will always be present, but in order to be
3222 * backwards compatible we need to handle the case where
3223 * tun->lt_tun is smaller than what the kernel has, because it
3224 * comes from an older version of a userspace program. In that
3225 * case we copy only as much information as there is space for.
3227 min_size = tun_size - sizeof(tun->lt_cmn);
3228 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
3230 /* copy over the cpts */
3231 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
3232 ni->ni_cpts == NULL) {
3233 for (i = 0; i < ni->ni_ncpts; i++)
3234 cfg_ni->lic_cpts[i] = i;
3237 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
3238 i < LNET_MAX_SHOW_NUM_CPT;
3240 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
3242 cfg_ni->lic_ncpts = ni->ni_ncpts;
3246 * NOTE: This is a legacy function left in the code to be backwards
3247 * compatible with older userspace programs. It should eventually be
3250 * Grabs the ni data from the ni structure and fills the out parameters.
3253 * \param[in] ni network interface structure
3254 * \param[out] config config information
3257 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
3258 struct lnet_ioctl_config_data *config)
3260 struct lnet_ioctl_net_config *net_config;
3261 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
3262 size_t min_size, tunable_size = 0;
3265 if (!ni || !config || !nid_is_nid4(&ni->ni_nid))
3268 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
3272 if (!ni->ni_interface)
3275 strncpy(net_config->ni_interface,
3277 sizeof(net_config->ni_interface));
3279 config->cfg_nid = lnet_nid_to_nid4(&ni->ni_nid);
3280 config->cfg_config_u.cfg_net.net_peer_timeout =
3281 ni->ni_net->net_tunables.lct_peer_timeout;
3282 config->cfg_config_u.cfg_net.net_max_tx_credits =
3283 ni->ni_net->net_tunables.lct_max_tx_credits;
3284 config->cfg_config_u.cfg_net.net_peer_tx_credits =
3285 ni->ni_net->net_tunables.lct_peer_tx_credits;
3286 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
3287 ni->ni_net->net_tunables.lct_peer_rtr_credits;
3289 net_config->ni_status = lnet_ni_get_status_locked(ni);
3292 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
3294 for (i = 0; i < num_cpts; i++)
3295 net_config->ni_cpts[i] = ni->ni_cpts[i];
3297 config->cfg_ncpts = num_cpts;
3301 * See if user land tools sent in a newer and larger version
3302 * of struct lnet_tunables than what the kernel uses.
3304 min_size = sizeof(*config) + sizeof(*net_config);
3306 if (config->cfg_hdr.ioc_len > min_size)
3307 tunable_size = config->cfg_hdr.ioc_len - min_size;
3309 /* Don't copy too much data to user space */
3310 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
3311 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
3313 if (lnd_cfg && min_size) {
3314 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
3315 config->cfg_config_u.cfg_net.net_interface_count = 1;
3317 /* Tell user land that kernel side has less data */
3318 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
3319 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
3320 config->cfg_hdr.ioc_len -= min_size;
3326 lnet_get_ni_idx_locked(int idx)
3329 struct lnet_net *net;
3331 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3332 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3341 int lnet_get_net_healthv_locked(struct lnet_net *net)
3344 int best_healthv = 0;
3345 int healthv, ni_fatal;
3347 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3348 healthv = atomic_read(&ni->ni_healthv);
3349 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
3350 if (!ni_fatal && healthv > best_healthv)
3351 best_healthv = healthv;
3354 return best_healthv;
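/* Walk the local NIs: with prev == NULL return the first NI (of mynet if
 * given, otherwise of the first net); otherwise return the NI following
 * prev, crossing into the next net only when no specific net was asked
 * for, and return NULL once the list is exhausted. */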
3358 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
3361 struct lnet_net *net = mynet;
3364 * It is possible that the net has been cleaned out while there is
3365 * a message being sent. This function accesses the net without
3366 * checking whether the list is empty.
3370 net = list_first_entry(&the_lnet.ln_nets,
3373 if (list_empty(&net->net_ni_list))
3375 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
3381 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
3382 /* if you reached the end of the ni list and the net is
3383 * specified, then there are no more nis in that net */
3387 /* we reached the end of this net's NI list. Move on to the next net. */
3389 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
3390 /* no more nets and no more NIs. */
3393 /* get the next net */
3394 net = list_first_entry(&prev->ni_net->net_list, struct lnet_net,
3396 if (list_empty(&net->net_ni_list))
3398 /* get the ni on it */
3399 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
3405 if (list_empty(&prev->ni_netlist))
3408 /* there are more nis left */
3409 ni = list_first_entry(&prev->ni_netlist, struct lnet_ni, ni_netlist);
3415 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3420 int idx = config->cfg_count;
3422 cpt = lnet_net_lock_current();
3424 ni = lnet_get_ni_idx_locked(idx);
3429 lnet_fill_ni_info_legacy(ni, config);
3433 lnet_net_unlock(cpt);
3438 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3439 struct lnet_ioctl_config_lnd_tunables *tun,
3440 struct lnet_ioctl_element_stats *stats,
3447 if (!cfg_ni || !tun || !stats)
3450 cpt = lnet_net_lock_current();
3452 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3457 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3461 lnet_net_unlock(cpt);
3465 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3474 cpt = lnet_net_lock_current();
3476 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3479 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3483 lnet_net_unlock(cpt);
3488 static int lnet_add_net_common(struct lnet_net *net,
3489 struct lnet_ioctl_config_lnd_tunables *tun)
3491 struct lnet_handle_md ping_mdh;
3492 struct lnet_ping_buffer *pbuf;
3493 struct lnet_remotenet *rnet;
3498 lnet_net_lock(LNET_LOCK_EX);
3499 rnet = lnet_find_rnet_locked(net->net_id);
3500 lnet_net_unlock(LNET_LOCK_EX);
3502 * make sure that the net added doesn't invalidate the current
3503 * configuration LNet is keeping
3506 CERROR("Adding net %s will invalidate routing configuration\n",
3507 libcfs_net2str(net->net_id));
3513 memcpy(&net->net_tunables,
3514 &tun->lt_cmn, sizeof(net->net_tunables));
3516 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3518 net_id = net->net_id;
3520 rc = lnet_startup_lndnet(net,
3521 (tun) ? &tun->lt_tun : NULL);
3525 /* make sure you calculate the correct number of slots in the ping
3526 * buffer. Since the ping info is a flattened list of all the NIs,
3527 * we should allocate enough slots to accommodate the number of NIs
3528 * which will be added.
3530 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3531 LNET_PING_INFO_HDR_SIZE +
3532 lnet_get_ni_bytes(),
3535 lnet_shutdown_lndnet(net);
3539 lnet_net_lock(LNET_LOCK_EX);
3540 net = lnet_get_net_locked(net_id);
3543 /* apply the UDSPs */
3544 rc = lnet_udsp_apply_policies_on_net(net);
3546 CERROR("Failed to apply UDSPs on local net %s\n",
3547 libcfs_net2str(net->net_id));
3549 /* At this point we lost track of which NI was just added, so we
3550 * just re-apply the policies on all of the NIs on this net
3552 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3553 rc = lnet_udsp_apply_policies_on_ni(ni);
3555 CERROR("Failed to apply UDSPs on ni %s\n",
3556 libcfs_nidstr(&ni->ni_nid));
3558 lnet_net_unlock(LNET_LOCK_EX);
3561 * Start the acceptor thread if this is the first network
3562 * being added that requires the thread.
3564 if (net->net_lnd->lnd_accept) {
3565 rc = lnet_acceptor_start();
3567 /* shutdown the net that we just started */
3568 CERROR("Failed to start up acceptor thread\n");
3569 lnet_shutdown_lndnet(net);
3574 lnet_net_lock(LNET_LOCK_EX);
3575 lnet_peer_net_added(net);
3576 lnet_net_unlock(LNET_LOCK_EX);
3578 lnet_ping_target_update(pbuf, ping_mdh);
3583 lnet_ping_md_unlink(pbuf, &ping_mdh);
3584 lnet_ping_buffer_decref(pbuf);
3589 lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
3592 if (tun->lt_cmn.lct_peer_timeout < 0)
3593 tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
3594 if (!tun->lt_cmn.lct_peer_tx_credits)
3595 tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
3596 if (!tun->lt_cmn.lct_max_tx_credits)
3597 tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
3601 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3602 struct lnet_ioctl_config_lnd_tunables *tun)
3604 struct lnet_net *net;
3607 LIST_HEAD(net_head);
3609 rc = lnet_parse_ip2nets(&nets, ip2nets);
3613 rc = lnet_parse_networks(&net_head, nets);
3617 lnet_set_tune_defaults(tun);
3619 mutex_lock(&the_lnet.ln_api_mutex);
3620 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3625 while ((net = list_first_entry_or_null(&net_head,
3627 net_list)) != NULL) {
3628 list_del_init(&net->net_list);
3629 rc = lnet_add_net_common(net, tun);
3635 mutex_unlock(&the_lnet.ln_api_mutex);
3637 while ((net = list_first_entry_or_null(&net_head,
3639 net_list)) != NULL) {
3640 list_del_init(&net->net_list);
3646 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf, u32 net_id,
3647 struct lnet_ioctl_config_lnd_tunables *tun)
3649 struct lnet_net *net;
3654 /* handle legacy ip2nets from DLC */
3655 if (conf->lic_legacy_ip2nets[0] != '\0')
3656 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3659 lnd_type = LNET_NETTYP(net_id);
3661 if (!libcfs_isknown_lnd(lnd_type)) {
3662 CERROR("No valid net and lnd information provided\n");
3666 net = lnet_net_alloc(net_id, NULL);
3670 for (i = 0; i < conf->lic_ncpts; i++) {
3671 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER) {
3677 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3684 lnet_set_tune_defaults(tun);
3686 mutex_lock(&the_lnet.ln_api_mutex);
3687 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3691 rc = lnet_add_net_common(net, tun);
3694 mutex_unlock(&the_lnet.ln_api_mutex);
3702 int lnet_dyn_del_ni(struct lnet_nid *nid)
3704 struct lnet_net *net;
3706 u32 net_id = LNET_NID_NET(nid);
3707 struct lnet_ping_buffer *pbuf;
3708 struct lnet_handle_md ping_mdh;
3712 /* don't allow userspace to shutdown the LOLND */
3713 if (LNET_NETTYP(net_id) == LOLND)
3716 mutex_lock(&the_lnet.ln_api_mutex);
3717 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3719 goto unlock_api_mutex;
3724 net = lnet_get_net_locked(net_id);
3726 CERROR("net %s not found\n",
3727 libcfs_net2str(net_id));
3732 if (!nid_addr_is_set(nid)) {
3733 /* remove the entire net */
3734 net_bytes = lnet_get_net_ni_bytes_locked(net);
3738 /* create and link a new ping info, before removing the old one */
3739 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3740 LNET_PING_INFO_HDR_SIZE +
3741 lnet_get_ni_bytes() - net_bytes,
3744 goto unlock_api_mutex;
3746 lnet_shutdown_lndnet(net);
3748 lnet_acceptor_stop();
3750 lnet_ping_target_update(pbuf, ping_mdh);
3752 goto unlock_api_mutex;
3755 ni = lnet_nid_to_ni_locked(nid, 0);
3757 CERROR("nid %s not found\n", libcfs_nidstr(nid));
3762 net_bytes = lnet_get_net_ni_bytes_locked(net);
3763 net_empty = list_is_singular(&net->net_ni_list);
3767 /* create and link a new ping info, before removing the old one */
3768 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3769 (LNET_PING_INFO_HDR_SIZE +
3770 lnet_get_ni_bytes() -
3771 lnet_ping_sts_size(&ni->ni_nid)),
3774 goto unlock_api_mutex;
3776 lnet_shutdown_lndni(ni);
3778 lnet_acceptor_stop();
3780 lnet_ping_target_update(pbuf, ping_mdh);
3782 /* check if the net is empty and remove it if it is */
3784 lnet_shutdown_lndnet(net);
3786 goto unlock_api_mutex;
3791 mutex_unlock(&the_lnet.ln_api_mutex);
3797 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3798 * They are only expected to be called for unique networks.
3799 * That can be as a result of older DLC library
3800 * calls. Multi-Rail DLC and beyond no longer uses these APIs.
3803 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3805 struct lnet_net *net;
3806 LIST_HEAD(net_head);
3808 struct lnet_ioctl_config_lnd_tunables tun;
3809 const char *nets = conf->cfg_config_u.cfg_net.net_intf;
3811 /* Create net/ni structures for the network string */
3812 rc = lnet_parse_networks(&net_head, nets);
3814 return rc == 0 ? -EINVAL : rc;
3816 mutex_lock(&the_lnet.ln_api_mutex);
3817 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3819 goto out_unlock_clean;
3823 rc = -EINVAL; /* only add one network per call */
3824 goto out_unlock_clean;
3827 net = list_first_entry(&net_head, struct lnet_net, net_list);
3828 list_del_init(&net->net_list);
3830 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3832 memset(&tun, 0, sizeof(tun));
3834 tun.lt_cmn.lct_peer_timeout =
3835 (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
3836 conf->cfg_config_u.cfg_net.net_peer_timeout;
3837 tun.lt_cmn.lct_peer_tx_credits =
3838 (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
3839 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3840 tun.lt_cmn.lct_peer_rtr_credits =
3841 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3842 tun.lt_cmn.lct_max_tx_credits =
3843 (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
3844 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3846 rc = lnet_add_net_common(net, &tun);
3849 mutex_unlock(&the_lnet.ln_api_mutex);
3850 /* net_head list is empty in success case */
3851 while ((net = list_first_entry_or_null(&net_head,
3853 net_list)) != NULL) {
3854 list_del_init(&net->net_list);
3861 lnet_dyn_del_net(u32 net_id)
3863 struct lnet_net *net;
3864 struct lnet_ping_buffer *pbuf;
3865 struct lnet_handle_md ping_mdh;
3866 int net_ni_bytes, rc;
3868 /* don't allow userspace to shutdown the LOLND */
3869 if (LNET_NETTYP(net_id) == LOLND)
3872 mutex_lock(&the_lnet.ln_api_mutex);
3873 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3880 net = lnet_get_net_locked(net_id);
3887 net_ni_bytes = lnet_get_net_ni_bytes_locked(net);
3891 /* create and link a new ping info, before removing the old one */
3892 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3893 LNET_PING_INFO_HDR_SIZE +
3894 lnet_get_ni_bytes() - net_ni_bytes,
3899 lnet_shutdown_lndnet(net);
3901 lnet_acceptor_stop();
3903 lnet_ping_target_update(pbuf, ping_mdh);
3906 mutex_unlock(&the_lnet.ln_api_mutex);
3911 void lnet_incr_dlc_seq(void)
3913 atomic_inc(&lnet_dlc_seq_no);
3916 __u32 lnet_get_dlc_seq_locked(void)
3918 return atomic_read(&lnet_dlc_seq_no);
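/* Set the health value of the local NI matching 'nid' (or of every local
 * NI when 'all' is set). An NI whose value drops below the maximum is
 * also queued on the local NI recovery list so the monitor thread can
 * attempt to recover it. */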
3922 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3924 struct lnet_net *net;
3927 lnet_net_lock(LNET_LOCK_EX);
3928 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3929 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3930 if (all || (nid_is_nid4(&ni->ni_nid) &&
3931 lnet_nid_to_nid4(&ni->ni_nid) == nid)) {
3932 atomic_set(&ni->ni_healthv, value);
3933 if (list_empty(&ni->ni_recovery) &&
3934 value < LNET_MAX_HEALTH_VALUE) {
3935 CERROR("manually adding local NI %s to recovery\n",
3936 libcfs_nidstr(&ni->ni_nid));
3937 list_add_tail(&ni->ni_recovery,
3938 &the_lnet.ln_mt_localNIRecovq);
3939 lnet_ni_addref_locked(ni, 0);
3942 lnet_net_unlock(LNET_LOCK_EX);
3948 lnet_net_unlock(LNET_LOCK_EX);
3952 lnet_ni_set_conns_per_peer(lnet_nid_t nid, int value, bool all)
3954 struct lnet_net *net;
3957 lnet_net_lock(LNET_LOCK_EX);
3958 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3959 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3960 if (lnet_nid_to_nid4(&ni->ni_nid) != nid && !all)
3962 if (LNET_NETTYP(net->net_id) == SOCKLND)
3963 ni->ni_lnd_tunables.lnd_tun_u.lnd_sock.lnd_conns_per_peer = value;
3964 else if (LNET_NETTYP(net->net_id) == O2IBLND)
3965 ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib.lnd_conns_per_peer = value;
3967 lnet_net_unlock(LNET_LOCK_EX);
3972 lnet_net_unlock(LNET_LOCK_EX);
3976 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3980 struct lnet_nid nid;
3982 lnet_nid4_to_nid(stats->hlni_nid, &nid);
3983 cpt = lnet_net_lock_current();
3984 ni = lnet_nid_to_ni_locked(&nid, cpt);
3990 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3991 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3992 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3993 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3994 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3995 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3996 stats->hlni_fatal_error = atomic_read(&ni->ni_fatal_error_on);
3997 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3998 stats->hlni_ping_count = ni->ni_ping_count;
3999 stats->hlni_next_ping = ni->ni_next_ping;
4002 lnet_net_unlock(cpt);
4008 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
4013 lnet_net_lock(LNET_LOCK_EX);
4014 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
4015 if (!nid_is_nid4(&ni->ni_nid))
4017 list->rlst_nid_array[i] = lnet_nid_to_nid4(&ni->ni_nid);
4019 if (i >= LNET_MAX_SHOW_NUM_NID)
4022 lnet_net_unlock(LNET_LOCK_EX);
4023 list->rlst_num_nids = i;
4029 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
4031 struct lnet_peer_ni *lpni;
4034 lnet_net_lock(LNET_LOCK_EX);
4035 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
4036 list->rlst_nid_array[i] = lnet_nid_to_nid4(&lpni->lpni_nid);
4038 if (i >= LNET_MAX_SHOW_NUM_NID)
4041 lnet_net_unlock(LNET_LOCK_EX);
4042 list->rlst_num_nids = i;
4048 * LNet ioctl handler.
4052 LNetCtl(unsigned int cmd, void *arg)
4054 struct libcfs_ioctl_data *data = arg;
4055 struct lnet_ioctl_config_data *config;
4057 struct lnet_nid nid;
4060 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
4061 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
4064 case IOC_LIBCFS_FAIL_NID:
4065 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
4067 case IOC_LIBCFS_ADD_ROUTE: {
4068 /* default router sensitivity to 1 */
4069 unsigned int sensitivity = 1;
4072 if (config->cfg_hdr.ioc_len < sizeof(*config))
4075 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
4077 config->cfg_config_u.cfg_route.rtr_sensitivity;
4080 lnet_nid4_to_nid(config->cfg_nid, &nid);
4081 mutex_lock(&the_lnet.ln_api_mutex);
4082 rc = lnet_add_route(config->cfg_net,
4083 config->cfg_config_u.cfg_route.rtr_hop,
4085 config->cfg_config_u.cfg_route.
4086 rtr_priority, sensitivity);
4087 mutex_unlock(&the_lnet.ln_api_mutex);
4091 case IOC_LIBCFS_DEL_ROUTE:
4094 if (config->cfg_hdr.ioc_len < sizeof(*config))
4097 lnet_nid4_to_nid(config->cfg_nid, &nid);
4098 mutex_lock(&the_lnet.ln_api_mutex);
4099 rc = lnet_del_route(config->cfg_net, &nid);
4100 mutex_unlock(&the_lnet.ln_api_mutex);
4103 case IOC_LIBCFS_GET_ROUTE:
4106 if (config->cfg_hdr.ioc_len < sizeof(*config))
4109 mutex_lock(&the_lnet.ln_api_mutex);
4110 rc = lnet_get_route(config->cfg_count,
4112 &config->cfg_config_u.cfg_route.rtr_hop,
4114 &config->cfg_config_u.cfg_route.rtr_flags,
4115 &config->cfg_config_u.cfg_route.
4117 &config->cfg_config_u.cfg_route.
4119 mutex_unlock(&the_lnet.ln_api_mutex);
4122 case IOC_LIBCFS_GET_LOCAL_NI: {
4123 struct lnet_ioctl_config_ni *cfg_ni;
4124 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
4125 struct lnet_ioctl_element_stats *stats;
4130 /* get the tunables if they are available */
4131 if (cfg_ni->lic_cfg_hdr.ioc_len <
4132 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
4135 stats = (struct lnet_ioctl_element_stats *)
4137 tun = (struct lnet_ioctl_config_lnd_tunables *)
4138 (cfg_ni->lic_bulk + sizeof(*stats));
4140 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
4143 mutex_lock(&the_lnet.ln_api_mutex);
4144 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
4145 mutex_unlock(&the_lnet.ln_api_mutex);
4149 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
4150 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
4152 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
4155 mutex_lock(&the_lnet.ln_api_mutex);
4156 rc = lnet_get_ni_stats(msg_stats);
4157 mutex_unlock(&the_lnet.ln_api_mutex);
4162 case IOC_LIBCFS_GET_NET: {
4163 size_t total = sizeof(*config) +
4164 sizeof(struct lnet_ioctl_net_config);
4167 if (config->cfg_hdr.ioc_len < total)
4170 mutex_lock(&the_lnet.ln_api_mutex);
4171 rc = lnet_get_net_config(config);
4172 mutex_unlock(&the_lnet.ln_api_mutex);
4176 case IOC_LIBCFS_GET_LNET_STATS:
4178 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
4180 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
4183 mutex_lock(&the_lnet.ln_api_mutex);
4184 rc = lnet_counters_get(&lnet_stats->st_cntrs);
4185 mutex_unlock(&the_lnet.ln_api_mutex);
4189 case IOC_LIBCFS_RESET_LNET_STATS:
4191 mutex_lock(&the_lnet.ln_api_mutex);
4192 lnet_counters_reset();
4193 mutex_unlock(&the_lnet.ln_api_mutex);
4197 case IOC_LIBCFS_CONFIG_RTR:
4200 if (config->cfg_hdr.ioc_len < sizeof(*config))
4203 mutex_lock(&the_lnet.ln_api_mutex);
4204 if (config->cfg_config_u.cfg_buffers.buf_enable) {
4205 rc = lnet_rtrpools_enable();
4206 mutex_unlock(&the_lnet.ln_api_mutex);
4209 lnet_rtrpools_disable();
4210 mutex_unlock(&the_lnet.ln_api_mutex);
4213 case IOC_LIBCFS_ADD_BUF:
4216 if (config->cfg_hdr.ioc_len < sizeof(*config))
4219 mutex_lock(&the_lnet.ln_api_mutex);
4220 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
4222 config->cfg_config_u.cfg_buffers.
4224 config->cfg_config_u.cfg_buffers.
4226 mutex_unlock(&the_lnet.ln_api_mutex);
4229 case IOC_LIBCFS_SET_NUMA_RANGE: {
4230 struct lnet_ioctl_set_value *numa;
4232 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4234 lnet_net_lock(LNET_LOCK_EX);
4235 lnet_numa_range = numa->sv_value;
4236 lnet_net_unlock(LNET_LOCK_EX);
4240 case IOC_LIBCFS_GET_NUMA_RANGE: {
4241 struct lnet_ioctl_set_value *numa;
4243 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4245 numa->sv_value = lnet_numa_range;
4249 case IOC_LIBCFS_GET_BUF: {
4250 struct lnet_ioctl_pool_cfg *pool_cfg;
4251 size_t total = sizeof(*config) + sizeof(*pool_cfg);
4255 if (config->cfg_hdr.ioc_len < total)
4258 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
4260 mutex_lock(&the_lnet.ln_api_mutex);
4261 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
4262 mutex_unlock(&the_lnet.ln_api_mutex);
4266 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
4267 struct lnet_ioctl_local_ni_hstats *stats = arg;
4269 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
4272 mutex_lock(&the_lnet.ln_api_mutex);
4273 rc = lnet_get_local_ni_hstats(stats);
4274 mutex_unlock(&the_lnet.ln_api_mutex);
4279 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
4280 struct lnet_ioctl_recovery_list *list = arg;
4281 if (list->rlst_hdr.ioc_len < sizeof(*list))
4284 mutex_lock(&the_lnet.ln_api_mutex);
4285 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
4286 rc = lnet_get_local_ni_recovery_list(list);
4288 rc = lnet_get_peer_ni_recovery_list(list);
4289 mutex_unlock(&the_lnet.ln_api_mutex);
4293 case IOC_LIBCFS_ADD_PEER_NI: {
4294 struct lnet_ioctl_peer_cfg *cfg = arg;
4295 struct lnet_nid prim_nid;
4297 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4300 mutex_lock(&the_lnet.ln_api_mutex);
4301 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid);
4302 lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid);
4303 rc = lnet_add_peer_ni(&prim_nid, &nid, cfg->prcfg_mr, false);
4304 mutex_unlock(&the_lnet.ln_api_mutex);
4308 case IOC_LIBCFS_DEL_PEER_NI: {
4309 struct lnet_ioctl_peer_cfg *cfg = arg;
4310 struct lnet_nid prim_nid;
4312 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4315 mutex_lock(&the_lnet.ln_api_mutex);
4316 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid);
4317 lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid);
4318 rc = lnet_del_peer_ni(&prim_nid,
4320 mutex_unlock(&the_lnet.ln_api_mutex);
4324 case IOC_LIBCFS_GET_PEER_INFO: {
4325 struct lnet_ioctl_peer *peer_info = arg;
4327 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
4330 mutex_lock(&the_lnet.ln_api_mutex);
4331 rc = lnet_get_peer_ni_info(
4332 peer_info->pr_count,
4334 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
4335 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
4336 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
4337 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
4338 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
4339 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
4340 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
4341 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
4342 mutex_unlock(&the_lnet.ln_api_mutex);
4346 case IOC_LIBCFS_GET_PEER_NI: {
4347 struct lnet_ioctl_peer_cfg *cfg = arg;
4349 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4352 mutex_lock(&the_lnet.ln_api_mutex);
4353 rc = lnet_get_peer_info(cfg,
4354 (void __user *)cfg->prcfg_bulk);
4355 mutex_unlock(&the_lnet.ln_api_mutex);
4359 case IOC_LIBCFS_GET_PEER_LIST: {
4360 struct lnet_ioctl_peer_cfg *cfg = arg;
4362 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4365 mutex_lock(&the_lnet.ln_api_mutex);
4366 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
4367 (struct lnet_process_id __user *)cfg->prcfg_bulk);
4368 mutex_unlock(&the_lnet.ln_api_mutex);
4372 case IOC_LIBCFS_SET_HEALHV: {
4373 struct lnet_ioctl_reset_health_cfg *cfg = arg;
4375 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
4377 if (cfg->rh_value < 0 ||
4378 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
4379 value = LNET_MAX_HEALTH_VALUE;
4381 value = cfg->rh_value;
4382 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
4383 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
4384 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
4385 mutex_lock(&the_lnet.ln_api_mutex);
4386 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
4387 lnet_ni_set_healthv(cfg->rh_nid, value,
4390 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
4392 mutex_unlock(&the_lnet.ln_api_mutex);
4396 case IOC_LIBCFS_SET_CONNS_PER_PEER: {
4397 struct lnet_ioctl_reset_conns_per_peer_cfg *cfg = arg;
4400 if (cfg->rcpp_hdr.ioc_len < sizeof(*cfg))
4402 if (cfg->rcpp_value < 0)
4405 value = cfg->rcpp_value;
4407 "Setting conns_per_peer to %d for %s. all = %d\n",
4408 value, libcfs_nid2str(cfg->rcpp_nid), cfg->rcpp_all);
4409 mutex_lock(&the_lnet.ln_api_mutex);
4410 lnet_ni_set_conns_per_peer(cfg->rcpp_nid, value, cfg->rcpp_all);
4411 mutex_unlock(&the_lnet.ln_api_mutex);
4415 case IOC_LIBCFS_NOTIFY_ROUTER: {
4416 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
4418 /* The deadline passed in by the user should be some time in
4419 * seconds in the future since the UNIX epoch. We have to map
4420 * that deadline to the wall clock.
4422 deadline += ktime_get_seconds();
4423 lnet_nid4_to_nid(data->ioc_nid, &nid);
4424 return lnet_notify(NULL, &nid, data->ioc_flags, false,
4428 case IOC_LIBCFS_LNET_DIST:
4429 lnet_nid4_to_nid(data->ioc_nid, &nid);
4430 rc = LNetDist(&nid, &nid, &data->ioc_u32[1]);
4431 if (rc < 0 && rc != -EHOSTUNREACH)
4434 data->ioc_nid = lnet_nid_to_nid4(&nid);
4435 data->ioc_u32[0] = rc;
4438 case IOC_LIBCFS_TESTPROTOCOMPAT:
4439 the_lnet.ln_testprotocompat = data->ioc_flags;
4442 case IOC_LIBCFS_LNET_FAULT:
4443 return lnet_fault_ctl(data->ioc_flags, data);
4445 case IOC_LIBCFS_PING: {
4446 struct lnet_process_id id4;
4447 signed long timeout;
4449 id4.nid = data->ioc_nid;
4450 id4.pid = data->ioc_u32[0];
4452 /* If timeout is negative then set default of 3 minutes */
4453 if (((s32)data->ioc_u32[1] <= 0) ||
4454 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4455 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4457 timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
4459 rc = lnet_ping(id4, &LNET_ANY_NID, timeout, data->ioc_pbuf1,
4460 data->ioc_plen1 / sizeof(struct lnet_process_id));
4465 data->ioc_count = rc;
4469 case IOC_LIBCFS_PING_PEER: {
4470 struct lnet_ioctl_ping_data *ping = arg;
4471 struct lnet_nid src_nid = LNET_ANY_NID;
4472 struct lnet_peer *lp;
4473 signed long timeout;
4475 /* Check if the supplied ping data supports source nid
4476 * NB: This check is sufficient if lnet_ioctl_ping_data has
4477 * additional fields added, but if they are re-ordered or
4478 * fields removed then this will break. It is expected that
4479 * these ioctls will be replaced with netlink implementation, so
4480 * it is probably not worth coming up with a more robust version
4481 * compatibility scheme.
4483 if (ping->ping_hdr.ioc_len >= sizeof(struct lnet_ioctl_ping_data))
4484 lnet_nid4_to_nid(ping->ping_src, &src_nid);
4486 /* If timeout is negative then set default of 3 minutes */
4487 if (((s32)ping->op_param) <= 0 ||
4488 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4489 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4491 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
4493 rc = lnet_ping(ping->ping_id, &src_nid, timeout,
4499 mutex_lock(&the_lnet.ln_api_mutex);
4500 lnet_nid4_to_nid(ping->ping_id.nid, &nid);
4501 lp = lnet_find_peer(&nid);
4504 lnet_nid_to_nid4(&lp->lp_primary_nid);
4505 ping->mr_info = lnet_peer_is_multi_rail(lp);
4506 lnet_peer_decref_locked(lp);
4508 mutex_unlock(&the_lnet.ln_api_mutex);
4510 ping->ping_count = rc;
4514 case IOC_LIBCFS_DISCOVER: {
4515 struct lnet_ioctl_ping_data *discover = arg;
4516 struct lnet_peer *lp;
4518 rc = lnet_discover(discover->ping_id, discover->op_param,
4520 discover->ping_count);
4524 mutex_lock(&the_lnet.ln_api_mutex);
4525 lnet_nid4_to_nid(discover->ping_id.nid, &nid);
4526 lp = lnet_find_peer(&nid);
4528 discover->ping_id.nid =
4529 lnet_nid_to_nid4(&lp->lp_primary_nid);
4530 discover->mr_info = lnet_peer_is_multi_rail(lp);
4531 lnet_peer_decref_locked(lp);
4533 mutex_unlock(&the_lnet.ln_api_mutex);
4535 discover->ping_count = rc;
4539 case IOC_LIBCFS_ADD_UDSP: {
4540 struct lnet_ioctl_udsp *ioc_udsp = arg;
4541 __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
4543 mutex_lock(&the_lnet.ln_api_mutex);
4544 rc = lnet_udsp_demarshal_add(arg, bulk_size);
4546 rc = lnet_udsp_apply_policies(NULL, false);
4547 CDEBUG(D_NET, "policy application returned %d\n", rc);
4550 mutex_unlock(&the_lnet.ln_api_mutex);
4555 case IOC_LIBCFS_DEL_UDSP: {
4556 struct lnet_ioctl_udsp *ioc_udsp = arg;
4557 int idx = ioc_udsp->iou_idx;
4559 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4562 mutex_lock(&the_lnet.ln_api_mutex);
4563 rc = lnet_udsp_del_policy(idx);
4565 rc = lnet_udsp_apply_policies(NULL, false);
4566 CDEBUG(D_NET, "policy re-application returned %d\n",
4570 mutex_unlock(&the_lnet.ln_api_mutex);
4575 case IOC_LIBCFS_GET_UDSP_SIZE: {
4576 struct lnet_ioctl_udsp *ioc_udsp = arg;
4577 struct lnet_udsp *udsp;
4579 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4584 mutex_lock(&the_lnet.ln_api_mutex);
4585 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4589 /* Coming in, iou_idx holds the index of the UDSP
4590 * to get the size of. Going out, iou_idx holds the
4591 * size of the UDSP found at the passed-in index. */
4594 ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
4595 if (ioc_udsp->iou_idx < 0)
4598 mutex_unlock(&the_lnet.ln_api_mutex);
4603 case IOC_LIBCFS_GET_UDSP: {
4604 struct lnet_ioctl_udsp *ioc_udsp = arg;
4605 struct lnet_udsp *udsp;
4607 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4612 mutex_lock(&the_lnet.ln_api_mutex);
4613 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4617 rc = lnet_udsp_marshal(udsp, ioc_udsp);
4618 mutex_unlock(&the_lnet.ln_api_mutex);
4623 case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
4624 struct lnet_ioctl_construct_udsp_info *info = arg;
4626 if (info->cud_hdr.ioc_len < sizeof(*info))
4629 CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
4630 libcfs_nid2str(info->cud_nid));
4632 mutex_lock(&the_lnet.ln_api_mutex);
4633 lnet_udsp_get_construct_info(info);
4634 mutex_unlock(&the_lnet.ln_api_mutex);
4640 ni = lnet_net2ni_addref(data->ioc_net);
4644 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
4647 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
4654 EXPORT_SYMBOL(LNetCtl);
4656 static const struct ln_key_list net_props_list = {
4657 .lkl_maxattr = LNET_NET_ATTR_MAX,
4659 [LNET_NET_ATTR_HDR] = {
4661 .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
4662 .lkp_data_type = NLA_NUL_STRING,
4664 [LNET_NET_ATTR_TYPE] = {
4665 .lkp_value = "net type",
4666 .lkp_data_type = NLA_STRING
4668 [LNET_NET_ATTR_LOCAL] = {
4669 .lkp_value = "local NI(s)",
4670 .lkp_key_format = LNKF_SEQUENCE | LNKF_MAPPING,
4671 .lkp_data_type = NLA_NESTED
4676 static struct ln_key_list local_ni_list = {
4677 .lkl_maxattr = LNET_NET_LOCAL_NI_ATTR_MAX,
4679 [LNET_NET_LOCAL_NI_ATTR_NID] = {
4681 .lkp_data_type = NLA_STRING
4683 [LNET_NET_LOCAL_NI_ATTR_STATUS] = {
4684 .lkp_value = "status",
4685 .lkp_data_type = NLA_STRING
4687 [LNET_NET_LOCAL_NI_ATTR_INTERFACE] = {
4688 .lkp_value = "interfaces",
4689 .lkp_key_format = LNKF_MAPPING,
4690 .lkp_data_type = NLA_NESTED
4695 static const struct ln_key_list local_ni_interfaces_list = {
4696 .lkl_maxattr = LNET_NET_LOCAL_NI_INTF_ATTR_MAX,
4698 [LNET_NET_LOCAL_NI_INTF_ATTR_TYPE] = {
4700 .lkp_data_type = NLA_STRING
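/* Together these key tables describe the nested attribute layout of a
 * LNET_CMD_NETS dump, which userspace renders roughly as the YAML
 * below (NID and interface names are illustrative only):
 *
 *	net:
 *	    - net type: tcp
 *	      local NI(s):
 *	          - nid: 192.168.1.2@tcp
 *	            status: up
 *	            interfaces:
 *	                0: eth0
 */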
4705 /* Use an index since the traversal is across LNet nets and ni collections */
4706 struct lnet_genl_net_list {
4707 unsigned int lngl_net_id;
4708 unsigned int lngl_idx;
4711 static inline struct lnet_genl_net_list *
4712 lnet_net_dump_ctx(struct netlink_callback *cb)
4714 return (struct lnet_genl_net_list *)cb->args[0];
4717 static int lnet_net_show_done(struct netlink_callback *cb)
4719 struct lnet_genl_net_list *nlist = lnet_net_dump_ctx(cb);
4722 LIBCFS_FREE(nlist, sizeof(*nlist));
4729 /* LNet net ->start() handler for GET requests */
4730 static int lnet_net_show_start(struct netlink_callback *cb)
4732 struct genlmsghdr *gnlh = nlmsg_data(cb->nlh);
4733 #ifdef HAVE_NL_PARSE_WITH_EXT_ACK
4734 struct netlink_ext_ack *extack = NULL;
4736 struct lnet_genl_net_list *nlist;
4737 int msg_len = genlmsg_len(gnlh);
4738 struct nlattr *params, *top;
4741 #ifdef HAVE_NL_DUMP_WITH_EXT_ACK
4742 extack = cb->extack;
4744 if (the_lnet.ln_refcount == 0) {
4745 NL_SET_ERR_MSG(extack, "LNet stack down");
4749 LIBCFS_ALLOC(nlist, sizeof(*nlist));
4753 nlist->lngl_net_id = LNET_NET_ANY;
4754 nlist->lngl_idx = 0;
4755 cb->args[0] = (long)nlist;
4760 params = genlmsg_data(gnlh);
4761 nla_for_each_attr(top, params, msg_len, rem) {
4765 nla_for_each_nested(net, top, rem2) {
4766 char filter[LNET_NIDSTR_SIZE];
4768 if (nla_type(net) != LN_SCALAR_ATTR_VALUE ||
4769 nla_strcmp(net, "name") != 0)
4772 net = nla_next(net, &rem2);
4773 if (nla_type(net) != LN_SCALAR_ATTR_VALUE) {
4774 NL_SET_ERR_MSG(extack, "invalid config param");
4775 GOTO(report_err, rc = -EINVAL);
4778 rc = nla_strscpy(filter, net, sizeof(filter));
4780 NL_SET_ERR_MSG(extack, "failed to get param");
4781 GOTO(report_err, rc);
4785 nlist->lngl_net_id = libcfs_str2net(filter);
4786 if (nlist->lngl_net_id == LNET_NET_ANY) {
4787 NL_SET_ERR_MSG(extack, "cannot parse net");
4788 GOTO(report_err, rc = -ENOENT);
4794 lnet_net_show_done(cb);
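/* The optional filter handled above comes from a GET request carrying
 * a "name" key, e.g. what a command like "lnetctl net show --net tcp"
 * would send (shown for illustration); libcfs_str2net() converts the
 * string into lngl_net_id so the dump below only walks that network.
 */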
4799 static int lnet_net_show_dump(struct sk_buff *msg,
4800 struct netlink_callback *cb)
4802 struct lnet_genl_net_list *nlist = lnet_net_dump_ctx(cb);
4803 #ifdef HAVE_NL_PARSE_WITH_EXT_ACK
4804 struct netlink_ext_ack *extack = NULL;
4806 int portid = NETLINK_CB(cb->skb).portid;
4807 int seq = cb->nlh->nlmsg_seq;
4808 struct lnet_net *net;
4809 int idx = 0, rc = 0;
4813 #ifdef HAVE_NL_DUMP_WITH_EXT_ACK
4814 extack = cb->extack;
4816 if (!nlist->lngl_idx) {
4817 const struct ln_key_list *all[] = {
4818 &net_props_list, &local_ni_list,
4819 &local_ni_interfaces_list,
4823 rc = lnet_genl_send_scalar_list(msg, portid, seq,
4825 NLM_F_CREATE | NLM_F_MULTI,
4826 LNET_CMD_NETS, all);
4828 NL_SET_ERR_MSG(extack, "failed to send key table");
4829 GOTO(send_error, rc);
4833 lnet_net_lock(LNET_LOCK_EX);
4835 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4838 if (nlist->lngl_net_id != LNET_NET_ANY &&
4839 nlist->lngl_net_id != net->net_id)
4842 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4843 struct nlattr *local_ni, *ni_attr;
4844 char *status = "up";
4846 if (idx++ < nlist->lngl_idx)
4849 hdr = genlmsg_put(msg, portid, seq, &lnet_family,
4850 NLM_F_MULTI, LNET_CMD_NETS);
4852 NL_SET_ERR_MSG(extack, "failed to send values");
4853 GOTO(net_unlock, rc = -EMSGSIZE);
4857 nla_put_string(msg, LNET_NET_ATTR_HDR, "");
4859 nla_put_string(msg, LNET_NET_ATTR_TYPE,
4860 libcfs_net2str(net->net_id));
4863 local_ni = nla_nest_start(msg, LNET_NET_ATTR_LOCAL);
4864 ni_attr = nla_nest_start(msg, idx - 1);
4867 nla_put_string(msg, LNET_NET_LOCAL_NI_ATTR_NID,
4868 libcfs_nidstr(&ni->ni_nid));
if (nid_is_lo0(&ni->ni_nid) &&
    *ni->ni_status != LNET_NI_STATUS_UP)
	status = "down";
nla_put_string(msg, LNET_NET_LOCAL_NI_ATTR_STATUS, status);
4874 if (!nid_is_lo0(&ni->ni_nid) && ni->ni_interface) {
4875 struct nlattr *intf_nest, *intf_attr;
4877 intf_nest = nla_nest_start(msg,
4878 LNET_NET_LOCAL_NI_ATTR_INTERFACE);
4879 intf_attr = nla_nest_start(msg, 0);
nla_put_string(msg,
	       LNET_NET_LOCAL_NI_INTF_ATTR_TYPE,
	       ni->ni_interface);
4883 nla_nest_end(msg, intf_attr);
4884 nla_nest_end(msg, intf_nest);
4888 nla_nest_end(msg, ni_attr);
4889 nla_nest_end(msg, local_ni);
4891 genlmsg_end(msg, hdr);
4896 struct nlmsghdr *nlh = nlmsg_hdr(msg);
4898 nlmsg_cancel(msg, nlh);
4899 NL_SET_ERR_MSG(extack, "Network is down");
4903 lnet_net_unlock(LNET_LOCK_EX);
4905 nlist->lngl_idx = idx;
4907 return lnet_nl_send_error(cb->skb, portid, seq, rc);
4910 #ifndef HAVE_NETLINK_CALLBACK_START
4911 static int lnet_old_net_show_dump(struct sk_buff *msg,
4912 struct netlink_callback *cb)
4915 int rc = lnet_net_show_start(cb);
4921 return lnet_net_show_dump(msg, cb);
4925 static int lnet_genl_parse_tunables(struct nlattr *settings,
4926 struct lnet_ioctl_config_lnd_tunables *tun)
4928 struct nlattr *param;
4931 nla_for_each_nested(param, settings, rem) {
4932 int type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_UNSPEC;
4935 if (nla_type(param) != LN_SCALAR_ATTR_VALUE)
4938 if (nla_strcmp(param, "peer_timeout") == 0)
4939 type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_TIMEOUT;
4940 else if (nla_strcmp(param, "peer_credits") == 0)
4941 type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_CREDITS;
4942 else if (nla_strcmp(param, "peer_buffer_credits") == 0)
4943 type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_BUFFER_CREDITS;
4944 else if (nla_strcmp(param, "credits") == 0)
4945 type = LNET_NET_LOCAL_NI_TUNABLES_ATTR_CREDITS;
4947 param = nla_next(param, &rem);
4948 if (nla_type(param) != LN_SCALAR_ATTR_INT_VALUE)
4951 num = nla_get_s64(param);
4953 case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_TIMEOUT:
4954 tun->lt_cmn.lct_peer_timeout = num;
4956 case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_CREDITS:
4957 tun->lt_cmn.lct_peer_tx_credits = num;
4959 case LNET_NET_LOCAL_NI_TUNABLES_ATTR_PEER_BUFFER_CREDITS:
4960 tun->lt_cmn.lct_peer_rtr_credits = num;
4962 case LNET_NET_LOCAL_NI_TUNABLES_ATTR_CREDITS:
4963 tun->lt_cmn.lct_max_tx_credits = num;
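/* The keys recognized above map one-to-one onto the "tunables" block
 * of a local NI definition; in YAML form it looks roughly like this
 * (values are illustrative defaults, not taken from this file):
 *
 *	tunables:
 *	    peer_timeout: 180
 *	    peer_credits: 8
 *	    peer_buffer_credits: 0
 *	    credits: 256
 */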
4974 lnet_genl_parse_lnd_tunables(struct nlattr *settings,
4975 struct lnet_ioctl_config_lnd_tunables *tun,
4976 const struct lnet_lnd *lnd)
4978 const struct ln_key_list *list = lnd->lnd_keys;
4979 struct nlattr *param;
4986 if (!lnd->lnd_nl_set)
4989 if (!list->lkl_maxattr)
4992 nla_for_each_nested(param, settings, rem) {
4993 if (nla_type(param) != LN_SCALAR_ATTR_VALUE)
4996 for (i = 1; i <= list->lkl_maxattr; i++) {
4997 if (!list->lkl_list[i].lkp_value ||
4998 nla_strcmp(param, list->lkl_list[i].lkp_value) != 0)
5001 param = nla_next(param, &rem);
5002 rc = lnd->lnd_nl_set(LNET_CMD_NETS, param, i, tun);
5012 lnet_genl_parse_local_ni(struct nlattr *entry, struct genl_info *info,
5013 int net_id, struct lnet_ioctl_config_ni *conf,
5014 struct lnet_ioctl_config_lnd_tunables *tun,
5017 struct nlattr *settings;
5020 nla_for_each_nested(settings, entry, rem3) {
5021 if (nla_type(settings) != LN_SCALAR_ATTR_VALUE)
5024 if (nla_strcmp(settings, "interfaces") == 0) {
5025 struct nlattr *intf;
5028 settings = nla_next(settings, &rem3);
5029 if (nla_type(settings) !=
5030 LN_SCALAR_ATTR_LIST) {
5031 GENL_SET_ERR_MSG(info,
5032 "invalid interfaces");
5033 GOTO(out, rc = -EINVAL);
5036 nla_for_each_nested(intf, settings, rem4) {
5037 intf = nla_next(intf, &rem4);
5038 if (nla_type(intf) !=
5039 LN_SCALAR_ATTR_VALUE) {
5040 GENL_SET_ERR_MSG(info,
5041 "0 key is invalid");
5042 GOTO(out, rc = -EINVAL);
5045 rc = nla_strscpy(conf->lic_ni_intf, intf,
5046 sizeof(conf->lic_ni_intf));
5048 GENL_SET_ERR_MSG(info,
5049 "failed to parse interfaces");
5054 } else if (nla_strcmp(settings, "tunables") == 0) {
5055 settings = nla_next(settings, &rem3);
5056 if (nla_type(settings) !=
5057 LN_SCALAR_ATTR_LIST) {
5058 GENL_SET_ERR_MSG(info,
5059 "invalid tunables");
5060 GOTO(out, rc = -EINVAL);
5063 rc = lnet_genl_parse_tunables(settings, tun);
5065 GENL_SET_ERR_MSG(info,
5066 "failed to parse tunables");
5069 } else if ((nla_strcmp(settings, "lnd tunables") == 0)) {
5070 const struct lnet_lnd *lnd;
5072 lnd = lnet_load_lnd(LNET_NETTYP(net_id));
5074 GENL_SET_ERR_MSG(info,
5075 "LND type not supported");
5076 GOTO(out, rc = PTR_ERR(lnd));
5079 settings = nla_next(settings, &rem3);
5080 if (nla_type(settings) !=
5081 LN_SCALAR_ATTR_LIST) {
5082 GENL_SET_ERR_MSG(info,
"lnd tunables should be a list");
5084 GOTO(out, rc = -EINVAL);
5087 rc = lnet_genl_parse_lnd_tunables(settings,
5090 GENL_SET_ERR_MSG(info,
5091 "failed to parse lnd tunables");
5094 } else if (nla_strcmp(settings, "CPT") == 0) {
5098 settings = nla_next(settings, &rem3);
5099 if (nla_type(settings) != LN_SCALAR_ATTR_LIST) {
5100 GENL_SET_ERR_MSG(info,
"CPT should be a list");
5102 GOTO(out, rc = -EINVAL);
5105 nla_for_each_nested(cpt, settings, rem4) {
5108 if (nla_type(cpt) !=
5109 LN_SCALAR_ATTR_INT_VALUE) {
5110 GENL_SET_ERR_MSG(info,
5111 "invalid CPT config");
5112 GOTO(out, rc = -EINVAL);
5115 core = nla_get_s64(cpt);
5116 if (core >= LNET_CPT_NUMBER) {
5117 GENL_SET_ERR_MSG(info,
5118 "invalid CPT value");
5119 GOTO(out, rc = -ERANGE);
5122 conf->lic_cpts[conf->lic_ncpts] = core;
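/* The parser above consumes one local NI definition.  In YAML form the
 * keys it understands look roughly like this (interface name, LND
 * tunable and values are illustrative only):
 *
 *	- interfaces:
 *	      0: eth0
 *	  tunables:
 *	      peer_timeout: 180
 *	  lnd tunables:
 *	      conns_per_peer: 1
 *	  CPT: [ 0, 1 ]
 */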
5131 static int lnet_net_cmd(struct sk_buff *skb, struct genl_info *info)
5133 struct nlmsghdr *nlh = nlmsg_hdr(skb);
5134 struct genlmsghdr *gnlh = nlmsg_data(nlh);
5135 struct nlattr *params = genlmsg_data(gnlh);
5136 int msg_len, rem, rc = 0;
5137 struct nlattr *attr;
5139 msg_len = genlmsg_len(gnlh);
5141 GENL_SET_ERR_MSG(info, "no configuration");
5145 nla_for_each_attr(attr, params, msg_len, rem) {
5146 struct lnet_ioctl_config_ni conf;
5147 u32 net_id = LNET_NET_ANY;
5148 struct nlattr *entry;
5149 bool ni_list = false;
5152 if (nla_type(attr) != LN_SCALAR_ATTR_LIST)
5155 nla_for_each_nested(entry, attr, rem2) {
5156 switch (nla_type(entry)) {
5157 case LN_SCALAR_ATTR_VALUE: {
5160 memset(&conf, 0, sizeof(conf));
5161 if (nla_strcmp(entry, "ip2net") == 0) {
5162 entry = nla_next(entry, &rem2);
5163 if (nla_type(entry) !=
5164 LN_SCALAR_ATTR_VALUE) {
5165 GENL_SET_ERR_MSG(info,
5166 "ip2net has invalid key");
5167 GOTO(out, rc = -EINVAL);
5170 len = nla_strscpy(conf.lic_legacy_ip2nets,
5172 sizeof(conf.lic_legacy_ip2nets));
5174 GENL_SET_ERR_MSG(info,
5175 "ip2net key string is invalid");
5176 GOTO(out, rc = len);
5179 } else if (nla_strcmp(entry, "net type") == 0) {
5180 char tmp[LNET_NIDSTR_SIZE];
5182 entry = nla_next(entry, &rem2);
5183 if (nla_type(entry) !=
5184 LN_SCALAR_ATTR_VALUE) {
5185 GENL_SET_ERR_MSG(info,
5186 "net type has invalid key");
5187 GOTO(out, rc = -EINVAL);
5190 len = nla_strscpy(tmp, entry,
5193 GENL_SET_ERR_MSG(info,
5194 "net type key string is invalid");
5195 GOTO(out, rc = len);
5198 net_id = libcfs_str2net(tmp);
5200 GENL_SET_ERR_MSG(info,
5201 "cannot parse net");
5202 GOTO(out, rc = -ENODEV);
5204 if (LNET_NETTYP(net_id) == LOLND) {
5205 GENL_SET_ERR_MSG(info,
5206 "setting @lo not allowed");
5207 GOTO(out, rc = -ENODEV);
5209 conf.lic_legacy_ip2nets[0] = '\0';
5210 conf.lic_ni_intf[0] = '\0';
5217 case LN_SCALAR_ATTR_LIST: {
5218 bool create = info->nlhdr->nlmsg_flags &
5220 struct lnet_ioctl_config_lnd_tunables tun;
5222 memset(&tun, 0, sizeof(tun));
5223 tun.lt_cmn.lct_peer_timeout = -1;
5226 rc = lnet_genl_parse_local_ni(entry, info,
5233 struct lnet_net *net;
5237 if (!strlen(conf.lic_ni_intf)) {
5238 GENL_SET_ERR_MSG(info,
5239 "interface is missing");
5243 lnet_net_lock(LNET_LOCK_EX);
5244 net = lnet_get_net_locked(net_id);
5246 GENL_SET_ERR_MSG(info,
5247 "LNet net doesn't exist");
5250 list_for_each_entry(ni, &net->net_ni_list,
5252 if (!ni->ni_interface ||
5253 strncmp(ni->ni_interface,
5255 strlen(conf.lic_ni_intf)) != 0) {
5260 lnet_net_unlock(LNET_LOCK_EX);
5261 rc = lnet_dyn_del_ni(&ni->ni_nid);
5262 lnet_net_lock(LNET_LOCK_EX);
5264 GENL_SET_ERR_MSG(info,
5265 "cannot del LNet NI");
5271 lnet_net_unlock(LNET_LOCK_EX);
5273 rc = lnet_dyn_add_ni(&conf, net_id, &tun);
5276 GENL_SET_ERR_MSG(info,
5277 "cannot parse net");
5280 GENL_SET_ERR_MSG(info,
5284 GENL_SET_ERR_MSG(info,
5285 "cannot add LNet NI");
/* It is possible that a newer userland sends values that older
 * kernels do not handle, so silently ignore such values.
 */
/* Handle the case where only the NET was sent, with no list of NIDs */
5304 if (!(info->nlhdr->nlmsg_flags & NLM_F_CREATE) && !ni_list) {
5305 rc = lnet_dyn_del_net(net_id);
5307 GENL_SET_ERR_MSG(info,
5308 "cannot del network");
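/* Summary of the semantics implemented above, with the lnetctl
 * commands that roughly correspond to each path (options shown for
 * illustration; the exact syntax belongs to userspace):
 *
 *	NLM_F_CREATE + NI list:    lnetctl net add --net tcp --if eth0
 *				   -> lnet_dyn_add_ni()
 *	no NLM_F_CREATE + NI list: lnetctl net del --net tcp --if eth0
 *				   -> matching NI removed via lnet_dyn_del_ni()
 *	no NLM_F_CREATE, no list:  lnetctl net del --net tcp
 *				   -> whole net removed via lnet_dyn_del_net()
 */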
5316 static const struct genl_multicast_group lnet_mcast_grps[] = {
5317 { .name = "ip2net", },
5321 static const struct genl_ops lnet_genl_ops[] = {
5323 .cmd = LNET_CMD_NETS,
5324 #ifdef HAVE_NETLINK_CALLBACK_START
5325 .start = lnet_net_show_start,
5326 .dumpit = lnet_net_show_dump,
5328 .dumpit = lnet_old_net_show_dump,
5330 .done = lnet_net_show_done,
5331 .doit = lnet_net_cmd,
5335 static struct genl_family lnet_family = {
5336 .name = LNET_GENL_NAME,
5337 .version = LNET_GENL_VERSION,
5338 .module = THIS_MODULE,
5340 .ops = lnet_genl_ops,
5341 .n_ops = ARRAY_SIZE(lnet_genl_ops),
5342 .mcgrps = lnet_mcast_grps,
5343 .n_mcgrps = ARRAY_SIZE(lnet_mcast_grps),
5346 void LNetDebugPeer(struct lnet_processid *id)
5348 lnet_debug_peer(&id->nid);
5350 EXPORT_SYMBOL(LNetDebugPeer);
5353 * Determine if the specified peer \a nid is on the local node.
5355 * \param nid peer nid to check
5357 * \retval true If peer NID is on the local node.
5358 * \retval false If peer NID is not on the local node.
5360 bool LNetIsPeerLocal(struct lnet_nid *nid)
5362 struct lnet_net *net;
5366 cpt = lnet_net_lock_current();
5367 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
5368 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
5369 if (nid_same(&ni->ni_nid, nid)) {
5370 lnet_net_unlock(cpt);
5375 lnet_net_unlock(cpt);
5379 EXPORT_SYMBOL(LNetIsPeerLocal);
 * Retrieve the struct lnet_process_id ID of the LNet interface at \a index.
 * Note that all interfaces share the same PID, as requested by LNetNIInit().
5385 * \param index Index of the interface to look up.
5386 * \param id On successful return, this location will hold the
5387 * struct lnet_process_id ID of the interface.
5389 * \retval 0 If an interface exists at \a index.
5390 * \retval -ENOENT If no interface has been found.
5393 LNetGetId(unsigned int index, struct lnet_processid *id)
5396 struct lnet_net *net;
5400 LASSERT(the_lnet.ln_refcount > 0);
5402 cpt = lnet_net_lock_current();
5404 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
5405 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
5406 if (!nid_is_nid4(&ni->ni_nid))
5407 /* FIXME this needs to be handled */
5412 id->nid = ni->ni_nid;
5413 id->pid = the_lnet.ln_pid;
5419 lnet_net_unlock(cpt);
5422 EXPORT_SYMBOL(LNetGetId);
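/* A minimal in-kernel usage sketch (assumes LNet is already up, i.e.
 * the caller holds a reference from LNetNIInit()): iterate the local
 * interfaces until LNetGetId() reports -ENOENT.
 *
 *	struct lnet_processid id;
 *	unsigned int i;
 *
 *	for (i = 0; LNetGetId(i, &id) != -ENOENT; i++)
 *		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_idstr(&id));
 */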
5428 struct lnet_handle_md mdh;
5429 struct completion completion;
5433 lnet_ping_event_handler(struct lnet_event *event)
5435 struct ping_data *pd = event->md_user_ptr;
5437 CDEBUG(D_NET, "ping event (%d %d)%s\n",
5438 event->type, event->status,
5439 event->unlinked ? " unlinked" : "");
5441 if (event->status) {
5443 pd->rc = event->status;
5444 } else if (event->type == LNET_EVENT_REPLY) {
5446 pd->rc = event->mlength;
5449 if (event->unlinked)
5450 pd->pd_unlinked = 1;
5452 if (event->unlinked ||
5453 (event->type == LNET_EVENT_SEND && event->status))
5454 complete(&pd->completion);
/* lnet_ping() only works with nid4 NIDs, so we can calculate the
 * size from the number of NIDs.
 */
5460 #define LNET_PING_INFO_SIZE(NNIDS) \
5461 offsetof(struct lnet_ping_info, pi_ni[NNIDS])
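/* Worked example: LNET_PING_INFO_SIZE(16) is
 * offsetof(struct lnet_ping_info, pi_ni[16]), i.e. the fixed
 * lnet_ping_info header plus room for 16 struct lnet_ni_status
 * entries (one per nid4 NID reported).
 */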
5463 static int lnet_ping(struct lnet_process_id id4, struct lnet_nid *src_nid,
5464 signed long timeout, struct lnet_process_id __user *ids,
5467 struct lnet_md md = { NULL };
5468 struct ping_data pd = { 0 };
5469 struct lnet_ping_buffer *pbuf;
5470 struct lnet_process_id tmpid;
5471 struct lnet_processid id;
5478 /* n_ids limit is arbitrary */
5479 if (n_ids <= 0 || id4.nid == LNET_NID_ANY)
/* If the user buffer has more space than lnet_interfaces_max,
 * only fill it up to lnet_interfaces_max.
 */
5486 if (n_ids > lnet_interfaces_max)
5487 n_ids = lnet_interfaces_max;
5489 if (id4.pid == LNET_PID_ANY)
5490 id4.pid = LNET_PID_LUSTRE;
5492 id_bytes = LNET_PING_INFO_SIZE(n_ids);
5493 pbuf = lnet_ping_buffer_alloc(id_bytes, GFP_NOFS);
5497 /* initialize md content */
5498 md.start = &pbuf->pb_info;
5499 md.length = id_bytes;
5500 md.threshold = 2; /* GET/REPLY */
5502 md.options = LNET_MD_TRUNCATE;
5504 md.handler = lnet_ping_event_handler;
5506 init_completion(&pd.completion);
5508 rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
5510 CERROR("Can't bind MD: %d\n", rc);
5511 goto fail_ping_buffer_decref;
5514 lnet_pid4_to_pid(id4, &id);
5515 rc = LNetGet(src_nid, pd.mdh, &id, LNET_RESERVED_PORTAL,
5516 LNET_PROTO_PING_MATCHBITS, 0, false);
5519 /* Don't CERROR; this could be deliberate! */
5520 rc2 = LNetMDUnlink(pd.mdh);
5523 /* NB must wait for the UNLINK event below... */
5526 /* Ensure completion in finite time... */
5527 wait_for_completion_timeout(&pd.completion, timeout);
5528 if (!pd.pd_unlinked) {
5529 LNetMDUnlink(pd.mdh);
5530 wait_for_completion(&pd.completion);
5534 goto fail_ping_buffer_decref;
5538 LASSERT(nob >= 0 && nob <= id_bytes);
5540 rc = -EPROTO; /* if I can't parse... */
5543 CERROR("%s: ping info too short %d\n",
5544 libcfs_idstr(&id), nob);
5545 goto fail_ping_buffer_decref;
5548 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
5549 lnet_swap_pinginfo(pbuf);
5550 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
5551 CERROR("%s: Unexpected magic %08x\n",
5552 libcfs_idstr(&id), pbuf->pb_info.pi_magic);
5553 goto fail_ping_buffer_decref;
5556 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
5557 CERROR("%s: ping w/o NI status: 0x%x\n",
5558 libcfs_idstr(&id), pbuf->pb_info.pi_features);
5559 goto fail_ping_buffer_decref;
/* Test that the reply is at least as large as a lnet_ping_info with a
 * single pi_ni status entry; when large NIDs are used that entry may
 * carry the size instead.
 */
5565 if (nob < LNET_PING_INFO_SIZE(1)) {
5566 CERROR("%s: Short reply %d(%lu min)\n",
5567 libcfs_idstr(&id), nob, LNET_PING_INFO_SIZE(1));
5568 goto fail_ping_buffer_decref;
5571 if (pbuf->pb_info.pi_nnis < n_ids) {
5572 n_ids = pbuf->pb_info.pi_nnis;
5573 id_bytes = lnet_ping_info_size(&pbuf->pb_info);
5576 if (nob < id_bytes) {
5577 CERROR("%s: Short reply %d(%d expected)\n",
5578 libcfs_idstr(&id), nob, id_bytes);
5579 goto fail_ping_buffer_decref;
5582 rc = -EFAULT; /* if I segv in copy_to_user()... */
5584 memset(&tmpid, 0, sizeof(tmpid));
5585 for (i = 0; i < n_ids; i++) {
5586 tmpid.pid = pbuf->pb_info.pi_pid;
5587 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
5588 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
5589 goto fail_ping_buffer_decref;
5591 rc = pbuf->pb_info.pi_nnis;
5593 fail_ping_buffer_decref:
5594 lnet_ping_buffer_decref(pbuf);
5597 #undef LNET_PING_INFO_SIZE
5600 lnet_discover(struct lnet_process_id id4, __u32 force,
5601 struct lnet_process_id __user *ids, int n_ids)
5603 struct lnet_peer_ni *lpni;
5604 struct lnet_peer_ni *p;
5605 struct lnet_peer *lp;
5606 struct lnet_process_id *buf;
5607 struct lnet_processid id;
5613 id4.nid == LNET_NID_ANY)
5616 lnet_pid4_to_pid(id4, &id);
5617 if (id.pid == LNET_PID_ANY)
5618 id.pid = LNET_PID_LUSTRE;
/* If the user buffer has more space than lnet_interfaces_max,
 * only fill it up to lnet_interfaces_max.
 */
5624 if (n_ids > lnet_interfaces_max)
5625 n_ids = lnet_interfaces_max;
5627 CFS_ALLOC_PTR_ARRAY(buf, n_ids);
5631 cpt = lnet_net_lock_current();
5632 lpni = lnet_peerni_by_nid_locked(&id.nid, NULL, cpt);
5639 * Clearing the NIDS_UPTODATE flag ensures the peer will
5640 * be discovered, provided discovery has not been disabled.
5642 lp = lpni->lpni_peer_net->lpn_peer;
5643 spin_lock(&lp->lp_lock);
5644 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
5645 /* If the force flag is set, force a PING and PUSH as well. */
5647 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
5648 spin_unlock(&lp->lp_lock);
5649 rc = lnet_discover_peer_locked(lpni, cpt, true);
/* The lpni (or lp) for this NID may have changed and our ref is
 * the only thing keeping the old one around. Release the ref and
 * look up the lpni again.
 */
5657 lnet_peer_ni_decref_locked(lpni);
5658 lpni = lnet_peer_ni_find_locked(&id.nid);
5663 lp = lpni->lpni_peer_net->lpn_peer;
5667 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
5668 buf[i].pid = id.pid;
5669 buf[i].nid = lnet_nid_to_nid4(&p->lpni_nid);
5676 lnet_peer_ni_decref_locked(lpni);
5678 lnet_net_unlock(cpt);
5681 if (copy_to_user(ids, buf, rc * sizeof(*buf)))
5683 CFS_FREE_PTR_ARRAY(buf, n_ids);
5689 * Retrieve peer discovery status.
5691 * \retval 1 if lnet_peer_discovery_disabled is 0
5692 * \retval 0 if lnet_peer_discovery_disabled is 1
5695 LNetGetPeerDiscoveryStatus(void)
5697 return !lnet_peer_discovery_disabled;
5699 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);