/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE
/*
 * Initialize ln_api_mutex statically, since it needs to be used in the
 * discovery_set callback. That module parameter callback can be called
 * before module init completes. The mutex needs to be ready for use then.
 */
struct lnet the_lnet = {
	.ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
};	/* THE state of the network */
EXPORT_SYMBOL(the_lnet);
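The static initializer matters because a module parameter's set callback can run while insmod parses the options, i.e. potentially before the module's init function has had any chance to call mutex_init(). A minimal, self-contained sketch of the same pattern (hypothetical names, plain kernel API rather than the libcfs wrappers used in this file):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>

/* statically initialized, so valid even before example_init() runs */
static DEFINE_MUTEX(example_mutex);
static int example_value;

static int example_set(const char *val, const struct kernel_param *kp)
{
	int rc;

	/* may run at module-load time, before any init function */
	mutex_lock(&example_mutex);
	rc = param_set_int(val, kp);
	mutex_unlock(&example_mutex);
	return rc;
}

static const struct kernel_param_ops example_ops = {
	.set = example_set,
	.get = param_get_int,
};
module_param_cb(example_value, &example_ops, &example_value, 0644);
MODULE_LICENSE("GPL");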
static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");

static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
		 "use_tcp_bonding parameter has been removed");

unsigned int lnet_numa_range = 0;
module_param(lnet_numa_range, uint, 0444);
MODULE_PARM_DESC(lnet_numa_range,
		 "NUMA range to consider during Multi-Rail selection");
/*
 * lnet_health_sensitivity determines by how much we decrement the health
 * value on a send error. It defaults to 100, which means an interface's
 * health is decremented by 100 points on every failure.
 */
unsigned int lnet_health_sensitivity = 100;
static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_health_sensitivity = {
	.set = sensitivity_set,
	.get = param_get_int,
};
#define param_check_health_sensitivity(name, p) \
		__param_check(name, p, int)
module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
		  &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_health_sensitivity,
		 "Value to decrement the health value by on error");
/*
 * lnet_recovery_interval determines how often we should perform recovery
 * on unhealthy interfaces.
 */
unsigned int lnet_recovery_interval = 1;
static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_recovery_interval = {
	.set = recovery_interval_set,
	.get = param_get_int,
};
#define param_check_recovery_interval(name, p) \
		__param_check(name, p, int)
module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
		  &lnet_recovery_interval, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_recovery_interval,
		 "DEPRECATED - Interval to recover unhealthy interfaces in seconds");
unsigned int lnet_recovery_limit;
module_param(lnet_recovery_limit, uint, 0644);
MODULE_PARM_DESC(lnet_recovery_limit,
		 "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery");
unsigned int lnet_max_recovery_ping_interval = 900;
unsigned int lnet_max_recovery_ping_count = 9;
static int max_recovery_ping_interval_set(const char *val,
					  cfs_kernel_param_arg_t *kp);

#define param_check_max_recovery_ping_interval(name, p) \
		__param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_max_recovery_ping_interval = {
	.set = max_recovery_ping_interval_set,
	.get = param_get_int,
};
module_param(lnet_max_recovery_ping_interval, max_recovery_ping_interval, 0644);
#else
module_param_call(lnet_max_recovery_ping_interval, max_recovery_ping_interval,
		  param_get_int, &lnet_max_recovery_ping_interval, 0644);
#endif
MODULE_PARM_DESC(lnet_max_recovery_ping_interval,
		 "The max interval between LNet recovery pings, in seconds");
static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_interfaces_max = {
	.set = intf_max_set,
	.get = param_get_int,
};

#define param_check_interfaces_max(name, p) \
		__param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_interfaces_max, interfaces_max, 0644);
#else
module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
		  &param_ops_interfaces_max, 0644);
#endif
MODULE_PARM_DESC(lnet_interfaces_max,
		 "Maximum number of interfaces in a node.");
unsigned lnet_peer_discovery_disabled = 0;
static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_discovery_disabled = {
	.set = discovery_set,
	.get = param_get_int,
};

#define param_check_discovery_disabled(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
#else
module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
		  &param_ops_discovery_disabled, 0644);
#endif
MODULE_PARM_DESC(lnet_peer_discovery_disabled,
		 "Set to 1 to disable peer discovery on this node.");
unsigned int lnet_drop_asym_route;
static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_drop_asym_route = {
	.set = drop_asym_route_set,
	.get = param_get_int,
};

#define param_check_drop_asym_route(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_drop_asym_route, drop_asym_route, 0644);
#else
module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
		  &param_ops_drop_asym_route, 0644);
#endif
MODULE_PARM_DESC(lnet_drop_asym_route,
		 "Set to 1 to drop asymmetrical route messages.");
#define LNET_TRANSACTION_TIMEOUT_DEFAULT 50
unsigned int lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_transaction_timeout = {
	.set = transaction_to_set,
	.get = param_get_int,
};
#define param_check_transaction_timeout(name, p) \
		__param_check(name, p, int)
module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
		  &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_transaction_timeout,
		 "Maximum number of seconds to wait for a peer response.");
#define LNET_RETRY_COUNT_DEFAULT 2
unsigned int lnet_retry_count = LNET_RETRY_COUNT_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_retry_count = {
	.set = retry_count_set,
	.get = param_get_int,
};
#define param_check_retry_count(name, p) \
		__param_check(name, p, int)
module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_retry_count, retry_count_set, param_get_int,
		  &lnet_retry_count, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_retry_count,
		 "Maximum number of times to retry transmitting a message");
unsigned int lnet_response_tracking = 3;
static int response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp);

#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_response_tracking = {
	.set = response_tracking_set,
	.get = param_get_int,
};
#define param_check_response_tracking(name, p) \
		__param_check(name, p, int)
module_param(lnet_response_tracking, response_tracking, 0644);
#else
module_param_call(lnet_response_tracking, response_tracking_set, param_get_int,
		  &lnet_response_tracking, 0644);
#endif
MODULE_PARM_DESC(lnet_response_tracking,
		 "(0|1|2|3) LNet Internal Only|GET Reply only|PUT ACK only|Full Tracking (default)");
#define LNET_LND_TIMEOUT_DEFAULT ((LNET_TRANSACTION_TIMEOUT_DEFAULT - 1) / \
				  (LNET_RETRY_COUNT_DEFAULT + 1))
unsigned int lnet_lnd_timeout = LNET_LND_TIMEOUT_DEFAULT;

static void lnet_set_lnd_timeout(void)
{
	lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
			   (lnet_retry_count + 1);
}
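For concreteness, a worked instance of the formula above using the defaults defined in this file:

/* Worked example with the defaults above:
 *   lnet_lnd_timeout = (50 - 1) / (2 + 1) = 16 seconds
 * i.e. the transaction timeout is split across retry_count + 1 send
 * attempts, each of which is given roughly one LND-level timeout.
 */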
/*
 * This sequence number keeps track of how many times DLC was used to
 * update the local NIs. It is incremented when a NI is added or
 * removed and checked when sending a message to determine if there is
 * a need to re-run the selection algorithm. See lnet_select_pathway()
 * for more details on its usage.
 */
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
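The check is the standard sequence-counter idiom: sample the counter before running the selection, compare it afterwards, and redo the work if an NI was added or removed in between. A minimal sketch of that idiom, with hypothetical helper names that are not part of LNet:

static atomic_t cfg_seq = ATOMIC_INIT(0);

/* configuration path: bump the counter on every NI add/remove */
static void cfg_changed(void)
{
	atomic_inc(&cfg_seq);
}

/* send path: rerun the selection if the configuration moved underneath us */
static void send_with_stable_selection(void)
{
	int seq;

	do {
		seq = atomic_read(&cfg_seq);
		/* ... run the path-selection algorithm here ... */
	} while (seq != atomic_read(&cfg_seq));
}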
static int lnet_ping(struct lnet_process_id id4, struct lnet_nid *src_nid,
		     signed long timeout, struct lnet_process_id __user *ids,
		     int n_ids);

static int lnet_discover(struct lnet_process_id id, __u32 force,
			 struct lnet_process_id __user *ids, int n_ids);
293 sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
296 unsigned *sensitivity = (unsigned *)kp->arg;
299 rc = kstrtoul(val, 0, &value);
301 CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
306 * The purpose of locking the api_mutex here is to ensure that
307 * the correct value ends up stored properly.
309 mutex_lock(&the_lnet.ln_api_mutex);
311 if (value > LNET_MAX_HEALTH_VALUE) {
312 mutex_unlock(&the_lnet.ln_api_mutex);
313 CERROR("Invalid health value. Maximum: %d value = %lu\n",
314 LNET_MAX_HEALTH_VALUE, value);
318 if (*sensitivity != 0 && value == 0 && lnet_retry_count != 0) {
319 lnet_retry_count = 0;
320 lnet_set_lnd_timeout();
323 *sensitivity = value;
325 mutex_unlock(&the_lnet.ln_api_mutex);
331 recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
333 CWARN("'lnet_recovery_interval' has been deprecated\n");
339 max_recovery_ping_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
344 rc = kstrtoul(val, 0, &value);
346 CERROR("Invalid module parameter value for 'lnet_max_recovery_ping_interval'\n");
351 CERROR("Invalid max ping timeout. Must be strictly positive\n");
355 /* The purpose of locking the api_mutex here is to ensure that
356 * the correct value ends up stored properly.
358 mutex_lock(&the_lnet.ln_api_mutex);
	lnet_max_recovery_ping_interval = value;
	lnet_max_recovery_ping_count = 0;
	while (value > 1) {
		value >>= 1;
		lnet_max_recovery_ping_count++;
	}

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
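A worked instance of the loop above, using the defaults declared earlier in this file:

/* Worked example: value = 900 halves through
 *   450, 225, 112, 56, 28, 14, 7, 3, 1
 * i.e. nine right-shifts, so lnet_max_recovery_ping_count = 9 = ilog2(900),
 * matching the declared defaults (900, 9).  The count is presumably the
 * number of times a recovery ping interval can double before it reaches
 * the configured cap.
 */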
372 discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
375 unsigned *discovery_off = (unsigned *)kp->arg;
377 struct lnet_ping_buffer *pbuf;
379 rc = kstrtoul(val, 0, &value);
381 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
385 value = (value) ? 1 : 0;
388 * The purpose of locking the api_mutex here is to ensure that
389 * the correct value ends up stored properly.
391 mutex_lock(&the_lnet.ln_api_mutex);
393 if (value == *discovery_off) {
394 mutex_unlock(&the_lnet.ln_api_mutex);
399 * We still want to set the discovery value even when LNet is not
400 * running. This is the case when LNet is being loaded and we want
401 * the module parameters to take effect. Otherwise if we're
402 * changing the value dynamically, we want to set it after
405 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
406 *discovery_off = value;
407 mutex_unlock(&the_lnet.ln_api_mutex);
411 /* tell peers that discovery setting has changed */
412 lnet_net_lock(LNET_LOCK_EX);
413 pbuf = the_lnet.ln_ping_target;
415 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
417 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
418 lnet_net_unlock(LNET_LOCK_EX);
420 /* only send a push when we're turning off discovery */
421 if (*discovery_off <= 0 && value > 0)
422 lnet_push_update_to_peers(1);
423 *discovery_off = value;
425 mutex_unlock(&the_lnet.ln_api_mutex);
431 drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
434 unsigned int *drop_asym_route = (unsigned int *)kp->arg;
437 rc = kstrtoul(val, 0, &value);
439 CERROR("Invalid module parameter value for "
440 "'lnet_drop_asym_route'\n");
445 * The purpose of locking the api_mutex here is to ensure that
446 * the correct value ends up stored properly.
448 mutex_lock(&the_lnet.ln_api_mutex);
450 if (value == *drop_asym_route) {
451 mutex_unlock(&the_lnet.ln_api_mutex);
455 *drop_asym_route = value;
457 mutex_unlock(&the_lnet.ln_api_mutex);
463 transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
466 unsigned *transaction_to = (unsigned *)kp->arg;
469 rc = kstrtoul(val, 0, &value);
471 CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
476 * The purpose of locking the api_mutex here is to ensure that
477 * the correct value ends up stored properly.
479 mutex_lock(&the_lnet.ln_api_mutex);
481 if (value <= lnet_retry_count || value == 0) {
482 mutex_unlock(&the_lnet.ln_api_mutex);
483 CERROR("Invalid value for lnet_transaction_timeout (%lu). "
484 "Has to be greater than lnet_retry_count (%u)\n",
485 value, lnet_retry_count);
489 if (value == *transaction_to) {
490 mutex_unlock(&the_lnet.ln_api_mutex);
494 *transaction_to = value;
495 /* Update the lnet_lnd_timeout now that we've modified the
496 * transaction timeout
498 lnet_set_lnd_timeout();
500 mutex_unlock(&the_lnet.ln_api_mutex);
506 retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
509 unsigned *retry_count = (unsigned *)kp->arg;
512 rc = kstrtoul(val, 0, &value);
514 CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
519 * The purpose of locking the api_mutex here is to ensure that
520 * the correct value ends up stored properly.
522 mutex_lock(&the_lnet.ln_api_mutex);
524 if (lnet_health_sensitivity == 0 && value > 0) {
525 mutex_unlock(&the_lnet.ln_api_mutex);
526 CERROR("Can not set lnet_retry_count when health feature is turned off\n");
530 if (value > lnet_transaction_timeout) {
531 mutex_unlock(&the_lnet.ln_api_mutex);
532 CERROR("Invalid value for lnet_retry_count (%lu). "
533 "Has to be smaller than lnet_transaction_timeout (%u)\n",
534 value, lnet_transaction_timeout);
538 *retry_count = value;
540 /* Update the lnet_lnd_timeout now that we've modified the
543 lnet_set_lnd_timeout();
545 mutex_unlock(&the_lnet.ln_api_mutex);
551 intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
555 rc = kstrtoint(val, 0, &value);
557 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
561 if (value < LNET_INTERFACES_MIN) {
		CWARN("provided max interfaces value is too small, setting to %d\n",
		      LNET_INTERFACES_MAX_DEFAULT);
		value = LNET_INTERFACES_MAX_DEFAULT;
567 *(int *)kp->arg = value;
573 response_tracking_set(const char *val, cfs_kernel_param_arg_t *kp)
576 unsigned long new_value;
578 rc = kstrtoul(val, 0, &new_value);
580 CERROR("Invalid value for 'lnet_response_tracking'\n");
584 if (new_value < 0 || new_value > 3) {
585 CWARN("Invalid value (%lu) for 'lnet_response_tracking'\n",
590 lnet_response_tracking = new_value;
596 lnet_get_routes(void)
602 lnet_get_networks(void)
607 if (*networks != 0 && *ip2nets != 0) {
608 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
609 "'ip2nets' but not both at once\n");
614 rc = lnet_parse_ip2nets(&nets, ip2nets);
615 return (rc == 0) ? nets : NULL;
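For readers unfamiliar with the two parameters being reconciled here, illustrative values look like the following (hedged; the authoritative syntax is in the Lustre manual, not this file):

/* Illustrative module parameter values only; the check above rejects
 * setting both at once:
 *
 *   options lnet networks="tcp0(eth0),o2ib0(ib0)"
 *   options lnet ip2nets="tcp0(eth0) 192.168.0.*; o2ib0(ib0) 10.10.0.*"
 */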
625 lnet_init_locks(void)
627 spin_lock_init(&the_lnet.ln_eq_wait_lock);
628 spin_lock_init(&the_lnet.ln_msg_resend_lock);
629 init_completion(&the_lnet.ln_mt_wait_complete);
630 mutex_init(&the_lnet.ln_lnd_mutex);
633 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
634 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
636 struct kmem_cache *lnet_udsp_cachep; /* udsp cache */
637 struct kmem_cache *lnet_rspt_cachep; /* response tracker cache */
638 struct kmem_cache *lnet_msg_cachep;
641 lnet_slab_setup(void)
643 /* create specific kmem_cache for MEs and small MDs (i.e., originally
644 * allocated in <size-xxx> kmem_cache).
646 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
648 if (!lnet_mes_cachep)
651 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
652 LNET_SMALL_MD_SIZE, 0, 0,
654 if (!lnet_small_mds_cachep)
657 lnet_udsp_cachep = kmem_cache_create("lnet_udsp",
658 sizeof(struct lnet_udsp),
660 if (!lnet_udsp_cachep)
663 lnet_rspt_cachep = kmem_cache_create("lnet_rspt", sizeof(struct lnet_rsp_tracker),
665 if (!lnet_rspt_cachep)
668 lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
670 if (!lnet_msg_cachep)
677 lnet_slab_cleanup(void)
679 if (lnet_msg_cachep) {
680 kmem_cache_destroy(lnet_msg_cachep);
681 lnet_msg_cachep = NULL;
684 if (lnet_rspt_cachep) {
685 kmem_cache_destroy(lnet_rspt_cachep);
686 lnet_rspt_cachep = NULL;
689 if (lnet_udsp_cachep) {
690 kmem_cache_destroy(lnet_udsp_cachep);
691 lnet_udsp_cachep = NULL;
694 if (lnet_small_mds_cachep) {
695 kmem_cache_destroy(lnet_small_mds_cachep);
696 lnet_small_mds_cachep = NULL;
699 if (lnet_mes_cachep) {
700 kmem_cache_destroy(lnet_mes_cachep);
701 lnet_mes_cachep = NULL;
706 lnet_create_remote_nets_table(void)
709 struct list_head *hash;
711 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
712 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
713 CFS_ALLOC_PTR_ARRAY(hash, LNET_REMOTE_NETS_HASH_SIZE);
715 CERROR("Failed to create remote nets hash table\n");
719 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
720 INIT_LIST_HEAD(&hash[i]);
721 the_lnet.ln_remote_nets_hash = hash;
726 lnet_destroy_remote_nets_table(void)
730 if (the_lnet.ln_remote_nets_hash == NULL)
733 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
734 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
736 CFS_FREE_PTR_ARRAY(the_lnet.ln_remote_nets_hash,
737 LNET_REMOTE_NETS_HASH_SIZE);
738 the_lnet.ln_remote_nets_hash = NULL;
742 lnet_destroy_locks(void)
744 if (the_lnet.ln_res_lock != NULL) {
745 cfs_percpt_lock_free(the_lnet.ln_res_lock);
746 the_lnet.ln_res_lock = NULL;
749 if (the_lnet.ln_net_lock != NULL) {
750 cfs_percpt_lock_free(the_lnet.ln_net_lock);
751 the_lnet.ln_net_lock = NULL;
756 lnet_create_locks(void)
760 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
761 if (the_lnet.ln_res_lock == NULL)
764 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
765 if (the_lnet.ln_net_lock == NULL)
771 lnet_destroy_locks();
775 static void lnet_assert_wire_constants(void)
777 /* Wire protocol assertions generated by 'wirecheck'
778 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
779 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
780 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
784 BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
785 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
786 BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
787 BUILD_BUG_ON(LNET_MSG_ACK != 0);
788 BUILD_BUG_ON(LNET_MSG_PUT != 1);
789 BUILD_BUG_ON(LNET_MSG_GET != 2);
790 BUILD_BUG_ON(LNET_MSG_REPLY != 3);
791 BUILD_BUG_ON(LNET_MSG_HELLO != 4);
793 BUILD_BUG_ON((int)sizeof(lnet_nid_t) != 8);
794 BUILD_BUG_ON((int)sizeof(lnet_pid_t) != 4);
796 /* Checks for struct lnet_nid */
797 BUILD_BUG_ON((int)sizeof(struct lnet_nid) != 20);
798 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_size) != 0);
799 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_size) != 1);
800 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_type) != 1);
801 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_type) != 1);
802 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_num) != 2);
803 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_num) != 2);
804 BUILD_BUG_ON((int)offsetof(struct lnet_nid, nid_addr) != 4);
805 BUILD_BUG_ON((int)sizeof(((struct lnet_nid *)0)->nid_addr) != 16);
807 /* Checks for struct lnet_process_id_packed */
808 BUILD_BUG_ON((int)sizeof(struct lnet_process_id_packed) != 12);
809 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, nid) != 0);
810 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->nid) != 8);
811 BUILD_BUG_ON((int)offsetof(struct lnet_process_id_packed, pid) != 8);
812 BUILD_BUG_ON((int)sizeof(((struct lnet_process_id_packed *)0)->pid) != 4);
814 /* Checks for struct lnet_handle_wire */
815 BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
816 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
817 wh_interface_cookie) != 0);
818 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
819 BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
820 wh_object_cookie) != 8);
821 BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);
823 /* Checks for struct struct lnet_magicversion */
824 BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
825 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
826 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
827 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
828 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
829 BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
830 version_minor) != 6);
831 BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);
833 /* Checks for struct _lnet_hdr_nid4 */
834 BUILD_BUG_ON((int)sizeof(struct _lnet_hdr_nid4) != 72);
835 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_nid) != 0);
836 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_nid) != 8);
837 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_nid) != 8);
838 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_nid) != 8);
839 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_pid) != 16);
840 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_pid) != 4);
841 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_pid) != 20);
842 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_pid) != 4);
843 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, type) != 24);
844 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->type) != 4);
845 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, payload_length) != 28);
846 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->payload_length) != 4);
847 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg) != 32);
848 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg) != 40);
851 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.dst_wmd) != 32);
852 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.dst_wmd) != 16);
853 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.match_bits) != 48);
854 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.match_bits) != 8);
855 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.mlength) != 56);
856 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.mlength) != 4);
859 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ack_wmd) != 32);
860 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ack_wmd) != 16);
861 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.match_bits) != 48);
862 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.match_bits) != 8);
863 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.hdr_data) != 56);
864 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.hdr_data) != 8);
865 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ptl_index) != 64);
866 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ptl_index) != 4);
867 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.offset) != 68);
868 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.offset) != 4);
871 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.return_wmd) != 32);
872 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.return_wmd) != 16);
873 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.match_bits) != 48);
874 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.match_bits) != 8);
875 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.ptl_index) != 56);
876 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.ptl_index) != 4);
877 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.src_offset) != 60);
878 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.src_offset) != 4);
879 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.sink_length) != 64);
880 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.sink_length) != 4);
883 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.reply.dst_wmd) != 32);
884 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.reply.dst_wmd) != 16);
887 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.incarnation) != 32);
888 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.incarnation) != 8);
889 BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.type) != 40);
890 BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.type) != 4);
892 /* Checks for struct lnet_ni_status and related constants */
893 BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
894 BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
895 BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);
897 /* Checks for struct lnet_ni_status */
898 BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
899 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
900 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
901 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
902 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
903 BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_unused) != 12);
904 BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) != 4);
906 /* Checks for struct lnet_ping_info and related constants */
907 BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
908 BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
909 BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
910 BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
911 BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
912 BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
913 BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
914 BUILD_BUG_ON(LNET_PING_FEAT_BITS != 31);
916 /* Checks for struct lnet_ping_info */
917 BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
918 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
919 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
920 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
921 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
922 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
923 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
924 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
925 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
926 BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
927 BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
929 /* Acceptor connection request */
930 BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
932 /* Checks for struct lnet_acceptor_connreq */
933 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq) != 16);
934 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_magic) != 0);
935 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_magic) != 4);
936 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_version) != 4);
937 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_version) != 4);
938 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8);
939 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8);
941 /* Checks for struct lnet_acceptor_connreq_v2 */
942 BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq_v2) != 28);
943 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_magic) != 0);
944 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_magic) != 4);
945 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_version) != 4);
946 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_version) != 4);
947 BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_nid) != 8);
948 BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_nid) != 20);
950 /* Checks for struct lnet_counters_common */
951 BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60);
952 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0);
953 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_alloc) != 4);
954 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_max) != 4);
955 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_msgs_max) != 4);
956 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_errors) != 8);
957 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_errors) != 4);
958 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_count) != 12);
959 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_count) != 4);
960 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_count) != 16);
961 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_count) != 4);
962 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_count) != 20);
963 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_count) != 4);
964 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_count) != 24);
965 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_count) != 4);
966 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_send_length) != 28);
967 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_send_length) != 8);
968 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_recv_length) != 36);
969 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_recv_length) != 8);
970 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_route_length) != 44);
971 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_route_length) != 8);
972 BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_drop_length) != 52);
973 BUILD_BUG_ON((int)sizeof(((struct lnet_counters_common *)0)->lcc_drop_length) != 8);
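The pattern above generalizes: hard-code the sizes and offsets the wire protocol requires and let the compiler refuse to build if a field is ever moved or resized. A hypothetical (non-LNet) illustration of the same idiom, kept inside a comment:

	/* Hypothetical example of the same wirecheck idiom (not an LNet
	 * structure): given
	 *
	 *	struct example_wire_hdr {
	 *		__u32 ew_magic;		offset 0
	 *		__u32 ew_version;	offset 4
	 *		__u64 ew_cookie;	offset 8
	 *	} __attribute__((packed));	total size 16
	 *
	 * the layout is pinned with:
	 *
	 *	BUILD_BUG_ON((int)sizeof(struct example_wire_hdr) != 16);
	 *	BUILD_BUG_ON((int)offsetof(struct example_wire_hdr, ew_cookie) != 8);
	 */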
976 static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
978 const struct lnet_lnd *lnd;
980 /* holding lnd mutex */
981 if (type >= NUM_LNDS)
983 lnd = the_lnet.ln_lnds[type];
984 LASSERT(!lnd || lnd->lnd_type == type);
990 lnet_get_lnd_timeout(void)
992 return lnet_lnd_timeout;
994 EXPORT_SYMBOL(lnet_get_lnd_timeout);
997 lnet_register_lnd(const struct lnet_lnd *lnd)
999 mutex_lock(&the_lnet.ln_lnd_mutex);
1001 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
1002 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
1004 the_lnet.ln_lnds[lnd->lnd_type] = lnd;
1006 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
1008 mutex_unlock(&the_lnet.ln_lnd_mutex);
1010 EXPORT_SYMBOL(lnet_register_lnd);
1013 lnet_unregister_lnd(const struct lnet_lnd *lnd)
1015 mutex_lock(&the_lnet.ln_lnd_mutex);
1017 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
1019 the_lnet.ln_lnds[lnd->lnd_type] = NULL;
1020 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
1022 mutex_unlock(&the_lnet.ln_lnd_mutex);
1024 EXPORT_SYMBOL(lnet_unregister_lnd);
1027 lnet_counters_get_common_locked(struct lnet_counters_common *common)
1029 struct lnet_counters *ctr;
	/* FIXME !!! There is no assert_lnet_net_locked() to ensure this is
	 * actually called under the protection of the lnet_net_lock.
	 */
	memset(common, 0, sizeof(*common));
1037 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
1038 common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
1039 common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
1040 common->lcc_errors += ctr->lct_common.lcc_errors;
1041 common->lcc_send_count += ctr->lct_common.lcc_send_count;
1042 common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
1043 common->lcc_route_count += ctr->lct_common.lcc_route_count;
1044 common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
1045 common->lcc_send_length += ctr->lct_common.lcc_send_length;
1046 common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
1047 common->lcc_route_length += ctr->lct_common.lcc_route_length;
1048 common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
1053 lnet_counters_get_common(struct lnet_counters_common *common)
1055 lnet_net_lock(LNET_LOCK_EX);
1056 lnet_counters_get_common_locked(common);
1057 lnet_net_unlock(LNET_LOCK_EX);
1059 EXPORT_SYMBOL(lnet_counters_get_common);
1062 lnet_counters_get(struct lnet_counters *counters)
1064 struct lnet_counters *ctr;
1065 struct lnet_counters_health *health = &counters->lct_health;
1068 memset(counters, 0, sizeof(*counters));
1070 lnet_net_lock(LNET_LOCK_EX);
1072 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1073 GOTO(out_unlock, rc = -ENODEV);
1075 lnet_counters_get_common_locked(&counters->lct_common);
1077 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
1078 health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
1079 health->lch_resend_count += ctr->lct_health.lch_resend_count;
1080 health->lch_response_timeout_count +=
1081 ctr->lct_health.lch_response_timeout_count;
1082 health->lch_local_interrupt_count +=
1083 ctr->lct_health.lch_local_interrupt_count;
1084 health->lch_local_dropped_count +=
1085 ctr->lct_health.lch_local_dropped_count;
1086 health->lch_local_aborted_count +=
1087 ctr->lct_health.lch_local_aborted_count;
1088 health->lch_local_no_route_count +=
1089 ctr->lct_health.lch_local_no_route_count;
1090 health->lch_local_timeout_count +=
1091 ctr->lct_health.lch_local_timeout_count;
1092 health->lch_local_error_count +=
1093 ctr->lct_health.lch_local_error_count;
1094 health->lch_remote_dropped_count +=
1095 ctr->lct_health.lch_remote_dropped_count;
1096 health->lch_remote_error_count +=
1097 ctr->lct_health.lch_remote_error_count;
1098 health->lch_remote_timeout_count +=
1099 ctr->lct_health.lch_remote_timeout_count;
1100 health->lch_network_timeout_count +=
1101 ctr->lct_health.lch_network_timeout_count;
1104 lnet_net_unlock(LNET_LOCK_EX);
1107 EXPORT_SYMBOL(lnet_counters_get);
1110 lnet_counters_reset(void)
1112 struct lnet_counters *counters;
1115 lnet_net_lock(LNET_LOCK_EX);
1117 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1120 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
1121 memset(counters, 0, sizeof(struct lnet_counters));
1123 lnet_net_unlock(LNET_LOCK_EX);
1127 lnet_res_type2str(int type)
1132 case LNET_COOKIE_TYPE_MD:
1134 case LNET_COOKIE_TYPE_ME:
1136 case LNET_COOKIE_TYPE_EQ:
1142 lnet_res_container_cleanup(struct lnet_res_container *rec)
1146 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
1149 while (!list_empty(&rec->rec_active)) {
1150 struct list_head *e = rec->rec_active.next;
1153 if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
1154 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
1156 } else { /* NB: Active MEs should be attached on portals */
		/* Found live MDs/MEs/EQs; the user really should unlink/free
		 * all of them before finalizing LNet, but if they didn't we
		 * have to recycle the garbage for them.
		 */
		CERROR("%d active elements on exit of %s container\n",
		       count, lnet_res_type2str(rec->rec_type));
1170 if (rec->rec_lh_hash != NULL) {
1171 CFS_FREE_PTR_ARRAY(rec->rec_lh_hash, LNET_LH_HASH_SIZE);
1172 rec->rec_lh_hash = NULL;
1175 rec->rec_type = 0; /* mark it as finalized */
1179 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
1184 LASSERT(rec->rec_type == 0);
1186 rec->rec_type = type;
1187 INIT_LIST_HEAD(&rec->rec_active);
1189 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
1191 /* Arbitrary choice of hash table size */
1192 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
1193 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
1194 if (rec->rec_lh_hash == NULL) {
1199 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
1200 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
1205 CERROR("Failed to setup %s resource container\n",
1206 lnet_res_type2str(type));
1207 lnet_res_container_cleanup(rec);
1212 lnet_res_containers_destroy(struct lnet_res_container **recs)
1214 struct lnet_res_container *rec;
1217 cfs_percpt_for_each(rec, i, recs)
1218 lnet_res_container_cleanup(rec);
1220 cfs_percpt_free(recs);
1223 static struct lnet_res_container **
1224 lnet_res_containers_create(int type)
1226 struct lnet_res_container **recs;
1227 struct lnet_res_container *rec;
1231 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
1233 CERROR("Failed to allocate %s resource containers\n",
1234 lnet_res_type2str(type));
1238 cfs_percpt_for_each(rec, i, recs) {
1239 rc = lnet_res_container_setup(rec, i, type);
1241 lnet_res_containers_destroy(recs);
1249 struct lnet_libhandle *
1250 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
1252 /* ALWAYS called with lnet_res_lock held */
1253 struct list_head *head;
1254 struct lnet_libhandle *lh;
1257 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
1260 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
1261 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
1263 list_for_each_entry(lh, head, lh_hash_chain) {
1264 if (lh->lh_cookie == cookie)
1272 lnet_res_lh_initialize(struct lnet_res_container *rec,
1273 struct lnet_libhandle *lh)
1275 /* ALWAYS called with lnet_res_lock held */
1276 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
1279 lh->lh_cookie = rec->rec_lh_cookie;
1280 rec->rec_lh_cookie += 1 << ibits;
1282 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
1284 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
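Taken together, the two functions above define the cookie layout: the resource type in the low bits, the CPT next, and a per-container sequence number above that. A worked example with assumed bit widths (the real values come from the LNet headers, not this file):

/* Worked example (assumed widths: LNET_COOKIE_TYPE_BITS = 2,
 * LNET_CPT_BITS = 8, so ibits = 10).  For type = 2, cpt = 3 and the third
 * handle created in that container (sequence = 2):
 *
 *   cookie = 2 | (3 << 2) | (2 << 10) = 2 + 12 + 2048 = 2062
 *
 * lnet_res_lh_lookup() recovers the type from cookie & LNET_COOKIE_MASK = 2
 * and the hash bucket from (cookie >> 10) & LNET_LH_HASH_MASK = 2 & mask.
 */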
1288 lnet_create_array_of_queues(void)
1290 struct list_head **qs;
1291 struct list_head *q;
1294 qs = cfs_percpt_alloc(lnet_cpt_table(),
1295 sizeof(struct list_head));
1297 CERROR("Failed to allocate queues\n");
1301 cfs_percpt_for_each(q, i, qs)
1307 static int lnet_unprepare(void);
1310 lnet_prepare(lnet_pid_t requested_pid)
1312 /* Prepare to bring up the network */
1313 struct lnet_res_container **recs;
1316 if (requested_pid == LNET_PID_ANY) {
1317 /* Don't instantiate LNET just for me */
1321 LASSERT(the_lnet.ln_refcount == 0);
1323 the_lnet.ln_routing = 0;
1325 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
1326 the_lnet.ln_pid = requested_pid;
1328 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
1329 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
1330 INIT_LIST_HEAD(&the_lnet.ln_nets);
1331 INIT_LIST_HEAD(&the_lnet.ln_routers);
1332 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
1333 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
1334 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
1335 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
1336 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
1337 INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
1338 INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
1339 INIT_LIST_HEAD(&the_lnet.ln_udsp_list);
1340 init_waitqueue_head(&the_lnet.ln_dc_waitq);
1341 the_lnet.ln_mt_handler = NULL;
1342 init_completion(&the_lnet.ln_started);
1344 rc = lnet_slab_setup();
1348 rc = lnet_create_remote_nets_table();
1353 * NB the interface cookie in wire handles guards against delayed
1354 * replies and ACKs appearing valid after reboot.
1356 the_lnet.ln_interface_cookie = ktime_get_real_ns();
1358 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
1359 sizeof(struct lnet_counters));
1360 if (the_lnet.ln_counters == NULL) {
1361 CERROR("Failed to allocate counters for LNet\n");
1366 rc = lnet_peer_tables_create();
1370 rc = lnet_msg_containers_create();
1374 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
1375 LNET_COOKIE_TYPE_EQ);
1379 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
1385 the_lnet.ln_md_containers = recs;
1387 rc = lnet_portals_create();
1389 CERROR("Failed to create portals for LNet: %d\n", rc);
1393 the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
1394 if (!the_lnet.ln_mt_zombie_rstqs) {
1407 lnet_unprepare(void)
	/* NB no LNET_LOCK since this is the last reference.  All LND instances
	 * have shut down already, so it is safe to unlink and free all
	 * descriptors, even those that appear committed to a network op
	 * (e.g. an MD with a non-zero pending count).
	 */
1414 lnet_fail_nid(LNET_NID_ANY, 0);
1416 LASSERT(the_lnet.ln_refcount == 0);
1417 LASSERT(list_empty(&the_lnet.ln_test_peers));
1418 LASSERT(list_empty(&the_lnet.ln_nets));
1420 if (the_lnet.ln_mt_zombie_rstqs) {
1421 lnet_clean_zombie_rstqs();
1422 the_lnet.ln_mt_zombie_rstqs = NULL;
1425 lnet_assert_handler_unused(the_lnet.ln_mt_handler);
1426 the_lnet.ln_mt_handler = NULL;
1428 lnet_portals_destroy();
1430 if (the_lnet.ln_md_containers != NULL) {
1431 lnet_res_containers_destroy(the_lnet.ln_md_containers);
1432 the_lnet.ln_md_containers = NULL;
1435 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
1437 lnet_msg_containers_destroy();
1439 lnet_rtrpools_free(0);
1441 if (the_lnet.ln_counters != NULL) {
1442 cfs_percpt_free(the_lnet.ln_counters);
1443 the_lnet.ln_counters = NULL;
1445 lnet_destroy_remote_nets_table();
1446 lnet_udsp_destroy(true);
1447 lnet_slab_cleanup();
1453 lnet_net2ni_locked(__u32 net_id, int cpt)
1456 struct lnet_net *net;
1458 LASSERT(cpt != LNET_LOCK_EX);
1460 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1461 if (net->net_id == net_id) {
1462 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
1472 lnet_net2ni_addref(__u32 net)
1477 ni = lnet_net2ni_locked(net, 0);
1479 lnet_ni_addref_locked(ni, 0);
1484 EXPORT_SYMBOL(lnet_net2ni_addref);
1487 lnet_get_net_locked(__u32 net_id)
1489 struct lnet_net *net;
1491 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1492 if (net->net_id == net_id)
1500 lnet_net_clr_pref_rtrs(struct lnet_net *net)
1502 struct list_head zombies;
1503 struct lnet_nid_list *ne;
1504 struct lnet_nid_list *tmp;
1506 INIT_LIST_HEAD(&zombies);
1508 lnet_net_lock(LNET_LOCK_EX);
1509 list_splice_init(&net->net_rtr_pref_nids, &zombies);
1510 lnet_net_unlock(LNET_LOCK_EX);
1512 list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1513 list_del_init(&ne->nl_list);
1514 LIBCFS_FREE(ne, sizeof(*ne));
1519 lnet_net_add_pref_rtr(struct lnet_net *net,
1520 struct lnet_nid *gw_nid)
1521 __must_hold(&the_lnet.ln_api_mutex)
1523 struct lnet_nid_list *ne;
	/* This function is called with the api_mutex held. While the api_mutex
	 * is held the list cannot be modified, as it is only modified as a
	 * result of applying a UDSP, and that also happens under the
	 * api_mutex.
	 */
1530 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1531 if (nid_same(&ne->nl_nid, gw_nid))
1535 LIBCFS_ALLOC(ne, sizeof(*ne));
1539 ne->nl_nid = *gw_nid;
1541 /* Lock the cpt to protect against addition and checks in the
1542 * selection algorithm
1544 lnet_net_lock(LNET_LOCK_EX);
1545 list_add(&ne->nl_list, &net->net_rtr_pref_nids);
1546 lnet_net_unlock(LNET_LOCK_EX);
1552 lnet_net_is_pref_rtr_locked(struct lnet_net *net, struct lnet_nid *rtr_nid)
1554 struct lnet_nid_list *ne;
1556 CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
1557 libcfs_net2str(net->net_id),
1558 list_empty(&net->net_rtr_pref_nids));
1560 if (list_empty(&net->net_rtr_pref_nids))
1563 list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) {
1564 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
1565 libcfs_nidstr(&ne->nl_nid),
1566 libcfs_nidstr(rtr_nid));
1567 if (nid_same(rtr_nid, &ne->nl_nid))
1575 lnet_nid4_cpt_hash(lnet_nid_t nid, unsigned int number)
1578 __u64 pair_bits = 0x0001000100010001LLU;
1579 __u64 mask = pair_bits * 0xFF;
1582 /* Use (sum-by-multiplication of nid bytes) mod (number of CPTs)
1583 * to match nid to a CPT.
1585 pair_sum = (key & mask) + ((key >> 8) & mask);
1586 pair_sum = (pair_sum * pair_bits) >> 48;
1588 CDEBUG(D_NET, "Match nid %s to cpt %u\n",
1589 libcfs_nid2str(nid), (unsigned int)(pair_sum) % number);
1591 return (unsigned int)(pair_sum) % number;
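The masked adds and the multiply above are just a branch-free way of summing the eight NID bytes; a worked trace for one value:

/* Worked example for nid bytes 12 34 56 78 9a bc de f0 (hex):
 *
 *   key & mask                   = 0x0034_0078_00bc_00f0   (even bytes)
 *   (key >> 8) & mask            = 0x0012_0056_009a_00de   (odd bytes)
 *   pair_sum                     = 0x0046_00ce_0156_01ce   (four 16-bit pair sums)
 *   (pair_sum * pair_bits) >> 48 = 0x46 + 0xce + 0x156 + 0x1ce = 0x438 = 1080
 *
 * 1080 is exactly the sum of the eight NID bytes, and 1080 % number picks
 * the CPT (e.g. 1080 % 4 = 0).
 */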
1595 lnet_nid_cpt_hash(struct lnet_nid *nid, unsigned int number)
1601 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1606 if (nid_is_nid4(nid))
1607 return lnet_nid4_cpt_hash(lnet_nid_to_nid4(nid), number);
1609 for (i = 0; i < 4; i++)
1610 h = hash_32(nid->nid_addr[i]^h, 32);
1611 val = hash_32(LNET_NID_NET(nid) ^ h, LNET_CPT_BITS);
1614 return (unsigned int)(h + val + (val >> 1)) % number;
1618 lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni)
1620 struct lnet_net *net;
	/* must be called while holding lnet_net_lock */
1623 if (LNET_CPT_NUMBER == 1)
1624 return 0; /* the only one */
1627 * If NI is provided then use the CPT identified in the NI cpt
1628 * list if one exists. If one doesn't exist, then that NI is
1629 * associated with all CPTs and it follows that the net it belongs
1630 * to is implicitly associated with all CPTs, so just hash the nid
1634 if (ni->ni_cpts != NULL)
1635 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
1638 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1641 /* no NI provided so look at the net */
1642 net = lnet_get_net_locked(LNET_NID_NET(nid));
1644 if (net != NULL && net->net_cpts != NULL) {
1645 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
1648 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1652 lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni)
1657 if (LNET_CPT_NUMBER == 1)
1658 return 0; /* the only one */
1660 cpt = lnet_net_lock_current();
1662 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
1664 lnet_net_unlock(cpt);
1668 EXPORT_SYMBOL(lnet_nid2cpt);
1671 lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni)
1673 struct lnet_nid nid;
1675 if (LNET_CPT_NUMBER == 1)
1676 return 0; /* the only one */
1678 lnet_nid4_to_nid(nid4, &nid);
1679 return lnet_nid2cpt(&nid, ni);
1681 EXPORT_SYMBOL(lnet_cpt_of_nid);
1684 lnet_islocalnet_locked(__u32 net_id)
1686 struct lnet_net *net;
1689 net = lnet_get_net_locked(net_id);
1691 local = net != NULL;
1697 lnet_islocalnet(__u32 net_id)
1702 cpt = lnet_net_lock_current();
1704 local = lnet_islocalnet_locked(net_id);
1706 lnet_net_unlock(cpt);
1712 lnet_nid_to_ni_locked(struct lnet_nid *nid, int cpt)
1714 struct lnet_net *net;
1717 LASSERT(cpt != LNET_LOCK_EX);
1719 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1720 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1721 if (nid_same(&ni->ni_nid, nid))
1730 lnet_nid2ni_locked(lnet_nid_t nid4, int cpt)
1732 struct lnet_nid nid;
1734 lnet_nid4_to_nid(nid4, &nid);
1735 return lnet_nid_to_ni_locked(&nid, cpt);
1739 lnet_nid2ni_addref(lnet_nid_t nid4)
1742 struct lnet_nid nid;
1744 lnet_nid4_to_nid(nid4, &nid);
1747 ni = lnet_nid_to_ni_locked(&nid, 0);
1749 lnet_ni_addref_locked(ni, 0);
1754 EXPORT_SYMBOL(lnet_nid2ni_addref);
1757 lnet_nid_to_ni_addref(struct lnet_nid *nid)
1762 ni = lnet_nid_to_ni_locked(nid, 0);
1764 lnet_ni_addref_locked(ni, 0);
1769 EXPORT_SYMBOL(lnet_nid_to_ni_addref);
1772 lnet_islocalnid(struct lnet_nid *nid)
1777 cpt = lnet_net_lock_current();
1778 ni = lnet_nid_to_ni_locked(nid, cpt);
1779 lnet_net_unlock(cpt);
1785 lnet_count_acceptor_nets(void)
1787 /* Return the # of NIs that need the acceptor. */
1789 struct lnet_net *net;
1792 cpt = lnet_net_lock_current();
1793 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1794 /* all socklnd type networks should have the acceptor
1796 if (net->net_lnd->lnd_accept != NULL)
1800 lnet_net_unlock(cpt);
1805 struct lnet_ping_buffer *
1806 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1808 struct lnet_ping_buffer *pbuf;
1810 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1812 pbuf->pb_nnis = nnis;
1813 pbuf->pb_needs_post = false;
1814 atomic_set(&pbuf->pb_refcnt, 1);
1821 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1823 LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
1824 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1827 static struct lnet_ping_buffer *
1828 lnet_ping_target_create(int nnis)
1830 struct lnet_ping_buffer *pbuf;
1832 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1834 CERROR("Can't allocate ping source [%d]\n", nnis);
1838 pbuf->pb_info.pi_nnis = nnis;
1839 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1840 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1841 pbuf->pb_info.pi_features =
1842 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1848 lnet_get_net_ni_count_locked(struct lnet_net *net)
1853 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1860 lnet_get_net_ni_count_pre(struct lnet_net *net)
1865 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1872 lnet_get_ni_count(void)
1875 struct lnet_net *net;
1880 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1881 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1891 lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
1893 struct lnet_ni_status *stat;
1897 __swab32s(&pbuf->pb_info.pi_magic);
1898 __swab32s(&pbuf->pb_info.pi_features);
1899 __swab32s(&pbuf->pb_info.pi_pid);
1900 __swab32s(&pbuf->pb_info.pi_nnis);
1901 nnis = pbuf->pb_info.pi_nnis;
1902 if (nnis > pbuf->pb_nnis)
1903 nnis = pbuf->pb_nnis;
1904 for (i = 0; i < nnis; i++) {
1905 stat = &pbuf->pb_info.pi_ni[i];
1906 __swab64s(&stat->ns_nid);
1907 __swab32s(&stat->ns_status);
1912 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1916 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1918 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1920 /* Loopback is guaranteed to be present */
1921 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1923 if (LNET_PING_INFO_LONI(pinfo) != LNET_NID_LO_0)
1929 lnet_ping_target_destroy(void)
1931 struct lnet_net *net;
1934 lnet_net_lock(LNET_LOCK_EX);
1936 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1937 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1939 ni->ni_status = NULL;
1944 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1945 the_lnet.ln_ping_target = NULL;
1947 lnet_net_unlock(LNET_LOCK_EX);
1951 lnet_ping_target_event_handler(struct lnet_event *event)
1953 struct lnet_ping_buffer *pbuf = event->md_user_ptr;
1955 if (event->unlinked)
1956 lnet_ping_buffer_decref(pbuf);
1960 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1961 struct lnet_handle_md *ping_mdh,
1962 int ni_count, bool set_eq)
1964 struct lnet_processid id = {
1965 .nid = LNET_ANY_NID,
1969 struct lnet_md md = { NULL };
1973 the_lnet.ln_ping_target_handler =
1974 lnet_ping_target_event_handler;
1976 *ppbuf = lnet_ping_target_create(ni_count);
1977 if (*ppbuf == NULL) {
1982 /* Ping target ME/MD */
1983 me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
1984 LNET_PROTO_PING_MATCHBITS, 0,
1985 LNET_UNLINK, LNET_INS_AFTER);
1988 CERROR("Can't create ping target ME: %d\n", rc);
1989 goto fail_decref_ping_buffer;
1992 /* initialize md content */
1993 md.start = &(*ppbuf)->pb_info;
1994 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1995 md.threshold = LNET_MD_THRESH_INF;
1997 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1998 LNET_MD_MANAGE_REMOTE;
1999 md.handler = the_lnet.ln_ping_target_handler;
2000 md.user_ptr = *ppbuf;
2002 rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
2004 CERROR("Can't attach ping target MD: %d\n", rc);
2005 goto fail_decref_ping_buffer;
2007 lnet_ping_buffer_addref(*ppbuf);
2011 fail_decref_ping_buffer:
2012 LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
2013 lnet_ping_buffer_decref(*ppbuf);
2020 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
2021 struct lnet_handle_md *ping_mdh)
2023 LNetMDUnlink(*ping_mdh);
2024 LNetInvalidateMDHandle(ping_mdh);
2026 /* NB the MD could be busy; this just starts the unlink */
2027 wait_var_event_warning(&pbuf->pb_refcnt,
2028 atomic_read(&pbuf->pb_refcnt) <= 1,
2029 "Still waiting for ping data MD to unlink\n");
2033 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
2036 struct lnet_net *net;
2037 struct lnet_ni_status *ns;
2042 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2043 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2044 LASSERT(i < pbuf->pb_nnis);
2046 ns = &pbuf->pb_info.pi_ni[i];
2048 if (!nid_is_nid4(&ni->ni_nid))
2050 ns->ns_nid = lnet_nid_to_nid4(&ni->ni_nid);
2053 ns->ns_status = lnet_ni_get_status_locked(ni);
2054 ni->ni_status = &ns->ns_status;
2061 * We (ab)use the ns_status of the loopback interface to
2062 * transmit the sequence number. The first interface listed
2063 * must be the loopback interface.
2065 rc = lnet_ping_info_validate(&pbuf->pb_info);
2067 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
2070 LNET_PING_BUFFER_SEQNO(pbuf) =
2071 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
2075 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
2076 struct lnet_handle_md ping_mdh)
2078 struct lnet_ping_buffer *old_pbuf = NULL;
2079 struct lnet_handle_md old_ping_md;
2081 /* switch the NIs to point to the new ping info created */
2082 lnet_net_lock(LNET_LOCK_EX);
2084 if (!the_lnet.ln_routing)
2085 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
2086 if (!lnet_peer_discovery_disabled)
2087 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
2089 /* Ensure only known feature bits have been set. */
2090 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
2091 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
2093 lnet_ping_target_install_locked(pbuf);
2095 if (the_lnet.ln_ping_target) {
2096 old_pbuf = the_lnet.ln_ping_target;
2097 old_ping_md = the_lnet.ln_ping_target_md;
2099 the_lnet.ln_ping_target_md = ping_mdh;
2100 the_lnet.ln_ping_target = pbuf;
2102 lnet_net_unlock(LNET_LOCK_EX);
2105 /* unlink and free the old ping info */
2106 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
2107 lnet_ping_buffer_decref(old_pbuf);
2110 lnet_push_update_to_peers(0);
2114 lnet_ping_target_fini(void)
2116 lnet_ping_md_unlink(the_lnet.ln_ping_target,
2117 &the_lnet.ln_ping_target_md);
2119 lnet_assert_handler_unused(the_lnet.ln_ping_target_handler);
2120 lnet_ping_target_destroy();
2123 /* Resize the push target. */
2124 int lnet_push_target_resize(void)
2126 struct lnet_handle_md mdh;
2127 struct lnet_handle_md old_mdh;
2128 struct lnet_ping_buffer *pbuf;
2129 struct lnet_ping_buffer *old_pbuf;
2134 nnis = the_lnet.ln_push_target_nnis;
2136 CDEBUG(D_NET, "Invalid nnis %d\n", nnis);
2140 /* NB: lnet_ping_buffer_alloc() sets pbuf refcount to 1. That ref is
2141 * dropped when we need to resize again (see "old_pbuf" below) or when
2142 * LNet is shutdown (see lnet_push_target_fini())
2144 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
2146 CDEBUG(D_NET, "Can't allocate pbuf for nnis %d\n", nnis);
2150 rc = lnet_push_target_post(pbuf, &mdh);
2152 CDEBUG(D_NET, "Failed to post push target: %d\n", rc);
2153 lnet_ping_buffer_decref(pbuf);
2157 lnet_net_lock(LNET_LOCK_EX);
2158 old_pbuf = the_lnet.ln_push_target;
2159 old_mdh = the_lnet.ln_push_target_md;
2160 the_lnet.ln_push_target = pbuf;
2161 the_lnet.ln_push_target_md = mdh;
2162 lnet_net_unlock(LNET_LOCK_EX);
2165 LNetMDUnlink(old_mdh);
2166 /* Drop ref set by lnet_ping_buffer_alloc() */
2167 lnet_ping_buffer_decref(old_pbuf);
2170 /* Received another push or reply that requires a larger buffer */
2171 if (nnis < the_lnet.ln_push_target_nnis)
2174 CDEBUG(D_NET, "nnis %d success\n", nnis);
2178 int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
2179 struct lnet_handle_md *mdhp)
2181 struct lnet_processid id = { LNET_ANY_NID, LNET_PID_ANY };
2182 struct lnet_md md = { NULL };
2186 me = LNetMEAttach(LNET_RESERVED_PORTAL, &id,
2187 LNET_PROTO_PING_MATCHBITS, 0,
2188 LNET_UNLINK, LNET_INS_AFTER);
2191 CERROR("Can't create push target ME: %d\n", rc);
2195 pbuf->pb_needs_post = false;
2197 /* This reference is dropped by lnet_push_target_event_handler() */
2198 lnet_ping_buffer_addref(pbuf);
2200 /* initialize md content */
2201 md.start = &pbuf->pb_info;
2202 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
2205 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
2207 md.handler = the_lnet.ln_push_target_handler;
2209 rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
2211 CERROR("Can't attach push MD: %d\n", rc);
2212 lnet_ping_buffer_decref(pbuf);
2213 pbuf->pb_needs_post = true;
2217 CDEBUG(D_NET, "posted push target %p\n", pbuf);
2222 static void lnet_push_target_event_handler(struct lnet_event *ev)
2224 struct lnet_ping_buffer *pbuf = ev->md_user_ptr;
2226 CDEBUG(D_NET, "type %d status %d unlinked %d\n", ev->type, ev->status,
2229 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2230 lnet_swap_pinginfo(pbuf);
2232 if (ev->type == LNET_EVENT_UNLINK) {
2233 /* Drop ref added by lnet_push_target_post() */
2234 lnet_ping_buffer_decref(pbuf);
2238 lnet_peer_push_event(ev);
2240 /* Drop ref added by lnet_push_target_post */
2241 lnet_ping_buffer_decref(pbuf);
2244 /* Initialize the push target. */
2245 static int lnet_push_target_init(void)
2249 if (the_lnet.ln_push_target)
2252 the_lnet.ln_push_target_handler =
2253 lnet_push_target_event_handler;
2255 rc = LNetSetLazyPortal(LNET_RESERVED_PORTAL);
2258 /* Start at the required minimum, we'll enlarge if required. */
2259 the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
2261 rc = lnet_push_target_resize();
2264 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2265 the_lnet.ln_push_target_handler = NULL;
2271 /* Clean up the push target. */
2272 static void lnet_push_target_fini(void)
2274 if (!the_lnet.ln_push_target)
2277 /* Unlink and invalidate to prevent new references. */
2278 LNetMDUnlink(the_lnet.ln_push_target_md);
2279 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
2281 /* Wait for the unlink to complete. */
2282 wait_var_event_warning(&the_lnet.ln_push_target->pb_refcnt,
2283 atomic_read(&the_lnet.ln_push_target->pb_refcnt) <= 1,
2284 "Still waiting for ping data MD to unlink\n");
2286 /* Drop ref set by lnet_ping_buffer_alloc() */
2287 lnet_ping_buffer_decref(the_lnet.ln_push_target);
2288 the_lnet.ln_push_target = NULL;
2289 the_lnet.ln_push_target_nnis = 0;
2291 LNetClearLazyPortal(LNET_RESERVED_PORTAL);
2292 lnet_assert_handler_unused(the_lnet.ln_push_target_handler);
2293 the_lnet.ln_push_target_handler = NULL;
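/*
 * Added summary (not from the original source): the push target lifecycle is
 * init -> resize -> fini. lnet_push_target_init() marks the reserved portal
 * lazy and posts the first buffer via lnet_push_target_resize();
 * lnet_push_target_resize() allocates and posts a buffer sized to
 * ln_push_target_nnis and loops if that value grows while it is posting;
 * lnet_push_target_fini() unlinks the MD, waits for the buffer refcount to
 * drop, then clears the lazy portal again.
 */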
2297 lnet_ni_tq_credits(struct lnet_ni *ni)
2301 LASSERT(ni->ni_ncpts >= 1);
2303 if (ni->ni_ncpts == 1)
2304 return ni->ni_net->net_tunables.lct_max_tx_credits;
2306 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
2307 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
2308 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
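	/* Worked example (illustrative numbers): with lct_max_tx_credits = 256,
	 * lct_peer_tx_credits = 8 and ni_ncpts = 4, the per-queue value is
	 * 256 / 4 = 64, raised to at least 8 * 8 = 64 and capped at 256,
	 * so each of the 4 TX queues ends up with 64 credits.
	 */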
2314 lnet_ni_unlink_locked(struct lnet_ni *ni)
2316 /* move it to zombie list and nobody can find it anymore */
2317 LASSERT(!list_empty(&ni->ni_netlist));
2318 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
2319 lnet_ni_decref_locked(ni, 0);
2323 lnet_clear_zombies_nis_locked(struct lnet_net *net)
2328 struct list_head *zombie_list = &net->net_ni_zombie;
2331 * Now wait for the NIs I just nuked to show up on the zombie
2332 * list and shut them down in guaranteed thread context
2335 while ((ni = list_first_entry_or_null(zombie_list,
2337 ni_netlist)) != NULL) {
2341 list_del_init(&ni->ni_netlist);
2342 /* the ni should be in deleting state. If it's not it's
2344 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
2345 cfs_percpt_for_each(ref, j, ni->ni_refs) {
2348 /* still busy, add it back to zombie list */
2349 list_add(&ni->ni_netlist, zombie_list);
2353 if (!list_empty(&ni->ni_netlist)) {
2354 /* Unlock mutex while waiting to allow other
2355 * threads to read the LNet state and fall through
2358 lnet_net_unlock(LNET_LOCK_EX);
2359 mutex_unlock(&the_lnet.ln_api_mutex);
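		/* Added note: (i & -i) == i holds only when the iteration
		 * count i is a power of two, so the "Waiting for zombie LNI"
		 * warning below is emitted on passes 1, 2, 4, 8, ... rather
		 * than on every pass.
		 */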
2362 if ((i & (-i)) == i) {
2364 "Waiting for zombie LNI %s\n",
2365 libcfs_nidstr(&ni->ni_nid));
2367 schedule_timeout_uninterruptible(cfs_time_seconds(1));
2369 mutex_lock(&the_lnet.ln_api_mutex);
2370 lnet_net_lock(LNET_LOCK_EX);
2374 lnet_net_unlock(LNET_LOCK_EX);
2376 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
2378 LASSERT(!in_interrupt());
2379 /* Holding the LND mutex makes it safe for lnd_shutdown
2380 * to call module_put(). Module unload cannot finish
2381 * until lnet_unregister_lnd() completes, and that
2382 * requires the LND mutex.
2384 mutex_unlock(&the_lnet.ln_api_mutex);
2385 mutex_lock(&the_lnet.ln_lnd_mutex);
2386 (net->net_lnd->lnd_shutdown)(ni);
2387 mutex_unlock(&the_lnet.ln_lnd_mutex);
2388 mutex_lock(&the_lnet.ln_api_mutex);
2391 CDEBUG(D_LNI, "Removed LNI %s\n",
2392 libcfs_nidstr(&ni->ni_nid));
2396 lnet_net_lock(LNET_LOCK_EX);
2400 /* shut down the NI and release refcount */
2402 lnet_shutdown_lndni(struct lnet_ni *ni)
2405 struct lnet_net *net = ni->ni_net;
2407 lnet_net_lock(LNET_LOCK_EX);
2409 ni->ni_state = LNET_NI_STATE_DELETING;
2411 lnet_ni_unlink_locked(ni);
2412 lnet_incr_dlc_seq();
2413 lnet_net_unlock(LNET_LOCK_EX);
2415 /* clear messages for this NI on the lazy portal */
2416 for (i = 0; i < the_lnet.ln_nportals; i++)
2417 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
2419 lnet_net_lock(LNET_LOCK_EX);
2420 lnet_clear_zombies_nis_locked(net);
2421 lnet_net_unlock(LNET_LOCK_EX);
2425 lnet_shutdown_lndnet(struct lnet_net *net)
2429 lnet_net_lock(LNET_LOCK_EX);
2431 list_del_init(&net->net_list);
2433 while ((ni = list_first_entry_or_null(&net->net_ni_list,
2435 ni_netlist)) != NULL) {
2436 lnet_net_unlock(LNET_LOCK_EX);
2437 lnet_shutdown_lndni(ni);
2438 lnet_net_lock(LNET_LOCK_EX);
2441 lnet_net_unlock(LNET_LOCK_EX);
2443 /* Do peer table cleanup for this net */
2444 lnet_peer_tables_cleanup(net);
2450 lnet_shutdown_lndnets(void)
2452 struct lnet_net *net;
2454 struct lnet_msg *msg, *tmp;
2456 /* NB called holding the global mutex */
2458 /* All quiet on the API front */
2459 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING ||
2460 the_lnet.ln_state == LNET_STATE_STOPPING);
2461 LASSERT(the_lnet.ln_refcount == 0);
2463 lnet_net_lock(LNET_LOCK_EX);
2464 the_lnet.ln_state = LNET_STATE_STOPPING;
2467 * move the nets to the zombie list to avoid them being
2468 * picked up for new work. LONET is also included in the
2469 * Nets that will be moved to the zombie list
2471 list_splice_init(&the_lnet.ln_nets, &the_lnet.ln_net_zombie);
2473 /* Drop the cached loopback Net. */
2474 if (the_lnet.ln_loni != NULL) {
2475 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
2476 the_lnet.ln_loni = NULL;
2478 lnet_net_unlock(LNET_LOCK_EX);
2480 /* iterate through the net zombie list and delete each net */
2481 while ((net = list_first_entry_or_null(&the_lnet.ln_net_zombie,
2484 lnet_shutdown_lndnet(net);
2486 spin_lock(&the_lnet.ln_msg_resend_lock);
2487 list_splice(&the_lnet.ln_msg_resend, &resend);
2488 spin_unlock(&the_lnet.ln_msg_resend_lock);
2490 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
2491 list_del_init(&msg->msg_list);
2492 msg->msg_no_resend = true;
2493 lnet_finalize(msg, -ECANCELED);
2496 lnet_net_lock(LNET_LOCK_EX);
2497 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
2498 lnet_net_unlock(LNET_LOCK_EX);
2502 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2505 struct lnet_tx_queue *tq;
2507 struct lnet_net *net = ni->ni_net;
2509 mutex_lock(&the_lnet.ln_lnd_mutex);
2512 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2513 ni->ni_lnd_tunables_set = true;
2516 rc = (net->net_lnd->lnd_startup)(ni);
2518 mutex_unlock(&the_lnet.ln_lnd_mutex);
2521 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2522 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2527 ni->ni_state = LNET_NI_STATE_ACTIVE;
2530 /* We keep a reference on the loopback net through the loopback NI */
2531 if (net->net_lnd->lnd_type == LOLND) {
2533 LASSERT(the_lnet.ln_loni == NULL);
2534 the_lnet.ln_loni = ni;
2535 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2536 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2537 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2538 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2542 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2543 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2544 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2545 libcfs_lnd2str(net->net_lnd->lnd_type),
2546 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2548 /* shutdown the NI since if we get here then it must've already
2551 lnet_shutdown_lndni(ni);
2555 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2556 tq->tq_credits_min =
2557 tq->tq_credits_max =
2558 tq->tq_credits = lnet_ni_tq_credits(ni);
2561 atomic_set(&ni->ni_tx_credits,
2562 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2563 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2565 /* Nodes with small feet have little entropy. The NID for this
2566 * node gives the most entropy in the low bits.
2568 add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));
2570 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2571 libcfs_nidstr(&ni->ni_nid),
2572 ni->ni_net->net_tunables.lct_peer_tx_credits,
2573 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2574 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2575 ni->ni_net->net_tunables.lct_peer_timeout);
2584 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2587 struct lnet_net *net_l = NULL;
2588 LIST_HEAD(local_ni_list);
2592 const struct lnet_lnd *lnd;
2594 net->net_tunables.lct_peer_timeout;
2596 net->net_tunables.lct_max_tx_credits;
2597 int peerrtrcredits =
2598 net->net_tunables.lct_peer_rtr_credits;
2601 * make sure that this net is unique. If it isn't then
2602 * we are adding interfaces to an already existing network, and
2603 * 'net' is just a convenient way to pass in the list.
2604 * if it is unique we need to find the LND and load it if
2607 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2608 lnd_type = LNET_NETTYP(net->net_id);
2610 mutex_lock(&the_lnet.ln_lnd_mutex);
2611 lnd = lnet_find_lnd_by_type(lnd_type);
2614 mutex_unlock(&the_lnet.ln_lnd_mutex);
2615 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2616 mutex_lock(&the_lnet.ln_lnd_mutex);
2618 lnd = lnet_find_lnd_by_type(lnd_type);
2620 mutex_unlock(&the_lnet.ln_lnd_mutex);
2621 CERROR("Can't load LND %s, module %s, rc=%d\n",
2622 libcfs_lnd2str(lnd_type),
2623 libcfs_lnd2modname(lnd_type), rc);
2624 #ifndef HAVE_MODULE_LOADING_SUPPORT
2625 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
2626 "compiled with kernel module "
2627 "loading support.");
2636 mutex_unlock(&the_lnet.ln_lnd_mutex);
2642 * net_l: if the network being added is unique then net_l
2643 * will point to that network
2644 * if the network being added is not unique then
2645 * net_l points to the existing network.
2647 * When we enter the loop below, we'll pick NIs off the
2648 * network being added and start them up, then add them to
2649 * a local ni list. Once we've successfully started all
2650 * the NIs then we join the local NI list (of started up
2651 * networks) with the net_l->net_ni_list, which should
2652 * point to the correct network to add the new ni list to
2654 * If any of the new NIs fail to start up, then we want to
2655 * iterate through the local ni list, which should include
2656 * any NIs which were successfully started up, and shut
2659 * After that we want to delete the network being added,
2660 * to avoid a memory leak.
2662 while ((ni = list_first_entry_or_null(&net->net_ni_added,
2664 ni_netlist)) != NULL) {
2665 list_del_init(&ni->ni_netlist);
2667 /* make sure that the NI we're about to start
2668 * up is actually unique. If it's not, fail. */
2669 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2670 ni->ni_interface)) {
2675 /* adjust the pointer to the parent network, just in case
2676 * the net is a duplicate */
2679 rc = lnet_startup_lndni(ni, tun);
2685 list_add_tail(&ni->ni_netlist, &local_ni_list);
2690 lnet_net_lock(LNET_LOCK_EX);
2691 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2692 lnet_incr_dlc_seq();
2693 lnet_net_unlock(LNET_LOCK_EX);
2695 /* if the network is not unique then we don't want to keep
2696 * it around after we're done. Free it. Otherwise add that
2697 * net to the global the_lnet.ln_nets */
2698 if (net_l != net && net_l != NULL) {
2700 * TODO - note. currently the tunables cannot be updated
2706 * restore tunables after it has been overwritten by the
2709 if (peer_timeout != -1)
2710 net->net_tunables.lct_peer_timeout = peer_timeout;
2711 if (maxtxcredits != -1)
2712 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2713 if (peerrtrcredits != -1)
2714 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2716 lnet_net_lock(LNET_LOCK_EX);
2717 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2718 lnet_net_unlock(LNET_LOCK_EX);
2725 * shutdown the new NIs that are being started up
2726 * free the NET being started
2728 while ((ni = list_first_entry_or_null(&local_ni_list,
2730 ni_netlist)) != NULL)
2731 lnet_shutdown_lndni(ni);
2740 lnet_startup_lndnets(struct list_head *netlist)
2742 struct lnet_net *net;
2747 * Change to running state before bringing up the LNDs. This
2748 * allows lnet_shutdown_lndnets() to assert that we've passed
2751 lnet_net_lock(LNET_LOCK_EX);
2752 the_lnet.ln_state = LNET_STATE_RUNNING;
2753 lnet_net_unlock(LNET_LOCK_EX);
2755 while ((net = list_first_entry_or_null(netlist,
2757 net_list)) != NULL) {
2758 list_del_init(&net->net_list);
2760 rc = lnet_startup_lndnet(net, NULL);
2770 lnet_shutdown_lndnets();
2775 static int lnet_genl_parse_list(struct sk_buff *msg,
2776 const struct ln_key_list *data[], u16 idx)
2778 const struct ln_key_list *list = data[idx];
2779 const struct ln_key_props *props;
2780 struct nlattr *node;
2786 if (!list->lkl_maxattr)
2789 props = list->lkl_list;
2793 node = nla_nest_start(msg, LN_SCALAR_ATTR_LIST);
2797 for (count = 1; count <= list->lkl_maxattr; count++) {
2798 struct nlattr *key = nla_nest_start(msg, count);
2801 nla_put_u16(msg, LN_SCALAR_ATTR_LIST_SIZE,
2804 nla_put_u16(msg, LN_SCALAR_ATTR_INDEX, count);
2805 if (props[count].lkp_value)
2806 nla_put_string(msg, LN_SCALAR_ATTR_VALUE,
2807 props[count].lkp_value);
2808 if (props[count].lkp_key_format)
2809 nla_put_u16(msg, LN_SCALAR_ATTR_KEY_FORMAT,
2810 props[count].lkp_key_format);
2811 nla_put_u16(msg, LN_SCALAR_ATTR_NLA_TYPE,
2812 props[count].lkp_data_type);
2813 if (props[count].lkp_data_type == NLA_NESTED) {
2816 rc = lnet_genl_parse_list(msg, data, ++idx);
2822 nla_nest_end(msg, key);
2825 nla_nest_end(msg, node);
2829 int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
2830 const struct genl_family *family, int flags,
2831 u8 cmd, const struct ln_key_list *data[])
2839 hdr = genlmsg_put(msg, portid, seq, family, flags, cmd);
2841 GOTO(canceled, rc = -EMSGSIZE);
2843 rc = lnet_genl_parse_list(msg, data, 0);
2847 genlmsg_end(msg, hdr);
2850 genlmsg_cancel(msg, hdr);
2851 return rc > 0 ? 0 : rc;
2853 EXPORT_SYMBOL(lnet_genl_send_scalar_list);
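/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * minimal key list of the kind consumed by lnet_genl_parse_list() and
 * lnet_genl_send_scalar_list() above. Entry [0] of lkl_list is unused
 * because the parse loop starts at index 1. The enum, the strings and all
 * "example_*" names are invented; only the struct field names are taken
 * from the code above.
 */
#if 0
enum example_attrs {
	EXAMPLE_ATTR_UNSPEC = 0,
	EXAMPLE_ATTR_HDR,		/* "example" header string */
	EXAMPLE_ATTR_NAME,		/* a single string value */
	EXAMPLE_ATTR_MAX = EXAMPLE_ATTR_NAME,
};

static const struct ln_key_list example_props_list = {
	.lkl_maxattr	= EXAMPLE_ATTR_MAX,
	.lkl_list	= {
		[EXAMPLE_ATTR_HDR]	= {
			.lkp_value	= "example",
			.lkp_data_type	= NLA_NUL_STRING,
		},
		[EXAMPLE_ATTR_NAME]	= {
			.lkp_value	= "name",
			.lkp_data_type	= NLA_STRING,
		},
	},
};

/* A sender would pass an array of such lists, e.g.:
 *	const struct ln_key_list *all[] = { &example_props_list };
 *	rc = lnet_genl_send_scalar_list(msg, portid, seq, family, flags,
 *					cmd, all);
 * where msg, portid, seq, family, flags and cmd come from the caller's
 * genetlink dump context (names here are assumptions).
 */
#endif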
2856 * Initialize LNet library.
2858 * Automatically called at module loading time. Caller has to call
2859 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2860 * latter returned 0. It must be called exactly once.
2862 * \retval 0 on success
2863 * \retval -ve on failures.
2865 int lnet_lib_init(void)
2869 lnet_assert_wire_constants();
2871 /* refer to global cfs_cpt_table for now */
2872 the_lnet.ln_cpt_table = cfs_cpt_tab;
2873 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
2875 LASSERT(the_lnet.ln_cpt_number > 0);
2876 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2877 /* we are at risk of consuming all lh_cookie */
2878 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2879 "please change setting of CPT-table and retry\n",
2880 the_lnet.ln_cpt_number, LNET_CPT_MAX);
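	/* Added note: the loop below computes ln_cpt_bits as the smallest
	 * number of bits that can index every CPT, i.e.
	 * ceil(log2(ln_cpt_number)).
	 */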
2884 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2885 the_lnet.ln_cpt_bits++;
2887 rc = lnet_create_locks();
2889 CERROR("Can't create LNet global locks: %d\n", rc);
2893 the_lnet.ln_refcount = 0;
2894 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2895 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2897 /* The hash table size is the number of bits it takes to express the set
2898 * ln_num_routes, minus 1 (better to underestimate than over so we
2899 * don't waste memory). */
2900 if (rnet_htable_size <= 0)
2901 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2902 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2903 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2904 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2905 order_base_2(rnet_htable_size) - 1);
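	/* Worked example: rnet_htable_size = 128 gives order_base_2(128) = 7,
	 * so ln_remote_nets_hbits = 6 and the remote-net hash table gets
	 * 2^6 = 64 buckets, deliberately one bit smaller than requested.
	 */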
2907 /* All LNDs apart from the LOLND are in separate modules. They
2908 * register themselves when their module loads, and unregister
2909 * themselves when their module is unloaded. */
2910 lnet_register_lnd(&the_lolnd);
2915 * Finalize LNet library.
2917 * \pre lnet_lib_init() called with success.
2918 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2920 * As this happens at module-unload, all lnds must already be unloaded,
2921 * so they must already be unregistered.
2923 void lnet_lib_exit(void)
2927 LASSERT(the_lnet.ln_refcount == 0);
2928 lnet_unregister_lnd(&the_lolnd);
2929 for (i = 0; i < NUM_LNDS; i++)
2930 LASSERT(!the_lnet.ln_lnds[i]);
2931 lnet_destroy_locks();
2935 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2937 * Users must call this function at least once before any other functions.
2938 * For each successful call there must be a corresponding call to
2939 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2942 * The PID used by LNet may be different from the one requested.
2945 * \param requested_pid PID requested by the caller.
2947 * \return >= 0 on success, and < 0 error code on failures.
2950 LNetNIInit(lnet_pid_t requested_pid)
2952 int im_a_router = 0;
2955 struct lnet_ping_buffer *pbuf;
2956 struct lnet_handle_md ping_mdh;
2957 LIST_HEAD(net_head);
2958 struct lnet_net *net;
2960 mutex_lock(&the_lnet.ln_api_mutex);
2962 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2964 if (the_lnet.ln_state == LNET_STATE_STOPPING) {
2965 mutex_unlock(&the_lnet.ln_api_mutex);
2969 if (the_lnet.ln_refcount > 0) {
2970 rc = the_lnet.ln_refcount++;
2971 mutex_unlock(&the_lnet.ln_api_mutex);
2975 rc = lnet_prepare(requested_pid);
2977 mutex_unlock(&the_lnet.ln_api_mutex);
2981 /* create a network for Loopback network */
2982 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2985 goto err_empty_list;
2988 /* Add in the loopback NI */
2989 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2991 goto err_empty_list;
2994 if (use_tcp_bonding)
2995 CWARN("use_tcp_bonding has been removed. Use Multi-Rail and Dynamic Discovery instead, see LU-13641\n");
2997 /* If LNet is being initialized via DLC it is possible
2998 * that the user requests not to load module parameters (ones which
2999 * are supported by DLC) on initialization. Therefore, make sure not
3000 * to load networks, routes and forwarding from module parameters
3001 * in this case. On cleanup in case of failure only clean up
3002 * routes if they have been loaded */
3003 if (!the_lnet.ln_nis_from_mod_params) {
3004 rc = lnet_parse_networks(&net_head, lnet_get_networks());
3006 goto err_empty_list;
3009 ni_count = lnet_startup_lndnets(&net_head);
3012 goto err_empty_list;
3015 if (!the_lnet.ln_nis_from_mod_params) {
3016 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
3018 goto err_shutdown_lndnis;
3020 rc = lnet_rtrpools_alloc(im_a_router);
3022 goto err_destroy_routes;
3025 rc = lnet_acceptor_start();
3027 goto err_destroy_routes;
3029 the_lnet.ln_refcount = 1;
3030 /* Now I may use my own API functions... */
3032 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
3034 goto err_acceptor_stop;
3036 lnet_ping_target_update(pbuf, ping_mdh);
3038 the_lnet.ln_mt_handler = lnet_mt_event_handler;
3040 rc = lnet_push_target_init();
3044 rc = lnet_peer_discovery_start();
3046 goto err_destroy_push_target;
3048 rc = lnet_monitor_thr_start();
3050 goto err_stop_discovery_thr;
3053 lnet_router_debugfs_init();
3055 mutex_unlock(&the_lnet.ln_api_mutex);
3057 complete_all(&the_lnet.ln_started);
3059 /* wait for all routers to start */
3060 lnet_wait_router_start();
3064 err_stop_discovery_thr:
3065 lnet_peer_discovery_stop();
3066 err_destroy_push_target:
3067 lnet_push_target_fini();
3069 lnet_ping_target_fini();
3071 the_lnet.ln_refcount = 0;
3072 lnet_acceptor_stop();
3074 if (!the_lnet.ln_nis_from_mod_params)
3075 lnet_destroy_routes();
3076 err_shutdown_lndnis:
3077 lnet_shutdown_lndnets();
3081 mutex_unlock(&the_lnet.ln_api_mutex);
3082 while ((net = list_first_entry_or_null(&net_head,
3084 net_list)) != NULL) {
3085 list_del_init(&net->net_list);
3090 EXPORT_SYMBOL(LNetNIInit);
3093 * Stop LNet interfaces, routing, and forwarding.
3095 * Users must call this function once for each successful call to LNetNIInit().
3096 * Once the LNetNIFini() operation has been started, the results of pending
3097 * API operations are undefined.
3099 * \return always 0 for current implementation.
3104 mutex_lock(&the_lnet.ln_api_mutex);
3106 LASSERT(the_lnet.ln_refcount > 0);
3108 if (the_lnet.ln_refcount != 1) {
3109 the_lnet.ln_refcount--;
3111 LASSERT(!the_lnet.ln_niinit_self);
3113 lnet_net_lock(LNET_LOCK_EX);
3114 the_lnet.ln_state = LNET_STATE_STOPPING;
3115 lnet_net_unlock(LNET_LOCK_EX);
3119 lnet_router_debugfs_fini();
3120 lnet_monitor_thr_stop();
3121 lnet_peer_discovery_stop();
3122 lnet_push_target_fini();
3123 lnet_ping_target_fini();
3125 /* Teardown fns that use my own API functions BEFORE here */
3126 the_lnet.ln_refcount = 0;
3128 lnet_acceptor_stop();
3129 lnet_destroy_routes();
3130 lnet_shutdown_lndnets();
3134 mutex_unlock(&the_lnet.ln_api_mutex);
3137 EXPORT_SYMBOL(LNetNIFini);
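/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * a kernel user of LNet brackets its use of the API with
 * LNetNIInit()/LNetNIFini(), exactly one LNetNIFini() per successful
 * LNetNIInit(). LNET_PID_LUSTRE is the PID normally requested; the function
 * name below is invented.
 */
#if 0
static int example_lnet_user_start(void)
{
	int rc;

	rc = LNetNIInit(LNET_PID_LUSTRE);
	if (rc < 0)
		return rc;		/* LNet could not be brought up */

	/* ... use LNetGet()/LNetPut()/LNetMDAttach() etc. here ... */

	LNetNIFini();			/* drop the reference taken above */
	return 0;
}
#endif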
3140 * Grabs the ni data from the ni structure and fills the out
3143 * \param[in] ni network interface structure
3144 * \param[out] cfg_ni NI config information
3145 * \param[out] tun network and LND tunables
3148 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
3149 struct lnet_ioctl_config_lnd_tunables *tun,
3150 struct lnet_ioctl_element_stats *stats,
3153 size_t min_size = 0;
3156 if (!ni || !cfg_ni || !tun || !nid_is_nid4(&ni->ni_nid))
3159 if (ni->ni_interface != NULL) {
3160 strncpy(cfg_ni->lic_ni_intf,
3162 sizeof(cfg_ni->lic_ni_intf));
3165 cfg_ni->lic_nid = lnet_nid_to_nid4(&ni->ni_nid);
3166 cfg_ni->lic_status = lnet_ni_get_status_locked(ni);
3167 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
3169 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
3172 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
3173 LNET_STATS_TYPE_SEND);
3174 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
3175 LNET_STATS_TYPE_RECV);
3176 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
3177 LNET_STATS_TYPE_DROP);
3181 * tun->lt_tun will always be present, but in order to be
3182 * backwards compatible, we need to deal with the case where
3183 * tun->lt_tun is smaller than what the kernel has because it
3184 * comes from an older version of a userspace program. In that
3185 * case we copy only as much information as will fit in the available space.
3187 min_size = tun_size - sizeof(tun->lt_cmn);
3188 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
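	/* Example: if an older tool passed tun_size = sizeof(tun->lt_cmn) + 8,
	 * min_size is 8 and only the first 8 bytes of ni_lnd_tunables are
	 * copied back, so userspace never reads past the buffer it allocated.
	 */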
3190 /* copy over the cpts */
3191 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
3192 ni->ni_cpts == NULL) {
3193 for (i = 0; i < ni->ni_ncpts; i++)
3194 cfg_ni->lic_cpts[i] = i;
3197 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
3198 i < LNET_MAX_SHOW_NUM_CPT;
3200 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
3202 cfg_ni->lic_ncpts = ni->ni_ncpts;
3206 * NOTE: This is a legacy function left in the code to be backwards
3207 * compatible with older userspace programs. It should eventually be
3210 * Grabs the ni data from the ni structure and fills the out
3213 * \param[in] ni network interface structure
3214 * \param[out] config config information
3217 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
3218 struct lnet_ioctl_config_data *config)
3220 struct lnet_ioctl_net_config *net_config;
3221 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
3222 size_t min_size, tunable_size = 0;
3225 if (!ni || !config || !nid_is_nid4(&ni->ni_nid))
3228 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
3232 if (!ni->ni_interface)
3235 strncpy(net_config->ni_interface,
3237 sizeof(net_config->ni_interface));
3239 config->cfg_nid = lnet_nid_to_nid4(&ni->ni_nid);
3240 config->cfg_config_u.cfg_net.net_peer_timeout =
3241 ni->ni_net->net_tunables.lct_peer_timeout;
3242 config->cfg_config_u.cfg_net.net_max_tx_credits =
3243 ni->ni_net->net_tunables.lct_max_tx_credits;
3244 config->cfg_config_u.cfg_net.net_peer_tx_credits =
3245 ni->ni_net->net_tunables.lct_peer_tx_credits;
3246 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
3247 ni->ni_net->net_tunables.lct_peer_rtr_credits;
3249 net_config->ni_status = lnet_ni_get_status_locked(ni);
3252 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
3254 for (i = 0; i < num_cpts; i++)
3255 net_config->ni_cpts[i] = ni->ni_cpts[i];
3257 config->cfg_ncpts = num_cpts;
3261 * See if user land tools sent in a newer and larger version
3262 * of struct lnet_tunables than what the kernel uses.
3264 min_size = sizeof(*config) + sizeof(*net_config);
3266 if (config->cfg_hdr.ioc_len > min_size)
3267 tunable_size = config->cfg_hdr.ioc_len - min_size;
3269 /* Don't copy too much data to user space */
3270 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
3271 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
3273 if (lnd_cfg && min_size) {
3274 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
3275 config->cfg_config_u.cfg_net.net_interface_count = 1;
3277 /* Tell user land that kernel side has less data */
3278 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
3279 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
3280 config->cfg_hdr.ioc_len -= min_size;
3286 lnet_get_ni_idx_locked(int idx)
3289 struct lnet_net *net;
3291 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3292 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3301 int lnet_get_net_healthv_locked(struct lnet_net *net)
3304 int best_healthv = 0;
3305 int healthv, ni_fatal;
3307 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3308 healthv = atomic_read(&ni->ni_healthv);
3309 ni_fatal = atomic_read(&ni->ni_fatal_error_on);
3310 if (!ni_fatal && healthv > best_healthv)
3311 best_healthv = healthv;
3314 return best_healthv;
3318 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
3321 struct lnet_net *net = mynet;
3324 * It is possible that the net has been cleaned out while there is
3325 * a message being sent. This function accesses the net without
3326 * checking whether the list is empty
3330 net = list_first_entry(&the_lnet.ln_nets,
3333 if (list_empty(&net->net_ni_list))
3335 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
3341 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
3342 /* if you reached the end of the ni list and the net is
3343 * specified, then there are no more nis in that net */
3347 /* we reached the end of this net ni list. move to the
3349 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
3350 /* no more nets and no more NIs. */
3353 /* get the next net */
3354 net = list_first_entry(&prev->ni_net->net_list, struct lnet_net,
3356 if (list_empty(&net->net_ni_list))
3358 /* get the ni on it */
3359 ni = list_first_entry(&net->net_ni_list, struct lnet_ni,
3365 if (list_empty(&prev->ni_netlist))
3368 /* there are more nis left */
3369 ni = list_first_entry(&prev->ni_netlist, struct lnet_ni, ni_netlist);
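/*
 * Illustrative sketch (hypothetical, not part of the original file): walking
 * every local NI by feeding the previous result back in, which is how
 * lnet_get_next_ni_locked() is meant to be driven. Passing a NULL net walks
 * all nets; the net lock must be held across the whole walk.
 */
#if 0
static void example_walk_all_nis(void)
{
	struct lnet_ni *ni = NULL;

	lnet_net_lock(LNET_LOCK_EX);
	while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL)
		CDEBUG(D_NET, "local NI %s\n", libcfs_nidstr(&ni->ni_nid));
	lnet_net_unlock(LNET_LOCK_EX);
}
#endif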
3375 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3380 int idx = config->cfg_count;
3382 cpt = lnet_net_lock_current();
3384 ni = lnet_get_ni_idx_locked(idx);
3389 lnet_fill_ni_info_legacy(ni, config);
3393 lnet_net_unlock(cpt);
3398 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3399 struct lnet_ioctl_config_lnd_tunables *tun,
3400 struct lnet_ioctl_element_stats *stats,
3407 if (!cfg_ni || !tun || !stats)
3410 cpt = lnet_net_lock_current();
3412 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3417 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3421 lnet_net_unlock(cpt);
3425 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3434 cpt = lnet_net_lock_current();
3436 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3439 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3443 lnet_net_unlock(cpt);
3448 static int lnet_add_net_common(struct lnet_net *net,
3449 struct lnet_ioctl_config_lnd_tunables *tun)
3451 struct lnet_handle_md ping_mdh;
3452 struct lnet_ping_buffer *pbuf;
3453 struct lnet_remotenet *rnet;
3459 lnet_net_lock(LNET_LOCK_EX);
3460 rnet = lnet_find_rnet_locked(net->net_id);
3461 lnet_net_unlock(LNET_LOCK_EX);
3463 * make sure that the net added doesn't invalidate the current
3464 * configuration LNet is keeping
3467 CERROR("Adding net %s will invalidate routing configuration\n",
3468 libcfs_net2str(net->net_id));
3474 * make sure you calculate the correct number of slots in the ping
3475 * buffer. Since the ping info is a flattened list of all the NIs,
3476 * we should allocate enough slots to accommodate the number of NIs
3477 * which will be added.
3479 * since ni hasn't been configured yet, use
3480 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
3482 net_ni_count = lnet_get_net_ni_count_pre(net);
3484 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3485 net_ni_count + lnet_get_ni_count(),
3493 memcpy(&net->net_tunables,
3494 &tun->lt_cmn, sizeof(net->net_tunables));
3496 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3498 net_id = net->net_id;
3500 rc = lnet_startup_lndnet(net,
3501 (tun) ? &tun->lt_tun : NULL);
3505 lnet_net_lock(LNET_LOCK_EX);
3506 net = lnet_get_net_locked(net_id);
3509 /* apply the UDSPs */
3510 rc = lnet_udsp_apply_policies_on_net(net);
3512 CERROR("Failed to apply UDSPs on local net %s\n",
3513 libcfs_net2str(net->net_id));
3515 /* At this point we lost track of which NI was just added, so we
3516 * just re-apply the policies on all of the NIs on this net
3518 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3519 rc = lnet_udsp_apply_policies_on_ni(ni);
3521 CERROR("Failed to apply UDSPs on ni %s\n",
3522 libcfs_nidstr(&ni->ni_nid));
3524 lnet_net_unlock(LNET_LOCK_EX);
3527 * Start the acceptor thread if this is the first network
3528 * being added that requires the thread.
3530 if (net->net_lnd->lnd_accept) {
3531 rc = lnet_acceptor_start();
3533 /* shutdown the net that we just started */
3534 CERROR("Failed to start up acceptor thread\n");
3535 lnet_shutdown_lndnet(net);
3540 lnet_net_lock(LNET_LOCK_EX);
3541 lnet_peer_net_added(net);
3542 lnet_net_unlock(LNET_LOCK_EX);
3544 lnet_ping_target_update(pbuf, ping_mdh);
3549 lnet_ping_md_unlink(pbuf, &ping_mdh);
3550 lnet_ping_buffer_decref(pbuf);
3555 lnet_set_tune_defaults(struct lnet_ioctl_config_lnd_tunables *tun)
3558 if (!tun->lt_cmn.lct_peer_timeout)
3559 tun->lt_cmn.lct_peer_timeout = DEFAULT_PEER_TIMEOUT;
3560 if (!tun->lt_cmn.lct_peer_tx_credits)
3561 tun->lt_cmn.lct_peer_tx_credits = DEFAULT_PEER_CREDITS;
3562 if (!tun->lt_cmn.lct_max_tx_credits)
3563 tun->lt_cmn.lct_max_tx_credits = DEFAULT_CREDITS;
3567 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3568 struct lnet_ioctl_config_lnd_tunables *tun)
3570 struct lnet_net *net;
3573 LIST_HEAD(net_head);
3575 rc = lnet_parse_ip2nets(&nets, ip2nets);
3579 rc = lnet_parse_networks(&net_head, nets);
3583 lnet_set_tune_defaults(tun);
3585 mutex_lock(&the_lnet.ln_api_mutex);
3586 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3591 while ((net = list_first_entry_or_null(&net_head,
3593 net_list)) != NULL) {
3594 list_del_init(&net->net_list);
3595 rc = lnet_add_net_common(net, tun);
3601 mutex_unlock(&the_lnet.ln_api_mutex);
3603 while ((net = list_first_entry_or_null(&net_head,
3605 net_list)) != NULL) {
3606 list_del_init(&net->net_list);
3612 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
3614 struct lnet_net *net;
3616 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3618 __u32 net_id, lnd_type;
3620 /* get the tunables if they are available */
3621 if (conf->lic_cfg_hdr.ioc_len >=
3622 sizeof(*conf) + sizeof(*tun))
3623 tun = (struct lnet_ioctl_config_lnd_tunables *)
3626 /* handle legacy ip2nets from DLC */
3627 if (conf->lic_legacy_ip2nets[0] != '\0')
3628 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3631 net_id = LNET_NIDNET(conf->lic_nid);
3632 lnd_type = LNET_NETTYP(net_id);
3634 if (!libcfs_isknown_lnd(lnd_type)) {
3635 CERROR("No valid net and lnd information provided\n");
3639 net = lnet_net_alloc(net_id, NULL);
3643 for (i = 0; i < conf->lic_ncpts; i++) {
3644 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
3648 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3653 lnet_set_tune_defaults(tun);
3655 mutex_lock(&the_lnet.ln_api_mutex);
3656 if (the_lnet.ln_state != LNET_STATE_RUNNING)
3659 rc = lnet_add_net_common(net, tun);
3661 mutex_unlock(&the_lnet.ln_api_mutex);
3666 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
3668 struct lnet_net *net;
3670 __u32 net_id = LNET_NIDNET(conf->lic_nid);
3671 struct lnet_ping_buffer *pbuf;
3672 struct lnet_handle_md ping_mdh;
3677 /* don't allow userspace to shutdown the LOLND */
3678 if (LNET_NETTYP(net_id) == LOLND)
3681 mutex_lock(&the_lnet.ln_api_mutex);
3682 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3684 goto unlock_api_mutex;
3689 net = lnet_get_net_locked(net_id);
3691 CERROR("net %s not found\n",
3692 libcfs_net2str(net_id));
3697 addr = LNET_NIDADDR(conf->lic_nid);
3699 /* remove the entire net */
3700 net_count = lnet_get_net_ni_count_locked(net);
3704 /* create and link a new ping info, before removing the old one */
3705 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3706 lnet_get_ni_count() - net_count,
3709 goto unlock_api_mutex;
3711 lnet_shutdown_lndnet(net);
3713 lnet_acceptor_stop();
3715 lnet_ping_target_update(pbuf, ping_mdh);
3717 goto unlock_api_mutex;
3720 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
3722 CERROR("nid %s not found\n",
3723 libcfs_nid2str(conf->lic_nid));
3728 net_count = lnet_get_net_ni_count_locked(net);
3732 /* create and link a new ping info, before removing the old one */
3733 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3734 lnet_get_ni_count() - 1, false);
3736 goto unlock_api_mutex;
3738 lnet_shutdown_lndni(ni);
3740 lnet_acceptor_stop();
3742 lnet_ping_target_update(pbuf, ping_mdh);
3744 /* check if the net is empty and remove it if it is */
3746 lnet_shutdown_lndnet(net);
3748 goto unlock_api_mutex;
3753 mutex_unlock(&the_lnet.ln_api_mutex);
3759 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3760 * They are only expected to be called for unique networks.
3761 * That can be as a result of older DLC library
3762 * calls. Multi-Rail DLC and beyond no longer uses these APIs.
3765 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3767 struct lnet_net *net;
3768 LIST_HEAD(net_head);
3770 struct lnet_ioctl_config_lnd_tunables tun;
3771 const char *nets = conf->cfg_config_u.cfg_net.net_intf;
3773 /* Create a net/ni structures for the network string */
3774 rc = lnet_parse_networks(&net_head, nets);
3776 return rc == 0 ? -EINVAL : rc;
3778 mutex_lock(&the_lnet.ln_api_mutex);
3779 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3781 goto out_unlock_clean;
3785 rc = -EINVAL; /* only add one network per call */
3786 goto out_unlock_clean;
3789 net = list_first_entry(&net_head, struct lnet_net, net_list);
3790 list_del_init(&net->net_list);
3792 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3794 memset(&tun, 0, sizeof(tun));
3796 tun.lt_cmn.lct_peer_timeout =
3797 (!conf->cfg_config_u.cfg_net.net_peer_timeout) ? DEFAULT_PEER_TIMEOUT :
3798 conf->cfg_config_u.cfg_net.net_peer_timeout;
3799 tun.lt_cmn.lct_peer_tx_credits =
3800 (!conf->cfg_config_u.cfg_net.net_peer_tx_credits) ? DEFAULT_PEER_CREDITS :
3801 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3802 tun.lt_cmn.lct_peer_rtr_credits =
3803 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3804 tun.lt_cmn.lct_max_tx_credits =
3805 (!conf->cfg_config_u.cfg_net.net_max_tx_credits) ? DEFAULT_CREDITS :
3806 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3808 rc = lnet_add_net_common(net, &tun);
3811 mutex_unlock(&the_lnet.ln_api_mutex);
3812 /* net_head list is empty in success case */
3813 while ((net = list_first_entry_or_null(&net_head,
3815 net_list)) != NULL) {
3816 list_del_init(&net->net_list);
3823 lnet_dyn_del_net(__u32 net_id)
3825 struct lnet_net *net;
3826 struct lnet_ping_buffer *pbuf;
3827 struct lnet_handle_md ping_mdh;
3831 /* don't allow userspace to shutdown the LOLND */
3832 if (LNET_NETTYP(net_id) == LOLND)
3835 mutex_lock(&the_lnet.ln_api_mutex);
3836 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3843 net = lnet_get_net_locked(net_id);
3850 net_ni_count = lnet_get_net_ni_count_locked(net);
3854 /* create and link a new ping info, before removing the old one */
3855 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3856 lnet_get_ni_count() - net_ni_count, false);
3860 lnet_shutdown_lndnet(net);
3862 lnet_acceptor_stop();
3864 lnet_ping_target_update(pbuf, ping_mdh);
3867 mutex_unlock(&the_lnet.ln_api_mutex);
3872 void lnet_incr_dlc_seq(void)
3874 atomic_inc(&lnet_dlc_seq_no);
3877 __u32 lnet_get_dlc_seq_locked(void)
3879 return atomic_read(&lnet_dlc_seq_no);
3883 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3885 struct lnet_net *net;
3888 lnet_net_lock(LNET_LOCK_EX);
3889 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3890 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3891 if (all || (nid_is_nid4(&ni->ni_nid) &&
3892 lnet_nid_to_nid4(&ni->ni_nid) == nid)) {
3893 atomic_set(&ni->ni_healthv, value);
3894 if (list_empty(&ni->ni_recovery) &&
3895 value < LNET_MAX_HEALTH_VALUE) {
3896 CERROR("manually adding local NI %s to recovery\n",
3897 libcfs_nidstr(&ni->ni_nid));
3898 list_add_tail(&ni->ni_recovery,
3899 &the_lnet.ln_mt_localNIRecovq);
3900 lnet_ni_addref_locked(ni, 0);
3903 lnet_net_unlock(LNET_LOCK_EX);
3909 lnet_net_unlock(LNET_LOCK_EX);
3913 lnet_ni_set_conns_per_peer(lnet_nid_t nid, int value, bool all)
3915 struct lnet_net *net;
3918 lnet_net_lock(LNET_LOCK_EX);
3919 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3920 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3921 if (lnet_nid_to_nid4(&ni->ni_nid) != nid && !all)
3923 if (LNET_NETTYP(net->net_id) == SOCKLND)
3924 ni->ni_lnd_tunables.lnd_tun_u.lnd_sock.lnd_conns_per_peer = value;
3925 else if (LNET_NETTYP(net->net_id) == O2IBLND)
3926 ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib.lnd_conns_per_peer = value;
3928 lnet_net_unlock(LNET_LOCK_EX);
3933 lnet_net_unlock(LNET_LOCK_EX);
3937 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3941 lnet_nid_t nid = stats->hlni_nid;
3943 cpt = lnet_net_lock_current();
3944 ni = lnet_nid2ni_locked(nid, cpt);
3951 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3952 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3953 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3954 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3955 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3956 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3957 stats->hlni_fatal_error = atomic_read(&ni->ni_fatal_error_on);
3958 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3959 stats->hlni_ping_count = ni->ni_ping_count;
3960 stats->hlni_next_ping = ni->ni_next_ping;
3963 lnet_net_unlock(cpt);
3969 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3974 lnet_net_lock(LNET_LOCK_EX);
3975 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
3976 if (!nid_is_nid4(&ni->ni_nid))
3978 list->rlst_nid_array[i] = lnet_nid_to_nid4(&ni->ni_nid);
3980 if (i >= LNET_MAX_SHOW_NUM_NID)
3983 lnet_net_unlock(LNET_LOCK_EX);
3984 list->rlst_num_nids = i;
3990 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3992 struct lnet_peer_ni *lpni;
3995 lnet_net_lock(LNET_LOCK_EX);
3996 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
3997 list->rlst_nid_array[i] = lnet_nid_to_nid4(&lpni->lpni_nid);
3999 if (i >= LNET_MAX_SHOW_NUM_NID)
4002 lnet_net_unlock(LNET_LOCK_EX);
4003 list->rlst_num_nids = i;
4009 * LNet ioctl handler.
4013 LNetCtl(unsigned int cmd, void *arg)
4015 struct libcfs_ioctl_data *data = arg;
4016 struct lnet_ioctl_config_data *config;
4017 struct lnet_process_id id4 = {};
4018 struct lnet_processid id = {};
4020 struct lnet_nid nid;
4023 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
4024 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
4027 case IOC_LIBCFS_GET_NI:
4028 rc = LNetGetId(data->ioc_count, &id);
4029 data->ioc_nid = lnet_nid_to_nid4(&id.nid);
4032 case IOC_LIBCFS_FAIL_NID:
4033 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
4035 case IOC_LIBCFS_ADD_ROUTE: {
4036 /* default router sensitivity to 1 */
4037 unsigned int sensitivity = 1;
4040 if (config->cfg_hdr.ioc_len < sizeof(*config))
4043 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
4045 config->cfg_config_u.cfg_route.rtr_sensitivity;
4048 lnet_nid4_to_nid(config->cfg_nid, &nid);
4049 mutex_lock(&the_lnet.ln_api_mutex);
4050 rc = lnet_add_route(config->cfg_net,
4051 config->cfg_config_u.cfg_route.rtr_hop,
4053 config->cfg_config_u.cfg_route.
4054 rtr_priority, sensitivity);
4055 mutex_unlock(&the_lnet.ln_api_mutex);
4059 case IOC_LIBCFS_DEL_ROUTE:
4062 if (config->cfg_hdr.ioc_len < sizeof(*config))
4065 lnet_nid4_to_nid(config->cfg_nid, &nid);
4066 mutex_lock(&the_lnet.ln_api_mutex);
4067 rc = lnet_del_route(config->cfg_net, &nid);
4068 mutex_unlock(&the_lnet.ln_api_mutex);
4071 case IOC_LIBCFS_GET_ROUTE:
4074 if (config->cfg_hdr.ioc_len < sizeof(*config))
4077 mutex_lock(&the_lnet.ln_api_mutex);
4078 rc = lnet_get_route(config->cfg_count,
4080 &config->cfg_config_u.cfg_route.rtr_hop,
4082 &config->cfg_config_u.cfg_route.rtr_flags,
4083 &config->cfg_config_u.cfg_route.
4085 &config->cfg_config_u.cfg_route.
4087 mutex_unlock(&the_lnet.ln_api_mutex);
4090 case IOC_LIBCFS_GET_LOCAL_NI: {
4091 struct lnet_ioctl_config_ni *cfg_ni;
4092 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
4093 struct lnet_ioctl_element_stats *stats;
4098 /* get the tunables if they are available */
4099 if (cfg_ni->lic_cfg_hdr.ioc_len <
4100 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
4103 stats = (struct lnet_ioctl_element_stats *)
4105 tun = (struct lnet_ioctl_config_lnd_tunables *)
4106 (cfg_ni->lic_bulk + sizeof(*stats));
4108 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
4111 mutex_lock(&the_lnet.ln_api_mutex);
4112 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
4113 mutex_unlock(&the_lnet.ln_api_mutex);
4117 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
4118 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
4120 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
4123 mutex_lock(&the_lnet.ln_api_mutex);
4124 rc = lnet_get_ni_stats(msg_stats);
4125 mutex_unlock(&the_lnet.ln_api_mutex);
4130 case IOC_LIBCFS_GET_NET: {
4131 size_t total = sizeof(*config) +
4132 sizeof(struct lnet_ioctl_net_config);
4135 if (config->cfg_hdr.ioc_len < total)
4138 mutex_lock(&the_lnet.ln_api_mutex);
4139 rc = lnet_get_net_config(config);
4140 mutex_unlock(&the_lnet.ln_api_mutex);
4144 case IOC_LIBCFS_GET_LNET_STATS:
4146 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
4148 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
4151 mutex_lock(&the_lnet.ln_api_mutex);
4152 rc = lnet_counters_get(&lnet_stats->st_cntrs);
4153 mutex_unlock(&the_lnet.ln_api_mutex);
4157 case IOC_LIBCFS_RESET_LNET_STATS:
4159 mutex_lock(&the_lnet.ln_api_mutex);
4160 lnet_counters_reset();
4161 mutex_unlock(&the_lnet.ln_api_mutex);
4165 case IOC_LIBCFS_CONFIG_RTR:
4168 if (config->cfg_hdr.ioc_len < sizeof(*config))
4171 mutex_lock(&the_lnet.ln_api_mutex);
4172 if (config->cfg_config_u.cfg_buffers.buf_enable) {
4173 rc = lnet_rtrpools_enable();
4174 mutex_unlock(&the_lnet.ln_api_mutex);
4177 lnet_rtrpools_disable();
4178 mutex_unlock(&the_lnet.ln_api_mutex);
4181 case IOC_LIBCFS_ADD_BUF:
4184 if (config->cfg_hdr.ioc_len < sizeof(*config))
4187 mutex_lock(&the_lnet.ln_api_mutex);
4188 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
4190 config->cfg_config_u.cfg_buffers.
4192 config->cfg_config_u.cfg_buffers.
4194 mutex_unlock(&the_lnet.ln_api_mutex);
4197 case IOC_LIBCFS_SET_NUMA_RANGE: {
4198 struct lnet_ioctl_set_value *numa;
4200 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4202 lnet_net_lock(LNET_LOCK_EX);
4203 lnet_numa_range = numa->sv_value;
4204 lnet_net_unlock(LNET_LOCK_EX);
4208 case IOC_LIBCFS_GET_NUMA_RANGE: {
4209 struct lnet_ioctl_set_value *numa;
4211 if (numa->sv_hdr.ioc_len != sizeof(*numa))
4213 numa->sv_value = lnet_numa_range;
4217 case IOC_LIBCFS_GET_BUF: {
4218 struct lnet_ioctl_pool_cfg *pool_cfg;
4219 size_t total = sizeof(*config) + sizeof(*pool_cfg);
4223 if (config->cfg_hdr.ioc_len < total)
4226 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
4228 mutex_lock(&the_lnet.ln_api_mutex);
4229 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
4230 mutex_unlock(&the_lnet.ln_api_mutex);
4234 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
4235 struct lnet_ioctl_local_ni_hstats *stats = arg;
4237 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
4240 mutex_lock(&the_lnet.ln_api_mutex);
4241 rc = lnet_get_local_ni_hstats(stats);
4242 mutex_unlock(&the_lnet.ln_api_mutex);
4247 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
4248 struct lnet_ioctl_recovery_list *list = arg;
4249 if (list->rlst_hdr.ioc_len < sizeof(*list))
4252 mutex_lock(&the_lnet.ln_api_mutex);
4253 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
4254 rc = lnet_get_local_ni_recovery_list(list);
4256 rc = lnet_get_peer_ni_recovery_list(list);
4257 mutex_unlock(&the_lnet.ln_api_mutex);
4261 case IOC_LIBCFS_ADD_PEER_NI: {
4262 struct lnet_ioctl_peer_cfg *cfg = arg;
4263 struct lnet_nid prim_nid;
4265 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4268 mutex_lock(&the_lnet.ln_api_mutex);
4269 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid);
4270 lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid);
4271 rc = lnet_add_peer_ni(&prim_nid, &nid, cfg->prcfg_mr, false);
4272 mutex_unlock(&the_lnet.ln_api_mutex);
4276 case IOC_LIBCFS_DEL_PEER_NI: {
4277 struct lnet_ioctl_peer_cfg *cfg = arg;
4278 struct lnet_nid prim_nid;
4280 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4283 mutex_lock(&the_lnet.ln_api_mutex);
4284 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid);
4285 lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid);
4286 rc = lnet_del_peer_ni(&prim_nid,
4288 mutex_unlock(&the_lnet.ln_api_mutex);
4292 case IOC_LIBCFS_GET_PEER_INFO: {
4293 struct lnet_ioctl_peer *peer_info = arg;
4295 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
4298 mutex_lock(&the_lnet.ln_api_mutex);
4299 rc = lnet_get_peer_ni_info(
4300 peer_info->pr_count,
4302 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
4303 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
4304 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
4305 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
4306 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
4307 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
4308 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
4309 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
4310 mutex_unlock(&the_lnet.ln_api_mutex);
4314 case IOC_LIBCFS_GET_PEER_NI: {
4315 struct lnet_ioctl_peer_cfg *cfg = arg;
4317 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4320 mutex_lock(&the_lnet.ln_api_mutex);
4321 rc = lnet_get_peer_info(cfg,
4322 (void __user *)cfg->prcfg_bulk);
4323 mutex_unlock(&the_lnet.ln_api_mutex);
4327 case IOC_LIBCFS_GET_PEER_LIST: {
4328 struct lnet_ioctl_peer_cfg *cfg = arg;
4330 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
4333 mutex_lock(&the_lnet.ln_api_mutex);
4334 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
4335 (struct lnet_process_id __user *)cfg->prcfg_bulk);
4336 mutex_unlock(&the_lnet.ln_api_mutex);
4340 case IOC_LIBCFS_SET_HEALHV: {
4341 struct lnet_ioctl_reset_health_cfg *cfg = arg;
4343 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
4345 if (cfg->rh_value < 0 ||
4346 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
4347 value = LNET_MAX_HEALTH_VALUE;
4349 value = cfg->rh_value;
4350 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
4351 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
4352 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
4353 mutex_lock(&the_lnet.ln_api_mutex);
4354 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
4355 lnet_ni_set_healthv(cfg->rh_nid, value,
4358 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
4360 mutex_unlock(&the_lnet.ln_api_mutex);
4364 case IOC_LIBCFS_SET_CONNS_PER_PEER: {
4365 struct lnet_ioctl_reset_conns_per_peer_cfg *cfg = arg;
4368 if (cfg->rcpp_hdr.ioc_len < sizeof(*cfg))
4370 if (cfg->rcpp_value < 0)
4373 value = cfg->rcpp_value;
4375 "Setting conns_per_peer to %d for %s. all = %d\n",
4376 value, libcfs_nid2str(cfg->rcpp_nid), cfg->rcpp_all);
4377 mutex_lock(&the_lnet.ln_api_mutex);
4378 lnet_ni_set_conns_per_peer(cfg->rcpp_nid, value, cfg->rcpp_all);
4379 mutex_unlock(&the_lnet.ln_api_mutex);
4383 case IOC_LIBCFS_NOTIFY_ROUTER: {
4384 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
4386 /* The deadline passed in by the user should be some time in
4387 * seconds in the future since the UNIX epoch. We have to map
4388 * that deadline to the wall clock.
4390 deadline += ktime_get_seconds();
4391 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, false,
4395 case IOC_LIBCFS_LNET_DIST:
4396 lnet_nid4_to_nid(data->ioc_nid, &nid);
4397 rc = LNetDist(&nid, &nid, &data->ioc_u32[1]);
4398 if (rc < 0 && rc != -EHOSTUNREACH)
4401 data->ioc_nid = lnet_nid_to_nid4(&nid);
4402 data->ioc_u32[0] = rc;
4405 case IOC_LIBCFS_TESTPROTOCOMPAT:
4406 the_lnet.ln_testprotocompat = data->ioc_flags;
4409 case IOC_LIBCFS_LNET_FAULT:
4410 return lnet_fault_ctl(data->ioc_flags, data);
4412 case IOC_LIBCFS_PING: {
4413 signed long timeout;
4415 id4.nid = data->ioc_nid;
4416 id4.pid = data->ioc_u32[0];
4418 /* If timeout is <= 0 or too large then use the default of 3 minutes */
4419 if (((s32)data->ioc_u32[1] <= 0) ||
4420 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4421 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4423 timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
4425 rc = lnet_ping(id4, &LNET_ANY_NID, timeout, data->ioc_pbuf1,
4426 data->ioc_plen1 / sizeof(struct lnet_process_id));
4431 data->ioc_count = rc;
4435 case IOC_LIBCFS_PING_PEER: {
4436 struct lnet_ioctl_ping_data *ping = arg;
4437 struct lnet_nid src_nid = LNET_ANY_NID;
4438 struct lnet_peer *lp;
4439 signed long timeout;
4441 /* Check if the supplied ping data supports source nid
4442 * NB: This check is sufficient if lnet_ioctl_ping_data has
4443 * additional fields added, but if they are re-ordered or
4444 * fields removed then this will break. It is expected that
4445 * these ioctls will be replaced with netlink implementation, so
4446 * it is probably not worth coming up with a more robust version
4447 * compatibility scheme.
4449 if (ping->ping_hdr.ioc_len >= sizeof(struct lnet_ioctl_ping_data))
4450 lnet_nid4_to_nid(ping->ping_src, &src_nid);
4452 /* If timeout is <= 0 or too large then use the default of 3 minutes */
4453 if (((s32)ping->op_param) <= 0 ||
4454 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
4455 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
4457 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
4459 rc = lnet_ping(ping->ping_id, &src_nid, timeout,
4465 mutex_lock(&the_lnet.ln_api_mutex);
4466 lnet_nid4_to_nid(ping->ping_id.nid, &nid);
4467 lp = lnet_find_peer(&nid);
4470 lnet_nid_to_nid4(&lp->lp_primary_nid);
4471 ping->mr_info = lnet_peer_is_multi_rail(lp);
4472 lnet_peer_decref_locked(lp);
4474 mutex_unlock(&the_lnet.ln_api_mutex);
4476 ping->ping_count = rc;
4480 case IOC_LIBCFS_DISCOVER: {
4481 struct lnet_ioctl_ping_data *discover = arg;
4482 struct lnet_peer *lp;
4484 rc = lnet_discover(discover->ping_id, discover->op_param,
4486 discover->ping_count);
4490 mutex_lock(&the_lnet.ln_api_mutex);
4491 lnet_nid4_to_nid(discover->ping_id.nid, &nid);
4492 lp = lnet_find_peer(&nid);
4494 discover->ping_id.nid =
4495 lnet_nid_to_nid4(&lp->lp_primary_nid);
4496 discover->mr_info = lnet_peer_is_multi_rail(lp);
4497 lnet_peer_decref_locked(lp);
4499 mutex_unlock(&the_lnet.ln_api_mutex);
4501 discover->ping_count = rc;
4505 case IOC_LIBCFS_ADD_UDSP: {
4506 struct lnet_ioctl_udsp *ioc_udsp = arg;
4507 __u32 bulk_size = ioc_udsp->iou_hdr.ioc_len;
4509 mutex_lock(&the_lnet.ln_api_mutex);
4510 rc = lnet_udsp_demarshal_add(arg, bulk_size);
4512 rc = lnet_udsp_apply_policies(NULL, false);
4513 CDEBUG(D_NET, "policy application returned %d\n", rc);
4516 mutex_unlock(&the_lnet.ln_api_mutex);
4521 case IOC_LIBCFS_DEL_UDSP: {
4522 struct lnet_ioctl_udsp *ioc_udsp = arg;
4523 int idx = ioc_udsp->iou_idx;
4525 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4528 mutex_lock(&the_lnet.ln_api_mutex);
4529 rc = lnet_udsp_del_policy(idx);
4531 rc = lnet_udsp_apply_policies(NULL, false);
4532 CDEBUG(D_NET, "policy re-application returned %d\n",
4536 mutex_unlock(&the_lnet.ln_api_mutex);
4541 case IOC_LIBCFS_GET_UDSP_SIZE: {
4542 struct lnet_ioctl_udsp *ioc_udsp = arg;
4543 struct lnet_udsp *udsp;
4545 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4550 mutex_lock(&the_lnet.ln_api_mutex);
4551 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4555 /* on input, iou_idx holds the index of the UDSP
4556 * to get the size of; on output, iou_idx will
4557 * hold the size of the UDSP found at the passed
4560 ioc_udsp->iou_idx = lnet_get_udsp_size(udsp);
4561 if (ioc_udsp->iou_idx < 0)
4564 mutex_unlock(&the_lnet.ln_api_mutex);
4569 case IOC_LIBCFS_GET_UDSP: {
4570 struct lnet_ioctl_udsp *ioc_udsp = arg;
4571 struct lnet_udsp *udsp;
4573 if (ioc_udsp->iou_hdr.ioc_len < sizeof(*ioc_udsp))
4578 mutex_lock(&the_lnet.ln_api_mutex);
4579 udsp = lnet_udsp_get_policy(ioc_udsp->iou_idx);
4583 rc = lnet_udsp_marshal(udsp, ioc_udsp);
4584 mutex_unlock(&the_lnet.ln_api_mutex);
4589 case IOC_LIBCFS_GET_CONST_UDSP_INFO: {
4590 struct lnet_ioctl_construct_udsp_info *info = arg;
4592 if (info->cud_hdr.ioc_len < sizeof(*info))
4595 CDEBUG(D_NET, "GET_UDSP_INFO for %s\n",
4596 libcfs_nid2str(info->cud_nid));
4598 mutex_lock(&the_lnet.ln_api_mutex);
4599 lnet_udsp_get_construct_info(info);
4600 mutex_unlock(&the_lnet.ln_api_mutex);
4606 ni = lnet_net2ni_addref(data->ioc_net);
4610 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
4613 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
4620 EXPORT_SYMBOL(LNetCtl);
4622 void LNetDebugPeer(struct lnet_processid *id)
4624 lnet_debug_peer(lnet_nid_to_nid4(&id->nid));
4626 EXPORT_SYMBOL(LNetDebugPeer);
4629 * Determine if the specified peer \a nid is on the local node.
4631 * \param nid peer nid to check
4633 * \retval true If peer NID is on the local node.
4634 * \retval false If peer NID is not on the local node.
4636 bool LNetIsPeerLocal(struct lnet_nid *nid)
4638 struct lnet_net *net;
4642 cpt = lnet_net_lock_current();
4643 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4644 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4645 if (nid_same(&ni->ni_nid, nid)) {
4646 lnet_net_unlock(cpt);
4651 lnet_net_unlock(cpt);
4655 EXPORT_SYMBOL(LNetIsPeerLocal);
4658 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
4659 * Note that all interfaces share a same PID, as requested by LNetNIInit().
4661 * \param index Index of the interface to look up.
4662 * \param id On successful return, this location will hold the
4663 * struct lnet_process_id ID of the interface.
4665 * \retval 0 If an interface exists at \a index.
4666 * \retval -ENOENT If no interface has been found.
4669 LNetGetId(unsigned int index, struct lnet_processid *id)
4672 struct lnet_net *net;
4676 LASSERT(the_lnet.ln_refcount > 0);
4678 cpt = lnet_net_lock_current();
4680 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4681 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4682 if (!nid_is_nid4(&ni->ni_nid))
4683 /* FIXME this needs to be handled */
4688 id->nid = ni->ni_nid;
4689 id->pid = the_lnet.ln_pid;
4695 lnet_net_unlock(cpt);
4698 EXPORT_SYMBOL(LNetGetId);
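/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * the usual way to enumerate local interfaces is to call LNetGetId() with
 * increasing indices until it returns -ENOENT. The function name below is
 * invented.
 */
#if 0
static void example_list_local_ids(void)
{
	struct lnet_processid id;
	unsigned int i;

	for (i = 0; LNetGetId(i, &id) == 0; i++)
		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_idstr(&id));
}
#endif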
4703 struct lnet_handle_md mdh;
4704 struct completion completion;
4708 lnet_ping_event_handler(struct lnet_event *event)
4710 struct ping_data *pd = event->md_user_ptr;
4712 CDEBUG(D_NET, "ping event (%d %d)%s\n",
4713 event->type, event->status,
4714 event->unlinked ? " unlinked" : "");
4716 if (event->status) {
4718 pd->rc = event->status;
4719 } else if (event->type == LNET_EVENT_REPLY) {
4721 pd->rc = event->mlength;
4723 if (event->unlinked)
4724 complete(&pd->completion);
4727 static int lnet_ping(struct lnet_process_id id4, struct lnet_nid *src_nid,
4728 signed long timeout, struct lnet_process_id __user *ids,
4731 struct lnet_md md = { NULL };
4732 struct ping_data pd = { 0 };
4733 struct lnet_ping_buffer *pbuf;
4734 struct lnet_process_id tmpid;
4735 struct lnet_processid id;
4741 /* n_ids limit is arbitrary */
4742 if (n_ids <= 0 || id4.nid == LNET_NID_ANY)
4746 * If the user buffer has more space than lnet_interfaces_max,
4747 * then only fill it up to lnet_interfaces_max.
4749 if (n_ids > lnet_interfaces_max)
4750 n_ids = lnet_interfaces_max;
4752 if (id4.pid == LNET_PID_ANY)
4753 id4.pid = LNET_PID_LUSTRE;
4755 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
4759 /* initialize md content */
4760 md.start = &pbuf->pb_info;
4761 md.length = LNET_PING_INFO_SIZE(n_ids);
4762 md.threshold = 2; /* GET/REPLY */
4764 md.options = LNET_MD_TRUNCATE;
4766 md.handler = lnet_ping_event_handler;
4768 init_completion(&pd.completion);
4770 rc = LNetMDBind(&md, LNET_UNLINK, &pd.mdh);
4772 CERROR("Can't bind MD: %d\n", rc);
4773 goto fail_ping_buffer_decref;
4776 lnet_pid4_to_pid(id4, &id);
4777 rc = LNetGet(src_nid, pd.mdh, &id, LNET_RESERVED_PORTAL,
4778 LNET_PROTO_PING_MATCHBITS, 0, false);
4781 /* Don't CERROR; this could be deliberate! */
4782 rc2 = LNetMDUnlink(pd.mdh);
4785 /* NB must wait for the UNLINK event below... */
4788 if (wait_for_completion_timeout(&pd.completion, timeout) == 0) {
4789 /* Ensure completion in finite time... */
4790 LNetMDUnlink(pd.mdh);
4791 wait_for_completion(&pd.completion);
4795 goto fail_ping_buffer_decref;
4799 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
4801 rc = -EPROTO; /* if I can't parse... */
4804 CERROR("%s: ping info too short %d\n",
4805 libcfs_idstr(&id), nob);
4806 goto fail_ping_buffer_decref;
4809 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
4810 lnet_swap_pinginfo(pbuf);
4811 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
4812 CERROR("%s: Unexpected magic %08x\n",
4813 libcfs_idstr(&id), pbuf->pb_info.pi_magic);
4814 goto fail_ping_buffer_decref;
4817 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
4818 CERROR("%s: ping w/o NI status: 0x%x\n",
4819 libcfs_idstr(&id), pbuf->pb_info.pi_features);
4820 goto fail_ping_buffer_decref;
4823 if (nob < LNET_PING_INFO_SIZE(0)) {
4824 CERROR("%s: Short reply %d(%d min)\n",
4826 nob, (int)LNET_PING_INFO_SIZE(0));
4827 goto fail_ping_buffer_decref;
4830 if (pbuf->pb_info.pi_nnis < n_ids)
4831 n_ids = pbuf->pb_info.pi_nnis;
4833 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
4834 CERROR("%s: Short reply %d(%d expected)\n",
4836 nob, (int)LNET_PING_INFO_SIZE(n_ids));
4837 goto fail_ping_buffer_decref;
4840 rc = -EFAULT; /* if I segv in copy_to_user()... */
4842 memset(&tmpid, 0, sizeof(tmpid));
4843 for (i = 0; i < n_ids; i++) {
4844 tmpid.pid = pbuf->pb_info.pi_pid;
4845 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
4846 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
4847 goto fail_ping_buffer_decref;
4849 rc = pbuf->pb_info.pi_nnis;
4851 fail_ping_buffer_decref:
4852 lnet_ping_buffer_decref(pbuf);
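/*
 * Illustrative sketch only (kept out of the build with #if 0) of the
 * pattern lnet_ping() relies on: bind an MD carrying a handler and a
 * completion, issue an LNetGet(), and let the handler signal the
 * completion when the MD is unlinked.  All example_* names are
 * hypothetical; the LNet calls mirror those used above.
 */
#if 0
struct example_ctx {
	struct lnet_handle_md	mdh;
	struct completion	done;
	int			rc;
};

static void example_handler(struct lnet_event *ev)
{
	struct example_ctx *ctx = ev->md_user_ptr;

	if (ev->status)
		ctx->rc = ev->status;
	/* the unlink event is the last one delivered for this MD */
	if (ev->unlinked)
		complete(&ctx->done);
}

static int example_get(struct lnet_processid *target, void *buf,
		       unsigned int len)
{
	struct example_ctx ctx = { .rc = 0 };
	struct lnet_md md = {
		.start		= buf,
		.length		= len,
		.threshold	= 2,		/* GET + REPLY */
		.options	= LNET_MD_TRUNCATE,
		.user_ptr	= &ctx,
		.handler	= example_handler,
	};
	int rc;

	init_completion(&ctx.done);

	rc = LNetMDBind(&md, LNET_UNLINK, &ctx.mdh);
	if (rc)
		return rc;

	rc = LNetGet(NULL, ctx.mdh, target, LNET_RESERVED_PORTAL,
		     LNET_PROTO_PING_MATCHBITS, 0, false);
	if (rc) {
		/* must still wait for the UNLINK event */
		LNetMDUnlink(ctx.mdh);
		wait_for_completion(&ctx.done);
		return rc;
	}

	if (!wait_for_completion_timeout(&ctx.done, cfs_time_seconds(30))) {
		/* ensure completion in finite time */
		LNetMDUnlink(ctx.mdh);
		wait_for_completion(&ctx.done);
		return -ETIMEDOUT;
	}

	return ctx.rc;
}
#endif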
4857 lnet_discover(struct lnet_process_id id4, __u32 force,
4858 struct lnet_process_id __user *ids, int n_ids)
4860 struct lnet_peer_ni *lpni;
4861 struct lnet_peer_ni *p;
4862 struct lnet_peer *lp;
4863 struct lnet_process_id *buf;
4864 struct lnet_processid id;
4870 id4.nid == LNET_NID_ANY)
4873 lnet_pid4_to_pid(id4, &id);
4874 if (id.pid == LNET_PID_ANY)
4875 id.pid = LNET_PID_LUSTRE;
4878 * If the user buffer has more space than lnet_interfaces_max,
4879 * then only fill it up to lnet_interfaces_max.
4881 if (n_ids > lnet_interfaces_max)
4882 n_ids = lnet_interfaces_max;
4884 CFS_ALLOC_PTR_ARRAY(buf, n_ids);
4888 cpt = lnet_net_lock_current();
4889 lpni = lnet_peerni_by_nid_locked(&id.nid, NULL, cpt);
4896 * Clearing the NIDS_UPTODATE flag ensures the peer will
4897 * be discovered, provided discovery has not been disabled.
4899 lp = lpni->lpni_peer_net->lpn_peer;
4900 spin_lock(&lp->lp_lock);
4901 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
4902 /* If the force flag is set, force a PING and PUSH as well. */
4904 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
4905 spin_unlock(&lp->lp_lock);
4906 rc = lnet_discover_peer_locked(lpni, cpt, true);
4910 /* The lpni (or lp) for this NID may have changed and our ref is
4911 * the only thing keeping the old one around. Release the ref
4912 * and look up the lpni again. */
4914 lnet_peer_ni_decref_locked(lpni);
4915 lpni = lnet_peer_ni_find_locked(&id.nid);
4920 lp = lpni->lpni_peer_net->lpn_peer;
4924 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
4925 buf[i].pid = id.pid;
4926 buf[i].nid = lnet_nid_to_nid4(&p->lpni_nid);
4933 lnet_peer_ni_decref_locked(lpni);
4935 lnet_net_unlock(cpt);
4938 if (copy_to_user(ids, buf, rc * sizeof(*buf)))
4940 CFS_FREE_PTR_ARRAY(buf, n_ids);
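/*
 * Illustrative sketch only (kept out of the build with #if 0): the peer
 * state manipulation lnet_discover() performs when "force" is set, pulled
 * out as a hypothetical helper to show the locking and flag updates used
 * above.
 */
#if 0
static void example_force_rediscovery(struct lnet_peer *lp)
{
	spin_lock(&lp->lp_lock);
	/* the peer's NID list is no longer considered up to date ... */
	lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
	/* ... and both a PING and a PUSH are forced on the next pass */
	lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
	spin_unlock(&lp->lp_lock);
}
#endif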
4946 * Retrieve peer discovery status.
4948 * \retval 1 if lnet_peer_discovery_disabled is 0
4949 * \retval 0 if lnet_peer_discovery_disabled is 1
4952 LNetGetPeerDiscoveryStatus(void)
4954 return !lnet_peer_discovery_disabled;
4956 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);
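/*
 * Illustrative sketch only (kept out of the build with #if 0): a caller
 * might consult LNetGetPeerDiscoveryStatus() before expecting Multi-Rail
 * peer information to be populated.  example_discovery_enabled() is a
 * hypothetical name.
 */
#if 0
static bool example_discovery_enabled(void)
{
	/* returns 1 when discovery is enabled, 0 when it is disabled */
	return LNetGetPeerDiscoveryStatus() != 0;
}
#endif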