/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/ktime.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>

#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE

/*
 * Initialize ln_api_mutex statically, since it needs to be used in the
 * discovery_set callback. That module parameter callback can be called
 * before module init completes, so the mutex must be ready for use by
 * then.
 */
struct lnet the_lnet = {
	.ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
};				/* THE state of the network */
EXPORT_SYMBOL(the_lnet);
static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");
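
/*
 * For illustration only: these string parameters are normally supplied
 * at module load time, e.g. via a modprobe configuration file. The
 * interface names and NIDs below are hypothetical:
 *
 *	options lnet networks="tcp0(eth0)" routes="o2ib 192.168.0.10@tcp0"
 */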
static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding = false;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
		 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");

unsigned int lnet_numa_range = 0;
module_param(lnet_numa_range, uint, 0444);
MODULE_PARM_DESC(lnet_numa_range,
		 "NUMA range to consider during Multi-Rail selection");

/*
 * lnet_health_sensitivity determines by how much we decrement the health
 * value on a send error. The value defaults to 100, which means interface
 * health is decremented by 100 points on every send failure.
 */
unsigned int lnet_health_sensitivity = 100;
static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_health_sensitivity = {
	.set = sensitivity_set,
	.get = param_get_int,
};
#define param_check_health_sensitivity(name, p) \
		__param_check(name, p, int)
module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
		  &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_health_sensitivity,
		"Value to decrement the health value by on error");
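
/*
 * Worked example (numbers illustrative): with the default sensitivity
 * of 100 and a starting health of LNET_MAX_HEALTH_VALUE (1000 in
 * current trees), an interface's health reaches 0 after 10 consecutive
 * send failures, after which the selection algorithm avoids it until
 * the recovery machinery (see lnet_recovery_interval below) has pinged
 * it back to health.
 */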
/*
 * lnet_recovery_interval determines how often we should perform recovery
 * on unhealthy interfaces.
 */
unsigned int lnet_recovery_interval = 1;
static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_recovery_interval = {
	.set = recovery_interval_set,
	.get = param_get_int,
};
#define param_check_recovery_interval(name, p) \
		__param_check(name, p, int)
module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
		  &lnet_recovery_interval, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_recovery_interval,
		"Interval to recover unhealthy interfaces in seconds");

static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_interfaces_max = {
	.set = intf_max_set,
	.get = param_get_int,
};

#define param_check_interfaces_max(name, p) \
		__param_check(name, p, int)

#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_interfaces_max, interfaces_max, 0644);
#else
module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
		  &param_ops_interfaces_max, 0644);
#endif
MODULE_PARM_DESC(lnet_interfaces_max,
		"Maximum number of interfaces in a node.");

unsigned lnet_peer_discovery_disabled = 0;
static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_discovery_disabled = {
	.set = discovery_set,
	.get = param_get_int,
};

#define param_check_discovery_disabled(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
#else
module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
		  &param_ops_discovery_disabled, 0644);
#endif
MODULE_PARM_DESC(lnet_peer_discovery_disabled,
		"Set to 1 to disable peer discovery on this node.");
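
/*
 * Because this parameter is writable at runtime (mode 0644), discovery
 * can also be toggled on a live node through the standard module
 * parameter sysfs interface, e.g.:
 *
 *	echo 1 > /sys/module/lnet/parameters/lnet_peer_discovery_disabled
 *
 * which invokes discovery_set() below.
 */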
unsigned int lnet_drop_asym_route;
static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);

static struct kernel_param_ops param_ops_drop_asym_route = {
	.set = drop_asym_route_set,
	.get = param_get_int,
};

#define param_check_drop_asym_route(name, p) \
	__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(lnet_drop_asym_route, drop_asym_route, 0644);
#else
module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
		  &param_ops_drop_asym_route, 0644);
#endif
MODULE_PARM_DESC(lnet_drop_asym_route,
		 "Set to 1 to drop asymmetrical route messages.");

#define LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT 50
#define LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT 10

unsigned lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_transaction_timeout = {
	.set = transaction_to_set,
	.get = param_get_int,
};
#define param_check_transaction_timeout(name, p) \
		__param_check(name, p, int)
module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
		  &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_transaction_timeout,
		"Maximum number of seconds to wait for a peer response.");

#define LNET_RETRY_COUNT_HEALTH_DEFAULT 3
unsigned lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
#ifdef HAVE_KERNEL_PARAM_OPS
static struct kernel_param_ops param_ops_retry_count = {
	.set = retry_count_set,
	.get = param_get_int,
};
#define param_check_retry_count(name, p) \
		__param_check(name, p, int)
module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
#else
module_param_call(lnet_retry_count, retry_count_set, param_get_int,
		  &lnet_retry_count, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(lnet_retry_count,
		 "Maximum number of times to retry transmitting a message");

unsigned lnet_lnd_timeout = LNET_LND_DEFAULT_TIMEOUT;
unsigned int lnet_current_net_count;

/*
 * This sequence number keeps track of how many times DLC was used to
 * update the local NIs. It is incremented when a NI is added or
 * removed and checked when sending a message to determine if there is
 * a need to re-run the selection algorithm. See lnet_select_pathway()
 * for more details on its usage.
 */
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
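
/*
 * Sketch of the intended usage pattern (the real callers live in the
 * send path, not in this file): sample the sequence number through an
 * accessor such as lnet_get_dlc_seq_locked(), make a selection, then
 * re-select if a NI was added or removed in the meantime:
 *
 *	seq = lnet_get_dlc_seq_locked();
 *	... pick a local NI / peer NI pair ...
 *	if (seq != lnet_get_dlc_seq_locked())
 *		... re-run the selection algorithm ...
 */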
static int lnet_ping(struct lnet_process_id id, signed long timeout,
		     struct lnet_process_id __user *ids, int n_ids);

static int lnet_discover(struct lnet_process_id id, __u32 force,
			 struct lnet_process_id __user *ids, int n_ids);

static int
sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *sensitivity = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value > LNET_MAX_HEALTH_VALUE) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid health value. Maximum: %d value = %lu\n",
		       LNET_MAX_HEALTH_VALUE, value);
		return -EINVAL;
	}

	/*
	 * if we're turning on health then use the health timeout
	 * defaults.
	 */
	if (*sensitivity == 0 && value != 0) {
		lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
		lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
	/*
	 * if we're turning off health then use the no health timeout
	 * default.
	 */
	} else if (*sensitivity != 0 && value == 0) {
		lnet_transaction_timeout =
			LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT;
		lnet_retry_count = 0;
	}

	*sensitivity = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
static int
recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *interval = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_recovery_interval'\n");
		return rc;
	}

	if (value < 1) {
		CERROR("lnet_recovery_interval must be at least 1 second\n");
		return -EINVAL;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	*interval = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *discovery = (unsigned *)kp->arg;
	unsigned long value;
	struct lnet_ping_buffer *pbuf;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
		return rc;
	}

	value = (value) ? 1 : 0;

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value == *discovery) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*discovery = value;

	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	/* tell peers that discovery setting has changed */
	lnet_net_lock(LNET_LOCK_EX);
	pbuf = the_lnet.ln_ping_target;
	if (value)
		pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
	else
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
	lnet_net_unlock(LNET_LOCK_EX);

	lnet_push_update_to_peers(1);

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned int *drop_asym_route = (unsigned int *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_drop_asym_route'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value == *drop_asym_route) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*drop_asym_route = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
static int
transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *transaction_to = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (value < lnet_retry_count || value == 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid value for lnet_transaction_timeout (%lu). Has to be greater than lnet_retry_count (%u)\n",
		       value, lnet_retry_count);
		return -EINVAL;
	}

	if (value == *transaction_to) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	*transaction_to = value;
	if (lnet_retry_count == 0)
		lnet_lnd_timeout = value;
	else
		lnet_lnd_timeout = value / lnet_retry_count;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
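
/*
 * Worked example of the update above: with lnet_transaction_timeout at
 * 10 seconds and lnet_retry_count at 3, each send attempt is given
 * lnet_lnd_timeout = 10 / 3 = 3 seconds (integer division) at the LND
 * level; with retries disabled the single attempt gets the full 10
 * seconds.
 */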
static int
retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *retry_count = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
		return rc;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	if (lnet_health_sensitivity == 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Can not set retry_count when health feature is turned off\n");
		return -EINVAL;
	}

	if (value > lnet_transaction_timeout) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		CERROR("Invalid value for lnet_retry_count (%lu). Has to be smaller than lnet_transaction_timeout (%u)\n",
		       value, lnet_transaction_timeout);
		return -EINVAL;
	}

	*retry_count = value;

	if (value == 0)
		lnet_lnd_timeout = lnet_transaction_timeout;
	else
		lnet_lnd_timeout = lnet_transaction_timeout / value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

static int
intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int value, rc;

	rc = kstrtoint(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
		return rc;
	}

	if (value < LNET_INTERFACES_MIN) {
		CWARN("max interfaces provided are too small, setting to %d\n",
		      LNET_INTERFACES_MAX_DEFAULT);
		value = LNET_INTERFACES_MAX_DEFAULT;
	}

	*(int *)kp->arg = value;

	return 0;
}

static char *
lnet_get_routes(void)
{
	return routes;
}

static char *
lnet_get_networks(void)
{
	char *nets;
	int rc;

	if (*networks != 0 && *ip2nets != 0) {
		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
				   "'ip2nets' but not both at once\n");
		return NULL;
	}

	if (*ip2nets != 0) {
		rc = lnet_parse_ip2nets(&nets, ip2nets);
		return (rc == 0) ? nets : NULL;
	}

	if (*networks != 0)
		return networks;

	return "tcp";
}
static void
lnet_init_locks(void)
{
	spin_lock_init(&the_lnet.ln_eq_wait_lock);
	spin_lock_init(&the_lnet.ln_msg_resend_lock);
	init_waitqueue_head(&the_lnet.ln_eq_waitq);
	init_completion(&the_lnet.ln_mt_wait_complete);
	mutex_init(&the_lnet.ln_lnd_mutex);
}

struct kmem_cache *lnet_mes_cachep;	   /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
					    * MDs kmem_cache */

static int
lnet_descriptor_setup(void)
{
	/* create specific kmem_cache for MEs and small MDs (i.e., originally
	 * allocated in <size-xxx> kmem_cache).
	 */
	lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
					    0, 0, NULL);
	if (!lnet_mes_cachep)
		return -ENOMEM;

	lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
						  LNET_SMALL_MD_SIZE, 0, 0,
						  NULL);
	if (!lnet_small_mds_cachep)
		return -ENOMEM;

	return 0;
}

static void
lnet_descriptor_cleanup(void)
{
	if (lnet_small_mds_cachep) {
		kmem_cache_destroy(lnet_small_mds_cachep);
		lnet_small_mds_cachep = NULL;
	}

	if (lnet_mes_cachep) {
		kmem_cache_destroy(lnet_mes_cachep);
		lnet_mes_cachep = NULL;
	}
}

static int
lnet_create_remote_nets_table(void)
{
	int i;
	struct list_head *hash;

	LASSERT(the_lnet.ln_remote_nets_hash == NULL);
	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
	LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
	if (hash == NULL) {
		CERROR("Failed to create remote nets hash table\n");
		return -ENOMEM;
	}

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		INIT_LIST_HEAD(&hash[i]);
	the_lnet.ln_remote_nets_hash = hash;
	return 0;
}

static void
lnet_destroy_remote_nets_table(void)
{
	int i;

	if (the_lnet.ln_remote_nets_hash == NULL)
		return;

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));

	LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
		    LNET_REMOTE_NETS_HASH_SIZE *
		    sizeof(the_lnet.ln_remote_nets_hash[0]));
	the_lnet.ln_remote_nets_hash = NULL;
}

static void
lnet_destroy_locks(void)
{
	if (the_lnet.ln_res_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_res_lock);
		the_lnet.ln_res_lock = NULL;
	}

	if (the_lnet.ln_net_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_net_lock);
		the_lnet.ln_net_lock = NULL;
	}
}

static int
lnet_create_locks(void)
{
	lnet_init_locks();

	the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_res_lock == NULL)
		goto failed;

	the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_net_lock == NULL)
		goto failed;

	return 0;

failed:
	lnet_destroy_locks();
	return -ENOMEM;
}
static void lnet_assert_wire_constants(void)
{
	/* Wire protocol assertions generated by 'wirecheck'
	 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
	 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */

	/* Constants... */
	BUILD_BUG_ON(LNET_PROTO_TCP_MAGIC != 0xeebc0ded);
	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MAJOR != 1);
	BUILD_BUG_ON(LNET_PROTO_TCP_VERSION_MINOR != 0);
	BUILD_BUG_ON(LNET_MSG_ACK != 0);
	BUILD_BUG_ON(LNET_MSG_PUT != 1);
	BUILD_BUG_ON(LNET_MSG_GET != 2);
	BUILD_BUG_ON(LNET_MSG_REPLY != 3);
	BUILD_BUG_ON(LNET_MSG_HELLO != 4);

	/* Checks for struct lnet_handle_wire */
	BUILD_BUG_ON((int)sizeof(struct lnet_handle_wire) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
				   wh_interface_cookie) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_handle_wire,
				   wh_object_cookie) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) != 8);

	/* Checks for struct lnet_magicversion */
	BUILD_BUG_ON((int)sizeof(struct lnet_magicversion) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion, version_major) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_major) != 2);
	BUILD_BUG_ON((int)offsetof(struct lnet_magicversion,
				   version_minor) != 6);
	BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2);

	/* Checks for struct lnet_hdr */
	BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40);

	/* Ack */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4);

	/* Put */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4);

	/* Get */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4);

	/* Reply */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16);

	/* Hello */
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) != 32);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40);
	BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);

	/* Checks for struct lnet_ni_status and related constants */
	BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000);
	BUILD_BUG_ON(LNET_NI_STATUS_UP != 0x15aac0de);
	BUILD_BUG_ON(LNET_NI_STATUS_DOWN != 0xdeadface);

	/* Checks for struct lnet_ni_status */
	BUILD_BUG_ON((int)sizeof(struct lnet_ni_status) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_nid) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) != 8);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_status) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_status) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ni_status, ns_unused) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) != 4);

	/* Checks for struct lnet_ping_info and related constants */
	BUILD_BUG_ON(LNET_PROTO_PING_MAGIC != 0x70696E67);
	BUILD_BUG_ON(LNET_PING_FEAT_INVAL != 0);
	BUILD_BUG_ON(LNET_PING_FEAT_BASE != 1);
	BUILD_BUG_ON(LNET_PING_FEAT_NI_STATUS != 2);
	BUILD_BUG_ON(LNET_PING_FEAT_RTE_DISABLED != 4);
	BUILD_BUG_ON(LNET_PING_FEAT_MULTI_RAIL != 8);
	BUILD_BUG_ON(LNET_PING_FEAT_DISCOVERY != 16);
	BUILD_BUG_ON(LNET_PING_FEAT_BITS != 31);

	/* Checks for struct lnet_ping_info */
	BUILD_BUG_ON((int)sizeof(struct lnet_ping_info) != 16);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_magic) != 0);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_features) != 4);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_features) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_pid) != 8);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_nnis) != 12);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) != 4);
	BUILD_BUG_ON((int)offsetof(struct lnet_ping_info, pi_ni) != 16);
	BUILD_BUG_ON((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) != 0);
}
static const struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
{
	const struct lnet_lnd *lnd;

	/* holding lnd mutex */
	if (type >= NUM_LNDS)
		return NULL;
	lnd = the_lnet.ln_lnds[type];
	LASSERT(!lnd || lnd->lnd_type == type);

	return lnd;
}

unsigned int
lnet_get_lnd_timeout(void)
{
	return lnet_lnd_timeout;
}
EXPORT_SYMBOL(lnet_get_lnd_timeout);

void
lnet_register_lnd(const struct lnet_lnd *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);

	the_lnet.ln_lnds[lnd->lnd_type] = lnd;

	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_register_lnd);

void
lnet_unregister_lnd(const struct lnet_lnd *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);

	the_lnet.ln_lnds[lnd->lnd_type] = NULL;
	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);

void
lnet_counters_get_common(struct lnet_counters_common *common)
{
	struct lnet_counters *ctr;
	int i;

	memset(common, 0, sizeof(*common));

	lnet_net_lock(LNET_LOCK_EX);

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
		common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
		common->lcc_errors += ctr->lct_common.lcc_errors;
		common->lcc_send_count += ctr->lct_common.lcc_send_count;
		common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
		common->lcc_route_count += ctr->lct_common.lcc_route_count;
		common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
		common->lcc_send_length += ctr->lct_common.lcc_send_length;
		common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
		common->lcc_route_length += ctr->lct_common.lcc_route_length;
		common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
	}
	lnet_net_unlock(LNET_LOCK_EX);
}
EXPORT_SYMBOL(lnet_counters_get_common);

void
lnet_counters_get(struct lnet_counters *counters)
{
	struct lnet_counters *ctr;
	struct lnet_counters_health *health = &counters->lct_health;
	int i;

	memset(counters, 0, sizeof(*counters));

	lnet_counters_get_common(&counters->lct_common);

	lnet_net_lock(LNET_LOCK_EX);

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
		health->lch_resend_count += ctr->lct_health.lch_resend_count;
		health->lch_response_timeout_count +=
				ctr->lct_health.lch_response_timeout_count;
		health->lch_local_interrupt_count +=
				ctr->lct_health.lch_local_interrupt_count;
		health->lch_local_dropped_count +=
				ctr->lct_health.lch_local_dropped_count;
		health->lch_local_aborted_count +=
				ctr->lct_health.lch_local_aborted_count;
		health->lch_local_no_route_count +=
				ctr->lct_health.lch_local_no_route_count;
		health->lch_local_timeout_count +=
				ctr->lct_health.lch_local_timeout_count;
		health->lch_local_error_count +=
				ctr->lct_health.lch_local_error_count;
		health->lch_remote_dropped_count +=
				ctr->lct_health.lch_remote_dropped_count;
		health->lch_remote_error_count +=
				ctr->lct_health.lch_remote_error_count;
		health->lch_remote_timeout_count +=
				ctr->lct_health.lch_remote_timeout_count;
		health->lch_network_timeout_count +=
				ctr->lct_health.lch_network_timeout_count;
	}
	lnet_net_unlock(LNET_LOCK_EX);
}
EXPORT_SYMBOL(lnet_counters_get);

void
lnet_counters_reset(void)
{
	struct lnet_counters *counters;
	int i;

	lnet_net_lock(LNET_LOCK_EX);

	cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
		memset(counters, 0, sizeof(struct lnet_counters));

	lnet_net_unlock(LNET_LOCK_EX);
}
static char *
lnet_res_type2str(int type)
{
	switch (type) {
	default:
		return "unknown";
	case LNET_COOKIE_TYPE_MD:
		return "MD";
	case LNET_COOKIE_TYPE_ME:
		return "ME";
	case LNET_COOKIE_TYPE_EQ:
		return "EQ";
	}
}

static void
lnet_res_container_cleanup(struct lnet_res_container *rec)
{
	int count = 0;

	if (rec->rec_type == 0) /* not set yet, it's uninitialized */
		return;

	while (!list_empty(&rec->rec_active)) {
		struct list_head *e = rec->rec_active.next;

		list_del_init(e);
		if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
			lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));

		} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
			lnet_md_free(list_entry(e, struct lnet_libmd, md_list));

		} else { /* NB: Active MEs should be attached on portals */
			LBUG();
		}
		count++;
	}

	if (count > 0) {
		/* Found alive MDs/MEs/EQs; the user really should unlink
		 * or free all of them before finalizing LNet, but if
		 * someone didn't, we have to recycle the garbage for them */
		CERROR("%d active elements on exit of %s container\n",
		       count, lnet_res_type2str(rec->rec_type));
	}

	if (rec->rec_lh_hash != NULL) {
		LIBCFS_FREE(rec->rec_lh_hash,
			    LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
		rec->rec_lh_hash = NULL;
	}

	rec->rec_type = 0; /* mark it as finalized */
}

static int
lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
{
	int rc = 0;
	int i;

	LASSERT(rec->rec_type == 0);

	rec->rec_type = type;
	INIT_LIST_HEAD(&rec->rec_active);

	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;

	/* Arbitrary choice of hash table size */
	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
	if (rec->rec_lh_hash == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < LNET_LH_HASH_SIZE; i++)
		INIT_LIST_HEAD(&rec->rec_lh_hash[i]);

	return 0;

out:
	CERROR("Failed to setup %s resource container\n",
	       lnet_res_type2str(type));
	lnet_res_container_cleanup(rec);
	return rc;
}

static void
lnet_res_containers_destroy(struct lnet_res_container **recs)
{
	struct lnet_res_container *rec;
	int i;

	cfs_percpt_for_each(rec, i, recs)
		lnet_res_container_cleanup(rec);

	cfs_percpt_free(recs);
}

static struct lnet_res_container **
lnet_res_containers_create(int type)
{
	struct lnet_res_container **recs;
	struct lnet_res_container *rec;
	int rc;
	int i;

	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
	if (recs == NULL) {
		CERROR("Failed to allocate %s resource containers\n",
		       lnet_res_type2str(type));
		return NULL;
	}

	cfs_percpt_for_each(rec, i, recs) {
		rc = lnet_res_container_setup(rec, i, type);
		if (rc != 0) {
			lnet_res_containers_destroy(recs);
			return NULL;
		}
	}

	return recs;
}

struct lnet_libhandle *
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
	/* ALWAYS called with lnet_res_lock held */
	struct list_head *head;
	struct lnet_libhandle *lh;
	unsigned int hash;

	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
		return NULL;

	hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
	head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];

	list_for_each_entry(lh, head, lh_hash_chain) {
		if (lh->lh_cookie == cookie)
			return lh;
	}

	return NULL;
}

void
lnet_res_lh_initialize(struct lnet_res_container *rec,
		       struct lnet_libhandle *lh)
{
	/* ALWAYS called with lnet_res_lock held */
	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
	unsigned int hash;

	lh->lh_cookie = rec->rec_lh_cookie;
	rec->rec_lh_cookie += 1 << ibits;

	hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;

	list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
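
/*
 * Illustration of the cookie layout implied by the two functions above
 * (field widths depend on the LNET_COOKIE_TYPE_BITS and LNET_CPT_BITS
 * configuration):
 *
 *	| counter (incremented by 1 << ibits) | cpt | cookie type |
 *
 * lnet_res_lh_lookup() rejects a cookie whose low type bits do not
 * match the container's type, then uses the counter bits to pick a
 * hash chain.
 */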
static struct list_head **
lnet_create_array_of_queues(void)
{
	struct list_head **qs;
	struct list_head *q;
	int i;

	qs = cfs_percpt_alloc(lnet_cpt_table(),
			      sizeof(struct list_head));
	if (!qs) {
		CERROR("Failed to allocate queues\n");
		return NULL;
	}

	cfs_percpt_for_each(q, i, qs)
		INIT_LIST_HEAD(q);

	return qs;
}

static int lnet_unprepare(void);

static int
lnet_prepare(lnet_pid_t requested_pid)
{
	/* Prepare to bring up the network */
	struct lnet_res_container **recs;
	int rc = 0;

	if (requested_pid == LNET_PID_ANY) {
		/* Don't instantiate LNET just for me */
		return -ENETDOWN;
	}

	LASSERT(the_lnet.ln_refcount == 0);

	the_lnet.ln_routing = 0;

	LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
	the_lnet.ln_pid = requested_pid;

	INIT_LIST_HEAD(&the_lnet.ln_test_peers);
	INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
	INIT_LIST_HEAD(&the_lnet.ln_nets);
	INIT_LIST_HEAD(&the_lnet.ln_routers);
	INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
	INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
	INIT_LIST_HEAD(&the_lnet.ln_dc_request);
	INIT_LIST_HEAD(&the_lnet.ln_dc_working);
	INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
	INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
	INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
	init_waitqueue_head(&the_lnet.ln_dc_waitq);
	LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
	init_completion(&the_lnet.ln_started);

	rc = lnet_descriptor_setup();
	if (rc != 0)
		goto failed;

	rc = lnet_create_remote_nets_table();
	if (rc != 0)
		goto failed;

	/*
	 * NB the interface cookie in wire handles guards against delayed
	 * replies and ACKs appearing valid after reboot.
	 */
	the_lnet.ln_interface_cookie = ktime_get_real_ns();

	the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
						sizeof(struct lnet_counters));
	if (the_lnet.ln_counters == NULL) {
		CERROR("Failed to allocate counters for LNet\n");
		rc = -ENOMEM;
		goto failed;
	}

	rc = lnet_peer_tables_create();
	if (rc != 0)
		goto failed;

	rc = lnet_msg_containers_create();
	if (rc != 0)
		goto failed;

	rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
				      LNET_COOKIE_TYPE_EQ);
	if (rc != 0)
		goto failed;

	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
	if (recs == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	the_lnet.ln_me_containers = recs;

	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
	if (recs == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	the_lnet.ln_md_containers = recs;

	rc = lnet_portals_create();
	if (rc != 0) {
		CERROR("Failed to create portals for LNet: %d\n", rc);
		goto failed;
	}

	the_lnet.ln_mt_zombie_rstqs = lnet_create_array_of_queues();
	if (!the_lnet.ln_mt_zombie_rstqs) {
		rc = -ENOMEM;
		goto failed;
	}

	return 0;

failed:
	lnet_unprepare();
	return rc;
}

static int
lnet_unprepare(void)
{
	int rc;

	/* NB no LNET_LOCK since this is the last reference. All LND instances
	 * have shut down already, so it is safe to unlink and free all
	 * descriptors, even those that appear committed to a network op (eg MD
	 * with non-zero pending count) */

	lnet_fail_nid(LNET_NID_ANY, 0);

	LASSERT(the_lnet.ln_refcount == 0);
	LASSERT(list_empty(&the_lnet.ln_test_peers));
	LASSERT(list_empty(&the_lnet.ln_nets));

	if (the_lnet.ln_mt_zombie_rstqs) {
		lnet_clean_zombie_rstqs();
		the_lnet.ln_mt_zombie_rstqs = NULL;
	}

	if (!LNetEQHandleIsInvalid(the_lnet.ln_mt_eqh)) {
		rc = LNetEQFree(the_lnet.ln_mt_eqh);
		LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
		LASSERT(rc == 0);
	}

	lnet_portals_destroy();

	if (the_lnet.ln_md_containers != NULL) {
		lnet_res_containers_destroy(the_lnet.ln_md_containers);
		the_lnet.ln_md_containers = NULL;
	}

	if (the_lnet.ln_me_containers != NULL) {
		lnet_res_containers_destroy(the_lnet.ln_me_containers);
		the_lnet.ln_me_containers = NULL;
	}

	lnet_res_container_cleanup(&the_lnet.ln_eq_container);

	lnet_msg_containers_destroy();
	lnet_peer_uninit();
	lnet_rtrpools_free(0);

	if (the_lnet.ln_counters != NULL) {
		cfs_percpt_free(the_lnet.ln_counters);
		the_lnet.ln_counters = NULL;
	}
	lnet_destroy_remote_nets_table();
	lnet_descriptor_cleanup();

	return 0;
}

struct lnet_ni *
lnet_net2ni_locked(__u32 net_id, int cpt)
{
	struct lnet_ni *ni;
	struct lnet_net *net;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id) {
			ni = list_entry(net->net_ni_list.next, struct lnet_ni,
					ni_netlist);
			return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_net2ni_addref(__u32 net)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_net2ni_locked(net, 0);
	if (ni)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_net2ni_addref);

struct lnet_net *
lnet_get_net_locked(__u32 net_id)
{
	struct lnet_net *net;

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id)
			return net;
	}

	return NULL;
}

unsigned int
lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
{
	__u64 key = nid;
	unsigned int val;

	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);

	if (number == 1)
		return 0;

	val = hash_long(key, LNET_CPT_BITS);
	/* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
	if (val < number)
		return val;

	return (unsigned int)(key + val + (val >> 1)) % number;
}
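
/*
 * The important property of the hash above is determinism: a given NID
 * always maps to the same CPT for a fixed CPT count, so the state for
 * a peer NID consistently lands on one partition. For example,
 * lnet_cpt_of_nid_locked() below relies on this when narrowing the
 * hash to a NI's or net's CPT list.
 */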
int
lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
{
	struct lnet_net *net;

	/* must be called while holding lnet_net_lock */
	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	/*
	 * If NI is provided then use the CPT identified in the NI cpt
	 * list if one exists. If one doesn't exist, then that NI is
	 * associated with all CPTs and it follows that the net it belongs
	 * to is implicitly associated with all CPTs, so just hash the nid
	 * and return that.
	 */
	if (ni != NULL) {
		if (ni->ni_cpts != NULL)
			return ni->ni_cpts[lnet_nid_cpt_hash(nid,
							     ni->ni_ncpts)];
		else
			return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
	}

	/* no NI provided so look at the net */
	net = lnet_get_net_locked(LNET_NIDNET(nid));

	if (net != NULL && net->net_cpts != NULL) {
		return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
	}

	return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
}

int
lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
{
	int cpt;
	int cpt2;

	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	cpt = lnet_net_lock_current();

	cpt2 = lnet_cpt_of_nid_locked(nid, ni);

	lnet_net_unlock(cpt);

	return cpt2;
}
EXPORT_SYMBOL(lnet_cpt_of_nid);

int
lnet_islocalnet_locked(__u32 net_id)
{
	struct lnet_net *net;
	int local;

	net = lnet_get_net_locked(net_id);

	local = net != NULL;

	return local;
}

int
lnet_islocalnet(__u32 net_id)
{
	int cpt;
	int local;

	cpt = lnet_net_lock_current();

	local = lnet_islocalnet_locked(net_id);

	lnet_net_unlock(cpt);

	return local;
}

struct lnet_ni *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
	struct lnet_net *net;
	struct lnet_ni *ni;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (ni->ni_nid == nid)
				return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_nid2ni_addref(lnet_nid_t nid)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_nid2ni_locked(nid, 0);
	if (ni)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_nid2ni_addref);

int
lnet_islocalnid(lnet_nid_t nid)
{
	struct lnet_ni *ni;
	int cpt;

	cpt = lnet_net_lock_current();
	ni = lnet_nid2ni_locked(nid, cpt);
	lnet_net_unlock(cpt);

	return ni != NULL;
}

int
lnet_count_acceptor_nets(void)
{
	/* Return the # of NIs that need the acceptor. */
	int count = 0;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		/* all socklnd type networks should have the acceptor
		 * thread started */
		if (net->net_lnd->lnd_accept != NULL)
			count++;
	}

	lnet_net_unlock(cpt);

	return count;
}

struct lnet_ping_buffer *
lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
{
	struct lnet_ping_buffer *pbuf;

	LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
	if (pbuf) {
		pbuf->pb_nnis = nnis;
		atomic_set(&pbuf->pb_refcnt, 1);
	}

	return pbuf;
}

void
lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
{
	LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
	LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
}

static struct lnet_ping_buffer *
lnet_ping_target_create(int nnis)
{
	struct lnet_ping_buffer *pbuf;

	pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
	if (pbuf == NULL) {
		CERROR("Can't allocate ping source [%d]\n", nnis);
		return NULL;
	}

	pbuf->pb_info.pi_nnis = nnis;
	pbuf->pb_info.pi_pid = the_lnet.ln_pid;
	pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
	pbuf->pb_info.pi_features =
		LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;

	return pbuf;
}

static int
lnet_get_net_ni_count_locked(struct lnet_net *net)
{
	struct lnet_ni *ni;
	int count = 0;

	list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
		count++;

	return count;
}

static int
lnet_get_net_ni_count_pre(struct lnet_net *net)
{
	struct lnet_ni *ni;
	int count = 0;

	list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
		count++;

	return count;
}

static int
lnet_get_ni_count(void)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	int count = 0;

	lnet_net_lock(0);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
			count++;
	}

	lnet_net_unlock(0);

	return count;
}

int
lnet_get_net_count(void)
{
	struct lnet_net *net;
	int count = 0;

	lnet_net_lock(0);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		count++;
	}

	lnet_net_unlock(0);

	return count;
}
void
lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni_status *stat;
	int nnis;
	int i;

	__swab32s(&pbuf->pb_info.pi_magic);
	__swab32s(&pbuf->pb_info.pi_features);
	__swab32s(&pbuf->pb_info.pi_pid);
	__swab32s(&pbuf->pb_info.pi_nnis);
	nnis = pbuf->pb_info.pi_nnis;
	if (nnis > pbuf->pb_nnis)
		nnis = pbuf->pb_nnis;
	for (i = 0; i < nnis; i++) {
		stat = &pbuf->pb_info.pi_ni[i];
		__swab64s(&stat->ns_nid);
		__swab32s(&stat->ns_status);
	}
}

static int
lnet_ping_info_validate(struct lnet_ping_info *pinfo)
{
	if (!pinfo)
		return -EINVAL;
	if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
		return -EPROTO;
	if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
		return -EPROTO;
	/* Loopback is guaranteed to be present */
	if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
		return -ERANGE;
	if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
		return -EPROTO;
	return 0;
}

static void
lnet_ping_target_destroy(void)
{
	struct lnet_net *net;
	struct lnet_ni *ni;

	lnet_net_lock(LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			lnet_ni_lock(ni);
			ni->ni_status = NULL;
			lnet_ni_unlock(ni);
		}
	}

	lnet_ping_buffer_decref(the_lnet.ln_ping_target);
	the_lnet.ln_ping_target = NULL;

	lnet_net_unlock(LNET_LOCK_EX);
}

static void
lnet_ping_target_event_handler(struct lnet_event *event)
{
	struct lnet_ping_buffer *pbuf = event->md.user_ptr;

	if (event->unlinked)
		lnet_ping_buffer_decref(pbuf);
}

static int
lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
		       struct lnet_handle_md *ping_mdh,
		       int ni_count, bool set_eq)
{
	struct lnet_process_id id = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY
	};
	struct lnet_handle_me me_handle;
	struct lnet_md md = { NULL };
	int rc, rc2;

	if (set_eq) {
		rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
				 &the_lnet.ln_ping_target_eq);
		if (rc != 0) {
			CERROR("Can't allocate ping buffer EQ: %d\n", rc);
			return rc;
		}
	}

	*ppbuf = lnet_ping_target_create(ni_count);
	if (*ppbuf == NULL) {
		rc = -ENOMEM;
		goto fail_free_eq;
	}

	/* Ping target ME/MD */
	rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
			  LNET_PROTO_PING_MATCHBITS, 0,
			  LNET_UNLINK, LNET_INS_AFTER,
			  &me_handle);
	if (rc != 0) {
		CERROR("Can't create ping target ME: %d\n", rc);
		goto fail_decref_ping_buffer;
	}

	/* initialize md content */
	md.start = &(*ppbuf)->pb_info;
	md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
	md.threshold = LNET_MD_THRESH_INF;
	md.max_size = 0;
	md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
		     LNET_MD_MANAGE_REMOTE;
	md.eq_handle = the_lnet.ln_ping_target_eq;
	md.user_ptr = *ppbuf;

	rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
	if (rc != 0) {
		CERROR("Can't attach ping target MD: %d\n", rc);
		goto fail_unlink_ping_me;
	}
	lnet_ping_buffer_addref(*ppbuf);

	return 0;

fail_unlink_ping_me:
	rc2 = LNetMEUnlink(me_handle);
	LASSERT(rc2 == 0);
fail_decref_ping_buffer:
	LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
	lnet_ping_buffer_decref(*ppbuf);
	*ppbuf = NULL;
fail_free_eq:
	if (set_eq) {
		rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
		LASSERT(rc2 == 0);
	}
	return rc;
}
static void
lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
		    struct lnet_handle_md *ping_mdh)
{
	sigset_t blocked = cfs_block_allsigs();

	LNetMDUnlink(*ping_mdh);
	LNetInvalidateMDHandle(ping_mdh);

	/* NB the MD could be busy; this just starts the unlink */
	while (lnet_ping_buffer_numref(pbuf) > 1) {
		CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}

	cfs_restore_sigs(blocked);
}

static void
lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	struct lnet_ni_status *ns;
	int i;
	int rc;

	i = 0;
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			LASSERT(i < pbuf->pb_nnis);

			ns = &pbuf->pb_info.pi_ni[i];

			ns->ns_nid = ni->ni_nid;

			lnet_ni_lock(ni);
			ns->ns_status = (ni->ni_status != NULL) ?
					ni->ni_status->ns_status :
						LNET_NI_STATUS_UP;
			ni->ni_status = ns;
			lnet_ni_unlock(ni);

			i++;
		}
	}
	/*
	 * We (ab)use the ns_status of the loopback interface to
	 * transmit the sequence number. The first interface listed
	 * must be the loopback interface.
	 */
	rc = lnet_ping_info_validate(&pbuf->pb_info);
	if (rc) {
		LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
		LBUG();
	}
	LNET_PING_BUFFER_SEQNO(pbuf) =
		atomic_inc_return(&the_lnet.ln_ping_target_seqno);
}

static void
lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
			struct lnet_handle_md ping_mdh)
{
	struct lnet_ping_buffer *old_pbuf = NULL;
	struct lnet_handle_md old_ping_md;

	/* switch the NIs to point to the new ping info created */
	lnet_net_lock(LNET_LOCK_EX);

	if (!the_lnet.ln_routing)
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
	if (!lnet_peer_discovery_disabled)
		pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;

	/* Ensure only known feature bits have been set. */
	LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
	LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));

	lnet_ping_target_install_locked(pbuf);

	if (the_lnet.ln_ping_target) {
		old_pbuf = the_lnet.ln_ping_target;
		old_ping_md = the_lnet.ln_ping_target_md;
	}
	the_lnet.ln_ping_target_md = ping_mdh;
	the_lnet.ln_ping_target = pbuf;

	lnet_net_unlock(LNET_LOCK_EX);

	if (old_pbuf) {
		/* unlink and free the old ping info */
		lnet_ping_md_unlink(old_pbuf, &old_ping_md);
		lnet_ping_buffer_decref(old_pbuf);
	}

	lnet_push_update_to_peers(0);
}

static void
lnet_ping_target_fini(void)
{
	int rc;

	lnet_ping_md_unlink(the_lnet.ln_ping_target,
			    &the_lnet.ln_ping_target_md);

	rc = LNetEQFree(the_lnet.ln_ping_target_eq);
	LASSERT(rc == 0);

	lnet_ping_target_destroy();
}
/* Resize the push target. */
int lnet_push_target_resize(void)
{
	struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
	struct lnet_md md = { NULL };
	struct lnet_handle_me meh;
	struct lnet_handle_md mdh;
	struct lnet_handle_md old_mdh;
	struct lnet_ping_buffer *pbuf;
	struct lnet_ping_buffer *old_pbuf;
	int nnis = the_lnet.ln_push_target_nnis;
	int rc;

	if (nnis <= 0) {
		rc = -EINVAL;
		goto fail_return;
	}
again:
	pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
	if (!pbuf) {
		rc = -ENOMEM;
		goto fail_return;
	}

	rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
			  LNET_PROTO_PING_MATCHBITS, 0,
			  LNET_UNLINK, LNET_INS_AFTER,
			  &meh);
	if (rc) {
		CERROR("Can't create push target ME: %d\n", rc);
		goto fail_decref_pbuf;
	}

	/* initialize md content */
	md.start = &pbuf->pb_info;
	md.length = LNET_PING_INFO_SIZE(nnis);
	md.threshold = LNET_MD_THRESH_INF;
	md.max_size = 0;
	md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
		     LNET_MD_MANAGE_REMOTE;
	md.user_ptr = pbuf;
	md.eq_handle = the_lnet.ln_push_target_eq;

	rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
	if (rc) {
		CERROR("Can't attach push MD: %d\n", rc);
		goto fail_unlink_meh;
	}
	lnet_ping_buffer_addref(pbuf);

	lnet_net_lock(LNET_LOCK_EX);
	old_pbuf = the_lnet.ln_push_target;
	old_mdh = the_lnet.ln_push_target_md;
	the_lnet.ln_push_target = pbuf;
	the_lnet.ln_push_target_md = mdh;
	lnet_net_unlock(LNET_LOCK_EX);

	if (old_pbuf) {
		LNetMDUnlink(old_mdh);
		lnet_ping_buffer_decref(old_pbuf);
	}

	if (nnis < the_lnet.ln_push_target_nnis)
		goto again;

	CDEBUG(D_NET, "nnis %d success\n", nnis);

	return 0;

fail_unlink_meh:
	LNetMEUnlink(meh);
fail_decref_pbuf:
	lnet_ping_buffer_decref(pbuf);
fail_return:
	CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
	return rc;
}

static void lnet_push_target_event_handler(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf = ev->md.user_ptr;

	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
		lnet_swap_pinginfo(pbuf);

	lnet_peer_push_event(ev);
	if (ev->unlinked)
		lnet_ping_buffer_decref(pbuf);
}

/* Initialize the push target. */
static int lnet_push_target_init(void)
{
	int rc;

	if (the_lnet.ln_push_target)
		return -EALREADY;

	rc = LNetEQAlloc(0, lnet_push_target_event_handler,
			 &the_lnet.ln_push_target_eq);
	if (rc) {
		CERROR("Can't allocate push target EQ: %d\n", rc);
		return rc;
	}

	/* Start at the required minimum, we'll enlarge if required. */
	the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;

	rc = lnet_push_target_resize();
	if (rc) {
		LNetEQFree(the_lnet.ln_push_target_eq);
		LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
	}

	return rc;
}

/* Clean up the push target. */
static void lnet_push_target_fini(void)
{
	if (!the_lnet.ln_push_target)
		return;

	/* Unlink and invalidate to prevent new references. */
	LNetMDUnlink(the_lnet.ln_push_target_md);
	LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);

	/* Wait for the unlink to complete. */
	while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
		CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}

	lnet_ping_buffer_decref(the_lnet.ln_push_target);
	the_lnet.ln_push_target = NULL;
	the_lnet.ln_push_target_nnis = 0;

	LNetEQFree(the_lnet.ln_push_target_eq);
	LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
}

static int
lnet_ni_tq_credits(struct lnet_ni *ni)
{
	int credits;

	LASSERT(ni->ni_ncpts >= 1);

	if (ni->ni_ncpts == 1)
		return ni->ni_net->net_tunables.lct_max_tx_credits;

	credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
	credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
	credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);

	return credits;
}
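
/*
 * Worked example with illustrative tunables: lct_max_tx_credits = 256,
 * lct_peer_tx_credits = 8 and ni_ncpts = 4 gives 256 / 4 = 64 credits
 * per TX queue; the result is then clamped to at least 8 * 8 = 64 and
 * at most 256, so each of the 4 queues gets 64 credits.
 */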
static void
lnet_ni_unlink_locked(struct lnet_ni *ni)
{
	/* move it to zombie list and nobody can find it anymore */
	LASSERT(!list_empty(&ni->ni_netlist));
	list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
	lnet_ni_decref_locked(ni, 0);
}

static void
lnet_clear_zombies_nis_locked(struct lnet_net *net)
{
	int i;
	int islo;
	struct lnet_ni *ni;
	struct list_head *zombie_list = &net->net_ni_zombie;

	/*
	 * Now wait for the NIs I just nuked to show up on the zombie
	 * list and shut them down in guaranteed thread context
	 */
	i = 2;
	while (!list_empty(zombie_list)) {
		int *ref;
		int j;

		ni = list_entry(zombie_list->next,
				struct lnet_ni, ni_netlist);
		list_del_init(&ni->ni_netlist);
		/* the NI should be in deleting state; if it's not it's
		 * a bug */
		LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
		cfs_percpt_for_each(ref, j, ni->ni_refs) {
			if (*ref == 0)
				continue;
			/* still busy, add it back to zombie list */
			list_add(&ni->ni_netlist, zombie_list);
			break;
		}

		if (!list_empty(&ni->ni_netlist)) {
			lnet_net_unlock(LNET_LOCK_EX);
			++i;
			if ((i & (-i)) == i) {
				CDEBUG(D_WARNING,
				       "Waiting for zombie LNI %s\n",
				       libcfs_nid2str(ni->ni_nid));
			}
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
			lnet_net_lock(LNET_LOCK_EX);
			continue;
		}

		lnet_net_unlock(LNET_LOCK_EX);

		islo = ni->ni_net->net_lnd->lnd_type == LOLND;

		LASSERT(!in_interrupt());
		/* Holding the mutex makes it safe for lnd_shutdown
		 * to call module_put(). Module unload cannot finish
		 * until lnet_unregister_lnd() completes, and that
		 * requires the mutex.
		 */
		mutex_lock(&the_lnet.ln_lnd_mutex);
		(net->net_lnd->lnd_shutdown)(ni);
		mutex_unlock(&the_lnet.ln_lnd_mutex);

		if (!islo)
			CDEBUG(D_LNI, "Removed LNI %s\n",
			       libcfs_nid2str(ni->ni_nid));

		lnet_ni_free(ni);
		i = 2;
		lnet_net_lock(LNET_LOCK_EX);
	}
}

/* shut down the NI and release its refcount */
static void
lnet_shutdown_lndni(struct lnet_ni *ni)
{
	int i;
	struct lnet_net *net = ni->ni_net;

	lnet_net_lock(LNET_LOCK_EX);
	lnet_ni_lock(ni);
	ni->ni_state = LNET_NI_STATE_DELETING;
	lnet_ni_unlock(ni);
	lnet_ni_unlink_locked(ni);
	lnet_incr_dlc_seq();
	lnet_net_unlock(LNET_LOCK_EX);

	/* clear messages for this NI on the lazy portal */
	for (i = 0; i < the_lnet.ln_nportals; i++)
		lnet_clear_lazy_portal(ni, i, "Shutting down NI");

	lnet_net_lock(LNET_LOCK_EX);
	lnet_clear_zombies_nis_locked(net);
	lnet_net_unlock(LNET_LOCK_EX);
}

static void
lnet_shutdown_lndnet(struct lnet_net *net)
{
	struct lnet_ni *ni;

	lnet_net_lock(LNET_LOCK_EX);

	list_del_init(&net->net_list);

	while (!list_empty(&net->net_ni_list)) {
		ni = list_entry(net->net_ni_list.next,
				struct lnet_ni, ni_netlist);
		lnet_net_unlock(LNET_LOCK_EX);
		lnet_shutdown_lndni(ni);
		lnet_net_lock(LNET_LOCK_EX);
	}

	lnet_net_unlock(LNET_LOCK_EX);

	/* Do peer table cleanup for this net */
	lnet_peer_tables_cleanup(net);

	lnet_net_free(net);
}
static void
lnet_shutdown_lndnets(void)
{
	struct lnet_net *net;
	struct list_head resend;
	struct lnet_msg *msg, *tmp;

	INIT_LIST_HEAD(&resend);

	/* NB called holding the global mutex */

	/* All quiet on the API front */
	LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
	LASSERT(the_lnet.ln_refcount == 0);

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_state = LNET_STATE_STOPPING;

	while (!list_empty(&the_lnet.ln_nets)) {
		/*
		 * move the nets to the zombie list to avoid them being
		 * picked up for new work. The loopback net is also
		 * included in the nets that will be moved to the zombie
		 * list.
		 */
		net = list_entry(the_lnet.ln_nets.next,
				 struct lnet_net, net_list);
		list_move(&net->net_list, &the_lnet.ln_net_zombie);
	}

	/* Drop the cached loopback Net. */
	if (the_lnet.ln_loni != NULL) {
		lnet_ni_decref_locked(the_lnet.ln_loni, 0);
		the_lnet.ln_loni = NULL;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through the net zombie list and delete each net */
	while (!list_empty(&the_lnet.ln_net_zombie)) {
		net = list_entry(the_lnet.ln_net_zombie.next,
				 struct lnet_net, net_list);
		lnet_shutdown_lndnet(net);
	}

	spin_lock(&the_lnet.ln_msg_resend_lock);
	list_splice(&the_lnet.ln_msg_resend, &resend);
	spin_unlock(&the_lnet.ln_msg_resend_lock);

	list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
		list_del_init(&msg->msg_list);
		msg->msg_no_resend = true;
		lnet_finalize(msg, -ECANCELED);
	}

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_state = LNET_STATE_SHUTDOWN;
	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
{
	int rc = -EINVAL;
	struct lnet_tx_queue *tq;
	int i;
	struct lnet_net *net = ni->ni_net;

	mutex_lock(&the_lnet.ln_lnd_mutex);

	if (tun) {
		memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
		ni->ni_lnd_tunables_set = true;
	}

	rc = (net->net_lnd->lnd_startup)(ni);

	mutex_unlock(&the_lnet.ln_lnd_mutex);

	if (rc != 0) {
		LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
				   rc, libcfs_lnd2str(net->net_lnd->lnd_type));
		return rc;
	}

	lnet_ni_lock(ni);
	ni->ni_state = LNET_NI_STATE_ACTIVE;
	lnet_ni_unlock(ni);

	/* We keep a reference on the loopback net through the loopback NI */
	if (net->net_lnd->lnd_type == LOLND) {
		lnet_ni_addref(ni);
		LASSERT(the_lnet.ln_loni == NULL);
		the_lnet.ln_loni = ni;
		ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
		ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
		ni->ni_net->net_tunables.lct_max_tx_credits = 0;
		ni->ni_net->net_tunables.lct_peer_timeout = 0;
		return 0;
	}

	if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
	    ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
		LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
				   libcfs_lnd2str(net->net_lnd->lnd_type),
				   ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
					"" : "per-peer ");
		/* shut down the NI since if we get here it must have
		 * already been started up
		 */
		lnet_shutdown_lndni(ni);
		return -EINVAL;
	}

	cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
		tq->tq_credits_min =
		tq->tq_credits_max =
		tq->tq_credits = lnet_ni_tq_credits(ni);
	}

	atomic_set(&ni->ni_tx_credits,
		   lnet_ni_tq_credits(ni) * ni->ni_ncpts);
	atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);

	CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
	       libcfs_nid2str(ni->ni_nid),
	       ni->ni_net->net_tunables.lct_peer_tx_credits,
	       lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
	       ni->ni_net->net_tunables.lct_peer_rtr_credits,
	       ni->ni_net->net_tunables.lct_peer_timeout);

	return 0;
}
2269 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2272 struct lnet_net *net_l = NULL;
2273 struct list_head local_ni_list;
2277 const struct lnet_lnd *lnd;
2279 net->net_tunables.lct_peer_timeout;
2281 net->net_tunables.lct_max_tx_credits;
2282 int peerrtrcredits =
2283 net->net_tunables.lct_peer_rtr_credits;
2285 INIT_LIST_HEAD(&local_ni_list);
2288 * make sure that this net is unique. If it isn't then
2289 * we are adding interfaces to an already existing network, and
2290 * 'net' is just a convenient way to pass in the list.
2291 * if it is unique we need to find the LND and load it if
2294 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2295 lnd_type = LNET_NETTYP(net->net_id);
2297 mutex_lock(&the_lnet.ln_lnd_mutex);
2298 lnd = lnet_find_lnd_by_type(lnd_type);
2301 mutex_unlock(&the_lnet.ln_lnd_mutex);
2302 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2303 mutex_lock(&the_lnet.ln_lnd_mutex);
2305 lnd = lnet_find_lnd_by_type(lnd_type);
2307 mutex_unlock(&the_lnet.ln_lnd_mutex);
2308 CERROR("Can't load LND %s, module %s, rc=%d\n",
2309 libcfs_lnd2str(lnd_type),
2310 libcfs_lnd2modname(lnd_type), rc);
2311 #ifndef HAVE_MODULE_LOADING_SUPPORT
2312 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
2313 "compiled with kernel module "
2314 "loading support.");
2323 mutex_unlock(&the_lnet.ln_lnd_mutex);
2329 * net_l: if the network being added is unique then net_l
2330 * will point to that network
2331 * if the network being added is not unique then
2332 * net_l points to the existing network.
2334 * When we enter the loop below, we'll pick NIs off he
2335 * network beign added and start them up, then add them to
2336 * a local ni list. Once we've successfully started all
2337 * the NIs then we join the local NI list (of started up
2338 * networks) with the net_l->net_ni_list, which should
2339 * point to the correct network to add the new ni list to
2341 * If any of the new NIs fail to start up, then we want to
2342 * iterate through the local ni list, which should include
2343 * any NIs which were successfully started up, and shut
2346 * After than we want to delete the network being added,
2347 * to avoid a memory leak.
2351 * When a network uses TCP bonding then all its interfaces
2352 * must be specified when the network is first defined: the
2353 * TCP bonding code doesn't allow for interfaces to be added
2356 if (net_l != net && net_l != NULL && use_tcp_bonding &&
2357 LNET_NETTYP(net_l->net_id) == SOCKLND) {
2362 while (!list_empty(&net->net_ni_added)) {
2363 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
2365 list_del_init(&ni->ni_netlist);
2367 /* make sure that the NI we're about to start
2368 * up is actually unique. If it's not, fail. */
2369 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2370 ni->ni_interfaces[0])) {
2375 /* adjust the pointer to the parent network, just in case
2376 * the net is a duplicate */
2379 rc = lnet_startup_lndni(ni, tun);
2384 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
2385 ni->ni_net->net_lnd->lnd_query != NULL);
2388 list_add_tail(&ni->ni_netlist, &local_ni_list);
2393 lnet_net_lock(LNET_LOCK_EX);
2394 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2395 lnet_incr_dlc_seq();
2396 lnet_net_unlock(LNET_LOCK_EX);
2398 /* if the network is not unique then we don't want to keep
2399 * it around after we're done. Free it. Otherwise add that
2400 * net to the global the_lnet.ln_nets */
2401 if (net_l != net && net_l != NULL) {
2403 * TODO - note. currently the tunables cannot be updated
2409 * restore tunables after they have been overwritten by the
2412 if (peer_timeout != -1)
2413 net->net_tunables.lct_peer_timeout = peer_timeout;
2414 if (maxtxcredits != -1)
2415 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2416 if (peerrtrcredits != -1)
2417 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2419 lnet_net_lock(LNET_LOCK_EX);
2420 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2421 lnet_net_unlock(LNET_LOCK_EX);
2424 /* update net count */
2425 lnet_current_net_count = lnet_get_net_count();
2431 * shut down the new NIs that are being started up
2432 * free the NET being started
2434 while (!list_empty(&local_ni_list)) {
2435 ni = list_entry(local_ni_list.next, struct lnet_ni,
2438 lnet_shutdown_lndni(ni);
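/*
 * Illustrative sketch (not part of the original source): the
 * start-then-splice rollback pattern used above, reduced to its core.
 * NIs are started onto a private list; only when every NI is up is that
 * list spliced into the parent net under LNET_LOCK_EX. On any failure
 * the private list still owns every NI started so far, so the unwind is
 * a simple walk. Helper names mirror the ones used above.
 */
#if 0
	INIT_LIST_HEAD(&local_ni_list);

	while (!list_empty(&net->net_ni_added)) {
		ni = list_entry(net->net_ni_added.next, struct lnet_ni,
				ni_netlist);
		list_del_init(&ni->ni_netlist);
		if (lnet_startup_lndni(ni, tun) < 0)
			goto unwind;
		list_add_tail(&ni->ni_netlist, &local_ni_list);
	}

	lnet_net_lock(LNET_LOCK_EX);
	list_splice_tail(&local_ni_list, &net_l->net_ni_list);
	lnet_net_unlock(LNET_LOCK_EX);
	return 0;			/* success */

unwind:
	while (!list_empty(&local_ni_list)) {
		ni = list_entry(local_ni_list.next, struct lnet_ni,
				ni_netlist);
		list_del_init(&ni->ni_netlist);
		lnet_shutdown_lndni(ni);
	}
#endif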
2448 lnet_startup_lndnets(struct list_head *netlist)
2450 struct lnet_net *net;
2455 * Change to running state before bringing up the LNDs. This
2456 * allows lnet_shutdown_lndnets() to assert that we've passed
2459 lnet_net_lock(LNET_LOCK_EX);
2460 the_lnet.ln_state = LNET_STATE_RUNNING;
2461 lnet_net_unlock(LNET_LOCK_EX);
2463 while (!list_empty(netlist)) {
2464 net = list_entry(netlist->next, struct lnet_net, net_list);
2465 list_del_init(&net->net_list);
2467 rc = lnet_startup_lndnet(net, NULL);
2477 lnet_shutdown_lndnets();
2483 * Initialize LNet library.
2485 * Automatically called at module loading time. Caller has to call
2486 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2487 * latter returned 0. It must be called exactly once.
2489 * \retval 0 on success
2490 * \retval -ve on failures.
2492 int lnet_lib_init(void)
2496 lnet_assert_wire_constants();
2498 /* refer to global cfs_cpt_table for now */
2499 the_lnet.ln_cpt_table = cfs_cpt_table;
2500 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
2502 LASSERT(the_lnet.ln_cpt_number > 0);
2503 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2504 /* we are at risk of consuming all lh_cookie values */
2505 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2506 "please change setting of CPT-table and retry\n",
2507 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2511 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2512 the_lnet.ln_cpt_bits++;
2514 rc = lnet_create_locks();
2516 CERROR("Can't create LNet global locks: %d\n", rc);
2520 the_lnet.ln_refcount = 0;
2521 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2522 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2524 /* The hash table size is the number of bits it takes to express the set
2525 * ln_num_routes, minus 1 (better to under-estimate than over-estimate so
2526 * we don't waste memory); the arithmetic is sketched below. */
2527 if (rnet_htable_size <= 0)
2528 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2529 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2530 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2531 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2532 order_base_2(rnet_htable_size) - 1);
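/*
 * Illustrative sketch (not part of the original source): the bucket
 * arithmetic above for a few sample sizes. order_base_2(n) is
 * ceil(log2(n)) for n > 1, and one bit is deliberately shaved off:
 *
 *	rnet_htable_size   order_base_2()   hbits   buckets (1 << hbits)
 *	       128                7           6            64
 *	       100                7           6            64
 *	         2                1           1             2  (max_t floor)
 */
#if 0
	int hbits = max_t(int, 1, order_base_2(rnet_htable_size) - 1);
	int buckets = 1 << hbits;
#endif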
2534 /* All LNDs apart from the LOLND are in separate modules. They
2535 * register themselves when their module loads, and unregister
2536 * themselves when their module is unloaded. */
2537 lnet_register_lnd(&the_lolnd);
2542 * Finalize LNet library.
2544 * \pre lnet_lib_init() called with success.
2545 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2547 * As this happens at module-unload, all lnds must already be unloaded,
2548 * so they must already be unregistered.
2550 void lnet_lib_exit(void)
2554 LASSERT(the_lnet.ln_refcount == 0);
2555 lnet_unregister_lnd(&the_lolnd);
2556 for (i = 0; i < NUM_LNDS; i++)
2557 LASSERT(!the_lnet.ln_lnds[i]);
2558 lnet_destroy_locks();
2562 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2564 * Users must call this function at least once before any other functions.
2565 * For each successful call there must be a corresponding call to
2566 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2567 * ignored.
2569 * The PID used by LNet may be different from the one requested.
2572 * \param requested_pid PID requested by the caller.
2574 * \return >= 0 on success, and < 0 error code on failures.
2577 LNetNIInit(lnet_pid_t requested_pid)
2579 int im_a_router = 0;
2582 struct lnet_ping_buffer *pbuf;
2583 struct lnet_handle_md ping_mdh;
2584 struct list_head net_head;
2585 struct lnet_net *net;
2587 INIT_LIST_HEAD(&net_head);
2589 mutex_lock(&the_lnet.ln_api_mutex);
2591 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2593 if (the_lnet.ln_refcount > 0) {
2594 rc = the_lnet.ln_refcount++;
2595 mutex_unlock(&the_lnet.ln_api_mutex);
2599 rc = lnet_prepare(requested_pid);
2601 mutex_unlock(&the_lnet.ln_api_mutex);
2605 /* create a network for the loopback network */
2606 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2609 goto err_empty_list;
2612 /* Add in the loopback NI */
2613 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2615 goto err_empty_list;
2618 /* If LNet is being initialized via DLC it is possible
2619 * that the user requests not to load module parameters (ones which
2620 * are supported by DLC) on initialization. Therefore, make sure not
2621 * to load networks, routes and forwarding from module parameters
2622 * in this case. On cleanup, in case of failure, only clean up
2623 * routes if they have been loaded */
2624 if (!the_lnet.ln_nis_from_mod_params) {
2625 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2628 goto err_empty_list;
2631 ni_count = lnet_startup_lndnets(&net_head);
2634 goto err_empty_list;
2637 if (!the_lnet.ln_nis_from_mod_params) {
2638 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2640 goto err_shutdown_lndnis;
2642 rc = lnet_rtrpools_alloc(im_a_router);
2644 goto err_destroy_routes;
2647 rc = lnet_acceptor_start();
2649 goto err_destroy_routes;
2651 the_lnet.ln_refcount = 1;
2652 /* Now I may use my own API functions... */
2654 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2656 goto err_acceptor_stop;
2658 lnet_ping_target_update(pbuf, ping_mdh);
2660 rc = LNetEQAlloc(0, lnet_mt_event_handler, &the_lnet.ln_mt_eqh);
2662 CERROR("Can't allocate monitor thread EQ: %d\n", rc);
2666 rc = lnet_push_target_init();
2670 rc = lnet_peer_discovery_start();
2672 goto err_destroy_push_target;
2674 rc = lnet_monitor_thr_start();
2676 goto err_stop_discovery_thr;
2679 lnet_router_debugfs_init();
2681 mutex_unlock(&the_lnet.ln_api_mutex);
2683 complete_all(&the_lnet.ln_started);
2685 /* wait for all routers to start */
2686 lnet_wait_router_start();
2690 err_stop_discovery_thr:
2691 lnet_peer_discovery_stop();
2692 err_destroy_push_target:
2693 lnet_push_target_fini();
2695 lnet_ping_target_fini();
2697 the_lnet.ln_refcount = 0;
2698 lnet_acceptor_stop();
2700 if (!the_lnet.ln_nis_from_mod_params)
2701 lnet_destroy_routes();
2702 err_shutdown_lndnis:
2703 lnet_shutdown_lndnets();
2707 mutex_unlock(&the_lnet.ln_api_mutex);
2708 while (!list_empty(&net_head)) {
2709 struct lnet_net *net;
2711 net = list_entry(net_head.next, struct lnet_net, net_list);
2712 list_del_init(&net->net_list);
2717 EXPORT_SYMBOL(LNetNIInit);
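/*
 * Illustrative usage sketch (not part of the original source): how a
 * kernel-side LNet user is expected to pair LNetNIInit()/LNetNIFini().
 * Repeat callers only bump ln_refcount; the interfaces are brought up
 * by the first successful call. LNET_PID_LUSTRE is used purely as an
 * example PID.
 */
#if 0
	int rc = LNetNIInit(LNET_PID_LUSTRE);

	if (rc < 0)
		return rc;		/* nothing was started */

	/* ... use LNetGetId(), LNetGet(), LNetPut(), ... */

	LNetNIFini();			/* one Fini per successful Init */
#endif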
2720 * Stop LNet interfaces, routing, and forwarding.
2722 * Users must call this function once for each successful call to LNetNIInit().
2723 * Once the LNetNIFini() operation has been started, the results of pending
2724 * API operations are undefined.
2726 * \return always 0 for current implementation.
2731 mutex_lock(&the_lnet.ln_api_mutex);
2733 LASSERT(the_lnet.ln_refcount > 0);
2735 if (the_lnet.ln_refcount != 1) {
2736 the_lnet.ln_refcount--;
2738 LASSERT(!the_lnet.ln_niinit_self);
2742 lnet_router_debugfs_fini();
2743 lnet_monitor_thr_stop();
2744 lnet_peer_discovery_stop();
2745 lnet_push_target_fini();
2746 lnet_ping_target_fini();
2748 /* Teardown fns that use my own API functions BEFORE here */
2749 the_lnet.ln_refcount = 0;
2751 lnet_acceptor_stop();
2752 lnet_destroy_routes();
2753 lnet_shutdown_lndnets();
2757 mutex_unlock(&the_lnet.ln_api_mutex);
2760 EXPORT_SYMBOL(LNetNIFini);
2763 * Grabs the ni data from the ni structure and fills the out
2764 * parameters.
2766 * \param[in] ni network interface structure
2767 * \param[out] cfg_ni NI config information
2768 * \param[out] tun network and LND tunables
2771 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2772 struct lnet_ioctl_config_lnd_tunables *tun,
2773 struct lnet_ioctl_element_stats *stats,
2776 size_t min_size = 0;
2779 if (!ni || !cfg_ni || !tun)
2782 if (ni->ni_interfaces[0] != NULL) {
2783 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2784 if (ni->ni_interfaces[i] != NULL) {
2785 strncpy(cfg_ni->lic_ni_intf[i],
2786 ni->ni_interfaces[i],
2787 sizeof(cfg_ni->lic_ni_intf[i]));
2792 cfg_ni->lic_nid = ni->ni_nid;
2793 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2794 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2796 cfg_ni->lic_status = ni->ni_status->ns_status;
2797 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2798 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2800 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2803 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
2804 LNET_STATS_TYPE_SEND);
2805 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
2806 LNET_STATS_TYPE_RECV);
2807 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
2808 LNET_STATS_TYPE_DROP);
2812 * tun->lt_tun will always be present, but in order to be
2813 * backwards compatible, we need to deal with the case where
2814 * tun->lt_tun is smaller than what the kernel has, because it
2815 * comes from an older version of a userspace program. In that
2816 * case we copy only as much information as there is available
2817 * space for (see the sketch after this function).
2818 min_size = tun_size - sizeof(tun->lt_cmn);
2819 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2821 /* copy over the cpts */
2822 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2823 ni->ni_cpts == NULL) {
2824 for (i = 0; i < ni->ni_ncpts; i++)
2825 cfg_ni->lic_cpts[i] = i;
2828 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2829 i < LNET_MAX_SHOW_NUM_CPT;
2831 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2833 cfg_ni->lic_ncpts = ni->ni_ncpts;
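/*
 * Illustrative sketch (not part of the original source): the
 * backwards-compatible tunables copy above, assuming tun_size has
 * already been validated by the ioctl layer. Only the LND-specific
 * tail that the (possibly older) userspace struct can hold is copied,
 * so nothing is written past the caller's buffer.
 */
#if 0
	size_t lnd_part = tun_size - sizeof(tun->lt_cmn); /* LND-only tail */

	memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, lnd_part);
#endif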
2837 * NOTE: This is a legacy function left in the code to be backwards
2838 * compatible with older userspace programs. It should eventually be
2839 * removed.
2841 * Grabs the ni data from the ni structure and fills the out parameters.
2844 * \param[in] ni network interface structure
2845 * \param[out] config config information
2848 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2849 struct lnet_ioctl_config_data *config)
2851 struct lnet_ioctl_net_config *net_config;
2852 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2853 size_t min_size, tunable_size = 0;
2859 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2863 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2864 ARRAY_SIZE(net_config->ni_interfaces));
2866 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2867 if (!ni->ni_interfaces[i])
2870 strncpy(net_config->ni_interfaces[i],
2871 ni->ni_interfaces[i],
2872 sizeof(net_config->ni_interfaces[i]));
2875 config->cfg_nid = ni->ni_nid;
2876 config->cfg_config_u.cfg_net.net_peer_timeout =
2877 ni->ni_net->net_tunables.lct_peer_timeout;
2878 config->cfg_config_u.cfg_net.net_max_tx_credits =
2879 ni->ni_net->net_tunables.lct_max_tx_credits;
2880 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2881 ni->ni_net->net_tunables.lct_peer_tx_credits;
2882 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2883 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2885 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2886 net_config->ni_status = LNET_NI_STATUS_UP;
2888 net_config->ni_status = ni->ni_status->ns_status;
2891 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2893 for (i = 0; i < num_cpts; i++)
2894 net_config->ni_cpts[i] = ni->ni_cpts[i];
2896 config->cfg_ncpts = num_cpts;
2900 * See if userspace tools sent in a newer and larger version
2901 * of struct lnet_tunables than what the kernel uses.
2903 min_size = sizeof(*config) + sizeof(*net_config);
2905 if (config->cfg_hdr.ioc_len > min_size)
2906 tunable_size = config->cfg_hdr.ioc_len - min_size;
2908 /* Don't copy too much data to user space */
2909 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2910 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2912 if (lnd_cfg && min_size) {
2913 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2914 config->cfg_config_u.cfg_net.net_interface_count = 1;
2916 /* Tell userspace that the kernel side has less data; the size handshake is sketched after this function */
2917 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2918 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2919 config->cfg_hdr.ioc_len -= min_size;
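/*
 * Illustrative sketch (not part of the original source): the ioc_len
 * handshake above. Userspace advertises its total buffer size in
 * cfg_hdr.ioc_len; everything beyond the two fixed structs is treated
 * as tunable space, the copy is clamped to what the kernel has, and
 * ioc_len is trimmed so userspace can detect the short copy.
 */
#if 0
	size_t fixed = sizeof(*config) + sizeof(*net_config);
	size_t tun_space = 0;

	if (config->cfg_hdr.ioc_len > fixed)
		tun_space = config->cfg_hdr.ioc_len - fixed;

	/* copy no more than the kernel-side struct actually holds */
	memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables,
	       min(tun_space, sizeof(ni->ni_lnd_tunables)));

	if (tun_space > sizeof(ni->ni_lnd_tunables))
		config->cfg_hdr.ioc_len -=
			tun_space - sizeof(ni->ni_lnd_tunables);
#endif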
2925 lnet_get_ni_idx_locked(int idx)
2928 struct lnet_net *net;
2930 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2931 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2941 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2944 struct lnet_net *net = mynet;
2947 * It is possible that the net has been cleaned out while there is
2948 * a message being sent. This function accesses the net without
2949 * checking if the list is empty.
2953 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2955 if (list_empty(&net->net_ni_list))
2957 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2963 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2964 /* if we reached the end of the ni list and the net is
2965 * specified, then there are no more NIs in that net */
2969 /* we reached the end of this net's NI list. Move to the
2970 * next net */
2971 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2972 /* no more nets and no more NIs. */
2975 /* get the next net */
2976 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2978 if (list_empty(&net->net_ni_list))
2980 /* get the ni on it */
2981 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2987 if (list_empty(&prev->ni_netlist))
2990 /* there are more nis left */
2991 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
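/*
 * Illustrative usage sketch (not part of the original source): the
 * cursor-style iteration lnet_get_next_ni_locked() supports, assuming
 * a NULL net/NULL prev pair starts at the first NI, as the code above
 * implies. The matching lnet_net_lock() must be held throughout.
 */
#if 0
	struct lnet_ni *ni = NULL;

	while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL)
		CDEBUG(D_NET, "NI %s\n", libcfs_nid2str(ni->ni_nid));
#endif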
2997 lnet_get_net_config(struct lnet_ioctl_config_data *config)
3002 int idx = config->cfg_count;
3004 cpt = lnet_net_lock_current();
3006 ni = lnet_get_ni_idx_locked(idx);
3011 lnet_fill_ni_info_legacy(ni, config);
3015 lnet_net_unlock(cpt);
3020 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
3021 struct lnet_ioctl_config_lnd_tunables *tun,
3022 struct lnet_ioctl_element_stats *stats,
3029 if (!cfg_ni || !tun || !stats)
3032 cpt = lnet_net_lock_current();
3034 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
3039 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
3043 lnet_net_unlock(cpt);
3047 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
3056 cpt = lnet_net_lock_current();
3058 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
3061 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
3065 lnet_net_unlock(cpt);
3070 static int lnet_add_net_common(struct lnet_net *net,
3071 struct lnet_ioctl_config_lnd_tunables *tun)
3074 struct lnet_ping_buffer *pbuf;
3075 struct lnet_handle_md ping_mdh;
3077 struct lnet_remotenet *rnet;
3079 int num_acceptor_nets;
3081 lnet_net_lock(LNET_LOCK_EX);
3082 rnet = lnet_find_rnet_locked(net->net_id);
3083 lnet_net_unlock(LNET_LOCK_EX);
3085 * make sure that the net added doesn't invalidate the current
3086 * configuration LNet is keeping
3089 CERROR("Adding net %s will invalidate routing configuration\n",
3090 libcfs_net2str(net->net_id));
3096 * make sure you calculate the correct number of slots in the ping
3097 * buffer. Since the ping info is a flattened list of all the NIs,
3098 * we should allocate enough slots to accommodate the number of NIs
3099 * which will be added (the accounting is sketched after this function).
3101 * since the NIs haven't been configured yet, use
3102 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
3104 net_ni_count = lnet_get_net_ni_count_pre(net);
3106 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3107 net_ni_count + lnet_get_ni_count(),
3115 memcpy(&net->net_tunables,
3116 &tun->lt_cmn, sizeof(net->net_tunables));
3118 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3121 * before starting this network, get a count of the current TCP
3122 * networks which require the acceptor thread running. If that
3123 * count is 0 before we start up this network, then we'd want to
3124 * start up the acceptor thread after starting up this network
3126 num_acceptor_nets = lnet_count_acceptor_nets();
3128 net_id = net->net_id;
3130 rc = lnet_startup_lndnet(net,
3131 (tun) ? &tun->lt_tun : NULL);
3135 lnet_net_lock(LNET_LOCK_EX);
3136 net = lnet_get_net_locked(net_id);
3137 lnet_net_unlock(LNET_LOCK_EX);
3142 * Start the acceptor thread if this is the first network
3143 * being added that requires the thread.
3145 if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
3146 rc = lnet_acceptor_start();
3148 /* shutdown the net that we just started */
3149 CERROR("Failed to start up acceptor thread\n");
3150 lnet_shutdown_lndnet(net);
3155 lnet_net_lock(LNET_LOCK_EX);
3156 lnet_peer_net_added(net);
3157 lnet_net_unlock(LNET_LOCK_EX);
3159 lnet_ping_target_update(pbuf, ping_mdh);
3164 lnet_ping_md_unlink(pbuf, &ping_mdh);
3165 lnet_ping_buffer_decref(pbuf);
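/*
 * Illustrative sketch (not part of the original source): the ping
 * buffer slot accounting referred to above. The ping target is a
 * flattened list of every local NI, so the new buffer needs room for
 * the NIs already configured plus the NIs arriving with this net,
 * counted before startup from the net_ni_added list.
 */
#if 0
	int slots = lnet_get_net_ni_count_pre(net)	/* NIs being added */
		    + lnet_get_ni_count();		/* NIs already up */
#endif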
3169 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3170 struct lnet_ioctl_config_lnd_tunables *tun)
3172 struct lnet_net *net;
3175 struct list_head net_head;
3177 INIT_LIST_HEAD(&net_head);
3179 rc = lnet_parse_ip2nets(&nets, ip2nets);
3183 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3187 mutex_lock(&the_lnet.ln_api_mutex);
3188 while (!list_empty(&net_head)) {
3189 net = list_entry(net_head.next, struct lnet_net, net_list);
3190 list_del_init(&net->net_list);
3191 rc = lnet_add_net_common(net, tun);
3197 mutex_unlock(&the_lnet.ln_api_mutex);
3199 while (!list_empty(&net_head)) {
3200 net = list_entry(net_head.next, struct lnet_net, net_list);
3201 list_del_init(&net->net_list);
3207 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
3209 struct lnet_net *net;
3211 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3213 __u32 net_id, lnd_type;
3215 /* get the tunables if they are available */
3216 if (conf->lic_cfg_hdr.ioc_len >=
3217 sizeof(*conf) + sizeof(*tun))
3218 tun = (struct lnet_ioctl_config_lnd_tunables *)
3221 /* handle legacy ip2nets from DLC */
3222 if (conf->lic_legacy_ip2nets[0] != '\0')
3223 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3226 net_id = LNET_NIDNET(conf->lic_nid);
3227 lnd_type = LNET_NETTYP(net_id);
3229 if (!libcfs_isknown_lnd(lnd_type)) {
3230 CERROR("No valid net and lnd information provided\n");
3234 net = lnet_net_alloc(net_id, NULL);
3238 for (i = 0; i < conf->lic_ncpts; i++) {
3239 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
3243 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3244 conf->lic_ni_intf[0]);
3248 mutex_lock(&the_lnet.ln_api_mutex);
3250 rc = lnet_add_net_common(net, tun);
3252 mutex_unlock(&the_lnet.ln_api_mutex);
3257 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
3259 struct lnet_net *net;
3261 __u32 net_id = LNET_NIDNET(conf->lic_nid);
3262 struct lnet_ping_buffer *pbuf;
3263 struct lnet_handle_md ping_mdh;
3268 /* don't allow userspace to shutdown the LOLND */
3269 if (LNET_NETTYP(net_id) == LOLND)
3272 mutex_lock(&the_lnet.ln_api_mutex);
3276 net = lnet_get_net_locked(net_id);
3278 CERROR("net %s not found\n",
3279 libcfs_net2str(net_id));
3284 addr = LNET_NIDADDR(conf->lic_nid);
3286 /* remove the entire net */
3287 net_count = lnet_get_net_ni_count_locked(net);
3291 /* create and link a new ping info, before removing the old one */
3292 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3293 lnet_get_ni_count() - net_count,
3296 goto unlock_api_mutex;
3298 lnet_shutdown_lndnet(net);
3300 if (lnet_count_acceptor_nets() == 0)
3301 lnet_acceptor_stop();
3303 lnet_ping_target_update(pbuf, ping_mdh);
3305 goto unlock_api_mutex;
3308 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
3310 CERROR("nid %s not found\n",
3311 libcfs_nid2str(conf->lic_nid));
3316 net_count = lnet_get_net_ni_count_locked(net);
3320 /* create and link a new ping info, before removing the old one */
3321 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3322 lnet_get_ni_count() - 1, false);
3324 goto unlock_api_mutex;
3326 lnet_shutdown_lndni(ni);
3328 if (lnet_count_acceptor_nets() == 0)
3329 lnet_acceptor_stop();
3331 lnet_ping_target_update(pbuf, ping_mdh);
3333 /* check if the net is empty and remove it if it is */
3335 lnet_shutdown_lndnet(net);
3337 goto unlock_api_mutex;
3342 mutex_unlock(&the_lnet.ln_api_mutex);
3348 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3349 * They are only expected to be called for unique networks.
3350 * That can happen as a result of older DLC library
3351 * calls. Multi-Rail DLC and beyond no longer use these APIs.
3354 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3356 struct lnet_net *net;
3357 struct list_head net_head;
3359 struct lnet_ioctl_config_lnd_tunables tun;
3360 char *nets = conf->cfg_config_u.cfg_net.net_intf;
3362 INIT_LIST_HEAD(&net_head);
3364 /* Create net/NI structures for the network string */
3365 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3367 return rc == 0 ? -EINVAL : rc;
3369 mutex_lock(&the_lnet.ln_api_mutex);
3372 rc = -EINVAL; /* only add one network per call */
3373 goto out_unlock_clean;
3376 net = list_entry(net_head.next, struct lnet_net, net_list);
3377 list_del_init(&net->net_list);
3379 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3381 memset(&tun, 0, sizeof(tun));
3383 tun.lt_cmn.lct_peer_timeout =
3384 conf->cfg_config_u.cfg_net.net_peer_timeout;
3385 tun.lt_cmn.lct_peer_tx_credits =
3386 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3387 tun.lt_cmn.lct_peer_rtr_credits =
3388 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3389 tun.lt_cmn.lct_max_tx_credits =
3390 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3392 rc = lnet_add_net_common(net, &tun);
3395 mutex_unlock(&the_lnet.ln_api_mutex);
3396 while (!list_empty(&net_head)) {
3397 /* net_head list is empty in success case */
3398 net = list_entry(net_head.next, struct lnet_net, net_list);
3399 list_del_init(&net->net_list);
3406 lnet_dyn_del_net(__u32 net_id)
3408 struct lnet_net *net;
3409 struct lnet_ping_buffer *pbuf;
3410 struct lnet_handle_md ping_mdh;
3414 /* don't allow userspace to shutdown the LOLND */
3415 if (LNET_NETTYP(net_id) == LOLND)
3418 mutex_lock(&the_lnet.ln_api_mutex);
3422 net = lnet_get_net_locked(net_id);
3429 net_ni_count = lnet_get_net_ni_count_locked(net);
3433 /* create and link a new ping info, before removing the old one */
3434 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3435 lnet_get_ni_count() - net_ni_count, false);
3439 lnet_shutdown_lndnet(net);
3441 if (lnet_count_acceptor_nets() == 0)
3442 lnet_acceptor_stop();
3444 lnet_ping_target_update(pbuf, ping_mdh);
3447 mutex_unlock(&the_lnet.ln_api_mutex);
3452 void lnet_incr_dlc_seq(void)
3454 atomic_inc(&lnet_dlc_seq_no);
3457 __u32 lnet_get_dlc_seq_locked(void)
3459 return atomic_read(&lnet_dlc_seq_no);
3463 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3465 struct lnet_net *net;
3468 lnet_net_lock(LNET_LOCK_EX);
3469 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3470 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3471 if (ni->ni_nid == nid || all) {
3472 atomic_set(&ni->ni_healthv, value);
3473 if (list_empty(&ni->ni_recovery) &&
3474 value < LNET_MAX_HEALTH_VALUE) {
3475 CERROR("manually adding local NI %s to recovery\n",
3476 libcfs_nid2str(ni->ni_nid));
3477 list_add_tail(&ni->ni_recovery,
3478 &the_lnet.ln_mt_localNIRecovq);
3479 lnet_ni_addref_locked(ni, 0);
3482 lnet_net_unlock(LNET_LOCK_EX);
3488 lnet_net_unlock(LNET_LOCK_EX);
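/*
 * Illustrative sketch (not part of the original source): the core of
 * the manual health-value override above. Any NI set below
 * LNET_MAX_HEALTH_VALUE that is not already queued is placed on the
 * local-NI recovery queue, with a reference held for the queue, so the
 * monitor thread can ping it back to full health.
 */
#if 0
	atomic_set(&ni->ni_healthv, value);
	if (value < LNET_MAX_HEALTH_VALUE && list_empty(&ni->ni_recovery)) {
		list_add_tail(&ni->ni_recovery,
			      &the_lnet.ln_mt_localNIRecovq);
		lnet_ni_addref_locked(ni, 0);	/* recovery queue's ref */
	}
#endif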
3492 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3496 lnet_nid_t nid = stats->hlni_nid;
3498 cpt = lnet_net_lock_current();
3499 ni = lnet_nid2ni_locked(nid, cpt);
3506 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3507 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3508 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3509 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3510 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3511 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3512 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3515 lnet_net_unlock(cpt);
3521 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3526 lnet_net_lock(LNET_LOCK_EX);
3527 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
3528 list->rlst_nid_array[i] = ni->ni_nid;
3530 if (i >= LNET_MAX_SHOW_NUM_NID)
3533 lnet_net_unlock(LNET_LOCK_EX);
3534 list->rlst_num_nids = i;
3540 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3542 struct lnet_peer_ni *lpni;
3545 lnet_net_lock(LNET_LOCK_EX);
3546 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
3547 list->rlst_nid_array[i] = lpni->lpni_nid;
3549 if (i >= LNET_MAX_SHOW_NUM_NID)
3552 lnet_net_unlock(LNET_LOCK_EX);
3553 list->rlst_num_nids = i;
3559 * LNet ioctl handler.
3563 LNetCtl(unsigned int cmd, void *arg)
3565 struct libcfs_ioctl_data *data = arg;
3566 struct lnet_ioctl_config_data *config;
3567 struct lnet_process_id id = {0};
3571 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3572 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
3575 case IOC_LIBCFS_GET_NI:
3576 rc = LNetGetId(data->ioc_count, &id);
3577 data->ioc_nid = id.nid;
3580 case IOC_LIBCFS_FAIL_NID:
3581 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3583 case IOC_LIBCFS_ADD_ROUTE: {
3584 /* default router sensitivity to 1 */
3585 unsigned int sensitivity = 1;
3588 if (config->cfg_hdr.ioc_len < sizeof(*config))
3591 if (config->cfg_config_u.cfg_route.rtr_sensitivity) {
3593 config->cfg_config_u.cfg_route.rtr_sensitivity;
3596 mutex_lock(&the_lnet.ln_api_mutex);
3597 rc = lnet_add_route(config->cfg_net,
3598 config->cfg_config_u.cfg_route.rtr_hop,
3600 config->cfg_config_u.cfg_route.
3601 rtr_priority, sensitivity);
3602 mutex_unlock(&the_lnet.ln_api_mutex);
3606 case IOC_LIBCFS_DEL_ROUTE:
3609 if (config->cfg_hdr.ioc_len < sizeof(*config))
3612 mutex_lock(&the_lnet.ln_api_mutex);
3613 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3614 mutex_unlock(&the_lnet.ln_api_mutex);
3617 case IOC_LIBCFS_GET_ROUTE:
3620 if (config->cfg_hdr.ioc_len < sizeof(*config))
3623 mutex_lock(&the_lnet.ln_api_mutex);
3624 rc = lnet_get_route(config->cfg_count,
3626 &config->cfg_config_u.cfg_route.rtr_hop,
3628 &config->cfg_config_u.cfg_route.rtr_flags,
3629 &config->cfg_config_u.cfg_route.
3631 &config->cfg_config_u.cfg_route.
3633 mutex_unlock(&the_lnet.ln_api_mutex);
3636 case IOC_LIBCFS_GET_LOCAL_NI: {
3637 struct lnet_ioctl_config_ni *cfg_ni;
3638 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3639 struct lnet_ioctl_element_stats *stats;
3644 /* get the tunables if they are available */
3645 if (cfg_ni->lic_cfg_hdr.ioc_len <
3646 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3649 stats = (struct lnet_ioctl_element_stats *)
3651 tun = (struct lnet_ioctl_config_lnd_tunables *)
3652 (cfg_ni->lic_bulk + sizeof(*stats));
3654 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3657 mutex_lock(&the_lnet.ln_api_mutex);
3658 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3659 mutex_unlock(&the_lnet.ln_api_mutex);
3663 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3664 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3666 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3669 mutex_lock(&the_lnet.ln_api_mutex);
3670 rc = lnet_get_ni_stats(msg_stats);
3671 mutex_unlock(&the_lnet.ln_api_mutex);
3676 case IOC_LIBCFS_GET_NET: {
3677 size_t total = sizeof(*config) +
3678 sizeof(struct lnet_ioctl_net_config);
3681 if (config->cfg_hdr.ioc_len < total)
3684 mutex_lock(&the_lnet.ln_api_mutex);
3685 rc = lnet_get_net_config(config);
3686 mutex_unlock(&the_lnet.ln_api_mutex);
3690 case IOC_LIBCFS_GET_LNET_STATS:
3692 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3694 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3697 mutex_lock(&the_lnet.ln_api_mutex);
3698 lnet_counters_get(&lnet_stats->st_cntrs);
3699 mutex_unlock(&the_lnet.ln_api_mutex);
3703 case IOC_LIBCFS_CONFIG_RTR:
3706 if (config->cfg_hdr.ioc_len < sizeof(*config))
3709 mutex_lock(&the_lnet.ln_api_mutex);
3710 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3711 rc = lnet_rtrpools_enable();
3712 mutex_unlock(&the_lnet.ln_api_mutex);
3715 lnet_rtrpools_disable();
3716 mutex_unlock(&the_lnet.ln_api_mutex);
3719 case IOC_LIBCFS_ADD_BUF:
3722 if (config->cfg_hdr.ioc_len < sizeof(*config))
3725 mutex_lock(&the_lnet.ln_api_mutex);
3726 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3728 config->cfg_config_u.cfg_buffers.
3730 config->cfg_config_u.cfg_buffers.
3732 mutex_unlock(&the_lnet.ln_api_mutex);
3735 case IOC_LIBCFS_SET_NUMA_RANGE: {
3736 struct lnet_ioctl_set_value *numa;
3738 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3740 lnet_net_lock(LNET_LOCK_EX);
3741 lnet_numa_range = numa->sv_value;
3742 lnet_net_unlock(LNET_LOCK_EX);
3746 case IOC_LIBCFS_GET_NUMA_RANGE: {
3747 struct lnet_ioctl_set_value *numa;
3749 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3751 numa->sv_value = lnet_numa_range;
3755 case IOC_LIBCFS_GET_BUF: {
3756 struct lnet_ioctl_pool_cfg *pool_cfg;
3757 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3761 if (config->cfg_hdr.ioc_len < total)
3764 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3766 mutex_lock(&the_lnet.ln_api_mutex);
3767 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3768 mutex_unlock(&the_lnet.ln_api_mutex);
3772 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
3773 struct lnet_ioctl_local_ni_hstats *stats = arg;
3775 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
3778 mutex_lock(&the_lnet.ln_api_mutex);
3779 rc = lnet_get_local_ni_hstats(stats);
3780 mutex_unlock(&the_lnet.ln_api_mutex);
3785 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
3786 struct lnet_ioctl_recovery_list *list = arg;
3787 if (list->rlst_hdr.ioc_len < sizeof(*list))
3790 mutex_lock(&the_lnet.ln_api_mutex);
3791 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
3792 rc = lnet_get_local_ni_recovery_list(list);
3794 rc = lnet_get_peer_ni_recovery_list(list);
3795 mutex_unlock(&the_lnet.ln_api_mutex);
3799 case IOC_LIBCFS_ADD_PEER_NI: {
3800 struct lnet_ioctl_peer_cfg *cfg = arg;
3802 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3805 mutex_lock(&the_lnet.ln_api_mutex);
3806 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
3809 mutex_unlock(&the_lnet.ln_api_mutex);
3813 case IOC_LIBCFS_DEL_PEER_NI: {
3814 struct lnet_ioctl_peer_cfg *cfg = arg;
3816 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3819 mutex_lock(&the_lnet.ln_api_mutex);
3820 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3821 cfg->prcfg_cfg_nid);
3822 mutex_unlock(&the_lnet.ln_api_mutex);
3826 case IOC_LIBCFS_GET_PEER_INFO: {
3827 struct lnet_ioctl_peer *peer_info = arg;
3829 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3832 mutex_lock(&the_lnet.ln_api_mutex);
3833 rc = lnet_get_peer_ni_info(
3834 peer_info->pr_count,
3836 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3837 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3838 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3839 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3840 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3841 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3842 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3843 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3844 mutex_unlock(&the_lnet.ln_api_mutex);
3848 case IOC_LIBCFS_GET_PEER_NI: {
3849 struct lnet_ioctl_peer_cfg *cfg = arg;
3851 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3854 mutex_lock(&the_lnet.ln_api_mutex);
3855 rc = lnet_get_peer_info(cfg,
3856 (void __user *)cfg->prcfg_bulk);
3857 mutex_unlock(&the_lnet.ln_api_mutex);
3861 case IOC_LIBCFS_GET_PEER_LIST: {
3862 struct lnet_ioctl_peer_cfg *cfg = arg;
3864 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3867 mutex_lock(&the_lnet.ln_api_mutex);
3868 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
3869 (struct lnet_process_id __user *)cfg->prcfg_bulk);
3870 mutex_unlock(&the_lnet.ln_api_mutex);
3874 case IOC_LIBCFS_SET_HEALHV: {
3875 struct lnet_ioctl_reset_health_cfg *cfg = arg;
3877 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
3879 if (cfg->rh_value < 0 ||
3880 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
3881 value = LNET_MAX_HEALTH_VALUE;
3883 value = cfg->rh_value;
3884 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
3885 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
3886 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
3887 mutex_lock(&the_lnet.ln_api_mutex);
3888 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
3889 lnet_ni_set_healthv(cfg->rh_nid, value,
3892 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
3894 mutex_unlock(&the_lnet.ln_api_mutex);
3898 case IOC_LIBCFS_NOTIFY_ROUTER: {
3899 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
3901 /* The deadline passed in by the user should be some time in
3902 * seconds in the future since the UNIX epoch. We have to map
3903 * that deadline onto the monotonic clock (see the sketch below).
3905 deadline += ktime_get_seconds();
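		/*
		 * Illustrative sketch (not part of the original source): the
		 * textbook clock-domain translation, offered for orientation
		 * rather than as a restatement of the exact expression above.
		 * Both clocks tick at the same rate, so only their offset
		 * matters:
		 *
		 *	deadline_mono = deadline_real - now_real + now_mono
		 *
		 * Variable names below are hypothetical.
		 */
#if 0
		time64_t now_real = ktime_get_real_seconds();
		time64_t now_mono = ktime_get_seconds();
		time64_t deadline_mono = data->ioc_u64[0] - now_real +
					 now_mono;
#endif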
3906 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, false,
3910 case IOC_LIBCFS_LNET_DIST:
3911 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
3912 if (rc < 0 && rc != -EHOSTUNREACH)
3915 data->ioc_u32[0] = rc;
3918 case IOC_LIBCFS_TESTPROTOCOMPAT:
3919 the_lnet.ln_testprotocompat = data->ioc_flags;
3922 case IOC_LIBCFS_LNET_FAULT:
3923 return lnet_fault_ctl(data->ioc_flags, data);
3925 case IOC_LIBCFS_PING: {
3926 signed long timeout;
3928 id.nid = data->ioc_nid;
3929 id.pid = data->ioc_u32[0];
3931 /* If the timeout is out of range (<= 0 or > DEFAULT_PEER_TIMEOUT) use the default of 3 minutes */
3932 if (((s32)data->ioc_u32[1] <= 0) ||
3933 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3934 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
3936 timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC);
3938 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3939 data->ioc_plen1 / sizeof(struct lnet_process_id));
3944 data->ioc_count = rc;
3948 case IOC_LIBCFS_PING_PEER: {
3949 struct lnet_ioctl_ping_data *ping = arg;
3950 struct lnet_peer *lp;
3951 signed long timeout;
3953 /* If the timeout is out of range (<= 0 or > DEFAULT_PEER_TIMEOUT) use the default of 3 minutes */
3954 if (((s32)ping->op_param) <= 0 ||
3955 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3956 timeout = cfs_time_seconds(DEFAULT_PEER_TIMEOUT);
3958 timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC);
3960 rc = lnet_ping(ping->ping_id, timeout,
3966 mutex_lock(&the_lnet.ln_api_mutex);
3967 lp = lnet_find_peer(ping->ping_id.nid);
3969 ping->ping_id.nid = lp->lp_primary_nid;
3970 ping->mr_info = lnet_peer_is_multi_rail(lp);
3971 lnet_peer_decref_locked(lp);
3973 mutex_unlock(&the_lnet.ln_api_mutex);
3975 ping->ping_count = rc;
3979 case IOC_LIBCFS_DISCOVER: {
3980 struct lnet_ioctl_ping_data *discover = arg;
3981 struct lnet_peer *lp;
3983 rc = lnet_discover(discover->ping_id, discover->op_param,
3985 discover->ping_count);
3989 mutex_lock(&the_lnet.ln_api_mutex);
3990 lp = lnet_find_peer(discover->ping_id.nid);
3992 discover->ping_id.nid = lp->lp_primary_nid;
3993 discover->mr_info = lnet_peer_is_multi_rail(lp);
3994 lnet_peer_decref_locked(lp);
3996 mutex_unlock(&the_lnet.ln_api_mutex);
3998 discover->ping_count = rc;
4003 ni = lnet_net2ni_addref(data->ioc_net);
4007 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
4010 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
4017 EXPORT_SYMBOL(LNetCtl);
4019 void LNetDebugPeer(struct lnet_process_id id)
4021 lnet_debug_peer(id.nid);
4023 EXPORT_SYMBOL(LNetDebugPeer);
4026 * Determine if the specified peer \a nid is on the local node.
4028 * \param nid peer nid to check
4030 * \retval true If peer NID is on the local node.
4031 * \retval false If peer NID is not on the local node.
4033 bool LNetIsPeerLocal(lnet_nid_t nid)
4035 struct lnet_net *net;
4039 cpt = lnet_net_lock_current();
4040 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4041 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4042 if (ni->ni_nid == nid) {
4043 lnet_net_unlock(cpt);
4048 lnet_net_unlock(cpt);
4052 EXPORT_SYMBOL(LNetIsPeerLocal);
4055 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
4056 * Note that all interfaces share a same PID, as requested by LNetNIInit().
4058 * \param index Index of the interface to look up.
4059 * \param id On successful return, this location will hold the
4060 * struct lnet_process_id ID of the interface.
4062 * \retval 0 If an interface exists at \a index.
4063 * \retval -ENOENT If no interface has been found.
4066 LNetGetId(unsigned int index, struct lnet_process_id *id)
4069 struct lnet_net *net;
4073 LASSERT(the_lnet.ln_refcount > 0);
4075 cpt = lnet_net_lock_current();
4077 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4078 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4082 id->nid = ni->ni_nid;
4083 id->pid = the_lnet.ln_pid;
4089 lnet_net_unlock(cpt);
4092 EXPORT_SYMBOL(LNetGetId);
4094 static int lnet_ping(struct lnet_process_id id, signed long timeout,
4095 struct lnet_process_id __user *ids, int n_ids)
4097 struct lnet_handle_eq eqh;
4098 struct lnet_handle_md mdh;
4099 struct lnet_event event;
4100 struct lnet_md md = { NULL };
4104 const signed long a_long_time = cfs_time_seconds(60);
4105 struct lnet_ping_buffer *pbuf;
4106 struct lnet_process_id tmpid;
4113 /* n_ids limit is arbitrary */
4114 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
4118 * if the user buffer has more space than lnet_interfaces_max
4119 * entries, then only fill it up to lnet_interfaces_max
4121 if (n_ids > lnet_interfaces_max)
4122 n_ids = lnet_interfaces_max;
4124 if (id.pid == LNET_PID_ANY)
4125 id.pid = LNET_PID_LUSTRE;
4127 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
4131 /* NB 2 events max (including any unlink event) */
4132 rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
4134 CERROR("Can't allocate EQ: %d\n", rc);
4135 goto fail_ping_buffer_decref;
4138 /* initialize md content */
4139 md.start = &pbuf->pb_info;
4140 md.length = LNET_PING_INFO_SIZE(n_ids);
4141 md.threshold = 2; /* GET/REPLY */
4143 md.options = LNET_MD_TRUNCATE;
4147 rc = LNetMDBind(md, LNET_UNLINK, &mdh);
4149 CERROR("Can't bind MD: %d\n", rc);
4153 rc = LNetGet(LNET_NID_ANY, mdh, id,
4154 LNET_RESERVED_PORTAL,
4155 LNET_PROTO_PING_MATCHBITS, 0, false);
4158 /* Don't CERROR; this could be deliberate! */
4159 rc2 = LNetMDUnlink(mdh);
4162 /* NB must wait for the UNLINK event below... */
4164 timeout = a_long_time;
4168 /* MUST block for unlink to complete */
4170 blocked = cfs_block_allsigs();
4172 rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
4175 cfs_restore_sigs(blocked);
4177 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
4178 (rc2 <= 0) ? -1 : event.type,
4179 (rc2 <= 0) ? -1 : event.status,
4180 (rc2 > 0 && event.unlinked) ? " unlinked" : "");
4182 LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
4184 if (rc2 <= 0 || event.status != 0) {
4185 /* timeout or error */
4186 if (!replied && rc == 0)
4187 rc = (rc2 < 0) ? rc2 :
4188 (rc2 == 0) ? -ETIMEDOUT :
4192 /* Ensure completion in finite time... */
4194 /* No assertion (racing with network) */
4196 timeout = a_long_time;
4197 } else if (rc2 == 0) {
4198 /* timed out waiting for unlink */
4199 CWARN("ping %s: late network completion\n",
4202 } else if (event.type == LNET_EVENT_REPLY) {
4206 } while (rc2 <= 0 || !event.unlinked);
4210 CWARN("%s: Unexpected rc >= 0 but no reply!\n",
4217 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
4219 rc = -EPROTO; /* if I can't parse... */
4222 CERROR("%s: ping info too short %d\n",
4223 libcfs_id2str(id), nob);
4227 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
4228 lnet_swap_pinginfo(pbuf);
4229 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
4230 CERROR("%s: Unexpected magic %08x\n",
4231 libcfs_id2str(id), pbuf->pb_info.pi_magic);
4235 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
4236 CERROR("%s: ping w/o NI status: 0x%x\n",
4237 libcfs_id2str(id), pbuf->pb_info.pi_features);
4241 if (nob < LNET_PING_INFO_SIZE(0)) {
4242 CERROR("%s: Short reply %d(%d min)\n",
4244 nob, (int)LNET_PING_INFO_SIZE(0));
4248 if (pbuf->pb_info.pi_nnis < n_ids)
4249 n_ids = pbuf->pb_info.pi_nnis;
4251 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
4252 CERROR("%s: Short reply %d(%d expected)\n",
4254 nob, (int)LNET_PING_INFO_SIZE(n_ids));
4258 rc = -EFAULT; /* if I segv in copy_to_user()... */
4260 memset(&tmpid, 0, sizeof(tmpid));
4261 for (i = 0; i < n_ids; i++) {
4262 tmpid.pid = pbuf->pb_info.pi_pid;
4263 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
4264 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
4267 rc = pbuf->pb_info.pi_nnis;
4270 rc2 = LNetEQFree(eqh);
4272 CERROR("rc2 %d\n", rc2);
4275 fail_ping_buffer_decref:
4276 lnet_ping_buffer_decref(pbuf);
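/*
 * Illustrative outline (not part of the original source): the wire flow
 * lnet_ping() implements above, reduced to its steps.
 *
 *  1. allocate a ping buffer sized for n_ids NIs
 *  2. LNetEQAlloc() an EQ and LNetMDBind() the buffer (threshold 2 for
 *     the GET send event plus the REPLY, LNET_MD_TRUNCATE so a short
 *     buffer is tolerated)
 *  3. LNetGet() to the peer's LNET_RESERVED_PORTAL with
 *     LNET_PROTO_PING_MATCHBITS
 *  4. LNetEQPoll() until both the REPLY and the UNLINK event have been
 *     seen, unlinking manually on timeout
 *  5. validate pi_magic (byte-swapping if required) and the
 *     LNET_PING_FEAT_NI_STATUS feature bit, then check the reply length
 *  6. copy one lnet_process_id per NI to userspace and return pi_nnis
 */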
4281 lnet_discover(struct lnet_process_id id, __u32 force,
4282 struct lnet_process_id __user *ids, int n_ids)
4284 struct lnet_peer_ni *lpni;
4285 struct lnet_peer_ni *p;
4286 struct lnet_peer *lp;
4287 struct lnet_process_id *buf;
4291 int max_intf = lnet_interfaces_max;
4295 id.nid == LNET_NID_ANY)
4298 if (id.pid == LNET_PID_ANY)
4299 id.pid = LNET_PID_LUSTRE;
4302 * if the user buffer has more space than max_intf
4303 * entries, then only fill it up to max_intf
4305 if (n_ids > max_intf)
4308 buf_size = n_ids * sizeof(*buf);
4310 LIBCFS_ALLOC(buf, buf_size);
4314 cpt = lnet_net_lock_current();
4315 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
4322 * Clearing the NIDS_UPTODATE flag ensures the peer will
4323 * be discovered, provided discovery has not been disabled.
4325 lp = lpni->lpni_peer_net->lpn_peer;
4326 spin_lock(&lp->lp_lock);
4327 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
4328 /* If the force flag is set, force a PING and PUSH as well. */
4330 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
4331 spin_unlock(&lp->lp_lock);
4332 rc = lnet_discover_peer_locked(lpni, cpt, true);
4336 /* Peer may have changed. */
4337 lp = lpni->lpni_peer_net->lpn_peer;
4338 if (lp->lp_nnis < n_ids)
4339 n_ids = lp->lp_nnis;
4343 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
4344 buf[i].pid = id.pid;
4345 buf[i].nid = p->lpni_nid;
4350 lnet_net_unlock(cpt);
4353 if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
4359 lnet_peer_ni_decref_locked(lpni);
4361 lnet_net_unlock(cpt);
4363 LIBCFS_FREE(buf, buf_size);
4369 * Retrieve peer discovery status.
4371 * \retval 1 if lnet_peer_discovery_disabled is 0
4372 * \retval 0 if lnet_peer_discovery_disabled is 1
4375 LNetGetPeerDiscoveryStatus(void)
4377 return !lnet_peer_discovery_disabled;
4379 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);