4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LNET
35 #include <linux/ctype.h>
36 #include <linux/log2.h>
37 #include <linux/ktime.h>
38 #include <linux/moduleparam.h>
39 #include <linux/uaccess.h>
41 #include <lnet/lib-lnet.h>
43 #define D_LNI D_CONSOLE
46 * initialize ln_api_mutex statically, since it needs to be used in
47 * discovery_set callback. That module parameter callback can be called
48 * before module init completes. The mutex needs to be ready for use then.
50 struct lnet the_lnet = {
51 .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
52 }; /* THE state of the network */
53 EXPORT_SYMBOL(the_lnet);
55 static char *ip2nets = "";
56 module_param(ip2nets, charp, 0444);
57 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
59 static char *networks = "";
60 module_param(networks, charp, 0444);
61 MODULE_PARM_DESC(networks, "local networks");
63 static char *routes = "";
64 module_param(routes, charp, 0444);
65 MODULE_PARM_DESC(routes, "routes to non-local networks");
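/*
 * Illustrative load-time usage of the string parameters above; the exact
 * values are hypothetical examples, not defaults:
 *
 *   modprobe lnet networks="tcp0(eth0)"
 *   modprobe lnet ip2nets="tcp0 10.10.0.*"
 *   modprobe lnet routes="o2ib0 10.10.0.1@tcp0"
 *
 * 'networks' and 'ip2nets' are mutually exclusive (see lnet_get_networks()
 * below); 'routes' names the gateways used to reach non-local networks.
 */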
67 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
68 module_param(rnet_htable_size, int, 0444);
69 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
71 static int use_tcp_bonding = false;
72 module_param(use_tcp_bonding, int, 0444);
73 MODULE_PARM_DESC(use_tcp_bonding,
74 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
76 unsigned int lnet_numa_range = 0;
77 module_param(lnet_numa_range, uint, 0444);
78 MODULE_PARM_DESC(lnet_numa_range,
79 "NUMA range to consider during Multi-Rail selection");
82 * lnet_health_sensitivity determines by how much we decrement the health
83 * value on sending error. The value defaults to 0, which means health
84 * checking is turned off by default.
86 unsigned int lnet_health_sensitivity = 0;
87 static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
88 #ifdef HAVE_KERNEL_PARAM_OPS
89 static struct kernel_param_ops param_ops_health_sensitivity = {
90 .set = sensitivity_set,
93 #define param_check_health_sensitivity(name, p) \
94 __param_check(name, p, int)
95 module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
97 module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
98 &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
100 MODULE_PARM_DESC(lnet_health_sensitivity,
101 "Value to decrement the health value by on error");
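/*
 * Runtime tuning sketch, assuming the standard module-parameter sysfs
 * layout (the parameter is root-writable, S_IRUGO|S_IWUSR):
 *
 *   echo 100 > /sys/module/lnet/parameters/lnet_health_sensitivity
 *
 * sensitivity_set() below only applies the change while LNet is in
 * LNET_STATE_RUNNING and rejects values above LNET_MAX_HEALTH_VALUE.
 */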
104 * lnet_recovery_interval determines how often we should perform recovery
105 * on unhealthy interfaces.
107 unsigned int lnet_recovery_interval = 1;
108 static int recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp);
109 #ifdef HAVE_KERNEL_PARAM_OPS
110 static struct kernel_param_ops param_ops_recovery_interval = {
111 .set = recovery_interval_set,
112 .get = param_get_int,
114 #define param_check_recovery_interval(name, p) \
115 __param_check(name, p, int)
116 module_param(lnet_recovery_interval, recovery_interval, S_IRUGO|S_IWUSR);
118 module_param_call(lnet_recovery_interval, recovery_interval_set, param_get_int,
119 &lnet_recovery_interval, S_IRUGO|S_IWUSR);
121 MODULE_PARM_DESC(lnet_recovery_interval,
122 "Interval to recover unhealthy interfaces in seconds");
124 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
125 static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
127 static struct kernel_param_ops param_ops_interfaces_max = {
129 .get = param_get_int,
132 #define param_check_interfaces_max(name, p) \
133 __param_check(name, p, int)
135 #ifdef HAVE_KERNEL_PARAM_OPS
136 module_param(lnet_interfaces_max, interfaces_max, 0644);
138 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
139 &param_ops_interfaces_max, 0644);
141 MODULE_PARM_DESC(lnet_interfaces_max,
142 "Maximum number of interfaces in a node.");
144 unsigned lnet_peer_discovery_disabled = 0;
145 static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);
147 static struct kernel_param_ops param_ops_discovery_disabled = {
148 .set = discovery_set,
149 .get = param_get_int,
152 #define param_check_discovery_disabled(name, p) \
153 __param_check(name, p, int)
154 #ifdef HAVE_KERNEL_PARAM_OPS
155 module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
157 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
158 &param_ops_discovery_disabled, 0644);
160 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
161 "Set to 1 to disable peer discovery on this node.");
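/*
 * With 0644 permissions this can also be toggled at runtime (for example
 * through /sys/module/lnet/parameters/lnet_peer_discovery_disabled,
 * assuming the standard module-parameter sysfs layout); discovery_set()
 * below flips LNET_PING_FEAT_DISCOVERY in the ping target and pushes the
 * new setting to peers.
 */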
163 unsigned int lnet_drop_asym_route;
164 static int drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp);
166 static struct kernel_param_ops param_ops_drop_asym_route = {
167 .set = drop_asym_route_set,
168 .get = param_get_int,
171 #define param_check_drop_asym_route(name, p) \
172 __param_check(name, p, int)
173 #ifdef HAVE_KERNEL_PARAM_OPS
174 module_param(lnet_drop_asym_route, drop_asym_route, 0644);
176 module_param_call(lnet_drop_asym_route, drop_asym_route_set, param_get_int,
177 &param_ops_drop_asym_route, 0644);
179 MODULE_PARM_DESC(lnet_drop_asym_route,
180 "Set to 1 to drop asymmetrical route messages.");
182 unsigned lnet_transaction_timeout = 50;
183 static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
184 #ifdef HAVE_KERNEL_PARAM_OPS
185 static struct kernel_param_ops param_ops_transaction_timeout = {
186 .set = transaction_to_set,
187 .get = param_get_int,
190 #define param_check_transaction_timeout(name, p) \
191 __param_check(name, p, int)
192 module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
194 module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
195 &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
197 MODULE_PARM_DESC(lnet_transaction_timeout,
198 "Maximum number of seconds to wait for a peer response.");
200 unsigned lnet_retry_count = 0;
201 static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
202 #ifdef HAVE_KERNEL_PARAM_OPS
203 static struct kernel_param_ops param_ops_retry_count = {
204 .set = retry_count_set,
205 .get = param_get_int,
208 #define param_check_retry_count(name, p) \
209 __param_check(name, p, int)
210 module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
212 module_param_call(lnet_retry_count, retry_count_set, param_get_int,
213 &lnet_retry_count, S_IRUGO|S_IWUSR);
215 MODULE_PARM_DESC(lnet_retry_count,
216 "Maximum number of times to retry transmitting a message");
219 unsigned lnet_lnd_timeout = LNET_LND_DEFAULT_TIMEOUT;
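/*
 * lnet_lnd_timeout is not set directly; transaction_to_set() and
 * retry_count_set() derive it as lnet_transaction_timeout / lnet_retry_count
 * (or the full transaction timeout when retries are disabled).
 */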
222 * This sequence number keeps track of how many times DLC was used to
223 * update the local NIs. It is incremented when a NI is added or
224 * removed and checked when sending a message to determine if there is
225 * a need to re-run the selection algorithm. See lnet_select_pathway()
226 * for more details on its usage.
228 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
230 static int lnet_ping(struct lnet_process_id id, signed long timeout,
231 struct lnet_process_id __user *ids, int n_ids);
233 static int lnet_discover(struct lnet_process_id id, __u32 force,
234 struct lnet_process_id __user *ids, int n_ids);
237 sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
240 unsigned *sensitivity = (unsigned *)kp->arg;
243 rc = kstrtoul(val, 0, &value);
245 CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
250 * The purpose of locking the api_mutex here is to ensure that
251 * the correct value ends up stored properly.
253 mutex_lock(&the_lnet.ln_api_mutex);
255 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
256 mutex_unlock(&the_lnet.ln_api_mutex);
260 if (value > LNET_MAX_HEALTH_VALUE) {
261 mutex_unlock(&the_lnet.ln_api_mutex);
262 CERROR("Invalid health value. Maximum: %d value = %lu\n",
263 LNET_MAX_HEALTH_VALUE, value);
267 *sensitivity = value;
269 mutex_unlock(&the_lnet.ln_api_mutex);
275 recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp)
278 unsigned *interval = (unsigned *)kp->arg;
281 rc = kstrtoul(val, 0, &value);
283 CERROR("Invalid module parameter value for 'lnet_recovery_interval'\n");
288 CERROR("lnet_recovery_interval must be at least 1 second\n");
293 * The purpose of locking the api_mutex here is to ensure that
294 * the correct value ends up stored properly.
296 mutex_lock(&the_lnet.ln_api_mutex);
298 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
299 mutex_unlock(&the_lnet.ln_api_mutex);
305 mutex_unlock(&the_lnet.ln_api_mutex);
311 discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
314 unsigned *discovery = (unsigned *)kp->arg;
316 struct lnet_ping_buffer *pbuf;
318 rc = kstrtoul(val, 0, &value);
320 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
324 value = (value) ? 1 : 0;
327 * The purpose of locking the api_mutex here is to ensure that
328 * the correct value ends up stored properly.
330 mutex_lock(&the_lnet.ln_api_mutex);
332 if (value == *discovery) {
333 mutex_unlock(&the_lnet.ln_api_mutex);
339 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
340 mutex_unlock(&the_lnet.ln_api_mutex);
344 /* tell peers that discovery setting has changed */
345 lnet_net_lock(LNET_LOCK_EX);
346 pbuf = the_lnet.ln_ping_target;
348 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
350 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
351 lnet_net_unlock(LNET_LOCK_EX);
353 lnet_push_update_to_peers(1);
355 mutex_unlock(&the_lnet.ln_api_mutex);
361 drop_asym_route_set(const char *val, cfs_kernel_param_arg_t *kp)
364 unsigned int *drop_asym_route = (unsigned int *)kp->arg;
367 rc = kstrtoul(val, 0, &value);
369 CERROR("Invalid module parameter value for "
370 "'lnet_drop_asym_route'\n");
375 * The purpose of locking the api_mutex here is to ensure that
376 * the correct value ends up stored properly.
378 mutex_lock(&the_lnet.ln_api_mutex);
380 if (value == *drop_asym_route) {
381 mutex_unlock(&the_lnet.ln_api_mutex);
385 *drop_asym_route = value;
387 mutex_unlock(&the_lnet.ln_api_mutex);
393 transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
396 unsigned *transaction_to = (unsigned *)kp->arg;
399 rc = kstrtoul(val, 0, &value);
401 CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
406 * The purpose of locking the api_mutex here is to ensure that
407 * the correct value ends up stored properly.
409 mutex_lock(&the_lnet.ln_api_mutex);
411 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
412 mutex_unlock(&the_lnet.ln_api_mutex);
416 if (value < lnet_retry_count || value == 0) {
417 mutex_unlock(&the_lnet.ln_api_mutex);
418 CERROR("Invalid value for lnet_transaction_timeout (%lu). "
419 "Has to be greater than lnet_retry_count (%u)\n",
420 value, lnet_retry_count);
424 if (value == *transaction_to) {
425 mutex_unlock(&the_lnet.ln_api_mutex);
429 *transaction_to = value;
430 if (lnet_retry_count == 0)
431 lnet_lnd_timeout = value;
433 lnet_lnd_timeout = value / lnet_retry_count;
435 mutex_unlock(&the_lnet.ln_api_mutex);
441 retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
444 unsigned *retry_count = (unsigned *)kp->arg;
447 rc = kstrtoul(val, 0, &value);
449 CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
454 * The purpose of locking the api_mutex here is to ensure that
455 * the correct value ends up stored properly.
457 mutex_lock(&the_lnet.ln_api_mutex);
459 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
460 mutex_unlock(&the_lnet.ln_api_mutex);
464 if (value > lnet_transaction_timeout) {
465 mutex_unlock(&the_lnet.ln_api_mutex);
466 CERROR("Invalid value for lnet_retry_count (%lu). "
467 "Has to be smaller than lnet_transaction_timeout (%u)\n",
468 value, lnet_transaction_timeout);
472 if (value == *retry_count) {
473 mutex_unlock(&the_lnet.ln_api_mutex);
477 *retry_count = value;
480 lnet_lnd_timeout = lnet_transaction_timeout;
482 lnet_lnd_timeout = lnet_transaction_timeout / value;
484 mutex_unlock(&the_lnet.ln_api_mutex);
490 intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
494 rc = kstrtoint(val, 0, &value);
496 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
500 if (value < LNET_INTERFACES_MIN) {
501 CWARN("provided lnet_interfaces_max is too small, setting to %d\n",
502 LNET_INTERFACES_MAX_DEFAULT);
503 value = LNET_INTERFACES_MAX_DEFAULT;
506 *(int *)kp->arg = value;
512 lnet_get_routes(void)
518 lnet_get_networks(void)
523 if (*networks != 0 && *ip2nets != 0) {
524 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
525 "'ip2nets' but not both at once\n");
530 rc = lnet_parse_ip2nets(&nets, ip2nets);
531 return (rc == 0) ? nets : NULL;
541 lnet_init_locks(void)
543 spin_lock_init(&the_lnet.ln_eq_wait_lock);
544 spin_lock_init(&the_lnet.ln_msg_resend_lock);
545 init_waitqueue_head(&the_lnet.ln_eq_waitq);
546 init_waitqueue_head(&the_lnet.ln_mt_waitq);
547 mutex_init(&the_lnet.ln_lnd_mutex);
551 lnet_fini_locks(void)
555 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
556 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
560 lnet_descriptor_setup(void)
562 /* create specific kmem_cache for MEs and small MDs (i.e., originally
563 * allocated in <size-xxx> kmem_cache).
565 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
567 if (!lnet_mes_cachep)
570 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
571 LNET_SMALL_MD_SIZE, 0, 0,
573 if (!lnet_small_mds_cachep)
580 lnet_descriptor_cleanup(void)
583 if (lnet_small_mds_cachep) {
584 kmem_cache_destroy(lnet_small_mds_cachep);
585 lnet_small_mds_cachep = NULL;
588 if (lnet_mes_cachep) {
589 kmem_cache_destroy(lnet_mes_cachep);
590 lnet_mes_cachep = NULL;
595 lnet_create_remote_nets_table(void)
598 struct list_head *hash;
600 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
601 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
602 LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
604 CERROR("Failed to create remote nets hash table\n");
608 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
609 INIT_LIST_HEAD(&hash[i]);
610 the_lnet.ln_remote_nets_hash = hash;
615 lnet_destroy_remote_nets_table(void)
619 if (the_lnet.ln_remote_nets_hash == NULL)
622 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
623 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
625 LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
626 LNET_REMOTE_NETS_HASH_SIZE *
627 sizeof(the_lnet.ln_remote_nets_hash[0]));
628 the_lnet.ln_remote_nets_hash = NULL;
632 lnet_destroy_locks(void)
634 if (the_lnet.ln_res_lock != NULL) {
635 cfs_percpt_lock_free(the_lnet.ln_res_lock);
636 the_lnet.ln_res_lock = NULL;
639 if (the_lnet.ln_net_lock != NULL) {
640 cfs_percpt_lock_free(the_lnet.ln_net_lock);
641 the_lnet.ln_net_lock = NULL;
648 lnet_create_locks(void)
652 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
653 if (the_lnet.ln_res_lock == NULL)
656 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
657 if (the_lnet.ln_net_lock == NULL)
663 lnet_destroy_locks();
667 static void lnet_assert_wire_constants(void)
669 /* Wire protocol assertions generated by 'wirecheck'
670 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
671 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
672 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
675 CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
676 CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
677 CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
678 CLASSERT(LNET_MSG_ACK == 0);
679 CLASSERT(LNET_MSG_PUT == 1);
680 CLASSERT(LNET_MSG_GET == 2);
681 CLASSERT(LNET_MSG_REPLY == 3);
682 CLASSERT(LNET_MSG_HELLO == 4);
684 /* Checks for struct lnet_handle_wire */
685 CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
686 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
687 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
688 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
689 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
691 /* Checks for struct lnet_magicversion */
692 CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
693 CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
694 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
695 CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
696 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
697 CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
698 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
700 /* Checks for struct lnet_hdr */
701 CLASSERT((int)sizeof(struct lnet_hdr) == 72);
702 CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
703 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
704 CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
705 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
706 CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
707 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
708 CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
709 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
710 CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
711 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
712 CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
713 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
714 CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
715 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
718 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
719 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
720 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
721 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
722 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
723 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
726 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
727 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
728 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
729 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
730 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
731 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
732 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
733 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
734 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
735 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
738 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
739 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
740 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
741 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
742 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
743 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
744 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
745 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
746 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
747 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
750 CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
751 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
754 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
755 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
756 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
757 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
759 /* Checks for struct lnet_ni_status and related constants */
760 CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
761 CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
762 CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
764 /* Checks for struct lnet_ni_status */
765 CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
766 CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
767 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
768 CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
769 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
770 CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
771 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
773 /* Checks for struct lnet_ping_info and related constants */
774 CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
775 CLASSERT(LNET_PING_FEAT_INVAL == 0);
776 CLASSERT(LNET_PING_FEAT_BASE == 1);
777 CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
778 CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
779 CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
780 CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
781 CLASSERT(LNET_PING_FEAT_BITS == 31);
783 /* Checks for struct lnet_ping_info */
784 CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
785 CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
786 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
787 CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
788 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
789 CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
790 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
791 CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
792 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
793 CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
794 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
797 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
799 struct lnet_lnd *lnd;
800 struct list_head *tmp;
802 /* holding lnd mutex */
803 list_for_each(tmp, &the_lnet.ln_lnds) {
804 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
806 if (lnd->lnd_type == type)
813 lnet_get_lnd_timeout(void)
815 return lnet_lnd_timeout;
817 EXPORT_SYMBOL(lnet_get_lnd_timeout);
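/*
 * LND registration: each LND module (e.g. socklnd, o2iblnd) registers
 * itself here from its module init and unregisters on unload; only the
 * LOLND is registered directly by lnet_lib_init() below.
 */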
820 lnet_register_lnd(struct lnet_lnd *lnd)
822 mutex_lock(&the_lnet.ln_lnd_mutex);
824 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
825 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
827 list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
828 lnd->lnd_refcount = 0;
830 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
832 mutex_unlock(&the_lnet.ln_lnd_mutex);
834 EXPORT_SYMBOL(lnet_register_lnd);
837 lnet_unregister_lnd(struct lnet_lnd *lnd)
839 mutex_lock(&the_lnet.ln_lnd_mutex);
841 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
842 LASSERT(lnd->lnd_refcount == 0);
844 list_del(&lnd->lnd_list);
845 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
847 mutex_unlock(&the_lnet.ln_lnd_mutex);
849 EXPORT_SYMBOL(lnet_unregister_lnd);
852 lnet_counters_get_common(struct lnet_counters_common *common)
854 struct lnet_counters *ctr;
857 memset(common, 0, sizeof(*common));
859 lnet_net_lock(LNET_LOCK_EX);
861 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
862 common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
863 common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
864 common->lcc_errors += ctr->lct_common.lcc_errors;
865 common->lcc_send_count += ctr->lct_common.lcc_send_count;
866 common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
867 common->lcc_route_count += ctr->lct_common.lcc_route_count;
868 common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
869 common->lcc_send_length += ctr->lct_common.lcc_send_length;
870 common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
871 common->lcc_route_length += ctr->lct_common.lcc_route_length;
872 common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
874 lnet_net_unlock(LNET_LOCK_EX);
876 EXPORT_SYMBOL(lnet_counters_get_common);
879 lnet_counters_get(struct lnet_counters *counters)
881 struct lnet_counters *ctr;
882 struct lnet_counters_health *health = &counters->lct_health;
885 memset(counters, 0, sizeof(*counters));
887 lnet_counters_get_common(&counters->lct_common);
889 lnet_net_lock(LNET_LOCK_EX);
891 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
892 health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
893 health->lch_resend_count += ctr->lct_health.lch_resend_count;
894 health->lch_response_timeout_count +=
895 ctr->lct_health.lch_response_timeout_count;
896 health->lch_local_interrupt_count +=
897 ctr->lct_health.lch_local_interrupt_count;
898 health->lch_local_dropped_count +=
899 ctr->lct_health.lch_local_dropped_count;
900 health->lch_local_aborted_count +=
901 ctr->lct_health.lch_local_aborted_count;
902 health->lch_local_no_route_count +=
903 ctr->lct_health.lch_local_no_route_count;
904 health->lch_local_timeout_count +=
905 ctr->lct_health.lch_local_timeout_count;
906 health->lch_local_error_count +=
907 ctr->lct_health.lch_local_error_count;
908 health->lch_remote_dropped_count +=
909 ctr->lct_health.lch_remote_dropped_count;
910 health->lch_remote_error_count +=
911 ctr->lct_health.lch_remote_error_count;
912 health->lch_remote_timeout_count +=
913 ctr->lct_health.lch_remote_timeout_count;
914 health->lch_network_timeout_count +=
915 ctr->lct_health.lch_network_timeout_count;
917 lnet_net_unlock(LNET_LOCK_EX);
919 EXPORT_SYMBOL(lnet_counters_get);
922 lnet_counters_reset(void)
924 struct lnet_counters *counters;
927 lnet_net_lock(LNET_LOCK_EX);
929 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
930 memset(counters, 0, sizeof(struct lnet_counters));
932 lnet_net_unlock(LNET_LOCK_EX);
936 lnet_res_type2str(int type)
941 case LNET_COOKIE_TYPE_MD:
943 case LNET_COOKIE_TYPE_ME:
945 case LNET_COOKIE_TYPE_EQ:
951 lnet_res_container_cleanup(struct lnet_res_container *rec)
955 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
958 while (!list_empty(&rec->rec_active)) {
959 struct list_head *e = rec->rec_active.next;
962 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
963 lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
965 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
966 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
968 } else { /* NB: Active MEs should be attached on portals */
975 /* Found a live MD/ME/EQ; the user really should unlink/free
976 * all of them before finalizing LNet, but if someone didn't,
977 * we have to recycle the garbage for them */
978 CERROR("%d active elements on exit of %s container\n",
979 count, lnet_res_type2str(rec->rec_type));
982 if (rec->rec_lh_hash != NULL) {
983 LIBCFS_FREE(rec->rec_lh_hash,
984 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
985 rec->rec_lh_hash = NULL;
988 rec->rec_type = 0; /* mark it as finalized */
992 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
997 LASSERT(rec->rec_type == 0);
999 rec->rec_type = type;
1000 INIT_LIST_HEAD(&rec->rec_active);
1002 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
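/* Cookie layout, low bits to high: resource type, then CPT, then a
 * per-container counter advanced by lnet_res_lh_initialize(); see
 * lnet_res_lh_lookup() for how the type and hash bucket are recovered. */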
1004 /* Arbitrary choice of hash table size */
1005 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
1006 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
1007 if (rec->rec_lh_hash == NULL) {
1012 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
1013 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
1018 CERROR("Failed to setup %s resource container\n",
1019 lnet_res_type2str(type));
1020 lnet_res_container_cleanup(rec);
1025 lnet_res_containers_destroy(struct lnet_res_container **recs)
1027 struct lnet_res_container *rec;
1030 cfs_percpt_for_each(rec, i, recs)
1031 lnet_res_container_cleanup(rec);
1033 cfs_percpt_free(recs);
1036 static struct lnet_res_container **
1037 lnet_res_containers_create(int type)
1039 struct lnet_res_container **recs;
1040 struct lnet_res_container *rec;
1044 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
1046 CERROR("Failed to allocate %s resource containers\n",
1047 lnet_res_type2str(type));
1051 cfs_percpt_for_each(rec, i, recs) {
1052 rc = lnet_res_container_setup(rec, i, type);
1054 lnet_res_containers_destroy(recs);
1062 struct lnet_libhandle *
1063 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
1065 /* ALWAYS called with lnet_res_lock held */
1066 struct list_head *head;
1067 struct lnet_libhandle *lh;
1070 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
1073 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
1074 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
1076 list_for_each_entry(lh, head, lh_hash_chain) {
1077 if (lh->lh_cookie == cookie)
1085 lnet_res_lh_initialize(struct lnet_res_container *rec,
1086 struct lnet_libhandle *lh)
1088 /* ALWAYS called with lnet_res_lock held */
1089 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
1092 lh->lh_cookie = rec->rec_lh_cookie;
1093 rec->rec_lh_cookie += 1 << ibits;
1095 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
1097 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
1100 static int lnet_unprepare(void);
1103 lnet_prepare(lnet_pid_t requested_pid)
1105 /* Prepare to bring up the network */
1106 struct lnet_res_container **recs;
1109 if (requested_pid == LNET_PID_ANY) {
1110 /* Don't instantiate LNET just for me */
1114 LASSERT(the_lnet.ln_refcount == 0);
1116 the_lnet.ln_routing = 0;
1118 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
1119 the_lnet.ln_pid = requested_pid;
1121 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
1122 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
1123 INIT_LIST_HEAD(&the_lnet.ln_nets);
1124 INIT_LIST_HEAD(&the_lnet.ln_routers);
1125 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
1126 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
1127 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
1128 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
1129 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
1130 INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
1131 INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
1132 init_waitqueue_head(&the_lnet.ln_dc_waitq);
1133 LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
1135 rc = lnet_descriptor_setup();
1139 rc = lnet_create_remote_nets_table();
1144 * NB the interface cookie in wire handles guards against delayed
1145 * replies and ACKs appearing valid after reboot.
1147 the_lnet.ln_interface_cookie = ktime_get_real_ns();
1149 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
1150 sizeof(struct lnet_counters));
1151 if (the_lnet.ln_counters == NULL) {
1152 CERROR("Failed to allocate counters for LNet\n");
1157 rc = lnet_peer_tables_create();
1161 rc = lnet_msg_containers_create();
1165 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
1166 LNET_COOKIE_TYPE_EQ);
1170 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
1176 the_lnet.ln_me_containers = recs;
1178 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
1184 the_lnet.ln_md_containers = recs;
1186 rc = lnet_portals_create();
1188 CERROR("Failed to create portals for LNet: %d\n", rc);
1200 lnet_unprepare (void)
1204 /* NB no LNET_LOCK since this is the last reference. All LND instances
1205 * have shut down already, so it is safe to unlink and free all
1206 * descriptors, even those that appear committed to a network op (eg MD
1207 * with non-zero pending count) */
1209 lnet_fail_nid(LNET_NID_ANY, 0);
1211 LASSERT(the_lnet.ln_refcount == 0);
1212 LASSERT(list_empty(&the_lnet.ln_test_peers));
1213 LASSERT(list_empty(&the_lnet.ln_nets));
1215 if (!LNetEQHandleIsInvalid(the_lnet.ln_mt_eqh)) {
1216 rc = LNetEQFree(the_lnet.ln_mt_eqh);
1217 LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
1221 lnet_portals_destroy();
1223 if (the_lnet.ln_md_containers != NULL) {
1224 lnet_res_containers_destroy(the_lnet.ln_md_containers);
1225 the_lnet.ln_md_containers = NULL;
1228 if (the_lnet.ln_me_containers != NULL) {
1229 lnet_res_containers_destroy(the_lnet.ln_me_containers);
1230 the_lnet.ln_me_containers = NULL;
1233 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
1235 lnet_msg_containers_destroy();
1237 lnet_rtrpools_free(0);
1239 if (the_lnet.ln_counters != NULL) {
1240 cfs_percpt_free(the_lnet.ln_counters);
1241 the_lnet.ln_counters = NULL;
1243 lnet_destroy_remote_nets_table();
1244 lnet_descriptor_cleanup();
1250 lnet_net2ni_locked(__u32 net_id, int cpt)
1253 struct lnet_net *net;
1255 LASSERT(cpt != LNET_LOCK_EX);
1257 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1258 if (net->net_id == net_id) {
1259 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
1269 lnet_net2ni_addref(__u32 net)
1274 ni = lnet_net2ni_locked(net, 0);
1276 lnet_ni_addref_locked(ni, 0);
1281 EXPORT_SYMBOL(lnet_net2ni_addref);
1284 lnet_get_net_locked(__u32 net_id)
1286 struct lnet_net *net;
1288 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1289 if (net->net_id == net_id)
1297 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
1302 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1307 val = hash_long(key, LNET_CPT_BITS);
1308 /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
1312 return (unsigned int)(key + val + (val >> 1)) % number;
1316 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
1318 struct lnet_net *net;
1320 /* must be called while holding lnet_net_lock */
1321 if (LNET_CPT_NUMBER == 1)
1322 return 0; /* the only one */
1325 * If NI is provided then use the CPT identified in the NI cpt
1326 * list if one exists. If one doesn't exist, then that NI is
1327 * associated with all CPTs and it follows that the net it belongs
1328 * to is implicitly associated with all CPTs, so just hash the nid
1332 if (ni->ni_cpts != NULL)
1333 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
1336 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1339 /* no NI provided so look at the net */
1340 net = lnet_get_net_locked(LNET_NIDNET(nid));
1342 if (net != NULL && net->net_cpts != NULL) {
1343 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
1346 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1350 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
1355 if (LNET_CPT_NUMBER == 1)
1356 return 0; /* the only one */
1358 cpt = lnet_net_lock_current();
1360 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
1362 lnet_net_unlock(cpt);
1366 EXPORT_SYMBOL(lnet_cpt_of_nid);
1369 lnet_islocalnet(__u32 net_id)
1371 struct lnet_net *net;
1375 cpt = lnet_net_lock_current();
1377 net = lnet_get_net_locked(net_id);
1379 local = net != NULL;
1381 lnet_net_unlock(cpt);
1387 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
1389 struct lnet_net *net;
1392 LASSERT(cpt != LNET_LOCK_EX);
1394 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1395 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1396 if (ni->ni_nid == nid)
1405 lnet_nid2ni_addref(lnet_nid_t nid)
1410 ni = lnet_nid2ni_locked(nid, 0);
1412 lnet_ni_addref_locked(ni, 0);
1417 EXPORT_SYMBOL(lnet_nid2ni_addref);
1420 lnet_islocalnid(lnet_nid_t nid)
1425 cpt = lnet_net_lock_current();
1426 ni = lnet_nid2ni_locked(nid, cpt);
1427 lnet_net_unlock(cpt);
1433 lnet_count_acceptor_nets(void)
1435 /* Return the # of NIs that need the acceptor. */
1437 struct lnet_net *net;
1440 cpt = lnet_net_lock_current();
1441 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1442 /* all socklnd type networks should have the acceptor
1444 if (net->net_lnd->lnd_accept != NULL)
1448 lnet_net_unlock(cpt);
1453 struct lnet_ping_buffer *
1454 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1456 struct lnet_ping_buffer *pbuf;
1458 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1460 pbuf->pb_nnis = nnis;
1461 atomic_set(&pbuf->pb_refcnt, 1);
1468 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1470 LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
1471 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
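/*
 * Allocate the ping target buffer for nnis NIs and stamp it with this
 * node's PID, the ping protocol magic and the NI-status/Multi-Rail feature
 * bits; the per-NI status entries are filled in later by
 * lnet_ping_target_install_locked().
 */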
1474 static struct lnet_ping_buffer *
1475 lnet_ping_target_create(int nnis)
1477 struct lnet_ping_buffer *pbuf;
1479 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1481 CERROR("Can't allocate ping source [%d]\n", nnis);
1485 pbuf->pb_info.pi_nnis = nnis;
1486 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1487 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1488 pbuf->pb_info.pi_features =
1489 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1495 lnet_get_net_ni_count_locked(struct lnet_net *net)
1500 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1507 lnet_get_net_ni_count_pre(struct lnet_net *net)
1512 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1519 lnet_get_ni_count(void)
1522 struct lnet_net *net;
1527 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1528 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1538 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1542 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1544 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1546 /* Loopback is guaranteed to be present */
1547 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1549 if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1555 lnet_ping_target_destroy(void)
1557 struct lnet_net *net;
1560 lnet_net_lock(LNET_LOCK_EX);
1562 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1563 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1565 ni->ni_status = NULL;
1570 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1571 the_lnet.ln_ping_target = NULL;
1573 lnet_net_unlock(LNET_LOCK_EX);
1577 lnet_ping_target_event_handler(struct lnet_event *event)
1579 struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1581 if (event->unlinked)
1582 lnet_ping_buffer_decref(pbuf);
1586 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1587 struct lnet_handle_md *ping_mdh,
1588 int ni_count, bool set_eq)
1590 struct lnet_process_id id = {
1591 .nid = LNET_NID_ANY,
1594 struct lnet_handle_me me_handle;
1595 struct lnet_md md = { NULL };
1599 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1600 &the_lnet.ln_ping_target_eq);
1602 CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1607 *ppbuf = lnet_ping_target_create(ni_count);
1608 if (*ppbuf == NULL) {
1613 /* Ping target ME/MD */
1614 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1615 LNET_PROTO_PING_MATCHBITS, 0,
1616 LNET_UNLINK, LNET_INS_AFTER,
1619 CERROR("Can't create ping target ME: %d\n", rc);
1620 goto fail_decref_ping_buffer;
1623 /* initialize md content */
1624 md.start = &(*ppbuf)->pb_info;
1625 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1626 md.threshold = LNET_MD_THRESH_INF;
1628 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1629 LNET_MD_MANAGE_REMOTE;
1630 md.eq_handle = the_lnet.ln_ping_target_eq;
1631 md.user_ptr = *ppbuf;
1633 rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1635 CERROR("Can't attach ping target MD: %d\n", rc);
1636 goto fail_unlink_ping_me;
1638 lnet_ping_buffer_addref(*ppbuf);
1642 fail_unlink_ping_me:
1643 rc2 = LNetMEUnlink(me_handle);
1645 fail_decref_ping_buffer:
1646 LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1647 lnet_ping_buffer_decref(*ppbuf);
1651 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1658 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1659 struct lnet_handle_md *ping_mdh)
1661 sigset_t blocked = cfs_block_allsigs();
1663 LNetMDUnlink(*ping_mdh);
1664 LNetInvalidateMDHandle(ping_mdh);
1666 /* NB the MD could be busy; this just starts the unlink */
1667 while (lnet_ping_buffer_numref(pbuf) > 1) {
1668 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1669 set_current_state(TASK_UNINTERRUPTIBLE);
1670 schedule_timeout(cfs_time_seconds(1));
1673 cfs_restore_sigs(blocked);
1677 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1680 struct lnet_net *net;
1681 struct lnet_ni_status *ns;
1686 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1687 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1688 LASSERT(i < pbuf->pb_nnis);
1690 ns = &pbuf->pb_info.pi_ni[i];
1692 ns->ns_nid = ni->ni_nid;
1695 ns->ns_status = (ni->ni_status != NULL) ?
1696 ni->ni_status->ns_status :
1705 * We (ab)use the ns_status of the loopback interface to
1706 * transmit the sequence number. The first interface listed
1707 * must be the loopback interface.
1709 rc = lnet_ping_info_validate(&pbuf->pb_info);
1711 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1714 LNET_PING_BUFFER_SEQNO(pbuf) =
1715 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
1719 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1720 struct lnet_handle_md ping_mdh)
1722 struct lnet_ping_buffer *old_pbuf = NULL;
1723 struct lnet_handle_md old_ping_md;
1725 /* switch the NIs to point to the new ping info created */
1726 lnet_net_lock(LNET_LOCK_EX);
1728 if (!the_lnet.ln_routing)
1729 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1730 if (!lnet_peer_discovery_disabled)
1731 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
1733 /* Ensure only known feature bits have been set. */
1734 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1735 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1737 lnet_ping_target_install_locked(pbuf);
1739 if (the_lnet.ln_ping_target) {
1740 old_pbuf = the_lnet.ln_ping_target;
1741 old_ping_md = the_lnet.ln_ping_target_md;
1743 the_lnet.ln_ping_target_md = ping_mdh;
1744 the_lnet.ln_ping_target = pbuf;
1746 lnet_net_unlock(LNET_LOCK_EX);
1749 /* unlink and free the old ping info */
1750 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1751 lnet_ping_buffer_decref(old_pbuf);
1754 lnet_push_update_to_peers(0);
1758 lnet_ping_target_fini(void)
1762 lnet_ping_md_unlink(the_lnet.ln_ping_target,
1763 &the_lnet.ln_ping_target_md);
1765 rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1768 lnet_ping_target_destroy();
1771 /* Resize the push target. */
1772 int lnet_push_target_resize(void)
1774 struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
1775 struct lnet_md md = { NULL };
1776 struct lnet_handle_me meh;
1777 struct lnet_handle_md mdh;
1778 struct lnet_handle_md old_mdh;
1779 struct lnet_ping_buffer *pbuf;
1780 struct lnet_ping_buffer *old_pbuf;
1781 int nnis = the_lnet.ln_push_target_nnis;
1789 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1795 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1796 LNET_PROTO_PING_MATCHBITS, 0,
1797 LNET_UNLINK, LNET_INS_AFTER,
1800 CERROR("Can't create push target ME: %d\n", rc);
1801 goto fail_decref_pbuf;
1804 /* initialize md content */
1805 md.start = &pbuf->pb_info;
1806 md.length = LNET_PING_INFO_SIZE(nnis);
1807 md.threshold = LNET_MD_THRESH_INF;
1809 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
1810 LNET_MD_MANAGE_REMOTE;
1812 md.eq_handle = the_lnet.ln_push_target_eq;
1814 rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
1816 CERROR("Can't attach push MD: %d\n", rc);
1817 goto fail_unlink_meh;
1819 lnet_ping_buffer_addref(pbuf);
1821 lnet_net_lock(LNET_LOCK_EX);
1822 old_pbuf = the_lnet.ln_push_target;
1823 old_mdh = the_lnet.ln_push_target_md;
1824 the_lnet.ln_push_target = pbuf;
1825 the_lnet.ln_push_target_md = mdh;
1826 lnet_net_unlock(LNET_LOCK_EX);
1829 LNetMDUnlink(old_mdh);
1830 lnet_ping_buffer_decref(old_pbuf);
1833 if (nnis < the_lnet.ln_push_target_nnis)
1836 CDEBUG(D_NET, "nnis %d success\n", nnis);
1843 lnet_ping_buffer_decref(pbuf);
1845 CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
1849 static void lnet_push_target_event_handler(struct lnet_event *ev)
1851 struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1853 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
1854 lnet_swap_pinginfo(pbuf);
1856 lnet_peer_push_event(ev);
1858 lnet_ping_buffer_decref(pbuf);
1861 /* Initialize the push target. */
1862 static int lnet_push_target_init(void)
1866 if (the_lnet.ln_push_target)
1869 rc = LNetEQAlloc(0, lnet_push_target_event_handler,
1870 &the_lnet.ln_push_target_eq);
1872 CERROR("Can't allocate push target EQ: %d\n", rc);
1876 /* Start at the required minimum, we'll enlarge if needed. */
1877 the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
1879 rc = lnet_push_target_resize();
1882 LNetEQFree(the_lnet.ln_push_target_eq);
1883 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1889 /* Clean up the push target. */
1890 static void lnet_push_target_fini(void)
1892 if (!the_lnet.ln_push_target)
1895 /* Unlink and invalidate to prevent new references. */
1896 LNetMDUnlink(the_lnet.ln_push_target_md);
1897 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
1899 /* Wait for the unlink to complete. */
1900 while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
1901 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1902 set_current_state(TASK_UNINTERRUPTIBLE);
1903 schedule_timeout(cfs_time_seconds(1));
1906 lnet_ping_buffer_decref(the_lnet.ln_push_target);
1907 the_lnet.ln_push_target = NULL;
1908 the_lnet.ln_push_target_nnis = 0;
1910 LNetEQFree(the_lnet.ln_push_target_eq);
1911 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
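/*
 * Split the net's maximum TX credits across this NI's CPTs: each TX queue
 * gets an even share, floored at 8x the per-peer TX credits and capped at
 * the net-wide maximum.
 */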
1915 lnet_ni_tq_credits(struct lnet_ni *ni)
1919 LASSERT(ni->ni_ncpts >= 1);
1921 if (ni->ni_ncpts == 1)
1922 return ni->ni_net->net_tunables.lct_max_tx_credits;
1924 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1925 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1926 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
1932 lnet_ni_unlink_locked(struct lnet_ni *ni)
1934 /* move it to the zombie list so nobody can find it anymore */
1935 LASSERT(!list_empty(&ni->ni_netlist));
1936 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1937 lnet_ni_decref_locked(ni, 0);
1941 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1946 struct list_head *zombie_list = &net->net_ni_zombie;
1949 * Now wait for the NIs I just nuked to show up on the zombie
1950 * list and shut them down in guaranteed thread context
1953 while (!list_empty(zombie_list)) {
1957 ni = list_entry(zombie_list->next,
1958 struct lnet_ni, ni_netlist);
1959 list_del_init(&ni->ni_netlist);
1960 /* the ni should be in deleting state. If it's not, it's a bug */
1962 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
1963 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1966 /* still busy, add it back to zombie list */
1967 list_add(&ni->ni_netlist, zombie_list);
1971 if (!list_empty(&ni->ni_netlist)) {
1972 lnet_net_unlock(LNET_LOCK_EX);
1974 if ((i & (-i)) == i) {
1976 "Waiting for zombie LNI %s\n",
1977 libcfs_nid2str(ni->ni_nid));
1979 set_current_state(TASK_UNINTERRUPTIBLE);
1980 schedule_timeout(cfs_time_seconds(1));
1981 lnet_net_lock(LNET_LOCK_EX);
1985 lnet_net_unlock(LNET_LOCK_EX);
1987 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1989 LASSERT(!in_interrupt());
1990 (net->net_lnd->lnd_shutdown)(ni);
1993 CDEBUG(D_LNI, "Removed LNI %s\n",
1994 libcfs_nid2str(ni->ni_nid));
1998 lnet_net_lock(LNET_LOCK_EX);
2002 /* shut down the NI and release refcount */
2004 lnet_shutdown_lndni(struct lnet_ni *ni)
2007 struct lnet_net *net = ni->ni_net;
2009 lnet_net_lock(LNET_LOCK_EX);
2011 ni->ni_state = LNET_NI_STATE_DELETING;
2013 lnet_ni_unlink_locked(ni);
2014 lnet_incr_dlc_seq();
2015 lnet_net_unlock(LNET_LOCK_EX);
2017 /* clear messages for this NI on the lazy portal */
2018 for (i = 0; i < the_lnet.ln_nportals; i++)
2019 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
2021 lnet_net_lock(LNET_LOCK_EX);
2022 lnet_clear_zombies_nis_locked(net);
2023 lnet_net_unlock(LNET_LOCK_EX);
2027 lnet_shutdown_lndnet(struct lnet_net *net)
2031 lnet_net_lock(LNET_LOCK_EX);
2033 net->net_state = LNET_NET_STATE_DELETING;
2035 list_del_init(&net->net_list);
2037 while (!list_empty(&net->net_ni_list)) {
2038 ni = list_entry(net->net_ni_list.next,
2039 struct lnet_ni, ni_netlist);
2040 lnet_net_unlock(LNET_LOCK_EX);
2041 lnet_shutdown_lndni(ni);
2042 lnet_net_lock(LNET_LOCK_EX);
2045 lnet_net_unlock(LNET_LOCK_EX);
2047 /* Do peer table cleanup for this net */
2048 lnet_peer_tables_cleanup(net);
2050 lnet_net_lock(LNET_LOCK_EX);
2052 * decrement ref count on lnd only when the entire network goes
2055 net->net_lnd->lnd_refcount--;
2057 lnet_net_unlock(LNET_LOCK_EX);
2063 lnet_shutdown_lndnets(void)
2065 struct lnet_net *net;
2066 struct list_head resend;
2067 struct lnet_msg *msg, *tmp;
2069 INIT_LIST_HEAD(&resend);
2071 /* NB called holding the global mutex */
2073 /* All quiet on the API front */
2074 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
2075 LASSERT(the_lnet.ln_refcount == 0);
2077 lnet_net_lock(LNET_LOCK_EX);
2078 the_lnet.ln_state = LNET_STATE_STOPPING;
2080 while (!list_empty(&the_lnet.ln_nets)) {
2082 * move the nets to the zombie list to avoid them being
2083 * picked up for new work. LONET is also included in the
2084 * Nets that will be moved to the zombie list
2086 net = list_entry(the_lnet.ln_nets.next,
2087 struct lnet_net, net_list);
2088 list_move(&net->net_list, &the_lnet.ln_net_zombie);
2091 /* Drop the cached loopback Net. */
2092 if (the_lnet.ln_loni != NULL) {
2093 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
2094 the_lnet.ln_loni = NULL;
2096 lnet_net_unlock(LNET_LOCK_EX);
2098 /* iterate through the net zombie list and delete each net */
2099 while (!list_empty(&the_lnet.ln_net_zombie)) {
2100 net = list_entry(the_lnet.ln_net_zombie.next,
2101 struct lnet_net, net_list);
2102 lnet_shutdown_lndnet(net);
2105 spin_lock(&the_lnet.ln_msg_resend_lock);
2106 list_splice(&the_lnet.ln_msg_resend, &resend);
2107 spin_unlock(&the_lnet.ln_msg_resend_lock);
2109 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
2110 list_del_init(&msg->msg_list);
2111 msg->msg_no_resend = true;
2112 lnet_finalize(msg, -ECANCELED);
2115 lnet_net_lock(LNET_LOCK_EX);
2116 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
2117 lnet_net_unlock(LNET_LOCK_EX);
2121 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2124 struct lnet_tx_queue *tq;
2126 struct lnet_net *net = ni->ni_net;
2128 mutex_lock(&the_lnet.ln_lnd_mutex);
2131 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2132 ni->ni_lnd_tunables_set = true;
2135 rc = (net->net_lnd->lnd_startup)(ni);
2137 mutex_unlock(&the_lnet.ln_lnd_mutex);
2140 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2141 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2142 lnet_net_lock(LNET_LOCK_EX);
2143 net->net_lnd->lnd_refcount--;
2144 lnet_net_unlock(LNET_LOCK_EX);
2149 ni->ni_state = LNET_NI_STATE_ACTIVE;
2152 /* We keep a reference on the loopback net through the loopback NI */
2153 if (net->net_lnd->lnd_type == LOLND) {
2155 LASSERT(the_lnet.ln_loni == NULL);
2156 the_lnet.ln_loni = ni;
2157 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2158 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2159 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2160 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2164 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2165 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2166 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2167 libcfs_lnd2str(net->net_lnd->lnd_type),
2168 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2170 /* shutdown the NI since if we get here then it must've already
2173 lnet_shutdown_lndni(ni);
2177 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2178 tq->tq_credits_min =
2179 tq->tq_credits_max =
2180 tq->tq_credits = lnet_ni_tq_credits(ni);
2183 atomic_set(&ni->ni_tx_credits,
2184 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2185 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2187 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2188 libcfs_nid2str(ni->ni_nid),
2189 ni->ni_net->net_tunables.lct_peer_tx_credits,
2190 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2191 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2192 ni->ni_net->net_tunables.lct_peer_timeout);
2201 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2204 struct lnet_net *net_l = NULL;
2205 struct list_head local_ni_list;
2209 struct lnet_lnd *lnd;
2211 net->net_tunables.lct_peer_timeout;
2213 net->net_tunables.lct_max_tx_credits;
2214 int peerrtrcredits =
2215 net->net_tunables.lct_peer_rtr_credits;
2217 INIT_LIST_HEAD(&local_ni_list);
2220 * make sure that this net is unique. If it isn't then
2221 * we are adding interfaces to an already existing network, and
2222 * 'net' is just a convenient way to pass in the list.
2223 * if it is unique we need to find the LND and load it if
2226 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2227 lnd_type = LNET_NETTYP(net->net_id);
2229 mutex_lock(&the_lnet.ln_lnd_mutex);
2230 lnd = lnet_find_lnd_by_type(lnd_type);
2233 mutex_unlock(&the_lnet.ln_lnd_mutex);
2234 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2235 mutex_lock(&the_lnet.ln_lnd_mutex);
2237 lnd = lnet_find_lnd_by_type(lnd_type);
2239 mutex_unlock(&the_lnet.ln_lnd_mutex);
2240 CERROR("Can't load LND %s, module %s, rc=%d\n",
2241 libcfs_lnd2str(lnd_type),
2242 libcfs_lnd2modname(lnd_type), rc);
2243 #ifndef HAVE_MODULE_LOADING_SUPPORT
2244 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
2245 "compiled with kernel module "
2246 "loading support.");
2253 lnet_net_lock(LNET_LOCK_EX);
2254 lnd->lnd_refcount++;
2255 lnet_net_unlock(LNET_LOCK_EX);
2259 mutex_unlock(&the_lnet.ln_lnd_mutex);
2265 * net_l: if the network being added is unique then net_l
2266 * will point to that network
2267 * if the network being added is not unique then
2268 * net_l points to the existing network.
2270 * When we enter the loop below, we'll pick NIs off the
2271 * network being added and start them up, then add them to
2272 * a local ni list. Once we've successfully started all
2273 * the NIs then we join the local NI list (of started up
2274 * networks) with the net_l->net_ni_list, which should
2275 * point to the correct network to add the new ni list to
2277 * If any of the new NIs fail to start up, then we want to
2278 * iterate through the local ni list, which should include
2279 * any NIs which were successfully started up, and shut
2282 * After that we want to delete the network being added,
2283 * to avoid a memory leak.
2287 * When a network uses TCP bonding then all its interfaces
2288 * must be specified when the network is first defined: the
2289 * TCP bonding code doesn't allow for interfaces to be added
2292 if (net_l != net && net_l != NULL && use_tcp_bonding &&
2293 LNET_NETTYP(net_l->net_id) == SOCKLND) {
2298 while (!list_empty(&net->net_ni_added)) {
2299 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
2301 list_del_init(&ni->ni_netlist);
2303 /* make sure that the NI we're about to start
2304 * up is actually unique. If it's not, fail. */
2305 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2306 ni->ni_interfaces[0])) {
2311 /* adjust the pointer to the parent network, just in case
2312 * the net is a duplicate */
2315 rc = lnet_startup_lndni(ni, tun);
2317 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
2318 ni->ni_net->net_lnd->lnd_query != NULL);
2324 list_add_tail(&ni->ni_netlist, &local_ni_list);
2329 lnet_net_lock(LNET_LOCK_EX);
2330 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2331 lnet_incr_dlc_seq();
2332 lnet_net_unlock(LNET_LOCK_EX);
2334 /* if the network is not unique then we don't want to keep
2335 * it around after we're done. Free it. Otherwise add that
2336 * net to the global the_lnet.ln_nets */
2337 if (net_l != net && net_l != NULL) {
2339 * TODO - note. currently the tunables can not be updated
2344 net->net_state = LNET_NET_STATE_ACTIVE;
2346 * restore tunables after they have been overwritten by the
2349 if (peer_timeout != -1)
2350 net->net_tunables.lct_peer_timeout = peer_timeout;
2351 if (maxtxcredits != -1)
2352 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2353 if (peerrtrcredits != -1)
2354 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2356 lnet_net_lock(LNET_LOCK_EX);
2357 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2358 lnet_net_unlock(LNET_LOCK_EX);
2365 * shutdown the new NIs that are being started up
2366 * free the NET being started
2368 while (!list_empty(&local_ni_list)) {
2369 ni = list_entry(local_ni_list.next, struct lnet_ni,
2372 lnet_shutdown_lndni(ni);
2382 lnet_startup_lndnets(struct list_head *netlist)
2384 struct lnet_net *net;
2389 * Change to running state before bringing up the LNDs. This
2390 * allows lnet_shutdown_lndnets() to assert that we've passed
2393 lnet_net_lock(LNET_LOCK_EX);
2394 the_lnet.ln_state = LNET_STATE_RUNNING;
2395 lnet_net_unlock(LNET_LOCK_EX);
2397 while (!list_empty(netlist)) {
2398 net = list_entry(netlist->next, struct lnet_net, net_list);
2399 list_del_init(&net->net_list);
2401 rc = lnet_startup_lndnet(net, NULL);
2411 lnet_shutdown_lndnets();
2417 * Initialize LNet library.
2419 * Automatically called at module loading time. Caller has to call
2420 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2421 * latter returned 0. It must be called exactly once.
2423 * \retval 0 on success
2424 * \retval -ve on failures.
2426 int lnet_lib_init(void)
2430 lnet_assert_wire_constants();
2432 /* refer to global cfs_cpt_table for now */
2433 the_lnet.ln_cpt_table = cfs_cpt_table;
2434 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
2436 LASSERT(the_lnet.ln_cpt_number > 0);
2437 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2438 /* we are under risk of consuming all lh_cookie */
2439 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2440 "please change setting of CPT-table and retry\n",
2441 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2445 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2446 the_lnet.ln_cpt_bits++;
2448 rc = lnet_create_locks();
2450 CERROR("Can't create LNet global locks: %d\n", rc);
2454 the_lnet.ln_refcount = 0;
2455 LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
2456 INIT_LIST_HEAD(&the_lnet.ln_lnds);
2457 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2458 INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
2459 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2460 INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
2462 /* The hash table size is the number of bits it takes to express the set
2463 * ln_num_routes, minus 1 (better to underestimate than overestimate, so we
2464 * don't waste memory). */
2465 if (rnet_htable_size <= 0)
2466 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2467 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2468 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2469 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2470 order_base_2(rnet_htable_size) - 1);
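/* Worked example (illustrative value): with rnet_htable_size = 128,
 * order_base_2(128) == 7, so ln_remote_nets_hbits becomes
 * max(1, 7 - 1) = 6 and the remote-nets hash table gets
 * 1 << 6 = 64 buckets.
 */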
2472 /* All LNDs apart from the LOLND are in separate modules. They
2473 * register themselves when their module loads, and unregister
2474 * themselves when their module is unloaded. */
2475 lnet_register_lnd(&the_lolnd);
2480 * Finalize LNet library.
2482 * \pre lnet_lib_init() called with success.
2483 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2485 void lnet_lib_exit(void)
2487 LASSERT(the_lnet.ln_refcount == 0);
2489 while (!list_empty(&the_lnet.ln_lnds))
2490 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
2491 struct lnet_lnd, lnd_list));
2492 lnet_destroy_locks();
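/* Illustrative sketch (not part of this file): how a module init path
 * is expected to pair lnet_lib_init() with lnet_lib_exit(). The
 * function names below are hypothetical.
 *
 *	static int __init example_module_init(void)
 *	{
 *		int rc = lnet_lib_init();
 *
 *		if (rc != 0)
 *			return rc;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		...
 *		lnet_lib_exit();
 *	}
 */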
2496 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2498 * Users must call this function at least once before any other functions.
2499 * For each successful call there must be a corresponding call to
2500 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2503 * The PID used by LNet may be different from the one requested.
2506 * \param requested_pid PID requested by the caller.
2508 * \return >= 0 on success, and < 0 error code on failures.
2511 LNetNIInit(lnet_pid_t requested_pid)
2513 int im_a_router = 0;
2516 struct lnet_ping_buffer *pbuf;
2517 struct lnet_handle_md ping_mdh;
2518 struct list_head net_head;
2519 struct lnet_net *net;
2521 INIT_LIST_HEAD(&net_head);
2523 mutex_lock(&the_lnet.ln_api_mutex);
2525 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2527 if (the_lnet.ln_refcount > 0) {
2528 rc = the_lnet.ln_refcount++;
2529 mutex_unlock(&the_lnet.ln_api_mutex);
2533 rc = lnet_prepare(requested_pid);
2535 mutex_unlock(&the_lnet.ln_api_mutex);
2539 /* create a network for Loopback network */
2540 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2543 goto err_empty_list;
2546 /* Add in the loopback NI */
2547 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2549 goto err_empty_list;
2552 /* If LNet is being initialized via DLC it is possible
2553 * that the user requests not to load module parameters (ones which
2554 * are supported by DLC) on initialization. Therefore, make sure not
2555 * to load networks, routes and forwarding from module parameters
2556 * in this case. On cleanup after a failure, only clean up the
2557 * routes if they have been loaded. */
2558 if (!the_lnet.ln_nis_from_mod_params) {
2559 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2562 goto err_empty_list;
2565 ni_count = lnet_startup_lndnets(&net_head);
2568 goto err_empty_list;
2571 if (!the_lnet.ln_nis_from_mod_params) {
2572 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2574 goto err_shutdown_lndnis;
2576 rc = lnet_check_routes();
2578 goto err_destroy_routes;
2580 rc = lnet_rtrpools_alloc(im_a_router);
2582 goto err_destroy_routes;
2585 rc = lnet_acceptor_start();
2587 goto err_destroy_routes;
2589 the_lnet.ln_refcount = 1;
2590 /* Now I may use my own API functions... */
2592 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2594 goto err_acceptor_stop;
2596 lnet_ping_target_update(pbuf, ping_mdh);
2598 rc = LNetEQAlloc(0, lnet_mt_event_handler, &the_lnet.ln_mt_eqh);
2600 CERROR("Can't allocate monitor thread EQ: %d\n", rc);
2604 rc = lnet_monitor_thr_start();
2608 rc = lnet_push_target_init();
2610 goto err_stop_monitor_thr;
2612 rc = lnet_peer_discovery_start();
2614 goto err_destroy_push_target;
2617 lnet_router_debugfs_init();
2619 mutex_unlock(&the_lnet.ln_api_mutex);
2623 err_destroy_push_target:
2624 lnet_push_target_fini();
2625 err_stop_monitor_thr:
2626 lnet_monitor_thr_stop();
2628 lnet_ping_target_fini();
2630 the_lnet.ln_refcount = 0;
2631 lnet_acceptor_stop();
2633 if (!the_lnet.ln_nis_from_mod_params)
2634 lnet_destroy_routes();
2635 err_shutdown_lndnis:
2636 lnet_shutdown_lndnets();
2640 mutex_unlock(&the_lnet.ln_api_mutex);
2641 while (!list_empty(&net_head)) {
2642 struct lnet_net *net;
2644 net = list_entry(net_head.next, struct lnet_net, net_list);
2645 list_del_init(&net->net_list);
2650 EXPORT_SYMBOL(LNetNIInit);
2653 * Stop LNet interfaces, routing, and forwarding.
2655 * Users must call this function once for each successful call to LNetNIInit().
2656 * Once the LNetNIFini() operation has been started, the results of pending
2657 * API operations are undefined.
2659 * \return always 0 for current implementation.
2664 mutex_lock(&the_lnet.ln_api_mutex);
2666 LASSERT(the_lnet.ln_refcount > 0);
2668 if (the_lnet.ln_refcount != 1) {
2669 the_lnet.ln_refcount--;
2671 LASSERT(!the_lnet.ln_niinit_self);
2675 lnet_router_debugfs_fini();
2676 lnet_peer_discovery_stop();
2677 lnet_push_target_fini();
2678 lnet_monitor_thr_stop();
2679 lnet_ping_target_fini();
2681 /* Teardown fns that use my own API functions BEFORE here */
2682 the_lnet.ln_refcount = 0;
2684 lnet_acceptor_stop();
2685 lnet_destroy_routes();
2686 lnet_shutdown_lndnets();
2690 mutex_unlock(&the_lnet.ln_api_mutex);
2693 EXPORT_SYMBOL(LNetNIFini);
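/* Illustrative usage sketch: a kernel user of LNet brings the stack up
 * once and tears it down with a matching LNetNIFini(). LNET_PID_LUSTRE
 * is used here only as an example PID; repeated LNetNIInit() calls
 * simply take an extra reference.
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	... use LNet ...
 *	LNetNIFini();
 */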
2696 * Grabs the ni data from the ni structure and fills the out
2699 * \param[in] ni network interface structure
2700 * \param[out] cfg_ni NI config information
2701 * \param[out] tun network and LND tunables
2704 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2705 struct lnet_ioctl_config_lnd_tunables *tun,
2706 struct lnet_ioctl_element_stats *stats,
2709 size_t min_size = 0;
2712 if (!ni || !cfg_ni || !tun)
2715 if (ni->ni_interfaces[0] != NULL) {
2716 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2717 if (ni->ni_interfaces[i] != NULL) {
2718 strncpy(cfg_ni->lic_ni_intf[i],
2719 ni->ni_interfaces[i],
2720 sizeof(cfg_ni->lic_ni_intf[i]));
2725 cfg_ni->lic_nid = ni->ni_nid;
2726 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2727 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2729 cfg_ni->lic_status = ni->ni_status->ns_status;
2730 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2731 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2733 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2736 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
2737 LNET_STATS_TYPE_SEND);
2738 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
2739 LNET_STATS_TYPE_RECV);
2740 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
2741 LNET_STATS_TYPE_DROP);
2745 * tun->lt_tun will always be present, but in order to be
2746 * backwards compatible, we need to handle the case where
2747 * tun->lt_tun is smaller than what the kernel has, because it
2748 * comes from an older version of a userspace program. In that
2749 * case we copy only as much information as fits in the available space.
2751 min_size = tun_size - sizeof(tun->lt_cmn);
2752 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
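/* Worked example (illustrative numbers): if an older userspace tool
 * passes a tun_size that is 16 bytes smaller than the kernel's
 * structure, min_size = tun_size - sizeof(tun->lt_cmn) and only that
 * many bytes of ni_lnd_tunables are copied, so the caller never
 * receives more than it has room for.
 */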
2754 /* copy over the cpts */
2755 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2756 ni->ni_cpts == NULL) {
2757 for (i = 0; i < ni->ni_ncpts; i++)
2758 cfg_ni->lic_cpts[i] = i;
2761 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2762 i < LNET_MAX_SHOW_NUM_CPT;
2764 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2766 cfg_ni->lic_ncpts = ni->ni_ncpts;
2770 * NOTE: This is a legacy function left in the code to be backwards
2771 * compatible with older userspace programs. It should eventually be
2774 * Grabs the ni data from the ni structure and fills the out
2777 * \param[in] ni network interface structure
2778 * \param[out] config config information
2781 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2782 struct lnet_ioctl_config_data *config)
2784 struct lnet_ioctl_net_config *net_config;
2785 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2786 size_t min_size, tunable_size = 0;
2792 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2796 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2797 ARRAY_SIZE(net_config->ni_interfaces));
2799 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2800 if (!ni->ni_interfaces[i])
2803 strncpy(net_config->ni_interfaces[i],
2804 ni->ni_interfaces[i],
2805 sizeof(net_config->ni_interfaces[i]));
2808 config->cfg_nid = ni->ni_nid;
2809 config->cfg_config_u.cfg_net.net_peer_timeout =
2810 ni->ni_net->net_tunables.lct_peer_timeout;
2811 config->cfg_config_u.cfg_net.net_max_tx_credits =
2812 ni->ni_net->net_tunables.lct_max_tx_credits;
2813 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2814 ni->ni_net->net_tunables.lct_peer_tx_credits;
2815 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2816 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2818 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2819 net_config->ni_status = LNET_NI_STATUS_UP;
2821 net_config->ni_status = ni->ni_status->ns_status;
2824 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2826 for (i = 0; i < num_cpts; i++)
2827 net_config->ni_cpts[i] = ni->ni_cpts[i];
2829 config->cfg_ncpts = num_cpts;
2833 * See if user land tools sent in a newer and larger version
2834 * of struct lnet_tunables than what the kernel uses.
2836 min_size = sizeof(*config) + sizeof(*net_config);
2838 if (config->cfg_hdr.ioc_len > min_size)
2839 tunable_size = config->cfg_hdr.ioc_len - min_size;
2841 /* Don't copy too much data to user space */
2842 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2843 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2845 if (lnd_cfg && min_size) {
2846 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2847 config->cfg_config_u.cfg_net.net_interface_count = 1;
2849 /* Tell user land that kernel side has less data */
2850 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2851 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2852 config->cfg_hdr.ioc_len -= min_size;
2858 lnet_get_ni_idx_locked(int idx)
2861 struct lnet_net *net;
2863 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2864 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2874 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2877 struct lnet_net *net = mynet;
2880 * It is possible that the net has been cleaned out while there is
2881 * a message being sent. This function accesses the net without
2882 * checking if the list is empty
2886 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2888 if (list_empty(&net->net_ni_list))
2890 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2896 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2897 /* if you reached the end of the ni list and the net is
2898 * specified, then there are no more nis in that net */
2902 /* we reached the end of this net ni list. move to the
2904 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2905 /* no more nets and no more NIs. */
2908 /* get the next net */
2909 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2911 if (list_empty(&net->net_ni_list))
2913 /* get the ni on it */
2914 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2920 if (list_empty(&prev->ni_netlist))
2923 /* there are more nis left */
2924 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
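/* Illustrative iteration sketch (assumes the caller is in a context
 * where taking the net lock is safe): walk every local NI across all
 * configured nets by feeding the previous NI back in.
 *
 *	cpt = lnet_net_lock_current();
 *	ni = NULL;
 *	while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL)
 *		CDEBUG(D_NET, "local NI %s\n", libcfs_nid2str(ni->ni_nid));
 *	lnet_net_unlock(cpt);
 */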
2930 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2935 int idx = config->cfg_count;
2937 cpt = lnet_net_lock_current();
2939 ni = lnet_get_ni_idx_locked(idx);
2944 lnet_fill_ni_info_legacy(ni, config);
2948 lnet_net_unlock(cpt);
2953 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2954 struct lnet_ioctl_config_lnd_tunables *tun,
2955 struct lnet_ioctl_element_stats *stats,
2962 if (!cfg_ni || !tun || !stats)
2965 cpt = lnet_net_lock_current();
2967 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2972 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2976 lnet_net_unlock(cpt);
2980 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
2989 cpt = lnet_net_lock_current();
2991 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
2994 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
2998 lnet_net_unlock(cpt);
3003 static int lnet_add_net_common(struct lnet_net *net,
3004 struct lnet_ioctl_config_lnd_tunables *tun)
3007 struct lnet_ping_buffer *pbuf;
3008 struct lnet_handle_md ping_mdh;
3010 struct lnet_remotenet *rnet;
3012 int num_acceptor_nets;
3014 lnet_net_lock(LNET_LOCK_EX);
3015 rnet = lnet_find_rnet_locked(net->net_id);
3016 lnet_net_unlock(LNET_LOCK_EX);
3018 * make sure that the net added doesn't invalidate the current
3019 * configuration LNet is keeping
3022 CERROR("Adding net %s will invalidate routing configuration\n",
3023 libcfs_net2str(net->net_id));
3029 * make sure we calculate the correct number of slots in the ping
3030 * buffer. Since the ping info is a flattened list of all the NIs,
3031 * we should allocate enough slots to accommodate the number of NIs
3032 * which will be added.
3034 * Since the NIs haven't been configured yet, use
3035 * lnet_get_net_ni_count_pre(), which checks the net_ni_added list
3037 net_ni_count = lnet_get_net_ni_count_pre(net);
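/* Worked example (illustrative counts): if LNet currently exports 3 NIs
 * and the net being added carries 2 NIs on its net_ni_added list, the
 * replacement ping buffer is sized for 3 + 2 = 5 NIs before the new
 * net is actually started.
 */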
3039 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3040 net_ni_count + lnet_get_ni_count(),
3048 memcpy(&net->net_tunables,
3049 &tun->lt_cmn, sizeof(net->net_tunables));
3051 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
3054 * before starting this network, get a count of the current TCP
3055 * networks which require the acceptor thread to be running. If that
3056 * count is 0 before we start up this network, then we want to
3057 * start up the acceptor thread after starting up this network
3059 num_acceptor_nets = lnet_count_acceptor_nets();
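/* Illustrative scenario: if only nets that do not need the acceptor
 * (e.g. o2ib) are configured, lnet_count_acceptor_nets() returns 0
 * here, so once the first socklnd net is started below the acceptor
 * thread is started for it.
 */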
3061 net_id = net->net_id;
3063 rc = lnet_startup_lndnet(net,
3064 (tun) ? &tun->lt_tun : NULL);
3068 lnet_net_lock(LNET_LOCK_EX);
3069 net = lnet_get_net_locked(net_id);
3070 lnet_net_unlock(LNET_LOCK_EX);
3075 * Start the acceptor thread if this is the first network
3076 * being added that requires the thread.
3078 if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
3079 rc = lnet_acceptor_start();
3081 /* shutdown the net that we just started */
3082 CERROR("Failed to start up acceptor thread\n");
3083 lnet_shutdown_lndnet(net);
3088 lnet_net_lock(LNET_LOCK_EX);
3089 lnet_peer_net_added(net);
3090 lnet_net_unlock(LNET_LOCK_EX);
3092 lnet_ping_target_update(pbuf, ping_mdh);
3097 lnet_ping_md_unlink(pbuf, &ping_mdh);
3098 lnet_ping_buffer_decref(pbuf);
3102 static int lnet_handle_legacy_ip2nets(char *ip2nets,
3103 struct lnet_ioctl_config_lnd_tunables *tun)
3105 struct lnet_net *net;
3108 struct list_head net_head;
3110 INIT_LIST_HEAD(&net_head);
3112 rc = lnet_parse_ip2nets(&nets, ip2nets);
3116 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3120 mutex_lock(&the_lnet.ln_api_mutex);
3121 while (!list_empty(&net_head)) {
3122 net = list_entry(net_head.next, struct lnet_net, net_list);
3123 list_del_init(&net->net_list);
3124 rc = lnet_add_net_common(net, tun);
3130 mutex_unlock(&the_lnet.ln_api_mutex);
3132 while (!list_empty(&net_head)) {
3133 net = list_entry(net_head.next, struct lnet_net, net_list);
3134 list_del_init(&net->net_list);
3140 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
3142 struct lnet_net *net;
3144 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3146 __u32 net_id, lnd_type;
3148 /* get the tunables if they are available */
3149 if (conf->lic_cfg_hdr.ioc_len >=
3150 sizeof(*conf) + sizeof(*tun))
3151 tun = (struct lnet_ioctl_config_lnd_tunables *)
3154 /* handle legacy ip2nets from DLC */
3155 if (conf->lic_legacy_ip2nets[0] != '\0')
3156 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3159 net_id = LNET_NIDNET(conf->lic_nid);
3160 lnd_type = LNET_NETTYP(net_id);
3162 if (!libcfs_isknown_lnd(lnd_type)) {
3163 CERROR("No valid net and lnd information provided\n");
3167 net = lnet_net_alloc(net_id, NULL);
3171 for (i = 0; i < conf->lic_ncpts; i++) {
3172 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
3176 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3177 conf->lic_ni_intf[0]);
3181 mutex_lock(&the_lnet.ln_api_mutex);
3183 rc = lnet_add_net_common(net, tun);
3185 mutex_unlock(&the_lnet.ln_api_mutex);
3190 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
3192 struct lnet_net *net;
3194 __u32 net_id = LNET_NIDNET(conf->lic_nid);
3195 struct lnet_ping_buffer *pbuf;
3196 struct lnet_handle_md ping_mdh;
3201 /* don't allow userspace to shut down the LOLND */
3202 if (LNET_NETTYP(net_id) == LOLND)
3205 mutex_lock(&the_lnet.ln_api_mutex);
3209 net = lnet_get_net_locked(net_id);
3211 CERROR("net %s not found\n",
3212 libcfs_net2str(net_id));
3217 addr = LNET_NIDADDR(conf->lic_nid);
3219 /* remove the entire net */
3220 net_count = lnet_get_net_ni_count_locked(net);
3224 /* create and link a new ping info, before removing the old one */
3225 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3226 lnet_get_ni_count() - net_count,
3229 goto unlock_api_mutex;
3231 lnet_shutdown_lndnet(net);
3233 if (lnet_count_acceptor_nets() == 0)
3234 lnet_acceptor_stop();
3236 lnet_ping_target_update(pbuf, ping_mdh);
3238 goto unlock_api_mutex;
3241 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
3243 CERROR("nid %s not found\n",
3244 libcfs_nid2str(conf->lic_nid));
3249 net_count = lnet_get_net_ni_count_locked(net);
3253 /* create and link a new ping info, before removing the old one */
3254 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3255 lnet_get_ni_count() - 1, false);
3257 goto unlock_api_mutex;
3259 lnet_shutdown_lndni(ni);
3261 if (lnet_count_acceptor_nets() == 0)
3262 lnet_acceptor_stop();
3264 lnet_ping_target_update(pbuf, ping_mdh);
3266 /* check if the net is empty and remove it if it is */
3268 lnet_shutdown_lndnet(net);
3270 goto unlock_api_mutex;
3275 mutex_unlock(&the_lnet.ln_api_mutex);
3281 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3282 * They are only expected to be called for unique networks.
3283 * That can be as a result of older DLC library
3284 * calls. Multi-Rail DLC and beyond no longer uses these APIs.
3287 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3289 struct lnet_net *net;
3290 struct list_head net_head;
3292 struct lnet_ioctl_config_lnd_tunables tun;
3293 char *nets = conf->cfg_config_u.cfg_net.net_intf;
3295 INIT_LIST_HEAD(&net_head);
3297 /* Create a net/ni structures for the network string */
3298 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3300 return rc == 0 ? -EINVAL : rc;
3302 mutex_lock(&the_lnet.ln_api_mutex);
3305 rc = -EINVAL; /* only add one network per call */
3306 goto out_unlock_clean;
3309 net = list_entry(net_head.next, struct lnet_net, net_list);
3310 list_del_init(&net->net_list);
3312 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3314 memset(&tun, 0, sizeof(tun));
3316 tun.lt_cmn.lct_peer_timeout =
3317 conf->cfg_config_u.cfg_net.net_peer_timeout;
3318 tun.lt_cmn.lct_peer_tx_credits =
3319 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3320 tun.lt_cmn.lct_peer_rtr_credits =
3321 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3322 tun.lt_cmn.lct_max_tx_credits =
3323 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3325 rc = lnet_add_net_common(net, &tun);
3328 mutex_unlock(&the_lnet.ln_api_mutex);
3329 while (!list_empty(&net_head)) {
3330 /* net_head list is empty in success case */
3331 net = list_entry(net_head.next, struct lnet_net, net_list);
3332 list_del_init(&net->net_list);
3339 lnet_dyn_del_net(__u32 net_id)
3341 struct lnet_net *net;
3342 struct lnet_ping_buffer *pbuf;
3343 struct lnet_handle_md ping_mdh;
3347 /* don't allow userspace to shut down the LOLND */
3348 if (LNET_NETTYP(net_id) == LOLND)
3351 mutex_lock(&the_lnet.ln_api_mutex);
3355 net = lnet_get_net_locked(net_id);
3362 net_ni_count = lnet_get_net_ni_count_locked(net);
3366 /* create and link a new ping info, before removing the old one */
3367 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3368 lnet_get_ni_count() - net_ni_count, false);
3372 lnet_shutdown_lndnet(net);
3374 if (lnet_count_acceptor_nets() == 0)
3375 lnet_acceptor_stop();
3377 lnet_ping_target_update(pbuf, ping_mdh);
3380 mutex_unlock(&the_lnet.ln_api_mutex);
3385 void lnet_incr_dlc_seq(void)
3387 atomic_inc(&lnet_dlc_seq_no);
3390 __u32 lnet_get_dlc_seq_locked(void)
3392 return atomic_read(&lnet_dlc_seq_no);
3396 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3398 struct lnet_net *net;
3401 lnet_net_lock(LNET_LOCK_EX);
3402 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3403 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3404 if (ni->ni_nid == nid || all) {
3405 atomic_set(&ni->ni_healthv, value);
3406 if (list_empty(&ni->ni_recovery) &&
3407 value < LNET_MAX_HEALTH_VALUE) {
3408 CERROR("manually adding local NI %s to recovery\n",
3409 libcfs_nid2str(ni->ni_nid));
3410 list_add_tail(&ni->ni_recovery,
3411 &the_lnet.ln_mt_localNIRecovq);
3412 lnet_ni_addref_locked(ni, 0);
3415 lnet_net_unlock(LNET_LOCK_EX);
3421 lnet_net_unlock(LNET_LOCK_EX);
3425 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3429 lnet_nid_t nid = stats->hlni_nid;
3431 cpt = lnet_net_lock_current();
3432 ni = lnet_nid2ni_locked(nid, cpt);
3439 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3440 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3441 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3442 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3443 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3444 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3445 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3448 lnet_net_unlock(cpt);
3454 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3459 lnet_net_lock(LNET_LOCK_EX);
3460 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
3461 list->rlst_nid_array[i] = ni->ni_nid;
3463 if (i >= LNET_MAX_SHOW_NUM_NID)
3466 lnet_net_unlock(LNET_LOCK_EX);
3467 list->rlst_num_nids = i;
3473 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3475 struct lnet_peer_ni *lpni;
3478 lnet_net_lock(LNET_LOCK_EX);
3479 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
3480 list->rlst_nid_array[i] = lpni->lpni_nid;
3482 if (i >= LNET_MAX_SHOW_NUM_NID)
3485 lnet_net_unlock(LNET_LOCK_EX);
3486 list->rlst_num_nids = i;
3492 * LNet ioctl handler.
3496 LNetCtl(unsigned int cmd, void *arg)
3498 struct libcfs_ioctl_data *data = arg;
3499 struct lnet_ioctl_config_data *config;
3500 struct lnet_process_id id = {0};
3504 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3505 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
3508 case IOC_LIBCFS_GET_NI:
3509 rc = LNetGetId(data->ioc_count, &id);
3510 data->ioc_nid = id.nid;
3513 case IOC_LIBCFS_FAIL_NID:
3514 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3516 case IOC_LIBCFS_ADD_ROUTE:
3519 if (config->cfg_hdr.ioc_len < sizeof(*config))
3522 mutex_lock(&the_lnet.ln_api_mutex);
3523 rc = lnet_add_route(config->cfg_net,
3524 config->cfg_config_u.cfg_route.rtr_hop,
3526 config->cfg_config_u.cfg_route.
3529 rc = lnet_check_routes();
3531 lnet_del_route(config->cfg_net,
3534 mutex_unlock(&the_lnet.ln_api_mutex);
3537 case IOC_LIBCFS_DEL_ROUTE:
3540 if (config->cfg_hdr.ioc_len < sizeof(*config))
3543 mutex_lock(&the_lnet.ln_api_mutex);
3544 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3545 mutex_unlock(&the_lnet.ln_api_mutex);
3548 case IOC_LIBCFS_GET_ROUTE:
3551 if (config->cfg_hdr.ioc_len < sizeof(*config))
3554 mutex_lock(&the_lnet.ln_api_mutex);
3555 rc = lnet_get_route(config->cfg_count,
3557 &config->cfg_config_u.cfg_route.rtr_hop,
3559 &config->cfg_config_u.cfg_route.rtr_flags,
3560 &config->cfg_config_u.cfg_route.
3562 mutex_unlock(&the_lnet.ln_api_mutex);
3565 case IOC_LIBCFS_GET_LOCAL_NI: {
3566 struct lnet_ioctl_config_ni *cfg_ni;
3567 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3568 struct lnet_ioctl_element_stats *stats;
3573 /* get the tunables if they are available */
3574 if (cfg_ni->lic_cfg_hdr.ioc_len <
3575 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3578 stats = (struct lnet_ioctl_element_stats *)
3580 tun = (struct lnet_ioctl_config_lnd_tunables *)
3581 (cfg_ni->lic_bulk + sizeof(*stats));
3583 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3586 mutex_lock(&the_lnet.ln_api_mutex);
3587 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3588 mutex_unlock(&the_lnet.ln_api_mutex);
3592 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3593 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3595 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3598 mutex_lock(&the_lnet.ln_api_mutex);
3599 rc = lnet_get_ni_stats(msg_stats);
3600 mutex_unlock(&the_lnet.ln_api_mutex);
3605 case IOC_LIBCFS_GET_NET: {
3606 size_t total = sizeof(*config) +
3607 sizeof(struct lnet_ioctl_net_config);
3610 if (config->cfg_hdr.ioc_len < total)
3613 mutex_lock(&the_lnet.ln_api_mutex);
3614 rc = lnet_get_net_config(config);
3615 mutex_unlock(&the_lnet.ln_api_mutex);
3619 case IOC_LIBCFS_GET_LNET_STATS:
3621 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3623 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3626 mutex_lock(&the_lnet.ln_api_mutex);
3627 lnet_counters_get(&lnet_stats->st_cntrs);
3628 mutex_unlock(&the_lnet.ln_api_mutex);
3632 case IOC_LIBCFS_CONFIG_RTR:
3635 if (config->cfg_hdr.ioc_len < sizeof(*config))
3638 mutex_lock(&the_lnet.ln_api_mutex);
3639 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3640 rc = lnet_rtrpools_enable();
3641 mutex_unlock(&the_lnet.ln_api_mutex);
3644 lnet_rtrpools_disable();
3645 mutex_unlock(&the_lnet.ln_api_mutex);
3648 case IOC_LIBCFS_ADD_BUF:
3651 if (config->cfg_hdr.ioc_len < sizeof(*config))
3654 mutex_lock(&the_lnet.ln_api_mutex);
3655 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3657 config->cfg_config_u.cfg_buffers.
3659 config->cfg_config_u.cfg_buffers.
3661 mutex_unlock(&the_lnet.ln_api_mutex);
3664 case IOC_LIBCFS_SET_NUMA_RANGE: {
3665 struct lnet_ioctl_set_value *numa;
3667 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3669 lnet_net_lock(LNET_LOCK_EX);
3670 lnet_numa_range = numa->sv_value;
3671 lnet_net_unlock(LNET_LOCK_EX);
3675 case IOC_LIBCFS_GET_NUMA_RANGE: {
3676 struct lnet_ioctl_set_value *numa;
3678 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3680 numa->sv_value = lnet_numa_range;
3684 case IOC_LIBCFS_GET_BUF: {
3685 struct lnet_ioctl_pool_cfg *pool_cfg;
3686 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3690 if (config->cfg_hdr.ioc_len < total)
3693 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3695 mutex_lock(&the_lnet.ln_api_mutex);
3696 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3697 mutex_unlock(&the_lnet.ln_api_mutex);
3701 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
3702 struct lnet_ioctl_local_ni_hstats *stats = arg;
3704 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
3707 mutex_lock(&the_lnet.ln_api_mutex);
3708 rc = lnet_get_local_ni_hstats(stats);
3709 mutex_unlock(&the_lnet.ln_api_mutex);
3714 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
3715 struct lnet_ioctl_recovery_list *list = arg;
3716 if (list->rlst_hdr.ioc_len < sizeof(*list))
3719 mutex_lock(&the_lnet.ln_api_mutex);
3720 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
3721 rc = lnet_get_local_ni_recovery_list(list);
3723 rc = lnet_get_peer_ni_recovery_list(list);
3724 mutex_unlock(&the_lnet.ln_api_mutex);
3728 case IOC_LIBCFS_ADD_PEER_NI: {
3729 struct lnet_ioctl_peer_cfg *cfg = arg;
3731 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3734 mutex_lock(&the_lnet.ln_api_mutex);
3735 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
3738 mutex_unlock(&the_lnet.ln_api_mutex);
3742 case IOC_LIBCFS_DEL_PEER_NI: {
3743 struct lnet_ioctl_peer_cfg *cfg = arg;
3745 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3748 mutex_lock(&the_lnet.ln_api_mutex);
3749 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3750 cfg->prcfg_cfg_nid);
3751 mutex_unlock(&the_lnet.ln_api_mutex);
3755 case IOC_LIBCFS_GET_PEER_INFO: {
3756 struct lnet_ioctl_peer *peer_info = arg;
3758 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3761 mutex_lock(&the_lnet.ln_api_mutex);
3762 rc = lnet_get_peer_ni_info(
3763 peer_info->pr_count,
3765 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3766 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3767 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3768 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3769 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3770 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3771 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3772 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3773 mutex_unlock(&the_lnet.ln_api_mutex);
3777 case IOC_LIBCFS_GET_PEER_NI: {
3778 struct lnet_ioctl_peer_cfg *cfg = arg;
3780 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3783 mutex_lock(&the_lnet.ln_api_mutex);
3784 rc = lnet_get_peer_info(cfg,
3785 (void __user *)cfg->prcfg_bulk);
3786 mutex_unlock(&the_lnet.ln_api_mutex);
3790 case IOC_LIBCFS_GET_PEER_LIST: {
3791 struct lnet_ioctl_peer_cfg *cfg = arg;
3793 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3796 mutex_lock(&the_lnet.ln_api_mutex);
3797 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
3798 (struct lnet_process_id __user *)cfg->prcfg_bulk);
3799 mutex_unlock(&the_lnet.ln_api_mutex);
3803 case IOC_LIBCFS_SET_HEALHV: {
3804 struct lnet_ioctl_reset_health_cfg *cfg = arg;
3806 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
3808 if (cfg->rh_value < 0 ||
3809 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
3810 value = LNET_MAX_HEALTH_VALUE;
3812 value = cfg->rh_value;
3813 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
3814 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
3815 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
3816 mutex_lock(&the_lnet.ln_api_mutex);
3817 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
3818 lnet_ni_set_healthv(cfg->rh_nid, value,
3821 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
3823 mutex_unlock(&the_lnet.ln_api_mutex);
3827 case IOC_LIBCFS_NOTIFY_ROUTER: {
3828 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
3830 /* The deadline passed in by the user should be some time in
3831 * seconds in the future since the UNIX epoch. We have to map
3832 * that deadline to the wall clock.
3834 deadline += ktime_get_seconds();
3835 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
3839 case IOC_LIBCFS_LNET_DIST:
3840 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
3841 if (rc < 0 && rc != -EHOSTUNREACH)
3844 data->ioc_u32[0] = rc;
3847 case IOC_LIBCFS_TESTPROTOCOMPAT:
3848 lnet_net_lock(LNET_LOCK_EX);
3849 the_lnet.ln_testprotocompat = data->ioc_flags;
3850 lnet_net_unlock(LNET_LOCK_EX);
3853 case IOC_LIBCFS_LNET_FAULT:
3854 return lnet_fault_ctl(data->ioc_flags, data);
3856 case IOC_LIBCFS_PING: {
3857 signed long timeout;
3859 id.nid = data->ioc_nid;
3860 id.pid = data->ioc_u32[0];
3862 /* If timeout is negative, zero, or too large then use the default of 3 minutes */
3863 if (((s32)data->ioc_u32[1] <= 0) ||
3864 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3865 timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3867 timeout = msecs_to_jiffies(data->ioc_u32[1]);
3869 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3870 data->ioc_plen1 / sizeof(struct lnet_process_id));
3875 data->ioc_count = rc;
3879 case IOC_LIBCFS_PING_PEER: {
3880 struct lnet_ioctl_ping_data *ping = arg;
3881 struct lnet_peer *lp;
3882 signed long timeout;
3884 /* If timeout is negative, zero, or too large then use the default of 3 minutes */
3885 if (((s32)ping->op_param) <= 0 ||
3886 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3887 timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3889 timeout = msecs_to_jiffies(ping->op_param);
3891 rc = lnet_ping(ping->ping_id, timeout,
3897 mutex_lock(&the_lnet.ln_api_mutex);
3898 lp = lnet_find_peer(ping->ping_id.nid);
3900 ping->ping_id.nid = lp->lp_primary_nid;
3901 ping->mr_info = lnet_peer_is_multi_rail(lp);
3902 lnet_peer_decref_locked(lp);
3904 mutex_unlock(&the_lnet.ln_api_mutex);
3906 ping->ping_count = rc;
3910 case IOC_LIBCFS_DISCOVER: {
3911 struct lnet_ioctl_ping_data *discover = arg;
3912 struct lnet_peer *lp;
3914 rc = lnet_discover(discover->ping_id, discover->op_param,
3916 discover->ping_count);
3920 mutex_lock(&the_lnet.ln_api_mutex);
3921 lp = lnet_find_peer(discover->ping_id.nid);
3923 discover->ping_id.nid = lp->lp_primary_nid;
3924 discover->mr_info = lnet_peer_is_multi_rail(lp);
3925 lnet_peer_decref_locked(lp);
3927 mutex_unlock(&the_lnet.ln_api_mutex);
3929 discover->ping_count = rc;
3934 ni = lnet_net2ni_addref(data->ioc_net);
3938 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
3941 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
3948 EXPORT_SYMBOL(LNetCtl);
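/* Illustrative in-kernel caller sketch: querying the NID of the first
 * local interface through the ioctl entry point. Only the fields used
 * by the IOC_LIBCFS_GET_NI case above are filled in.
 *
 *	struct libcfs_ioctl_data data = { .ioc_count = 0 };
 *
 *	if (LNetCtl(IOC_LIBCFS_GET_NI, &data) == 0)
 *		CDEBUG(D_NET, "NI 0: %s\n", libcfs_nid2str(data.ioc_nid));
 */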
3950 void LNetDebugPeer(struct lnet_process_id id)
3952 lnet_debug_peer(id.nid);
3954 EXPORT_SYMBOL(LNetDebugPeer);
3957 * Determine if the specified peer \a nid is on the local node.
3959 * \param nid peer nid to check
3961 * \retval true If peer NID is on the local node.
3962 * \retval false If peer NID is not on the local node.
3964 bool LNetIsPeerLocal(lnet_nid_t nid)
3966 struct lnet_net *net;
3970 cpt = lnet_net_lock_current();
3971 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3972 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3973 if (ni->ni_nid == nid) {
3974 lnet_net_unlock(cpt);
3979 lnet_net_unlock(cpt);
3983 EXPORT_SYMBOL(LNetIsPeerLocal);
3986 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
3987 * Note that all interfaces share a same PID, as requested by LNetNIInit().
3989 * \param index Index of the interface to look up.
3990 * \param id On successful return, this location will hold the
3991 * struct lnet_process_id ID of the interface.
3993 * \retval 0 If an interface exists at \a index.
3994 * \retval -ENOENT If no interface has been found.
3997 LNetGetId(unsigned int index, struct lnet_process_id *id)
4000 struct lnet_net *net;
4004 LASSERT(the_lnet.ln_refcount > 0);
4006 cpt = lnet_net_lock_current();
4008 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
4009 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
4013 id->nid = ni->ni_nid;
4014 id->pid = the_lnet.ln_pid;
4020 lnet_net_unlock(cpt);
4023 EXPORT_SYMBOL(LNetGetId);
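/* Illustrative usage sketch: enumerating local interfaces until
 * LNetGetId() reports -ENOENT. Assumes LNet has already been brought
 * up with LNetNIInit().
 *
 *	struct lnet_process_id id;
 *	unsigned int i;
 *
 *	for (i = 0; LNetGetId(i, &id) == 0; i++)
 *		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
 */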
4025 static int lnet_ping(struct lnet_process_id id, signed long timeout,
4026 struct lnet_process_id __user *ids, int n_ids)
4028 struct lnet_handle_eq eqh;
4029 struct lnet_handle_md mdh;
4030 struct lnet_event event;
4031 struct lnet_md md = { NULL };
4035 const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
4036 struct lnet_ping_buffer *pbuf;
4037 struct lnet_process_id tmpid;
4044 /* n_ids limit is arbitrary */
4045 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
4049 * if the user buffer has more space than lnet_interfaces_max entries,
4050 * then only fill it up to lnet_interfaces_max
4052 if (n_ids > lnet_interfaces_max)
4053 n_ids = lnet_interfaces_max;
4055 if (id.pid == LNET_PID_ANY)
4056 id.pid = LNET_PID_LUSTRE;
4058 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
4062 /* NB 2 events max (including any unlink event) */
4063 rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
4065 CERROR("Can't allocate EQ: %d\n", rc);
4066 goto fail_ping_buffer_decref;
4069 /* initialize md content */
4070 md.start = &pbuf->pb_info;
4071 md.length = LNET_PING_INFO_SIZE(n_ids);
4072 md.threshold = 2; /* GET/REPLY */
4074 md.options = LNET_MD_TRUNCATE;
4078 rc = LNetMDBind(md, LNET_UNLINK, &mdh);
4080 CERROR("Can't bind MD: %d\n", rc);
4084 rc = LNetGet(LNET_NID_ANY, mdh, id,
4085 LNET_RESERVED_PORTAL,
4086 LNET_PROTO_PING_MATCHBITS, 0, false);
4089 /* Don't CERROR; this could be deliberate! */
4090 rc2 = LNetMDUnlink(mdh);
4093 /* NB must wait for the UNLINK event below... */
4095 timeout = a_long_time;
4099 /* MUST block for unlink to complete */
4101 blocked = cfs_block_allsigs();
4103 rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
4106 cfs_restore_sigs(blocked);
4108 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
4109 (rc2 <= 0) ? -1 : event.type,
4110 (rc2 <= 0) ? -1 : event.status,
4111 (rc2 > 0 && event.unlinked) ? " unlinked" : "");
4113 LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
4115 if (rc2 <= 0 || event.status != 0) {
4116 /* timeout or error */
4117 if (!replied && rc == 0)
4118 rc = (rc2 < 0) ? rc2 :
4119 (rc2 == 0) ? -ETIMEDOUT :
4123 /* Ensure completion in finite time... */
4125 /* No assertion (racing with network) */
4127 timeout = a_long_time;
4128 } else if (rc2 == 0) {
4129 /* timed out waiting for unlink */
4130 CWARN("ping %s: late network completion\n",
4133 } else if (event.type == LNET_EVENT_REPLY) {
4137 } while (rc2 <= 0 || !event.unlinked);
4141 CWARN("%s: Unexpected rc >= 0 but no reply!\n",
4148 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
4150 rc = -EPROTO; /* if I can't parse... */
4153 CERROR("%s: ping info too short %d\n",
4154 libcfs_id2str(id), nob);
4158 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
4159 lnet_swap_pinginfo(pbuf);
4160 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
4161 CERROR("%s: Unexpected magic %08x\n",
4162 libcfs_id2str(id), pbuf->pb_info.pi_magic);
4166 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
4167 CERROR("%s: ping w/o NI status: 0x%x\n",
4168 libcfs_id2str(id), pbuf->pb_info.pi_features);
4172 if (nob < LNET_PING_INFO_SIZE(0)) {
4173 CERROR("%s: Short reply %d(%d min)\n",
4175 nob, (int)LNET_PING_INFO_SIZE(0));
4179 if (pbuf->pb_info.pi_nnis < n_ids)
4180 n_ids = pbuf->pb_info.pi_nnis;
4182 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
4183 CERROR("%s: Short reply %d(%d expected)\n",
4185 nob, (int)LNET_PING_INFO_SIZE(n_ids));
4189 rc = -EFAULT; /* if I segv in copy_to_user()... */
4191 memset(&tmpid, 0, sizeof(tmpid));
4192 for (i = 0; i < n_ids; i++) {
4193 tmpid.pid = pbuf->pb_info.pi_pid;
4194 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
4195 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
4198 rc = pbuf->pb_info.pi_nnis;
4201 rc2 = LNetEQFree(eqh);
4203 CERROR("rc2 %d\n", rc2);
4206 fail_ping_buffer_decref:
4207 lnet_ping_buffer_decref(pbuf);
4212 lnet_discover(struct lnet_process_id id, __u32 force,
4213 struct lnet_process_id __user *ids, int n_ids)
4215 struct lnet_peer_ni *lpni;
4216 struct lnet_peer_ni *p;
4217 struct lnet_peer *lp;
4218 struct lnet_process_id *buf;
4222 int max_intf = lnet_interfaces_max;
4226 id.nid == LNET_NID_ANY)
4229 if (id.pid == LNET_PID_ANY)
4230 id.pid = LNET_PID_LUSTRE;
4233 * if the user buffer has more space than the max_intf
4234 * then only fill it up to max_intf
4236 if (n_ids > max_intf)
4239 buf_size = n_ids * sizeof(*buf);
4241 LIBCFS_ALLOC(buf, buf_size);
4245 cpt = lnet_net_lock_current();
4246 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
4253 * Clearing the NIDS_UPTODATE flag ensures the peer will
4254 * be discovered, provided discovery has not been disabled.
4256 lp = lpni->lpni_peer_net->lpn_peer;
4257 spin_lock(&lp->lp_lock);
4258 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
4259 /* If the force flag is set, force a PING and PUSH as well. */
4261 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
4262 spin_unlock(&lp->lp_lock);
4263 rc = lnet_discover_peer_locked(lpni, cpt, true);
4267 /* Peer may have changed. */
4268 lp = lpni->lpni_peer_net->lpn_peer;
4269 if (lp->lp_nnis < n_ids)
4270 n_ids = lp->lp_nnis;
4274 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
4275 buf[i].pid = id.pid;
4276 buf[i].nid = p->lpni_nid;
4281 lnet_net_unlock(cpt);
4284 if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
4290 lnet_peer_ni_decref_locked(lpni);
4292 lnet_net_unlock(cpt);
4294 LIBCFS_FREE(buf, buf_size);
4300 * Retrieve peer discovery status.
4302 * \retval 1 if lnet_peer_discovery_disabled is 0
4303 * \retval 0 if lnet_peer_discovery_disabled is 1
4306 LNetGetPeerDiscoveryStatus(void)
4308 return !lnet_peer_discovery_disabled;
4310 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);