4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LNET
35 #include <linux/ctype.h>
36 #include <linux/log2.h>
37 #include <linux/ktime.h>
38 #include <linux/moduleparam.h>
39 #include <linux/uaccess.h>
41 #include <lnet/lib-lnet.h>
43 #define D_LNI D_CONSOLE
46 * initialize ln_api_mutex statically, since it needs to be used in
47 * discovery_set callback. That module parameter callback can be called
48 * before module init completes. The mutex needs to be ready for use then.
50 struct lnet the_lnet = {
51 .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
52 }; /* THE state of the network */
53 EXPORT_SYMBOL(the_lnet);
55 static char *ip2nets = "";
56 module_param(ip2nets, charp, 0444);
57 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
59 static char *networks = "";
60 module_param(networks, charp, 0444);
61 MODULE_PARM_DESC(networks, "local networks");
63 static char *routes = "";
64 module_param(routes, charp, 0444);
65 MODULE_PARM_DESC(routes, "routes to non-local networks");
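/* Example (illustrative values only) of how these strings are normally
 * supplied as module options, e.g. in /etc/modprobe.d/lnet.conf:
 *
 *   options lnet networks="tcp0(eth0)"
 *   options lnet routes="o2ib0 192.168.0.10@tcp0"
 *
 * 'ip2nets' is an alternative to 'networks'; the two are mutually
 * exclusive (see lnet_get_networks() below).
 */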
67 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
68 module_param(rnet_htable_size, int, 0444);
69 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
71 static int use_tcp_bonding = false;
72 module_param(use_tcp_bonding, int, 0444);
73 MODULE_PARM_DESC(use_tcp_bonding,
74 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
76 unsigned int lnet_numa_range = 0;
77 module_param(lnet_numa_range, uint, 0444);
78 MODULE_PARM_DESC(lnet_numa_range,
79 "NUMA range to consider during Multi-Rail selection");
82 * lnet_health_sensitivity determines by how much we decrement the health
83 * value on sending error. The value defaults to 0, which means health
84 * checking is turned off by default.
86 unsigned int lnet_health_sensitivity = 0;
87 static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
88 static struct kernel_param_ops param_ops_health_sensitivity = {
89 .set = sensitivity_set,
92 #define param_check_health_sensitivity(name, p) \
93 __param_check(name, p, int)
94 #ifdef HAVE_KERNEL_PARAM_OPS
95 module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
97 module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
98 &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
100 MODULE_PARM_DESC(lnet_health_sensitivity,
101 "Value to decrement the health value by on error");
103 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
104 static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
106 static struct kernel_param_ops param_ops_interfaces_max = {
108 .get = param_get_int,
111 #define param_check_interfaces_max(name, p) \
112 __param_check(name, p, int)
114 #ifdef HAVE_KERNEL_PARAM_OPS
115 module_param(lnet_interfaces_max, interfaces_max, 0644);
117 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
118 &param_ops_interfaces_max, 0644);
120 MODULE_PARM_DESC(lnet_interfaces_max,
121 "Maximum number of interfaces in a node.");
123 unsigned lnet_peer_discovery_disabled = 0;
124 static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);
126 static struct kernel_param_ops param_ops_discovery_disabled = {
127 .set = discovery_set,
128 .get = param_get_int,
131 #define param_check_discovery_disabled(name, p) \
132 __param_check(name, p, int)
133 #ifdef HAVE_KERNEL_PARAM_OPS
134 module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
136 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
137 &param_ops_discovery_disabled, 0644);
139 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
140 "Set to 1 to disable peer discovery on this node.");
142 unsigned lnet_transaction_timeout = 5;
143 static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
144 static struct kernel_param_ops param_ops_transaction_timeout = {
145 .set = transaction_to_set,
146 .get = param_get_int,
149 #define param_check_transaction_timeout(name, p) \
150 __param_check(name, p, int)
151 #ifdef HAVE_KERNEL_PARAM_OPS
152 module_param(lnet_transaction_timeout, transaction_timeout, S_IRUGO|S_IWUSR);
154 module_param_call(lnet_transaction_timeout, transaction_to_set, param_get_int,
155 &lnet_transaction_timeout, S_IRUGO|S_IWUSR);
157 MODULE_PARM_DESC(lnet_transaction_timeout,
158 "Maximum number of seconds to wait for a peer response.");
160 unsigned lnet_retry_count = 0;
161 static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
162 static struct kernel_param_ops param_ops_retry_count = {
163 .set = retry_count_set,
164 .get = param_get_int,
167 #define param_check_retry_count(name, p) \
168 __param_check(name, p, int)
169 #ifdef HAVE_KERNEL_PARAM_OPS
170 module_param(lnet_retry_count, retry_count, S_IRUGO|S_IWUSR);
172 module_param_call(lnet_retry_count, retry_count_set, param_get_int,
173 &lnet_retry_count, S_IRUGO|S_IWUSR);
175 MODULE_PARM_DESC(lnet_retry_count,
176 "Maximum number of times to retry transmitting a message");
179 unsigned lnet_lnd_timeout = LNET_LND_DEFAULT_TIMEOUT;
182 * This sequence number keeps track of how many times DLC was used to
183 * update the local NIs. It is incremented when a NI is added or
184 * removed and checked when sending a message to determine if there is
185 * a need to re-run the selection algorithm. See lnet_select_pathway()
186 * for more details on its usage.
188 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
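/* The sequence number is bumped via lnet_incr_dlc_seq() whenever the set of
 * local NIs changes (see lnet_shutdown_lndni() and lnet_startup_lndnet()
 * below), so senders can cheaply detect that path re-selection is needed.
 */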
190 static int lnet_ping(struct lnet_process_id id, signed long timeout,
191 struct lnet_process_id __user *ids, int n_ids);
193 static int lnet_discover(struct lnet_process_id id, __u32 force,
194 struct lnet_process_id __user *ids, int n_ids);
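/* Set handler for the lnet_health_sensitivity module parameter: parse the
 * value, then update it under ln_api_mutex, but only while LNet is in the
 * RUNNING state.
 */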
197 sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
200 unsigned *sensitivity = (unsigned *)kp->arg;
203 rc = kstrtoul(val, 0, &value);
205 CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
210 * The purpose of locking the api_mutex here is to ensure that
211 * the correct value ends up stored properly.
213 mutex_lock(&the_lnet.ln_api_mutex);
215 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
216 mutex_unlock(&the_lnet.ln_api_mutex);
220 if (value == *sensitivity) {
221 mutex_unlock(&the_lnet.ln_api_mutex);
225 *sensitivity = value;
227 mutex_unlock(&the_lnet.ln_api_mutex);
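/* Set handler for lnet_peer_discovery_disabled: normalize the value to 0/1
 * and, if it changes while LNet is running, flip the DISCOVERY feature bit
 * in the ping target and push the update to peers.
 */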
233 discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
236 unsigned *discovery = (unsigned *)kp->arg;
238 struct lnet_ping_buffer *pbuf;
240 rc = kstrtoul(val, 0, &value);
242 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
246 value = (value) ? 1 : 0;
249 * The purpose of locking the api_mutex here is to ensure that
250 * the correct value ends up stored properly.
252 mutex_lock(&the_lnet.ln_api_mutex);
254 if (value == *discovery) {
255 mutex_unlock(&the_lnet.ln_api_mutex);
261 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
262 mutex_unlock(&the_lnet.ln_api_mutex);
266 /* tell peers that discovery setting has changed */
267 lnet_net_lock(LNET_LOCK_EX);
268 pbuf = the_lnet.ln_ping_target;
270 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
272 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
273 lnet_net_unlock(LNET_LOCK_EX);
275 lnet_push_update_to_peers(1);
277 mutex_unlock(&the_lnet.ln_api_mutex);
283 transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp)
286 unsigned *transaction_to = (unsigned *)kp->arg;
289 rc = kstrtoul(val, 0, &value);
291 CERROR("Invalid module parameter value for 'lnet_transaction_timeout'\n");
296 * The purpose of locking the api_mutex here is to ensure that
297 * the correct value ends up stored properly.
299 mutex_lock(&the_lnet.ln_api_mutex);
301 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
302 mutex_unlock(&the_lnet.ln_api_mutex);
306 if (value < lnet_retry_count || value == 0) {
307 mutex_unlock(&the_lnet.ln_api_mutex);
308 CERROR("Invalid value for lnet_transaction_timeout (%lu). "
309 "Must be non-zero and not less than lnet_retry_count (%u)\n",
310 value, lnet_retry_count);
314 if (value == *transaction_to) {
315 mutex_unlock(&the_lnet.ln_api_mutex);
319 *transaction_to = value;
320 if (lnet_retry_count == 0)
321 lnet_lnd_timeout = value;
323 lnet_lnd_timeout = value / lnet_retry_count;
325 mutex_unlock(&the_lnet.ln_api_mutex);
331 retry_count_set(const char *val, cfs_kernel_param_arg_t *kp)
334 unsigned *retry_count = (unsigned *)kp->arg;
337 rc = kstrtoul(val, 0, &value);
339 CERROR("Invalid module parameter value for 'lnet_retry_count'\n");
344 * The purpose of locking the api_mutex here is to ensure that
345 * the correct value ends up stored properly.
347 mutex_lock(&the_lnet.ln_api_mutex);
349 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
350 mutex_unlock(&the_lnet.ln_api_mutex);
354 if (value > lnet_transaction_timeout) {
355 mutex_unlock(&the_lnet.ln_api_mutex);
356 CERROR("Invalid value for lnet_retry_count (%lu). "
357 "Must not exceed lnet_transaction_timeout (%u)\n",
358 value, lnet_transaction_timeout);
362 if (value == *retry_count) {
363 mutex_unlock(&the_lnet.ln_api_mutex);
367 *retry_count = value;
370 lnet_lnd_timeout = lnet_transaction_timeout;
372 lnet_lnd_timeout = lnet_transaction_timeout / value;
374 mutex_unlock(&the_lnet.ln_api_mutex);
380 intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
384 rc = kstrtoint(val, 0, &value);
386 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
390 if (value < LNET_INTERFACES_MIN) {
391 CWARN("provided value for max interfaces is too small, setting to %d\n",
392 LNET_INTERFACES_MAX_DEFAULT);
393 value = LNET_INTERFACES_MAX_DEFAULT;
396 *(int *)kp->arg = value;
402 lnet_get_routes(void)
408 lnet_get_networks(void)
413 if (*networks != 0 && *ip2nets != 0) {
414 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
415 "'ip2nets' but not both at once\n");
420 rc = lnet_parse_ip2nets(&nets, ip2nets);
421 return (rc == 0) ? nets : NULL;
431 lnet_init_locks(void)
433 spin_lock_init(&the_lnet.ln_eq_wait_lock);
434 spin_lock_init(&the_lnet.ln_msg_resend_lock);
435 init_waitqueue_head(&the_lnet.ln_eq_waitq);
436 init_waitqueue_head(&the_lnet.ln_mt_waitq);
437 mutex_init(&the_lnet.ln_lnd_mutex);
441 lnet_fini_locks(void)
445 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
446 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
450 lnet_descriptor_setup(void)
452 /* create specific kmem_cache for MEs and small MDs (i.e., originally
453 * allocated in <size-xxx> kmem_cache).
455 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
457 if (!lnet_mes_cachep)
460 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
461 LNET_SMALL_MD_SIZE, 0, 0,
463 if (!lnet_small_mds_cachep)
470 lnet_descriptor_cleanup(void)
473 if (lnet_small_mds_cachep) {
474 kmem_cache_destroy(lnet_small_mds_cachep);
475 lnet_small_mds_cachep = NULL;
478 if (lnet_mes_cachep) {
479 kmem_cache_destroy(lnet_mes_cachep);
480 lnet_mes_cachep = NULL;
485 lnet_create_remote_nets_table(void)
488 struct list_head *hash;
490 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
491 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
492 LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
494 CERROR("Failed to create remote nets hash table\n");
498 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
499 INIT_LIST_HEAD(&hash[i]);
500 the_lnet.ln_remote_nets_hash = hash;
505 lnet_destroy_remote_nets_table(void)
509 if (the_lnet.ln_remote_nets_hash == NULL)
512 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
513 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
515 LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
516 LNET_REMOTE_NETS_HASH_SIZE *
517 sizeof(the_lnet.ln_remote_nets_hash[0]));
518 the_lnet.ln_remote_nets_hash = NULL;
522 lnet_destroy_locks(void)
524 if (the_lnet.ln_res_lock != NULL) {
525 cfs_percpt_lock_free(the_lnet.ln_res_lock);
526 the_lnet.ln_res_lock = NULL;
529 if (the_lnet.ln_net_lock != NULL) {
530 cfs_percpt_lock_free(the_lnet.ln_net_lock);
531 the_lnet.ln_net_lock = NULL;
538 lnet_create_locks(void)
542 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
543 if (the_lnet.ln_res_lock == NULL)
546 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
547 if (the_lnet.ln_net_lock == NULL)
553 lnet_destroy_locks();
557 static void lnet_assert_wire_constants(void)
559 /* Wire protocol assertions generated by 'wirecheck'
560 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
561 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
562 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
565 CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
566 CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
567 CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
568 CLASSERT(LNET_MSG_ACK == 0);
569 CLASSERT(LNET_MSG_PUT == 1);
570 CLASSERT(LNET_MSG_GET == 2);
571 CLASSERT(LNET_MSG_REPLY == 3);
572 CLASSERT(LNET_MSG_HELLO == 4);
574 /* Checks for struct lnet_handle_wire */
575 CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
576 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
577 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
578 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
579 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
581 /* Checks for struct lnet_magicversion */
582 CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
583 CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
584 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
585 CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
586 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
587 CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
588 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
590 /* Checks for struct lnet_hdr */
591 CLASSERT((int)sizeof(struct lnet_hdr) == 72);
592 CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
593 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
594 CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
595 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
596 CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
597 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
598 CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
599 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
600 CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
601 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
602 CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
603 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
604 CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
605 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
608 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
609 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
610 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
611 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
612 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
613 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
616 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
617 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
618 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
619 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
620 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
621 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
622 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
623 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
624 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
625 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
628 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
629 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
630 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
631 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
632 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
633 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
634 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
635 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
636 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
637 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
640 CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
641 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
644 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
645 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
646 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
647 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
649 /* Checks for struct lnet_ni_status and related constants */
650 CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
651 CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
652 CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
654 /* Checks for struct lnet_ni_status */
655 CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
656 CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
657 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
658 CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
659 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
660 CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
661 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
663 /* Checks for struct lnet_ping_info and related constants */
664 CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
665 CLASSERT(LNET_PING_FEAT_INVAL == 0);
666 CLASSERT(LNET_PING_FEAT_BASE == 1);
667 CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
668 CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
669 CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
670 CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
671 CLASSERT(LNET_PING_FEAT_BITS == 31);
673 /* Checks for struct lnet_ping_info */
674 CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
675 CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
676 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
677 CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
678 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
679 CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
680 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
681 CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
682 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
683 CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
684 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
687 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
689 struct lnet_lnd *lnd;
690 struct list_head *tmp;
692 /* holding lnd mutex */
693 list_for_each(tmp, &the_lnet.ln_lnds) {
694 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
696 if (lnd->lnd_type == type)
703 lnet_get_lnd_timeout(void)
705 return lnet_lnd_timeout;
707 EXPORT_SYMBOL(lnet_get_lnd_timeout);
710 lnet_register_lnd(struct lnet_lnd *lnd)
712 mutex_lock(&the_lnet.ln_lnd_mutex);
714 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
715 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
717 list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
718 lnd->lnd_refcount = 0;
720 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
722 mutex_unlock(&the_lnet.ln_lnd_mutex);
724 EXPORT_SYMBOL(lnet_register_lnd);
727 lnet_unregister_lnd(struct lnet_lnd *lnd)
729 mutex_lock(&the_lnet.ln_lnd_mutex);
731 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
732 LASSERT(lnd->lnd_refcount == 0);
734 list_del(&lnd->lnd_list);
735 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
737 mutex_unlock(&the_lnet.ln_lnd_mutex);
739 EXPORT_SYMBOL(lnet_unregister_lnd);
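/* Sum the per-CPT common statistics into *common while holding
 * LNET_LOCK_EX.
 */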
742 lnet_counters_get_common(struct lnet_counters_common *common)
744 struct lnet_counters *ctr;
747 memset(common, 0, sizeof(*common));
749 lnet_net_lock(LNET_LOCK_EX);
751 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
752 common->lcc_msgs_max += ctr->lct_common.lcc_msgs_max;
753 common->lcc_msgs_alloc += ctr->lct_common.lcc_msgs_alloc;
754 common->lcc_errors += ctr->lct_common.lcc_errors;
755 common->lcc_send_count += ctr->lct_common.lcc_send_count;
756 common->lcc_recv_count += ctr->lct_common.lcc_recv_count;
757 common->lcc_route_count += ctr->lct_common.lcc_route_count;
758 common->lcc_drop_count += ctr->lct_common.lcc_drop_count;
759 common->lcc_send_length += ctr->lct_common.lcc_send_length;
760 common->lcc_recv_length += ctr->lct_common.lcc_recv_length;
761 common->lcc_route_length += ctr->lct_common.lcc_route_length;
762 common->lcc_drop_length += ctr->lct_common.lcc_drop_length;
764 lnet_net_unlock(LNET_LOCK_EX);
766 EXPORT_SYMBOL(lnet_counters_get_common);
769 lnet_counters_get(struct lnet_counters *counters)
771 struct lnet_counters *ctr;
772 struct lnet_counters_health *health = &counters->lct_health;
775 memset(counters, 0, sizeof(*counters));
777 lnet_counters_get_common(&counters->lct_common);
779 lnet_net_lock(LNET_LOCK_EX);
781 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
782 health->lch_rst_alloc += ctr->lct_health.lch_rst_alloc;
783 health->lch_resend_count += ctr->lct_health.lch_resend_count;
784 health->lch_response_timeout_count +=
785 ctr->lct_health.lch_response_timeout_count;
786 health->lch_local_interrupt_count +=
787 ctr->lct_health.lch_local_interrupt_count;
788 health->lch_local_dropped_count +=
789 ctr->lct_health.lch_local_dropped_count;
790 health->lch_local_aborted_count +=
791 ctr->lct_health.lch_local_aborted_count;
792 health->lch_local_no_route_count +=
793 ctr->lct_health.lch_local_no_route_count;
794 health->lch_local_timeout_count +=
795 ctr->lct_health.lch_local_timeout_count;
796 health->lch_local_error_count +=
797 ctr->lct_health.lch_local_error_count;
798 health->lch_remote_dropped_count +=
799 ctr->lct_health.lch_remote_dropped_count;
800 health->lch_remote_error_count +=
801 ctr->lct_health.lch_remote_error_count;
802 health->lch_remote_timeout_count +=
803 ctr->lct_health.lch_remote_timeout_count;
804 health->lch_network_timeout_count +=
805 ctr->lct_health.lch_network_timeout_count;
807 lnet_net_unlock(LNET_LOCK_EX);
809 EXPORT_SYMBOL(lnet_counters_get);
812 lnet_counters_reset(void)
814 struct lnet_counters *counters;
817 lnet_net_lock(LNET_LOCK_EX);
819 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
820 memset(counters, 0, sizeof(struct lnet_counters));
822 lnet_net_unlock(LNET_LOCK_EX);
826 lnet_res_type2str(int type)
831 case LNET_COOKIE_TYPE_MD:
833 case LNET_COOKIE_TYPE_ME:
835 case LNET_COOKIE_TYPE_EQ:
841 lnet_res_container_cleanup(struct lnet_res_container *rec)
845 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
848 while (!list_empty(&rec->rec_active)) {
849 struct list_head *e = rec->rec_active.next;
852 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
853 lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
855 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
856 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
858 } else { /* NB: Active MEs should be attached on portals */
865 /* Found live MD/ME/EQ; the user really should unlink/free
866 * all of them before finalizing LNet, but if they didn't,
867 * we have to recycle the garbage for them */
868 CERROR("%d active elements on exit of %s container\n",
869 count, lnet_res_type2str(rec->rec_type));
872 if (rec->rec_lh_hash != NULL) {
873 LIBCFS_FREE(rec->rec_lh_hash,
874 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
875 rec->rec_lh_hash = NULL;
878 rec->rec_type = 0; /* mark it as finalized */
882 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
887 LASSERT(rec->rec_type == 0);
889 rec->rec_type = type;
890 INIT_LIST_HEAD(&rec->rec_active);
892 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
894 /* Arbitrary choice of hash table size */
895 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
896 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
897 if (rec->rec_lh_hash == NULL) {
902 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
903 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
908 CERROR("Failed to setup %s resource container\n",
909 lnet_res_type2str(type));
910 lnet_res_container_cleanup(rec);
915 lnet_res_containers_destroy(struct lnet_res_container **recs)
917 struct lnet_res_container *rec;
920 cfs_percpt_for_each(rec, i, recs)
921 lnet_res_container_cleanup(rec);
923 cfs_percpt_free(recs);
926 static struct lnet_res_container **
927 lnet_res_containers_create(int type)
929 struct lnet_res_container **recs;
930 struct lnet_res_container *rec;
934 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
936 CERROR("Failed to allocate %s resource containers\n",
937 lnet_res_type2str(type));
941 cfs_percpt_for_each(rec, i, recs) {
942 rc = lnet_res_container_setup(rec, i, type);
944 lnet_res_containers_destroy(recs);
952 struct lnet_libhandle *
953 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
955 /* ALWAYS called with lnet_res_lock held */
956 struct list_head *head;
957 struct lnet_libhandle *lh;
960 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
963 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
964 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
966 list_for_each_entry(lh, head, lh_hash_chain) {
967 if (lh->lh_cookie == cookie)
975 lnet_res_lh_initialize(struct lnet_res_container *rec,
976 struct lnet_libhandle *lh)
978 /* ALWAYS called with lnet_res_lock held */
979 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
982 lh->lh_cookie = rec->rec_lh_cookie;
983 rec->rec_lh_cookie += 1 << ibits;
985 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
987 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
990 static int lnet_unprepare(void);
993 lnet_prepare(lnet_pid_t requested_pid)
995 /* Prepare to bring up the network */
996 struct lnet_res_container **recs;
999 if (requested_pid == LNET_PID_ANY) {
1000 /* Don't instantiate LNET just for me */
1004 LASSERT(the_lnet.ln_refcount == 0);
1006 the_lnet.ln_routing = 0;
1008 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
1009 the_lnet.ln_pid = requested_pid;
1011 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
1012 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
1013 INIT_LIST_HEAD(&the_lnet.ln_nets);
1014 INIT_LIST_HEAD(&the_lnet.ln_routers);
1015 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
1016 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
1017 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
1018 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
1019 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
1020 INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
1021 INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
1022 init_waitqueue_head(&the_lnet.ln_dc_waitq);
1024 rc = lnet_descriptor_setup();
1028 rc = lnet_create_remote_nets_table();
1033 * NB the interface cookie in wire handles guards against delayed
1034 * replies and ACKs appearing valid after reboot.
1036 the_lnet.ln_interface_cookie = ktime_get_real_ns();
1038 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
1039 sizeof(struct lnet_counters));
1040 if (the_lnet.ln_counters == NULL) {
1041 CERROR("Failed to allocate counters for LNet\n");
1046 rc = lnet_peer_tables_create();
1050 rc = lnet_msg_containers_create();
1054 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
1055 LNET_COOKIE_TYPE_EQ);
1059 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
1065 the_lnet.ln_me_containers = recs;
1067 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
1073 the_lnet.ln_md_containers = recs;
1075 rc = lnet_portals_create();
1077 CERROR("Failed to create portals for LNet: %d\n", rc);
1089 lnet_unprepare (void)
1091 /* NB no LNET_LOCK since this is the last reference. All LND instances
1092 * have shut down already, so it is safe to unlink and free all
1093 * descriptors, even those that appear committed to a network op (eg MD
1094 * with non-zero pending count) */
1096 lnet_fail_nid(LNET_NID_ANY, 0);
1098 LASSERT(the_lnet.ln_refcount == 0);
1099 LASSERT(list_empty(&the_lnet.ln_test_peers));
1100 LASSERT(list_empty(&the_lnet.ln_nets));
1102 lnet_portals_destroy();
1104 if (the_lnet.ln_md_containers != NULL) {
1105 lnet_res_containers_destroy(the_lnet.ln_md_containers);
1106 the_lnet.ln_md_containers = NULL;
1109 if (the_lnet.ln_me_containers != NULL) {
1110 lnet_res_containers_destroy(the_lnet.ln_me_containers);
1111 the_lnet.ln_me_containers = NULL;
1114 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
1116 lnet_msg_containers_destroy();
1118 lnet_rtrpools_free(0);
1120 if (the_lnet.ln_counters != NULL) {
1121 cfs_percpt_free(the_lnet.ln_counters);
1122 the_lnet.ln_counters = NULL;
1124 lnet_destroy_remote_nets_table();
1125 lnet_descriptor_cleanup();
1131 lnet_net2ni_locked(__u32 net_id, int cpt)
1134 struct lnet_net *net;
1136 LASSERT(cpt != LNET_LOCK_EX);
1138 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1139 if (net->net_id == net_id) {
1140 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
1150 lnet_net2ni_addref(__u32 net)
1155 ni = lnet_net2ni_locked(net, 0);
1157 lnet_ni_addref_locked(ni, 0);
1162 EXPORT_SYMBOL(lnet_net2ni_addref);
1165 lnet_get_net_locked(__u32 net_id)
1167 struct lnet_net *net;
1169 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1170 if (net->net_id == net_id)
1178 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
1183 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1188 val = hash_long(key, LNET_CPT_BITS);
1189 /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
1193 return (unsigned int)(key + val + (val >> 1)) % number;
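/* Map an NID to a CPT, preferring the CPT list of the given NI (if any) or
 * of the NID's net; fall back to hashing across all CPTs.
 */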
1197 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
1199 struct lnet_net *net;
1201 /* must be called while holding lnet_net_lock */
1202 if (LNET_CPT_NUMBER == 1)
1203 return 0; /* the only one */
1206 * If NI is provided then use the CPT identified in the NI cpt
1207 * list if one exists. If one doesn't exist, then that NI is
1208 * associated with all CPTs and it follows that the net it belongs
1209 * to is implicitly associated with all CPTs, so just hash the nid
1213 if (ni->ni_cpts != NULL)
1214 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
1217 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1220 /* no NI provided so look at the net */
1221 net = lnet_get_net_locked(LNET_NIDNET(nid));
1223 if (net != NULL && net->net_cpts != NULL) {
1224 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
1227 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1231 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
1236 if (LNET_CPT_NUMBER == 1)
1237 return 0; /* the only one */
1239 cpt = lnet_net_lock_current();
1241 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
1243 lnet_net_unlock(cpt);
1247 EXPORT_SYMBOL(lnet_cpt_of_nid);
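/* Return true if net_id matches one of the networks configured on this
 * node.
 */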
1250 lnet_islocalnet(__u32 net_id)
1252 struct lnet_net *net;
1256 cpt = lnet_net_lock_current();
1258 net = lnet_get_net_locked(net_id);
1260 local = net != NULL;
1262 lnet_net_unlock(cpt);
1268 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
1270 struct lnet_net *net;
1273 LASSERT(cpt != LNET_LOCK_EX);
1275 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1276 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1277 if (ni->ni_nid == nid)
1286 lnet_nid2ni_addref(lnet_nid_t nid)
1291 ni = lnet_nid2ni_locked(nid, 0);
1293 lnet_ni_addref_locked(ni, 0);
1298 EXPORT_SYMBOL(lnet_nid2ni_addref);
1301 lnet_islocalnid(lnet_nid_t nid)
1306 cpt = lnet_net_lock_current();
1307 ni = lnet_nid2ni_locked(nid, cpt);
1308 lnet_net_unlock(cpt);
1314 lnet_count_acceptor_nets(void)
1316 /* Return the # of NIs that need the acceptor. */
1318 struct lnet_net *net;
1321 cpt = lnet_net_lock_current();
1322 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1323 /* all socklnd type networks should have the acceptor started */
1325 if (net->net_lnd->lnd_accept != NULL)
1329 lnet_net_unlock(cpt);
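/* Allocate a ping buffer large enough to describe nnis NIs and take the
 * first reference on it; it is released via lnet_ping_buffer_decref() and
 * freed once the refcount drops to zero.
 */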
1334 struct lnet_ping_buffer *
1335 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1337 struct lnet_ping_buffer *pbuf;
1339 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1341 pbuf->pb_nnis = nnis;
1342 atomic_set(&pbuf->pb_refcnt, 1);
1349 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1351 LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
1352 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1355 static struct lnet_ping_buffer *
1356 lnet_ping_target_create(int nnis)
1358 struct lnet_ping_buffer *pbuf;
1360 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1362 CERROR("Can't allocate ping source [%d]\n", nnis);
1366 pbuf->pb_info.pi_nnis = nnis;
1367 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1368 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1369 pbuf->pb_info.pi_features =
1370 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1376 lnet_get_net_ni_count_locked(struct lnet_net *net)
1381 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1388 lnet_get_net_ni_count_pre(struct lnet_net *net)
1393 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1400 lnet_get_ni_count(void)
1403 struct lnet_net *net;
1408 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1409 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1419 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1423 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1425 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1427 /* Loopback is guaranteed to be present */
1428 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1430 if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1436 lnet_ping_target_destroy(void)
1438 struct lnet_net *net;
1441 lnet_net_lock(LNET_LOCK_EX);
1443 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1444 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1446 ni->ni_status = NULL;
1451 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1452 the_lnet.ln_ping_target = NULL;
1454 lnet_net_unlock(LNET_LOCK_EX);
1458 lnet_ping_target_event_handler(struct lnet_event *event)
1460 struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1462 if (event->unlinked)
1463 lnet_ping_buffer_decref(pbuf);
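/* Build the ping target: optionally allocate its EQ, create the ping
 * buffer, and attach an ME/MD on LNET_RESERVED_PORTAL so that peers can
 * GET the ping info remotely.
 */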
1467 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1468 struct lnet_handle_md *ping_mdh,
1469 int ni_count, bool set_eq)
1471 struct lnet_process_id id = {
1472 .nid = LNET_NID_ANY,
1475 struct lnet_handle_me me_handle;
1476 struct lnet_md md = { NULL };
1480 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1481 &the_lnet.ln_ping_target_eq);
1483 CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1488 *ppbuf = lnet_ping_target_create(ni_count);
1489 if (*ppbuf == NULL) {
1494 /* Ping target ME/MD */
1495 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1496 LNET_PROTO_PING_MATCHBITS, 0,
1497 LNET_UNLINK, LNET_INS_AFTER,
1500 CERROR("Can't create ping target ME: %d\n", rc);
1501 goto fail_decref_ping_buffer;
1504 /* initialize md content */
1505 md.start = &(*ppbuf)->pb_info;
1506 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1507 md.threshold = LNET_MD_THRESH_INF;
1509 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1510 LNET_MD_MANAGE_REMOTE;
1511 md.eq_handle = the_lnet.ln_ping_target_eq;
1512 md.user_ptr = *ppbuf;
1514 rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1516 CERROR("Can't attach ping target MD: %d\n", rc);
1517 goto fail_unlink_ping_me;
1519 lnet_ping_buffer_addref(*ppbuf);
1523 fail_unlink_ping_me:
1524 rc2 = LNetMEUnlink(me_handle);
1526 fail_decref_ping_buffer:
1527 LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1528 lnet_ping_buffer_decref(*ppbuf);
1532 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1539 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1540 struct lnet_handle_md *ping_mdh)
1542 sigset_t blocked = cfs_block_allsigs();
1544 LNetMDUnlink(*ping_mdh);
1545 LNetInvalidateMDHandle(ping_mdh);
1547 /* NB the MD could be busy; this just starts the unlink */
1548 while (lnet_ping_buffer_numref(pbuf) > 1) {
1549 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1550 set_current_state(TASK_UNINTERRUPTIBLE);
1551 schedule_timeout(cfs_time_seconds(1));
1554 cfs_restore_sigs(blocked);
1558 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1561 struct lnet_net *net;
1562 struct lnet_ni_status *ns;
1567 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1568 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1569 LASSERT(i < pbuf->pb_nnis);
1571 ns = &pbuf->pb_info.pi_ni[i];
1573 ns->ns_nid = ni->ni_nid;
1576 ns->ns_status = (ni->ni_status != NULL) ?
1577 ni->ni_status->ns_status :
1586 * We (ab)use the ns_status of the loopback interface to
1587 * transmit the sequence number. The first interface listed
1588 * must be the loopback interface.
1590 rc = lnet_ping_info_validate(&pbuf->pb_info);
1592 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1595 LNET_PING_BUFFER_SEQNO(pbuf) =
1596 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
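/* Swap the new ping buffer/MD into place under LNET_LOCK_EX, then unlink
 * and drop the old ones and push the updated ping info to peers.
 */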
1600 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1601 struct lnet_handle_md ping_mdh)
1603 struct lnet_ping_buffer *old_pbuf = NULL;
1604 struct lnet_handle_md old_ping_md;
1606 /* switch the NIs to point to the new ping info created */
1607 lnet_net_lock(LNET_LOCK_EX);
1609 if (!the_lnet.ln_routing)
1610 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1611 if (!lnet_peer_discovery_disabled)
1612 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
1614 /* Ensure only known feature bits have been set. */
1615 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1616 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1618 lnet_ping_target_install_locked(pbuf);
1620 if (the_lnet.ln_ping_target) {
1621 old_pbuf = the_lnet.ln_ping_target;
1622 old_ping_md = the_lnet.ln_ping_target_md;
1624 the_lnet.ln_ping_target_md = ping_mdh;
1625 the_lnet.ln_ping_target = pbuf;
1627 lnet_net_unlock(LNET_LOCK_EX);
1630 /* unlink and free the old ping info */
1631 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1632 lnet_ping_buffer_decref(old_pbuf);
1635 lnet_push_update_to_peers(0);
1639 lnet_ping_target_fini(void)
1643 lnet_ping_md_unlink(the_lnet.ln_ping_target,
1644 &the_lnet.ln_ping_target_md);
1646 rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1649 lnet_ping_target_destroy();
1652 /* Resize the push target. */
1653 int lnet_push_target_resize(void)
1655 struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
1656 struct lnet_md md = { NULL };
1657 struct lnet_handle_me meh;
1658 struct lnet_handle_md mdh;
1659 struct lnet_handle_md old_mdh;
1660 struct lnet_ping_buffer *pbuf;
1661 struct lnet_ping_buffer *old_pbuf;
1662 int nnis = the_lnet.ln_push_target_nnis;
1670 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1676 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1677 LNET_PROTO_PING_MATCHBITS, 0,
1678 LNET_UNLINK, LNET_INS_AFTER,
1681 CERROR("Can't create push target ME: %d\n", rc);
1682 goto fail_decref_pbuf;
1685 /* initialize md content */
1686 md.start = &pbuf->pb_info;
1687 md.length = LNET_PING_INFO_SIZE(nnis);
1688 md.threshold = LNET_MD_THRESH_INF;
1690 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
1691 LNET_MD_MANAGE_REMOTE;
1693 md.eq_handle = the_lnet.ln_push_target_eq;
1695 rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
1697 CERROR("Can't attach push MD: %d\n", rc);
1698 goto fail_unlink_meh;
1700 lnet_ping_buffer_addref(pbuf);
1702 lnet_net_lock(LNET_LOCK_EX);
1703 old_pbuf = the_lnet.ln_push_target;
1704 old_mdh = the_lnet.ln_push_target_md;
1705 the_lnet.ln_push_target = pbuf;
1706 the_lnet.ln_push_target_md = mdh;
1707 lnet_net_unlock(LNET_LOCK_EX);
1710 LNetMDUnlink(old_mdh);
1711 lnet_ping_buffer_decref(old_pbuf);
1714 if (nnis < the_lnet.ln_push_target_nnis)
1717 CDEBUG(D_NET, "nnis %d success\n", nnis);
1724 lnet_ping_buffer_decref(pbuf);
1726 CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
1730 static void lnet_push_target_event_handler(struct lnet_event *ev)
1732 struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1734 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
1735 lnet_swap_pinginfo(pbuf);
1737 lnet_peer_push_event(ev);
1739 lnet_ping_buffer_decref(pbuf);
1742 /* Initialize the push target. */
1743 static int lnet_push_target_init(void)
1747 if (the_lnet.ln_push_target)
1750 rc = LNetEQAlloc(0, lnet_push_target_event_handler,
1751 &the_lnet.ln_push_target_eq);
1753 CERROR("Can't allocate push target EQ: %d\n", rc);
1757 /* Start at the required minimum, we'll enlarge if required. */
1758 the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
1760 rc = lnet_push_target_resize();
1763 LNetEQFree(the_lnet.ln_push_target_eq);
1764 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1770 /* Clean up the push target. */
1771 static void lnet_push_target_fini(void)
1773 if (!the_lnet.ln_push_target)
1776 /* Unlink and invalidate to prevent new references. */
1777 LNetMDUnlink(the_lnet.ln_push_target_md);
1778 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
1780 /* Wait for the unlink to complete. */
1781 while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
1782 CDEBUG(D_NET, "Still waiting for push target MD to unlink\n");
1783 set_current_state(TASK_UNINTERRUPTIBLE);
1784 schedule_timeout(cfs_time_seconds(1));
1787 lnet_ping_buffer_decref(the_lnet.ln_push_target);
1788 the_lnet.ln_push_target = NULL;
1789 the_lnet.ln_push_target_nnis = 0;
1791 LNetEQFree(the_lnet.ln_push_target_eq);
1792 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
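/* Split the net's max TX credits evenly across the NI's CPTs, clamped to at
 * least 8 * peer_tx_credits and at most max_tx_credits per queue.
 */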
1796 lnet_ni_tq_credits(struct lnet_ni *ni)
1800 LASSERT(ni->ni_ncpts >= 1);
1802 if (ni->ni_ncpts == 1)
1803 return ni->ni_net->net_tunables.lct_max_tx_credits;
1805 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1806 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1807 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
1813 lnet_ni_unlink_locked(struct lnet_ni *ni)
1815 /* move it to the zombie list so nobody can find it anymore */
1816 LASSERT(!list_empty(&ni->ni_netlist));
1817 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1818 lnet_ni_decref_locked(ni, 0);
1822 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1827 struct list_head *zombie_list = &net->net_ni_zombie;
1830 * Now wait for the NIs I just nuked to show up on the zombie
1831 * list and shut them down in guaranteed thread context
1834 while (!list_empty(zombie_list)) {
1838 ni = list_entry(zombie_list->next,
1839 struct lnet_ni, ni_netlist);
1840 list_del_init(&ni->ni_netlist);
1841 /* the NI should be in the deleting state; if it's not, it's a bug */
1843 LASSERT(ni->ni_state & LNET_NI_STATE_DELETING);
1844 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1847 /* still busy, add it back to zombie list */
1848 list_add(&ni->ni_netlist, zombie_list);
1852 if (!list_empty(&ni->ni_netlist)) {
1853 lnet_net_unlock(LNET_LOCK_EX);
1855 if ((i & (-i)) == i) {
1857 "Waiting for zombie LNI %s\n",
1858 libcfs_nid2str(ni->ni_nid));
1860 set_current_state(TASK_UNINTERRUPTIBLE);
1861 schedule_timeout(cfs_time_seconds(1));
1862 lnet_net_lock(LNET_LOCK_EX);
1866 lnet_net_unlock(LNET_LOCK_EX);
1868 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1870 LASSERT(!in_interrupt());
1871 (net->net_lnd->lnd_shutdown)(ni);
1874 CDEBUG(D_LNI, "Removed LNI %s\n",
1875 libcfs_nid2str(ni->ni_nid));
1879 lnet_net_lock(LNET_LOCK_EX);
1883 /* shut down the NI and release its refcount */
1885 lnet_shutdown_lndni(struct lnet_ni *ni)
1888 struct lnet_net *net = ni->ni_net;
1890 lnet_net_lock(LNET_LOCK_EX);
1892 ni->ni_state |= LNET_NI_STATE_DELETING;
1893 ni->ni_state &= ~LNET_NI_STATE_ACTIVE;
1895 lnet_ni_unlink_locked(ni);
1896 lnet_incr_dlc_seq();
1897 lnet_net_unlock(LNET_LOCK_EX);
1899 /* clear messages for this NI on the lazy portal */
1900 for (i = 0; i < the_lnet.ln_nportals; i++)
1901 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1903 lnet_net_lock(LNET_LOCK_EX);
1904 lnet_clear_zombies_nis_locked(net);
1905 lnet_net_unlock(LNET_LOCK_EX);
1909 lnet_shutdown_lndnet(struct lnet_net *net)
1913 lnet_net_lock(LNET_LOCK_EX);
1915 net->net_state = LNET_NET_STATE_DELETING;
1917 list_del_init(&net->net_list);
1919 while (!list_empty(&net->net_ni_list)) {
1920 ni = list_entry(net->net_ni_list.next,
1921 struct lnet_ni, ni_netlist);
1922 lnet_net_unlock(LNET_LOCK_EX);
1923 lnet_shutdown_lndni(ni);
1924 lnet_net_lock(LNET_LOCK_EX);
1927 lnet_net_unlock(LNET_LOCK_EX);
1929 /* Do peer table cleanup for this net */
1930 lnet_peer_tables_cleanup(net);
1932 lnet_net_lock(LNET_LOCK_EX);
1934 * decrement ref count on lnd only when the entire network goes away
1937 net->net_lnd->lnd_refcount--;
1939 lnet_net_unlock(LNET_LOCK_EX);
1945 lnet_shutdown_lndnets(void)
1947 struct lnet_net *net;
1948 struct list_head resend;
1949 struct lnet_msg *msg, *tmp;
1951 INIT_LIST_HEAD(&resend);
1953 /* NB called holding the global mutex */
1955 /* All quiet on the API front */
1956 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1957 LASSERT(the_lnet.ln_refcount == 0);
1959 lnet_net_lock(LNET_LOCK_EX);
1960 the_lnet.ln_state = LNET_STATE_STOPPING;
1962 while (!list_empty(&the_lnet.ln_nets)) {
1964 * move the nets to the zombie list to avoid them being
1965 * picked up for new work. LONET is also included among the
1966 * nets that will be moved to the zombie list
1968 net = list_entry(the_lnet.ln_nets.next,
1969 struct lnet_net, net_list);
1970 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1973 /* Drop the cached loopback Net. */
1974 if (the_lnet.ln_loni != NULL) {
1975 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1976 the_lnet.ln_loni = NULL;
1978 lnet_net_unlock(LNET_LOCK_EX);
1980 /* iterate through the net zombie list and delete each net */
1981 while (!list_empty(&the_lnet.ln_net_zombie)) {
1982 net = list_entry(the_lnet.ln_net_zombie.next,
1983 struct lnet_net, net_list);
1984 lnet_shutdown_lndnet(net);
1987 spin_lock(&the_lnet.ln_msg_resend_lock);
1988 list_splice(&the_lnet.ln_msg_resend, &resend);
1989 spin_unlock(&the_lnet.ln_msg_resend_lock);
1991 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
1992 list_del_init(&msg->msg_list);
1993 msg->msg_no_resend = true;
1994 lnet_finalize(msg, -ECANCELED);
1997 lnet_net_lock(LNET_LOCK_EX);
1998 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1999 lnet_net_unlock(LNET_LOCK_EX);
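/* Bring up a single NI: record any LND tunables, call the LND's startup
 * method under ln_lnd_mutex, treat the loopback NI specially, and set up
 * the per-CPT TX credit queues.
 */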
2003 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
2006 struct lnet_tx_queue *tq;
2008 struct lnet_net *net = ni->ni_net;
2010 mutex_lock(&the_lnet.ln_lnd_mutex);
2013 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
2014 ni->ni_lnd_tunables_set = true;
2017 rc = (net->net_lnd->lnd_startup)(ni);
2019 mutex_unlock(&the_lnet.ln_lnd_mutex);
2022 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
2023 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
2024 lnet_net_lock(LNET_LOCK_EX);
2025 net->net_lnd->lnd_refcount--;
2026 lnet_net_unlock(LNET_LOCK_EX);
2031 ni->ni_state |= LNET_NI_STATE_ACTIVE;
2032 ni->ni_state &= ~LNET_NI_STATE_INIT;
2035 /* We keep a reference on the loopback net through the loopback NI */
2036 if (net->net_lnd->lnd_type == LOLND) {
2038 LASSERT(the_lnet.ln_loni == NULL);
2039 the_lnet.ln_loni = ni;
2040 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
2041 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
2042 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
2043 ni->ni_net->net_tunables.lct_peer_timeout = 0;
2047 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
2048 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
2049 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
2050 libcfs_lnd2str(net->net_lnd->lnd_type),
2051 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
2053 /* shut down the NI since if we get here it must've already been started */
2056 lnet_shutdown_lndni(ni);
2060 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
2061 tq->tq_credits_min =
2062 tq->tq_credits_max =
2063 tq->tq_credits = lnet_ni_tq_credits(ni);
2066 atomic_set(&ni->ni_tx_credits,
2067 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
2068 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
2070 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
2071 libcfs_nid2str(ni->ni_nid),
2072 ni->ni_net->net_tunables.lct_peer_tx_credits,
2073 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
2074 ni->ni_net->net_tunables.lct_peer_rtr_credits,
2075 ni->ni_net->net_tunables.lct_peer_timeout);
2084 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
2087 struct lnet_net *net_l = NULL;
2088 struct list_head local_ni_list;
2092 struct lnet_lnd *lnd;
2094 net->net_tunables.lct_peer_timeout;
2096 net->net_tunables.lct_max_tx_credits;
2097 int peerrtrcredits =
2098 net->net_tunables.lct_peer_rtr_credits;
2100 INIT_LIST_HEAD(&local_ni_list);
2103 * make sure that this net is unique. If it isn't then
2104 * we are adding interfaces to an already existing network, and
2105 * 'net' is just a convenient way to pass in the list.
2106 * If it is unique we need to find the LND and load it if necessary.
2109 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
2110 lnd_type = LNET_NETTYP(net->net_id);
2112 mutex_lock(&the_lnet.ln_lnd_mutex);
2113 lnd = lnet_find_lnd_by_type(lnd_type);
2116 mutex_unlock(&the_lnet.ln_lnd_mutex);
2117 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
2118 mutex_lock(&the_lnet.ln_lnd_mutex);
2120 lnd = lnet_find_lnd_by_type(lnd_type);
2122 mutex_unlock(&the_lnet.ln_lnd_mutex);
2123 CERROR("Can't load LND %s, module %s, rc=%d\n",
2124 libcfs_lnd2str(lnd_type),
2125 libcfs_lnd2modname(lnd_type), rc);
2126 #ifndef HAVE_MODULE_LOADING_SUPPORT
2127 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
2128 "compiled with kernel module "
2129 "loading support.");
2136 lnet_net_lock(LNET_LOCK_EX);
2137 lnd->lnd_refcount++;
2138 lnet_net_unlock(LNET_LOCK_EX);
2142 mutex_unlock(&the_lnet.ln_lnd_mutex);
2148 * net_l: if the network being added is unique then net_l
2149 * will point to that network
2150 * if the network being added is not unique then
2151 * net_l points to the existing network.
2153 * When we enter the loop below, we'll pick NIs off the
2154 * network being added and start them up, then add them to
2155 * a local ni list. Once we've successfully started all
2156 * the NIs then we join the local NI list (of started up
2157 * networks) with the net_l->net_ni_list, which should
2158 * point to the correct network to add the new ni list to
2160 * If any of the new NIs fail to start up, then we want to
2161 * iterate through the local ni list, which should include
2162 * any NIs which were successfully started up, and shut
2165 * After that we want to delete the network being added,
2166 * to avoid a memory leak.
2170 * When a network uses TCP bonding then all its interfaces
2171 * must be specified when the network is first defined: the
2172 * TCP bonding code doesn't allow for interfaces to be added or removed.
2175 if (net_l != net && net_l != NULL && use_tcp_bonding &&
2176 LNET_NETTYP(net_l->net_id) == SOCKLND) {
2181 while (!list_empty(&net->net_ni_added)) {
2182 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
2184 list_del_init(&ni->ni_netlist);
2186 /* make sure that the NI we're about to start
2187 * up is actually unique. If it's not, fail. */
2188 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2189 ni->ni_interfaces[0])) {
2194 /* adjust the pointer to the parent network, just in case
2195 * the net is a duplicate */
2198 rc = lnet_startup_lndni(ni, tun);
2200 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
2201 ni->ni_net->net_lnd->lnd_query != NULL);
2207 list_add_tail(&ni->ni_netlist, &local_ni_list);
2212 lnet_net_lock(LNET_LOCK_EX);
2213 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2214 lnet_incr_dlc_seq();
2215 lnet_net_unlock(LNET_LOCK_EX);
2217 /* if the network is not unique then we don't want to keep
2218 * it around after we're done. Free it. Otherwise add that
2219 * net to the global the_lnet.ln_nets */
2220 if (net_l != net && net_l != NULL) {
2222 * TODO - note: currently the tunables cannot be updated dynamically
2227 net->net_state = LNET_NET_STATE_ACTIVE;
2229 * restore tunables after they have been overwritten by the LND
2232 if (peer_timeout != -1)
2233 net->net_tunables.lct_peer_timeout = peer_timeout;
2234 if (maxtxcredits != -1)
2235 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2236 if (peerrtrcredits != -1)
2237 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2239 lnet_net_lock(LNET_LOCK_EX);
2240 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2241 lnet_net_unlock(LNET_LOCK_EX);
2248 * shutdown the new NIs that are being started up
2249 * free the NET being started
2251 while (!list_empty(&local_ni_list)) {
2252 ni = list_entry(local_ni_list.next, struct lnet_ni,
2255 lnet_shutdown_lndni(ni);
2265 lnet_startup_lndnets(struct list_head *netlist)
2267 struct lnet_net *net;
2272 * Change to running state before bringing up the LNDs. This
2273 * allows lnet_shutdown_lndnets() to assert that we've passed through here.
2276 lnet_net_lock(LNET_LOCK_EX);
2277 the_lnet.ln_state = LNET_STATE_RUNNING;
2278 lnet_net_unlock(LNET_LOCK_EX);
2280 while (!list_empty(netlist)) {
2281 net = list_entry(netlist->next, struct lnet_net, net_list);
2282 list_del_init(&net->net_list);
2284 rc = lnet_startup_lndnet(net, NULL);
2294 lnet_shutdown_lndnets();
2300 * Initialize LNet library.
2302 * Automatically called at module loading time. Caller has to call
2303 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2304 * latter returned 0. It must be called exactly once.
2306 * \retval 0 on success
2307 * \retval -ve on failures.
2309 int lnet_lib_init(void)
2313 lnet_assert_wire_constants();
2315 /* refer to global cfs_cpt_table for now */
2316 the_lnet.ln_cpt_table = cfs_cpt_table;
2317 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
2319 LASSERT(the_lnet.ln_cpt_number > 0);
2320 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2321 /* we are under risk of consuming all lh_cookie */
2322 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2323 "please change setting of CPT-table and retry\n",
2324 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2328 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2329 the_lnet.ln_cpt_bits++;
2331 rc = lnet_create_locks();
2333 CERROR("Can't create LNet global locks: %d\n", rc);
2337 the_lnet.ln_refcount = 0;
2338 LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
2339 INIT_LIST_HEAD(&the_lnet.ln_lnds);
2340 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2341 INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
2342 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2343 INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
2345 /* The hash table size is the number of bits it takes to express the set
2346 * ln_num_routes, minus 1 (better to underestimate than overestimate so we
2347 * don't waste memory). */
2348 if (rnet_htable_size <= 0)
2349 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2350 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2351 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2352 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2353 order_base_2(rnet_htable_size) - 1);
2355 /* All LNDs apart from the LOLND are in separate modules. They
2356 * register themselves when their module loads, and unregister
2357 * themselves when their module is unloaded. */
2358 lnet_register_lnd(&the_lolnd);
2363 * Finalize LNet library.
2365 * \pre lnet_lib_init() called with success.
2366 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2368 void lnet_lib_exit(void)
2370 LASSERT(the_lnet.ln_refcount == 0);
2372 while (!list_empty(&the_lnet.ln_lnds))
2373 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
2374 struct lnet_lnd, lnd_list));
2375 lnet_destroy_locks();
2379 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2381 * Users must call this function at least once before any other functions.
2382 * For each successful call there must be a corresponding call to
2383 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is ignored.
2386 * The PID used by LNet may be different from the one requested.
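 * A typical caller (the Lustre ptlrpc layer, for instance) passes
 * LNET_PID_LUSTRE here; that is an illustrative note about common usage,
 * not a requirement imposed by this function.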
2389 * \param requested_pid PID requested by the caller.
2391 * \return >= 0 on success, and < 0 error code on failures.
2394 LNetNIInit(lnet_pid_t requested_pid)
2396 int im_a_router = 0;
2399 struct lnet_ping_buffer *pbuf;
2400 struct lnet_handle_md ping_mdh;
2401 struct list_head net_head;
2402 struct lnet_net *net;
2404 INIT_LIST_HEAD(&net_head);
2406 mutex_lock(&the_lnet.ln_api_mutex);
2408 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2410 if (the_lnet.ln_refcount > 0) {
2411 rc = the_lnet.ln_refcount++;
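/* Subsequent initializations only bump the reference count; rc holds
 * the previous (positive) count, which satisfies the ">= 0 on success"
 * contract documented above. */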
2412 mutex_unlock(&the_lnet.ln_api_mutex);
2416 rc = lnet_prepare(requested_pid);
2418 mutex_unlock(&the_lnet.ln_api_mutex);
2422 /* create a network for Loopback network */
2423 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2426 goto err_empty_list;
2429 /* Add in the loopback NI */
2430 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2432 goto err_empty_list;
2435 /* If LNet is being initialized via DLC it is possible
2436 * that the user requests not to load module parameters (ones which
2437 * are supported by DLC) on initialization. Therefore, make sure not
2438 * to load networks, routes and forwarding from module parameters
2439 * in this case. On cleanup in case of failure, only clean up
2440 * routes if they have been loaded. */
2441 if (!the_lnet.ln_nis_from_mod_params) {
2442 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2445 goto err_empty_list;
2448 ni_count = lnet_startup_lndnets(&net_head);
2451 goto err_empty_list;
2454 if (!the_lnet.ln_nis_from_mod_params) {
2455 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2457 goto err_shutdown_lndnis;
2459 rc = lnet_check_routes();
2461 goto err_destroy_routes;
2463 rc = lnet_rtrpools_alloc(im_a_router);
2465 goto err_destroy_routes;
2468 rc = lnet_acceptor_start();
2470 goto err_destroy_routes;
2472 the_lnet.ln_refcount = 1;
2473 /* Now I may use my own API functions... */
2475 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2477 goto err_acceptor_stop;
2479 lnet_ping_target_update(pbuf, ping_mdh);
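/* From this point the ping target set up above advertises the freshly
 * started NIs: peers that ping this node receive the flattened NI list
 * sized by ni_count. */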
2481 rc = lnet_monitor_thr_start();
2485 rc = lnet_push_target_init();
2487 goto err_stop_monitor_thr;
2489 rc = lnet_peer_discovery_start();
2491 goto err_destroy_push_target;
2494 lnet_router_debugfs_init();
2496 mutex_unlock(&the_lnet.ln_api_mutex);
2500 err_destroy_push_target:
2501 lnet_push_target_fini();
2502 err_stop_monitor_thr:
2503 lnet_monitor_thr_stop();
2505 lnet_ping_target_fini();
2507 the_lnet.ln_refcount = 0;
2508 lnet_acceptor_stop();
2510 if (!the_lnet.ln_nis_from_mod_params)
2511 lnet_destroy_routes();
2512 err_shutdown_lndnis:
2513 lnet_shutdown_lndnets();
2517 mutex_unlock(&the_lnet.ln_api_mutex);
2518 while (!list_empty(&net_head)) {
2519 struct lnet_net *net;
2521 net = list_entry(net_head.next, struct lnet_net, net_list);
2522 list_del_init(&net->net_list);
2527 EXPORT_SYMBOL(LNetNIInit);
2530 * Stop LNet interfaces, routing, and forwarding.
2532 * Users must call this function once for each successful call to LNetNIInit().
2533 * Once the LNetNIFini() operation has been started, the results of pending
2534 * API operations are undefined.
2536 * \return always 0 for current implementation.
2541 mutex_lock(&the_lnet.ln_api_mutex);
2543 LASSERT(the_lnet.ln_refcount > 0);
2545 if (the_lnet.ln_refcount != 1) {
2546 the_lnet.ln_refcount--;
2548 LASSERT(!the_lnet.ln_niinit_self);
2552 lnet_router_debugfs_fini();
2553 lnet_peer_discovery_stop();
2554 lnet_push_target_fini();
2555 lnet_monitor_thr_stop();
2556 lnet_ping_target_fini();
2558 /* Teardown fns that use my own API functions BEFORE here */
2559 the_lnet.ln_refcount = 0;
2561 lnet_acceptor_stop();
2562 lnet_destroy_routes();
2563 lnet_shutdown_lndnets();
2567 mutex_unlock(&the_lnet.ln_api_mutex);
2570 EXPORT_SYMBOL(LNetNIFini);
2573 * Grabs the ni data from the ni structure and fills the output parameters.
2576 * \param[in] ni network interface structure
2577 * \param[out] cfg_ni NI config information
2578 * \param[out] tun network and LND tunables
2581 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2582 struct lnet_ioctl_config_lnd_tunables *tun,
2583 struct lnet_ioctl_element_stats *stats,
2586 size_t min_size = 0;
2589 if (!ni || !cfg_ni || !tun)
2592 if (ni->ni_interfaces[0] != NULL) {
2593 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2594 if (ni->ni_interfaces[i] != NULL) {
2595 strncpy(cfg_ni->lic_ni_intf[i],
2596 ni->ni_interfaces[i],
2597 sizeof(cfg_ni->lic_ni_intf[i]));
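/* Note that strncpy() does not NUL-terminate the copy when the source
 * name completely fills the destination, so lic_ni_intf[i] is only
 * guaranteed to be terminated if ni_interfaces[i] is shorter than the
 * destination buffer. */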
2602 cfg_ni->lic_nid = ni->ni_nid;
2603 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2604 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2606 cfg_ni->lic_status = ni->ni_status->ns_status;
2607 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2608 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2610 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2613 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
2614 LNET_STATS_TYPE_SEND);
2615 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
2616 LNET_STATS_TYPE_RECV);
2617 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
2618 LNET_STATS_TYPE_DROP);
2622 * tun->lt_tun will always be present, but in order to be
2623 * backwards compatible we need to handle the case where
2624 * tun->lt_tun is smaller than what the kernel has, because it
2625 * came from an older version of a userspace program. In that case
2626 * we copy only as much information as the available space allows.
2628 min_size = tun_size - sizeof(tun->lt_cmn);
2629 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2631 /* copy over the cpts */
2632 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2633 ni->ni_cpts == NULL) {
2634 for (i = 0; i < ni->ni_ncpts; i++)
2635 cfg_ni->lic_cpts[i] = i;
2638 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2639 i < LNET_MAX_SHOW_NUM_CPT;
2641 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2643 cfg_ni->lic_ncpts = ni->ni_ncpts;
2647 * NOTE: This is a legacy function left in the code to be backwards
2648 * compatible with older userspace programs. It should eventually be removed.
2651 * Grabs the ni data from the ni structure and fills the output parameters.
2654 * \param[in] ni network interface structure
2655 * \param[out] config config information
2658 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2659 struct lnet_ioctl_config_data *config)
2661 struct lnet_ioctl_net_config *net_config;
2662 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2663 size_t min_size, tunable_size = 0;
2669 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2673 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2674 ARRAY_SIZE(net_config->ni_interfaces));
2676 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2677 if (!ni->ni_interfaces[i])
2680 strncpy(net_config->ni_interfaces[i],
2681 ni->ni_interfaces[i],
2682 sizeof(net_config->ni_interfaces[i]));
2685 config->cfg_nid = ni->ni_nid;
2686 config->cfg_config_u.cfg_net.net_peer_timeout =
2687 ni->ni_net->net_tunables.lct_peer_timeout;
2688 config->cfg_config_u.cfg_net.net_max_tx_credits =
2689 ni->ni_net->net_tunables.lct_max_tx_credits;
2690 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2691 ni->ni_net->net_tunables.lct_peer_tx_credits;
2692 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2693 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2695 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2696 net_config->ni_status = LNET_NI_STATUS_UP;
2698 net_config->ni_status = ni->ni_status->ns_status;
2701 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2703 for (i = 0; i < num_cpts; i++)
2704 net_config->ni_cpts[i] = ni->ni_cpts[i];
2706 config->cfg_ncpts = num_cpts;
2710 * See if user land tools sent in a newer and larger version
2711 * of struct lnet_tunables than what the kernel uses.
2713 min_size = sizeof(*config) + sizeof(*net_config);
2715 if (config->cfg_hdr.ioc_len > min_size)
2716 tunable_size = config->cfg_hdr.ioc_len - min_size;
2718 /* Don't copy too much data to user space */
2719 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2720 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2722 if (lnd_cfg && min_size) {
2723 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2724 config->cfg_config_u.cfg_net.net_interface_count = 1;
2726 /* Tell user land that kernel side has less data */
2727 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2728 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2729 config->cfg_hdr.ioc_len -= min_size;
2735 lnet_get_ni_idx_locked(int idx)
2738 struct lnet_net *net;
2740 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2741 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2751 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2754 struct lnet_net *net = mynet;
2757 * It is possible that the net has been cleaned out while there is
2758 * a message being sent. This function accesses the net without
2759 * checking if the list is empty.
2763 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2765 if (list_empty(&net->net_ni_list))
2767 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2773 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2774 /* if you reached the end of the ni list and the net is
2775 * specified, then there are no more nis in that net */
2779 /* we reached the end of this net ni list. move to the
2781 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2782 /* no more nets and no more NIs. */
2785 /* get the next net */
2786 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2788 if (list_empty(&net->net_ni_list))
2790 /* get the ni on it */
2791 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2797 if (list_empty(&prev->ni_netlist))
2800 /* there are more nis left */
2801 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
2807 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2812 int idx = config->cfg_count;
2814 cpt = lnet_net_lock_current();
2816 ni = lnet_get_ni_idx_locked(idx);
2821 lnet_fill_ni_info_legacy(ni, config);
2825 lnet_net_unlock(cpt);
2830 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2831 struct lnet_ioctl_config_lnd_tunables *tun,
2832 struct lnet_ioctl_element_stats *stats,
2839 if (!cfg_ni || !tun || !stats)
2842 cpt = lnet_net_lock_current();
2844 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2849 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2853 lnet_net_unlock(cpt);
2857 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
2866 cpt = lnet_net_lock_current();
2868 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
2871 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
2875 lnet_net_unlock(cpt);
2880 static int lnet_add_net_common(struct lnet_net *net,
2881 struct lnet_ioctl_config_lnd_tunables *tun)
2884 struct lnet_ping_buffer *pbuf;
2885 struct lnet_handle_md ping_mdh;
2887 struct lnet_remotenet *rnet;
2889 int num_acceptor_nets;
2891 lnet_net_lock(LNET_LOCK_EX);
2892 rnet = lnet_find_rnet_locked(net->net_id);
2893 lnet_net_unlock(LNET_LOCK_EX);
2895 * make sure that the net added doesn't invalidate the current
2896 * configuration LNet is keeping
2899 CERROR("Adding net %s will invalidate routing configuration\n",
2900 libcfs_net2str(net->net_id));
2906 * make sure you calculate the correct number of slots in the ping
2907 * buffer. Since the ping info is a flattened list of all the NIs,
2908 * we should allocate enough slots to accommodate the number of NIs
2909 * which will be added.
2911 * since ni hasn't been configured yet, use
2912 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
2914 net_ni_count = lnet_get_net_ni_count_pre(net);
2916 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2917 net_ni_count + lnet_get_ni_count(),
2925 memcpy(&net->net_tunables,
2926 &tun->lt_cmn, sizeof(net->net_tunables));
2928 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
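/* Writing 0xff bytes leaves every integer tunable at -1, the same
 * "not explicitly configured" sentinel checked when peer_timeout and
 * the credit values are applied earlier in this file, so defaults can
 * be filled in later. */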
2931 * before starting this network get a count of the current TCP
2932 * networks which require the acceptor thread running. If that
2933 * count is == 0 before we start up this network, then we'd want to
2934 * start up the acceptor thread after starting up this network
2936 num_acceptor_nets = lnet_count_acceptor_nets();
2938 net_id = net->net_id;
2940 rc = lnet_startup_lndnet(net,
2941 (tun) ? &tun->lt_tun : NULL);
2945 lnet_net_lock(LNET_LOCK_EX);
2946 net = lnet_get_net_locked(net_id);
2947 lnet_net_unlock(LNET_LOCK_EX);
2952 * Start the acceptor thread if this is the first network
2953 * being added that requires the thread.
2955 if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
2956 rc = lnet_acceptor_start();
2958 /* shutdown the net that we just started */
2959 CERROR("Failed to start up acceptor thread\n");
2960 lnet_shutdown_lndnet(net);
2965 lnet_net_lock(LNET_LOCK_EX);
2966 lnet_peer_net_added(net);
2967 lnet_net_unlock(LNET_LOCK_EX);
2969 lnet_ping_target_update(pbuf, ping_mdh);
2974 lnet_ping_md_unlink(pbuf, &ping_mdh);
2975 lnet_ping_buffer_decref(pbuf);
2979 static int lnet_handle_legacy_ip2nets(char *ip2nets,
2980 struct lnet_ioctl_config_lnd_tunables *tun)
2982 struct lnet_net *net;
2985 struct list_head net_head;
2987 INIT_LIST_HEAD(&net_head);
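/* The legacy string uses the ip2nets syntax, for example (illustrative
 * only):
 *   "tcp(eth0) 192.168.0.*; o2ib 10.10.0.*"
 * i.e. a network name (with an optional interface list) followed by an
 * IP address pattern selecting the nodes that instantiate it. */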
2989 rc = lnet_parse_ip2nets(&nets, ip2nets);
2993 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2997 mutex_lock(&the_lnet.ln_api_mutex);
2998 while (!list_empty(&net_head)) {
2999 net = list_entry(net_head.next, struct lnet_net, net_list);
3000 list_del_init(&net->net_list);
3001 rc = lnet_add_net_common(net, tun);
3007 mutex_unlock(&the_lnet.ln_api_mutex);
3009 while (!list_empty(&net_head)) {
3010 net = list_entry(net_head.next, struct lnet_net, net_list);
3011 list_del_init(&net->net_list);
3017 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
3019 struct lnet_net *net;
3021 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3023 __u32 net_id, lnd_type;
3025 /* get the tunables if they are available */
3026 if (conf->lic_cfg_hdr.ioc_len >=
3027 sizeof(*conf) + sizeof(*tun))
3028 tun = (struct lnet_ioctl_config_lnd_tunables *)
3031 /* handle legacy ip2nets from DLC */
3032 if (conf->lic_legacy_ip2nets[0] != '\0')
3033 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
3036 net_id = LNET_NIDNET(conf->lic_nid);
3037 lnd_type = LNET_NETTYP(net_id);
3039 if (!libcfs_isknown_lnd(lnd_type)) {
3040 CERROR("No valid net and lnd information provided\n");
3044 net = lnet_net_alloc(net_id, NULL);
3048 for (i = 0; i < conf->lic_ncpts; i++) {
3049 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
3053 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
3054 conf->lic_ni_intf[0]);
3058 mutex_lock(&the_lnet.ln_api_mutex);
3060 rc = lnet_add_net_common(net, tun);
3062 mutex_unlock(&the_lnet.ln_api_mutex);
3067 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
3069 struct lnet_net *net;
3071 __u32 net_id = LNET_NIDNET(conf->lic_nid);
3072 struct lnet_ping_buffer *pbuf;
3073 struct lnet_handle_md ping_mdh;
3078 /* don't allow userspace to shutdown the LOLND */
3079 if (LNET_NETTYP(net_id) == LOLND)
3082 mutex_lock(&the_lnet.ln_api_mutex);
3086 net = lnet_get_net_locked(net_id);
3088 CERROR("net %s not found\n",
3089 libcfs_net2str(net_id));
3094 addr = LNET_NIDADDR(conf->lic_nid);
3096 /* remove the entire net */
3097 net_count = lnet_get_net_ni_count_locked(net);
3101 /* create and link a new ping info, before removing the old one */
3102 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3103 lnet_get_ni_count() - net_count,
3106 goto unlock_api_mutex;
3108 lnet_shutdown_lndnet(net);
3110 if (lnet_count_acceptor_nets() == 0)
3111 lnet_acceptor_stop();
3113 lnet_ping_target_update(pbuf, ping_mdh);
3115 goto unlock_api_mutex;
3118 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
3120 CERROR("nid %s not found\n",
3121 libcfs_nid2str(conf->lic_nid));
3126 net_count = lnet_get_net_ni_count_locked(net);
3130 /* create and link a new ping info, before removing the old one */
3131 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3132 lnet_get_ni_count() - 1, false);
3134 goto unlock_api_mutex;
3136 lnet_shutdown_lndni(ni);
3138 if (lnet_count_acceptor_nets() == 0)
3139 lnet_acceptor_stop();
3141 lnet_ping_target_update(pbuf, ping_mdh);
3143 /* check if the net is empty and remove it if it is */
3145 lnet_shutdown_lndnet(net);
3147 goto unlock_api_mutex;
3152 mutex_unlock(&the_lnet.ln_api_mutex);
3158 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
3159 * They are only expected to be called for unique networks.
3160 * That can be as a result of older DLC library
3161 * calls. Multi-Rail DLC and beyond no longer uses these APIs.
3164 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3166 struct lnet_net *net;
3167 struct list_head net_head;
3169 struct lnet_ioctl_config_lnd_tunables tun;
3170 char *nets = conf->cfg_config_u.cfg_net.net_intf;
3172 INIT_LIST_HEAD(&net_head);
3174 /* Create a net/ni structures for the network string */
3175 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3177 return rc == 0 ? -EINVAL : rc;
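/* lnet_parse_networks() returns the number of networks parsed or a
 * negative errno; zero parsed networks means the string was unusable,
 * hence the -EINVAL mapping above. */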
3179 mutex_lock(&the_lnet.ln_api_mutex);
3182 rc = -EINVAL; /* only add one network per call */
3183 goto out_unlock_clean;
3186 net = list_entry(net_head.next, struct lnet_net, net_list);
3187 list_del_init(&net->net_list);
3189 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3191 memset(&tun, 0, sizeof(tun));
3193 tun.lt_cmn.lct_peer_timeout =
3194 conf->cfg_config_u.cfg_net.net_peer_timeout;
3195 tun.lt_cmn.lct_peer_tx_credits =
3196 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3197 tun.lt_cmn.lct_peer_rtr_credits =
3198 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3199 tun.lt_cmn.lct_max_tx_credits =
3200 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3202 rc = lnet_add_net_common(net, &tun);
3205 mutex_unlock(&the_lnet.ln_api_mutex);
3206 while (!list_empty(&net_head)) {
3207 /* net_head list is empty in success case */
3208 net = list_entry(net_head.next, struct lnet_net, net_list);
3209 list_del_init(&net->net_list);
3216 lnet_dyn_del_net(__u32 net_id)
3218 struct lnet_net *net;
3219 struct lnet_ping_buffer *pbuf;
3220 struct lnet_handle_md ping_mdh;
3224 /* don't allow userspace to shutdown the LOLND */
3225 if (LNET_NETTYP(net_id) == LOLND)
3228 mutex_lock(&the_lnet.ln_api_mutex);
3232 net = lnet_get_net_locked(net_id);
3239 net_ni_count = lnet_get_net_ni_count_locked(net);
3243 /* create and link a new ping info, before removing the old one */
3244 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3245 lnet_get_ni_count() - net_ni_count, false);
3249 lnet_shutdown_lndnet(net);
3251 if (lnet_count_acceptor_nets() == 0)
3252 lnet_acceptor_stop();
3254 lnet_ping_target_update(pbuf, ping_mdh);
3257 mutex_unlock(&the_lnet.ln_api_mutex);
3262 void lnet_incr_dlc_seq(void)
3264 atomic_inc(&lnet_dlc_seq_no);
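/* The DLC sequence number is bumped on every dynamic configuration
 * change; comparing two reads of it is presumably how callers detect
 * that the configuration changed in between. */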
3267 __u32 lnet_get_dlc_seq_locked(void)
3269 return atomic_read(&lnet_dlc_seq_no);
3273 lnet_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3275 struct lnet_net *net;
3278 lnet_net_lock(LNET_LOCK_EX);
3279 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3280 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3281 if (ni->ni_nid == nid || all) {
3282 atomic_set(&ni->ni_healthv, value);
3283 if (list_empty(&ni->ni_recovery) &&
3284 value < LNET_MAX_HEALTH_VALUE) {
3285 CERROR("manually adding local NI %s to recovery\n",
3286 libcfs_nid2str(ni->ni_nid));
3287 list_add_tail(&ni->ni_recovery,
3288 &the_lnet.ln_mt_localNIRecovq);
3289 lnet_ni_addref_locked(ni, 0);
3292 lnet_net_unlock(LNET_LOCK_EX);
3298 lnet_net_unlock(LNET_LOCK_EX);
3302 lnet_get_local_ni_hstats(struct lnet_ioctl_local_ni_hstats *stats)
3306 lnet_nid_t nid = stats->hlni_nid;
3308 cpt = lnet_net_lock_current();
3309 ni = lnet_nid2ni_locked(nid, cpt);
3316 stats->hlni_local_interrupt = atomic_read(&ni->ni_hstats.hlt_local_interrupt);
3317 stats->hlni_local_dropped = atomic_read(&ni->ni_hstats.hlt_local_dropped);
3318 stats->hlni_local_aborted = atomic_read(&ni->ni_hstats.hlt_local_aborted);
3319 stats->hlni_local_no_route = atomic_read(&ni->ni_hstats.hlt_local_no_route);
3320 stats->hlni_local_timeout = atomic_read(&ni->ni_hstats.hlt_local_timeout);
3321 stats->hlni_local_error = atomic_read(&ni->ni_hstats.hlt_local_error);
3322 stats->hlni_health_value = atomic_read(&ni->ni_healthv);
3325 lnet_net_unlock(cpt);
3331 lnet_get_local_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3336 lnet_net_lock(LNET_LOCK_EX);
3337 list_for_each_entry(ni, &the_lnet.ln_mt_localNIRecovq, ni_recovery) {
3338 list->rlst_nid_array[i] = ni->ni_nid;
3340 if (i >= LNET_MAX_SHOW_NUM_NID)
3343 lnet_net_unlock(LNET_LOCK_EX);
3344 list->rlst_num_nids = i;
3350 lnet_get_peer_ni_recovery_list(struct lnet_ioctl_recovery_list *list)
3352 struct lnet_peer_ni *lpni;
3355 lnet_net_lock(LNET_LOCK_EX);
3356 list_for_each_entry(lpni, &the_lnet.ln_mt_peerNIRecovq, lpni_recovery) {
3357 list->rlst_nid_array[i] = lpni->lpni_nid;
3359 if (i >= LNET_MAX_SHOW_NUM_NID)
3362 lnet_net_unlock(LNET_LOCK_EX);
3363 list->rlst_num_nids = i;
3369 * LNet ioctl handler.
3373 LNetCtl(unsigned int cmd, void *arg)
3375 struct libcfs_ioctl_data *data = arg;
3376 struct lnet_ioctl_config_data *config;
3377 struct lnet_process_id id = {0};
3381 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3382 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
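/* Each handler below is expected to validate the ioctl header length
 * (ioc_len) against the structure it casts \a arg to before touching
 * the payload, and to hold ln_api_mutex around configuration changes. */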
3385 case IOC_LIBCFS_GET_NI:
3386 rc = LNetGetId(data->ioc_count, &id);
3387 data->ioc_nid = id.nid;
3390 case IOC_LIBCFS_FAIL_NID:
3391 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3393 case IOC_LIBCFS_ADD_ROUTE:
3396 if (config->cfg_hdr.ioc_len < sizeof(*config))
3399 mutex_lock(&the_lnet.ln_api_mutex);
3400 rc = lnet_add_route(config->cfg_net,
3401 config->cfg_config_u.cfg_route.rtr_hop,
3403 config->cfg_config_u.cfg_route.
3406 rc = lnet_check_routes();
3408 lnet_del_route(config->cfg_net,
3411 mutex_unlock(&the_lnet.ln_api_mutex);
3414 case IOC_LIBCFS_DEL_ROUTE:
3417 if (config->cfg_hdr.ioc_len < sizeof(*config))
3420 mutex_lock(&the_lnet.ln_api_mutex);
3421 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3422 mutex_unlock(&the_lnet.ln_api_mutex);
3425 case IOC_LIBCFS_GET_ROUTE:
3428 if (config->cfg_hdr.ioc_len < sizeof(*config))
3431 mutex_lock(&the_lnet.ln_api_mutex);
3432 rc = lnet_get_route(config->cfg_count,
3434 &config->cfg_config_u.cfg_route.rtr_hop,
3436 &config->cfg_config_u.cfg_route.rtr_flags,
3437 &config->cfg_config_u.cfg_route.
3439 mutex_unlock(&the_lnet.ln_api_mutex);
3442 case IOC_LIBCFS_GET_LOCAL_NI: {
3443 struct lnet_ioctl_config_ni *cfg_ni;
3444 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3445 struct lnet_ioctl_element_stats *stats;
3450 /* get the tunables if they are available */
3451 if (cfg_ni->lic_cfg_hdr.ioc_len <
3452 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3455 stats = (struct lnet_ioctl_element_stats *)
3457 tun = (struct lnet_ioctl_config_lnd_tunables *)
3458 (cfg_ni->lic_bulk + sizeof(*stats));
3460 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3463 mutex_lock(&the_lnet.ln_api_mutex);
3464 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3465 mutex_unlock(&the_lnet.ln_api_mutex);
3469 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3470 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3472 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3475 mutex_lock(&the_lnet.ln_api_mutex);
3476 rc = lnet_get_ni_stats(msg_stats);
3477 mutex_unlock(&the_lnet.ln_api_mutex);
3482 case IOC_LIBCFS_GET_NET: {
3483 size_t total = sizeof(*config) +
3484 sizeof(struct lnet_ioctl_net_config);
3487 if (config->cfg_hdr.ioc_len < total)
3490 mutex_lock(&the_lnet.ln_api_mutex);
3491 rc = lnet_get_net_config(config);
3492 mutex_unlock(&the_lnet.ln_api_mutex);
3496 case IOC_LIBCFS_GET_LNET_STATS:
3498 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3500 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3503 mutex_lock(&the_lnet.ln_api_mutex);
3504 lnet_counters_get(&lnet_stats->st_cntrs);
3505 mutex_unlock(&the_lnet.ln_api_mutex);
3509 case IOC_LIBCFS_CONFIG_RTR:
3512 if (config->cfg_hdr.ioc_len < sizeof(*config))
3515 mutex_lock(&the_lnet.ln_api_mutex);
3516 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3517 rc = lnet_rtrpools_enable();
3518 mutex_unlock(&the_lnet.ln_api_mutex);
3521 lnet_rtrpools_disable();
3522 mutex_unlock(&the_lnet.ln_api_mutex);
3525 case IOC_LIBCFS_ADD_BUF:
3528 if (config->cfg_hdr.ioc_len < sizeof(*config))
3531 mutex_lock(&the_lnet.ln_api_mutex);
3532 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3534 config->cfg_config_u.cfg_buffers.
3536 config->cfg_config_u.cfg_buffers.
3538 mutex_unlock(&the_lnet.ln_api_mutex);
3541 case IOC_LIBCFS_SET_NUMA_RANGE: {
3542 struct lnet_ioctl_set_value *numa;
3544 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3546 lnet_net_lock(LNET_LOCK_EX);
3547 lnet_numa_range = numa->sv_value;
3548 lnet_net_unlock(LNET_LOCK_EX);
3552 case IOC_LIBCFS_GET_NUMA_RANGE: {
3553 struct lnet_ioctl_set_value *numa;
3555 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3557 numa->sv_value = lnet_numa_range;
3561 case IOC_LIBCFS_GET_BUF: {
3562 struct lnet_ioctl_pool_cfg *pool_cfg;
3563 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3567 if (config->cfg_hdr.ioc_len < total)
3570 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3572 mutex_lock(&the_lnet.ln_api_mutex);
3573 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3574 mutex_unlock(&the_lnet.ln_api_mutex);
3578 case IOC_LIBCFS_GET_LOCAL_HSTATS: {
3579 struct lnet_ioctl_local_ni_hstats *stats = arg;
3581 if (stats->hlni_hdr.ioc_len < sizeof(*stats))
3584 mutex_lock(&the_lnet.ln_api_mutex);
3585 rc = lnet_get_local_ni_hstats(stats);
3586 mutex_unlock(&the_lnet.ln_api_mutex);
3591 case IOC_LIBCFS_GET_RECOVERY_QUEUE: {
3592 struct lnet_ioctl_recovery_list *list = arg;
3593 if (list->rlst_hdr.ioc_len < sizeof(*list))
3596 mutex_lock(&the_lnet.ln_api_mutex);
3597 if (list->rlst_type == LNET_HEALTH_TYPE_LOCAL_NI)
3598 rc = lnet_get_local_ni_recovery_list(list);
3600 rc = lnet_get_peer_ni_recovery_list(list);
3601 mutex_unlock(&the_lnet.ln_api_mutex);
3605 case IOC_LIBCFS_ADD_PEER_NI: {
3606 struct lnet_ioctl_peer_cfg *cfg = arg;
3608 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3611 mutex_lock(&the_lnet.ln_api_mutex);
3612 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
3615 mutex_unlock(&the_lnet.ln_api_mutex);
3619 case IOC_LIBCFS_DEL_PEER_NI: {
3620 struct lnet_ioctl_peer_cfg *cfg = arg;
3622 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3625 mutex_lock(&the_lnet.ln_api_mutex);
3626 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3627 cfg->prcfg_cfg_nid);
3628 mutex_unlock(&the_lnet.ln_api_mutex);
3632 case IOC_LIBCFS_GET_PEER_INFO: {
3633 struct lnet_ioctl_peer *peer_info = arg;
3635 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3638 mutex_lock(&the_lnet.ln_api_mutex);
3639 rc = lnet_get_peer_ni_info(
3640 peer_info->pr_count,
3642 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3643 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3644 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3645 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3646 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3647 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3648 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3649 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3650 mutex_unlock(&the_lnet.ln_api_mutex);
3654 case IOC_LIBCFS_GET_PEER_NI: {
3655 struct lnet_ioctl_peer_cfg *cfg = arg;
3657 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3660 mutex_lock(&the_lnet.ln_api_mutex);
3661 rc = lnet_get_peer_info(cfg,
3662 (void __user *)cfg->prcfg_bulk);
3663 mutex_unlock(&the_lnet.ln_api_mutex);
3667 case IOC_LIBCFS_GET_PEER_LIST: {
3668 struct lnet_ioctl_peer_cfg *cfg = arg;
3670 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3673 mutex_lock(&the_lnet.ln_api_mutex);
3674 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
3675 (struct lnet_process_id __user *)cfg->prcfg_bulk);
3676 mutex_unlock(&the_lnet.ln_api_mutex);
3680 case IOC_LIBCFS_SET_HEALHV: {
3681 struct lnet_ioctl_reset_health_cfg *cfg = arg;
3683 if (cfg->rh_hdr.ioc_len < sizeof(*cfg))
3685 if (cfg->rh_value < 0 ||
3686 cfg->rh_value > LNET_MAX_HEALTH_VALUE)
3687 value = LNET_MAX_HEALTH_VALUE;
3689 value = cfg->rh_value;
3690 CDEBUG(D_NET, "Manually setting healthv to %d for %s:%s. all = %d\n",
3691 value, (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI) ?
3692 "local" : "peer", libcfs_nid2str(cfg->rh_nid), cfg->rh_all);
3693 mutex_lock(&the_lnet.ln_api_mutex);
3694 if (cfg->rh_type == LNET_HEALTH_TYPE_LOCAL_NI)
3695 lnet_ni_set_healthv(cfg->rh_nid, value,
3698 lnet_peer_ni_set_healthv(cfg->rh_nid, value,
3700 mutex_unlock(&the_lnet.ln_api_mutex);
3704 case IOC_LIBCFS_NOTIFY_ROUTER: {
3705 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
3707 /* The deadline passed in by the user should be some time in
3708 * seconds in the future since the UNIX epoch. We have to map
3709 * that deadline to the wall clock.
3711 deadline += ktime_get_seconds();
3712 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
3716 case IOC_LIBCFS_LNET_DIST:
3717 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
3718 if (rc < 0 && rc != -EHOSTUNREACH)
3721 data->ioc_u32[0] = rc;
3724 case IOC_LIBCFS_TESTPROTOCOMPAT:
3725 lnet_net_lock(LNET_LOCK_EX);
3726 the_lnet.ln_testprotocompat = data->ioc_flags;
3727 lnet_net_unlock(LNET_LOCK_EX);
3730 case IOC_LIBCFS_LNET_FAULT:
3731 return lnet_fault_ctl(data->ioc_flags, data);
3733 case IOC_LIBCFS_PING: {
3734 signed long timeout;
3736 id.nid = data->ioc_nid;
3737 id.pid = data->ioc_u32[0];
3739 /* If timeout is negative or greater than the maximum, use the default of 3 minutes */
3740 if (((s32)data->ioc_u32[1] <= 0) ||
3741 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3742 timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3744 timeout = msecs_to_jiffies(data->ioc_u32[1]);
3746 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3747 data->ioc_plen1 / sizeof(struct lnet_process_id));
3752 data->ioc_count = rc;
3756 case IOC_LIBCFS_PING_PEER: {
3757 struct lnet_ioctl_ping_data *ping = arg;
3758 struct lnet_peer *lp;
3759 signed long timeout;
3761 /* If timeout is negative or greater than the maximum, use the default of 3 minutes */
3762 if (((s32)ping->op_param) <= 0 ||
3763 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3764 timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3766 timeout = msecs_to_jiffies(ping->op_param);
3768 rc = lnet_ping(ping->ping_id, timeout,
3774 mutex_lock(&the_lnet.ln_api_mutex);
3775 lp = lnet_find_peer(ping->ping_id.nid);
3777 ping->ping_id.nid = lp->lp_primary_nid;
3778 ping->mr_info = lnet_peer_is_multi_rail(lp);
3779 lnet_peer_decref_locked(lp);
3781 mutex_unlock(&the_lnet.ln_api_mutex);
3783 ping->ping_count = rc;
3787 case IOC_LIBCFS_DISCOVER: {
3788 struct lnet_ioctl_ping_data *discover = arg;
3789 struct lnet_peer *lp;
3791 rc = lnet_discover(discover->ping_id, discover->op_param,
3793 discover->ping_count);
3797 mutex_lock(&the_lnet.ln_api_mutex);
3798 lp = lnet_find_peer(discover->ping_id.nid);
3800 discover->ping_id.nid = lp->lp_primary_nid;
3801 discover->mr_info = lnet_peer_is_multi_rail(lp);
3802 lnet_peer_decref_locked(lp);
3804 mutex_unlock(&the_lnet.ln_api_mutex);
3806 discover->ping_count = rc;
3811 ni = lnet_net2ni_addref(data->ioc_net);
3815 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
3818 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
3825 EXPORT_SYMBOL(LNetCtl);
3827 void LNetDebugPeer(struct lnet_process_id id)
3829 lnet_debug_peer(id.nid);
3831 EXPORT_SYMBOL(LNetDebugPeer);
3834 * Determine if the specified peer \a nid is on the local node.
3836 * \param nid peer nid to check
3838 * \retval true If peer NID is on the local node.
3839 * \retval false If peer NID is not on the local node.
3841 bool LNetIsPeerLocal(lnet_nid_t nid)
3843 struct lnet_net *net;
3847 cpt = lnet_net_lock_current();
3848 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3849 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3850 if (ni->ni_nid == nid) {
3851 lnet_net_unlock(cpt);
3856 lnet_net_unlock(cpt);
3860 EXPORT_SYMBOL(LNetIsPeerLocal);
3863 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
3864 * Note that all interfaces share a same PID, as requested by LNetNIInit().
3866 * \param index Index of the interface to look up.
3867 * \param id On successful return, this location will hold the
3868 * struct lnet_process_id ID of the interface.
3870 * \retval 0 If an interface exists at \a index.
3871 * \retval -ENOENT If no interface has been found.
3874 LNetGetId(unsigned int index, struct lnet_process_id *id)
3877 struct lnet_net *net;
3881 LASSERT(the_lnet.ln_refcount > 0);
3883 cpt = lnet_net_lock_current();
3885 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3886 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3890 id->nid = ni->ni_nid;
3891 id->pid = the_lnet.ln_pid;
3897 lnet_net_unlock(cpt);
3900 EXPORT_SYMBOL(LNetGetId);
3902 static int lnet_ping(struct lnet_process_id id, signed long timeout,
3903 struct lnet_process_id __user *ids, int n_ids)
3905 struct lnet_handle_eq eqh;
3906 struct lnet_handle_md mdh;
3907 struct lnet_event event;
3908 struct lnet_md md = { NULL };
3912 const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
3913 struct lnet_ping_buffer *pbuf;
3914 struct lnet_process_id tmpid;
3921 /* n_ids limit is arbitrary */
3922 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
3926 * if the user buffer has more space than the lnet_interfaces_max
3927 * then only fill it up to lnet_interfaces_max
3929 if (n_ids > lnet_interfaces_max)
3930 n_ids = lnet_interfaces_max;
3932 if (id.pid == LNET_PID_ANY)
3933 id.pid = LNET_PID_LUSTRE;
3935 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
3939 /* NB 2 events max (including any unlink event) */
3940 rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
3942 CERROR("Can't allocate EQ: %d\n", rc);
3943 goto fail_ping_buffer_decref;
3946 /* initialize md content */
3947 md.start = &pbuf->pb_info;
3948 md.length = LNET_PING_INFO_SIZE(n_ids);
3949 md.threshold = 2; /* GET/REPLY */
3951 md.options = LNET_MD_TRUNCATE;
3955 rc = LNetMDBind(md, LNET_UNLINK, &mdh);
3957 CERROR("Can't bind MD: %d\n", rc);
3961 rc = LNetGet(LNET_NID_ANY, mdh, id,
3962 LNET_RESERVED_PORTAL,
3963 LNET_PROTO_PING_MATCHBITS, 0, false);
3966 /* Don't CERROR; this could be deliberate! */
3967 rc2 = LNetMDUnlink(mdh);
3970 /* NB must wait for the UNLINK event below... */
3972 timeout = a_long_time;
3976 /* MUST block for unlink to complete */
3978 blocked = cfs_block_allsigs();
3980 rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
3983 cfs_restore_sigs(blocked);
3985 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
3986 (rc2 <= 0) ? -1 : event.type,
3987 (rc2 <= 0) ? -1 : event.status,
3988 (rc2 > 0 && event.unlinked) ? " unlinked" : "");
3990 LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
3992 if (rc2 <= 0 || event.status != 0) {
3993 /* timeout or error */
3994 if (!replied && rc == 0)
3995 rc = (rc2 < 0) ? rc2 :
3996 (rc2 == 0) ? -ETIMEDOUT :
4000 /* Ensure completion in finite time... */
4002 /* No assertion (racing with network) */
4004 timeout = a_long_time;
4005 } else if (rc2 == 0) {
4006 /* timed out waiting for unlink */
4007 CWARN("ping %s: late network completion\n",
4010 } else if (event.type == LNET_EVENT_REPLY) {
4014 } while (rc2 <= 0 || !event.unlinked);
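/* The loop above waits twice: first for the REPLY (or a timeout/error),
 * then, after forcing an unlink if necessary, for the UNLINK event, so
 * the MD is known to be released before the reply buffer is parsed
 * below. */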
4018 CWARN("%s: Unexpected rc >= 0 but no reply!\n",
4025 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
4027 rc = -EPROTO; /* if I can't parse... */
4030 CERROR("%s: ping info too short %d\n",
4031 libcfs_id2str(id), nob);
4035 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
4036 lnet_swap_pinginfo(pbuf);
4037 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
4038 CERROR("%s: Unexpected magic %08x\n",
4039 libcfs_id2str(id), pbuf->pb_info.pi_magic);
4043 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
4044 CERROR("%s: ping w/o NI status: 0x%x\n",
4045 libcfs_id2str(id), pbuf->pb_info.pi_features);
4049 if (nob < LNET_PING_INFO_SIZE(0)) {
4050 CERROR("%s: Short reply %d(%d min)\n",
4052 nob, (int)LNET_PING_INFO_SIZE(0));
4056 if (pbuf->pb_info.pi_nnis < n_ids)
4057 n_ids = pbuf->pb_info.pi_nnis;
4059 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
4060 CERROR("%s: Short reply %d(%d expected)\n",
4062 nob, (int)LNET_PING_INFO_SIZE(n_ids));
4066 rc = -EFAULT; /* if I segv in copy_to_user()... */
4068 memset(&tmpid, 0, sizeof(tmpid));
4069 for (i = 0; i < n_ids; i++) {
4070 tmpid.pid = pbuf->pb_info.pi_pid;
4071 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
4072 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
4075 rc = pbuf->pb_info.pi_nnis;
4078 rc2 = LNetEQFree(eqh);
4080 CERROR("rc2 %d\n", rc2);
4083 fail_ping_buffer_decref:
4084 lnet_ping_buffer_decref(pbuf);
4089 lnet_discover(struct lnet_process_id id, __u32 force,
4090 struct lnet_process_id __user *ids, int n_ids)
4092 struct lnet_peer_ni *lpni;
4093 struct lnet_peer_ni *p;
4094 struct lnet_peer *lp;
4095 struct lnet_process_id *buf;
4099 int max_intf = lnet_interfaces_max;
4103 id.nid == LNET_NID_ANY)
4106 if (id.pid == LNET_PID_ANY)
4107 id.pid = LNET_PID_LUSTRE;
4110 * if the user buffer has more space than the max_intf
4111 * then only fill it up to max_intf
4113 if (n_ids > max_intf)
4116 buf_size = n_ids * sizeof(*buf);
4118 LIBCFS_ALLOC(buf, buf_size);
4122 cpt = lnet_net_lock_current();
4123 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
4130 * Clearing the NIDS_UPTODATE flag ensures the peer will
4131 * be discovered, provided discovery has not been disabled.
4133 lp = lpni->lpni_peer_net->lpn_peer;
4134 spin_lock(&lp->lp_lock);
4135 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
4136 /* If the force flag is set, force a PING and PUSH as well. */
4138 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
4139 spin_unlock(&lp->lp_lock);
4140 rc = lnet_discover_peer_locked(lpni, cpt, true);
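/* Passing 'true' here appears to request a blocking discovery, so the
 * lp_nnis value read below should reflect the freshly discovered NIDs. */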
4144 /* Peer may have changed. */
4145 lp = lpni->lpni_peer_net->lpn_peer;
4146 if (lp->lp_nnis < n_ids)
4147 n_ids = lp->lp_nnis;
4151 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
4152 buf[i].pid = id.pid;
4153 buf[i].nid = p->lpni_nid;
4158 lnet_net_unlock(cpt);
4161 if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
4167 lnet_peer_ni_decref_locked(lpni);
4169 lnet_net_unlock(cpt);
4171 LIBCFS_FREE(buf, buf_size);
4177 * Retrieve peer discovery status.
4179 * \retval 1 if lnet_peer_discovery_disabled is 0
4180 * \retval 0 if lnet_peer_discovery_disabled is 1
4183 LNetGetPeerDiscoveryStatus(void)
4185 return !lnet_peer_discovery_disabled;
4187 EXPORT_SYMBOL(LNetGetPeerDiscoveryStatus);