4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LNET
35 #include <linux/ctype.h>
36 #include <linux/log2.h>
37 #include <linux/ktime.h>
38 #include <linux/moduleparam.h>
39 #include <linux/uaccess.h>
41 #include <lnet/lib-lnet.h>
43 #define D_LNI D_CONSOLE
46 * initialize ln_api_mutex statically, since it is needed by the
47 * discovery_set callback. That module parameter callback can be called
48 * before module init completes, so the mutex must already be usable then.
50 struct lnet the_lnet = {
51 .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
52 }; /* THE state of the network */
53 EXPORT_SYMBOL(the_lnet);
55 static char *ip2nets = "";
56 module_param(ip2nets, charp, 0444);
57 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
59 static char *networks = "";
60 module_param(networks, charp, 0444);
61 MODULE_PARM_DESC(networks, "local networks");
63 static char *routes = "";
64 module_param(routes, charp, 0444);
65 MODULE_PARM_DESC(routes, "routes to non-local networks");
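/*
 * Typical usage (illustrative only, not taken from this file): these
 * string parameters are normally supplied through modprobe, e.g. in
 * /etc/modprobe.d/lustre.conf:
 *
 *   options lnet networks="tcp0(eth0)"
 *   options lnet routes="o2ib0 192.168.1.1@tcp0"
 *
 * 'networks' and 'ip2nets' are mutually exclusive; see
 * lnet_get_networks() below.
 */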
67 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
68 module_param(rnet_htable_size, int, 0444);
69 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
71 static int use_tcp_bonding = false;
72 module_param(use_tcp_bonding, int, 0444);
73 MODULE_PARM_DESC(use_tcp_bonding,
74 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
76 unsigned int lnet_numa_range = 0;
77 module_param(lnet_numa_range, uint, 0444);
78 MODULE_PARM_DESC(lnet_numa_range,
79 "NUMA range to consider during Multi-Rail selection");
82 * lnet_health_sensitivity determines by how much we decrement the health
83 * value on a send error. It defaults to 0, which means health
84 * checking is turned off by default.
86 unsigned int lnet_health_sensitivity = 0;
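/*
 * lnet_health_sensitivity is writable at runtime.  On kernels that
 * provide struct kernel_param_ops the custom setter below is wired up
 * via module_param(); older kernels fall back to module_param_call()
 * (the HAVE_KERNEL_PARAM_OPS blocks).  The same pattern is repeated for
 * lnet_interfaces_max and lnet_peer_discovery_disabled.
 */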
87 static int sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
88 static struct kernel_param_ops param_ops_health_sensitivity = {
89 .set = sensitivity_set,
92 #define param_check_health_sensitivity(name, p) \
93 __param_check(name, p, int)
94 #ifdef HAVE_KERNEL_PARAM_OPS
95 module_param(lnet_health_sensitivity, health_sensitivity, S_IRUGO|S_IWUSR);
97 module_param_call(lnet_health_sensitivity, sensitivity_set, param_get_int,
98 &lnet_health_sensitivity, S_IRUGO|S_IWUSR);
100 MODULE_PARM_DESC(lnet_health_sensitivity,
101 "Value to decrement the health value by on error");
103 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
104 static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp);
106 static struct kernel_param_ops param_ops_interfaces_max = {
108 .get = param_get_int,
111 #define param_check_interfaces_max(name, p) \
112 __param_check(name, p, int)
114 #ifdef HAVE_KERNEL_PARAM_OPS
115 module_param(lnet_interfaces_max, interfaces_max, 0644);
117 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
118 ¶m_ops_interfaces_max, 0644);
120 MODULE_PARM_DESC(lnet_interfaces_max,
121 "Maximum number of interfaces in a node.");
123 unsigned lnet_peer_discovery_disabled = 0;
124 static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp);
126 static struct kernel_param_ops param_ops_discovery_disabled = {
127 .set = discovery_set,
128 .get = param_get_int,
131 #define param_check_discovery_disabled(name, p) \
132 __param_check(name, p, int)
133 #ifdef HAVE_KERNEL_PARAM_OPS
134 module_param(lnet_peer_discovery_disabled, discovery_disabled, 0644);
136 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
137 ¶m_ops_discovery_disabled, 0644);
139 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
140 "Set to 1 to disable peer discovery on this node.");
142 unsigned lnet_transaction_timeout = 5;
143 module_param(lnet_transaction_timeout, uint, 0444);
144 MODULE_PARM_DESC(lnet_transaction_timeout,
145 "Time in seconds to wait for a REPLY or an ACK");
147 unsigned lnet_retry_count = 0;
148 module_param(lnet_retry_count, uint, 0444);
149 MODULE_PARM_DESC(lnet_retry_count,
150 "Maximum number of times to retry transmitting a message");
153 * This sequence number keeps track of how many times DLC was used to
154 * update the local NIs. It is incremented when a NI is added or
155 * removed and checked when sending a message to determine if there is
156 * a need to re-run the selection algorithm. See lnet_select_pathway()
157 * for more details on its usage.
159 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
161 static int lnet_ping(struct lnet_process_id id, signed long timeout,
162 struct lnet_process_id __user *ids, int n_ids);
164 static int lnet_discover(struct lnet_process_id id, __u32 force,
165 struct lnet_process_id __user *ids, int n_ids);
168 sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
171 unsigned *sensitivity = (unsigned *)kp->arg;
174 rc = kstrtoul(val, 0, &value);
176 CERROR("Invalid module parameter value for 'lnet_health_sensitivity'\n");
181 * Lock the api_mutex to ensure the new value is stored
182 * consistently with respect to the current LNet state.
184 mutex_lock(&the_lnet.ln_api_mutex);
186 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
187 mutex_unlock(&the_lnet.ln_api_mutex);
191 if (value == *sensitivity) {
192 mutex_unlock(&the_lnet.ln_api_mutex);
196 *sensitivity = value;
198 mutex_unlock(&the_lnet.ln_api_mutex);
204 discovery_set(const char *val, cfs_kernel_param_arg_t *kp)
207 unsigned *discovery = (unsigned *)kp->arg;
209 struct lnet_ping_buffer *pbuf;
211 rc = kstrtoul(val, 0, &value);
213 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
217 value = (value) ? 1 : 0;
220 * Lock the api_mutex to ensure the new value is stored
221 * consistently with respect to the current LNet state.
223 mutex_lock(&the_lnet.ln_api_mutex);
225 if (value == *discovery) {
226 mutex_unlock(&the_lnet.ln_api_mutex);
232 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
233 mutex_unlock(&the_lnet.ln_api_mutex);
237 /* tell peers that discovery setting has changed */
238 lnet_net_lock(LNET_LOCK_EX);
239 pbuf = the_lnet.ln_ping_target;
241 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
243 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
244 lnet_net_unlock(LNET_LOCK_EX);
246 lnet_push_update_to_peers(1);
248 mutex_unlock(&the_lnet.ln_api_mutex);
254 intf_max_set(const char *val, cfs_kernel_param_arg_t *kp)
258 rc = kstrtoint(val, 0, &value);
260 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
264 if (value < LNET_INTERFACES_MIN) {
265 CWARN("max interfaces provided are too small, setting to %d\n",
266 LNET_INTERFACES_MAX_DEFAULT);
267 value = LNET_INTERFACES_MAX_DEFAULT;
270 *(int *)kp->arg = value;
276 lnet_get_routes(void)
282 lnet_get_networks(void)
287 if (*networks != 0 && *ip2nets != 0) {
288 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
289 "'ip2nets' but not both at once\n");
294 rc = lnet_parse_ip2nets(&nets, ip2nets);
295 return (rc == 0) ? nets : NULL;
305 lnet_init_locks(void)
307 spin_lock_init(&the_lnet.ln_eq_wait_lock);
308 spin_lock_init(&the_lnet.ln_msg_resend_lock);
309 init_waitqueue_head(&the_lnet.ln_eq_waitq);
310 init_waitqueue_head(&the_lnet.ln_mt_waitq);
311 mutex_init(&the_lnet.ln_lnd_mutex);
315 lnet_fini_locks(void)
319 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
320 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
324 lnet_descriptor_setup(void)
326 /* create specific kmem_cache for MEs and small MDs (i.e., originally
327 * allocated in <size-xxx> kmem_cache).
329 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
331 if (!lnet_mes_cachep)
334 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
335 LNET_SMALL_MD_SIZE, 0, 0,
337 if (!lnet_small_mds_cachep)
344 lnet_descriptor_cleanup(void)
347 if (lnet_small_mds_cachep) {
348 kmem_cache_destroy(lnet_small_mds_cachep);
349 lnet_small_mds_cachep = NULL;
352 if (lnet_mes_cachep) {
353 kmem_cache_destroy(lnet_mes_cachep);
354 lnet_mes_cachep = NULL;
359 lnet_create_remote_nets_table(void)
362 struct list_head *hash;
364 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
365 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
366 LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
368 CERROR("Failed to create remote nets hash table\n");
372 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
373 INIT_LIST_HEAD(&hash[i]);
374 the_lnet.ln_remote_nets_hash = hash;
379 lnet_destroy_remote_nets_table(void)
383 if (the_lnet.ln_remote_nets_hash == NULL)
386 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
387 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
389 LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
390 LNET_REMOTE_NETS_HASH_SIZE *
391 sizeof(the_lnet.ln_remote_nets_hash[0]));
392 the_lnet.ln_remote_nets_hash = NULL;
396 lnet_destroy_locks(void)
398 if (the_lnet.ln_res_lock != NULL) {
399 cfs_percpt_lock_free(the_lnet.ln_res_lock);
400 the_lnet.ln_res_lock = NULL;
403 if (the_lnet.ln_net_lock != NULL) {
404 cfs_percpt_lock_free(the_lnet.ln_net_lock);
405 the_lnet.ln_net_lock = NULL;
412 lnet_create_locks(void)
416 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
417 if (the_lnet.ln_res_lock == NULL)
420 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
421 if (the_lnet.ln_net_lock == NULL)
427 lnet_destroy_locks();
431 static void lnet_assert_wire_constants(void)
433 /* Wire protocol assertions generated by 'wirecheck'
434 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
435 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
436 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
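/*
 * CLASSERT() is a compile-time assertion, so any change to the wire
 * structures or protocol constants checked below breaks the build
 * rather than silently changing the on-the-wire format.
 */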
439 CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
440 CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
441 CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
442 CLASSERT(LNET_MSG_ACK == 0);
443 CLASSERT(LNET_MSG_PUT == 1);
444 CLASSERT(LNET_MSG_GET == 2);
445 CLASSERT(LNET_MSG_REPLY == 3);
446 CLASSERT(LNET_MSG_HELLO == 4);
448 /* Checks for struct lnet_handle_wire */
449 CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
450 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
451 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
452 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
453 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
455 /* Checks for struct lnet_magicversion */
456 CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
457 CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
458 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
459 CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
460 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
461 CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
462 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
464 /* Checks for struct lnet_hdr */
465 CLASSERT((int)sizeof(struct lnet_hdr) == 72);
466 CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
467 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
468 CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
469 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
470 CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
471 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
472 CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
473 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
474 CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
475 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
476 CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
477 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
478 CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
479 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
482 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
483 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
484 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
485 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
486 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
487 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
490 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
491 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
492 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
493 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
494 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
495 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
496 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
497 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
498 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
499 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
502 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
503 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
504 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
505 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
506 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
507 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
508 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
509 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
510 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
511 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
514 CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
515 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
518 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
519 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
520 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
521 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
523 /* Checks for struct lnet_ni_status and related constants */
524 CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
525 CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
526 CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
528 /* Checks for struct lnet_ni_status */
529 CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
530 CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
531 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
532 CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
533 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
534 CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
535 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
537 /* Checks for struct lnet_ping_info and related constants */
538 CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
539 CLASSERT(LNET_PING_FEAT_INVAL == 0);
540 CLASSERT(LNET_PING_FEAT_BASE == 1);
541 CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
542 CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
543 CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
544 CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
545 CLASSERT(LNET_PING_FEAT_BITS == 31);
547 /* Checks for struct lnet_ping_info */
548 CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
549 CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
550 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
551 CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
552 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
553 CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
554 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
555 CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
556 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
557 CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
558 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
561 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
563 struct lnet_lnd *lnd;
564 struct list_head *tmp;
566 /* holding lnd mutex */
567 list_for_each(tmp, &the_lnet.ln_lnds) {
568 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
570 if (lnd->lnd_type == type)
577 lnet_register_lnd(struct lnet_lnd *lnd)
579 mutex_lock(&the_lnet.ln_lnd_mutex);
581 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
582 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
584 list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
585 lnd->lnd_refcount = 0;
587 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
589 mutex_unlock(&the_lnet.ln_lnd_mutex);
591 EXPORT_SYMBOL(lnet_register_lnd);
594 lnet_unregister_lnd(struct lnet_lnd *lnd)
596 mutex_lock(&the_lnet.ln_lnd_mutex);
598 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
599 LASSERT(lnd->lnd_refcount == 0);
601 list_del(&lnd->lnd_list);
602 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
604 mutex_unlock(&the_lnet.ln_lnd_mutex);
606 EXPORT_SYMBOL(lnet_unregister_lnd);
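/*
 * LNet statistics are kept per CPT partition; lnet_counters_get() sums
 * the per-CPT counters under the exclusive net lock to give the caller
 * a single consistent snapshot.
 */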
609 lnet_counters_get(struct lnet_counters *counters)
611 struct lnet_counters *ctr;
614 memset(counters, 0, sizeof(*counters));
616 lnet_net_lock(LNET_LOCK_EX);
618 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
619 counters->msgs_max += ctr->msgs_max;
620 counters->msgs_alloc += ctr->msgs_alloc;
621 counters->errors += ctr->errors;
622 counters->send_count += ctr->send_count;
623 counters->recv_count += ctr->recv_count;
624 counters->route_count += ctr->route_count;
625 counters->drop_count += ctr->drop_count;
626 counters->send_length += ctr->send_length;
627 counters->recv_length += ctr->recv_length;
628 counters->route_length += ctr->route_length;
629 counters->drop_length += ctr->drop_length;
632 lnet_net_unlock(LNET_LOCK_EX);
634 EXPORT_SYMBOL(lnet_counters_get);
637 lnet_counters_reset(void)
639 struct lnet_counters *counters;
642 lnet_net_lock(LNET_LOCK_EX);
644 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
645 memset(counters, 0, sizeof(struct lnet_counters));
647 lnet_net_unlock(LNET_LOCK_EX);
651 lnet_res_type2str(int type)
656 case LNET_COOKIE_TYPE_MD:
658 case LNET_COOKIE_TYPE_ME:
660 case LNET_COOKIE_TYPE_EQ:
666 lnet_res_container_cleanup(struct lnet_res_container *rec)
670 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
673 while (!list_empty(&rec->rec_active)) {
674 struct list_head *e = rec->rec_active.next;
677 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
678 lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
680 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
681 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
683 } else { /* NB: Active MEs should be attached on portals */
690 /* Found live MDs/MEs/EQs; the user really should unlink/free
691 * all of them before finalizing LNet, but if someone didn't,
692 * we have to recycle the garbage for them */
693 CERROR("%d active elements on exit of %s container\n",
694 count, lnet_res_type2str(rec->rec_type));
697 if (rec->rec_lh_hash != NULL) {
698 LIBCFS_FREE(rec->rec_lh_hash,
699 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
700 rec->rec_lh_hash = NULL;
703 rec->rec_type = 0; /* mark it as finalized */
707 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
712 LASSERT(rec->rec_type == 0);
714 rec->rec_type = type;
715 INIT_LIST_HEAD(&rec->rec_active);
717 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
719 /* Arbitrary choice of hash table size */
720 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
721 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
722 if (rec->rec_lh_hash == NULL) {
727 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
728 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
733 CERROR("Failed to setup %s resource container\n",
734 lnet_res_type2str(type));
735 lnet_res_container_cleanup(rec);
740 lnet_res_containers_destroy(struct lnet_res_container **recs)
742 struct lnet_res_container *rec;
745 cfs_percpt_for_each(rec, i, recs)
746 lnet_res_container_cleanup(rec);
748 cfs_percpt_free(recs);
751 static struct lnet_res_container **
752 lnet_res_containers_create(int type)
754 struct lnet_res_container **recs;
755 struct lnet_res_container *rec;
759 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
761 CERROR("Failed to allocate %s resource containers\n",
762 lnet_res_type2str(type));
766 cfs_percpt_for_each(rec, i, recs) {
767 rc = lnet_res_container_setup(rec, i, type);
769 lnet_res_containers_destroy(recs);
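/*
 * Resource handle cookies: the low LNET_COOKIE_TYPE_BITS hold the
 * resource type (MD/ME/EQ), the next LNET_CPT_BITS hold the owning CPT,
 * and the remaining high bits are a per-container sequence number.
 * lnet_res_lh_lookup() checks the type bits and hashes the sequence
 * bits to pick a chain; lnet_res_lh_initialize() assigns new cookies by
 * bumping the sequence part.
 */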
777 struct lnet_libhandle *
778 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
780 /* ALWAYS called with lnet_res_lock held */
781 struct list_head *head;
782 struct lnet_libhandle *lh;
785 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
788 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
789 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
791 list_for_each_entry(lh, head, lh_hash_chain) {
792 if (lh->lh_cookie == cookie)
800 lnet_res_lh_initialize(struct lnet_res_container *rec,
801 struct lnet_libhandle *lh)
803 /* ALWAYS called with lnet_res_lock held */
804 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
807 lh->lh_cookie = rec->rec_lh_cookie;
808 rec->rec_lh_cookie += 1 << ibits;
810 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
812 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
815 static int lnet_unprepare(void);
818 lnet_prepare(lnet_pid_t requested_pid)
820 /* Prepare to bring up the network */
821 struct lnet_res_container **recs;
824 if (requested_pid == LNET_PID_ANY) {
825 /* Don't instantiate LNET just for me */
829 LASSERT(the_lnet.ln_refcount == 0);
831 the_lnet.ln_routing = 0;
833 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
834 the_lnet.ln_pid = requested_pid;
836 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
837 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
838 INIT_LIST_HEAD(&the_lnet.ln_nets);
839 INIT_LIST_HEAD(&the_lnet.ln_routers);
840 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
841 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
842 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
843 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
844 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
845 INIT_LIST_HEAD(&the_lnet.ln_mt_localNIRecovq);
846 INIT_LIST_HEAD(&the_lnet.ln_mt_peerNIRecovq);
847 init_waitqueue_head(&the_lnet.ln_dc_waitq);
849 rc = lnet_descriptor_setup();
853 rc = lnet_create_remote_nets_table();
858 * NB the interface cookie in wire handles guards against delayed
859 * replies and ACKs appearing valid after reboot.
861 the_lnet.ln_interface_cookie = ktime_get_real_ns();
863 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
864 sizeof(struct lnet_counters));
865 if (the_lnet.ln_counters == NULL) {
866 CERROR("Failed to allocate counters for LNet\n");
871 rc = lnet_peer_tables_create();
875 rc = lnet_msg_containers_create();
879 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
880 LNET_COOKIE_TYPE_EQ);
884 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
890 the_lnet.ln_me_containers = recs;
892 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
898 the_lnet.ln_md_containers = recs;
900 rc = lnet_portals_create();
902 CERROR("Failed to create portals for LNet: %d\n", rc);
914 lnet_unprepare (void)
916 /* NB no LNET_LOCK since this is the last reference. All LND instances
917 * have shut down already, so it is safe to unlink and free all
918 * descriptors, even those that appear committed to a network op (eg MD
919 * with non-zero pending count) */
921 lnet_fail_nid(LNET_NID_ANY, 0);
923 LASSERT(the_lnet.ln_refcount == 0);
924 LASSERT(list_empty(&the_lnet.ln_test_peers));
925 LASSERT(list_empty(&the_lnet.ln_nets));
927 lnet_portals_destroy();
929 if (the_lnet.ln_md_containers != NULL) {
930 lnet_res_containers_destroy(the_lnet.ln_md_containers);
931 the_lnet.ln_md_containers = NULL;
934 if (the_lnet.ln_me_containers != NULL) {
935 lnet_res_containers_destroy(the_lnet.ln_me_containers);
936 the_lnet.ln_me_containers = NULL;
939 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
941 lnet_msg_containers_destroy();
943 lnet_rtrpools_free(0);
945 if (the_lnet.ln_counters != NULL) {
946 cfs_percpt_free(the_lnet.ln_counters);
947 the_lnet.ln_counters = NULL;
949 lnet_destroy_remote_nets_table();
950 lnet_descriptor_cleanup();
956 lnet_net2ni_locked(__u32 net_id, int cpt)
959 struct lnet_net *net;
961 LASSERT(cpt != LNET_LOCK_EX);
963 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
964 if (net->net_id == net_id) {
965 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
975 lnet_net2ni_addref(__u32 net)
980 ni = lnet_net2ni_locked(net, 0);
982 lnet_ni_addref_locked(ni, 0);
987 EXPORT_SYMBOL(lnet_net2ni_addref);
990 lnet_get_net_locked(__u32 net_id)
992 struct lnet_net *net;
994 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
995 if (net->net_id == net_id)
1003 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
1008 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
1013 val = hash_long(key, LNET_CPT_BITS);
1014 /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
1018 return (unsigned int)(key + val + (val >> 1)) % number;
1022 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
1024 struct lnet_net *net;
1026 /* must be called while holding lnet_net_lock */
1027 if (LNET_CPT_NUMBER == 1)
1028 return 0; /* the only one */
1031 * If NI is provided then use the CPT identified in the NI cpt
1032 * list if one exists. If one doesn't exist, then that NI is
1033 * associated with all CPTs and it follows that the net it belongs
1034 * to is implicitly associated with all CPTs, so just hash the nid
1038 if (ni->ni_cpts != NULL)
1039 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
1042 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1045 /* no NI provided so look at the net */
1046 net = lnet_get_net_locked(LNET_NIDNET(nid));
1048 if (net != NULL && net->net_cpts != NULL) {
1049 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
1052 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
1056 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
1061 if (LNET_CPT_NUMBER == 1)
1062 return 0; /* the only one */
1064 cpt = lnet_net_lock_current();
1066 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
1068 lnet_net_unlock(cpt);
1072 EXPORT_SYMBOL(lnet_cpt_of_nid);
1075 lnet_islocalnet(__u32 net_id)
1077 struct lnet_net *net;
1081 cpt = lnet_net_lock_current();
1083 net = lnet_get_net_locked(net_id);
1085 local = net != NULL;
1087 lnet_net_unlock(cpt);
1093 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
1095 if (ni->ni_state & LNET_NI_STATE_ACTIVE)
1102 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
1104 struct lnet_net *net;
1107 LASSERT(cpt != LNET_LOCK_EX);
1109 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1110 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1111 if (ni->ni_nid == nid)
1120 lnet_nid2ni_addref(lnet_nid_t nid)
1125 ni = lnet_nid2ni_locked(nid, 0);
1127 lnet_ni_addref_locked(ni, 0);
1132 EXPORT_SYMBOL(lnet_nid2ni_addref);
1135 lnet_islocalnid(lnet_nid_t nid)
1140 cpt = lnet_net_lock_current();
1141 ni = lnet_nid2ni_locked(nid, cpt);
1142 lnet_net_unlock(cpt);
1148 lnet_count_acceptor_nets(void)
1150 /* Return the # of NIs that need the acceptor. */
1152 struct lnet_net *net;
1155 cpt = lnet_net_lock_current();
1156 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1157 /* all socklnd type networks should have the acceptor
1159 if (net->net_lnd->lnd_accept != NULL)
1163 lnet_net_unlock(cpt);
1168 struct lnet_ping_buffer *
1169 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1171 struct lnet_ping_buffer *pbuf;
1173 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1175 pbuf->pb_nnis = nnis;
1176 atomic_set(&pbuf->pb_refcnt, 1);
1183 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1185 LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
1186 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1189 static struct lnet_ping_buffer *
1190 lnet_ping_target_create(int nnis)
1192 struct lnet_ping_buffer *pbuf;
1194 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1196 CERROR("Can't allocate ping source [%d]\n", nnis);
1200 pbuf->pb_info.pi_nnis = nnis;
1201 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1202 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1203 pbuf->pb_info.pi_features =
1204 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1210 lnet_get_net_ni_count_locked(struct lnet_net *net)
1215 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1222 lnet_get_net_ni_count_pre(struct lnet_net *net)
1227 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1234 lnet_get_ni_count(void)
1237 struct lnet_net *net;
1242 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1243 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1253 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1257 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1259 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1261 /* Loopback is guaranteed to be present */
1262 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1264 if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1270 lnet_ping_target_destroy(void)
1272 struct lnet_net *net;
1275 lnet_net_lock(LNET_LOCK_EX);
1277 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1278 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1280 ni->ni_status = NULL;
1285 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1286 the_lnet.ln_ping_target = NULL;
1288 lnet_net_unlock(LNET_LOCK_EX);
1292 lnet_ping_target_event_handler(struct lnet_event *event)
1294 struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1296 if (event->unlinked)
1297 lnet_ping_buffer_decref(pbuf);
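/*
 * The ping target is a buffer describing this node's NIs, posted as a
 * GET-able MD on LNET_RESERVED_PORTAL under LNET_PROTO_PING_MATCHBITS.
 * Remote peers fetch it with an LNet GET (this is what an LNet ping
 * reads) and, with Multi-Rail, discovery uses it to learn our
 * interface list.
 */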
1301 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1302 struct lnet_handle_md *ping_mdh,
1303 int ni_count, bool set_eq)
1305 struct lnet_process_id id = {
1306 .nid = LNET_NID_ANY,
1309 struct lnet_handle_me me_handle;
1310 struct lnet_md md = { NULL };
1314 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1315 &the_lnet.ln_ping_target_eq);
1317 CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1322 *ppbuf = lnet_ping_target_create(ni_count);
1323 if (*ppbuf == NULL) {
1328 /* Ping target ME/MD */
1329 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1330 LNET_PROTO_PING_MATCHBITS, 0,
1331 LNET_UNLINK, LNET_INS_AFTER,
1334 CERROR("Can't create ping target ME: %d\n", rc);
1335 goto fail_decref_ping_buffer;
1338 /* initialize md content */
1339 md.start = &(*ppbuf)->pb_info;
1340 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1341 md.threshold = LNET_MD_THRESH_INF;
1343 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1344 LNET_MD_MANAGE_REMOTE;
1345 md.eq_handle = the_lnet.ln_ping_target_eq;
1346 md.user_ptr = *ppbuf;
1348 rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1350 CERROR("Can't attach ping target MD: %d\n", rc);
1351 goto fail_unlink_ping_me;
1353 lnet_ping_buffer_addref(*ppbuf);
1357 fail_unlink_ping_me:
1358 rc2 = LNetMEUnlink(me_handle);
1360 fail_decref_ping_buffer:
1361 LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1362 lnet_ping_buffer_decref(*ppbuf);
1366 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1373 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1374 struct lnet_handle_md *ping_mdh)
1376 sigset_t blocked = cfs_block_allsigs();
1378 LNetMDUnlink(*ping_mdh);
1379 LNetInvalidateMDHandle(ping_mdh);
1381 /* NB the MD could be busy; this just starts the unlink */
1382 while (lnet_ping_buffer_numref(pbuf) > 1) {
1383 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1384 set_current_state(TASK_UNINTERRUPTIBLE);
1385 schedule_timeout(cfs_time_seconds(1));
1388 cfs_restore_sigs(blocked);
1392 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1395 struct lnet_net *net;
1396 struct lnet_ni_status *ns;
1401 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1402 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1403 LASSERT(i < pbuf->pb_nnis);
1405 ns = &pbuf->pb_info.pi_ni[i];
1407 ns->ns_nid = ni->ni_nid;
1410 ns->ns_status = (ni->ni_status != NULL) ?
1411 ni->ni_status->ns_status :
1420 * We (ab)use the ns_status of the loopback interface to
1421 * transmit the sequence number. The first interface listed
1422 * must be the loopback interface.
1424 rc = lnet_ping_info_validate(&pbuf->pb_info);
1426 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1429 LNET_PING_BUFFER_SEQNO(pbuf) =
1430 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
1434 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1435 struct lnet_handle_md ping_mdh)
1437 struct lnet_ping_buffer *old_pbuf = NULL;
1438 struct lnet_handle_md old_ping_md;
1440 /* switch the NIs to point to the new ping info created */
1441 lnet_net_lock(LNET_LOCK_EX);
1443 if (!the_lnet.ln_routing)
1444 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1445 if (!lnet_peer_discovery_disabled)
1446 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
1448 /* Ensure only known feature bits have been set. */
1449 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1450 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1452 lnet_ping_target_install_locked(pbuf);
1454 if (the_lnet.ln_ping_target) {
1455 old_pbuf = the_lnet.ln_ping_target;
1456 old_ping_md = the_lnet.ln_ping_target_md;
1458 the_lnet.ln_ping_target_md = ping_mdh;
1459 the_lnet.ln_ping_target = pbuf;
1461 lnet_net_unlock(LNET_LOCK_EX);
1464 /* unlink and free the old ping info */
1465 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1466 lnet_ping_buffer_decref(old_pbuf);
1469 lnet_push_update_to_peers(0);
1473 lnet_ping_target_fini(void)
1477 lnet_ping_md_unlink(the_lnet.ln_ping_target,
1478 &the_lnet.ln_ping_target_md);
1480 rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1483 lnet_ping_target_destroy();
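/*
 * The push target is the receive side of peer discovery: peers PUT
 * their own ping buffer here (see the LNET_MD_OP_PUT MD below) when
 * their configuration changes.  lnet_push_target_resize() reallocates
 * the buffer whenever a peer advertises more NIs than currently fit.
 */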
1486 /* Resize the push target. */
1487 int lnet_push_target_resize(void)
1489 struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
1490 struct lnet_md md = { NULL };
1491 struct lnet_handle_me meh;
1492 struct lnet_handle_md mdh;
1493 struct lnet_handle_md old_mdh;
1494 struct lnet_ping_buffer *pbuf;
1495 struct lnet_ping_buffer *old_pbuf;
1496 int nnis = the_lnet.ln_push_target_nnis;
1504 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1510 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1511 LNET_PROTO_PING_MATCHBITS, 0,
1512 LNET_UNLINK, LNET_INS_AFTER,
1515 CERROR("Can't create push target ME: %d\n", rc);
1516 goto fail_decref_pbuf;
1519 /* initialize md content */
1520 md.start = &pbuf->pb_info;
1521 md.length = LNET_PING_INFO_SIZE(nnis);
1522 md.threshold = LNET_MD_THRESH_INF;
1524 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
1525 LNET_MD_MANAGE_REMOTE;
1527 md.eq_handle = the_lnet.ln_push_target_eq;
1529 rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
1531 CERROR("Can't attach push MD: %d\n", rc);
1532 goto fail_unlink_meh;
1534 lnet_ping_buffer_addref(pbuf);
1536 lnet_net_lock(LNET_LOCK_EX);
1537 old_pbuf = the_lnet.ln_push_target;
1538 old_mdh = the_lnet.ln_push_target_md;
1539 the_lnet.ln_push_target = pbuf;
1540 the_lnet.ln_push_target_md = mdh;
1541 lnet_net_unlock(LNET_LOCK_EX);
1544 LNetMDUnlink(old_mdh);
1545 lnet_ping_buffer_decref(old_pbuf);
1548 if (nnis < the_lnet.ln_push_target_nnis)
1551 CDEBUG(D_NET, "nnis %d success\n", nnis);
1558 lnet_ping_buffer_decref(pbuf);
1560 CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
1564 static void lnet_push_target_event_handler(struct lnet_event *ev)
1566 struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1568 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
1569 lnet_swap_pinginfo(pbuf);
1571 lnet_peer_push_event(ev);
1573 lnet_ping_buffer_decref(pbuf);
1576 /* Initialize the push target. */
1577 static int lnet_push_target_init(void)
1581 if (the_lnet.ln_push_target)
1584 rc = LNetEQAlloc(0, lnet_push_target_event_handler,
1585 &the_lnet.ln_push_target_eq);
1587 CERROR("Can't allocated push target EQ: %d\n", rc);
1591 /* Start at the required minimum, we'll enlarge if required. */
1592 the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
1594 rc = lnet_push_target_resize();
1597 LNetEQFree(the_lnet.ln_push_target_eq);
1598 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1604 /* Clean up the push target. */
1605 static void lnet_push_target_fini(void)
1607 if (!the_lnet.ln_push_target)
1610 /* Unlink and invalidate to prevent new references. */
1611 LNetMDUnlink(the_lnet.ln_push_target_md);
1612 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
1614 /* Wait for the unlink to complete. */
1615 while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
1616 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1617 set_current_state(TASK_UNINTERRUPTIBLE);
1618 schedule_timeout(cfs_time_seconds(1));
1621 lnet_ping_buffer_decref(the_lnet.ln_push_target);
1622 the_lnet.ln_push_target = NULL;
1623 the_lnet.ln_push_target_nnis = 0;
1625 LNetEQFree(the_lnet.ln_push_target_eq);
1626 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
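/*
 * Divide the net's max_tx_credits across the NI's CPTs: each tx queue
 * gets an equal share, raised to at least 8x the per-peer credit limit
 * and capped at the net-wide maximum.
 */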
1630 lnet_ni_tq_credits(struct lnet_ni *ni)
1634 LASSERT(ni->ni_ncpts >= 1);
1636 if (ni->ni_ncpts == 1)
1637 return ni->ni_net->net_tunables.lct_max_tx_credits;
1639 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1640 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1641 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
1647 lnet_ni_unlink_locked(struct lnet_ni *ni)
1649 if (!list_empty(&ni->ni_cptlist)) {
1650 list_del_init(&ni->ni_cptlist);
1651 lnet_ni_decref_locked(ni, 0);
1654 /* move it to the zombie list so nobody can find it anymore */
1655 LASSERT(!list_empty(&ni->ni_netlist));
1656 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1657 lnet_ni_decref_locked(ni, 0);
1661 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1666 struct list_head *zombie_list = &net->net_ni_zombie;
1669 * Now wait for the NIs I just nuked to show up on the zombie
1670 * list and shut them down in guaranteed thread context
1673 while (!list_empty(zombie_list)) {
1677 ni = list_entry(zombie_list->next,
1678 struct lnet_ni, ni_netlist);
1679 list_del_init(&ni->ni_netlist);
1680 /* the ni should be in deleting state. If it's not it's
1682 LASSERT(ni->ni_state & LNET_NI_STATE_DELETING);
1683 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1686 /* still busy, add it back to zombie list */
1687 list_add(&ni->ni_netlist, zombie_list);
1691 if (!list_empty(&ni->ni_netlist)) {
1692 lnet_net_unlock(LNET_LOCK_EX);
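/* (i & (-i)) == i only when i is a power of 2, so the "waiting"
 * message below is logged at exponentially increasing intervals
 * instead of once per second. */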
1694 if ((i & (-i)) == i) {
1696 "Waiting for zombie LNI %s\n",
1697 libcfs_nid2str(ni->ni_nid));
1699 set_current_state(TASK_UNINTERRUPTIBLE);
1700 schedule_timeout(cfs_time_seconds(1));
1701 lnet_net_lock(LNET_LOCK_EX);
1705 lnet_net_unlock(LNET_LOCK_EX);
1707 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1709 LASSERT(!in_interrupt());
1710 (net->net_lnd->lnd_shutdown)(ni);
1713 CDEBUG(D_LNI, "Removed LNI %s\n",
1714 libcfs_nid2str(ni->ni_nid));
1718 lnet_net_lock(LNET_LOCK_EX);
1722 /* shut down the NI and release its refcount */
1724 lnet_shutdown_lndni(struct lnet_ni *ni)
1727 struct lnet_net *net = ni->ni_net;
1729 lnet_net_lock(LNET_LOCK_EX);
1731 ni->ni_state |= LNET_NI_STATE_DELETING;
1732 ni->ni_state &= ~LNET_NI_STATE_ACTIVE;
1734 lnet_ni_unlink_locked(ni);
1735 lnet_incr_dlc_seq();
1736 lnet_net_unlock(LNET_LOCK_EX);
1738 /* clear messages for this NI on the lazy portal */
1739 for (i = 0; i < the_lnet.ln_nportals; i++)
1740 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1742 lnet_net_lock(LNET_LOCK_EX);
1743 lnet_clear_zombies_nis_locked(net);
1744 lnet_net_unlock(LNET_LOCK_EX);
1748 lnet_shutdown_lndnet(struct lnet_net *net)
1752 lnet_net_lock(LNET_LOCK_EX);
1754 net->net_state = LNET_NET_STATE_DELETING;
1756 list_del_init(&net->net_list);
1758 while (!list_empty(&net->net_ni_list)) {
1759 ni = list_entry(net->net_ni_list.next,
1760 struct lnet_ni, ni_netlist);
1761 lnet_net_unlock(LNET_LOCK_EX);
1762 lnet_shutdown_lndni(ni);
1763 lnet_net_lock(LNET_LOCK_EX);
1766 lnet_net_unlock(LNET_LOCK_EX);
1768 /* Do peer table cleanup for this net */
1769 lnet_peer_tables_cleanup(net);
1771 lnet_net_lock(LNET_LOCK_EX);
1773 * decrement ref count on lnd only when the entire network goes
1776 net->net_lnd->lnd_refcount--;
1778 lnet_net_unlock(LNET_LOCK_EX);
1784 lnet_shutdown_lndnets(void)
1786 struct lnet_net *net;
1787 struct list_head resend;
1788 struct lnet_msg *msg, *tmp;
1790 INIT_LIST_HEAD(&resend);
1792 /* NB called holding the global mutex */
1794 /* All quiet on the API front */
1795 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1796 LASSERT(the_lnet.ln_refcount == 0);
1798 lnet_net_lock(LNET_LOCK_EX);
1799 the_lnet.ln_state = LNET_STATE_STOPPING;
1801 while (!list_empty(&the_lnet.ln_nets)) {
1803 * move the nets to the zombie list so they are not
1804 * picked up for new work. LONET is also included in the
1805 * nets that will be moved to the zombie list
1807 net = list_entry(the_lnet.ln_nets.next,
1808 struct lnet_net, net_list);
1809 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1812 /* Drop the cached loopback Net. */
1813 if (the_lnet.ln_loni != NULL) {
1814 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1815 the_lnet.ln_loni = NULL;
1817 lnet_net_unlock(LNET_LOCK_EX);
1819 /* iterate through the net zombie list and delete each net */
1820 while (!list_empty(&the_lnet.ln_net_zombie)) {
1821 net = list_entry(the_lnet.ln_net_zombie.next,
1822 struct lnet_net, net_list);
1823 lnet_shutdown_lndnet(net);
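/* With all nets gone, cancel any messages still queued for resend;
 * they are finalized with -ECANCELED below. */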
1826 spin_lock(&the_lnet.ln_msg_resend_lock);
1827 list_splice(&the_lnet.ln_msg_resend, &resend);
1828 spin_unlock(&the_lnet.ln_msg_resend_lock);
1830 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
1831 list_del_init(&msg->msg_list);
1832 msg->msg_no_resend = true;
1833 lnet_finalize(msg, -ECANCELED);
1836 lnet_net_lock(LNET_LOCK_EX);
1837 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1838 lnet_net_unlock(LNET_LOCK_EX);
1842 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
1845 struct lnet_tx_queue *tq;
1847 struct lnet_net *net = ni->ni_net;
1849 mutex_lock(&the_lnet.ln_lnd_mutex);
1852 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
1853 ni->ni_lnd_tunables_set = true;
1856 rc = (net->net_lnd->lnd_startup)(ni);
1858 mutex_unlock(&the_lnet.ln_lnd_mutex);
1861 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1862 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
1863 lnet_net_lock(LNET_LOCK_EX);
1864 net->net_lnd->lnd_refcount--;
1865 lnet_net_unlock(LNET_LOCK_EX);
1870 ni->ni_state |= LNET_NI_STATE_ACTIVE;
1871 ni->ni_state &= ~LNET_NI_STATE_INIT;
1874 /* We keep a reference on the loopback net through the loopback NI */
1875 if (net->net_lnd->lnd_type == LOLND) {
1877 LASSERT(the_lnet.ln_loni == NULL);
1878 the_lnet.ln_loni = ni;
1879 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
1880 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
1881 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
1882 ni->ni_net->net_tunables.lct_peer_timeout = 0;
1886 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
1887 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
1888 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1889 libcfs_lnd2str(net->net_lnd->lnd_type),
1890 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
1892 /* shutdown the NI since if we get here then it must've already
1895 lnet_shutdown_lndni(ni);
1899 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1900 tq->tq_credits_min =
1901 tq->tq_credits_max =
1902 tq->tq_credits = lnet_ni_tq_credits(ni);
1905 atomic_set(&ni->ni_tx_credits,
1906 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
1907 atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE);
1909 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1910 libcfs_nid2str(ni->ni_nid),
1911 ni->ni_net->net_tunables.lct_peer_tx_credits,
1912 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1913 ni->ni_net->net_tunables.lct_peer_rtr_credits,
1914 ni->ni_net->net_tunables.lct_peer_timeout);
1923 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
1926 struct lnet_net *net_l = NULL;
1927 struct list_head local_ni_list;
1931 struct lnet_lnd *lnd;
1933 net->net_tunables.lct_peer_timeout;
1935 net->net_tunables.lct_max_tx_credits;
1936 int peerrtrcredits =
1937 net->net_tunables.lct_peer_rtr_credits;
1939 INIT_LIST_HEAD(&local_ni_list);
1942 * make sure that this net is unique. If it isn't then
1943 * we are adding interfaces to an already existing network, and
1944 * 'net' is just a convenient way to pass in the list.
1945 * If it is unique we need to find the LND and load it if
1948 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
1949 lnd_type = LNET_NETTYP(net->net_id);
1951 mutex_lock(&the_lnet.ln_lnd_mutex);
1952 lnd = lnet_find_lnd_by_type(lnd_type);
1955 mutex_unlock(&the_lnet.ln_lnd_mutex);
1956 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1957 mutex_lock(&the_lnet.ln_lnd_mutex);
1959 lnd = lnet_find_lnd_by_type(lnd_type);
1961 mutex_unlock(&the_lnet.ln_lnd_mutex);
1962 CERROR("Can't load LND %s, module %s, rc=%d\n",
1963 libcfs_lnd2str(lnd_type),
1964 libcfs_lnd2modname(lnd_type), rc);
1965 #ifndef HAVE_MODULE_LOADING_SUPPORT
1966 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
1967 "compiled with kernel module "
1968 "loading support.");
1975 lnet_net_lock(LNET_LOCK_EX);
1976 lnd->lnd_refcount++;
1977 lnet_net_unlock(LNET_LOCK_EX);
1981 mutex_unlock(&the_lnet.ln_lnd_mutex);
1987 * net_l: if the network being added is unique then net_l
1988 * will point to that network
1989 * if the network being added is not unique then
1990 * net_l points to the existing network.
1992 * When we enter the loop below, we'll pick NIs off the
1993 * network being added and start them up, then add them to
1994 * a local ni list. Once we've successfully started all
1995 * the NIs then we join the local NI list (of started up
1996 * networks) with the net_l->net_ni_list, which should
1997 * point to the correct network to add the new ni list to
1999 * If any of the new NIs fail to start up, then we want to
2000 * iterate through the local ni list, which should include
2001 * any NIs which were successfully started up, and shut
2004 * After that we want to delete the network being added,
2005 * to avoid a memory leak.
2009 * When a network uses TCP bonding then all its interfaces
2010 * must be specified when the network is first defined: the
2011 * TCP bonding code doesn't allow for interfaces to be added
2014 if (net_l != net && net_l != NULL && use_tcp_bonding &&
2015 LNET_NETTYP(net_l->net_id) == SOCKLND) {
2020 while (!list_empty(&net->net_ni_added)) {
2021 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
2023 list_del_init(&ni->ni_netlist);
2025 /* make sure that the NI we're about to start
2026 * up is actually unique. If it's not, fail. */
2027 if (!lnet_ni_unique_net(&net_l->net_ni_list,
2028 ni->ni_interfaces[0])) {
2033 /* adjust the pointer to the parent network, just in case
2034 * the net is a duplicate */
2037 rc = lnet_startup_lndni(ni, tun);
2039 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
2040 ni->ni_net->net_lnd->lnd_query != NULL);
2046 list_add_tail(&ni->ni_netlist, &local_ni_list);
2051 lnet_net_lock(LNET_LOCK_EX);
2052 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
2053 lnet_incr_dlc_seq();
2054 lnet_net_unlock(LNET_LOCK_EX);
2056 /* if the network is not unique then we don't want to keep
2057 * it around after we're done. Free it. Otherwise add that
2058 * net to the global the_lnet.ln_nets */
2059 if (net_l != net && net_l != NULL) {
2061 * TODO - note. currently the tunables can not be updated
2066 net->net_state = LNET_NET_STATE_ACTIVE;
2068 * restore tunables after they have been overwritten by the
2071 if (peer_timeout != -1)
2072 net->net_tunables.lct_peer_timeout = peer_timeout;
2073 if (maxtxcredits != -1)
2074 net->net_tunables.lct_max_tx_credits = maxtxcredits;
2075 if (peerrtrcredits != -1)
2076 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
2078 lnet_net_lock(LNET_LOCK_EX);
2079 list_add_tail(&net->net_list, &the_lnet.ln_nets);
2080 lnet_net_unlock(LNET_LOCK_EX);
2087 * shutdown the new NIs that are being started up
2088 * free the NET being started
2090 while (!list_empty(&local_ni_list)) {
2091 ni = list_entry(local_ni_list.next, struct lnet_ni,
2094 lnet_shutdown_lndni(ni);
2104 lnet_startup_lndnets(struct list_head *netlist)
2106 struct lnet_net *net;
2111 * Change to running state before bringing up the LNDs. This
2112 * allows lnet_shutdown_lndnets() to assert that we've passed
2115 lnet_net_lock(LNET_LOCK_EX);
2116 the_lnet.ln_state = LNET_STATE_RUNNING;
2117 lnet_net_unlock(LNET_LOCK_EX);
2119 while (!list_empty(netlist)) {
2120 net = list_entry(netlist->next, struct lnet_net, net_list);
2121 list_del_init(&net->net_list);
2123 rc = lnet_startup_lndnet(net, NULL);
2133 lnet_shutdown_lndnets();
2139 * Initialize LNet library.
2141 * Automatically called at module loading time. Caller has to call
2142 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2143 * latter returned 0. It must be called exactly once.
2145 * \retval 0 on success
2146 * \retval -ve on failures.
2148 int lnet_lib_init(void)
2152 lnet_assert_wire_constants();
2154 /* refer to global cfs_cpt_table for now */
2155 the_lnet.ln_cpt_table = cfs_cpt_table;
2156 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
2158 LASSERT(the_lnet.ln_cpt_number > 0);
2159 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2160 /* we are at risk of consuming all lh_cookie bits */
2161 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2162 "please change setting of CPT-table and retry\n",
2163 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2167 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2168 the_lnet.ln_cpt_bits++;
2170 rc = lnet_create_locks();
2172 CERROR("Can't create LNet global locks: %d\n", rc);
2176 the_lnet.ln_refcount = 0;
2177 LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
2178 INIT_LIST_HEAD(&the_lnet.ln_lnds);
2179 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2180 INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
2181 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2182 INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
2184 /* The hash table size is the number of bits it takes to express the set
2185 * ln_num_routes, minus 1 (better to underestimate than overestimate so we
2186 * don't waste memory). */
2187 if (rnet_htable_size <= 0)
2188 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2189 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2190 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2191 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2192 order_base_2(rnet_htable_size) - 1);
2194 /* All LNDs apart from the LOLND are in separate modules. They
2195 * register themselves when their module loads, and unregister
2196 * themselves when their module is unloaded. */
2197 lnet_register_lnd(&the_lolnd);
2202 * Finalize LNet library.
2204 * \pre lnet_lib_init() called with success.
2205 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2207 void lnet_lib_exit(void)
2209 LASSERT(the_lnet.ln_refcount == 0);
2211 while (!list_empty(&the_lnet.ln_lnds))
2212 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
2213 struct lnet_lnd, lnd_list));
2214 lnet_destroy_locks();
2218 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2220 * Users must call this function at least once before any other functions.
2221 * For each successful call there must be a corresponding call to
2222 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2225 * The PID used by LNet may be different from the one requested.
2228 * \param requested_pid PID requested by the caller.
2230 * \return >= 0 on success, and < 0 error code on failures.
2233 LNetNIInit(lnet_pid_t requested_pid)
2235 int im_a_router = 0;
2238 struct lnet_ping_buffer *pbuf;
2239 struct lnet_handle_md ping_mdh;
2240 struct list_head net_head;
2241 struct lnet_net *net;
2243 INIT_LIST_HEAD(&net_head);
2245 mutex_lock(&the_lnet.ln_api_mutex);
2247 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2249 if (the_lnet.ln_refcount > 0) {
2250 rc = the_lnet.ln_refcount++;
2251 mutex_unlock(&the_lnet.ln_api_mutex);
2255 rc = lnet_prepare(requested_pid);
2257 mutex_unlock(&the_lnet.ln_api_mutex);
2261 /* create the loopback network */
2262 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2265 goto err_empty_list;
2268 /* Add in the loopback NI */
2269 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2271 goto err_empty_list;
2274 /* If LNet is being initialized via DLC it is possible
2275 * that the user requests not to load module parameters (ones which
2276 * are supported by DLC) on initialization. Therefore, make sure not
2277 * to load networks, routes and forwarding from module parameters
2278 * in this case. On cleanup, in case of failure, only clean up
2279 * routes if they have been loaded */
2280 if (!the_lnet.ln_nis_from_mod_params) {
2281 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2284 goto err_empty_list;
2287 ni_count = lnet_startup_lndnets(&net_head);
2290 goto err_empty_list;
2293 if (!the_lnet.ln_nis_from_mod_params) {
2294 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2296 goto err_shutdown_lndnis;
2298 rc = lnet_check_routes();
2300 goto err_destroy_routes;
2302 rc = lnet_rtrpools_alloc(im_a_router);
2304 goto err_destroy_routes;
2307 rc = lnet_acceptor_start();
2309 goto err_destroy_routes;
2311 the_lnet.ln_refcount = 1;
2312 /* Now I may use my own API functions... */
2314 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2316 goto err_acceptor_stop;
2318 lnet_ping_target_update(pbuf, ping_mdh);
2320 rc = lnet_monitor_thr_start();
2324 rc = lnet_push_target_init();
2326 goto err_stop_monitor_thr;
2328 rc = lnet_peer_discovery_start();
2330 goto err_destroy_push_target;
2333 lnet_router_debugfs_init();
2335 mutex_unlock(&the_lnet.ln_api_mutex);
2339 err_destroy_push_target:
2340 lnet_push_target_fini();
2341 err_stop_monitor_thr:
2342 lnet_monitor_thr_stop();
2344 lnet_ping_target_fini();
2346 the_lnet.ln_refcount = 0;
2347 lnet_acceptor_stop();
2349 if (!the_lnet.ln_nis_from_mod_params)
2350 lnet_destroy_routes();
2351 err_shutdown_lndnis:
2352 lnet_shutdown_lndnets();
2356 mutex_unlock(&the_lnet.ln_api_mutex);
2357 while (!list_empty(&net_head)) {
2358 struct lnet_net *net;
2360 net = list_entry(net_head.next, struct lnet_net, net_list);
2361 list_del_init(&net->net_list);
2366 EXPORT_SYMBOL(LNetNIInit);
2369 * Stop LNet interfaces, routing, and forwarding.
2371 * Users must call this function once for each successful call to LNetNIInit().
2372 * Once the LNetNIFini() operation has been started, the results of pending
2373 * API operations are undefined.
2375 * \return always 0 for current implementation.
2380 mutex_lock(&the_lnet.ln_api_mutex);
2382 LASSERT(the_lnet.ln_refcount > 0);
2384 if (the_lnet.ln_refcount != 1) {
2385 the_lnet.ln_refcount--;
2387 LASSERT(!the_lnet.ln_niinit_self);
2391 lnet_router_debugfs_init();
2392 lnet_peer_discovery_stop();
2393 lnet_push_target_fini();
2394 lnet_monitor_thr_stop();
2395 lnet_ping_target_fini();
2397 /* Teardown fns that use my own API functions BEFORE here */
2398 the_lnet.ln_refcount = 0;
2400 lnet_acceptor_stop();
2401 lnet_destroy_routes();
2402 lnet_shutdown_lndnets();
2406 mutex_unlock(&the_lnet.ln_api_mutex);
2409 EXPORT_SYMBOL(LNetNIFini);
2412 * Grabs the ni data from the ni structure and fills the out
2415 * \param[in] ni network interface structure
2416 * \param[out] cfg_ni NI config information
2417 * \param[out] tun network and LND tunables
2420 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2421 struct lnet_ioctl_config_lnd_tunables *tun,
2422 struct lnet_ioctl_element_stats *stats,
2425 size_t min_size = 0;
2428 if (!ni || !cfg_ni || !tun)
2431 if (ni->ni_interfaces[0] != NULL) {
2432 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2433 if (ni->ni_interfaces[i] != NULL) {
2434 strncpy(cfg_ni->lic_ni_intf[i],
2435 ni->ni_interfaces[i],
2436 sizeof(cfg_ni->lic_ni_intf[i]));
2441 cfg_ni->lic_nid = ni->ni_nid;
2442 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2443 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2445 cfg_ni->lic_status = ni->ni_status->ns_status;
2446 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2447 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2449 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2452 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
2453 LNET_STATS_TYPE_SEND);
2454 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
2455 LNET_STATS_TYPE_RECV);
2456 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
2457 LNET_STATS_TYPE_DROP);
2461 * tun->lt_tun will always be present, but to stay backwards
2462 * compatible we need to handle the case where tun->lt_tun is smaller
2463 * than what the kernel has, because it comes from an older version
2464 * of a userspace program. In that case copy only as much
2465 * information as the available space allows.
2467 min_size = tun_size - sizeof(tun->lt_cmn);
2468 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2470 /* copy over the cpts */
2471 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2472 ni->ni_cpts == NULL) {
2473 for (i = 0; i < ni->ni_ncpts; i++)
2474 cfg_ni->lic_cpts[i] = i;
2477 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2478 i < LNET_MAX_SHOW_NUM_CPT;
2480 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2482 cfg_ni->lic_ncpts = ni->ni_ncpts;
2486 * NOTE: This is a legacy function left in the code to be backwards
2487 * compatible with older userspace programs. It should eventually be removed.
2490 * Grabs the ni data from the ni structure and fills the out parameter with it.
2493 * \param[in] ni network interface structure
2494 * \param[out] config config information
2497 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2498 struct lnet_ioctl_config_data *config)
2500 struct lnet_ioctl_net_config *net_config;
2501 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2502 size_t min_size, tunable_size = 0;
2508 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2512 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2513 ARRAY_SIZE(net_config->ni_interfaces));
2515 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2516 if (!ni->ni_interfaces[i])
2519 strncpy(net_config->ni_interfaces[i],
2520 ni->ni_interfaces[i],
2521 sizeof(net_config->ni_interfaces[i]));
2524 config->cfg_nid = ni->ni_nid;
2525 config->cfg_config_u.cfg_net.net_peer_timeout =
2526 ni->ni_net->net_tunables.lct_peer_timeout;
2527 config->cfg_config_u.cfg_net.net_max_tx_credits =
2528 ni->ni_net->net_tunables.lct_max_tx_credits;
2529 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2530 ni->ni_net->net_tunables.lct_peer_tx_credits;
2531 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2532 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2534 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2535 net_config->ni_status = LNET_NI_STATUS_UP;
2537 net_config->ni_status = ni->ni_status->ns_status;
2540 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2542 for (i = 0; i < num_cpts; i++)
2543 net_config->ni_cpts[i] = ni->ni_cpts[i];
2545 config->cfg_ncpts = num_cpts;
2549 * See if userspace tools sent in a newer and larger version
2550 * of struct lnet_tunables than what the kernel uses.
2552 min_size = sizeof(*config) + sizeof(*net_config);
2554 if (config->cfg_hdr.ioc_len > min_size)
2555 tunable_size = config->cfg_hdr.ioc_len - min_size;
2557 /* Don't copy too much data to user space */
2558 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2559 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2561 if (lnd_cfg && min_size) {
2562 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2563 config->cfg_config_u.cfg_net.net_interface_count = 1;
2565 /* Tell userspace that the kernel side has less data */
2566 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2567 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2568 config->cfg_hdr.ioc_len -= min_size;
2574 lnet_get_ni_idx_locked(int idx)
2577 struct lnet_net *net;
2579 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2580 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2590 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2593 struct lnet_net *net = mynet;
2596 * It is possible that the net has been cleaned out while there is
2597 * a message being sent. This function accesses the net without
2598 * checking if the list is empty.
2602 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2604 if (list_empty(&net->net_ni_list))
2606 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2612 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2613 /* if you reached the end of the ni list and the net is
2614 * specified, then there are no more nis in that net */
2618 /* we reached the end of this net's ni list. move to the next net */
2620 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2621 /* no more nets and no more NIs. */
2624 /* get the next net */
2625 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2627 if (list_empty(&net->net_ni_list))
2629 /* get the ni on it */
2630 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2636 if (list_empty(&prev->ni_netlist))
2639 /* there are more nis left */
2640 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
2646 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2651 int idx = config->cfg_count;
2653 cpt = lnet_net_lock_current();
2655 ni = lnet_get_ni_idx_locked(idx);
2660 lnet_fill_ni_info_legacy(ni, config);
2664 lnet_net_unlock(cpt);
2669 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2670 struct lnet_ioctl_config_lnd_tunables *tun,
2671 struct lnet_ioctl_element_stats *stats,
2678 if (!cfg_ni || !tun || !stats)
2681 cpt = lnet_net_lock_current();
2683 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2688 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2692 lnet_net_unlock(cpt);
2696 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
2705 cpt = lnet_net_lock_current();
2707 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
2710 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
2714 lnet_net_unlock(cpt);
2719 static int lnet_add_net_common(struct lnet_net *net,
2720 struct lnet_ioctl_config_lnd_tunables *tun)
2723 struct lnet_ping_buffer *pbuf;
2724 struct lnet_handle_md ping_mdh;
2726 struct lnet_remotenet *rnet;
2728 int num_acceptor_nets;
2730 lnet_net_lock(LNET_LOCK_EX);
2731 rnet = lnet_find_rnet_locked(net->net_id);
2732 lnet_net_unlock(LNET_LOCK_EX);
2734 * make sure that the net added doesn't invalidate the current
2735 * configuration LNet is keeping
2738 CERROR("Adding net %s will invalidate routing configuration\n",
2739 libcfs_net2str(net->net_id));
2745 * make sure you calculate the correct number of slots in the ping
2746 * buffer. Since the ping info is a flattened list of all the NIs,
2747 * we should allocate enough slots to accommodate the number of NIs
2748 * which will be added.
2750 * since ni hasn't been configured yet, use
2751 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
2753 net_ni_count = lnet_get_net_ni_count_pre(net);
2755 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2756 net_ni_count + lnet_get_ni_count(),
2764 memcpy(&net->net_tunables,
2765 &tun->lt_cmn, sizeof(net->net_tunables));
2767 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
2770 * before starting this network, get a count of the current TCP
2771 * networks which require the acceptor thread to be running. If that
2772 * count is 0 before this network is started, then the acceptor
2773 * thread needs to be started after this network comes up
2775 num_acceptor_nets = lnet_count_acceptor_nets();
2777 net_id = net->net_id;
2779 rc = lnet_startup_lndnet(net,
2780 (tun) ? &tun->lt_tun : NULL);
2784 lnet_net_lock(LNET_LOCK_EX);
2785 net = lnet_get_net_locked(net_id);
2786 lnet_net_unlock(LNET_LOCK_EX);
2791 * Start the acceptor thread if this is the first network
2792 * being added that requires the thread.
2794 if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
2795 rc = lnet_acceptor_start();
2797 /* shut down the net that we just started */
2798 CERROR("Failed to start up acceptor thread\n");
2799 lnet_shutdown_lndnet(net);
2804 lnet_net_lock(LNET_LOCK_EX);
2805 lnet_peer_net_added(net);
2806 lnet_net_unlock(LNET_LOCK_EX);
2808 lnet_ping_target_update(pbuf, ping_mdh);
2813 lnet_ping_md_unlink(pbuf, &ping_mdh);
2814 lnet_ping_buffer_decref(pbuf);
2818 static int lnet_handle_legacy_ip2nets(char *ip2nets,
2819 struct lnet_ioctl_config_lnd_tunables *tun)
2821 struct lnet_net *net;
2824 struct list_head net_head;
2826 INIT_LIST_HEAD(&net_head);
2828 rc = lnet_parse_ip2nets(&nets, ip2nets);
2832 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2836 mutex_lock(&the_lnet.ln_api_mutex);
2837 while (!list_empty(&net_head)) {
2838 net = list_entry(net_head.next, struct lnet_net, net_list);
2839 list_del_init(&net->net_list);
2840 rc = lnet_add_net_common(net, tun);
2846 mutex_unlock(&the_lnet.ln_api_mutex);
2848 while (!list_empty(&net_head)) {
2849 net = list_entry(net_head.next, struct lnet_net, net_list);
2850 list_del_init(&net->net_list);
2856 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
2858 struct lnet_net *net;
2860 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2862 __u32 net_id, lnd_type;
2864 /* get the tunables if they are available */
2865 if (conf->lic_cfg_hdr.ioc_len >=
2866 sizeof(*conf) + sizeof(*tun))
2867 tun = (struct lnet_ioctl_config_lnd_tunables *)
2870 /* handle legacy ip2nets from DLC */
2871 if (conf->lic_legacy_ip2nets[0] != '\0')
2872 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
2875 net_id = LNET_NIDNET(conf->lic_nid);
2876 lnd_type = LNET_NETTYP(net_id);
2878 if (!libcfs_isknown_lnd(lnd_type)) {
2879 CERROR("No valid net and lnd information provided\n");
2883 net = lnet_net_alloc(net_id, NULL);
2887 for (i = 0; i < conf->lic_ncpts; i++) {
2888 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
2892 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
2893 conf->lic_ni_intf[0]);
2897 mutex_lock(&the_lnet.ln_api_mutex);
2899 rc = lnet_add_net_common(net, tun);
2901 mutex_unlock(&the_lnet.ln_api_mutex);
2906 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
2908 struct lnet_net *net;
2910 __u32 net_id = LNET_NIDNET(conf->lic_nid);
2911 struct lnet_ping_buffer *pbuf;
2912 struct lnet_handle_md ping_mdh;
2917 /* don't allow userspace to shut down the LOLND */
2918 if (LNET_NETTYP(net_id) == LOLND)
2921 mutex_lock(&the_lnet.ln_api_mutex);
2925 net = lnet_get_net_locked(net_id);
2927 CERROR("net %s not found\n",
2928 libcfs_net2str(net_id));
2933 addr = LNET_NIDADDR(conf->lic_nid);
2935 /* remove the entire net */
2936 net_count = lnet_get_net_ni_count_locked(net);
2940 /* create and link a new ping info, before removing the old one */
2941 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2942 lnet_get_ni_count() - net_count,
2945 goto unlock_api_mutex;
2947 lnet_shutdown_lndnet(net);
2949 if (lnet_count_acceptor_nets() == 0)
2950 lnet_acceptor_stop();
2952 lnet_ping_target_update(pbuf, ping_mdh);
2954 goto unlock_api_mutex;
2957 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
2959 CERROR("nid %s not found\n",
2960 libcfs_nid2str(conf->lic_nid));
2965 net_count = lnet_get_net_ni_count_locked(net);
2969 /* create and link a new ping info, before removing the old one */
2970 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2971 lnet_get_ni_count() - 1, false);
2973 goto unlock_api_mutex;
2975 lnet_shutdown_lndni(ni);
2977 if (lnet_count_acceptor_nets() == 0)
2978 lnet_acceptor_stop();
2980 lnet_ping_target_update(pbuf, ping_mdh);
2982 /* check if the net is empty and remove it if it is */
2984 lnet_shutdown_lndnet(net);
2986 goto unlock_api_mutex;
2991 mutex_unlock(&the_lnet.ln_api_mutex);
2997 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
2998 * They are only expected to be called for unique networks,
2999 * which can happen as a result of older DLC library
3000 * calls. Multi-Rail DLC and beyond no longer use these APIs.
3003 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
3005 struct lnet_net *net;
3006 struct list_head net_head;
3008 struct lnet_ioctl_config_lnd_tunables tun;
3009 char *nets = conf->cfg_config_u.cfg_net.net_intf;
3011 INIT_LIST_HEAD(&net_head);
3013 /* Create net/ni structures for the network string */
3014 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
3016 return rc == 0 ? -EINVAL : rc;
3018 mutex_lock(&the_lnet.ln_api_mutex);
3021 rc = -EINVAL; /* only add one network per call */
3022 goto out_unlock_clean;
3025 net = list_entry(net_head.next, struct lnet_net, net_list);
3026 list_del_init(&net->net_list);
3028 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
3030 memset(&tun, 0, sizeof(tun));
3032 tun.lt_cmn.lct_peer_timeout =
3033 conf->cfg_config_u.cfg_net.net_peer_timeout;
3034 tun.lt_cmn.lct_peer_tx_credits =
3035 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
3036 tun.lt_cmn.lct_peer_rtr_credits =
3037 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
3038 tun.lt_cmn.lct_max_tx_credits =
3039 conf->cfg_config_u.cfg_net.net_max_tx_credits;
3041 rc = lnet_add_net_common(net, &tun);
3044 mutex_unlock(&the_lnet.ln_api_mutex);
3045 while (!list_empty(&net_head)) {
3046 /* net_head list is empty in success case */
3047 net = list_entry(net_head.next, struct lnet_net, net_list);
3048 list_del_init(&net->net_list);
3055 lnet_dyn_del_net(__u32 net_id)
3057 struct lnet_net *net;
3058 struct lnet_ping_buffer *pbuf;
3059 struct lnet_handle_md ping_mdh;
3063 /* don't allow userspace to shut down the LOLND */
3064 if (LNET_NETTYP(net_id) == LOLND)
3067 mutex_lock(&the_lnet.ln_api_mutex);
3071 net = lnet_get_net_locked(net_id);
3078 net_ni_count = lnet_get_net_ni_count_locked(net);
3082 /* create and link a new ping info, before removing the old one */
3083 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
3084 lnet_get_ni_count() - net_ni_count, false);
3088 lnet_shutdown_lndnet(net);
3090 if (lnet_count_acceptor_nets() == 0)
3091 lnet_acceptor_stop();
3093 lnet_ping_target_update(pbuf, ping_mdh);
3096 mutex_unlock(&the_lnet.ln_api_mutex);
3101 void lnet_incr_dlc_seq(void)
3103 atomic_inc(&lnet_dlc_seq_no);
3106 __u32 lnet_get_dlc_seq_locked(void)
3108 return atomic_read(&lnet_dlc_seq_no);
3112 * LNet ioctl handler.
3116 LNetCtl(unsigned int cmd, void *arg)
3118 struct libcfs_ioctl_data *data = arg;
3119 struct lnet_ioctl_config_data *config;
3120 struct lnet_process_id id = {0};
3124 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3125 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
3128 case IOC_LIBCFS_GET_NI:
3129 rc = LNetGetId(data->ioc_count, &id);
3130 data->ioc_nid = id.nid;
3133 case IOC_LIBCFS_FAIL_NID:
3134 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3136 case IOC_LIBCFS_ADD_ROUTE:
3139 if (config->cfg_hdr.ioc_len < sizeof(*config))
3142 mutex_lock(&the_lnet.ln_api_mutex);
3143 rc = lnet_add_route(config->cfg_net,
3144 config->cfg_config_u.cfg_route.rtr_hop,
3146 config->cfg_config_u.cfg_route.
3149 rc = lnet_check_routes();
3151 lnet_del_route(config->cfg_net,
3154 mutex_unlock(&the_lnet.ln_api_mutex);
3157 case IOC_LIBCFS_DEL_ROUTE:
3160 if (config->cfg_hdr.ioc_len < sizeof(*config))
3163 mutex_lock(&the_lnet.ln_api_mutex);
3164 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3165 mutex_unlock(&the_lnet.ln_api_mutex);
3168 case IOC_LIBCFS_GET_ROUTE:
3171 if (config->cfg_hdr.ioc_len < sizeof(*config))
3174 mutex_lock(&the_lnet.ln_api_mutex);
3175 rc = lnet_get_route(config->cfg_count,
3177 &config->cfg_config_u.cfg_route.rtr_hop,
3179 &config->cfg_config_u.cfg_route.rtr_flags,
3180 &config->cfg_config_u.cfg_route.
3182 mutex_unlock(&the_lnet.ln_api_mutex);
3185 case IOC_LIBCFS_GET_LOCAL_NI: {
3186 struct lnet_ioctl_config_ni *cfg_ni;
3187 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3188 struct lnet_ioctl_element_stats *stats;
3193 /* get the tunables if they are available */
3194 if (cfg_ni->lic_cfg_hdr.ioc_len <
3195 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3198 stats = (struct lnet_ioctl_element_stats *)
3200 tun = (struct lnet_ioctl_config_lnd_tunables *)
3201 (cfg_ni->lic_bulk + sizeof(*stats));
3203 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3206 mutex_lock(&the_lnet.ln_api_mutex);
3207 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3208 mutex_unlock(&the_lnet.ln_api_mutex);
3212 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3213 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3215 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3218 mutex_lock(&the_lnet.ln_api_mutex);
3219 rc = lnet_get_ni_stats(msg_stats);
3220 mutex_unlock(&the_lnet.ln_api_mutex);
3225 case IOC_LIBCFS_GET_NET: {
3226 size_t total = sizeof(*config) +
3227 sizeof(struct lnet_ioctl_net_config);
3230 if (config->cfg_hdr.ioc_len < total)
3233 mutex_lock(&the_lnet.ln_api_mutex);
3234 rc = lnet_get_net_config(config);
3235 mutex_unlock(&the_lnet.ln_api_mutex);
3239 case IOC_LIBCFS_GET_LNET_STATS:
3241 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3243 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3246 mutex_lock(&the_lnet.ln_api_mutex);
3247 lnet_counters_get(&lnet_stats->st_cntrs);
3248 mutex_unlock(&the_lnet.ln_api_mutex);
3252 case IOC_LIBCFS_CONFIG_RTR:
3255 if (config->cfg_hdr.ioc_len < sizeof(*config))
3258 mutex_lock(&the_lnet.ln_api_mutex);
3259 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3260 rc = lnet_rtrpools_enable();
3261 mutex_unlock(&the_lnet.ln_api_mutex);
3264 lnet_rtrpools_disable();
3265 mutex_unlock(&the_lnet.ln_api_mutex);
3268 case IOC_LIBCFS_ADD_BUF:
3271 if (config->cfg_hdr.ioc_len < sizeof(*config))
3274 mutex_lock(&the_lnet.ln_api_mutex);
3275 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3277 config->cfg_config_u.cfg_buffers.
3279 config->cfg_config_u.cfg_buffers.
3281 mutex_unlock(&the_lnet.ln_api_mutex);
3284 case IOC_LIBCFS_SET_NUMA_RANGE: {
3285 struct lnet_ioctl_set_value *numa;
3287 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3289 lnet_net_lock(LNET_LOCK_EX);
3290 lnet_numa_range = numa->sv_value;
3291 lnet_net_unlock(LNET_LOCK_EX);
3295 case IOC_LIBCFS_GET_NUMA_RANGE: {
3296 struct lnet_ioctl_set_value *numa;
3298 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3300 numa->sv_value = lnet_numa_range;
3304 case IOC_LIBCFS_GET_BUF: {
3305 struct lnet_ioctl_pool_cfg *pool_cfg;
3306 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3310 if (config->cfg_hdr.ioc_len < total)
3313 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3315 mutex_lock(&the_lnet.ln_api_mutex);
3316 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3317 mutex_unlock(&the_lnet.ln_api_mutex);
3321 case IOC_LIBCFS_ADD_PEER_NI: {
3322 struct lnet_ioctl_peer_cfg *cfg = arg;
3324 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3327 mutex_lock(&the_lnet.ln_api_mutex);
3328 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
3331 mutex_unlock(&the_lnet.ln_api_mutex);
3335 case IOC_LIBCFS_DEL_PEER_NI: {
3336 struct lnet_ioctl_peer_cfg *cfg = arg;
3338 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3341 mutex_lock(&the_lnet.ln_api_mutex);
3342 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3343 cfg->prcfg_cfg_nid);
3344 mutex_unlock(&the_lnet.ln_api_mutex);
3348 case IOC_LIBCFS_GET_PEER_INFO: {
3349 struct lnet_ioctl_peer *peer_info = arg;
3351 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3354 mutex_lock(&the_lnet.ln_api_mutex);
3355 rc = lnet_get_peer_ni_info(
3356 peer_info->pr_count,
3358 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3359 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3360 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3361 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3362 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3363 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3364 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3365 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3366 mutex_unlock(&the_lnet.ln_api_mutex);
3370 case IOC_LIBCFS_GET_PEER_NI: {
3371 struct lnet_ioctl_peer_cfg *cfg = arg;
3373 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3376 mutex_lock(&the_lnet.ln_api_mutex);
3377 rc = lnet_get_peer_info(cfg,
3378 (void __user *)cfg->prcfg_bulk);
3379 mutex_unlock(&the_lnet.ln_api_mutex);
3383 case IOC_LIBCFS_GET_PEER_LIST: {
3384 struct lnet_ioctl_peer_cfg *cfg = arg;
3386 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3389 mutex_lock(&the_lnet.ln_api_mutex);
3390 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
3391 (struct lnet_process_id __user *)cfg->prcfg_bulk);
3392 mutex_unlock(&the_lnet.ln_api_mutex);
3396 case IOC_LIBCFS_NOTIFY_ROUTER: {
3397 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
3399 /* The deadline passed in by the user should be some time in
3400 * seconds in the future since the UNIX epoch. We have to map
3401 * that deadline to the wall clock.
3403 deadline += ktime_get_seconds();
3404 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
3408 case IOC_LIBCFS_LNET_DIST:
3409 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
3410 if (rc < 0 && rc != -EHOSTUNREACH)
3413 data->ioc_u32[0] = rc;
3416 case IOC_LIBCFS_TESTPROTOCOMPAT:
3417 lnet_net_lock(LNET_LOCK_EX);
3418 the_lnet.ln_testprotocompat = data->ioc_flags;
3419 lnet_net_unlock(LNET_LOCK_EX);
3422 case IOC_LIBCFS_LNET_FAULT:
3423 return lnet_fault_ctl(data->ioc_flags, data);
3425 case IOC_LIBCFS_PING: {
3426 signed long timeout;
3428 id.nid = data->ioc_nid;
3429 id.pid = data->ioc_u32[0];
3431 /* If timeout is negative or exceeds DEFAULT_PEER_TIMEOUT, use the default of 3 minutes */
3432 if (((s32)data->ioc_u32[1] <= 0) ||
3433 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3434 timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3436 timeout = msecs_to_jiffies(data->ioc_u32[1]);
3438 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3439 data->ioc_plen1 / sizeof(struct lnet_process_id));
3444 data->ioc_count = rc;
3448 case IOC_LIBCFS_PING_PEER: {
3449 struct lnet_ioctl_ping_data *ping = arg;
3450 struct lnet_peer *lp;
3451 signed long timeout;
3453 /* If timeout is negative or exceeds DEFAULT_PEER_TIMEOUT, use the default of 3 minutes */
3454 if (((s32)ping->op_param) <= 0 ||
3455 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3456 timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3458 timeout = msecs_to_jiffies(ping->op_param);
3460 rc = lnet_ping(ping->ping_id, timeout,
3466 mutex_lock(&the_lnet.ln_api_mutex);
3467 lp = lnet_find_peer(ping->ping_id.nid);
3469 ping->ping_id.nid = lp->lp_primary_nid;
3470 ping->mr_info = lnet_peer_is_multi_rail(lp);
3471 lnet_peer_decref_locked(lp);
3473 mutex_unlock(&the_lnet.ln_api_mutex);
3475 ping->ping_count = rc;
3479 case IOC_LIBCFS_DISCOVER: {
3480 struct lnet_ioctl_ping_data *discover = arg;
3481 struct lnet_peer *lp;
3483 rc = lnet_discover(discover->ping_id, discover->op_param,
3485 discover->ping_count);
3489 mutex_lock(&the_lnet.ln_api_mutex);
3490 lp = lnet_find_peer(discover->ping_id.nid);
3492 discover->ping_id.nid = lp->lp_primary_nid;
3493 discover->mr_info = lnet_peer_is_multi_rail(lp);
3494 lnet_peer_decref_locked(lp);
3496 mutex_unlock(&the_lnet.ln_api_mutex);
3498 discover->ping_count = rc;
3503 ni = lnet_net2ni_addref(data->ioc_net);
3507 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
3510 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
3517 EXPORT_SYMBOL(LNetCtl);
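/*
 * Illustrative sketch only (assumed caller, not part of this file): the
 * IOC_LIBCFS_GET_NI command handled above takes an interface index in
 * ioc_count and returns the matching NID in ioc_nid.  In practice the
 * request arrives from userspace through the libcfs ioctl layer; calling
 * LNetCtl() directly as below merely shows the in/out fields involved.
 *
 *	struct libcfs_ioctl_data data;
 *	int rc;
 *
 *	memset(&data, 0, sizeof(data));
 *	data.ioc_count = 0;
 *	rc = LNetCtl(IOC_LIBCFS_GET_NI, &data);
 *	if (rc == 0)
 *		CDEBUG(D_NET, "NI[0]: %s\n", libcfs_nid2str(data.ioc_nid));
 */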
3519 void LNetDebugPeer(struct lnet_process_id id)
3521 lnet_debug_peer(id.nid);
3523 EXPORT_SYMBOL(LNetDebugPeer);
3526 * Determine if the specified peer \a nid is on the local node.
3528 * \param nid peer nid to check
3530 * \retval true If peer NID is on the local node.
3531 * \retval false If peer NID is not on the local node.
3533 bool LNetIsPeerLocal(lnet_nid_t nid)
3535 struct lnet_net *net;
3539 cpt = lnet_net_lock_current();
3540 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3541 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3542 if (ni->ni_nid == nid) {
3543 lnet_net_unlock(cpt);
3548 lnet_net_unlock(cpt);
3552 EXPORT_SYMBOL(LNetIsPeerLocal);
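/*
 * Illustrative sketch only (assumed caller): LNetIsPeerLocal() lets a
 * sender short-circuit routing or discovery work for NIDs that belong to
 * one of this node's own interfaces.
 *
 *	struct lnet_process_id target;
 *
 *	if (LNetIsPeerLocal(target.nid)) {
 *		... the target is served by a local NI, so no route
 *		    lookup is needed ...
 *	}
 */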
3555 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
3556 * Note that all interfaces share a same PID, as requested by LNetNIInit().
3558 * \param index Index of the interface to look up.
3559 * \param id On successful return, this location will hold the
3560 * struct lnet_process_id ID of the interface.
3562 * \retval 0 If an interface exists at \a index.
3563 * \retval -ENOENT If no interface has been found.
3566 LNetGetId(unsigned int index, struct lnet_process_id *id)
3569 struct lnet_net *net;
3573 LASSERT(the_lnet.ln_refcount > 0);
3575 cpt = lnet_net_lock_current();
3577 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3578 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3582 id->nid = ni->ni_nid;
3583 id->pid = the_lnet.ln_pid;
3589 lnet_net_unlock(cpt);
3592 EXPORT_SYMBOL(LNetGetId);
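/*
 * Illustrative sketch only (assumed caller): local interfaces can be
 * enumerated by calling LNetGetId() with increasing indices until it
 * returns -ENOENT.  This mirrors how the IOC_LIBCFS_GET_NI handler in
 * LNetCtl() above is driven one index at a time.
 *
 *	struct lnet_process_id id;
 *	unsigned int i;
 *
 *	for (i = 0; LNetGetId(i, &id) == 0; i++)
 *		CDEBUG(D_NET, "local NI %u: %s\n", i, libcfs_id2str(id));
 */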
3594 static int lnet_ping(struct lnet_process_id id, signed long timeout,
3595 struct lnet_process_id __user *ids, int n_ids)
3597 struct lnet_handle_eq eqh;
3598 struct lnet_handle_md mdh;
3599 struct lnet_event event;
3600 struct lnet_md md = { NULL };
3604 const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
3605 struct lnet_ping_buffer *pbuf;
3606 struct lnet_process_id tmpid;
3613 /* n_ids limit is arbitrary */
3614 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
3618 * if the user buffer has room for more than lnet_interfaces_max
3619 * entries, then only fill it up to lnet_interfaces_max
3621 if (n_ids > lnet_interfaces_max)
3622 n_ids = lnet_interfaces_max;
3624 if (id.pid == LNET_PID_ANY)
3625 id.pid = LNET_PID_LUSTRE;
3627 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
3631 /* NB 2 events max (including any unlink event) */
3632 rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
3634 CERROR("Can't allocate EQ: %d\n", rc);
3635 goto fail_ping_buffer_decref;
3638 /* initialize md content */
3639 md.start = &pbuf->pb_info;
3640 md.length = LNET_PING_INFO_SIZE(n_ids);
3641 md.threshold = 2; /* GET/REPLY */
3643 md.options = LNET_MD_TRUNCATE;
3647 rc = LNetMDBind(md, LNET_UNLINK, &mdh);
3649 CERROR("Can't bind MD: %d\n", rc);
3653 rc = LNetGet(LNET_NID_ANY, mdh, id,
3654 LNET_RESERVED_PORTAL,
3655 LNET_PROTO_PING_MATCHBITS, 0, false);
3658 /* Don't CERROR; this could be deliberate! */
3659 rc2 = LNetMDUnlink(mdh);
3662 /* NB must wait for the UNLINK event below... */
3664 timeout = a_long_time;
3668 /* MUST block for unlink to complete */
3670 blocked = cfs_block_allsigs();
3672 rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
3675 cfs_restore_sigs(blocked);
3677 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
3678 (rc2 <= 0) ? -1 : event.type,
3679 (rc2 <= 0) ? -1 : event.status,
3680 (rc2 > 0 && event.unlinked) ? " unlinked" : "");
3682 LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
3684 if (rc2 <= 0 || event.status != 0) {
3685 /* timeout or error */
3686 if (!replied && rc == 0)
3687 rc = (rc2 < 0) ? rc2 :
3688 (rc2 == 0) ? -ETIMEDOUT :
3692 /* Ensure completion in finite time... */
3694 /* No assertion (racing with network) */
3696 timeout = a_long_time;
3697 } else if (rc2 == 0) {
3698 /* timed out waiting for unlink */
3699 CWARN("ping %s: late network completion\n",
3702 } else if (event.type == LNET_EVENT_REPLY) {
3706 } while (rc2 <= 0 || !event.unlinked);
3710 CWARN("%s: Unexpected rc >= 0 but no reply!\n",
3717 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
3719 rc = -EPROTO; /* if I can't parse... */
3722 CERROR("%s: ping info too short %d\n",
3723 libcfs_id2str(id), nob);
3727 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
3728 lnet_swap_pinginfo(pbuf);
3729 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
3730 CERROR("%s: Unexpected magic %08x\n",
3731 libcfs_id2str(id), pbuf->pb_info.pi_magic);
3735 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
3736 CERROR("%s: ping w/o NI status: 0x%x\n",
3737 libcfs_id2str(id), pbuf->pb_info.pi_features);
3741 if (nob < LNET_PING_INFO_SIZE(0)) {
3742 CERROR("%s: Short reply %d(%d min)\n",
3744 nob, (int)LNET_PING_INFO_SIZE(0));
3748 if (pbuf->pb_info.pi_nnis < n_ids)
3749 n_ids = pbuf->pb_info.pi_nnis;
3751 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
3752 CERROR("%s: Short reply %d(%d expected)\n",
3754 nob, (int)LNET_PING_INFO_SIZE(n_ids));
3758 rc = -EFAULT; /* if I segv in copy_to_user()... */
3760 memset(&tmpid, 0, sizeof(tmpid));
3761 for (i = 0; i < n_ids; i++) {
3762 tmpid.pid = pbuf->pb_info.pi_pid;
3763 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
3764 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
3767 rc = pbuf->pb_info.pi_nnis;
3770 rc2 = LNetEQFree(eqh);
3772 CERROR("rc2 %d\n", rc2);
3775 fail_ping_buffer_decref:
3776 lnet_ping_buffer_decref(pbuf);
3781 lnet_discover(struct lnet_process_id id, __u32 force,
3782 struct lnet_process_id __user *ids, int n_ids)
3784 struct lnet_peer_ni *lpni;
3785 struct lnet_peer_ni *p;
3786 struct lnet_peer *lp;
3787 struct lnet_process_id *buf;
3791 int max_intf = lnet_interfaces_max;
3795 id.nid == LNET_NID_ANY)
3798 if (id.pid == LNET_PID_ANY)
3799 id.pid = LNET_PID_LUSTRE;
3802 * if the user buffer has room for more than max_intf
3803 * entries, then only fill it up to max_intf
3805 if (n_ids > max_intf)
3808 buf_size = n_ids * sizeof(*buf);
3810 LIBCFS_ALLOC(buf, buf_size);
3814 cpt = lnet_net_lock_current();
3815 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
3822 * Clearing the NIDS_UPTODATE flag ensures the peer will
3823 * be discovered, provided discovery has not been disabled.
3825 lp = lpni->lpni_peer_net->lpn_peer;
3826 spin_lock(&lp->lp_lock);
3827 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3828 /* If the force flag is set, force a PING and PUSH as well. */
3830 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
3831 spin_unlock(&lp->lp_lock);
3832 rc = lnet_discover_peer_locked(lpni, cpt, true);
3836 /* Peer may have changed. */
3837 lp = lpni->lpni_peer_net->lpn_peer;
3838 if (lp->lp_nnis < n_ids)
3839 n_ids = lp->lp_nnis;
3843 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
3844 buf[i].pid = id.pid;
3845 buf[i].nid = p->lpni_nid;
3850 lnet_net_unlock(cpt);
3853 if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
3859 lnet_peer_ni_decref_locked(lpni);
3861 lnet_net_unlock(cpt);
3863 LIBCFS_FREE(buf, buf_size);