4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LNET
35 #include <linux/ctype.h>
36 #include <linux/log2.h>
37 #include <linux/ktime.h>
38 #include <linux/moduleparam.h>
39 #include <linux/uaccess.h>
41 #include <lnet/lib-lnet.h>
43 #define D_LNI D_CONSOLE
46 * initialize ln_api_mutex statically, since it needs to be used in the
47 * discovery_set callback. That module parameter callback can be called
48 * before module init completes. The mutex needs to be ready for use then.
50 struct lnet the_lnet = {
51 .ln_api_mutex = __MUTEX_INITIALIZER(the_lnet.ln_api_mutex),
52 }; /* THE state of the network */
53 EXPORT_SYMBOL(the_lnet);
55 static char *ip2nets = "";
56 module_param(ip2nets, charp, 0444);
57 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
59 static char *networks = "";
60 module_param(networks, charp, 0444);
61 MODULE_PARM_DESC(networks, "local networks");
63 static char *routes = "";
64 module_param(routes, charp, 0444);
65 MODULE_PARM_DESC(routes, "routes to non-local networks");
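/*
 * Illustrative sketch only: these three strings are normally supplied as
 * modprobe options.  The interface names, network names and addresses below
 * are hypothetical examples, not defaults:
 *
 *	options lnet networks="tcp0(eth0),o2ib0(ib0)"
 *	options lnet routes="o2ib1 192.168.1.4@tcp0"
 *	options lnet ip2nets="tcp(eth0) 192.168.0.[2-10]"
 *
 * 'networks' and 'ip2nets' are mutually exclusive (see lnet_get_networks()
 * below); 'routes' lists gateways through which non-local networks are
 * reached.
 */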
67 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
68 module_param(rnet_htable_size, int, 0444);
69 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
71 static int use_tcp_bonding = false;
72 module_param(use_tcp_bonding, int, 0444);
73 MODULE_PARM_DESC(use_tcp_bonding,
74 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
76 unsigned int lnet_numa_range = 0;
77 module_param(lnet_numa_range, uint, 0444);
78 MODULE_PARM_DESC(lnet_numa_range,
79 "NUMA range to consider during Multi-Rail selection");
81 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
82 static int intf_max_set(const char *val, struct kernel_param *kp);
83 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
84 &lnet_interfaces_max, S_IRUGO|S_IWUSR);
85 MODULE_PARM_DESC(lnet_interfaces_max,
86 "Maximum number of interfaces in a node.");
88 unsigned lnet_peer_discovery_disabled = 0;
89 static int discovery_set(const char *val, struct kernel_param *kp);
90 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
91 &lnet_peer_discovery_disabled, S_IRUGO|S_IWUSR);
92 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
93 "Set to 1 to disable peer discovery on this node.");
95 unsigned lnet_transaction_timeout = 5;
96 module_param(lnet_transaction_timeout, uint, 0444);
97 MODULE_PARM_DESC(lnet_transaction_timeout,
98 "Time in seconds to wait for a REPLY or an ACK");
101 * This sequence number keeps track of how many times DLC was used to
102 * update the local NIs. It is incremented when a NI is added or
103 * removed and checked when sending a message to determine if there is
104 * a need to re-run the selection algorithm. See lnet_select_pathway()
105 * for more details on its usage.
107 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
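/*
 * Minimal sketch of the intended use of lnet_dlc_seq_no (illustrative only;
 * the real check lives in lnet_select_pathway()):
 *
 *	__u32 seq = atomic_read(&lnet_dlc_seq_no);
 *	... select local/peer NI for the message ...
 *	if (seq != atomic_read(&lnet_dlc_seq_no))
 *		... an NI was added or removed meanwhile: re-run selection ...
 */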
109 static int lnet_ping(struct lnet_process_id id, signed long timeout,
110 struct lnet_process_id __user *ids, int n_ids);
112 static int lnet_discover(struct lnet_process_id id, __u32 force,
113 struct lnet_process_id __user *ids, int n_ids);
116 discovery_set(const char *val, struct kernel_param *kp)
119 unsigned *discovery = (unsigned *)kp->arg;
121 struct lnet_ping_buffer *pbuf;
123 rc = kstrtoul(val, 0, &value);
125 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
129 value = (value) ? 1 : 0;
132 * The purpose of locking the api_mutex here is to ensure that
133 * the correct value ends up stored properly.
135 mutex_lock(&the_lnet.ln_api_mutex);
137 if (value == *discovery) {
138 mutex_unlock(&the_lnet.ln_api_mutex);
144 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
145 mutex_unlock(&the_lnet.ln_api_mutex);
149 /* tell peers that discovery setting has changed */
150 lnet_net_lock(LNET_LOCK_EX);
151 pbuf = the_lnet.ln_ping_target;
153 pbuf->pb_info.pi_features &= ~LNET_PING_FEAT_DISCOVERY;
155 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
156 lnet_net_unlock(LNET_LOCK_EX);
158 lnet_push_update_to_peers(1);
160 mutex_unlock(&the_lnet.ln_api_mutex);
166 intf_max_set(const char *val, struct kernel_param *kp)
170 rc = kstrtoint(val, 0, &value);
172 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
176 if (value < LNET_INTERFACES_MIN) {
177 CWARN("max interfaces provided are too small, setting to %d\n",
178 LNET_INTERFACES_MAX_DEFAULT);
179 value = LNET_INTERFACES_MAX_DEFAULT;
182 *(int *)kp->arg = value;
188 lnet_get_routes(void)
194 lnet_get_networks(void)
199 if (*networks != 0 && *ip2nets != 0) {
200 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
201 "'ip2nets' but not both at once\n");
206 rc = lnet_parse_ip2nets(&nets, ip2nets);
207 return (rc == 0) ? nets : NULL;
217 lnet_init_locks(void)
219 spin_lock_init(&the_lnet.ln_eq_wait_lock);
220 spin_lock_init(&the_lnet.ln_msg_resend_lock);
221 init_waitqueue_head(&the_lnet.ln_eq_waitq);
222 init_waitqueue_head(&the_lnet.ln_rc_waitq);
223 mutex_init(&the_lnet.ln_lnd_mutex);
227 lnet_fini_locks(void)
231 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
232 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
236 lnet_descriptor_setup(void)
238 /* create specific kmem_cache for MEs and small MDs (i.e., originally
239 * allocated in <size-xxx> kmem_cache).
241 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
243 if (!lnet_mes_cachep)
246 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
247 LNET_SMALL_MD_SIZE, 0, 0,
249 if (!lnet_small_mds_cachep)
256 lnet_descriptor_cleanup(void)
259 if (lnet_small_mds_cachep) {
260 kmem_cache_destroy(lnet_small_mds_cachep);
261 lnet_small_mds_cachep = NULL;
264 if (lnet_mes_cachep) {
265 kmem_cache_destroy(lnet_mes_cachep);
266 lnet_mes_cachep = NULL;
271 lnet_create_remote_nets_table(void)
274 struct list_head *hash;
276 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
277 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
278 LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
280 CERROR("Failed to create remote nets hash table\n");
284 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
285 INIT_LIST_HEAD(&hash[i]);
286 the_lnet.ln_remote_nets_hash = hash;
291 lnet_destroy_remote_nets_table(void)
295 if (the_lnet.ln_remote_nets_hash == NULL)
298 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
299 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
301 LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
302 LNET_REMOTE_NETS_HASH_SIZE *
303 sizeof(the_lnet.ln_remote_nets_hash[0]));
304 the_lnet.ln_remote_nets_hash = NULL;
308 lnet_destroy_locks(void)
310 if (the_lnet.ln_res_lock != NULL) {
311 cfs_percpt_lock_free(the_lnet.ln_res_lock);
312 the_lnet.ln_res_lock = NULL;
315 if (the_lnet.ln_net_lock != NULL) {
316 cfs_percpt_lock_free(the_lnet.ln_net_lock);
317 the_lnet.ln_net_lock = NULL;
324 lnet_create_locks(void)
328 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
329 if (the_lnet.ln_res_lock == NULL)
332 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
333 if (the_lnet.ln_net_lock == NULL)
339 lnet_destroy_locks();
343 static void lnet_assert_wire_constants(void)
345 /* Wire protocol assertions generated by 'wirecheck'
346 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
347 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
348 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
351 CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
352 CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
353 CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
354 CLASSERT(LNET_MSG_ACK == 0);
355 CLASSERT(LNET_MSG_PUT == 1);
356 CLASSERT(LNET_MSG_GET == 2);
357 CLASSERT(LNET_MSG_REPLY == 3);
358 CLASSERT(LNET_MSG_HELLO == 4);
360 /* Checks for struct lnet_handle_wire */
361 CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
362 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
363 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
364 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
365 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
367 /* Checks for struct lnet_magicversion */
368 CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
369 CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
370 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
371 CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
372 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
373 CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
374 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
376 /* Checks for struct lnet_hdr */
377 CLASSERT((int)sizeof(struct lnet_hdr) == 72);
378 CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
379 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
380 CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
381 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
382 CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
383 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
384 CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
385 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
386 CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
387 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
388 CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
389 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
390 CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
391 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
394 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
395 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
396 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
397 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
398 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
399 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
402 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
403 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
404 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
405 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
406 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
407 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
408 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
409 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
410 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
411 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
414 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
415 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
416 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
417 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
418 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
419 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
420 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
421 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
422 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
423 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
426 CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
427 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
430 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
431 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
432 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
433 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
435 /* Checks for struct lnet_ni_status and related constants */
436 CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
437 CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
438 CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
440 /* Checks for struct lnet_ni_status */
441 CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
442 CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
443 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
444 CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
445 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
446 CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
447 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
449 /* Checks for struct lnet_ping_info and related constants */
450 CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
451 CLASSERT(LNET_PING_FEAT_INVAL == 0);
452 CLASSERT(LNET_PING_FEAT_BASE == 1);
453 CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
454 CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
455 CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
456 CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
457 CLASSERT(LNET_PING_FEAT_BITS == 31);
459 /* Checks for struct lnet_ping_info */
460 CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
461 CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
462 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
463 CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
464 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
465 CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
466 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
467 CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
468 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
469 CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
470 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
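/*
 * Reading guide, derived entirely from the assertions above: struct
 * lnet_handle_wire packs two __u64 cookies back to back,
 *
 *	offset  0: wh_interface_cookie (8 bytes)
 *	offset  8: wh_object_cookie    (8 bytes)
 *	total     16 bytes
 *
 * and struct lnet_hdr is a 32-byte fixed part (nids, pids, type,
 * payload_length) followed by a 40-byte union of per-message bodies,
 * which is where the asserted 72-byte size comes from.
 */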
473 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
475 struct lnet_lnd *lnd;
476 struct list_head *tmp;
478 /* holding lnd mutex */
479 list_for_each(tmp, &the_lnet.ln_lnds) {
480 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
482 if (lnd->lnd_type == type)
489 lnet_register_lnd(struct lnet_lnd *lnd)
491 mutex_lock(&the_lnet.ln_lnd_mutex);
493 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
494 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
496 list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
497 lnd->lnd_refcount = 0;
499 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
501 mutex_unlock(&the_lnet.ln_lnd_mutex);
503 EXPORT_SYMBOL(lnet_register_lnd);
506 lnet_unregister_lnd(struct lnet_lnd *lnd)
508 mutex_lock(&the_lnet.ln_lnd_mutex);
510 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
511 LASSERT(lnd->lnd_refcount == 0);
513 list_del(&lnd->lnd_list);
514 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
516 mutex_unlock(&the_lnet.ln_lnd_mutex);
518 EXPORT_SYMBOL(lnet_unregister_lnd);
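/*
 * Illustrative sketch of how an LND module is expected to use the two calls
 * above from its module init/exit hooks (the names below are hypothetical;
 * real LNDs such as the socklnd do the equivalent):
 *
 *	static int __init example_lnd_init(void)
 *	{
 *		lnet_register_lnd(&example_lnd);
 *		return 0;
 *	}
 *
 *	static void __exit example_lnd_exit(void)
 *	{
 *		lnet_unregister_lnd(&example_lnd);
 *	}
 */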
521 lnet_counters_get(struct lnet_counters *counters)
523 struct lnet_counters *ctr;
526 memset(counters, 0, sizeof(*counters));
528 lnet_net_lock(LNET_LOCK_EX);
530 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
531 counters->msgs_max += ctr->msgs_max;
532 counters->msgs_alloc += ctr->msgs_alloc;
533 counters->errors += ctr->errors;
534 counters->send_count += ctr->send_count;
535 counters->recv_count += ctr->recv_count;
536 counters->route_count += ctr->route_count;
537 counters->drop_count += ctr->drop_count;
538 counters->send_length += ctr->send_length;
539 counters->recv_length += ctr->recv_length;
540 counters->route_length += ctr->route_length;
541 counters->drop_length += ctr->drop_length;
544 lnet_net_unlock(LNET_LOCK_EX);
546 EXPORT_SYMBOL(lnet_counters_get);
549 lnet_counters_reset(void)
551 struct lnet_counters *counters;
554 lnet_net_lock(LNET_LOCK_EX);
556 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
557 memset(counters, 0, sizeof(struct lnet_counters));
559 lnet_net_unlock(LNET_LOCK_EX);
563 lnet_res_type2str(int type)
568 case LNET_COOKIE_TYPE_MD:
570 case LNET_COOKIE_TYPE_ME:
572 case LNET_COOKIE_TYPE_EQ:
578 lnet_res_container_cleanup(struct lnet_res_container *rec)
582 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
585 while (!list_empty(&rec->rec_active)) {
586 struct list_head *e = rec->rec_active.next;
589 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
590 lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
592 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
593 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
595 } else { /* NB: Active MEs should be attached on portals */
602 /* Found live MD/ME/EQ: the user really should unlink/free
603 * all of them before finalizing LNet, but if someone didn't,
604 * we have to recycle the garbage for them */
605 CERROR("%d active elements on exit of %s container\n",
606 count, lnet_res_type2str(rec->rec_type));
609 if (rec->rec_lh_hash != NULL) {
610 LIBCFS_FREE(rec->rec_lh_hash,
611 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
612 rec->rec_lh_hash = NULL;
615 rec->rec_type = 0; /* mark it as finalized */
619 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
624 LASSERT(rec->rec_type == 0);
626 rec->rec_type = type;
627 INIT_LIST_HEAD(&rec->rec_active);
629 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
631 /* Arbitrary choice of hash table size */
632 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
633 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
634 if (rec->rec_lh_hash == NULL) {
639 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
640 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
645 CERROR("Failed to setup %s resource container\n",
646 lnet_res_type2str(type));
647 lnet_res_container_cleanup(rec);
652 lnet_res_containers_destroy(struct lnet_res_container **recs)
654 struct lnet_res_container *rec;
657 cfs_percpt_for_each(rec, i, recs)
658 lnet_res_container_cleanup(rec);
660 cfs_percpt_free(recs);
663 static struct lnet_res_container **
664 lnet_res_containers_create(int type)
666 struct lnet_res_container **recs;
667 struct lnet_res_container *rec;
671 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
673 CERROR("Failed to allocate %s resource containers\n",
674 lnet_res_type2str(type));
678 cfs_percpt_for_each(rec, i, recs) {
679 rc = lnet_res_container_setup(rec, i, type);
681 lnet_res_containers_destroy(recs);
689 struct lnet_libhandle *
690 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
692 /* ALWAYS called with lnet_res_lock held */
693 struct list_head *head;
694 struct lnet_libhandle *lh;
697 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
700 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
701 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
703 list_for_each_entry(lh, head, lh_hash_chain) {
704 if (lh->lh_cookie == cookie)
712 lnet_res_lh_initialize(struct lnet_res_container *rec,
713 struct lnet_libhandle *lh)
715 /* ALWAYS called with lnet_res_lock held */
716 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
719 lh->lh_cookie = rec->rec_lh_cookie;
720 rec->rec_lh_cookie += 1 << ibits;
722 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
724 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
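/*
 * Cookie layout, as a reading of the code above (not a wire-format
 * guarantee): the low LNET_COOKIE_TYPE_BITS carry the resource type
 * (MD/ME/EQ), the next LNET_CPT_BITS carry the CPT the container lives on,
 * and the remaining high bits are a per-container counter bumped by
 * 1 << ibits for every handle initialized here.  lnet_res_lh_lookup()
 * reverses this: it compares the type bits against rec->rec_type and uses
 * (cookie >> ibits) & LNET_LH_HASH_MASK to pick the hash chain.
 */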
727 static int lnet_unprepare(void);
730 lnet_prepare(lnet_pid_t requested_pid)
732 /* Prepare to bring up the network */
733 struct lnet_res_container **recs;
736 if (requested_pid == LNET_PID_ANY) {
737 /* Don't instantiate LNET just for me */
741 LASSERT(the_lnet.ln_refcount == 0);
743 the_lnet.ln_routing = 0;
745 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
746 the_lnet.ln_pid = requested_pid;
748 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
749 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
750 INIT_LIST_HEAD(&the_lnet.ln_nets);
751 INIT_LIST_HEAD(&the_lnet.ln_routers);
752 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
753 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
754 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
755 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
756 INIT_LIST_HEAD(&the_lnet.ln_dc_expired);
757 init_waitqueue_head(&the_lnet.ln_dc_waitq);
759 rc = lnet_descriptor_setup();
763 rc = lnet_create_remote_nets_table();
768 * NB the interface cookie in wire handles guards against delayed
769 * replies and ACKs appearing valid after reboot.
771 the_lnet.ln_interface_cookie = ktime_get_real_ns();
773 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
774 sizeof(struct lnet_counters));
775 if (the_lnet.ln_counters == NULL) {
776 CERROR("Failed to allocate counters for LNet\n");
781 rc = lnet_peer_tables_create();
785 rc = lnet_msg_containers_create();
789 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
790 LNET_COOKIE_TYPE_EQ);
794 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
800 the_lnet.ln_me_containers = recs;
802 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
808 the_lnet.ln_md_containers = recs;
810 rc = lnet_portals_create();
812 CERROR("Failed to create portals for LNet: %d\n", rc);
824 lnet_unprepare (void)
826 /* NB no LNET_LOCK since this is the last reference. All LND instances
827 * have shut down already, so it is safe to unlink and free all
828 * descriptors, even those that appear committed to a network op (eg MD
829 * with non-zero pending count) */
831 lnet_fail_nid(LNET_NID_ANY, 0);
833 LASSERT(the_lnet.ln_refcount == 0);
834 LASSERT(list_empty(&the_lnet.ln_test_peers));
835 LASSERT(list_empty(&the_lnet.ln_nets));
837 lnet_portals_destroy();
839 if (the_lnet.ln_md_containers != NULL) {
840 lnet_res_containers_destroy(the_lnet.ln_md_containers);
841 the_lnet.ln_md_containers = NULL;
844 if (the_lnet.ln_me_containers != NULL) {
845 lnet_res_containers_destroy(the_lnet.ln_me_containers);
846 the_lnet.ln_me_containers = NULL;
849 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
851 lnet_msg_containers_destroy();
853 lnet_rtrpools_free(0);
855 if (the_lnet.ln_counters != NULL) {
856 cfs_percpt_free(the_lnet.ln_counters);
857 the_lnet.ln_counters = NULL;
859 lnet_destroy_remote_nets_table();
860 lnet_descriptor_cleanup();
866 lnet_net2ni_locked(__u32 net_id, int cpt)
869 struct lnet_net *net;
871 LASSERT(cpt != LNET_LOCK_EX);
873 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
874 if (net->net_id == net_id) {
875 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
885 lnet_net2ni_addref(__u32 net)
890 ni = lnet_net2ni_locked(net, 0);
892 lnet_ni_addref_locked(ni, 0);
897 EXPORT_SYMBOL(lnet_net2ni_addref);
900 lnet_get_net_locked(__u32 net_id)
902 struct lnet_net *net;
904 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
905 if (net->net_id == net_id)
913 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
918 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
923 val = hash_long(key, LNET_CPT_BITS);
924 /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
928 return (unsigned int)(key + val + (val >> 1)) % number;
932 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
934 struct lnet_net *net;
936 /* must be called with lnet_net_lock held */
937 if (LNET_CPT_NUMBER == 1)
938 return 0; /* the only one */
941 * If NI is provided then use the CPT identified in the NI cpt
942 * list if one exists. If one doesn't exist, then that NI is
943 * associated with all CPTs and it follows that the net it belongs
944 * to is implicitly associated with all CPTs, so just hash the nid
948 if (ni->ni_cpts != NULL)
949 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
952 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
955 /* no NI provided so look at the net */
956 net = lnet_get_net_locked(LNET_NIDNET(nid));
958 if (net != NULL && net->net_cpts != NULL) {
959 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
962 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
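/*
 * Illustrative example with hypothetical values: with LNET_CPT_NUMBER == 4
 * and an NI bound to ni_cpts = {1, 3}, a NID hashing to 0 selects CPT 1 and
 * one hashing to 1 selects CPT 3; an NI with no CPT list (or no NI at all
 * and no net_cpts) is simply hashed across all 4 CPTs.
 */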
966 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
971 if (LNET_CPT_NUMBER == 1)
972 return 0; /* the only one */
974 cpt = lnet_net_lock_current();
976 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
978 lnet_net_unlock(cpt);
982 EXPORT_SYMBOL(lnet_cpt_of_nid);
985 lnet_islocalnet(__u32 net_id)
987 struct lnet_net *net;
991 cpt = lnet_net_lock_current();
993 net = lnet_get_net_locked(net_id);
997 lnet_net_unlock(cpt);
1003 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
1005 if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
1006 ni->ni_state == LNET_NI_STATE_DEGRADED)
1013 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
1015 struct lnet_net *net;
1018 LASSERT(cpt != LNET_LOCK_EX);
1020 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1021 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1022 if (ni->ni_nid == nid)
1031 lnet_nid2ni_addref(lnet_nid_t nid)
1036 ni = lnet_nid2ni_locked(nid, 0);
1038 lnet_ni_addref_locked(ni, 0);
1043 EXPORT_SYMBOL(lnet_nid2ni_addref);
1046 lnet_islocalnid(lnet_nid_t nid)
1051 cpt = lnet_net_lock_current();
1052 ni = lnet_nid2ni_locked(nid, cpt);
1053 lnet_net_unlock(cpt);
1059 lnet_count_acceptor_nets(void)
1061 /* Return the # of NIs that need the acceptor. */
1063 struct lnet_net *net;
1066 cpt = lnet_net_lock_current();
1067 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1068 /* all socklnd type networks should have the acceptor thread started */
1070 if (net->net_lnd->lnd_accept != NULL)
1074 lnet_net_unlock(cpt);
1079 struct lnet_ping_buffer *
1080 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1082 struct lnet_ping_buffer *pbuf;
1084 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1086 pbuf->pb_nnis = nnis;
1087 atomic_set(&pbuf->pb_refcnt, 1);
1094 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1096 LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
1097 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1100 static struct lnet_ping_buffer *
1101 lnet_ping_target_create(int nnis)
1103 struct lnet_ping_buffer *pbuf;
1105 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1107 CERROR("Can't allocate ping source [%d]\n", nnis);
1111 pbuf->pb_info.pi_nnis = nnis;
1112 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1113 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1114 pbuf->pb_info.pi_features =
1115 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1121 lnet_get_net_ni_count_locked(struct lnet_net *net)
1126 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1133 lnet_get_net_ni_count_pre(struct lnet_net *net)
1138 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1145 lnet_get_ni_count(void)
1148 struct lnet_net *net;
1153 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1154 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1164 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1168 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1170 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1172 /* Loopback is guaranteed to be present */
1173 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1175 if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1181 lnet_ping_target_destroy(void)
1183 struct lnet_net *net;
1186 lnet_net_lock(LNET_LOCK_EX);
1188 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1189 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1191 ni->ni_status = NULL;
1196 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1197 the_lnet.ln_ping_target = NULL;
1199 lnet_net_unlock(LNET_LOCK_EX);
1203 lnet_ping_target_event_handler(struct lnet_event *event)
1205 struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1207 if (event->unlinked)
1208 lnet_ping_buffer_decref(pbuf);
1212 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1213 struct lnet_handle_md *ping_mdh,
1214 int ni_count, bool set_eq)
1216 struct lnet_process_id id = {
1217 .nid = LNET_NID_ANY,
1220 struct lnet_handle_me me_handle;
1221 struct lnet_md md = { NULL };
1225 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1226 &the_lnet.ln_ping_target_eq);
1228 CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1233 *ppbuf = lnet_ping_target_create(ni_count);
1234 if (*ppbuf == NULL) {
1239 /* Ping target ME/MD */
1240 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1241 LNET_PROTO_PING_MATCHBITS, 0,
1242 LNET_UNLINK, LNET_INS_AFTER,
1245 CERROR("Can't create ping target ME: %d\n", rc);
1246 goto fail_decref_ping_buffer;
1249 /* initialize md content */
1250 md.start = &(*ppbuf)->pb_info;
1251 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1252 md.threshold = LNET_MD_THRESH_INF;
1254 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1255 LNET_MD_MANAGE_REMOTE;
1256 md.eq_handle = the_lnet.ln_ping_target_eq;
1257 md.user_ptr = *ppbuf;
1259 rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1261 CERROR("Can't attach ping target MD: %d\n", rc);
1262 goto fail_unlink_ping_me;
1264 lnet_ping_buffer_addref(*ppbuf);
1268 fail_unlink_ping_me:
1269 rc2 = LNetMEUnlink(me_handle);
1271 fail_decref_ping_buffer:
1272 LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1273 lnet_ping_buffer_decref(*ppbuf);
1277 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1284 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1285 struct lnet_handle_md *ping_mdh)
1287 sigset_t blocked = cfs_block_allsigs();
1289 LNetMDUnlink(*ping_mdh);
1290 LNetInvalidateMDHandle(ping_mdh);
1292 /* NB the MD could be busy; this just starts the unlink */
1293 while (lnet_ping_buffer_numref(pbuf) > 1) {
1294 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1295 set_current_state(TASK_UNINTERRUPTIBLE);
1296 schedule_timeout(cfs_time_seconds(1));
1299 cfs_restore_sigs(blocked);
1303 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1306 struct lnet_net *net;
1307 struct lnet_ni_status *ns;
1312 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1313 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1314 LASSERT(i < pbuf->pb_nnis);
1316 ns = &pbuf->pb_info.pi_ni[i];
1318 ns->ns_nid = ni->ni_nid;
1321 ns->ns_status = (ni->ni_status != NULL) ?
1322 ni->ni_status->ns_status :
1331 * We (ab)use the ns_status of the loopback interface to
1332 * transmit the sequence number. The first interface listed
1333 * must be the loopback interface.
1335 rc = lnet_ping_info_validate(&pbuf->pb_info);
1337 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1340 LNET_PING_BUFFER_SEQNO(pbuf) =
1341 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
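/*
 * Note: given the comment above, LNET_PING_BUFFER_SEQNO() is understood to
 * live in the ns_status slot of pi_ni[0], the loopback entry, which is why
 * lnet_ping_info_validate() insists that the first interface is loopback.
 */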
1345 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1346 struct lnet_handle_md ping_mdh)
1348 struct lnet_ping_buffer *old_pbuf = NULL;
1349 struct lnet_handle_md old_ping_md;
1351 /* switch the NIs to point to the new ping info created */
1352 lnet_net_lock(LNET_LOCK_EX);
1354 if (!the_lnet.ln_routing)
1355 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1356 if (!lnet_peer_discovery_disabled)
1357 pbuf->pb_info.pi_features |= LNET_PING_FEAT_DISCOVERY;
1359 /* Ensure only known feature bits have been set. */
1360 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1361 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1363 lnet_ping_target_install_locked(pbuf);
1365 if (the_lnet.ln_ping_target) {
1366 old_pbuf = the_lnet.ln_ping_target;
1367 old_ping_md = the_lnet.ln_ping_target_md;
1369 the_lnet.ln_ping_target_md = ping_mdh;
1370 the_lnet.ln_ping_target = pbuf;
1372 lnet_net_unlock(LNET_LOCK_EX);
1375 /* unlink and free the old ping info */
1376 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1377 lnet_ping_buffer_decref(old_pbuf);
1380 lnet_push_update_to_peers(0);
1384 lnet_ping_target_fini(void)
1388 lnet_ping_md_unlink(the_lnet.ln_ping_target,
1389 &the_lnet.ln_ping_target_md);
1391 rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1394 lnet_ping_target_destroy();
1397 /* Resize the push target. */
1398 int lnet_push_target_resize(void)
1400 struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
1401 struct lnet_md md = { NULL };
1402 struct lnet_handle_me meh;
1403 struct lnet_handle_md mdh;
1404 struct lnet_handle_md old_mdh;
1405 struct lnet_ping_buffer *pbuf;
1406 struct lnet_ping_buffer *old_pbuf;
1407 int nnis = the_lnet.ln_push_target_nnis;
1415 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1421 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1422 LNET_PROTO_PING_MATCHBITS, 0,
1423 LNET_UNLINK, LNET_INS_AFTER,
1426 CERROR("Can't create push target ME: %d\n", rc);
1427 goto fail_decref_pbuf;
1430 /* initialize md content */
1431 md.start = &pbuf->pb_info;
1432 md.length = LNET_PING_INFO_SIZE(nnis);
1433 md.threshold = LNET_MD_THRESH_INF;
1435 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
1436 LNET_MD_MANAGE_REMOTE;
1438 md.eq_handle = the_lnet.ln_push_target_eq;
1440 rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
1442 CERROR("Can't attach push MD: %d\n", rc);
1443 goto fail_unlink_meh;
1445 lnet_ping_buffer_addref(pbuf);
1447 lnet_net_lock(LNET_LOCK_EX);
1448 old_pbuf = the_lnet.ln_push_target;
1449 old_mdh = the_lnet.ln_push_target_md;
1450 the_lnet.ln_push_target = pbuf;
1451 the_lnet.ln_push_target_md = mdh;
1452 lnet_net_unlock(LNET_LOCK_EX);
1455 LNetMDUnlink(old_mdh);
1456 lnet_ping_buffer_decref(old_pbuf);
1459 if (nnis < the_lnet.ln_push_target_nnis)
1462 CDEBUG(D_NET, "nnis %d success\n", nnis);
1469 lnet_ping_buffer_decref(pbuf);
1471 CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
1475 static void lnet_push_target_event_handler(struct lnet_event *ev)
1477 struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1479 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
1480 lnet_swap_pinginfo(pbuf);
1482 lnet_peer_push_event(ev);
1484 lnet_ping_buffer_decref(pbuf);
1487 /* Initialize the push target. */
1488 static int lnet_push_target_init(void)
1492 if (the_lnet.ln_push_target)
1495 rc = LNetEQAlloc(0, lnet_push_target_event_handler,
1496 &the_lnet.ln_push_target_eq);
1498 CERROR("Can't allocated push target EQ: %d\n", rc);
1502 /* Start at the required minimum; we'll enlarge if needed. */
1503 the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
1505 rc = lnet_push_target_resize();
1508 LNetEQFree(the_lnet.ln_push_target_eq);
1509 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1515 /* Clean up the push target. */
1516 static void lnet_push_target_fini(void)
1518 if (!the_lnet.ln_push_target)
1521 /* Unlink and invalidate to prevent new references. */
1522 LNetMDUnlink(the_lnet.ln_push_target_md);
1523 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
1525 /* Wait for the unlink to complete. */
1526 while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
1527 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1528 set_current_state(TASK_UNINTERRUPTIBLE);
1529 schedule_timeout(cfs_time_seconds(1));
1532 lnet_ping_buffer_decref(the_lnet.ln_push_target);
1533 the_lnet.ln_push_target = NULL;
1534 the_lnet.ln_push_target_nnis = 0;
1536 LNetEQFree(the_lnet.ln_push_target_eq);
1537 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1541 lnet_ni_tq_credits(struct lnet_ni *ni)
1545 LASSERT(ni->ni_ncpts >= 1);
1547 if (ni->ni_ncpts == 1)
1548 return ni->ni_net->net_tunables.lct_max_tx_credits;
1550 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1551 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1552 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
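/*
 * Worked example with made-up tunable values: lct_max_tx_credits = 256,
 * ni_ncpts = 4 and lct_peer_tx_credits = 8 give 256 / 4 = 64 credits per
 * queue, max(64, 8 * 8) = 64, min(64, 256) = 64; i.e. the per-CPT share,
 * never less than 8x the per-peer credits and never more than the NI total.
 */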
1558 lnet_ni_unlink_locked(struct lnet_ni *ni)
1560 if (!list_empty(&ni->ni_cptlist)) {
1561 list_del_init(&ni->ni_cptlist);
1562 lnet_ni_decref_locked(ni, 0);
1565 /* move it to the zombie list where nobody can find it anymore */
1566 LASSERT(!list_empty(&ni->ni_netlist));
1567 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1568 lnet_ni_decref_locked(ni, 0);
1572 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1577 struct list_head *zombie_list = &net->net_ni_zombie;
1580 * Now wait for the NIs I just nuked to show up on the zombie
1581 * list and shut them down in guaranteed thread context
1584 while (!list_empty(zombie_list)) {
1588 ni = list_entry(zombie_list->next,
1589 struct lnet_ni, ni_netlist);
1590 list_del_init(&ni->ni_netlist);
1591 /* the NI should be in the deleting state; if it's not, it's a bug */
1593 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
1594 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1597 /* still busy, add it back to zombie list */
1598 list_add(&ni->ni_netlist, zombie_list);
1602 if (!list_empty(&ni->ni_netlist)) {
1603 lnet_net_unlock(LNET_LOCK_EX);
1605 if ((i & (-i)) == i) {
1607 "Waiting for zombie LNI %s\n",
1608 libcfs_nid2str(ni->ni_nid));
1610 set_current_state(TASK_UNINTERRUPTIBLE);
1611 schedule_timeout(cfs_time_seconds(1));
1612 lnet_net_lock(LNET_LOCK_EX);
1616 lnet_net_unlock(LNET_LOCK_EX);
1618 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1620 LASSERT(!in_interrupt());
1621 (net->net_lnd->lnd_shutdown)(ni);
1624 CDEBUG(D_LNI, "Removed LNI %s\n",
1625 libcfs_nid2str(ni->ni_nid));
1629 lnet_net_lock(LNET_LOCK_EX);
1633 /* shut down the NI and release its refcount */
1635 lnet_shutdown_lndni(struct lnet_ni *ni)
1638 struct lnet_net *net = ni->ni_net;
1640 lnet_net_lock(LNET_LOCK_EX);
1641 ni->ni_state = LNET_NI_STATE_DELETING;
1642 lnet_ni_unlink_locked(ni);
1643 lnet_incr_dlc_seq();
1644 lnet_net_unlock(LNET_LOCK_EX);
1646 /* clear messages for this NI on the lazy portal */
1647 for (i = 0; i < the_lnet.ln_nportals; i++)
1648 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1650 lnet_net_lock(LNET_LOCK_EX);
1651 lnet_clear_zombies_nis_locked(net);
1652 lnet_net_unlock(LNET_LOCK_EX);
1656 lnet_shutdown_lndnet(struct lnet_net *net)
1660 lnet_net_lock(LNET_LOCK_EX);
1662 net->net_state = LNET_NET_STATE_DELETING;
1664 list_del_init(&net->net_list);
1666 while (!list_empty(&net->net_ni_list)) {
1667 ni = list_entry(net->net_ni_list.next,
1668 struct lnet_ni, ni_netlist);
1669 lnet_net_unlock(LNET_LOCK_EX);
1670 lnet_shutdown_lndni(ni);
1671 lnet_net_lock(LNET_LOCK_EX);
1674 lnet_net_unlock(LNET_LOCK_EX);
1676 /* Do peer table cleanup for this net */
1677 lnet_peer_tables_cleanup(net);
1679 lnet_net_lock(LNET_LOCK_EX);
1681 * decrement ref count on lnd only when the entire network goes
1684 net->net_lnd->lnd_refcount--;
1686 lnet_net_unlock(LNET_LOCK_EX);
1692 lnet_shutdown_lndnets(void)
1694 struct lnet_net *net;
1695 struct list_head resend;
1696 struct lnet_msg *msg, *tmp;
1698 INIT_LIST_HEAD(&resend);
1700 /* NB called holding the global mutex */
1702 /* All quiet on the API front */
1703 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1704 LASSERT(the_lnet.ln_refcount == 0);
1706 lnet_net_lock(LNET_LOCK_EX);
1707 the_lnet.ln_state = LNET_STATE_STOPPING;
1709 while (!list_empty(&the_lnet.ln_nets)) {
1711 * move the nets to the zombie list to avoid them being
1712 * picked up for new work. LONET is also included in the
1713 * Nets that will be moved to the zombie list
1715 net = list_entry(the_lnet.ln_nets.next,
1716 struct lnet_net, net_list);
1717 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1720 /* Drop the cached loopback Net. */
1721 if (the_lnet.ln_loni != NULL) {
1722 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1723 the_lnet.ln_loni = NULL;
1725 lnet_net_unlock(LNET_LOCK_EX);
1727 /* iterate through the net zombie list and delete each net */
1728 while (!list_empty(&the_lnet.ln_net_zombie)) {
1729 net = list_entry(the_lnet.ln_net_zombie.next,
1730 struct lnet_net, net_list);
1731 lnet_shutdown_lndnet(net);
1734 spin_lock(&the_lnet.ln_msg_resend_lock);
1735 list_splice(&the_lnet.ln_msg_resend, &resend);
1736 spin_unlock(&the_lnet.ln_msg_resend_lock);
1738 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
1739 list_del_init(&msg->msg_list);
1740 lnet_finalize(msg, -ECANCELED);
1743 lnet_net_lock(LNET_LOCK_EX);
1744 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1745 lnet_net_unlock(LNET_LOCK_EX);
1749 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
1752 struct lnet_tx_queue *tq;
1754 struct lnet_net *net = ni->ni_net;
1756 mutex_lock(&the_lnet.ln_lnd_mutex);
1759 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
1760 ni->ni_lnd_tunables_set = true;
1763 rc = (net->net_lnd->lnd_startup)(ni);
1765 mutex_unlock(&the_lnet.ln_lnd_mutex);
1768 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1769 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
1770 lnet_net_lock(LNET_LOCK_EX);
1771 net->net_lnd->lnd_refcount--;
1772 lnet_net_unlock(LNET_LOCK_EX);
1776 ni->ni_state = LNET_NI_STATE_ACTIVE;
1778 /* We keep a reference on the loopback net through the loopback NI */
1779 if (net->net_lnd->lnd_type == LOLND) {
1781 LASSERT(the_lnet.ln_loni == NULL);
1782 the_lnet.ln_loni = ni;
1783 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
1784 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
1785 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
1786 ni->ni_net->net_tunables.lct_peer_timeout = 0;
1790 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
1791 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
1792 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1793 libcfs_lnd2str(net->net_lnd->lnd_type),
1794 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
1796 /* shut down the NI since if we get here then it must've already been started */
1799 lnet_shutdown_lndni(ni);
1803 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1804 tq->tq_credits_min =
1805 tq->tq_credits_max =
1806 tq->tq_credits = lnet_ni_tq_credits(ni);
1809 atomic_set(&ni->ni_tx_credits,
1810 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
1812 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1813 libcfs_nid2str(ni->ni_nid),
1814 ni->ni_net->net_tunables.lct_peer_tx_credits,
1815 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1816 ni->ni_net->net_tunables.lct_peer_rtr_credits,
1817 ni->ni_net->net_tunables.lct_peer_timeout);
1826 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
1829 struct lnet_net *net_l = NULL;
1830 struct list_head local_ni_list;
1834 struct lnet_lnd *lnd;
1836 net->net_tunables.lct_peer_timeout;
1838 net->net_tunables.lct_max_tx_credits;
1839 int peerrtrcredits =
1840 net->net_tunables.lct_peer_rtr_credits;
1842 INIT_LIST_HEAD(&local_ni_list);
1845 * make sure that this net is unique. If it isn't then
1846 * we are adding interfaces to an already existing network, and
1847 * 'net' is just a convenient way to pass in the list.
1848 * If it is unique we need to find the LND and load it if necessary.
1851 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
1852 lnd_type = LNET_NETTYP(net->net_id);
1854 mutex_lock(&the_lnet.ln_lnd_mutex);
1855 lnd = lnet_find_lnd_by_type(lnd_type);
1858 mutex_unlock(&the_lnet.ln_lnd_mutex);
1859 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1860 mutex_lock(&the_lnet.ln_lnd_mutex);
1862 lnd = lnet_find_lnd_by_type(lnd_type);
1864 mutex_unlock(&the_lnet.ln_lnd_mutex);
1865 CERROR("Can't load LND %s, module %s, rc=%d\n",
1866 libcfs_lnd2str(lnd_type),
1867 libcfs_lnd2modname(lnd_type), rc);
1868 #ifndef HAVE_MODULE_LOADING_SUPPORT
1869 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
1870 "compiled with kernel module "
1871 "loading support.");
1878 lnet_net_lock(LNET_LOCK_EX);
1879 lnd->lnd_refcount++;
1880 lnet_net_unlock(LNET_LOCK_EX);
1884 mutex_unlock(&the_lnet.ln_lnd_mutex);
1890 * net_l: if the network being added is unique then net_l
1891 * will point to that network
1892 * if the network being added is not unique then
1893 * net_l points to the existing network.
1895 * When we enter the loop below, we'll pick NIs off the
1896 * network being added and start them up, then add them to
1897 * a local ni list. Once we've successfully started all
1898 * the NIs then we join the local NI list (of started up
1899 * networks) with the net_l->net_ni_list, which should
1900 * point to the correct network to add the new ni list to
1902 * If any of the new NIs fail to start up, then we want to
1903 * iterate through the local ni list, which should include
1904 * any NIs which were successfully started up, and shut
1907 * After that we want to delete the network being added,
1908 * to avoid a memory leak.
1912 * When a network uses TCP bonding then all its interfaces
1913 * must be specified when the network is first defined: the
1914 * TCP bonding code doesn't allow for interfaces to be added
1917 if (net_l != net && net_l != NULL && use_tcp_bonding &&
1918 LNET_NETTYP(net_l->net_id) == SOCKLND) {
1923 while (!list_empty(&net->net_ni_added)) {
1924 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
1926 list_del_init(&ni->ni_netlist);
1928 /* make sure that the NI we're about to start
1929 * up is actually unique. If it's not, fail. */
1930 if (!lnet_ni_unique_net(&net_l->net_ni_list,
1931 ni->ni_interfaces[0])) {
1936 /* adjust the pointer to the parent network, just in
1937 * case the net is a duplicate */
1940 rc = lnet_startup_lndni(ni, tun);
1942 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
1943 ni->ni_net->net_lnd->lnd_query != NULL);
1949 list_add_tail(&ni->ni_netlist, &local_ni_list);
1954 lnet_net_lock(LNET_LOCK_EX);
1955 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
1956 lnet_incr_dlc_seq();
1957 lnet_net_unlock(LNET_LOCK_EX);
1959 /* if the network is not unique then we don't want to keep
1960 * it around after we're done. Free it. Otherwise add that
1961 * net to the global the_lnet.ln_nets */
1962 if (net_l != net && net_l != NULL) {
1964 * TODO - note. currently the tunables can not be updated
1969 net->net_state = LNET_NET_STATE_ACTIVE;
1971 * restore tunables after they have been overwritten by the
1974 if (peer_timeout != -1)
1975 net->net_tunables.lct_peer_timeout = peer_timeout;
1976 if (maxtxcredits != -1)
1977 net->net_tunables.lct_max_tx_credits = maxtxcredits;
1978 if (peerrtrcredits != -1)
1979 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
1981 lnet_net_lock(LNET_LOCK_EX);
1982 list_add_tail(&net->net_list, &the_lnet.ln_nets);
1983 lnet_net_unlock(LNET_LOCK_EX);
1990 * shutdown the new NIs that are being started up
1991 * free the NET being started
1993 while (!list_empty(&local_ni_list)) {
1994 ni = list_entry(local_ni_list.next, struct lnet_ni,
1997 lnet_shutdown_lndni(ni);
2007 lnet_startup_lndnets(struct list_head *netlist)
2009 struct lnet_net *net;
2014 * Change to running state before bringing up the LNDs. This
2015 * allows lnet_shutdown_lndnets() to assert that we've passed
2018 lnet_net_lock(LNET_LOCK_EX);
2019 the_lnet.ln_state = LNET_STATE_RUNNING;
2020 lnet_net_unlock(LNET_LOCK_EX);
2022 while (!list_empty(netlist)) {
2023 net = list_entry(netlist->next, struct lnet_net, net_list);
2024 list_del_init(&net->net_list);
2026 rc = lnet_startup_lndnet(net, NULL);
2036 lnet_shutdown_lndnets();
2042 * Initialize LNet library.
2044 * Automatically called at module loading time. Caller has to call
2045 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
2046 * latter returned 0. It must be called exactly once.
2048 * \retval 0 on success
2049 * \retval -ve on failures.
2051 int lnet_lib_init(void)
2055 lnet_assert_wire_constants();
2057 /* refer to global cfs_cpt_table for now */
2058 the_lnet.ln_cpt_table = cfs_cpt_table;
2059 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
2061 LASSERT(the_lnet.ln_cpt_number > 0);
2062 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
2063 /* we are at risk of consuming all lh_cookie values */
2064 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
2065 "please change setting of CPT-table and retry\n",
2066 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2070 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2071 the_lnet.ln_cpt_bits++;
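/*
 * Example with a hypothetical CPT count: since the_lnet is static,
 * ln_cpt_bits starts at 0, so with ln_cpt_number == 6 the loop above stops
 * at ln_cpt_bits == 3 (1 << 2 == 4 < 6, while 1 << 3 == 8 >= 6).
 */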
2073 rc = lnet_create_locks();
2075 CERROR("Can't create LNet global locks: %d\n", rc);
2079 the_lnet.ln_refcount = 0;
2080 LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
2081 INIT_LIST_HEAD(&the_lnet.ln_lnds);
2082 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2083 INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
2084 INIT_LIST_HEAD(&the_lnet.ln_msg_resend);
2085 INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
2087 /* The hash table size is the number of bits it takes to express the set
2088 * ln_num_routes, minus 1 (better to underestimate than overestimate so we
2089 * don't waste memory). */
2090 if (rnet_htable_size <= 0)
2091 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2092 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2093 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2094 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2095 order_base_2(rnet_htable_size) - 1);
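/*
 * Example, assuming the module parameter is left at a power-of-two value
 * such as 128: order_base_2(128) == 7, so ln_remote_nets_hbits becomes
 * max(1, 7 - 1) == 6, i.e. a 64-bucket table -- deliberately one bit fewer
 * than requested, per the comment above.
 */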
2097 /* All LNDs apart from the LOLND are in separate modules. They
2098 * register themselves when their module loads, and unregister
2099 * themselves when their module is unloaded. */
2100 lnet_register_lnd(&the_lolnd);
2105 * Finalize LNet library.
2107 * \pre lnet_lib_init() called with success.
2108 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2110 void lnet_lib_exit(void)
2112 LASSERT(the_lnet.ln_refcount == 0);
2114 while (!list_empty(&the_lnet.ln_lnds))
2115 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
2116 struct lnet_lnd, lnd_list));
2117 lnet_destroy_locks();
2121 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2123 * Users must call this function at least once before any other functions.
2124 * For each successful call there must be a corresponding call to
2125 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2128 * The PID used by LNet may be different from the one requested.
2131 * \param requested_pid PID requested by the caller.
2133 * \return >= 0 on success, and < 0 error code on failures.
2136 LNetNIInit(lnet_pid_t requested_pid)
2138 int im_a_router = 0;
2141 struct lnet_ping_buffer *pbuf;
2142 struct lnet_handle_md ping_mdh;
2143 struct list_head net_head;
2144 struct lnet_net *net;
2146 INIT_LIST_HEAD(&net_head);
2148 mutex_lock(&the_lnet.ln_api_mutex);
2150 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2152 if (the_lnet.ln_refcount > 0) {
2153 rc = the_lnet.ln_refcount++;
2154 mutex_unlock(&the_lnet.ln_api_mutex);
2158 rc = lnet_prepare(requested_pid);
2160 mutex_unlock(&the_lnet.ln_api_mutex);
2164 /* create a net for the loopback network */
2165 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2168 goto err_empty_list;
2171 /* Add in the loopback NI */
2172 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2174 goto err_empty_list;
2177 /* If LNet is being initialized via DLC it is possible
2178 * that the user requests not to load module parameters (ones which
2179 * are supported by DLC) on initialization. Therefore, make sure not
2180 * to load networks, routes and forwarding from module parameters
2181 * in this case. On cleanup in case of failure only clean up
2182 * routes if they have been loaded */
2183 if (!the_lnet.ln_nis_from_mod_params) {
2184 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2187 goto err_empty_list;
2190 ni_count = lnet_startup_lndnets(&net_head);
2193 goto err_empty_list;
2196 if (!the_lnet.ln_nis_from_mod_params) {
2197 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2199 goto err_shutdown_lndnis;
2201 rc = lnet_check_routes();
2203 goto err_destroy_routes;
2205 rc = lnet_rtrpools_alloc(im_a_router);
2207 goto err_destroy_routes;
2210 rc = lnet_acceptor_start();
2212 goto err_destroy_routes;
2214 the_lnet.ln_refcount = 1;
2215 /* Now I may use my own API functions... */
2217 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2219 goto err_acceptor_stop;
2221 lnet_ping_target_update(pbuf, ping_mdh);
2223 rc = lnet_router_checker_start();
2227 rc = lnet_push_target_init();
2229 goto err_stop_router_checker;
2231 rc = lnet_peer_discovery_start();
2233 goto err_destroy_push_target;
2236 lnet_router_debugfs_init();
2238 mutex_unlock(&the_lnet.ln_api_mutex);
2242 err_destroy_push_target:
2243 lnet_push_target_fini();
2244 err_stop_router_checker:
2245 lnet_router_checker_stop();
2247 lnet_ping_target_fini();
2249 the_lnet.ln_refcount = 0;
2250 lnet_acceptor_stop();
2252 if (!the_lnet.ln_nis_from_mod_params)
2253 lnet_destroy_routes();
2254 err_shutdown_lndnis:
2255 lnet_shutdown_lndnets();
2259 mutex_unlock(&the_lnet.ln_api_mutex);
2260 while (!list_empty(&net_head)) {
2261 struct lnet_net *net;
2263 net = list_entry(net_head.next, struct lnet_net, net_list);
2264 list_del_init(&net->net_list);
2269 EXPORT_SYMBOL(LNetNIInit);
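/*
 * Minimal usage sketch for an LNet user (illustrative only, error handling
 * elided); every successful LNetNIInit() must be balanced by LNetNIFini()
 * as described above.  'my_pid' stands for whatever PID the caller wants:
 *
 *	rc = LNetNIInit(my_pid);
 *	if (rc < 0)
 *		return rc;
 *	... use the LNet API ...
 *	LNetNIFini();
 */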
2272 * Stop LNet interfaces, routing, and forwarding.
2274 * Users must call this function once for each successful call to LNetNIInit().
2275 * Once the LNetNIFini() operation has been started, the results of pending
2276 * API operations are undefined.
2278 * \return always 0 for current implementation.
2283 mutex_lock(&the_lnet.ln_api_mutex);
2285 LASSERT(the_lnet.ln_refcount > 0);
2287 if (the_lnet.ln_refcount != 1) {
2288 the_lnet.ln_refcount--;
2290 LASSERT(!the_lnet.ln_niinit_self);
2294 lnet_router_debugfs_fini();
2295 lnet_peer_discovery_stop();
2296 lnet_push_target_fini();
2297 lnet_router_checker_stop();
2298 lnet_ping_target_fini();
2300 /* Teardown fns that use my own API functions BEFORE here */
2301 the_lnet.ln_refcount = 0;
2303 lnet_acceptor_stop();
2304 lnet_destroy_routes();
2305 lnet_shutdown_lndnets();
2309 mutex_unlock(&the_lnet.ln_api_mutex);
2312 EXPORT_SYMBOL(LNetNIFini);
2315 * Grabs the NI data from the NI structure and fills in the output parameters.
2318 * \param[in] ni network interface structure
2319 * \param[out] cfg_ni NI config information
2320 * \param[out] tun network and LND tunables
2323 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2324 struct lnet_ioctl_config_lnd_tunables *tun,
2325 struct lnet_ioctl_element_stats *stats,
2328 size_t min_size = 0;
2331 if (!ni || !cfg_ni || !tun)
2334 if (ni->ni_interfaces[0] != NULL) {
2335 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2336 if (ni->ni_interfaces[i] != NULL) {
2337 strncpy(cfg_ni->lic_ni_intf[i],
2338 ni->ni_interfaces[i],
2339 sizeof(cfg_ni->lic_ni_intf[i]));
2344 cfg_ni->lic_nid = ni->ni_nid;
2345 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2346 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2348 cfg_ni->lic_status = ni->ni_status->ns_status;
2349 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2350 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2352 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2355 stats->iel_send_count = lnet_sum_stats(&ni->ni_stats,
2356 LNET_STATS_TYPE_SEND);
2357 stats->iel_recv_count = lnet_sum_stats(&ni->ni_stats,
2358 LNET_STATS_TYPE_RECV);
2359 stats->iel_drop_count = lnet_sum_stats(&ni->ni_stats,
2360 LNET_STATS_TYPE_DROP);
2364 * tun->lt_tun will always be present, but in order to be
2365 * backwards compatible, we need to deal with the case where
2366 * tun->lt_tun is smaller than what the kernel has, because it
2367 * comes from an older version of a userspace program. In that
2368 * case we copy only as much information as there is space available for.
2370 min_size = tun_size - sizeof(tun->lt_cmn);
2371 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2373 /* copy over the cpts */
2374 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2375 ni->ni_cpts == NULL) {
2376 for (i = 0; i < ni->ni_ncpts; i++)
2377 cfg_ni->lic_cpts[i] = i;
2380 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2381 i < LNET_MAX_SHOW_NUM_CPT;
2383 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2385 cfg_ni->lic_ncpts = ni->ni_ncpts;
2389 * NOTE: This is a legacy function left in the code to be backwards
2390 * compatible with older userspace programs. It should eventually be
2393 * Grabs the NI data from the NI structure and fills in the output parameters.
2396 * \param[in] ni network interface structure
2397 * \param[out] config config information
2400 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2401 struct lnet_ioctl_config_data *config)
2403 struct lnet_ioctl_net_config *net_config;
2404 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2405 size_t min_size, tunable_size = 0;
2411 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2415 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2416 ARRAY_SIZE(net_config->ni_interfaces));
2418 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2419 if (!ni->ni_interfaces[i])
2422 strncpy(net_config->ni_interfaces[i],
2423 ni->ni_interfaces[i],
2424 sizeof(net_config->ni_interfaces[i]));
2427 config->cfg_nid = ni->ni_nid;
2428 config->cfg_config_u.cfg_net.net_peer_timeout =
2429 ni->ni_net->net_tunables.lct_peer_timeout;
2430 config->cfg_config_u.cfg_net.net_max_tx_credits =
2431 ni->ni_net->net_tunables.lct_max_tx_credits;
2432 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2433 ni->ni_net->net_tunables.lct_peer_tx_credits;
2434 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2435 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2437 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2438 net_config->ni_status = LNET_NI_STATUS_UP;
2439 else
2440 net_config->ni_status = ni->ni_status->ns_status;
2443 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2445 for (i = 0; i < num_cpts; i++)
2446 net_config->ni_cpts[i] = ni->ni_cpts[i];
2448 config->cfg_ncpts = num_cpts;
2452 * See if user land tools sent in a newer and larger version
2453 * of struct lnet_tunables than what the kernel uses.
2455 min_size = sizeof(*config) + sizeof(*net_config);
2457 if (config->cfg_hdr.ioc_len > min_size)
2458 tunable_size = config->cfg_hdr.ioc_len - min_size;
2460 /* Don't copy too much data to user space */
2461 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2462 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2464 if (lnd_cfg && min_size) {
2465 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2466 config->cfg_config_u.cfg_net.net_interface_count = 1;
2468 /* Tell user land that kernel side has less data */
2469 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2470 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2471 config->cfg_hdr.ioc_len -= min_size;
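/*
 * Sketch of how the legacy ioctl sizes the tunable exchange: everything
 * is derived from cfg_hdr.ioc_len, so userspace signals how much LND
 * tunable space it provided simply by how large a buffer it hands in.
 * Not part of the build; the helper name is hypothetical.
 */
#if 0
static size_t
example_legacy_tunable_space(struct lnet_ioctl_config_data *config)
{
	size_t fixed = sizeof(*config) +
		       sizeof(struct lnet_ioctl_net_config);

	/* anything beyond the fixed structures is LND tunable space */
	if (config->cfg_hdr.ioc_len <= fixed)
		return 0;

	return config->cfg_hdr.ioc_len - fixed;
}
#endif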
2477 lnet_get_ni_idx_locked(int idx)
2480 struct lnet_net *net;
2482 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2483 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2493 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2496 struct lnet_net *net = mynet;
2500 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2502 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2508 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2509 /* if we have reached the end of the ni list and the net is
2510 * specified, then there are no more nis in that net */
2514 /* we reached the end of this net ni list. move to the
2515 * next net */
2516 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2517 /* no more nets and no more NIs. */
2520 /* get the next net */
2521 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2523 /* get the ni on it */
2524 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2530 /* there are more nis left */
2531 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
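/*
 * Sketch of the iteration pattern the helper above supports: walk every
 * local NI while holding the net lock, feeding the previously returned
 * NI back in as "prev". Not part of the build; the function name is
 * hypothetical.
 */
#if 0
static void
example_walk_all_nis(void)
{
	struct lnet_ni *ni = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL) {
		/* ni is valid here because the net lock is held */
	}
	lnet_net_unlock(cpt);
}
#endif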
2537 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2542 int idx = config->cfg_count;
2544 cpt = lnet_net_lock_current();
2546 ni = lnet_get_ni_idx_locked(idx);
2551 lnet_fill_ni_info_legacy(ni, config);
2555 lnet_net_unlock(cpt);
2560 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2561 struct lnet_ioctl_config_lnd_tunables *tun,
2562 struct lnet_ioctl_element_stats *stats,
2569 if (!cfg_ni || !tun || !stats)
2572 cpt = lnet_net_lock_current();
2574 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2579 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2583 lnet_net_unlock(cpt);
2587 int lnet_get_ni_stats(struct lnet_ioctl_element_msg_stats *msg_stats)
2596 cpt = lnet_net_lock_current();
2598 ni = lnet_get_ni_idx_locked(msg_stats->im_idx);
2601 lnet_usr_translate_stats(msg_stats, &ni->ni_stats);
2605 lnet_net_unlock(cpt);
2610 static int lnet_add_net_common(struct lnet_net *net,
2611 struct lnet_ioctl_config_lnd_tunables *tun)
2614 struct lnet_ping_buffer *pbuf;
2615 struct lnet_handle_md ping_mdh;
2617 struct lnet_remotenet *rnet;
2619 int num_acceptor_nets;
2621 lnet_net_lock(LNET_LOCK_EX);
2622 rnet = lnet_find_rnet_locked(net->net_id);
2623 lnet_net_unlock(LNET_LOCK_EX);
2625 * make sure that the net being added doesn't invalidate the current
2626 * configuration that LNet is keeping
2629 CERROR("Adding net %s will invalidate routing configuration\n",
2630 libcfs_net2str(net->net_id));
2636 * make sure to calculate the correct number of slots in the ping
2637 * buffer. Since the ping info is a flattened list of all the NIs,
2638 * we should allocate enough slots to accommodate the number of NIs
2639 * which will be added.
2641 * since the NI hasn't been configured yet, use
2642 * lnet_get_net_ni_count_pre(), which checks the net_ni_added list
2644 net_ni_count = lnet_get_net_ni_count_pre(net);
2646 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2647 net_ni_count + lnet_get_ni_count(),
2655 memcpy(&net->net_tunables,
2656 &tun->lt_cmn, sizeof(net->net_tunables));
2658 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
2661 * before starting this network, get a count of the current TCP
2662 * networks which require the acceptor thread to be running. If that
2663 * count is zero before we start up this network, then start the
2664 * acceptor thread after this network has been started.
2666 num_acceptor_nets = lnet_count_acceptor_nets();
2668 net_id = net->net_id;
2670 rc = lnet_startup_lndnet(net,
2671 (tun) ? &tun->lt_tun : NULL);
2675 lnet_net_lock(LNET_LOCK_EX);
2676 net = lnet_get_net_locked(net_id);
2677 lnet_net_unlock(LNET_LOCK_EX);
2682 * Start the acceptor thread if this is the first network
2683 * being added that requires the thread.
2685 if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
2686 rc = lnet_acceptor_start();
2688 /* shutdown the net that we just started */
2689 CERROR("Failed to start up acceptor thread\n");
2690 lnet_shutdown_lndnet(net);
2695 lnet_net_lock(LNET_LOCK_EX);
2696 lnet_peer_net_added(net);
2697 lnet_net_unlock(LNET_LOCK_EX);
2699 lnet_ping_target_update(pbuf, ping_mdh);
2704 lnet_ping_md_unlink(pbuf, &ping_mdh);
2705 lnet_ping_buffer_decref(pbuf);
2709 static int lnet_handle_legacy_ip2nets(char *ip2nets,
2710 struct lnet_ioctl_config_lnd_tunables *tun)
2712 struct lnet_net *net;
2715 struct list_head net_head;
2717 INIT_LIST_HEAD(&net_head);
2719 rc = lnet_parse_ip2nets(&nets, ip2nets);
2723 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2727 mutex_lock(&the_lnet.ln_api_mutex);
2728 while (!list_empty(&net_head)) {
2729 net = list_entry(net_head.next, struct lnet_net, net_list);
2730 list_del_init(&net->net_list);
2731 rc = lnet_add_net_common(net, tun);
2737 mutex_unlock(&the_lnet.ln_api_mutex);
2739 while (!list_empty(&net_head)) {
2740 net = list_entry(net_head.next, struct lnet_net, net_list);
2741 list_del_init(&net->net_list);
2747 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
2749 struct lnet_net *net;
2751 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2753 __u32 net_id, lnd_type;
2755 /* get the tunables if they are available */
2756 if (conf->lic_cfg_hdr.ioc_len >=
2757 sizeof(*conf) + sizeof(*tun))
2758 tun = (struct lnet_ioctl_config_lnd_tunables *)
2761 /* handle legacy ip2nets from DLC */
2762 if (conf->lic_legacy_ip2nets[0] != '\0')
2763 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
2766 net_id = LNET_NIDNET(conf->lic_nid);
2767 lnd_type = LNET_NETTYP(net_id);
2769 if (!libcfs_isknown_lnd(lnd_type)) {
2770 CERROR("No valid net and lnd information provided\n");
2774 net = lnet_net_alloc(net_id, NULL);
2778 for (i = 0; i < conf->lic_ncpts; i++) {
2779 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
2783 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
2784 conf->lic_ni_intf[0]);
2788 mutex_lock(&the_lnet.ln_api_mutex);
2790 rc = lnet_add_net_common(net, tun);
2792 mutex_unlock(&the_lnet.ln_api_mutex);
2797 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
2799 struct lnet_net *net;
2801 __u32 net_id = LNET_NIDNET(conf->lic_nid);
2802 struct lnet_ping_buffer *pbuf;
2803 struct lnet_handle_md ping_mdh;
2808 /* don't allow userspace to shut down the LOLND */
2809 if (LNET_NETTYP(net_id) == LOLND)
2812 mutex_lock(&the_lnet.ln_api_mutex);
2816 net = lnet_get_net_locked(net_id);
2818 CERROR("net %s not found\n",
2819 libcfs_net2str(net_id));
2824 addr = LNET_NIDADDR(conf->lic_nid);
2826 /* remove the entire net */
2827 net_count = lnet_get_net_ni_count_locked(net);
2831 /* create and link a new ping info, before removing the old one */
2832 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2833 lnet_get_ni_count() - net_count,
2836 goto unlock_api_mutex;
2838 lnet_shutdown_lndnet(net);
2840 if (lnet_count_acceptor_nets() == 0)
2841 lnet_acceptor_stop();
2843 lnet_ping_target_update(pbuf, ping_mdh);
2845 goto unlock_api_mutex;
2848 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
2850 CERROR("nid %s not found\n",
2851 libcfs_nid2str(conf->lic_nid));
2856 net_count = lnet_get_net_ni_count_locked(net);
2860 /* create and link a new ping info, before removing the old one */
2861 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2862 lnet_get_ni_count() - 1, false);
2864 goto unlock_api_mutex;
2866 lnet_shutdown_lndni(ni);
2868 if (lnet_count_acceptor_nets() == 0)
2869 lnet_acceptor_stop();
2871 lnet_ping_target_update(pbuf, ping_mdh);
2873 /* check if the net is empty and remove it if it is */
2875 lnet_shutdown_lndnet(net);
2877 goto unlock_api_mutex;
2882 mutex_unlock(&the_lnet.ln_api_mutex);
2888 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
2889 * They are only expected to be called for unique networks,
2890 * typically as a result of older DLC library
2891 * calls. Multi-Rail DLC and beyond no longer use these APIs.
2894 lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
2896 struct lnet_net *net;
2897 struct list_head net_head;
2899 struct lnet_ioctl_config_lnd_tunables tun;
2900 char *nets = conf->cfg_config_u.cfg_net.net_intf;
2902 INIT_LIST_HEAD(&net_head);
2904 /* Create net/ni structures for the network string */
2905 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2907 return rc == 0 ? -EINVAL : rc;
2909 mutex_lock(&the_lnet.ln_api_mutex);
2912 rc = -EINVAL; /* only add one network per call */
2913 goto out_unlock_clean;
2916 net = list_entry(net_head.next, struct lnet_net, net_list);
2917 list_del_init(&net->net_list);
2919 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
2921 memset(&tun, 0, sizeof(tun));
2923 tun.lt_cmn.lct_peer_timeout =
2924 conf->cfg_config_u.cfg_net.net_peer_timeout;
2925 tun.lt_cmn.lct_peer_tx_credits =
2926 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
2927 tun.lt_cmn.lct_peer_rtr_credits =
2928 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
2929 tun.lt_cmn.lct_max_tx_credits =
2930 conf->cfg_config_u.cfg_net.net_max_tx_credits;
2932 rc = lnet_add_net_common(net, &tun);
2935 mutex_unlock(&the_lnet.ln_api_mutex);
2936 while (!list_empty(&net_head)) {
2937 /* net_head list is empty in success case */
2938 net = list_entry(net_head.next, struct lnet_net, net_list);
2939 list_del_init(&net->net_list);
2946 lnet_dyn_del_net(__u32 net_id)
2948 struct lnet_net *net;
2949 struct lnet_ping_buffer *pbuf;
2950 struct lnet_handle_md ping_mdh;
2954 /* don't allow userspace to shut down the LOLND */
2955 if (LNET_NETTYP(net_id) == LOLND)
2958 mutex_lock(&the_lnet.ln_api_mutex);
2962 net = lnet_get_net_locked(net_id);
2969 net_ni_count = lnet_get_net_ni_count_locked(net);
2973 /* create and link a new ping info, before removing the old one */
2974 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2975 lnet_get_ni_count() - net_ni_count, false);
2979 lnet_shutdown_lndnet(net);
2981 if (lnet_count_acceptor_nets() == 0)
2982 lnet_acceptor_stop();
2984 lnet_ping_target_update(pbuf, ping_mdh);
2987 mutex_unlock(&the_lnet.ln_api_mutex);
2992 void lnet_incr_dlc_seq(void)
2994 atomic_inc(&lnet_dlc_seq_no);
2997 __u32 lnet_get_dlc_seq_locked(void)
2999 return atomic_read(&lnet_dlc_seq_no);
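/*
 * Sketch of how the DLC sequence number can be used: a caller records
 * the value it last saw and treats any change as "the local NI
 * configuration was updated". Not part of the build; the helper is
 * hypothetical and the caller is assumed to hold the net lock, as the
 * _locked suffix suggests.
 */
#if 0
static bool
example_config_changed(__u32 *last_seen)
{
	__u32 seq = lnet_get_dlc_seq_locked();

	if (seq == *last_seen)
		return false;

	*last_seen = seq;
	return true;
}
#endif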
3003 * LNet ioctl handler.
3007 LNetCtl(unsigned int cmd, void *arg)
3009 struct libcfs_ioctl_data *data = arg;
3010 struct lnet_ioctl_config_data *config;
3011 struct lnet_process_id id = {0};
3015 BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
3016 sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);
3019 case IOC_LIBCFS_GET_NI:
3020 rc = LNetGetId(data->ioc_count, &id);
3021 data->ioc_nid = id.nid;
3024 case IOC_LIBCFS_FAIL_NID:
3025 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
3027 case IOC_LIBCFS_ADD_ROUTE:
3030 if (config->cfg_hdr.ioc_len < sizeof(*config))
3033 mutex_lock(&the_lnet.ln_api_mutex);
3034 rc = lnet_add_route(config->cfg_net,
3035 config->cfg_config_u.cfg_route.rtr_hop,
3037 config->cfg_config_u.cfg_route.
3040 rc = lnet_check_routes();
3042 lnet_del_route(config->cfg_net,
3045 mutex_unlock(&the_lnet.ln_api_mutex);
3048 case IOC_LIBCFS_DEL_ROUTE:
3051 if (config->cfg_hdr.ioc_len < sizeof(*config))
3054 mutex_lock(&the_lnet.ln_api_mutex);
3055 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
3056 mutex_unlock(&the_lnet.ln_api_mutex);
3059 case IOC_LIBCFS_GET_ROUTE:
3062 if (config->cfg_hdr.ioc_len < sizeof(*config))
3065 mutex_lock(&the_lnet.ln_api_mutex);
3066 rc = lnet_get_route(config->cfg_count,
3068 &config->cfg_config_u.cfg_route.rtr_hop,
3070 &config->cfg_config_u.cfg_route.rtr_flags,
3071 &config->cfg_config_u.cfg_route.
3073 mutex_unlock(&the_lnet.ln_api_mutex);
3076 case IOC_LIBCFS_GET_LOCAL_NI: {
3077 struct lnet_ioctl_config_ni *cfg_ni;
3078 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
3079 struct lnet_ioctl_element_stats *stats;
3084 /* get the tunables if they are available */
3085 if (cfg_ni->lic_cfg_hdr.ioc_len <
3086 sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
3089 stats = (struct lnet_ioctl_element_stats *)
3091 tun = (struct lnet_ioctl_config_lnd_tunables *)
3092 (cfg_ni->lic_bulk + sizeof(*stats));
3094 tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
3097 mutex_lock(&the_lnet.ln_api_mutex);
3098 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
3099 mutex_unlock(&the_lnet.ln_api_mutex);
3103 case IOC_LIBCFS_GET_LOCAL_NI_MSG_STATS: {
3104 struct lnet_ioctl_element_msg_stats *msg_stats = arg;
3106 if (msg_stats->im_hdr.ioc_len != sizeof(*msg_stats))
3109 mutex_lock(&the_lnet.ln_api_mutex);
3110 rc = lnet_get_ni_stats(msg_stats);
3111 mutex_unlock(&the_lnet.ln_api_mutex);
3116 case IOC_LIBCFS_GET_NET: {
3117 size_t total = sizeof(*config) +
3118 sizeof(struct lnet_ioctl_net_config);
3121 if (config->cfg_hdr.ioc_len < total)
3124 mutex_lock(&the_lnet.ln_api_mutex);
3125 rc = lnet_get_net_config(config);
3126 mutex_unlock(&the_lnet.ln_api_mutex);
3130 case IOC_LIBCFS_GET_LNET_STATS:
3132 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3134 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3137 mutex_lock(&the_lnet.ln_api_mutex);
3138 lnet_counters_get(&lnet_stats->st_cntrs);
3139 mutex_unlock(&the_lnet.ln_api_mutex);
3143 case IOC_LIBCFS_CONFIG_RTR:
3146 if (config->cfg_hdr.ioc_len < sizeof(*config))
3149 mutex_lock(&the_lnet.ln_api_mutex);
3150 if (config->cfg_config_u.cfg_buffers.buf_enable) {
3151 rc = lnet_rtrpools_enable();
3152 mutex_unlock(&the_lnet.ln_api_mutex);
3155 lnet_rtrpools_disable();
3156 mutex_unlock(&the_lnet.ln_api_mutex);
3159 case IOC_LIBCFS_ADD_BUF:
3162 if (config->cfg_hdr.ioc_len < sizeof(*config))
3165 mutex_lock(&the_lnet.ln_api_mutex);
3166 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3168 config->cfg_config_u.cfg_buffers.
3170 config->cfg_config_u.cfg_buffers.
3172 mutex_unlock(&the_lnet.ln_api_mutex);
3175 case IOC_LIBCFS_SET_NUMA_RANGE: {
3176 struct lnet_ioctl_set_value *numa;
3178 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3180 lnet_net_lock(LNET_LOCK_EX);
3181 lnet_numa_range = numa->sv_value;
3182 lnet_net_unlock(LNET_LOCK_EX);
3186 case IOC_LIBCFS_GET_NUMA_RANGE: {
3187 struct lnet_ioctl_set_value *numa;
3189 if (numa->sv_hdr.ioc_len != sizeof(*numa))
3191 numa->sv_value = lnet_numa_range;
3195 case IOC_LIBCFS_GET_BUF: {
3196 struct lnet_ioctl_pool_cfg *pool_cfg;
3197 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3201 if (config->cfg_hdr.ioc_len < total)
3204 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3206 mutex_lock(&the_lnet.ln_api_mutex);
3207 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3208 mutex_unlock(&the_lnet.ln_api_mutex);
3212 case IOC_LIBCFS_ADD_PEER_NI: {
3213 struct lnet_ioctl_peer_cfg *cfg = arg;
3215 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3218 mutex_lock(&the_lnet.ln_api_mutex);
3219 rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
3222 mutex_unlock(&the_lnet.ln_api_mutex);
3226 case IOC_LIBCFS_DEL_PEER_NI: {
3227 struct lnet_ioctl_peer_cfg *cfg = arg;
3229 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3232 mutex_lock(&the_lnet.ln_api_mutex);
3233 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3234 cfg->prcfg_cfg_nid);
3235 mutex_unlock(&the_lnet.ln_api_mutex);
3239 case IOC_LIBCFS_GET_PEER_INFO: {
3240 struct lnet_ioctl_peer *peer_info = arg;
3242 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3245 mutex_lock(&the_lnet.ln_api_mutex);
3246 rc = lnet_get_peer_ni_info(
3247 peer_info->pr_count,
3249 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3250 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3251 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3252 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3253 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3254 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3255 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3256 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3257 mutex_unlock(&the_lnet.ln_api_mutex);
3261 case IOC_LIBCFS_GET_PEER_NI: {
3262 struct lnet_ioctl_peer_cfg *cfg = arg;
3264 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3267 mutex_lock(&the_lnet.ln_api_mutex);
3268 rc = lnet_get_peer_info(cfg,
3269 (void __user *)cfg->prcfg_bulk);
3270 mutex_unlock(&the_lnet.ln_api_mutex);
3274 case IOC_LIBCFS_GET_PEER_LIST: {
3275 struct lnet_ioctl_peer_cfg *cfg = arg;
3277 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3280 mutex_lock(&the_lnet.ln_api_mutex);
3281 rc = lnet_get_peer_list(&cfg->prcfg_count, &cfg->prcfg_size,
3282 (struct lnet_process_id __user *)cfg->prcfg_bulk);
3283 mutex_unlock(&the_lnet.ln_api_mutex);
3287 case IOC_LIBCFS_NOTIFY_ROUTER: {
3288 time64_t deadline = ktime_get_real_seconds() - data->ioc_u64[0];
3290 /* The deadline passed in by the user should be some time in
3291 * seconds in the future since the UNIX epoch. We have to map
3292 * that deadline to the wall clock.
3294 deadline += ktime_get_seconds();
3295 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
3299 case IOC_LIBCFS_LNET_DIST:
3300 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
3301 if (rc < 0 && rc != -EHOSTUNREACH)
3304 data->ioc_u32[0] = rc;
3307 case IOC_LIBCFS_TESTPROTOCOMPAT:
3308 lnet_net_lock(LNET_LOCK_EX);
3309 the_lnet.ln_testprotocompat = data->ioc_flags;
3310 lnet_net_unlock(LNET_LOCK_EX);
3313 case IOC_LIBCFS_LNET_FAULT:
3314 return lnet_fault_ctl(data->ioc_flags, data);
3316 case IOC_LIBCFS_PING: {
3317 signed long timeout;
3319 id.nid = data->ioc_nid;
3320 id.pid = data->ioc_u32[0];
3322 /* If timeout is non-positive or exceeds DEFAULT_PEER_TIMEOUT, set the default of 3 minutes */
3323 if (((s32)data->ioc_u32[1] <= 0) ||
3324 data->ioc_u32[1] > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3325 timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3327 timeout = msecs_to_jiffies(data->ioc_u32[1]);
3329 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3330 data->ioc_plen1 / sizeof(struct lnet_process_id));
3335 data->ioc_count = rc;
3339 case IOC_LIBCFS_PING_PEER: {
3340 struct lnet_ioctl_ping_data *ping = arg;
3341 struct lnet_peer *lp;
3342 signed long timeout;
3344 /* If timeout is non-positive or exceeds DEFAULT_PEER_TIMEOUT, set the default of 3 minutes */
3345 if (((s32)ping->op_param) <= 0 ||
3346 ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC))
3347 timeout = msecs_to_jiffies(DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC);
3349 timeout = msecs_to_jiffies(ping->op_param);
3351 rc = lnet_ping(ping->ping_id, timeout,
3357 mutex_lock(&the_lnet.ln_api_mutex);
3358 lp = lnet_find_peer(ping->ping_id.nid);
3360 ping->ping_id.nid = lp->lp_primary_nid;
3361 ping->mr_info = lnet_peer_is_multi_rail(lp);
3362 lnet_peer_decref_locked(lp);
3364 mutex_unlock(&the_lnet.ln_api_mutex);
3366 ping->ping_count = rc;
3370 case IOC_LIBCFS_DISCOVER: {
3371 struct lnet_ioctl_ping_data *discover = arg;
3372 struct lnet_peer *lp;
3374 rc = lnet_discover(discover->ping_id, discover->op_param,
3376 discover->ping_count);
3380 mutex_lock(&the_lnet.ln_api_mutex);
3381 lp = lnet_find_peer(discover->ping_id.nid);
3383 discover->ping_id.nid = lp->lp_primary_nid;
3384 discover->mr_info = lnet_peer_is_multi_rail(lp);
3385 lnet_peer_decref_locked(lp);
3387 mutex_unlock(&the_lnet.ln_api_mutex);
3389 discover->ping_count = rc;
3394 ni = lnet_net2ni_addref(data->ioc_net);
3398 if (ni->ni_net->net_lnd->lnd_ctl == NULL)
3401 rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
3408 EXPORT_SYMBOL(LNetCtl);
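/*
 * Sketch of driving the handler above from kernel code: the caller
 * fills the ioctl data structure and LNetCtl() dispatches on the
 * command. Here IOC_LIBCFS_GET_NI returns the NID of the interface at
 * the requested index. Not part of the build; the wrapper name is
 * hypothetical and the normal entry point is the libcfs ioctl path.
 */
#if 0
static int
example_get_first_nid(lnet_nid_t *nid)
{
	struct libcfs_ioctl_data data = { 0 };
	int rc;

	data.ioc_count = 0;	/* interface index to look up */
	rc = LNetCtl(IOC_LIBCFS_GET_NI, &data);
	if (rc == 0)
		*nid = data.ioc_nid;

	return rc;
}
#endif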
3410 void LNetDebugPeer(struct lnet_process_id id)
3412 lnet_debug_peer(id.nid);
3414 EXPORT_SYMBOL(LNetDebugPeer);
3417 * Determine if the specified peer \a nid is on the local node.
3419 * \param nid peer nid to check
3421 * \retval true If peer NID is on the local node.
3422 * \retval false If peer NID is not on the local node.
3424 bool LNetIsPeerLocal(lnet_nid_t nid)
3426 struct lnet_net *net;
3430 cpt = lnet_net_lock_current();
3431 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3432 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3433 if (ni->ni_nid == nid) {
3434 lnet_net_unlock(cpt);
3439 lnet_net_unlock(cpt);
3443 EXPORT_SYMBOL(LNetIsPeerLocal);
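/*
 * Sketch of a typical LNetIsPeerLocal() use: short-circuit work when
 * the destination NID turns out to belong to this node. Not part of
 * the build; the function name is hypothetical.
 */
#if 0
static bool
example_destination_is_local(struct lnet_process_id dst)
{
	return LNetIsPeerLocal(dst.nid);
}
#endif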
3446 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
3447 * Note that all interfaces share the same PID, as requested by LNetNIInit().
3449 * \param index Index of the interface to look up.
3450 * \param id On successful return, this location will hold the
3451 * struct lnet_process_id ID of the interface.
3453 * \retval 0 If an interface exists at \a index.
3454 * \retval -ENOENT If no interface has been found.
3457 LNetGetId(unsigned int index, struct lnet_process_id *id)
3460 struct lnet_net *net;
3464 LASSERT(the_lnet.ln_refcount > 0);
3466 cpt = lnet_net_lock_current();
3468 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3469 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3473 id->nid = ni->ni_nid;
3474 id->pid = the_lnet.ln_pid;
3480 lnet_net_unlock(cpt);
3483 EXPORT_SYMBOL(LNetGetId);
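/*
 * Sketch of the usual LNetGetId() enumeration pattern: walk indices
 * until the call fails with -ENOENT, collecting the ID of every local
 * interface. Not part of the build; the function name is hypothetical.
 */
#if 0
static void
example_list_local_ids(void)
{
	struct lnet_process_id id;
	unsigned int i;

	for (i = 0; LNetGetId(i, &id) == 0; i++)
		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
}
#endif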
3485 static int lnet_ping(struct lnet_process_id id, signed long timeout,
3486 struct lnet_process_id __user *ids, int n_ids)
3488 struct lnet_handle_eq eqh;
3489 struct lnet_handle_md mdh;
3490 struct lnet_event event;
3491 struct lnet_md md = { NULL };
3495 const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
3496 struct lnet_ping_buffer *pbuf;
3497 struct lnet_process_id tmpid;
3504 /* n_ids limit is arbitrary */
3505 if (n_ids <= 0 || id.nid == LNET_NID_ANY)
3509 * if the user buffer has more space than lnet_interfaces_max,
3510 * then only fill it up to lnet_interfaces_max
3512 if (n_ids > lnet_interfaces_max)
3513 n_ids = lnet_interfaces_max;
3515 if (id.pid == LNET_PID_ANY)
3516 id.pid = LNET_PID_LUSTRE;
3518 pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
3522 /* NB 2 events max (including any unlink event) */
3523 rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
3525 CERROR("Can't allocate EQ: %d\n", rc);
3526 goto fail_ping_buffer_decref;
3529 /* initialize md content */
3530 md.start = &pbuf->pb_info;
3531 md.length = LNET_PING_INFO_SIZE(n_ids);
3532 md.threshold = 2; /* GET/REPLY */
3534 md.options = LNET_MD_TRUNCATE;
3538 rc = LNetMDBind(md, LNET_UNLINK, &mdh);
3540 CERROR("Can't bind MD: %d\n", rc);
3544 rc = LNetGet(LNET_NID_ANY, mdh, id,
3545 LNET_RESERVED_PORTAL,
3546 LNET_PROTO_PING_MATCHBITS, 0);
3549 /* Don't CERROR; this could be deliberate! */
3550 rc2 = LNetMDUnlink(mdh);
3553 /* NB must wait for the UNLINK event below... */
3555 timeout = a_long_time;
3559 /* MUST block for unlink to complete */
3561 blocked = cfs_block_allsigs();
3563 rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);
3566 cfs_restore_sigs(blocked);
3568 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
3569 (rc2 <= 0) ? -1 : event.type,
3570 (rc2 <= 0) ? -1 : event.status,
3571 (rc2 > 0 && event.unlinked) ? " unlinked" : "");
3573 LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
3575 if (rc2 <= 0 || event.status != 0) {
3576 /* timeout or error */
3577 if (!replied && rc == 0)
3578 rc = (rc2 < 0) ? rc2 :
3579 (rc2 == 0) ? -ETIMEDOUT :
3583 /* Ensure completion in finite time... */
3585 /* No assertion (racing with network) */
3587 timeout = a_long_time;
3588 } else if (rc2 == 0) {
3589 /* timed out waiting for unlink */
3590 CWARN("ping %s: late network completion\n",
3593 } else if (event.type == LNET_EVENT_REPLY) {
3597 } while (rc2 <= 0 || !event.unlinked);
3601 CWARN("%s: Unexpected rc >= 0 but no reply!\n",
3608 LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));
3610 rc = -EPROTO; /* if I can't parse... */
3613 CERROR("%s: ping info too short %d\n",
3614 libcfs_id2str(id), nob);
3618 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
3619 lnet_swap_pinginfo(pbuf);
3620 } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
3621 CERROR("%s: Unexpected magic %08x\n",
3622 libcfs_id2str(id), pbuf->pb_info.pi_magic);
3626 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
3627 CERROR("%s: ping w/o NI status: 0x%x\n",
3628 libcfs_id2str(id), pbuf->pb_info.pi_features);
3632 if (nob < LNET_PING_INFO_SIZE(0)) {
3633 CERROR("%s: Short reply %d(%d min)\n",
3635 nob, (int)LNET_PING_INFO_SIZE(0));
3639 if (pbuf->pb_info.pi_nnis < n_ids)
3640 n_ids = pbuf->pb_info.pi_nnis;
3642 if (nob < LNET_PING_INFO_SIZE(n_ids)) {
3643 CERROR("%s: Short reply %d(%d expected)\n",
3645 nob, (int)LNET_PING_INFO_SIZE(n_ids));
3649 rc = -EFAULT; /* if I segv in copy_to_user()... */
3651 memset(&tmpid, 0, sizeof(tmpid));
3652 for (i = 0; i < n_ids; i++) {
3653 tmpid.pid = pbuf->pb_info.pi_pid;
3654 tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
3655 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
3658 rc = pbuf->pb_info.pi_nnis;
3661 rc2 = LNetEQFree(eqh);
3663 CERROR("rc2 %d\n", rc2);
3666 fail_ping_buffer_decref:
3667 lnet_ping_buffer_decref(pbuf);
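/*
 * Condensed sketch of the setup half of lnet_ping() above: allocate an
 * EQ, bind an MD over the reply buffer, then issue the GET. The caller
 * must subsequently LNetEQPoll() until an event with event.unlinked is
 * seen (even on failure) before freeing the EQ. Not part of the build;
 * the helper name and parameters are hypothetical.
 */
#if 0
static int
example_issue_ping_get(struct lnet_process_id target,
		       struct lnet_ping_buffer *pbuf, int n_ids,
		       struct lnet_handle_eq *eqh,
		       struct lnet_handle_md *mdh)
{
	struct lnet_md md = { NULL };
	int rc;

	/* 2 events max: the REPLY and the final unlink notification */
	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, eqh);
	if (rc != 0)
		return rc;

	md.start     = &pbuf->pb_info;
	md.length    = LNET_PING_INFO_SIZE(n_ids);
	md.threshold = 2;		/* GET/REPLY */
	md.options   = LNET_MD_TRUNCATE;
	md.eq_handle = *eqh;

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (rc != 0) {
		LNetEQFree(*eqh);
		return rc;
	}

	return LNetGet(LNET_NID_ANY, *mdh, target, LNET_RESERVED_PORTAL,
		       LNET_PROTO_PING_MATCHBITS, 0);
}
#endif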
3672 lnet_discover(struct lnet_process_id id, __u32 force,
3673 struct lnet_process_id __user *ids, int n_ids)
3675 struct lnet_peer_ni *lpni;
3676 struct lnet_peer_ni *p;
3677 struct lnet_peer *lp;
3678 struct lnet_process_id *buf;
3682 int max_intf = lnet_interfaces_max;
3686 id.nid == LNET_NID_ANY)
3689 if (id.pid == LNET_PID_ANY)
3690 id.pid = LNET_PID_LUSTRE;
3693 * if the user buffer has more space than max_intf,
3694 * then only fill it up to max_intf
3696 if (n_ids > max_intf)
3699 buf_size = n_ids * sizeof(*buf);
3701 LIBCFS_ALLOC(buf, buf_size);
3705 cpt = lnet_net_lock_current();
3706 lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt);
3713 * Clearing the NIDS_UPTODATE flag ensures the peer will
3714 * be discovered, provided discovery has not been disabled.
3716 lp = lpni->lpni_peer_net->lpn_peer;
3717 spin_lock(&lp->lp_lock);
3718 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3719 /* If the force flag is set, force a PING and PUSH as well. */
3721 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
3722 spin_unlock(&lp->lp_lock);
3723 rc = lnet_discover_peer_locked(lpni, cpt, true);
3727 /* Peer may have changed. */
3728 lp = lpni->lpni_peer_net->lpn_peer;
3729 if (lp->lp_nnis < n_ids)
3730 n_ids = lp->lp_nnis;
3734 while ((p = lnet_get_next_peer_ni_locked(lp, NULL, p)) != NULL) {
3735 buf[i].pid = id.pid;
3736 buf[i].nid = p->lpni_nid;
3741 lnet_net_unlock(cpt);
3744 if (copy_to_user(ids, buf, n_ids * sizeof(*buf)))
3750 lnet_peer_ni_decref_locked(lpni);
3752 lnet_net_unlock(cpt);
3754 LIBCFS_FREE(buf, buf_size);