4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/log2.h>
35 #include <linux/ktime.h>
36 #include <linux/moduleparam.h>
38 #include <lnet/lib-lnet.h>
40 #define D_LNI D_CONSOLE
42 struct lnet the_lnet; /* THE state of the network */
43 EXPORT_SYMBOL(the_lnet);
45 static char *ip2nets = "";
46 module_param(ip2nets, charp, 0444);
47 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
49 static char *networks = "";
50 module_param(networks, charp, 0444);
51 MODULE_PARM_DESC(networks, "local networks");
53 static char *routes = "";
54 module_param(routes, charp, 0444);
55 MODULE_PARM_DESC(routes, "routes to non-local networks");
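/*
 * Illustrative usage (not part of this file): the three strings above
 * are normally supplied as lnet module parameters at load time; the
 * interface and network names below are hypothetical examples:
 *
 *   options lnet networks="tcp0(eth0)"
 *   options lnet ip2nets="tcp0 10.10.*.*; o2ib0 192.168.0.*"
 *   options lnet routes="o2ib0 10.10.0.1@tcp0"
 *
 * Note that 'networks' and 'ip2nets' are mutually exclusive; see
 * lnet_get_networks() below.
 */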
57 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
58 module_param(rnet_htable_size, int, 0444);
59 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
61 static int use_tcp_bonding = false;
62 module_param(use_tcp_bonding, int, 0444);
63 MODULE_PARM_DESC(use_tcp_bonding,
64 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
66 unsigned int lnet_numa_range = 0;
67 module_param(lnet_numa_range, uint, 0444);
68 MODULE_PARM_DESC(lnet_numa_range,
69 "NUMA range to consider during Multi-Rail selection");
71 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
72 static int intf_max_set(const char *val, struct kernel_param *kp);
73 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
74 &lnet_interfaces_max, S_IRUGO|S_IWUSR);
75 MODULE_PARM_DESC(lnet_interfaces_max,
76 "Maximum number of interfaces in a node.");
79 * This sequence number keeps track of how many times DLC was used to
80 * update the local NIs. It is incremented when a NI is added or
81 * removed and checked when sending a message to determine if there is
82 * a need to re-run the selection algorithm. See lnet_select_pathway()
83 * for more details on its usage.
85 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
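/*
 * A minimal sketch (assumed helper, not part of the Lustre tree) of how
 * the sequence number above can be consumed by a sender: snapshot it
 * before selecting a pathway, then re-run the selection if it moved.
 */
static inline bool
lnet_dlc_seq_changed(int snapshot)
{
	return atomic_read(&lnet_dlc_seq_no) != snapshot;
}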
87 static int lnet_ping(struct lnet_process_id id, signed long timeout,
88 struct lnet_process_id __user *ids, int n_ids);
91 intf_max_set(const char *val, struct kernel_param *kp)
95 rc = kstrtoint(val, 0, &value);
97 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
101 if (value < LNET_INTERFACES_MIN) {
102 CWARN("max interfaces provided is too small, setting to %d\n",
103 LNET_INTERFACES_MIN);
104 value = LNET_INTERFACES_MIN;
107 *(int *)kp->arg = value;
113 lnet_get_routes(void)
119 lnet_get_networks(void)
124 if (*networks != 0 && *ip2nets != 0) {
125 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
126 "'ip2nets' but not both at once\n");
131 rc = lnet_parse_ip2nets(&nets, ip2nets);
132 return (rc == 0) ? nets : NULL;
142 lnet_init_locks(void)
144 spin_lock_init(&the_lnet.ln_eq_wait_lock);
145 init_waitqueue_head(&the_lnet.ln_eq_waitq);
146 init_waitqueue_head(&the_lnet.ln_rc_waitq);
147 mutex_init(&the_lnet.ln_lnd_mutex);
148 mutex_init(&the_lnet.ln_api_mutex);
152 lnet_fini_locks(void)
156 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
157 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
161 lnet_descriptor_setup(void)
163 /* create dedicated kmem_caches for MEs and small MDs (i.e., MDs that
164 * would otherwise be allocated from the generic <size-xxx> kmem_cache).
166 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
168 if (!lnet_mes_cachep)
171 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
172 LNET_SMALL_MD_SIZE, 0, 0,
174 if (!lnet_small_mds_cachep)
181 lnet_descriptor_cleanup(void)
184 if (lnet_small_mds_cachep) {
185 kmem_cache_destroy(lnet_small_mds_cachep);
186 lnet_small_mds_cachep = NULL;
189 if (lnet_mes_cachep) {
190 kmem_cache_destroy(lnet_mes_cachep);
191 lnet_mes_cachep = NULL;
196 lnet_create_remote_nets_table(void)
199 struct list_head *hash;
201 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
202 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
203 LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
205 CERROR("Failed to create remote nets hash table\n");
209 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
210 INIT_LIST_HEAD(&hash[i]);
211 the_lnet.ln_remote_nets_hash = hash;
216 lnet_destroy_remote_nets_table(void)
220 if (the_lnet.ln_remote_nets_hash == NULL)
223 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
224 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
226 LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
227 LNET_REMOTE_NETS_HASH_SIZE *
228 sizeof(the_lnet.ln_remote_nets_hash[0]));
229 the_lnet.ln_remote_nets_hash = NULL;
233 lnet_destroy_locks(void)
235 if (the_lnet.ln_res_lock != NULL) {
236 cfs_percpt_lock_free(the_lnet.ln_res_lock);
237 the_lnet.ln_res_lock = NULL;
240 if (the_lnet.ln_net_lock != NULL) {
241 cfs_percpt_lock_free(the_lnet.ln_net_lock);
242 the_lnet.ln_net_lock = NULL;
249 lnet_create_locks(void)
253 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
254 if (the_lnet.ln_res_lock == NULL)
257 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
258 if (the_lnet.ln_net_lock == NULL)
264 lnet_destroy_locks();
268 static void lnet_assert_wire_constants(void)
270 /* Wire protocol assertions generated by 'wirecheck'
271 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
272 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
273 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
276 CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
277 CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
278 CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
279 CLASSERT(LNET_MSG_ACK == 0);
280 CLASSERT(LNET_MSG_PUT == 1);
281 CLASSERT(LNET_MSG_GET == 2);
282 CLASSERT(LNET_MSG_REPLY == 3);
283 CLASSERT(LNET_MSG_HELLO == 4);
285 /* Checks for struct lnet_handle_wire */
286 CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
287 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
288 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
289 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
290 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
292 /* Checks for struct lnet_magicversion */
293 CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
294 CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
295 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
296 CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
297 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
298 CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
299 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
301 /* Checks for struct lnet_hdr */
302 CLASSERT((int)sizeof(struct lnet_hdr) == 72);
303 CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
304 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
305 CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
306 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
307 CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
308 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
309 CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
310 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
311 CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
312 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
313 CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
314 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
315 CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
316 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
319 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
320 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
321 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
322 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
323 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
324 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
327 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
328 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
329 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
330 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
331 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
332 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
333 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
334 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
335 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
336 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
339 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
340 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
341 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
342 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
343 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
344 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
345 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
346 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
347 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
348 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
351 CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
352 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
355 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
356 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
357 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
358 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
361 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
363 struct lnet_lnd *lnd;
364 struct list_head *tmp;
366 /* holding lnd mutex */
367 list_for_each(tmp, &the_lnet.ln_lnds) {
368 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
370 if (lnd->lnd_type == type)
377 lnet_register_lnd(struct lnet_lnd *lnd)
379 mutex_lock(&the_lnet.ln_lnd_mutex);
381 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
382 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
384 list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
385 lnd->lnd_refcount = 0;
387 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
389 mutex_unlock(&the_lnet.ln_lnd_mutex);
391 EXPORT_SYMBOL(lnet_register_lnd);
394 lnet_unregister_lnd(struct lnet_lnd *lnd)
396 mutex_lock(&the_lnet.ln_lnd_mutex);
398 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
399 LASSERT(lnd->lnd_refcount == 0);
401 list_del(&lnd->lnd_list);
402 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
404 mutex_unlock(&the_lnet.ln_lnd_mutex);
406 EXPORT_SYMBOL(lnet_unregister_lnd);
409 lnet_counters_get(struct lnet_counters *counters)
411 struct lnet_counters *ctr;
414 memset(counters, 0, sizeof(*counters));
416 lnet_net_lock(LNET_LOCK_EX);
418 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
419 counters->msgs_max += ctr->msgs_max;
420 counters->msgs_alloc += ctr->msgs_alloc;
421 counters->errors += ctr->errors;
422 counters->send_count += ctr->send_count;
423 counters->recv_count += ctr->recv_count;
424 counters->route_count += ctr->route_count;
425 counters->drop_count += ctr->drop_count;
426 counters->send_length += ctr->send_length;
427 counters->recv_length += ctr->recv_length;
428 counters->route_length += ctr->route_length;
429 counters->drop_length += ctr->drop_length;
432 lnet_net_unlock(LNET_LOCK_EX);
434 EXPORT_SYMBOL(lnet_counters_get);
437 lnet_counters_reset(void)
439 struct lnet_counters *counters;
442 lnet_net_lock(LNET_LOCK_EX);
444 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
445 memset(counters, 0, sizeof(struct lnet_counters));
447 lnet_net_unlock(LNET_LOCK_EX);
451 lnet_res_type2str(int type)
456 case LNET_COOKIE_TYPE_MD:
458 case LNET_COOKIE_TYPE_ME:
460 case LNET_COOKIE_TYPE_EQ:
466 lnet_res_container_cleanup(struct lnet_res_container *rec)
470 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
473 while (!list_empty(&rec->rec_active)) {
474 struct list_head *e = rec->rec_active.next;
477 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
478 lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
480 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
481 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
483 } else { /* NB: Active MEs should be attached on portals */
490 /* Found a live MD/ME/EQ: the user really should unlink/free
491 * all of them before finalizing LNet, but if someone didn't,
492 * we have to recycle the garbage for them */
493 CERROR("%d active elements on exit of %s container\n",
494 count, lnet_res_type2str(rec->rec_type));
497 if (rec->rec_lh_hash != NULL) {
498 LIBCFS_FREE(rec->rec_lh_hash,
499 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
500 rec->rec_lh_hash = NULL;
503 rec->rec_type = 0; /* mark it as finalized */
507 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
512 LASSERT(rec->rec_type == 0);
514 rec->rec_type = type;
515 INIT_LIST_HEAD(&rec->rec_active);
517 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
519 /* Arbitrary choice of hash table size */
520 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
521 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
522 if (rec->rec_lh_hash == NULL) {
527 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
528 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
533 CERROR("Failed to setup %s resource container\n",
534 lnet_res_type2str(type));
535 lnet_res_container_cleanup(rec);
540 lnet_res_containers_destroy(struct lnet_res_container **recs)
542 struct lnet_res_container *rec;
545 cfs_percpt_for_each(rec, i, recs)
546 lnet_res_container_cleanup(rec);
548 cfs_percpt_free(recs);
551 static struct lnet_res_container **
552 lnet_res_containers_create(int type)
554 struct lnet_res_container **recs;
555 struct lnet_res_container *rec;
559 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
561 CERROR("Failed to allocate %s resource containers\n",
562 lnet_res_type2str(type));
566 cfs_percpt_for_each(rec, i, recs) {
567 rc = lnet_res_container_setup(rec, i, type);
569 lnet_res_containers_destroy(recs);
577 struct lnet_libhandle *
578 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
580 /* ALWAYS called with lnet_res_lock held */
581 struct list_head *head;
582 struct lnet_libhandle *lh;
585 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
588 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
589 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
591 list_for_each_entry(lh, head, lh_hash_chain) {
592 if (lh->lh_cookie == cookie)
600 lnet_res_lh_initialize(struct lnet_res_container *rec,
601 struct lnet_libhandle *lh)
603 /* ALWAYS called with lnet_res_lock held */
604 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
607 lh->lh_cookie = rec->rec_lh_cookie;
608 rec->rec_lh_cookie += 1 << ibits;
610 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
612 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
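/*
 * Cookie layout implied by the two functions above (a documentation
 * aid, not authoritative): the low LNET_COOKIE_TYPE_BITS hold the
 * resource type, the next LNET_CPT_BITS hold the owning CPT, and the
 * remaining high bits form a per-container counter, which
 * lnet_res_lh_lookup() also uses as the hash input:
 *
 *   63 ................................. 0
 *   | counter (hash input) | CPT | type |
 */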
615 static int lnet_unprepare(void);
618 lnet_prepare(lnet_pid_t requested_pid)
620 /* Prepare to bring up the network */
621 struct lnet_res_container **recs;
624 if (requested_pid == LNET_PID_ANY) {
625 /* Don't instantiate LNET just for me */
629 LASSERT(the_lnet.ln_refcount == 0);
631 the_lnet.ln_routing = 0;
633 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
634 the_lnet.ln_pid = requested_pid;
636 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
637 INIT_LIST_HEAD(&the_lnet.ln_peers);
638 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
639 INIT_LIST_HEAD(&the_lnet.ln_nets);
640 INIT_LIST_HEAD(&the_lnet.ln_routers);
641 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
642 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
644 rc = lnet_descriptor_setup();
648 rc = lnet_create_remote_nets_table();
653 * NB the interface cookie in wire handles guards against delayed
654 * replies and ACKs appearing valid after reboot.
656 the_lnet.ln_interface_cookie = ktime_get_real_ns();
658 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
659 sizeof(struct lnet_counters));
660 if (the_lnet.ln_counters == NULL) {
661 CERROR("Failed to allocate counters for LNet\n");
666 rc = lnet_peer_tables_create();
670 rc = lnet_msg_containers_create();
674 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
675 LNET_COOKIE_TYPE_EQ);
679 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
685 the_lnet.ln_me_containers = recs;
687 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
693 the_lnet.ln_md_containers = recs;
695 rc = lnet_portals_create();
697 CERROR("Failed to create portals for LNet: %d\n", rc);
709 lnet_unprepare (void)
711 /* NB no LNET_LOCK since this is the last reference. All LND instances
712 * have shut down already, so it is safe to unlink and free all
713 * descriptors, even those that appear committed to a network op (e.g. an MD
714 * with non-zero pending count) */
716 lnet_fail_nid(LNET_NID_ANY, 0);
718 LASSERT(the_lnet.ln_refcount == 0);
719 LASSERT(list_empty(&the_lnet.ln_test_peers));
720 LASSERT(list_empty(&the_lnet.ln_nets));
722 lnet_portals_destroy();
724 if (the_lnet.ln_md_containers != NULL) {
725 lnet_res_containers_destroy(the_lnet.ln_md_containers);
726 the_lnet.ln_md_containers = NULL;
729 if (the_lnet.ln_me_containers != NULL) {
730 lnet_res_containers_destroy(the_lnet.ln_me_containers);
731 the_lnet.ln_me_containers = NULL;
734 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
736 lnet_msg_containers_destroy();
738 lnet_rtrpools_free(0);
740 if (the_lnet.ln_counters != NULL) {
741 cfs_percpt_free(the_lnet.ln_counters);
742 the_lnet.ln_counters = NULL;
744 lnet_destroy_remote_nets_table();
745 lnet_descriptor_cleanup();
751 lnet_net2ni_locked(__u32 net_id, int cpt)
754 struct lnet_net *net;
756 LASSERT(cpt != LNET_LOCK_EX);
758 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
759 if (net->net_id == net_id) {
760 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
770 lnet_net2ni_addref(__u32 net)
775 ni = lnet_net2ni_locked(net, 0);
777 lnet_ni_addref_locked(ni, 0);
782 EXPORT_SYMBOL(lnet_net2ni_addref);
785 lnet_get_net_locked(__u32 net_id)
787 struct lnet_net *net;
789 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
790 if (net->net_id == net_id)
798 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
803 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
808 val = hash_long(key, LNET_CPT_BITS);
809 /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
813 return (unsigned int)(key + val + (val >> 1)) % number;
817 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
819 struct lnet_net *net;
821 /* must be called with lnet_net_lock held */
822 if (LNET_CPT_NUMBER == 1)
823 return 0; /* the only one */
826 * If NI is provided then use the CPT identified in the NI cpt
827 * list if one exists. If one doesn't exist, then that NI is
828 * associated with all CPTs and it follows that the net it belongs
829 * to is implicitly associated with all CPTs, so just hash the nid
833 if (ni->ni_cpts != NULL)
834 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
837 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
840 /* no NI provided so look at the net */
841 net = lnet_get_net_locked(LNET_NIDNET(nid));
843 if (net != NULL && net->net_cpts != NULL) {
844 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
847 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
851 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
856 if (LNET_CPT_NUMBER == 1)
857 return 0; /* the only one */
859 cpt = lnet_net_lock_current();
861 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
863 lnet_net_unlock(cpt);
867 EXPORT_SYMBOL(lnet_cpt_of_nid);
870 lnet_islocalnet(__u32 net_id)
872 struct lnet_net *net;
876 cpt = lnet_net_lock_current();
878 net = lnet_get_net_locked(net_id);
882 lnet_net_unlock(cpt);
888 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
890 if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
891 ni->ni_state == LNET_NI_STATE_DEGRADED)
898 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
900 struct lnet_net *net;
903 LASSERT(cpt != LNET_LOCK_EX);
905 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
906 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
907 if (ni->ni_nid == nid)
916 lnet_nid2ni_addref(lnet_nid_t nid)
921 ni = lnet_nid2ni_locked(nid, 0);
923 lnet_ni_addref_locked(ni, 0);
928 EXPORT_SYMBOL(lnet_nid2ni_addref);
931 lnet_islocalnid(lnet_nid_t nid)
936 cpt = lnet_net_lock_current();
937 ni = lnet_nid2ni_locked(nid, cpt);
938 lnet_net_unlock(cpt);
944 lnet_count_acceptor_nets(void)
946 /* Return the # of NIs that need the acceptor. */
948 struct lnet_net *net;
951 cpt = lnet_net_lock_current();
952 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
953 /* all socklnd type networks should have the acceptor
955 if (net->net_lnd->lnd_accept != NULL)
959 lnet_net_unlock(cpt);
964 struct lnet_ping_buffer *
965 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
967 struct lnet_ping_buffer *pbuf;
969 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
971 pbuf->pb_nnis = nnis;
972 atomic_set(&pbuf->pb_refcnt, 1);
979 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
981 LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
982 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
985 static struct lnet_ping_buffer *
986 lnet_ping_target_create(int nnis)
988 struct lnet_ping_buffer *pbuf;
990 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
992 CERROR("Can't allocate ping source [%d]\n", nnis);
996 pbuf->pb_info.pi_nnis = nnis;
997 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
998 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
999 pbuf->pb_info.pi_features = LNET_PING_FEAT_NI_STATUS;
1005 lnet_get_net_ni_count_locked(struct lnet_net *net)
1010 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1017 lnet_get_net_ni_count_pre(struct lnet_net *net)
1022 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1029 lnet_get_ni_count(void)
1032 struct lnet_net *net;
1037 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1038 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1048 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1052 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1054 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1056 /* Loopback is guaranteed to be present */
1057 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1059 if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1065 lnet_ping_target_destroy(void)
1067 struct lnet_net *net;
1070 lnet_net_lock(LNET_LOCK_EX);
1072 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1073 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1075 ni->ni_status = NULL;
1080 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1081 the_lnet.ln_ping_target = NULL;
1083 lnet_net_unlock(LNET_LOCK_EX);
1087 lnet_ping_target_event_handler(struct lnet_event *event)
1089 struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1091 if (event->unlinked)
1092 lnet_ping_buffer_decref(pbuf);
1096 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1097 struct lnet_handle_md *ping_mdh,
1098 int ni_count, bool set_eq)
1100 struct lnet_process_id id = {
1101 .nid = LNET_NID_ANY,
1104 struct lnet_handle_me me_handle;
1105 struct lnet_md md = { NULL };
1109 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1110 &the_lnet.ln_ping_target_eq);
1112 CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1117 *ppbuf = lnet_ping_target_create(ni_count);
1118 if (*ppbuf == NULL) {
1123 /* Ping target ME/MD */
1124 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1125 LNET_PROTO_PING_MATCHBITS, 0,
1126 LNET_UNLINK, LNET_INS_AFTER,
1129 CERROR("Can't create ping target ME: %d\n", rc);
1130 goto fail_decref_ping_buffer;
1133 /* initialize md content */
1134 md.start = &(*ppbuf)->pb_info;
1135 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1136 md.threshold = LNET_MD_THRESH_INF;
1138 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1139 LNET_MD_MANAGE_REMOTE;
1140 md.eq_handle = the_lnet.ln_ping_target_eq;
1141 md.user_ptr = *ppbuf;
1143 rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1145 CERROR("Can't attach ping target MD: %d\n", rc);
1146 goto fail_unlink_ping_me;
1148 lnet_ping_buffer_addref(*ppbuf);
1152 fail_unlink_ping_me:
1153 rc2 = LNetMEUnlink(me_handle);
1155 fail_decref_ping_buffer:
1156 LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1157 lnet_ping_buffer_decref(*ppbuf);
1161 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1168 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1169 struct lnet_handle_md *ping_mdh)
1171 sigset_t blocked = cfs_block_allsigs();
1173 LNetMDUnlink(*ping_mdh);
1174 LNetInvalidateMDHandle(ping_mdh);
1176 /* NB the MD could be busy; this just starts the unlink */
1177 while (lnet_ping_buffer_numref(pbuf) > 1) {
1178 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1179 set_current_state(TASK_UNINTERRUPTIBLE);
1180 schedule_timeout(cfs_time_seconds(1));
1183 cfs_restore_sigs(blocked);
1187 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1190 struct lnet_net *net;
1191 struct lnet_ni_status *ns;
1196 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1197 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1198 LASSERT(i < pbuf->pb_nnis);
1200 ns = &pbuf->pb_info.pi_ni[i];
1202 ns->ns_nid = ni->ni_nid;
1205 ns->ns_status = (ni->ni_status != NULL) ?
1206 ni->ni_status->ns_status :
1215 * We (ab)use the ns_status of the loopback interface to
1216 * transmit the sequence number. The first interface listed
1217 * must be the loopback interface.
1219 rc = lnet_ping_info_validate(&pbuf->pb_info);
1221 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1224 LNET_PING_BUFFER_SEQNO(pbuf) =
1225 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
1229 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1230 struct lnet_handle_md ping_mdh)
1232 struct lnet_ping_buffer *old_pbuf = NULL;
1233 struct lnet_handle_md old_ping_md;
1235 /* switch the NIs to point to the new ping info created */
1236 lnet_net_lock(LNET_LOCK_EX);
1238 if (!the_lnet.ln_routing)
1239 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1240 lnet_ping_target_install_locked(pbuf);
1242 if (the_lnet.ln_ping_target) {
1243 old_pbuf = the_lnet.ln_ping_target;
1244 old_ping_md = the_lnet.ln_ping_target_md;
1246 the_lnet.ln_ping_target_md = ping_mdh;
1247 the_lnet.ln_ping_target = pbuf;
1249 lnet_net_unlock(LNET_LOCK_EX);
1252 /* unlink and free the old ping info */
1253 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1254 lnet_ping_buffer_decref(old_pbuf);
1259 lnet_ping_target_fini(void)
1263 lnet_ping_md_unlink(the_lnet.ln_ping_target,
1264 &the_lnet.ln_ping_target_md);
1266 rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1269 lnet_ping_target_destroy();
1273 lnet_ni_tq_credits(struct lnet_ni *ni)
1277 LASSERT(ni->ni_ncpts >= 1);
1279 if (ni->ni_ncpts == 1)
1280 return ni->ni_net->net_tunables.lct_max_tx_credits;
1282 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1283 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1284 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
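/*
 * Worked example with hypothetical numbers for the function above:
 * with lct_max_tx_credits == 256, lct_peer_tx_credits == 8 and
 * ni_ncpts == 4, each TX queue gets max(256 / 4, 8 * 8) == 64
 * credits, which is already within the 256-credit cap.
 */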
1290 lnet_ni_unlink_locked(struct lnet_ni *ni)
1292 if (!list_empty(&ni->ni_cptlist)) {
1293 list_del_init(&ni->ni_cptlist);
1294 lnet_ni_decref_locked(ni, 0);
1297 /* move it to the zombie list so nobody can find it anymore */
1298 LASSERT(!list_empty(&ni->ni_netlist));
1299 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1300 lnet_ni_decref_locked(ni, 0);
1304 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1309 struct list_head *zombie_list = &net->net_ni_zombie;
1312 * Now wait for the NIs I just nuked to show up on the zombie
1313 * list and shut them down in guaranteed thread context
1316 while (!list_empty(zombie_list)) {
1320 ni = list_entry(zombie_list->next,
1321 struct lnet_ni, ni_netlist);
1322 list_del_init(&ni->ni_netlist);
1323 /* the ni should be in deleting state. If it's not it's
1325 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
1326 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1329 /* still busy, add it back to zombie list */
1330 list_add(&ni->ni_netlist, zombie_list);
1334 if (!list_empty(&ni->ni_netlist)) {
1335 lnet_net_unlock(LNET_LOCK_EX);
1337 if ((i & (-i)) == i) {
1339 "Waiting for zombie LNI %s\n",
1340 libcfs_nid2str(ni->ni_nid));
1342 set_current_state(TASK_UNINTERRUPTIBLE);
1343 schedule_timeout(cfs_time_seconds(1));
1344 lnet_net_lock(LNET_LOCK_EX);
1348 lnet_net_unlock(LNET_LOCK_EX);
1350 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1352 LASSERT(!in_interrupt());
1353 (net->net_lnd->lnd_shutdown)(ni);
1356 CDEBUG(D_LNI, "Removed LNI %s\n",
1357 libcfs_nid2str(ni->ni_nid));
1361 lnet_net_lock(LNET_LOCK_EX);
1365 /* shut down the NI and release its refcount */
1367 lnet_shutdown_lndni(struct lnet_ni *ni)
1370 struct lnet_net *net = ni->ni_net;
1372 lnet_net_lock(LNET_LOCK_EX);
1373 ni->ni_state = LNET_NI_STATE_DELETING;
1374 lnet_ni_unlink_locked(ni);
1375 lnet_incr_dlc_seq();
1376 lnet_net_unlock(LNET_LOCK_EX);
1378 /* clear messages for this NI on the lazy portal */
1379 for (i = 0; i < the_lnet.ln_nportals; i++)
1380 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1382 lnet_net_lock(LNET_LOCK_EX);
1383 lnet_clear_zombies_nis_locked(net);
1384 lnet_net_unlock(LNET_LOCK_EX);
1388 lnet_shutdown_lndnet(struct lnet_net *net)
1392 lnet_net_lock(LNET_LOCK_EX);
1394 net->net_state = LNET_NET_STATE_DELETING;
1396 list_del_init(&net->net_list);
1398 while (!list_empty(&net->net_ni_list)) {
1399 ni = list_entry(net->net_ni_list.next,
1400 struct lnet_ni, ni_netlist);
1401 lnet_net_unlock(LNET_LOCK_EX);
1402 lnet_shutdown_lndni(ni);
1403 lnet_net_lock(LNET_LOCK_EX);
1406 lnet_net_unlock(LNET_LOCK_EX);
1408 /* Do peer table cleanup for this net */
1409 lnet_peer_tables_cleanup(net);
1411 lnet_net_lock(LNET_LOCK_EX);
1413 * decrement ref count on lnd only when the entire network goes
1416 net->net_lnd->lnd_refcount--;
1418 lnet_net_unlock(LNET_LOCK_EX);
1424 lnet_shutdown_lndnets(void)
1426 struct lnet_net *net;
1428 /* NB called holding the global mutex */
1430 /* All quiet on the API front */
1431 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1432 LASSERT(the_lnet.ln_refcount == 0);
1434 lnet_net_lock(LNET_LOCK_EX);
1435 the_lnet.ln_state = LNET_STATE_STOPPING;
1437 while (!list_empty(&the_lnet.ln_nets)) {
1439 * move the nets to the zombie list to avoid them being
1440 * picked up for new work. LONET is also included in the
1441 * nets that will be moved to the zombie list
1443 net = list_entry(the_lnet.ln_nets.next,
1444 struct lnet_net, net_list);
1445 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1448 /* Drop the cached loopback Net. */
1449 if (the_lnet.ln_loni != NULL) {
1450 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1451 the_lnet.ln_loni = NULL;
1453 lnet_net_unlock(LNET_LOCK_EX);
1455 /* iterate through the net zombie list and delete each net */
1456 while (!list_empty(&the_lnet.ln_net_zombie)) {
1457 net = list_entry(the_lnet.ln_net_zombie.next,
1458 struct lnet_net, net_list);
1459 lnet_shutdown_lndnet(net);
1462 lnet_net_lock(LNET_LOCK_EX);
1463 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1464 lnet_net_unlock(LNET_LOCK_EX);
1468 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
1471 struct lnet_tx_queue *tq;
1473 struct lnet_net *net = ni->ni_net;
1475 mutex_lock(&the_lnet.ln_lnd_mutex);
1478 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
1479 ni->ni_lnd_tunables_set = true;
1482 rc = (net->net_lnd->lnd_startup)(ni);
1484 mutex_unlock(&the_lnet.ln_lnd_mutex);
1487 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1488 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
1489 lnet_net_lock(LNET_LOCK_EX);
1490 net->net_lnd->lnd_refcount--;
1491 lnet_net_unlock(LNET_LOCK_EX);
1495 ni->ni_state = LNET_NI_STATE_ACTIVE;
1497 /* We keep a reference on the loopback net through the loopback NI */
1498 if (net->net_lnd->lnd_type == LOLND) {
1500 LASSERT(the_lnet.ln_loni == NULL);
1501 the_lnet.ln_loni = ni;
1502 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
1503 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
1504 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
1505 ni->ni_net->net_tunables.lct_peer_timeout = 0;
1509 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
1510 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
1511 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1512 libcfs_lnd2str(net->net_lnd->lnd_type),
1513 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
1515 /* shut down the NI since if we get here then it must've already
1518 lnet_shutdown_lndni(ni);
1522 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1523 tq->tq_credits_min =
1524 tq->tq_credits_max =
1525 tq->tq_credits = lnet_ni_tq_credits(ni);
1528 atomic_set(&ni->ni_tx_credits,
1529 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
1531 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1532 libcfs_nid2str(ni->ni_nid),
1533 ni->ni_net->net_tunables.lct_peer_tx_credits,
1534 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1535 ni->ni_net->net_tunables.lct_peer_rtr_credits,
1536 ni->ni_net->net_tunables.lct_peer_timeout);
1545 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
1548 struct lnet_net *net_l = NULL;
1549 struct list_head local_ni_list;
1553 struct lnet_lnd *lnd;
1555 net->net_tunables.lct_peer_timeout;
1557 net->net_tunables.lct_max_tx_credits;
1558 int peerrtrcredits =
1559 net->net_tunables.lct_peer_rtr_credits;
1561 INIT_LIST_HEAD(&local_ni_list);
1564 * Make sure that this net is unique. If it isn't, then
1565 * we are adding interfaces to an already existing network, and
1566 * 'net' is just a convenient way to pass in the list.
1567 * If it is unique we need to find the LND and load it if
1570 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
1571 lnd_type = LNET_NETTYP(net->net_id);
1573 LASSERT(libcfs_isknown_lnd(lnd_type));
1575 mutex_lock(&the_lnet.ln_lnd_mutex);
1576 lnd = lnet_find_lnd_by_type(lnd_type);
1579 mutex_unlock(&the_lnet.ln_lnd_mutex);
1580 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1581 mutex_lock(&the_lnet.ln_lnd_mutex);
1583 lnd = lnet_find_lnd_by_type(lnd_type);
1585 mutex_unlock(&the_lnet.ln_lnd_mutex);
1586 CERROR("Can't load LND %s, module %s, rc=%d\n",
1587 libcfs_lnd2str(lnd_type),
1588 libcfs_lnd2modname(lnd_type), rc);
1589 #ifndef HAVE_MODULE_LOADING_SUPPORT
1590 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
1591 "compiled with kernel module "
1592 "loading support.");
1599 lnet_net_lock(LNET_LOCK_EX);
1600 lnd->lnd_refcount++;
1601 lnet_net_unlock(LNET_LOCK_EX);
1605 mutex_unlock(&the_lnet.ln_lnd_mutex);
1611 * net_l: if the network being added is unique then net_l
1612 * will point to that network
1613 * if the network being added is not unique then
1614 * net_l points to the existing network.
1616 * When we enter the loop below, we'll pick NIs off the
1617 * network being added and start them up, then add them to
1618 * a local ni list. Once we've successfully started all
1619 * the NIs then we join the local NI list (of started up
1620 * networks) with the net_l->net_ni_list, which should
1621 * point to the correct network to add the new ni list to
1623 * If any of the new NIs fail to start up, then we want to
1624 * iterate through the local ni list, which should include
1625 * any NIs which were successfully started up, and shut
1628 * After that we want to delete the network being added,
1629 * to avoid a memory leak.
1633 * When a network uses TCP bonding then all its interfaces
1634 * must be specified when the network is first defined: the
1635 * TCP bonding code doesn't allow for interfaces to be added
1638 if (net_l != net && net_l != NULL && use_tcp_bonding &&
1639 LNET_NETTYP(net_l->net_id) == SOCKLND) {
1644 while (!list_empty(&net->net_ni_added)) {
1645 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
1647 list_del_init(&ni->ni_netlist);
1649 /* make sure that the NI we're about to start
1650 * up is actually unique. If it's not, fail. */
1651 if (!lnet_ni_unique_net(&net_l->net_ni_list,
1652 ni->ni_interfaces[0])) {
1657 /* adjust the pointer to the parent network, just in
1658 * case the net is a duplicate */
1661 rc = lnet_startup_lndni(ni, tun);
1663 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
1664 ni->ni_net->net_lnd->lnd_query != NULL);
1670 list_add_tail(&ni->ni_netlist, &local_ni_list);
1675 lnet_net_lock(LNET_LOCK_EX);
1676 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
1677 lnet_incr_dlc_seq();
1678 lnet_net_unlock(LNET_LOCK_EX);
1680 /* if the network is not unique then we don't want to keep
1681 * it around after we're done. Free it. Otherwise add that
1682 * net to the global the_lnet.ln_nets */
1683 if (net_l != net && net_l != NULL) {
1685 * TODO - note. currently the tunables can not be updated
1690 net->net_state = LNET_NET_STATE_ACTIVE;
1692 * restore tunables after they have been overwritten by the
1695 if (peer_timeout != -1)
1696 net->net_tunables.lct_peer_timeout = peer_timeout;
1697 if (maxtxcredits != -1)
1698 net->net_tunables.lct_max_tx_credits = maxtxcredits;
1699 if (peerrtrcredits != -1)
1700 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
1702 lnet_net_lock(LNET_LOCK_EX);
1703 list_add_tail(&net->net_list, &the_lnet.ln_nets);
1704 lnet_net_unlock(LNET_LOCK_EX);
1711 * shut down the new NIs that are being started up and
1712 * free the net being started
1714 while (!list_empty(&local_ni_list)) {
1715 ni = list_entry(local_ni_list.next, struct lnet_ni,
1718 lnet_shutdown_lndni(ni);
1728 lnet_startup_lndnets(struct list_head *netlist)
1730 struct lnet_net *net;
1735 * Change to running state before bringing up the LNDs. This
1736 * allows lnet_shutdown_lndnets() to assert that we've passed
1739 lnet_net_lock(LNET_LOCK_EX);
1740 the_lnet.ln_state = LNET_STATE_RUNNING;
1741 lnet_net_unlock(LNET_LOCK_EX);
1743 while (!list_empty(netlist)) {
1744 net = list_entry(netlist->next, struct lnet_net, net_list);
1745 list_del_init(&net->net_list);
1747 rc = lnet_startup_lndnet(net, NULL);
1757 lnet_shutdown_lndnets();
1763 * Initialize LNet library.
1765 * Automatically called at module loading time. Caller has to call
1766 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
1767 * latter returned 0. It must be called exactly once.
1769 * \retval 0 on success
1770 * \retval -ve on failures.
1772 int lnet_lib_init(void)
1776 lnet_assert_wire_constants();
1778 memset(&the_lnet, 0, sizeof(the_lnet));
1780 /* refer to global cfs_cpt_table for now */
1781 the_lnet.ln_cpt_table = cfs_cpt_table;
1782 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
1784 LASSERT(the_lnet.ln_cpt_number > 0);
1785 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
1786 /* we are at risk of consuming all lh_cookie values */
1787 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
1788 "please change setting of CPT-table and retry\n",
1789 the_lnet.ln_cpt_number, LNET_CPT_MAX);
1793 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
1794 the_lnet.ln_cpt_bits++;
1796 rc = lnet_create_locks();
1798 CERROR("Can't create LNet global locks: %d\n", rc);
1802 the_lnet.ln_refcount = 0;
1803 LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
1804 INIT_LIST_HEAD(&the_lnet.ln_lnds);
1805 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
1806 INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
1807 INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
1809 /* The hash table size is the number of bits it takes to express the set
1810 * ln_num_routes, minus 1 (better to underestimate than overestimate so we
1811 * don't waste memory). */
1812 if (rnet_htable_size <= 0)
1813 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
1814 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
1815 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
1816 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
1817 order_base_2(rnet_htable_size) - 1);
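/*
 * E.g., assuming LNET_REMOTE_NETS_HASH_DEFAULT == 128: with
 * rnet_htable_size == 128, order_base_2(128) == 7, so
 * ln_remote_nets_hbits == 6 and the hash table gets 2^6 == 64 buckets.
 */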
1819 /* All LNDs apart from the LOLND are in separate modules. They
1820 * register themselves when their module loads, and unregister
1821 * themselves when their module is unloaded. */
1822 lnet_register_lnd(&the_lolnd);
1827 * Finalize LNet library.
1829 * \pre lnet_lib_init() called with success.
1830 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
1832 void lnet_lib_exit(void)
1834 LASSERT(the_lnet.ln_refcount == 0);
1836 while (!list_empty(&the_lnet.ln_lnds))
1837 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
1838 struct lnet_lnd, lnd_list));
1839 lnet_destroy_locks();
1843 * Set LNet PID and start LNet interfaces, routing, and forwarding.
1845 * Users must call this function at least once before any other functions.
1846 * For each successful call there must be a corresponding call to
1847 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
1850 * The PID used by LNet may be different from the one requested.
1853 * \param requested_pid PID requested by the caller.
1855 * \return >= 0 on success, and < 0 error code on failures.
1858 LNetNIInit(lnet_pid_t requested_pid)
1860 int im_a_router = 0;
1863 struct lnet_ping_buffer *pbuf;
1864 struct lnet_handle_md ping_mdh;
1865 struct list_head net_head;
1866 struct lnet_net *net;
1868 INIT_LIST_HEAD(&net_head);
1870 mutex_lock(&the_lnet.ln_api_mutex);
1872 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
1874 if (the_lnet.ln_refcount > 0) {
1875 rc = the_lnet.ln_refcount++;
1876 mutex_unlock(&the_lnet.ln_api_mutex);
1880 rc = lnet_prepare(requested_pid);
1882 mutex_unlock(&the_lnet.ln_api_mutex);
1886 /* create a network object for the loopback network */
1887 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
1890 goto err_empty_list;
1893 /* Add in the loopback NI */
1894 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
1896 goto err_empty_list;
1899 /* If LNet is being initialized via DLC it is possible
1900 * that the user requests not to load module parameters (ones which
1901 * are supported by DLC) on initialization. Therefore, make sure not
1902 * to load networks, routes and forwarding from module parameters
1903 * in this case. On cleanup in case of failure only clean up
1904 * routes if they have been loaded */
1905 if (!the_lnet.ln_nis_from_mod_params) {
1906 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
1909 goto err_empty_list;
1912 ni_count = lnet_startup_lndnets(&net_head);
1915 goto err_empty_list;
1918 if (!the_lnet.ln_nis_from_mod_params) {
1919 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
1921 goto err_shutdown_lndnis;
1923 rc = lnet_check_routes();
1925 goto err_destroy_routes;
1927 rc = lnet_rtrpools_alloc(im_a_router);
1929 goto err_destroy_routes;
1932 rc = lnet_acceptor_start();
1934 goto err_destroy_routes;
1936 the_lnet.ln_refcount = 1;
1937 /* Now I may use my own API functions... */
1939 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
1941 goto err_acceptor_stop;
1943 lnet_ping_target_update(pbuf, ping_mdh);
1945 rc = lnet_router_checker_start();
1952 mutex_unlock(&the_lnet.ln_api_mutex);
1957 lnet_ping_target_fini();
1959 the_lnet.ln_refcount = 0;
1960 lnet_acceptor_stop();
1962 if (!the_lnet.ln_nis_from_mod_params)
1963 lnet_destroy_routes();
1964 err_shutdown_lndnis:
1965 lnet_shutdown_lndnets();
1969 mutex_unlock(&the_lnet.ln_api_mutex);
1970 while (!list_empty(&net_head)) {
1971 struct lnet_net *net;
1973 net = list_entry(net_head.next, struct lnet_net, net_list);
1974 list_del_init(&net->net_list);
1979 EXPORT_SYMBOL(LNetNIInit);
1982 * Stop LNet interfaces, routing, and forwarding.
1984 * Users must call this function once for each successful call to LNetNIInit().
1985 * Once the LNetNIFini() operation has been started, the results of pending
1986 * API operations are undefined.
1988 * \return always 0 for current implementation.
1993 mutex_lock(&the_lnet.ln_api_mutex);
1995 LASSERT(the_lnet.ln_refcount > 0);
1997 if (the_lnet.ln_refcount != 1) {
1998 the_lnet.ln_refcount--;
2000 LASSERT(!the_lnet.ln_niinit_self);
2005 lnet_router_checker_stop();
2006 lnet_ping_target_fini();
2008 /* Teardown fns that use my own API functions BEFORE here */
2009 the_lnet.ln_refcount = 0;
2011 lnet_acceptor_stop();
2012 lnet_destroy_routes();
2013 lnet_shutdown_lndnets();
2017 mutex_unlock(&the_lnet.ln_api_mutex);
2020 EXPORT_SYMBOL(LNetNIFini);
2023 * Grabs the ni data from the ni structure and fills the out
2026 * \param[in] ni network interface structure
2027 * \param[out] cfg_ni NI config information
2028 * \param[out] tun network and LND tunables
2031 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2032 struct lnet_ioctl_config_lnd_tunables *tun,
2033 struct lnet_ioctl_element_stats *stats,
2036 size_t min_size = 0;
2039 if (!ni || !cfg_ni || !tun)
2042 if (ni->ni_interfaces[0] != NULL) {
2043 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2044 if (ni->ni_interfaces[i] != NULL) {
2045 strncpy(cfg_ni->lic_ni_intf[i],
2046 ni->ni_interfaces[i],
2047 sizeof(cfg_ni->lic_ni_intf[i]));
2052 cfg_ni->lic_nid = ni->ni_nid;
2053 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2054 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2056 cfg_ni->lic_status = ni->ni_status->ns_status;
2057 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2058 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2060 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2063 stats->iel_send_count = atomic_read(&ni->ni_stats.send_count);
2064 stats->iel_recv_count = atomic_read(&ni->ni_stats.recv_count);
2068 * tun->lt_tun will always be present, but in order to be
2069 * backwards compatible, we need to handle the case where
2070 * tun->lt_tun is smaller than what the kernel has because it
2071 * comes from an older version of a userspace program. In that
2072 * case we copy only as much information as fits in the available space.
2074 min_size = tun_size - sizeof(tun->lt_cmn);
2075 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2077 /* copy over the cpts */
2078 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2079 ni->ni_cpts == NULL) {
2080 for (i = 0; i < ni->ni_ncpts; i++)
2081 cfg_ni->lic_cpts[i] = i;
2084 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2085 i < LNET_MAX_SHOW_NUM_CPT;
2087 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2089 cfg_ni->lic_ncpts = ni->ni_ncpts;
2093 * NOTE: This is a legacy function left in the code to be backwards
2094 * compatible with older userspace programs. It should eventually be
2097 * Grabs the ni data from the ni structure and fills the out
2100 * \param[in] ni network interface structure
2101 * \param[out] config config information
2104 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2105 struct lnet_ioctl_config_data *config)
2107 struct lnet_ioctl_net_config *net_config;
2108 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2109 size_t min_size, tunable_size = 0;
2115 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2119 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2120 ARRAY_SIZE(net_config->ni_interfaces));
2122 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2123 if (!ni->ni_interfaces[i])
2126 strncpy(net_config->ni_interfaces[i],
2127 ni->ni_interfaces[i],
2128 sizeof(net_config->ni_interfaces[i]));
2131 config->cfg_nid = ni->ni_nid;
2132 config->cfg_config_u.cfg_net.net_peer_timeout =
2133 ni->ni_net->net_tunables.lct_peer_timeout;
2134 config->cfg_config_u.cfg_net.net_max_tx_credits =
2135 ni->ni_net->net_tunables.lct_max_tx_credits;
2136 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2137 ni->ni_net->net_tunables.lct_peer_tx_credits;
2138 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2139 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2141 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2142 net_config->ni_status = LNET_NI_STATUS_UP;
2144 net_config->ni_status = ni->ni_status->ns_status;
2147 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2149 for (i = 0; i < num_cpts; i++)
2150 net_config->ni_cpts[i] = ni->ni_cpts[i];
2152 config->cfg_ncpts = num_cpts;
2156 * See if user land tools sent in a newer and larger version
2157 * of struct lnet_tunables than what the kernel uses.
2159 min_size = sizeof(*config) + sizeof(*net_config);
2161 if (config->cfg_hdr.ioc_len > min_size)
2162 tunable_size = config->cfg_hdr.ioc_len - min_size;
2164 /* Don't copy too much data to user space */
2165 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2166 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2168 if (lnd_cfg && min_size) {
2169 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2170 config->cfg_config_u.cfg_net.net_interface_count = 1;
2172 /* Tell user land that kernel side has less data */
2173 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2174 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2175 config->cfg_hdr.ioc_len -= min_size;
2181 lnet_get_ni_idx_locked(int idx)
2184 struct lnet_net *net;
2186 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2187 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2197 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2200 struct lnet_net *net = mynet;
2204 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2206 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2212 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2213 /* if you reached the end of the ni list and the net is
2214 * specified, then there are no more nis in that net */
2218 /* we reached the end of this net ni list. move to the
2220 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2221 /* no more nets and no more NIs. */
2224 /* get the next net */
2225 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2227 /* get the ni on it */
2228 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2234 /* there are more nis left */
2235 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
2241 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2246 int idx = config->cfg_count;
2248 cpt = lnet_net_lock_current();
2250 ni = lnet_get_ni_idx_locked(idx);
2255 lnet_fill_ni_info_legacy(ni, config);
2259 lnet_net_unlock(cpt);
2264 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2265 struct lnet_ioctl_config_lnd_tunables *tun,
2266 struct lnet_ioctl_element_stats *stats,
2273 if (!cfg_ni || !tun || !stats)
2276 cpt = lnet_net_lock_current();
2278 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2283 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2287 lnet_net_unlock(cpt);
2291 static int lnet_add_net_common(struct lnet_net *net,
2292 struct lnet_ioctl_config_lnd_tunables *tun)
2295 struct lnet_ping_buffer *pbuf;
2296 struct lnet_handle_md ping_mdh;
2298 struct lnet_remotenet *rnet;
2300 int num_acceptor_nets;
2302 lnet_net_lock(LNET_LOCK_EX);
2303 rnet = lnet_find_rnet_locked(net->net_id);
2304 lnet_net_unlock(LNET_LOCK_EX);
2306 * make sure that the net added doesn't invalidate the current
2307 * configuration LNet is keeping
2310 CERROR("Adding net %s will invalidate routing configuration\n",
2311 libcfs_net2str(net->net_id));
2317 * make sure you calculate the correct number of slots in the ping
2318 * buffer. Since the ping info is a flattened list of all the NIs,
2319 * we should allocate enough slots to accommodate the number of NIs
2320 * which will be added.
2322 * since ni hasn't been configured yet, use
2323 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
2325 net_ni_count = lnet_get_net_ni_count_pre(net);
2327 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2328 net_ni_count + lnet_get_ni_count(),
2336 memcpy(&net->net_tunables,
2337 &tun->lt_cmn, sizeof(net->net_tunables));
2339 memset(&net->net_tunables, -1, sizeof(net->net_tunables));
2342 * Before starting this network, get a count of the current TCP
2343 * networks which require the acceptor thread to be running. If that
2344 * count is 0 before we start up this network, then we want to
2345 * start up the acceptor thread after starting up this network
2347 num_acceptor_nets = lnet_count_acceptor_nets();
2349 net_id = net->net_id;
2351 rc = lnet_startup_lndnet(net,
2352 (tun) ? &tun->lt_tun : NULL);
2356 lnet_net_lock(LNET_LOCK_EX);
2357 net = lnet_get_net_locked(net_id);
2358 lnet_net_unlock(LNET_LOCK_EX);
2363 * Start the acceptor thread if this is the first network
2364 * being added that requires the thread.
2366 if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
2367 rc = lnet_acceptor_start();
2369 /* shut down the net that we just started */
2370 CERROR("Failed to start up acceptor thread\n");
2371 lnet_shutdown_lndnet(net);
2376 lnet_net_lock(LNET_LOCK_EX);
2377 lnet_peer_net_added(net);
2378 lnet_net_unlock(LNET_LOCK_EX);
2380 lnet_ping_target_update(pbuf, ping_mdh);
2385 lnet_ping_md_unlink(pbuf, &ping_mdh);
2386 lnet_ping_buffer_decref(pbuf);
2390 static int lnet_handle_legacy_ip2nets(char *ip2nets,
2391 struct lnet_ioctl_config_lnd_tunables *tun)
2393 struct lnet_net *net;
2396 struct list_head net_head;
2398 INIT_LIST_HEAD(&net_head);
2400 rc = lnet_parse_ip2nets(&nets, ip2nets);
2404 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2408 mutex_lock(&the_lnet.ln_api_mutex);
2409 while (!list_empty(&net_head)) {
2410 net = list_entry(net_head.next, struct lnet_net, net_list);
2411 list_del_init(&net->net_list);
2412 rc = lnet_add_net_common(net, tun);
2418 mutex_unlock(&the_lnet.ln_api_mutex);
2420 while (!list_empty(&net_head)) {
2421 net = list_entry(net_head.next, struct lnet_net, net_list);
2422 list_del_init(&net->net_list);
2428 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
2430 struct lnet_net *net;
2432 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2436 /* get the tunables if they are available */
2437 if (conf->lic_cfg_hdr.ioc_len >=
2438 sizeof(*conf) + sizeof(*tun))
2439 tun = (struct lnet_ioctl_config_lnd_tunables *)
2442 /* handle legacy ip2nets from DLC */
2443 if (conf->lic_legacy_ip2nets[0] != '\0')
2444 return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
2447 net_id = LNET_NIDNET(conf->lic_nid);
2449 net = lnet_net_alloc(net_id, NULL);
2453 for (i = 0; i < conf->lic_ncpts; i++) {
2454 if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
2458 ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
2459 conf->lic_ni_intf[0]);
2463 mutex_lock(&the_lnet.ln_api_mutex);
2465 rc = lnet_add_net_common(net, tun);
2467 mutex_unlock(&the_lnet.ln_api_mutex);
2472 int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
2474 struct lnet_net *net;
2476 __u32 net_id = LNET_NIDNET(conf->lic_nid);
2477 struct lnet_ping_buffer *pbuf;
2478 struct lnet_handle_md ping_mdh;
2483 /* don't allow userspace to shut down the LOLND */
2484 if (LNET_NETTYP(net_id) == LOLND)
2487 mutex_lock(&the_lnet.ln_api_mutex);
2491 net = lnet_get_net_locked(net_id);
2493 CERROR("net %s not found\n",
2494 libcfs_net2str(net_id));
2499 addr = LNET_NIDADDR(conf->lic_nid);
2501 /* remove the entire net */
2502 net_count = lnet_get_net_ni_count_locked(net);
2506 /* create and link a new ping info, before removing the old one */
2507 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2508 lnet_get_ni_count() - net_count,
2511 goto unlock_api_mutex;
2513 lnet_shutdown_lndnet(net);
2515 if (lnet_count_acceptor_nets() == 0)
2516 lnet_acceptor_stop();
2518 lnet_ping_target_update(pbuf, ping_mdh);
2520 goto unlock_api_mutex;
2523 ni = lnet_nid2ni_locked(conf->lic_nid, 0);
2525 CERROR("nid %s not found\n",
2526 libcfs_nid2str(conf->lic_nid));
2531 net_count = lnet_get_net_ni_count_locked(net);
2535 /* create and link a new ping info, before removing the old one */
2536 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2537 lnet_get_ni_count() - 1, false);
2539 goto unlock_api_mutex;
2541 lnet_shutdown_lndni(ni);
2543 if (lnet_count_acceptor_nets() == 0)
2544 lnet_acceptor_stop();
2546 lnet_ping_target_update(pbuf, ping_mdh);
2548 /* check if the net is empty and remove it if it is */
2550 lnet_shutdown_lndnet(net);
2552 goto unlock_api_mutex;
2557 mutex_unlock(&the_lnet.ln_api_mutex);
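/*
 * Hedged usage sketch (not in the original source): lnet_dyn_del_ni()
 * above removes a single NI when the NID carries an address part, and
 * the whole net when the address part is zero. The NID values here are
 * made up; LNET_MKNID()/LNET_MKNET() and libcfs_str2nid() are assumed
 * available to the caller.
 */
static int __maybe_unused lnet_example_del(bool whole_net)
{
	struct lnet_ioctl_config_ni conf;

	memset(&conf, 0, sizeof(conf));
	conf.lic_cfg_hdr.ioc_len = sizeof(conf);
	conf.lic_nid = whole_net ?
		LNET_MKNID(LNET_MKNET(SOCKLND, 0), 0) : /* addr == 0: all of tcp0 */
		libcfs_str2nid("192.168.0.1@tcp");	/* just this one NI */

	return lnet_dyn_del_ni(&conf);
}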
/*
 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
 * They are only expected to be called for unique networks.
 * Such calls can come from older DLC libraries. Multi-Rail
 * DLC and beyond no longer use these APIs.
 */
int
lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
{
	struct lnet_net *net;
	struct list_head net_head;
	int rc;
	struct lnet_ioctl_config_lnd_tunables tun;
	char *nets = conf->cfg_config_u.cfg_net.net_intf;

	INIT_LIST_HEAD(&net_head);

	/* create net/NI structures for the network string */
	rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
	if (rc <= 0)
		return rc == 0 ? -EINVAL : rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	if (rc > 1) {
		rc = -EINVAL; /* only add one network per call */
		goto out_unlock_clean;
	}

	net = list_entry(net_head.next, struct lnet_net, net_list);
	list_del_init(&net->net_list);

	LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));

	memset(&tun, 0, sizeof(tun));

	tun.lt_cmn.lct_peer_timeout =
		conf->cfg_config_u.cfg_net.net_peer_timeout;
	tun.lt_cmn.lct_peer_tx_credits =
		conf->cfg_config_u.cfg_net.net_peer_tx_credits;
	tun.lt_cmn.lct_peer_rtr_credits =
		conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
	tun.lt_cmn.lct_max_tx_credits =
		conf->cfg_config_u.cfg_net.net_max_tx_credits;

	rc = lnet_add_net_common(net, &tun);

out_unlock_clean:
	mutex_unlock(&the_lnet.ln_api_mutex);
	while (!list_empty(&net_head)) {
		/* net_head list is empty in success case */
		net = list_entry(net_head.next, struct lnet_net, net_list);
		list_del_init(&net->net_list);
		lnet_net_free(net);
	}
	return rc;
}
int
lnet_dyn_del_net(__u32 net_id)
{
	struct lnet_net *net;
	struct lnet_ping_buffer *pbuf;
	struct lnet_handle_md ping_mdh;
	int rc;
	int net_ni_count;

	/* don't allow userspace to shut down the LOLND */
	if (LNET_NETTYP(net_id) == LOLND)
		return -EINVAL;

	mutex_lock(&the_lnet.ln_api_mutex);

	lnet_net_lock(0);

	net = lnet_get_net_locked(net_id);
	if (!net) {
		lnet_net_unlock(0);
		rc = -EINVAL;
		goto out;
	}

	net_ni_count = lnet_get_net_ni_count_locked(net);

	lnet_net_unlock(0);

	/* create and link a new ping info, before removing the old one */
	rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
				    lnet_get_ni_count() - net_ni_count, false);
	if (rc != 0)
		goto out;

	lnet_shutdown_lndnet(net);

	if (lnet_count_acceptor_nets() == 0)
		lnet_acceptor_stop();

	lnet_ping_target_update(pbuf, ping_mdh);

out:
	mutex_unlock(&the_lnet.ln_api_mutex);

	return rc;
}
void lnet_incr_dlc_seq(void)
{
	atomic_inc(&lnet_dlc_seq_no);
}

__u32 lnet_get_dlc_seq_locked(void)
{
	return atomic_read(&lnet_dlc_seq_no);
}
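/*
 * Hedged sketch (not in the original source): how a send path might use
 * the DLC sequence number to notice that the local NIs changed and that
 * a cached selection is stale. lnet_select_pathway() is the real
 * consumer; this helper is purely illustrative and, as the _locked
 * suffix implies, assumes the caller holds the LNet net lock.
 */
static bool __maybe_unused lnet_example_config_changed(__u32 *cached_seq)
{
	__u32 seq = lnet_get_dlc_seq_locked();

	if (seq == *cached_seq)
		return false;	/* no NI was added or removed */

	*cached_seq = seq;	/* remember the new generation */
	return true;		/* re-run the selection algorithm */
}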
/**
 * LNet ioctl handler.
 */
int
LNetCtl(unsigned int cmd, void *arg)
{
	struct libcfs_ioctl_data *data = arg;
	struct lnet_ioctl_config_data *config;
	struct lnet_process_id id = {0};
	struct lnet_ni *ni;
	int rc;

	BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
		     sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);

	switch (cmd) {
	case IOC_LIBCFS_GET_NI:
		rc = LNetGetId(data->ioc_count, &id);
		data->ioc_nid = id.nid;
		return rc;

	case IOC_LIBCFS_FAIL_NID:
		return lnet_fail_nid(data->ioc_nid, data->ioc_count);

	case IOC_LIBCFS_ADD_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_add_route(config->cfg_net,
				    config->cfg_config_u.cfg_route.rtr_hop,
				    config->cfg_nid,
				    config->cfg_config_u.cfg_route.
					rtr_priority);
		if (rc == 0) {
			rc = lnet_check_routes();
			if (rc != 0)
				lnet_del_route(config->cfg_net,
					       config->cfg_nid);
		}
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;

	case IOC_LIBCFS_DEL_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_del_route(config->cfg_net, config->cfg_nid);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	case IOC_LIBCFS_GET_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_route(config->cfg_count,
				    &config->cfg_net,
				    &config->cfg_config_u.cfg_route.rtr_hop,
				    &config->cfg_nid,
				    &config->cfg_config_u.cfg_route.rtr_flags,
				    &config->cfg_config_u.cfg_route.
					rtr_priority);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;

	case IOC_LIBCFS_GET_LOCAL_NI: {
		struct lnet_ioctl_config_ni *cfg_ni;
		struct lnet_ioctl_config_lnd_tunables *tun = NULL;
		struct lnet_ioctl_element_stats *stats;
		__u32 tun_size;

		cfg_ni = arg;

		/* get the tunables if they are available */
		if (cfg_ni->lic_cfg_hdr.ioc_len <
		    sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
			return -EINVAL;

		stats = (struct lnet_ioctl_element_stats *)
			cfg_ni->lic_bulk;
		tun = (struct lnet_ioctl_config_lnd_tunables *)
			(cfg_ni->lic_bulk + sizeof(*stats));

		tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
			   sizeof(*stats);

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}
	case IOC_LIBCFS_GET_NET: {
		size_t total = sizeof(*config) +
			       sizeof(struct lnet_ioctl_net_config);
		config = arg;

		if (config->cfg_hdr.ioc_len < total)
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_net_config(config);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	case IOC_LIBCFS_GET_LNET_STATS:
	{
		struct lnet_ioctl_lnet_stats *lnet_stats = arg;

		if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		lnet_counters_get(&lnet_stats->st_cntrs);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}
	case IOC_LIBCFS_CONFIG_RTR:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		if (config->cfg_config_u.cfg_buffers.buf_enable) {
			rc = lnet_rtrpools_enable();
			mutex_unlock(&the_lnet.ln_api_mutex);
			return rc;
		}
		lnet_rtrpools_disable();
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;

	case IOC_LIBCFS_ADD_BUF:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
						buf_tiny,
					  config->cfg_config_u.cfg_buffers.
						buf_small,
					  config->cfg_config_u.cfg_buffers.
						buf_large);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	case IOC_LIBCFS_SET_NUMA_RANGE: {
		struct lnet_ioctl_set_value *numa;

		numa = arg;
		if (numa->sv_hdr.ioc_len != sizeof(*numa))
			return -EINVAL;
		lnet_net_lock(LNET_LOCK_EX);
		lnet_numa_range = numa->sv_value;
		lnet_net_unlock(LNET_LOCK_EX);
		return 0;
	}

	case IOC_LIBCFS_GET_NUMA_RANGE: {
		struct lnet_ioctl_set_value *numa;

		numa = arg;
		if (numa->sv_hdr.ioc_len != sizeof(*numa))
			return -EINVAL;
		numa->sv_value = lnet_numa_range;
		return 0;
	}

	case IOC_LIBCFS_GET_BUF: {
		struct lnet_ioctl_pool_cfg *pool_cfg;
		size_t total = sizeof(*config) + sizeof(*pool_cfg);

		config = arg;

		if (config->cfg_hdr.ioc_len < total)
			return -EINVAL;

		pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}
	case IOC_LIBCFS_ADD_PEER_NI: {
		struct lnet_ioctl_peer_cfg *cfg = arg;

		if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_add_peer_ni_to_peer(cfg->prcfg_prim_nid,
					      cfg->prcfg_cfg_nid,
					      cfg->prcfg_mr);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	case IOC_LIBCFS_DEL_PEER_NI: {
		struct lnet_ioctl_peer_cfg *cfg = arg;

		if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_del_peer_ni_from_peer(cfg->prcfg_prim_nid,
						cfg->prcfg_cfg_nid);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}
	case IOC_LIBCFS_GET_PEER_INFO: {
		struct lnet_ioctl_peer *peer_info = arg;

		if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_peer_ni_info(
		   peer_info->pr_count,
		   &peer_info->pr_nid,
		   peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}
	case IOC_LIBCFS_GET_PEER_NI: {
		struct lnet_ioctl_peer_cfg *cfg = arg;
		struct lnet_peer_ni_credit_info __user *lpni_cri;
		struct lnet_ioctl_element_stats __user *lpni_stats;
		size_t usr_size = sizeof(*lpni_cri) + sizeof(*lpni_stats);

		if ((cfg->prcfg_hdr.ioc_len != sizeof(*cfg)) ||
		    (cfg->prcfg_size != usr_size))
			return -EINVAL;

		lpni_cri = cfg->prcfg_bulk;
		lpni_stats = cfg->prcfg_bulk + sizeof(*lpni_cri);

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_peer_info(cfg->prcfg_count, &cfg->prcfg_prim_nid,
					&cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
					lpni_cri, lpni_stats);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}
	case IOC_LIBCFS_NOTIFY_ROUTER: {
		unsigned long jiffies_passed;

		jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
		jiffies_passed = cfs_time_seconds(jiffies_passed);

		return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
				   jiffies - jiffies_passed);
	}

	case IOC_LIBCFS_LNET_DIST:
		rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
		if (rc < 0 && rc != -EHOSTUNREACH)
			return rc;

		data->ioc_u32[0] = rc;
		return 0;

	case IOC_LIBCFS_TESTPROTOCOMPAT:
		lnet_net_lock(LNET_LOCK_EX);
		the_lnet.ln_testprotocompat = data->ioc_flags;
		lnet_net_unlock(LNET_LOCK_EX);
		return 0;

	case IOC_LIBCFS_LNET_FAULT:
		return lnet_fault_ctl(data->ioc_flags, data);
	case IOC_LIBCFS_PING: {
		signed long timeout;

		id.nid = data->ioc_nid;
		id.pid = data->ioc_u32[0];

		/* Don't block longer than 2 minutes */
		if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
			return -EINVAL;

		/* If timeout is negative then disable it */
		if ((s32)data->ioc_u32[1] < 0)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(data->ioc_u32[1]);

		rc = lnet_ping(id, timeout, data->ioc_pbuf1,
			       data->ioc_plen1 / sizeof(struct lnet_process_id));
		if (rc < 0)
			return rc;

		data->ioc_count = rc;
		return 0;
	}
	default:
		ni = lnet_net2ni_addref(data->ioc_net);
		if (!ni)
			return -EINVAL;

		if (ni->ni_net->net_lnd->lnd_ctl == NULL)
			rc = -EINVAL;
		else
			rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);

		lnet_ni_decref(ni);
		return rc;
	}
	/* not reached */
}
EXPORT_SYMBOL(LNetCtl);
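/*
 * Hedged usage sketch (not in the original source): how an in-kernel
 * caller could drive the IOC_LIBCFS_GET_NI command above to resolve the
 * NID of the local interface at a given index. LNet must already be up
 * (LNetGetId asserts a positive refcount); error handling is minimal.
 */
static int __maybe_unused lnet_example_get_nid(unsigned int index,
					       lnet_nid_t *nid)
{
	struct libcfs_ioctl_data data;
	int rc;

	memset(&data, 0, sizeof(data));
	data.ioc_count = index;		/* interface index to look up */

	rc = LNetCtl(IOC_LIBCFS_GET_NI, &data);
	if (rc == 0)
		*nid = data.ioc_nid;	/* filled in by the handler */
	return rc;
}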
void LNetDebugPeer(struct lnet_process_id id)
{
	lnet_debug_peer(id.nid);
}
EXPORT_SYMBOL(LNetDebugPeer);
/**
 * Determine if the specified peer \a nid is on the local node.
 *
 * \param nid	peer nid to check
 *
 * \retval true		If peer NID is on the local node.
 * \retval false	If peer NID is not on the local node.
 */
bool LNetIsPeerLocal(lnet_nid_t nid)
{
	struct lnet_net *net;
	struct lnet_ni *ni;
	int cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (ni->ni_nid == nid) {
				lnet_net_unlock(cpt);
				return true;
			}
		}
	}
	lnet_net_unlock(cpt);

	return false;
}
EXPORT_SYMBOL(LNetIsPeerLocal);
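/*
 * Hedged usage sketch (not in the original source): a caller might use
 * LNetIsPeerLocal() to short-circuit work for peers that resolve to a
 * local NI. The debug message is illustrative only.
 */
static void __maybe_unused lnet_example_check_local(lnet_nid_t nid)
{
	if (LNetIsPeerLocal(nid))
		CDEBUG(D_NET, "%s is a local NID\n", libcfs_nid2str(nid));
}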
/**
 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
 * Note that all interfaces share the same PID, as requested by
 * LNetNIInit().
 *
 * \param index Index of the interface to look up.
 * \param id On successful return, this location will hold the
 * struct lnet_process_id ID of the interface.
 *
 * \retval 0 If an interface exists at \a index.
 * \retval -ENOENT If no interface has been found.
 */
int
LNetGetId(unsigned int index, struct lnet_process_id *id)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	int cpt;
	int rc = -ENOENT;

	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (index-- != 0)
				continue;

			id->nid = ni->ni_nid;
			id->pid = the_lnet.ln_pid;
			rc = 0;
			break;
		}
	}

	lnet_net_unlock(cpt);
	return rc;
}
EXPORT_SYMBOL(LNetGetId);
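/*
 * Hedged usage sketch (not in the original source): enumerating all
 * local interfaces with LNetGetId() until it returns -ENOENT, mirroring
 * how the IOC_LIBCFS_GET_NI ioctl above is typically iterated.
 */
static void __maybe_unused lnet_example_list_ids(void)
{
	struct lnet_process_id id;
	unsigned int i;

	for (i = 0; LNetGetId(i, &id) == 0; i++)
		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
}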
static int lnet_ping(struct lnet_process_id id, signed long timeout,
		     struct lnet_process_id __user *ids, int n_ids)
{
	struct lnet_handle_eq eqh;
	struct lnet_handle_md mdh;
	struct lnet_event event;
	struct lnet_md md = { NULL };
	int which;
	int unlinked = 0;
	int replied = 0;
	const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
	struct lnet_ping_buffer *pbuf;
	struct lnet_process_id tmpid;
	int i;
	int nob;
	int rc;
	int rc2;
	sigset_t blocked;

	/* n_ids limit is arbitrary */
	if (n_ids <= 0 || n_ids > lnet_interfaces_max || id.nid == LNET_NID_ANY)
		return -EINVAL;

	if (id.pid == LNET_PID_ANY)
		id.pid = LNET_PID_LUSTRE;

	pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
	if (!pbuf)
		return -ENOMEM;
	/* NB 2 events max (including any unlink event) */
	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
	if (rc != 0) {
		CERROR("Can't allocate EQ: %d\n", rc);
		goto fail_ping_buffer_decref;
	}

	/* initialize md content */
	md.start     = &pbuf->pb_info;
	md.length    = LNET_PING_INFO_SIZE(n_ids);
	md.threshold = 2; /* GET/REPLY */
	md.max_size  = 0;
	md.options   = LNET_MD_TRUNCATE;
	md.user_ptr  = NULL;
	md.eq_handle = eqh;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
	if (rc != 0) {
		CERROR("Can't bind MD: %d\n", rc);
		goto fail_free_eq;
	}

	rc = LNetGet(LNET_NID_ANY, mdh, id,
		     LNET_RESERVED_PORTAL,
		     LNET_PROTO_PING_MATCHBITS, 0);

	if (rc != 0) {
		/* Don't CERROR; this could be deliberate! */
		rc2 = LNetMDUnlink(mdh);
		LASSERT(rc2 == 0);

		/* NB must wait for the UNLINK event below... */
		unlinked = 1;
		timeout = a_long_time;
	}

	do {
		/* MUST block for unlink to complete */
		if (unlinked)
			blocked = cfs_block_allsigs();

		rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);

		if (unlinked)
			cfs_restore_sigs(blocked);

		CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
		       (rc2 <= 0) ? -1 : event.type,
		       (rc2 <= 0) ? -1 : event.status,
		       (rc2 > 0 && event.unlinked) ? " unlinked" : "");

		LASSERT(rc2 != -EOVERFLOW);	/* can't miss anything */

		if (rc2 <= 0 || event.status != 0) {
			/* timeout or error */
			if (!replied && rc == 0)
				rc = (rc2 < 0) ? rc2 :
				     (rc2 == 0) ? -ETIMEDOUT :
				     event.status;

			if (!unlinked) {
				/* Ensure completion in finite time... */
				LNetMDUnlink(mdh);
				/* No assertion (racing with network) */
				unlinked = 1;
				timeout = a_long_time;
			} else if (rc2 == 0) {
				/* timed out waiting for unlink */
				CWARN("ping %s: late network completion\n",
				      libcfs_id2str(id));
			}
		} else if (event.type == LNET_EVENT_REPLY) {
			replied = 1;
			rc = event.mlength;
		}
	} while (rc2 <= 0 || !event.unlinked);
	if (!replied) {
		if (rc >= 0)
			CWARN("%s: Unexpected rc >= 0 but no reply!\n",
			      libcfs_id2str(id));
		rc = -EIO;
		goto fail_free_eq;
	}

	nob = rc;
	LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));

	rc = -EPROTO;	/* if I can't parse... */

	if (nob < 8) {
		/* can't check magic/version */
		CERROR("%s: ping info too short %d\n",
		       libcfs_id2str(id), nob);
		goto fail_free_eq;
	}

	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
		lnet_swap_pinginfo(pbuf);
	} else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
		CERROR("%s: Unexpected magic %08x\n",
		       libcfs_id2str(id), pbuf->pb_info.pi_magic);
		goto fail_free_eq;
	}

	if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
		CERROR("%s: ping w/o NI status: 0x%x\n",
		       libcfs_id2str(id), pbuf->pb_info.pi_features);
		goto fail_free_eq;
	}

	if (nob < LNET_PING_INFO_SIZE(0)) {
		CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
		       nob, (int)LNET_PING_INFO_SIZE(0));
		goto fail_free_eq;
	}

	if (pbuf->pb_info.pi_nnis < n_ids)
		n_ids = pbuf->pb_info.pi_nnis;

	if (nob < LNET_PING_INFO_SIZE(n_ids)) {
		CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
		       nob, (int)LNET_PING_INFO_SIZE(n_ids));
		goto fail_free_eq;
	}
	rc = -EFAULT;	/* if I SEGV... */

	memset(&tmpid, 0, sizeof(tmpid));
	for (i = 0; i < n_ids; i++) {
		tmpid.pid = pbuf->pb_info.pi_pid;
		tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
		if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
			goto fail_free_eq;
	}
	rc = pbuf->pb_info.pi_nnis;

fail_free_eq:
	rc2 = LNetEQFree(eqh);
	if (rc2 != 0)
		CERROR("rc2 %d\n", rc2);
	LASSERT(rc2 == 0);

fail_ping_buffer_decref:
	lnet_ping_buffer_decref(pbuf);
	return rc;
}