/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/log2.h>
#include <linux/ktime.h>

#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE

lnet_t the_lnet;        /* THE state of the network */
EXPORT_SYMBOL(the_lnet);

static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");

static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding = false;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
                 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");

static __u32 lnet_numa_range = 0;
module_param(lnet_numa_range, int, 0444);
MODULE_PARM_DESC(lnet_numa_range,
                 "NUMA range to consider during Multi-Rail selection");

/*
 * This sequence number keeps track of how many times DLC was used to
 * update the local NIs. It is incremented when a NI is added or
 * removed and checked when sending a message to determine if there is
 * a need to re-run the selection algorithm. See lnet_select_pathway()
 * for more details on its usage.
 */
static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
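/*
 * Illustrative sketch (not in the original source): a caller on the send
 * path could consume the sequence number roughly like this, re-running NI
 * selection only when the device configuration has changed. "cached_seq"
 * is a hypothetical per-caller variable, not a field in this file:
 *
 *      __u32 seq = lnet_get_dlc_seq_locked();
 *      if (seq != cached_seq) {
 *              cached_seq = seq;
 *              // re-run the Multi-Rail pathway selection
 *      }
 */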
static int lnet_ping(lnet_process_id_t id, signed long timeout,
                     lnet_process_id_t __user *ids, int n_ids);
static char *
lnet_get_networks(void)
{
        char *nets;
        int rc;

        if (*networks != 0 && *ip2nets != 0) {
                LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
                                   "'ip2nets' but not both at once\n");
                return NULL;
        }

        if (*ip2nets != 0) {
                rc = lnet_parse_ip2nets(&nets, ip2nets);
                return (rc == 0) ? nets : NULL;
        }

        return networks;
}
static void
lnet_init_locks(void)
{
        spin_lock_init(&the_lnet.ln_eq_wait_lock);
        init_waitqueue_head(&the_lnet.ln_eq_waitq);
        init_waitqueue_head(&the_lnet.ln_rc_waitq);
        mutex_init(&the_lnet.ln_lnd_mutex);
        mutex_init(&the_lnet.ln_api_mutex);
}

static void
lnet_fini_locks(void)
{
}
struct kmem_cache *lnet_mes_cachep;        /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
                                            * MDs kmem_cache */

static int
lnet_descriptor_setup(void)
{
        /* create specific kmem_cache for MEs and small MDs (i.e., originally
         * allocated in <size-xxx> kmem_cache).
         */
        lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(lnet_me_t),
                                            0, 0, NULL);
        if (!lnet_mes_cachep)
                return -ENOMEM;

        lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
                                                  LNET_SMALL_MD_SIZE, 0, 0,
                                                  NULL);
        if (!lnet_small_mds_cachep)
                return -ENOMEM;

        return 0;
}

static void
lnet_descriptor_cleanup(void)
{
        if (lnet_small_mds_cachep) {
                kmem_cache_destroy(lnet_small_mds_cachep);
                lnet_small_mds_cachep = NULL;
        }

        if (lnet_mes_cachep) {
                kmem_cache_destroy(lnet_mes_cachep);
                lnet_mes_cachep = NULL;
        }
}
static int
lnet_create_remote_nets_table(void)
{
        int i;
        struct list_head *hash;

        LASSERT(the_lnet.ln_remote_nets_hash == NULL);
        LASSERT(the_lnet.ln_remote_nets_hbits > 0);
        LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
        if (hash == NULL) {
                CERROR("Failed to create remote nets hash table\n");
                return -ENOMEM;
        }

        for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
                INIT_LIST_HEAD(&hash[i]);
        the_lnet.ln_remote_nets_hash = hash;
        return 0;
}

static void
lnet_destroy_remote_nets_table(void)
{
        int i;

        if (the_lnet.ln_remote_nets_hash == NULL)
                return;

        for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
                LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));

        LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
                    LNET_REMOTE_NETS_HASH_SIZE *
                    sizeof(the_lnet.ln_remote_nets_hash[0]));
        the_lnet.ln_remote_nets_hash = NULL;
}
static void
lnet_destroy_locks(void)
{
        if (the_lnet.ln_res_lock != NULL) {
                cfs_percpt_lock_free(the_lnet.ln_res_lock);
                the_lnet.ln_res_lock = NULL;
        }

        if (the_lnet.ln_net_lock != NULL) {
                cfs_percpt_lock_free(the_lnet.ln_net_lock);
                the_lnet.ln_net_lock = NULL;
        }

        lnet_fini_locks();
}

static int
lnet_create_locks(void)
{
        lnet_init_locks();

        the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
        if (the_lnet.ln_res_lock == NULL)
                goto failed;

        the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
        if (the_lnet.ln_net_lock == NULL)
                goto failed;

        return 0;

failed:
        lnet_destroy_locks();
        return -ENOMEM;
}
static void lnet_assert_wire_constants(void)
{
        /* Wire protocol assertions generated by 'wirecheck'
         * running on Linux robert.bartonsoftware.com 2.6.8-1.521
         * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
         * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */

        CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
        CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
        CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
        CLASSERT(LNET_MSG_ACK == 0);
        CLASSERT(LNET_MSG_PUT == 1);
        CLASSERT(LNET_MSG_GET == 2);
        CLASSERT(LNET_MSG_REPLY == 3);
        CLASSERT(LNET_MSG_HELLO == 4);

        /* Checks for struct lnet_handle_wire */
        CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
        CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
        CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
        CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
        CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);

        /* Checks for struct lnet_magicversion_t */
        CLASSERT((int)sizeof(lnet_magicversion_t) == 8);
        CLASSERT((int)offsetof(lnet_magicversion_t, magic) == 0);
        CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->magic) == 4);
        CLASSERT((int)offsetof(lnet_magicversion_t, version_major) == 4);
        CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_major) == 2);
        CLASSERT((int)offsetof(lnet_magicversion_t, version_minor) == 6);
        CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_minor) == 2);

        /* Checks for struct lnet_hdr_t */
        CLASSERT((int)sizeof(lnet_hdr_t) == 72);
        CLASSERT((int)offsetof(lnet_hdr_t, dest_nid) == 0);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_nid) == 8);
        CLASSERT((int)offsetof(lnet_hdr_t, src_nid) == 8);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_nid) == 8);
        CLASSERT((int)offsetof(lnet_hdr_t, dest_pid) == 16);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_pid) == 4);
        CLASSERT((int)offsetof(lnet_hdr_t, src_pid) == 20);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_pid) == 4);
        CLASSERT((int)offsetof(lnet_hdr_t, type) == 24);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->type) == 4);
        CLASSERT((int)offsetof(lnet_hdr_t, payload_length) == 28);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->payload_length) == 4);
        CLASSERT((int)offsetof(lnet_hdr_t, msg) == 32);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg) == 40);

        /* Ack */
        CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.dst_wmd) == 32);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.dst_wmd) == 16);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.match_bits) == 48);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.match_bits) == 8);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.mlength) == 56);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.mlength) == 4);

        /* Put */
        CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ack_wmd) == 32);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ack_wmd) == 16);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.put.match_bits) == 48);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.match_bits) == 8);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.put.hdr_data) == 56);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.hdr_data) == 8);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ptl_index) == 64);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ptl_index) == 4);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.put.offset) == 68);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.offset) == 4);

        /* Get */
        CLASSERT((int)offsetof(lnet_hdr_t, msg.get.return_wmd) == 32);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.return_wmd) == 16);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.get.match_bits) == 48);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.match_bits) == 8);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.get.ptl_index) == 56);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.ptl_index) == 4);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.get.src_offset) == 60);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.src_offset) == 4);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.get.sink_length) == 64);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.sink_length) == 4);

        /* Reply */
        CLASSERT((int)offsetof(lnet_hdr_t, msg.reply.dst_wmd) == 32);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.reply.dst_wmd) == 16);

        /* Hello */
        CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.incarnation) == 32);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.incarnation) == 8);
        CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.type) == 40);
        CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.type) == 4);
}
static lnd_t *lnet_find_lnd_by_type(__u32 type)
{
        lnd_t *lnd;
        struct list_head *tmp;

        /* holding lnd mutex */
        list_for_each(tmp, &the_lnet.ln_lnds) {
                lnd = list_entry(tmp, lnd_t, lnd_list);

                if (lnd->lnd_type == type)
                        return lnd;
        }

        return NULL;
}

void
lnet_register_lnd (lnd_t *lnd)
{
        mutex_lock(&the_lnet.ln_lnd_mutex);

        LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
        LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);

        list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
        lnd->lnd_refcount = 0;

        CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));

        mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_register_lnd);

void
lnet_unregister_lnd (lnd_t *lnd)
{
        mutex_lock(&the_lnet.ln_lnd_mutex);

        LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
        LASSERT(lnd->lnd_refcount == 0);

        list_del(&lnd->lnd_list);
        CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));

        mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);
void
lnet_counters_get(lnet_counters_t *counters)
{
        lnet_counters_t *ctr;
        int i;

        memset(counters, 0, sizeof(*counters));

        lnet_net_lock(LNET_LOCK_EX);

        cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
                counters->msgs_max     += ctr->msgs_max;
                counters->msgs_alloc   += ctr->msgs_alloc;
                counters->errors       += ctr->errors;
                counters->send_count   += ctr->send_count;
                counters->recv_count   += ctr->recv_count;
                counters->route_count  += ctr->route_count;
                counters->drop_count   += ctr->drop_count;
                counters->send_length  += ctr->send_length;
                counters->recv_length  += ctr->recv_length;
                counters->route_length += ctr->route_length;
                counters->drop_length  += ctr->drop_length;
        }

        lnet_net_unlock(LNET_LOCK_EX);
}
EXPORT_SYMBOL(lnet_counters_get);

void
lnet_counters_reset(void)
{
        lnet_counters_t *counters;
        int i;

        lnet_net_lock(LNET_LOCK_EX);

        cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
                memset(counters, 0, sizeof(lnet_counters_t));

        lnet_net_unlock(LNET_LOCK_EX);
}
static char *
lnet_res_type2str(int type)
{
        switch (type) {
        default:
                return "unknown";
        case LNET_COOKIE_TYPE_MD:
                return "MD";
        case LNET_COOKIE_TYPE_ME:
                return "ME";
        case LNET_COOKIE_TYPE_EQ:
                return "EQ";
        }
}

static void
lnet_res_container_cleanup(struct lnet_res_container *rec)
{
        int count = 0;

        if (rec->rec_type == 0) /* not set yet, it's uninitialized */
                return;

        while (!list_empty(&rec->rec_active)) {
                struct list_head *e = rec->rec_active.next;

                list_del_init(e);
                if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
                        lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));

                } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
                        lnet_md_free(list_entry(e, lnet_libmd_t, md_list));

                } else { /* NB: Active MEs should be attached on portals */
                        LBUG();
                }
                count++;
        }

        if (count > 0) {
                /* Found alive MD/ME/EQ; users really should unlink/free
                 * all of them before finalizing LNet, but if someone didn't,
                 * we have to recycle the garbage for them */
                CERROR("%d active elements on exit of %s container\n",
                       count, lnet_res_type2str(rec->rec_type));
        }

        if (rec->rec_lh_hash != NULL) {
                LIBCFS_FREE(rec->rec_lh_hash,
                            LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
                rec->rec_lh_hash = NULL;
        }

        rec->rec_type = 0; /* mark it as finalized */
}
static int
lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
{
        int rc = 0;
        int i;

        LASSERT(rec->rec_type == 0);

        rec->rec_type = type;
        INIT_LIST_HEAD(&rec->rec_active);

        rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;

        /* Arbitrary choice of hash table size */
        LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
                         LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
        if (rec->rec_lh_hash == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        for (i = 0; i < LNET_LH_HASH_SIZE; i++)
                INIT_LIST_HEAD(&rec->rec_lh_hash[i]);

        return 0;

out:
        CERROR("Failed to setup %s resource container\n",
               lnet_res_type2str(type));
        lnet_res_container_cleanup(rec);
        return rc;
}

static void
lnet_res_containers_destroy(struct lnet_res_container **recs)
{
        struct lnet_res_container *rec;
        int i;

        cfs_percpt_for_each(rec, i, recs)
                lnet_res_container_cleanup(rec);

        cfs_percpt_free(recs);
}

static struct lnet_res_container **
lnet_res_containers_create(int type)
{
        struct lnet_res_container **recs;
        struct lnet_res_container *rec;
        int rc;
        int i;

        recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
        if (recs == NULL) {
                CERROR("Failed to allocate %s resource containers\n",
                       lnet_res_type2str(type));
                return NULL;
        }

        cfs_percpt_for_each(rec, i, recs) {
                rc = lnet_res_container_setup(rec, i, type);
                if (rc != 0) {
                        lnet_res_containers_destroy(recs);
                        return NULL;
                }
        }

        return recs;
}

lnet_libhandle_t *
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
        /* ALWAYS called with lnet_res_lock held */
        struct list_head *head;
        lnet_libhandle_t *lh;
        unsigned int hash;

        if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
                return NULL;

        hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
        head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];

        list_for_each_entry(lh, head, lh_hash_chain) {
                if (lh->lh_cookie == cookie)
                        return lh;
        }

        return NULL;
}
void
lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
{
        /* ALWAYS called with lnet_res_lock held */
        unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
        unsigned int hash;

        lh->lh_cookie = rec->rec_lh_cookie;
        rec->rec_lh_cookie += 1 << ibits;

        hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;

        list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
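/*
 * Illustrative note (not in the original source): a cookie is laid out as
 * (sequence << ibits) | (cpt << LNET_COOKIE_TYPE_BITS) | type. With
 * hypothetical values LNET_COOKIE_TYPE_BITS = 2, LNET_CPT_BITS = 2,
 * cpt = 1 and type = LNET_COOKIE_TYPE_MD = 1, the container starts at
 * cookie 0x5 and the next handle gets 0x5 + (1 << 4) = 0x15: the low bits
 * always identify the resource type and CPT, while the sequence in the
 * high bits grows by 1 << ibits per handle.
 */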
static int lnet_unprepare(void);

static int
lnet_prepare(lnet_pid_t requested_pid)
{
        /* Prepare to bring up the network */
        struct lnet_res_container **recs;
        int rc = 0;

        if (requested_pid == LNET_PID_ANY) {
                /* Don't instantiate LNET just for me */
                return -ENETDOWN;
        }

        LASSERT(the_lnet.ln_refcount == 0);

        the_lnet.ln_routing = 0;

        LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
        the_lnet.ln_pid = requested_pid;

        INIT_LIST_HEAD(&the_lnet.ln_test_peers);
        INIT_LIST_HEAD(&the_lnet.ln_peers);
        INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
        INIT_LIST_HEAD(&the_lnet.ln_nets);
        INIT_LIST_HEAD(&the_lnet.ln_routers);
        INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
        INIT_LIST_HEAD(&the_lnet.ln_delay_rules);

        rc = lnet_descriptor_setup();
        if (rc != 0)
                goto failed;

        rc = lnet_create_remote_nets_table();
        if (rc != 0)
                goto failed;

        /*
         * NB the interface cookie in wire handles guards against delayed
         * replies and ACKs appearing valid after reboot.
         */
        the_lnet.ln_interface_cookie = ktime_get_real_ns();

        the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
                                                sizeof(lnet_counters_t));
        if (the_lnet.ln_counters == NULL) {
                CERROR("Failed to allocate counters for LNet\n");
                rc = -ENOMEM;
                goto failed;
        }

        rc = lnet_peer_tables_create();
        if (rc != 0)
                goto failed;

        rc = lnet_msg_containers_create();
        if (rc != 0)
                goto failed;

        rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
                                      LNET_COOKIE_TYPE_EQ);
        if (rc != 0)
                goto failed;

        recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
        if (recs == NULL) {
                rc = -ENOMEM;
                goto failed;
        }

        the_lnet.ln_me_containers = recs;

        recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
        if (recs == NULL) {
                rc = -ENOMEM;
                goto failed;
        }

        the_lnet.ln_md_containers = recs;

        rc = lnet_portals_create();
        if (rc != 0) {
                CERROR("Failed to create portals for LNet: %d\n", rc);
                goto failed;
        }

        return 0;

failed:
        lnet_unprepare();
        return rc;
}
static int
lnet_unprepare (void)
{
        /* NB no LNET_LOCK since this is the last reference. All LND instances
         * have shut down already, so it is safe to unlink and free all
         * descriptors, even those that appear committed to a network op (eg MD
         * with non-zero pending count) */

        lnet_fail_nid(LNET_NID_ANY, 0);

        LASSERT(the_lnet.ln_refcount == 0);
        LASSERT(list_empty(&the_lnet.ln_test_peers));
        LASSERT(list_empty(&the_lnet.ln_nets));

        lnet_portals_destroy();

        if (the_lnet.ln_md_containers != NULL) {
                lnet_res_containers_destroy(the_lnet.ln_md_containers);
                the_lnet.ln_md_containers = NULL;
        }

        if (the_lnet.ln_me_containers != NULL) {
                lnet_res_containers_destroy(the_lnet.ln_me_containers);
                the_lnet.ln_me_containers = NULL;
        }

        lnet_res_container_cleanup(&the_lnet.ln_eq_container);

        lnet_msg_containers_destroy();

        lnet_rtrpools_free(0);

        if (the_lnet.ln_counters != NULL) {
                cfs_percpt_free(the_lnet.ln_counters);
                the_lnet.ln_counters = NULL;
        }
        lnet_destroy_remote_nets_table();
        lnet_descriptor_cleanup();

        return 0;
}
lnet_ni_t *
lnet_net2ni_locked(__u32 net_id, int cpt)
{
        lnet_ni_t *ni;
        struct lnet_net *net;

        LASSERT(cpt != LNET_LOCK_EX);

        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                if (net->net_id == net_id) {
                        ni = list_entry(net->net_ni_list.next, struct lnet_ni,
                                        ni_netlist);
                        return ni;
                }
        }

        return NULL;
}

lnet_ni_t *
lnet_net2ni_addref(__u32 net)
{
        lnet_ni_t *ni;

        lnet_net_lock(0);
        ni = lnet_net2ni_locked(net, 0);
        if (ni)
                lnet_ni_addref_locked(ni, 0);
        lnet_net_unlock(0);

        return ni;
}
EXPORT_SYMBOL(lnet_net2ni_addref);

struct lnet_net *
lnet_get_net_locked(__u32 net_id)
{
        struct lnet_net *net;

        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                if (net->net_id == net_id)
                        return net;
        }

        return NULL;
}
unsigned int
lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
{
        __u64 key = nid;
        unsigned int val;

        LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);

        if (number == 1)
                return 0;

        val = hash_long(key, LNET_CPT_BITS);
        /* NB: LNET_CPT_NUMBER doesn't have to be PO2 */
        if (val < number)
                return val;

        return (unsigned int)(key + val + (val >> 1)) % number;
}
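/*
 * Illustrative note (not in the original source): with a hypothetical
 * LNET_CPT_NUMBER = 4, the fast path above returns hash_long(nid,
 * LNET_CPT_BITS) directly whenever it already falls below 4; otherwise the
 * nid is folded as (nid + val + val/2) % 4, so the result is always a
 * valid CPT index even when the CPT count is not a power of two.
 */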
int
lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
{
        struct lnet_net *net;

        /* must be called while holding lnet_net_lock */
        if (LNET_CPT_NUMBER == 1)
                return 0; /* the only one */

        /*
         * If NI is provided then use the CPT identified in the NI cpt
         * list if one exists. If one doesn't exist, then that NI is
         * associated with all CPTs and it follows that the net it belongs
         * to is implicitly associated with all CPTs, so just hash the nid.
         */
        if (ni != NULL) {
                if (ni->ni_cpts != NULL)
                        return ni->ni_cpts[lnet_nid_cpt_hash(nid,
                                                             ni->ni_ncpts)];
                else
                        return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
        }

        /* no NI provided so look at the net */
        net = lnet_get_net_locked(LNET_NIDNET(nid));

        if (net != NULL && net->net_cpts != NULL) {
                return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
        }

        return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
}

int
lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
{
        int cpt;
        int cpt2;

        if (LNET_CPT_NUMBER == 1)
                return 0; /* the only one */

        cpt = lnet_net_lock_current();

        cpt2 = lnet_cpt_of_nid_locked(nid, ni);

        lnet_net_unlock(cpt);

        return cpt2;
}
EXPORT_SYMBOL(lnet_cpt_of_nid);
int
lnet_islocalnet(__u32 net_id)
{
        struct lnet_net *net;
        int cpt;
        bool local;

        cpt = lnet_net_lock_current();

        net = lnet_get_net_locked(net_id);

        local = net != NULL;

        lnet_net_unlock(cpt);

        return local;
}

bool
lnet_is_ni_healthy_locked(struct lnet_ni *ni)
{
        if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
            ni->ni_state == LNET_NI_STATE_DEGRADED)
                return true;

        return false;
}

struct lnet_ni *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
        struct lnet_net *net;
        struct lnet_ni *ni;

        LASSERT(cpt != LNET_LOCK_EX);

        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
                        if (ni->ni_nid == nid)
                                return ni;
                }
        }

        return NULL;
}

struct lnet_ni *
lnet_nid2ni_addref(lnet_nid_t nid)
{
        struct lnet_ni *ni;

        lnet_net_lock(0);
        ni = lnet_nid2ni_locked(nid, 0);
        if (ni)
                lnet_ni_addref_locked(ni, 0);
        lnet_net_unlock(0);

        return ni;
}
EXPORT_SYMBOL(lnet_nid2ni_addref);

int
lnet_islocalnid(lnet_nid_t nid)
{
        struct lnet_ni *ni;
        int cpt;

        cpt = lnet_net_lock_current();
        ni = lnet_nid2ni_locked(nid, cpt);
        lnet_net_unlock(cpt);

        return ni != NULL;
}
int
lnet_count_acceptor_nets(void)
{
        /* Return the # of NIs that need the acceptor. */
        int count = 0;
        struct lnet_net *net;
        int cpt;

        cpt = lnet_net_lock_current();
        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                /* all socklnd type networks should have the acceptor
                 * thread started */
                if (net->net_lnd->lnd_accept != NULL)
                        count++;
        }

        lnet_net_unlock(cpt);

        return count;
}
static struct lnet_ping_info *
lnet_ping_info_create(int num_ni)
{
        struct lnet_ping_info *ping_info;
        unsigned int infosz;

        infosz = offsetof(struct lnet_ping_info, pi_ni[num_ni]);
        LIBCFS_ALLOC(ping_info, infosz);
        if (ping_info == NULL) {
                CERROR("Can't allocate ping info[%d]\n", num_ni);
                return NULL;
        }

        ping_info->pi_nnis = num_ni;
        ping_info->pi_pid = the_lnet.ln_pid;
        ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
        ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;

        return ping_info;
}
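/*
 * Illustrative note (not in the original source): because pi_ni[] is a
 * flexible array member, offsetof(struct lnet_ping_info, pi_ni[num_ni])
 * yields the header size plus num_ni status slots in one allocation, e.g.
 * for a hypothetical 16-byte header and a 12-byte lnet_ni_status,
 * num_ni = 3 gives infosz = 16 + 3 * 12 = 52 bytes.
 */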
static int
lnet_get_net_ni_count_locked(struct lnet_net *net)
{
        struct lnet_ni *ni;
        int count = 0;

        list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
                count++;

        return count;
}

static int
lnet_get_net_ni_count_pre(struct lnet_net *net)
{
        struct lnet_ni *ni;
        int count = 0;

        list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
                count++;

        return count;
}

static int
lnet_get_ni_count(void)
{
        struct lnet_ni *ni;
        struct lnet_net *net;
        int count = 0;

        lnet_net_lock(0);

        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
                        count++;
        }

        lnet_net_unlock(0);

        return count;
}

static void
lnet_ping_info_free(struct lnet_ping_info *pinfo)
{
        LIBCFS_FREE(pinfo,
                    offsetof(struct lnet_ping_info,
                             pi_ni[pinfo->pi_nnis]));
}
static void
lnet_ping_info_destroy(void)
{
        struct lnet_net *net;
        struct lnet_ni *ni;

        lnet_net_lock(LNET_LOCK_EX);

        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
                        lnet_ni_lock(ni);
                        ni->ni_status = NULL;
                        lnet_ni_unlock(ni);
                }
        }

        lnet_ping_info_free(the_lnet.ln_ping_info);
        the_lnet.ln_ping_info = NULL;

        lnet_net_unlock(LNET_LOCK_EX);
}

static void
lnet_ping_event_handler(lnet_event_t *event)
{
        struct lnet_ping_info *pinfo = event->md.user_ptr;

        if (event->unlinked)
                pinfo->pi_features = LNET_PING_FEAT_INVAL;
}
static int
lnet_ping_info_setup(struct lnet_ping_info **ppinfo, lnet_handle_md_t *md_handle,
                     int ni_count, bool set_eq)
{
        lnet_handle_me_t me_handle;
        lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
        lnet_md_t md = {NULL};
        int rc, rc2;

        if (set_eq) {
                rc = LNetEQAlloc(0, lnet_ping_event_handler,
                                 &the_lnet.ln_ping_target_eq);
                if (rc != 0) {
                        CERROR("Can't allocate ping EQ: %d\n", rc);
                        return rc;
                }
        }

        *ppinfo = lnet_ping_info_create(ni_count);
        if (*ppinfo == NULL) {
                rc = -ENOMEM;
                goto failed_0;
        }

        rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
                          LNET_PROTO_PING_MATCHBITS, 0,
                          LNET_UNLINK, LNET_INS_AFTER,
                          &me_handle);
        if (rc != 0) {
                CERROR("Can't create ping ME: %d\n", rc);
                goto failed_1;
        }

        /* initialize md content */
        md.start = *ppinfo;
        md.length = offsetof(struct lnet_ping_info,
                             pi_ni[(*ppinfo)->pi_nnis]);
        md.threshold = LNET_MD_THRESH_INF;
        md.max_size = 0;
        md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
                     LNET_MD_MANAGE_REMOTE;
        md.eq_handle = the_lnet.ln_ping_target_eq;
        md.user_ptr = *ppinfo;

        rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
        if (rc != 0) {
                CERROR("Can't attach ping MD: %d\n", rc);
                goto failed_2;
        }

        return 0;

failed_2:
        rc2 = LNetMEUnlink(me_handle);
        LASSERT(rc2 == 0);
failed_1:
        lnet_ping_info_free(*ppinfo);
        *ppinfo = NULL;
failed_0:
        if (set_eq)
                LNetEQFree(the_lnet.ln_ping_target_eq);
        return rc;
}
static void
lnet_ping_md_unlink(struct lnet_ping_info *pinfo, lnet_handle_md_t *md_handle)
{
        sigset_t blocked = cfs_block_allsigs();

        LNetMDUnlink(*md_handle);
        LNetInvalidateHandle(md_handle);

        /* NB md could be busy; this just starts the unlink */
        while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
                CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
        }

        cfs_restore_sigs(blocked);
}
static void
lnet_ping_info_install_locked(struct lnet_ping_info *ping_info)
{
        struct lnet_ni *ni;
        struct lnet_net *net;
        struct lnet_ni_status *ns;
        int i = 0;

        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
                        LASSERT(i < ping_info->pi_nnis);

                        ns = &ping_info->pi_ni[i];

                        ns->ns_nid = ni->ni_nid;

                        lnet_ni_lock(ni);
                        ns->ns_status = (ni->ni_status != NULL) ?
                                         ni->ni_status->ns_status :
                                         LNET_NI_STATUS_UP;
                        ni->ni_status = ns;
                        lnet_ni_unlock(ni);

                        i++;
                }
        }
}

static void
lnet_ping_target_update(struct lnet_ping_info *pinfo, lnet_handle_md_t md_handle)
{
        struct lnet_ping_info *old_pinfo = NULL;
        lnet_handle_md_t old_md;

        /* switch the NIs to point to the new ping info created */
        lnet_net_lock(LNET_LOCK_EX);

        if (!the_lnet.ln_routing)
                pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
        lnet_ping_info_install_locked(pinfo);

        if (the_lnet.ln_ping_info != NULL) {
                old_pinfo = the_lnet.ln_ping_info;
                old_md = the_lnet.ln_ping_target_md;
        }
        the_lnet.ln_ping_target_md = md_handle;
        the_lnet.ln_ping_info = pinfo;

        lnet_net_unlock(LNET_LOCK_EX);

        if (old_pinfo != NULL) {
                /* unlink the old ping info */
                lnet_ping_md_unlink(old_pinfo, &old_md);
                lnet_ping_info_free(old_pinfo);
        }
}
static void
lnet_ping_target_fini(void)
{
        int rc;

        lnet_ping_md_unlink(the_lnet.ln_ping_info,
                            &the_lnet.ln_ping_target_md);

        rc = LNetEQFree(the_lnet.ln_ping_target_eq);
        LASSERT(rc == 0);

        lnet_ping_info_destroy();
}
static int
lnet_ni_tq_credits(lnet_ni_t *ni)
{
        int credits;

        LASSERT(ni->ni_ncpts >= 1);

        if (ni->ni_ncpts == 1)
                return ni->ni_net->net_tunables.lct_max_tx_credits;

        credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
        credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
        credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);

        return credits;
}
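/*
 * Illustrative note (not in the original source): with a hypothetical
 * lct_max_tx_credits = 256, lct_peer_tx_credits = 8 and ni_ncpts = 4, each
 * per-CPT tx queue gets 256 / 4 = 64 credits; the max() clamp keeps that
 * at no less than 8 * 8 = 64, and the min() clamp never lets one queue
 * exceed the network-wide limit of 256.
 */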
static void
lnet_ni_unlink_locked(lnet_ni_t *ni)
{
        if (!list_empty(&ni->ni_cptlist)) {
                list_del_init(&ni->ni_cptlist);
                lnet_ni_decref_locked(ni, 0);
        }

        /* move it to the zombie list so nobody can find it anymore */
        LASSERT(!list_empty(&ni->ni_netlist));
        list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
        lnet_ni_decref_locked(ni, 0);
}
static void
lnet_clear_zombies_nis_locked(struct lnet_net *net)
{
        int i;
        int islo;
        lnet_ni_t *ni;
        struct list_head *zombie_list = &net->net_ni_zombie;

        /*
         * Now wait for the NIs I just nuked to show up on the zombie
         * list and shut them down in guaranteed thread context
         */
        i = 2;
        while (!list_empty(zombie_list)) {
                int *ref;
                int j;

                ni = list_entry(zombie_list->next,
                                lnet_ni_t, ni_netlist);
                list_del_init(&ni->ni_netlist);
                /* the ni should be in deleting state. If it's not it's
                 * a bug */
                LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
                cfs_percpt_for_each(ref, j, ni->ni_refs) {
                        if (*ref == 0)
                                continue;
                        /* still busy, add it back to zombie list */
                        list_add(&ni->ni_netlist, zombie_list);
                        break;
                }

                if (!list_empty(&ni->ni_netlist)) {
                        lnet_net_unlock(LNET_LOCK_EX);
                        ++i;
                        if ((i & (-i)) == i) {
                                CDEBUG(D_WARNING,
                                       "Waiting for zombie LNI %s\n",
                                       libcfs_nid2str(ni->ni_nid));
                        }
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(cfs_time_seconds(1));
                        lnet_net_lock(LNET_LOCK_EX);
                        continue;
                }

                lnet_net_unlock(LNET_LOCK_EX);

                islo = ni->ni_net->net_lnd->lnd_type == LOLND;

                LASSERT(!in_interrupt());
                (net->net_lnd->lnd_shutdown)(ni);

                if (!islo)
                        CDEBUG(D_LNI, "Removed LNI %s\n",
                               libcfs_nid2str(ni->ni_nid));

                lnet_ni_free(ni);
                i = 2;
                lnet_net_lock(LNET_LOCK_EX);
        }
}
/* shut down the NI and release the refcount */
static void
lnet_shutdown_lndni(struct lnet_ni *ni)
{
        int i;
        struct lnet_net *net = ni->ni_net;

        lnet_net_lock(LNET_LOCK_EX);
        ni->ni_state = LNET_NI_STATE_DELETING;
        lnet_ni_unlink_locked(ni);
        lnet_incr_dlc_seq();
        lnet_net_unlock(LNET_LOCK_EX);

        /* clear messages for this NI on the lazy portal */
        for (i = 0; i < the_lnet.ln_nportals; i++)
                lnet_clear_lazy_portal(ni, i, "Shutting down NI");

        lnet_net_lock(LNET_LOCK_EX);
        lnet_clear_zombies_nis_locked(net);
        lnet_net_unlock(LNET_LOCK_EX);
}
static void
lnet_shutdown_lndnet(struct lnet_net *net)
{
        struct lnet_ni *ni;

        lnet_net_lock(LNET_LOCK_EX);

        net->net_state = LNET_NET_STATE_DELETING;

        list_del_init(&net->net_list);

        while (!list_empty(&net->net_ni_list)) {
                ni = list_entry(net->net_ni_list.next,
                                lnet_ni_t, ni_netlist);
                lnet_net_unlock(LNET_LOCK_EX);
                lnet_shutdown_lndni(ni);
                lnet_net_lock(LNET_LOCK_EX);
        }

        lnet_net_unlock(LNET_LOCK_EX);

        /* Do peer table cleanup for this net */
        lnet_peer_tables_cleanup(net);

        lnet_net_lock(LNET_LOCK_EX);
        /*
         * decrement ref count on lnd only when the entire network goes
         * away
         */
        net->net_lnd->lnd_refcount--;

        lnet_net_unlock(LNET_LOCK_EX);

        lnet_net_free(net);
}
static void
lnet_shutdown_lndnets(void)
{
        struct lnet_net *net;

        /* NB called holding the global mutex */

        /* All quiet on the API front */
        LASSERT(!the_lnet.ln_shutdown);
        LASSERT(the_lnet.ln_refcount == 0);

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_shutdown = 1; /* flag shutdown */

        while (!list_empty(&the_lnet.ln_nets)) {
                /*
                 * move the nets to the zombie list to avoid them being
                 * picked up for new work. LONET is also included in the
                 * nets that will be moved to the zombie list
                 */
                net = list_entry(the_lnet.ln_nets.next,
                                 struct lnet_net, net_list);
                list_move(&net->net_list, &the_lnet.ln_net_zombie);
        }

        /* Drop the cached loopback Net. */
        if (the_lnet.ln_loni != NULL) {
                lnet_ni_decref_locked(the_lnet.ln_loni, 0);
                the_lnet.ln_loni = NULL;
        }
        lnet_net_unlock(LNET_LOCK_EX);

        /* iterate through the net zombie list and delete each net */
        while (!list_empty(&the_lnet.ln_net_zombie)) {
                net = list_entry(the_lnet.ln_net_zombie.next,
                                 struct lnet_net, net_list);
                lnet_shutdown_lndnet(net);
        }

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_shutdown = 0;
        lnet_net_unlock(LNET_LOCK_EX);
}
static int
lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
{
        int rc = -EINVAL;
        struct lnet_tx_queue *tq;
        int i;
        struct lnet_net *net = ni->ni_net;

        mutex_lock(&the_lnet.ln_lnd_mutex);

        if (tun) {
                memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
                ni->ni_lnd_tunables_set = true;
        }

        rc = (net->net_lnd->lnd_startup)(ni);

        mutex_unlock(&the_lnet.ln_lnd_mutex);

        if (rc != 0) {
                LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
                                   rc, libcfs_lnd2str(net->net_lnd->lnd_type));
                lnet_net_lock(LNET_LOCK_EX);
                net->net_lnd->lnd_refcount--;
                lnet_net_unlock(LNET_LOCK_EX);
                return rc;
        }

        ni->ni_state = LNET_NI_STATE_ACTIVE;

        /* We keep a reference on the loopback net through the loopback NI */
        if (net->net_lnd->lnd_type == LOLND) {
                lnet_ni_addref(ni);
                LASSERT(the_lnet.ln_loni == NULL);
                the_lnet.ln_loni = ni;
                ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
                ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
                ni->ni_net->net_tunables.lct_max_tx_credits = 0;
                ni->ni_net->net_tunables.lct_peer_timeout = 0;
                return 0;
        }

        if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
            ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
                LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
                                   libcfs_lnd2str(net->net_lnd->lnd_type),
                                   ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
                                        "" : "per-peer ");
                /* shutdown the NI since if we get here then it must've
                 * already been started */
                lnet_shutdown_lndni(ni);
                return -EINVAL;
        }

        cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
                tq->tq_credits_min =
                tq->tq_credits_max =
                tq->tq_credits = lnet_ni_tq_credits(ni);
        }

        atomic_set(&ni->ni_tx_credits,
                   lnet_ni_tq_credits(ni) * ni->ni_ncpts);

        CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
               libcfs_nid2str(ni->ni_nid),
               ni->ni_net->net_tunables.lct_peer_tx_credits,
               lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
               ni->ni_net->net_tunables.lct_peer_rtr_credits,
               ni->ni_net->net_tunables.lct_peer_timeout);

        return 0;
}
static int
lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
{
        struct lnet_ni *ni;
        struct lnet_net *net_l = NULL;
        struct list_head local_ni_list;
        int rc;
        int ni_count = 0;
        __u32 lnd_type;
        lnd_t *lnd;
        int peer_timeout =
                net->net_tunables.lct_peer_timeout;
        int maxtxcredits =
                net->net_tunables.lct_max_tx_credits;
        int peerrtrcredits =
                net->net_tunables.lct_peer_rtr_credits;

        INIT_LIST_HEAD(&local_ni_list);

        /*
         * make sure that this net is unique. If it isn't then
         * we are adding interfaces to an already existing network, and
         * 'net' is just a convenient way to pass in the list.
         * if it is unique we need to find the LND and load it if
         * necessary.
         */
        if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
                lnd_type = LNET_NETTYP(net->net_id);

                LASSERT(libcfs_isknown_lnd(lnd_type));

                if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
                    lnd_type == IIBLND || lnd_type == VIBLND) {
                        CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
                        rc = -EINVAL;
                        goto failed0;
                }

                mutex_lock(&the_lnet.ln_lnd_mutex);
                lnd = lnet_find_lnd_by_type(lnd_type);

                if (lnd == NULL) {
                        mutex_unlock(&the_lnet.ln_lnd_mutex);
                        rc = request_module("%s", libcfs_lnd2modname(lnd_type));
                        mutex_lock(&the_lnet.ln_lnd_mutex);

                        lnd = lnet_find_lnd_by_type(lnd_type);
                        if (lnd == NULL) {
                                mutex_unlock(&the_lnet.ln_lnd_mutex);
                                CERROR("Can't load LND %s, module %s, rc=%d\n",
                                       libcfs_lnd2str(lnd_type),
                                       libcfs_lnd2modname(lnd_type), rc);
#ifndef HAVE_MODULE_LOADING_SUPPORT
                                LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
                                                   "compiled with kernel module "
                                                   "loading support.");
#endif
                                rc = -EINVAL;
                                goto failed0;
                        }
                }

                lnet_net_lock(LNET_LOCK_EX);
                lnd->lnd_refcount++;
                lnet_net_unlock(LNET_LOCK_EX);

                net->net_lnd = lnd;

                mutex_unlock(&the_lnet.ln_lnd_mutex);

                net_l = net;
        }

        /*
         * net_l: if the network being added is unique then net_l
         *        will point to that network
         *        if the network being added is not unique then
         *        net_l points to the existing network.
         *
         * When we enter the loop below, we'll pick NIs off the
         * network being added and start them up, then add them to
         * a local ni list. Once we've successfully started all
         * the NIs then we join the local NI list (of started-up
         * networks) with the net_l->net_ni_list, which should
         * point to the correct network to add the new ni list to.
         *
         * If any of the new NIs fail to start up, then we want to
         * iterate through the local ni list, which should include
         * any NIs which were successfully started up, and shut
         * them down.
         *
         * After that we want to delete the network being added,
         * to avoid a memory leak.
         */

        /*
         * When a network uses TCP bonding then all its interfaces
         * must be specified when the network is first defined: the
         * TCP bonding code doesn't allow for interfaces to be added
         * later.
         */
        if (net_l != net && net_l != NULL && use_tcp_bonding &&
            LNET_NETTYP(net_l->net_id) == SOCKLND) {
                rc = -EINVAL;
                goto failed0;
        }

        while (!list_empty(&net->net_ni_added)) {
                ni = list_entry(net->net_ni_added.next, struct lnet_ni,
                                ni_netlist);
                list_del_init(&ni->ni_netlist);

                /* make sure that the NI we're about to start
                 * up is actually unique. If it's not, fail. */
                if (!lnet_ni_unique_net(&net_l->net_ni_list,
                                        ni->ni_interfaces[0])) {
                        rc = -EINVAL;
                        goto failed1;
                }

                /* adjust the pointer to the parent network, just in case
                 * the net is a duplicate */
                ni->ni_net = net_l;

                rc = lnet_startup_lndni(ni, tun);

                LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
                        ni->ni_net->net_lnd->lnd_query != NULL);

                if (rc < 0)
                        goto failed1;

                list_add_tail(&ni->ni_netlist, &local_ni_list);

                ni_count++;
        }

        lnet_net_lock(LNET_LOCK_EX);
        list_splice_tail(&local_ni_list, &net_l->net_ni_list);
        lnet_incr_dlc_seq();
        lnet_net_unlock(LNET_LOCK_EX);

        /* if the network is not unique then we don't want to keep
         * it around after we're done. Free it. Otherwise add that
         * net to the global the_lnet.ln_nets */
        if (net_l != net && net_l != NULL) {
                /*
                 * TODO - note. currently the tunables can not be updated
                 * once added
                 */
                lnet_net_free(net);
        } else {
                net->net_state = LNET_NET_STATE_ACTIVE;
                /*
                 * restore tunables after they have been overwritten by the
                 * lnd
                 */
                if (peer_timeout != -1)
                        net->net_tunables.lct_peer_timeout = peer_timeout;
                if (maxtxcredits != -1)
                        net->net_tunables.lct_max_tx_credits = maxtxcredits;
                if (peerrtrcredits != -1)
                        net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;

                lnet_net_lock(LNET_LOCK_EX);
                list_add_tail(&net->net_list, &the_lnet.ln_nets);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        return ni_count;

failed1:
        /*
         * shutdown the new NIs that are being started up
         * free the NET being started
         */
        while (!list_empty(&local_ni_list)) {
                ni = list_entry(local_ni_list.next, struct lnet_ni,
                                ni_netlist);

                lnet_shutdown_lndni(ni);
        }

failed0:
        lnet_net_free(net);

        return rc;
}
static int
lnet_startup_lndnets(struct list_head *netlist)
{
        struct lnet_net *net;
        int rc;
        int ni_count = 0;

        while (!list_empty(netlist)) {
                net = list_entry(netlist->next, struct lnet_net, net_list);
                list_del_init(&net->net_list);

                rc = lnet_startup_lndnet(net, NULL);
                if (rc < 0)
                        goto failed;

                ni_count += rc;
        }

        return ni_count;
failed:
        lnet_shutdown_lndnets();

        return rc;
}
/**
 * Initialize LNet library.
 *
 * Automatically called at module loading time. Caller has to call
 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
 * latter returned 0. It must be called exactly once.
 *
 * \retval 0 on success
 * \retval -ve on failures.
 */
int lnet_lib_init(void)
{
        int rc;

        lnet_assert_wire_constants();

        memset(&the_lnet, 0, sizeof(the_lnet));

        /* refer to global cfs_cpt_table for now */
        the_lnet.ln_cpt_table = cfs_cpt_table;
        the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);

        LASSERT(the_lnet.ln_cpt_number > 0);
        if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
                /* we are under risk of consuming all lh_cookie */
                CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
                       "please change setting of CPT-table and retry\n",
                       the_lnet.ln_cpt_number, LNET_CPT_MAX);
                return -E2BIG;
        }

        while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
                the_lnet.ln_cpt_bits++;

        rc = lnet_create_locks();
        if (rc != 0) {
                CERROR("Can't create LNet global locks: %d\n", rc);
                return rc;
        }

        the_lnet.ln_refcount = 0;
        LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
        INIT_LIST_HEAD(&the_lnet.ln_lnds);
        INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
        INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
        INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);

        /* The hash table size is the number of bits it takes to express the set
         * ln_num_routes, minus 1 (better to underestimate than overestimate so
         * we don't waste memory). */
        if (rnet_htable_size <= 0)
                rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
        else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
                rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
        the_lnet.ln_remote_nets_hbits = max_t(int, 1,
                                              order_base_2(rnet_htable_size) - 1);
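        /*
         * Illustrative note (not in the original source): with a
         * hypothetical rnet_htable_size = 128, order_base_2(128) = 7, so
         * ln_remote_nets_hbits becomes max(1, 6) = 6 and the remote-nets
         * table gets 1 << 6 = 64 hash buckets.
         */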
        /* All LNDs apart from the LOLND are in separate modules. They
         * register themselves when their module loads, and unregister
         * themselves when their module is unloaded. */
        lnet_register_lnd(&the_lolnd);
        return 0;
}

/**
 * Finalize LNet library.
 *
 * \pre lnet_lib_init() called with success.
 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
 */
void lnet_lib_exit(void)
{
        LASSERT(the_lnet.ln_refcount == 0);

        while (!list_empty(&the_lnet.ln_lnds))
                lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
                                               lnd_t, lnd_list));
        lnet_destroy_locks();
}
/**
 * Set LNet PID and start LNet interfaces, routing, and forwarding.
 *
 * Users must call this function at least once before any other functions.
 * For each successful call there must be a corresponding call to
 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
 * ignored.
 *
 * The PID used by LNet may be different from the one requested.
 * See LNetGetId().
 *
 * \param requested_pid PID requested by the caller.
 *
 * \return >= 0 on success, and < 0 error code on failures.
 */
int
LNetNIInit(lnet_pid_t requested_pid)
{
        int im_a_router = 0;
        int rc;
        int ni_count;
        struct lnet_ping_info *pinfo;
        lnet_handle_md_t md_handle;
        struct list_head net_head;
        struct lnet_net *net;

        INIT_LIST_HEAD(&net_head);

        mutex_lock(&the_lnet.ln_api_mutex);

        CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);

        if (the_lnet.ln_refcount > 0) {
                rc = the_lnet.ln_refcount++;
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }

        rc = lnet_prepare(requested_pid);
        if (rc != 0) {
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }

        /* create a network for Loopback network */
        net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
        if (net == NULL) {
                rc = -ENOMEM;
                goto err_empty_list;
        }

        /* Add in the loopback NI */
        if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
                rc = -ENOMEM;
                goto err_empty_list;
        }

        /* If LNet is being initialized via DLC it is possible
         * that the user requests not to load module parameters (ones which
         * are supported by DLC) on initialization. Therefore, make sure not
         * to load networks, routes and forwarding from module parameters
         * in this case. On cleanup in case of failure only clean up
         * routes if they have been loaded */
        if (!the_lnet.ln_nis_from_mod_params) {
                rc = lnet_parse_networks(&net_head, lnet_get_networks(),
                                         use_tcp_bonding);
                if (rc < 0)
                        goto err_empty_list;
        }

        ni_count = lnet_startup_lndnets(&net_head);
        if (ni_count < 0) {
                rc = ni_count;
                goto err_empty_list;
        }

        if (!the_lnet.ln_nis_from_mod_params) {
                rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
                if (rc != 0)
                        goto err_shutdown_lndnis;

                rc = lnet_check_routes();
                if (rc != 0)
                        goto err_destroy_routes;

                rc = lnet_rtrpools_alloc(im_a_router);
                if (rc != 0)
                        goto err_destroy_routes;
        }

        rc = lnet_acceptor_start();
        if (rc != 0)
                goto err_destroy_routes;

        the_lnet.ln_refcount = 1;
        /* Now I may use my own API functions... */

        rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
        if (rc != 0)
                goto err_acceptor_stop;

        lnet_ping_target_update(pinfo, md_handle);

        rc = lnet_router_checker_start();
        if (rc != 0)
                goto err_stop_ping;

        mutex_unlock(&the_lnet.ln_api_mutex);

        return 0;

err_stop_ping:
        lnet_ping_target_fini();
err_acceptor_stop:
        the_lnet.ln_refcount = 0;
        lnet_acceptor_stop();
err_destroy_routes:
        if (!the_lnet.ln_nis_from_mod_params)
                lnet_destroy_routes();
err_shutdown_lndnis:
        lnet_shutdown_lndnets();
err_empty_list:
        lnet_unprepare();
        LASSERT(rc < 0);
        mutex_unlock(&the_lnet.ln_api_mutex);
        while (!list_empty(&net_head)) {
                struct lnet_net *net;

                net = list_entry(net_head.next, struct lnet_net, net_list);
                list_del_init(&net->net_list);
                lnet_net_free(net);
        }
        return rc;
}
EXPORT_SYMBOL(LNetNIInit);
/**
 * Stop LNet interfaces, routing, and forwarding.
 *
 * Users must call this function once for each successful call to LNetNIInit().
 * Once the LNetNIFini() operation has been started, the results of pending
 * API operations are undefined.
 *
 * \return always 0 for current implementation.
 */
int
LNetNIFini()
{
        mutex_lock(&the_lnet.ln_api_mutex);

        LASSERT(the_lnet.ln_refcount > 0);

        if (the_lnet.ln_refcount != 1) {
                the_lnet.ln_refcount--;
        } else {
                LASSERT(!the_lnet.ln_niinit_self);

                lnet_router_checker_stop();
                lnet_ping_target_fini();

                /* Teardown fns that use my own API functions BEFORE here */
                the_lnet.ln_refcount = 0;

                lnet_acceptor_stop();
                lnet_destroy_routes();
                lnet_shutdown_lndnets();
                lnet_unprepare();
        }

        mutex_unlock(&the_lnet.ln_api_mutex);
        return 0;
}
EXPORT_SYMBOL(LNetNIFini);
static int lnet_handle_dbg_task(struct lnet_ioctl_dbg *dbg,
                                struct lnet_dbg_task_info *dbg_info)
{
        switch (dbg->dbg_task) {
        case LNET_DBG_INCR_DLC_SEQ:
                lnet_incr_dlc_seq();
                break;
        }

        return 0;
}
/**
 * Grabs the ni data from the ni structure and fills the out
 * parameters
 *
 * \param[in] ni network interface structure
 * \param[out] cfg_ni NI config information
 * \param[out] tun network and LND tunables
 */
static void
lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
                  struct lnet_ioctl_config_lnd_tunables *tun,
                  struct lnet_ioctl_element_stats *stats,
                  __u32 tun_size)
{
        size_t min_size = 0;
        int i;

        if (!ni || !cfg_ni || !tun)
                return;

        if (ni->ni_interfaces[0] != NULL) {
                for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
                        if (ni->ni_interfaces[i] != NULL) {
                                strncpy(cfg_ni->lic_ni_intf[i],
                                        ni->ni_interfaces[i],
                                        sizeof(cfg_ni->lic_ni_intf[i]));
                        }
                }
        }

        cfg_ni->lic_nid = ni->ni_nid;
        cfg_ni->lic_status = ni->ni_status->ns_status;
        cfg_ni->lic_tcp_bonding = use_tcp_bonding;
        cfg_ni->lic_dev_cpt = ni->dev_cpt;

        memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));

        if (stats) {
                stats->send_count = atomic_read(&ni->ni_stats.send_count);
                stats->recv_count = atomic_read(&ni->ni_stats.recv_count);
        }

        /*
         * tun->lt_tun will always be present, but in order to be
         * backwards compatible, we need to deal with the case when
         * tun->lt_tun is smaller than what the kernel has, because it
         * comes from an older version of a userspace program. In that
         * case we'll only copy as much information as we have available
         * space for.
         */
        min_size = tun_size - sizeof(tun->lt_cmn);
        memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);

        /* copy over the cpts */
        if (ni->ni_ncpts == LNET_CPT_NUMBER &&
            ni->ni_cpts == NULL) {
                for (i = 0; i < ni->ni_ncpts; i++)
                        cfg_ni->lic_cpts[i] = i;
        } else {
                for (i = 0;
                     ni->ni_cpts != NULL && i < ni->ni_ncpts &&
                     i < LNET_MAX_SHOW_NUM_CPT;
                     i++)
                        cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
        }
        cfg_ni->lic_ncpts = ni->ni_ncpts;
}
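/*
 * Illustrative note (not in the original source): if a hypothetical older
 * userspace tool passes tun_size = 80 while sizeof(tun->lt_cmn) = 64, only
 * min_size = 80 - 64 = 16 bytes of LND tunables are copied back, so the
 * old tool never has data written past the buffer it allocated.
 */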
/**
 * NOTE: This is a legacy function left in the code to be backwards
 * compatible with older userspace programs. It should eventually be
 * removed.
 *
 * Grabs the ni data from the ni structure and fills the out
 * parameters
 *
 * \param[in] ni network interface structure
 * \param[out] config config information
 */
static void
lnet_fill_ni_info_legacy(struct lnet_ni *ni,
                         struct lnet_ioctl_config_data *config)
{
        struct lnet_ioctl_net_config *net_config;
        struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
        size_t min_size, tunable_size = 0;
        int i;

        if (!ni || !config)
                return;

        net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
        if (!net_config)
                return;

        BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
                     ARRAY_SIZE(net_config->ni_interfaces));

        for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
                if (!ni->ni_interfaces[i])
                        break;

                strncpy(net_config->ni_interfaces[i],
                        ni->ni_interfaces[i],
                        sizeof(net_config->ni_interfaces[i]));
        }

        config->cfg_nid = ni->ni_nid;
        config->cfg_config_u.cfg_net.net_peer_timeout =
                ni->ni_net->net_tunables.lct_peer_timeout;
        config->cfg_config_u.cfg_net.net_max_tx_credits =
                ni->ni_net->net_tunables.lct_max_tx_credits;
        config->cfg_config_u.cfg_net.net_peer_tx_credits =
                ni->ni_net->net_tunables.lct_peer_tx_credits;
        config->cfg_config_u.cfg_net.net_peer_rtr_credits =
                ni->ni_net->net_tunables.lct_peer_rtr_credits;

        net_config->ni_status = ni->ni_status->ns_status;

        if (ni->ni_cpts) {
                int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);

                for (i = 0; i < num_cpts; i++)
                        net_config->ni_cpts[i] = ni->ni_cpts[i];

                config->cfg_ncpts = num_cpts;
        }

        /*
         * See if user land tools sent in a newer and larger version
         * of struct lnet_tunables than what the kernel uses.
         */
        min_size = sizeof(*config) + sizeof(*net_config);

        if (config->cfg_hdr.ioc_len > min_size)
                tunable_size = config->cfg_hdr.ioc_len - min_size;

        /* Don't copy too much data to user space */
        min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
        lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;

        if (lnd_cfg && min_size) {
                memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
                config->cfg_config_u.cfg_net.net_interface_count = 1;

                /* Tell user land that kernel side has less data */
                if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
                        min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
                        config->cfg_hdr.ioc_len -= min_size;
                }
        }
}
struct lnet_ni *
lnet_get_ni_idx_locked(int idx)
{
        struct lnet_ni *ni;
        struct lnet_net *net;

        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
                        if (idx-- == 0)
                                return ni;
                }
        }

        return NULL;
}

struct lnet_ni *
lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
{
        struct lnet_ni *ni;
        struct lnet_net *net = mynet;

        if (prev == NULL) {
                if (net == NULL)
                        net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
                                         net_list);
                ni = list_entry(net->net_ni_list.next, struct lnet_ni,
                                ni_netlist);

                return ni;
        }

        if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
                /* if you reached the end of the ni list and the net is
                 * specified, then there are no more nis in that net */
                if (mynet != NULL)
                        return NULL;

                /* we reached the end of this net ni list. move to the
                 * next net */
                if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
                        /* no more nets and no more NIs. */
                        return NULL;

                /* get the next net */
                net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
                                 net_list);
                /* get the ni on it */
                ni = list_entry(net->net_ni_list.next, struct lnet_ni,
                                ni_netlist);

                return ni;
        }

        /* there are more nis left */
        ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);

        return ni;
}
int
lnet_get_net_config(struct lnet_ioctl_config_data *config)
{
        struct lnet_ni *ni;
        int cpt;
        int rc = -ENOENT;
        int idx = config->cfg_count;

        cpt = lnet_net_lock_current();

        ni = lnet_get_ni_idx_locked(idx);

        if (ni != NULL) {
                rc = 0;
                lnet_ni_lock(ni);
                lnet_fill_ni_info_legacy(ni, config);
                lnet_ni_unlock(ni);
        }

        lnet_net_unlock(cpt);
        return rc;
}

int
lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
                   struct lnet_ioctl_config_lnd_tunables *tun,
                   struct lnet_ioctl_element_stats *stats,
                   __u32 tun_size)
{
        struct lnet_ni *ni;
        int cpt;
        int rc = -ENOENT;

        if (!cfg_ni || !tun || !stats)
                return -EINVAL;

        cpt = lnet_net_lock_current();

        ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);

        if (ni) {
                rc = 0;
                lnet_ni_lock(ni);
                lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
                lnet_ni_unlock(ni);
        }

        lnet_net_unlock(cpt);
        return rc;
}
static int lnet_add_net_common(struct lnet_net *net,
                               struct lnet_ioctl_config_lnd_tunables *tun)
{
        struct lnet_net *netl = NULL;
        __u32 net_id;
        lnet_ping_info_t *pinfo;
        lnet_handle_md_t md_handle;
        int rc;
        lnet_remotenet_t *rnet;
        int net_ni_count;
        int num_acceptor_nets;

        lnet_net_lock(LNET_LOCK_EX);
        rnet = lnet_find_rnet_locked(net->net_id);
        lnet_net_unlock(LNET_LOCK_EX);
        /*
         * make sure that the net added doesn't invalidate the current
         * configuration LNet is keeping
         */
        if (rnet != NULL) {
                CERROR("Adding net %s will invalidate routing configuration\n",
                       libcfs_net2str(net->net_id));
                lnet_net_free(net);
                return -EUSERS;
        }

        /*
         * make sure you calculate the correct number of slots in the ping
         * info. Since the ping info is a flattened list of all the NIs,
         * we should allocate enough slots to accommodate the number of NIs
         * which will be added.
         *
         * since ni hasn't been configured yet, use
         * lnet_get_net_ni_count_pre() which checks the net_ni_added list
         */
        net_ni_count = lnet_get_net_ni_count_pre(net);

        rc = lnet_ping_info_setup(&pinfo, &md_handle,
                                  net_ni_count + lnet_get_ni_count(),
                                  false);
        if (rc < 0) {
                lnet_net_free(net);
                return rc;
        }

        if (tun)
                memcpy(&net->net_tunables,
                       &tun->lt_cmn, sizeof(net->net_tunables));
        else
                memset(&net->net_tunables, -1, sizeof(net->net_tunables));

        /*
         * before starting this network get a count of the current TCP
         * networks which require the acceptor thread running. If that
         * count is == 0 before we start up this network, then we'd want to
         * start up the acceptor thread after starting up this network
         */
        num_acceptor_nets = lnet_count_acceptor_nets();

        net_id = net->net_id;

        rc = lnet_startup_lndnet(net,
                                 (tun) ? &tun->lt_tun : NULL);
        if (rc < 0)
                goto failed;

        lnet_net_lock(LNET_LOCK_EX);
        netl = lnet_get_net_locked(net_id);
        lnet_net_unlock(LNET_LOCK_EX);

        LASSERT(netl);

        /*
         * Start the acceptor thread if this is the first network
         * being added that requires the thread.
         */
        if (netl->net_lnd->lnd_accept &&
            num_acceptor_nets == 0) {
                rc = lnet_acceptor_start();
                if (rc < 0) {
                        /* shutdown the net that we just started */
                        CERROR("Failed to start up acceptor thread\n");
                        lnet_shutdown_lndnet(net);
                        goto failed;
                }
        }

        lnet_net_lock(LNET_LOCK_EX);
        lnet_peer_net_added(netl);
        lnet_net_unlock(LNET_LOCK_EX);

        lnet_ping_target_update(pinfo, md_handle);

        return 0;

failed:
        lnet_ping_md_unlink(pinfo, &md_handle);
        lnet_ping_info_free(pinfo);
        return rc;
}
static int lnet_handle_legacy_ip2nets(char *ip2nets,
                                      struct lnet_ioctl_config_lnd_tunables *tun)
{
        struct lnet_net *net;
        char *nets;
        int rc;
        struct list_head net_head;

        INIT_LIST_HEAD(&net_head);

        rc = lnet_parse_ip2nets(&nets, ip2nets);
        if (rc < 0)
                return rc;

        rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
        if (rc < 0)
                return rc;

        mutex_lock(&the_lnet.ln_api_mutex);
        while (!list_empty(&net_head)) {
                net = list_entry(net_head.next, struct lnet_net, net_list);
                list_del_init(&net->net_list);
                rc = lnet_add_net_common(net, tun);
                if (rc < 0)
                        goto out;
        }

out:
        mutex_unlock(&the_lnet.ln_api_mutex);

        while (!list_empty(&net_head)) {
                net = list_entry(net_head.next, struct lnet_net, net_list);
                list_del_init(&net->net_list);
                lnet_net_free(net);
        }
        return rc;
}
int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
{
        struct lnet_net *net;
        struct lnet_ni *ni;
        struct lnet_ioctl_config_lnd_tunables *tun = NULL;
        int rc;
        __u32 net_id;
        int i;

        /* get the tunables if they are available */
        if (conf->lic_cfg_hdr.ioc_len >=
            sizeof(*conf) + sizeof(*tun))
                tun = (struct lnet_ioctl_config_lnd_tunables *)
                        conf->lic_bulk;

        /* handle legacy ip2nets from DLC */
        if (conf->lic_legacy_ip2nets[0] != '\0')
                return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
                                                  tun);

        net_id = LNET_NIDNET(conf->lic_nid);

        net = lnet_net_alloc(net_id, NULL);
        if (!net)
                return -ENOMEM;

        for (i = 0; i < conf->lic_ncpts; i++) {
                if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
                        return -EINVAL;
        }

        ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
                                       conf->lic_ni_intf[0]);
        if (!ni)
                return -ENOMEM;

        mutex_lock(&the_lnet.ln_api_mutex);

        rc = lnet_add_net_common(net, tun);

        mutex_unlock(&the_lnet.ln_api_mutex);

        return rc;
}
int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
{
        struct lnet_net *net;
        struct lnet_ni *ni;
        __u32 net_id = LNET_NIDNET(conf->lic_nid);
        lnet_ping_info_t *pinfo;
        lnet_handle_md_t md_handle;
        int rc;
        int net_count;
        __u32 addr;

        /* don't allow userspace to shutdown the LOLND */
        if (LNET_NETTYP(net_id) == LOLND)
                return -EINVAL;

        mutex_lock(&the_lnet.ln_api_mutex);

        lnet_net_lock(0);

        net = lnet_get_net_locked(net_id);
        if (!net) {
                CERROR("net %s not found\n",
                       libcfs_net2str(net_id));
                rc = -ENOENT;
                goto unlock_net;
        }

        addr = LNET_NIDADDR(conf->lic_nid);
        if (addr == 0) {
                /* remove the entire net */
                net_count = lnet_get_net_ni_count_locked(net);

                lnet_net_unlock(0);

                /* create and link a new ping info, before removing the old one */
                rc = lnet_ping_info_setup(&pinfo, &md_handle,
                                          lnet_get_ni_count() - net_count,
                                          false);
                if (rc != 0)
                        goto unlock_api_mutex;

                lnet_shutdown_lndnet(net);

                if (lnet_count_acceptor_nets() == 0)
                        lnet_acceptor_stop();

                lnet_ping_target_update(pinfo, md_handle);

                goto unlock_api_mutex;
        }

        ni = lnet_nid2ni_locked(conf->lic_nid, 0);
        if (!ni) {
                CERROR("nid %s not found\n",
                       libcfs_nid2str(conf->lic_nid));
                rc = -ENOENT;
                goto unlock_net;
        }

        net_count = lnet_get_net_ni_count_locked(net);

        lnet_net_unlock(0);

        /* create and link a new ping info, before removing the old one */
        rc = lnet_ping_info_setup(&pinfo, &md_handle,
                                  lnet_get_ni_count() - 1, false);
        if (rc != 0)
                goto unlock_api_mutex;

        lnet_shutdown_lndni(ni);

        if (lnet_count_acceptor_nets() == 0)
                lnet_acceptor_stop();

        lnet_ping_target_update(pinfo, md_handle);

        /* check if the net is empty and remove it if it is */
        if (net_count == 1)
                lnet_shutdown_lndnet(net);

        goto unlock_api_mutex;

unlock_net:
        lnet_net_unlock(0);
unlock_api_mutex:
        mutex_unlock(&the_lnet.ln_api_mutex);

        return rc;
}
/*
 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
 * They are only expected to be called for unique networks.
 * That can happen as a result of older DLC library
 * calls. Multi-Rail DLC and beyond no longer use these APIs.
 */
int
lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
{
        struct lnet_net *net;
        struct list_head net_head;
        int rc;
        struct lnet_ioctl_config_lnd_tunables tun;
        char *nets = conf->cfg_config_u.cfg_net.net_intf;

        INIT_LIST_HEAD(&net_head);

        /* Create net/ni structures for the network string */
        rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
        if (rc <= 0)
                return rc == 0 ? -EINVAL : rc;

        mutex_lock(&the_lnet.ln_api_mutex);

        if (rc > 1) {
                rc = -EINVAL; /* only add one network per call */
                goto out_unlock_clean;
        }

        net = list_entry(net_head.next, struct lnet_net, net_list);
        list_del_init(&net->net_list);

        LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));

        memset(&tun, 0, sizeof(tun));

        tun.lt_cmn.lct_peer_timeout =
          conf->cfg_config_u.cfg_net.net_peer_timeout;
        tun.lt_cmn.lct_peer_tx_credits =
          conf->cfg_config_u.cfg_net.net_peer_tx_credits;
        tun.lt_cmn.lct_peer_rtr_credits =
          conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
        tun.lt_cmn.lct_max_tx_credits =
          conf->cfg_config_u.cfg_net.net_max_tx_credits;

        rc = lnet_add_net_common(net, &tun);

out_unlock_clean:
        mutex_unlock(&the_lnet.ln_api_mutex);
        while (!list_empty(&net_head)) {
                net = list_entry(net_head.next, struct lnet_net, net_list);
                list_del_init(&net->net_list);
                lnet_net_free(net);
        }
        return rc;
}
int
lnet_dyn_del_net(__u32 net_id)
{
        struct lnet_net *net;
        struct lnet_ping_info *pinfo;
        lnet_handle_md_t md_handle;
        int rc;
        int net_ni_count;

        /* don't allow userspace to shutdown the LOLND */
        if (LNET_NETTYP(net_id) == LOLND)
                return -EINVAL;

        mutex_lock(&the_lnet.ln_api_mutex);

        lnet_net_lock(0);

        net = lnet_get_net_locked(net_id);
        if (net == NULL) {
                lnet_net_unlock(0);
                rc = -EINVAL;
                goto out;
        }

        net_ni_count = lnet_get_net_ni_count_locked(net);

        lnet_net_unlock(0);

        /* create and link a new ping info, before removing the old one */
        rc = lnet_ping_info_setup(&pinfo, &md_handle,
                                  lnet_get_ni_count() - net_ni_count, false);
        if (rc != 0)
                goto out;

        lnet_shutdown_lndnet(net);

        if (lnet_count_acceptor_nets() == 0)
                lnet_acceptor_stop();

        lnet_ping_target_update(pinfo, md_handle);

out:
        mutex_unlock(&the_lnet.ln_api_mutex);

        return rc;
}

void lnet_incr_dlc_seq(void)
{
        atomic_inc(&lnet_dlc_seq_no);
}

__u32 lnet_get_dlc_seq_locked(void)
{
        return atomic_read(&lnet_dlc_seq_no);
}

inline __u32 lnet_get_numa_range(void)
{
        return lnet_numa_range;
}
/**
 * LNet ioctl handler.
 */
int
LNetCtl(unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        struct lnet_ioctl_config_data *config;
        lnet_process_id_t id = {0};
        lnet_ni_t *ni;
        int rc;

        BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
                     sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);

        switch (cmd) {
        case IOC_LIBCFS_GET_NI:
                rc = LNetGetId(data->ioc_count, &id);
                data->ioc_nid = id.nid;
                return rc;

        case IOC_LIBCFS_FAIL_NID:
                return lnet_fail_nid(data->ioc_nid, data->ioc_count);

        case IOC_LIBCFS_ADD_ROUTE:
                config = arg;

                if (config->cfg_hdr.ioc_len < sizeof(*config))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_add_route(config->cfg_net,
                                    config->cfg_config_u.cfg_route.rtr_hop,
                                    config->cfg_nid,
                                    config->cfg_config_u.cfg_route.rtr_priority);
                if (rc == 0) {
                        rc = lnet_check_routes();
                        if (rc != 0)
                                lnet_del_route(config->cfg_net,
                                               config->cfg_nid);
                }
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;

        case IOC_LIBCFS_DEL_ROUTE:
                config = arg;

                if (config->cfg_hdr.ioc_len < sizeof(*config))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_del_route(config->cfg_net, config->cfg_nid);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;

        case IOC_LIBCFS_GET_ROUTE:
                config = arg;

                if (config->cfg_hdr.ioc_len < sizeof(*config))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_get_route(config->cfg_count,
                                    &config->cfg_net,
                                    &config->cfg_config_u.cfg_route.rtr_hop,
                                    &config->cfg_nid,
                                    &config->cfg_config_u.cfg_route.rtr_flags,
                                    &config->cfg_config_u.cfg_route.rtr_priority);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        case IOC_LIBCFS_GET_LOCAL_NI: {
                struct lnet_ioctl_config_ni *cfg_ni;
                struct lnet_ioctl_config_lnd_tunables *tun = NULL;
                struct lnet_ioctl_element_stats *stats;
                __u32 tun_size;

                cfg_ni = arg;

                /* get the tunables if they are available */
                if (cfg_ni->lic_cfg_hdr.ioc_len <
                    sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
                        return -EINVAL;

                stats = (struct lnet_ioctl_element_stats *)
                        cfg_ni->lic_bulk;
                tun = (struct lnet_ioctl_config_lnd_tunables *)
                        (cfg_ni->lic_bulk + sizeof(*stats));

                tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
                           sizeof(*stats);

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
        case IOC_LIBCFS_GET_NET: {
                size_t total = sizeof(*config) +
                               sizeof(struct lnet_ioctl_net_config);
                config = arg;

                if (config->cfg_hdr.ioc_len < total)
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_get_net_config(config);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }

        case IOC_LIBCFS_GET_LNET_STATS: {
                struct lnet_ioctl_lnet_stats *lnet_stats = arg;

                if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                lnet_counters_get(&lnet_stats->st_cntrs);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }
        case IOC_LIBCFS_CONFIG_RTR:
                config = arg;

                if (config->cfg_hdr.ioc_len < sizeof(*config))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                if (config->cfg_config_u.cfg_buffers.buf_enable) {
                        rc = lnet_rtrpools_enable();
                        mutex_unlock(&the_lnet.ln_api_mutex);
                        return rc;
                }
                lnet_rtrpools_disable();
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;

        case IOC_LIBCFS_ADD_BUF:
                config = arg;

                if (config->cfg_hdr.ioc_len < sizeof(*config))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
                                                buf_tiny,
                                          config->cfg_config_u.cfg_buffers.
                                                buf_small,
                                          config->cfg_config_u.cfg_buffers.
                                                buf_large);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        case IOC_LIBCFS_SET_NUMA_RANGE: {
                struct lnet_ioctl_numa_range *numa;

                numa = arg;
                if (numa->nr_hdr.ioc_len != sizeof(*numa))
                        return -EINVAL;
                mutex_lock(&the_lnet.ln_api_mutex);
                lnet_numa_range = numa->nr_range;
                mutex_unlock(&the_lnet.ln_api_mutex);
                return 0;
        }

        case IOC_LIBCFS_GET_NUMA_RANGE: {
                struct lnet_ioctl_numa_range *numa;

                numa = arg;
                if (numa->nr_hdr.ioc_len != sizeof(*numa))
                        return -EINVAL;
                numa->nr_range = lnet_numa_range;
                return 0;
        }
        case IOC_LIBCFS_GET_BUF: {
                struct lnet_ioctl_pool_cfg *pool_cfg;
                size_t total = sizeof(*config) + sizeof(*pool_cfg);

                config = arg;

                if (config->cfg_hdr.ioc_len < total)
                        return -EINVAL;

                pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
        case IOC_LIBCFS_ADD_PEER_NI: {
                struct lnet_ioctl_peer_cfg *cfg = arg;

                if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_add_peer_ni_to_peer(cfg->prcfg_key_nid,
                                              cfg->prcfg_cfg_nid,
                                              cfg->prcfg_mr);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }

        case IOC_LIBCFS_DEL_PEER_NI: {
                struct lnet_ioctl_peer_cfg *cfg = arg;

                if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_del_peer_ni_from_peer(cfg->prcfg_key_nid,
                                                cfg->prcfg_cfg_nid);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
        case IOC_LIBCFS_GET_PEER_INFO: {
                struct lnet_ioctl_peer *peer_info = arg;

                if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
                        return -EINVAL;

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_get_peer_ni_info(
                   peer_info->pr_count,
                   &peer_info->pr_nid,
                   peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
                   &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
                   &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
                   &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
                   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
                   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
                   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
                   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
        case IOC_LIBCFS_GET_PEER_NI: {
                struct lnet_ioctl_peer_cfg *cfg = arg;
                struct lnet_peer_ni_credit_info *lpni_cri;
                struct lnet_ioctl_element_stats *lpni_stats;
                size_t total = sizeof(*cfg) + sizeof(*lpni_cri) +
                               sizeof(*lpni_stats);

                if (cfg->prcfg_hdr.ioc_len < total)
                        return -EINVAL;

                lpni_cri = (struct lnet_peer_ni_credit_info *)cfg->prcfg_bulk;
                lpni_stats = (struct lnet_ioctl_element_stats *)
                             (cfg->prcfg_bulk + sizeof(*lpni_cri));

                mutex_lock(&the_lnet.ln_api_mutex);
                rc = lnet_get_peer_info(cfg->prcfg_idx, &cfg->prcfg_key_nid,
                                        &cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
                                        lpni_cri, lpni_stats);
                mutex_unlock(&the_lnet.ln_api_mutex);
                return rc;
        }
        case IOC_LIBCFS_NOTIFY_ROUTER: {
                unsigned long jiffies_passed;

                /* ioc_u64[0] is a wallclock timestamp in seconds; turn the
                 * age of the notification into jiffies so it can be
                 * subtracted from the current jiffies count */
                jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
                jiffies_passed = cfs_time_seconds(jiffies_passed);

                return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
                                   jiffies - jiffies_passed);
        }
        case IOC_LIBCFS_LNET_DIST:
                rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
                if (rc < 0 && rc != -EHOSTUNREACH)
                        return rc;

                data->ioc_u32[0] = rc;
                return 0;

        case IOC_LIBCFS_TESTPROTOCOMPAT:
                lnet_net_lock(LNET_LOCK_EX);
                the_lnet.ln_testprotocompat = data->ioc_flags;
                lnet_net_unlock(LNET_LOCK_EX);
                return 0;

        case IOC_LIBCFS_LNET_FAULT:
                return lnet_fault_ctl(data->ioc_flags, data);
        case IOC_LIBCFS_PING: {
                signed long timeout;

                id.nid = data->ioc_nid;
                id.pid = data->ioc_u32[0];

                /* Don't block longer than 2 minutes */
                if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
                        return -EINVAL;

                /* If timeout is negative then disable timeout */
                if ((s32)data->ioc_u32[1] < 0)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else
                        timeout = msecs_to_jiffies(data->ioc_u32[1]);

                rc = lnet_ping(id, timeout, data->ioc_pbuf1,
                               data->ioc_plen1 / sizeof(lnet_process_id_t));
                if (rc < 0)
                        return rc;

                data->ioc_count = rc;
                return 0;
        }
        case IOC_LIBCFS_DBG: {
                struct lnet_ioctl_dbg *dbg = arg;
                struct lnet_dbg_task_info *dbg_info;
                size_t total = sizeof(*dbg) + sizeof(*dbg_info);

                if (dbg->dbg_hdr.ioc_len < total)
                        return -EINVAL;

                dbg_info = (struct lnet_dbg_task_info *)dbg->dbg_bulk;

                return lnet_handle_dbg_task(dbg, dbg_info);
        }

        default:
                ni = lnet_net2ni_addref(data->ioc_net);
                if (ni == NULL)
                        return -EINVAL;

                if (ni->ni_net->net_lnd->lnd_ctl == NULL)
                        rc = -EINVAL;
                else
                        rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);

                lnet_ni_decref(ni);
                return rc;
        }
        /* not reached */
}
EXPORT_SYMBOL(LNetCtl);
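/*
 * Usage sketch for the ioctl handler above (illustrative only; the
 * function name lnet_example_first_nid is hypothetical): a kernel-side
 * caller can fetch the NID of the local interface at a given index via
 * IOC_LIBCFS_GET_NI, mirroring what userspace reaches through the
 * libcfs ioctl path.
 */
static int __maybe_unused
lnet_example_first_nid(lnet_nid_t *nidp)
{
        struct libcfs_ioctl_data data;
        int rc;

        memset(&data, 0, sizeof(data));
        data.ioc_count = 0;     /* interface index */

        rc = LNetCtl(IOC_LIBCFS_GET_NI, &data);
        if (rc == 0)
                *nidp = data.ioc_nid;
        return rc;
}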
void LNetDebugPeer(lnet_process_id_t id)
{
        lnet_debug_peer(id.nid);
}
EXPORT_SYMBOL(LNetDebugPeer);
/**
 * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
 * all interfaces share the same PID, as requested by LNetNIInit().
 *
 * \param index Index of the interface to look up.
 * \param id On successful return, this location will hold the
 * lnet_process_id_t ID of the interface.
 *
 * \retval 0 If an interface exists at \a index.
 * \retval -ENOENT If no interface has been found.
 */
int
LNetGetId(unsigned int index, lnet_process_id_t *id)
{
        struct lnet_ni *ni;
        struct lnet_net *net;
        int cpt;
        int rc = -ENOENT;

        LASSERT(the_lnet.ln_refcount > 0);

        cpt = lnet_net_lock_current();

        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
                        if (index-- != 0)
                                continue;

                        id->nid = ni->ni_nid;
                        id->pid = the_lnet.ln_pid;
                        rc = 0;
                        break;
                }
        }

        lnet_net_unlock(cpt);
        return rc;
}
EXPORT_SYMBOL(LNetGetId);
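/*
 * Usage sketch (illustrative only; lnet_example_dump_ids is a
 * hypothetical name): enumerate every local interface by walking
 * indices until LNetGetId() returns -ENOENT.
 */
static void __maybe_unused
lnet_example_dump_ids(void)
{
        lnet_process_id_t id;
        unsigned int i;

        for (i = 0; LNetGetId(i, &id) == 0; i++)
                CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
}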
/**
 * Print a string representation of handle \a h into buffer \a str of
 * \a len bytes.
 */
void
LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
{
        snprintf(str, len, "%#llx", h.cookie);
}
EXPORT_SYMBOL(LNetSnprintHandle);
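/*
 * Usage sketch (illustrative only; lnet_example_print_handle is a
 * hypothetical name): format a handle's cookie for a debug message.
 * The buffer comfortably holds "0x", up to sixteen hex digits and a NUL.
 */
static void __maybe_unused
lnet_example_print_handle(lnet_handle_any_t h)
{
        char str[32];

        LNetSnprintHandle(str, sizeof(str), h);
        CDEBUG(D_NET, "handle %s\n", str);
}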
static int lnet_ping(lnet_process_id_t id, signed long timeout,
                     lnet_process_id_t __user *ids, int n_ids)
{
        lnet_handle_eq_t eqh;
        lnet_handle_md_t mdh;
        lnet_event_t event;
        lnet_md_t md = { NULL };
        int which;
        int unlinked = 0;
        int replied = 0;
        const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
        int infosz;
        struct lnet_ping_info *info;
        lnet_process_id_t tmpid;
        int i;
        int nob;
        int rc;
        int rc2;
        sigset_t blocked;

        infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]);

        /* n_ids limit is arbitrary */
        if (n_ids <= 0 || n_ids > 20 || id.nid == LNET_NID_ANY)
                return -EINVAL;

        if (id.pid == LNET_PID_ANY)
                id.pid = LNET_PID_LUSTRE;

        LIBCFS_ALLOC(info, infosz);
        if (info == NULL)
                return -ENOMEM;

        /* NB 2 events max (including any unlink event) */
        rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
        if (rc != 0) {
                CERROR("Can't allocate EQ: %d\n", rc);
                goto out_0;
        }

        /* initialize md content */
        md.start     = info;
        md.length    = infosz;
        md.threshold = 2; /* GET/REPLY */
        md.max_size  = 0;
        md.options   = LNET_MD_TRUNCATE;
        md.user_ptr  = NULL;
        md.eq_handle = eqh;

        rc = LNetMDBind(md, LNET_UNLINK, &mdh);
        if (rc != 0) {
                CERROR("Can't bind MD: %d\n", rc);
                goto out_1;
        }

        rc = LNetGet(LNET_NID_ANY, mdh, id,
                     LNET_RESERVED_PORTAL,
                     LNET_PROTO_PING_MATCHBITS, 0);
        if (rc != 0) {
                /* Don't CERROR; this could be deliberate! */
                rc2 = LNetMDUnlink(mdh);
                LASSERT(rc2 == 0);

                /* NB must wait for the UNLINK event below... */
                unlinked = 1;
                timeout = a_long_time;
        }

        do {
                /* MUST block for unlink to complete */
                if (unlinked)
                        blocked = cfs_block_allsigs();

                rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);

                if (unlinked)
                        cfs_restore_sigs(blocked);

                CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
                       (rc2 <= 0) ? -1 : event.type,
                       (rc2 <= 0) ? -1 : event.status,
                       (rc2 > 0 && event.unlinked) ? " unlinked" : "");

                LASSERT(rc2 != -EOVERFLOW);     /* can't miss anything */

                if (rc2 <= 0 || event.status != 0) {
                        /* timeout or error */
                        if (!replied && rc == 0)
                                rc = (rc2 < 0) ? rc2 :
                                     (rc2 == 0) ? -ETIMEDOUT :
                                     event.status;

                        if (!unlinked) {
                                /* Ensure completion in finite time... */
                                LNetMDUnlink(mdh);
                                /* No assertion (racing with network) */
                                unlinked = 1;
                                timeout = a_long_time;
                        } else if (rc2 == 0) {
                                /* timed out waiting for unlink */
                                CWARN("ping %s: late network completion\n",
                                      libcfs_id2str(id));
                        }
                } else if (event.type == LNET_EVENT_REPLY) {
                        replied = 1;
                        rc = event.mlength;
                }
        } while (rc2 <= 0 || !event.unlinked);

        if (!replied) {
                if (rc >= 0)
                        CWARN("%s: Unexpected rc >= 0 but no reply!\n",
                              libcfs_id2str(id));
                rc = -EIO;
                goto out_1;
        }

        nob = rc;
        LASSERT(nob >= 0 && nob <= infosz);

        rc = -EPROTO;   /* if I can't parse... */

        if (nob < 8) {
                /* can't check magic/version */
                CERROR("%s: ping info too short %d\n",
                       libcfs_id2str(id), nob);
                goto out_1;
        }

        if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
                lnet_swap_pinginfo(info);
        } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
                CERROR("%s: Unexpected magic %08x\n",
                       libcfs_id2str(id), info->pi_magic);
                goto out_1;
        }

        if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
                CERROR("%s: ping w/o NI status: 0x%x\n",
                       libcfs_id2str(id), info->pi_features);
                goto out_1;
        }

        if (nob < offsetof(struct lnet_ping_info, pi_ni[0])) {
                CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
                       nob, (int)offsetof(struct lnet_ping_info, pi_ni[0]));
                goto out_1;
        }

        if (info->pi_nnis < n_ids)
                n_ids = info->pi_nnis;

        if (nob < offsetof(struct lnet_ping_info, pi_ni[n_ids])) {
                CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
                       nob, (int)offsetof(struct lnet_ping_info, pi_ni[n_ids]));
                goto out_1;
        }

        rc = -EFAULT;   /* If I SEGV... */

        memset(&tmpid, 0, sizeof(tmpid));
        for (i = 0; i < n_ids; i++) {
                tmpid.pid = info->pi_pid;
                tmpid.nid = info->pi_ni[i].ns_nid;
                if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
                        goto out_1;
        }
        rc = info->pi_nnis;

 out_1:
        rc2 = LNetEQFree(eqh);
        if (rc2 != 0)
                CERROR("rc2 %d\n", rc2);
        LASSERT(rc2 == 0);

 out_0:
        LIBCFS_FREE(info, infosz);
        return rc;
}