4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #define DEBUG_SUBSYSTEM S_LNET
38 #include <linux/log2.h>
39 #include <lnet/lib-lnet.h>
41 #define D_LNI D_CONSOLE
43 lnet_t the_lnet; /* THE state of the network */
44 EXPORT_SYMBOL(the_lnet);
46 static char *ip2nets = "";
47 CFS_MODULE_PARM(ip2nets, "s", charp, 0444,
48 "LNET network <- IP table");
50 static char *networks = "";
51 CFS_MODULE_PARM(networks, "s", charp, 0444,
54 static char *routes = "";
55 CFS_MODULE_PARM(routes, "s", charp, 0444,
56 "routes to non-local networks");
58 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
59 CFS_MODULE_PARM(rnet_htable_size, "i", int, 0444,
60 "size of remote network hash table");
62 static int lnet_ping(lnet_process_id_t id, int timeout_ms,
63 lnet_process_id_t *ids, int n_ids);
72 lnet_get_networks(void)
77 if (*networks != 0 && *ip2nets != 0) {
78 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
84 rc = lnet_parse_ip2nets(&nets, ip2nets);
85 return (rc == 0) ? nets : NULL;
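/*
 * Configuration note (illustrative, not from this file): 'networks' and
 * 'ip2nets' are mutually exclusive module parameters, typically set in
 * modprobe.d, e.g.
 *
 *	options lnet networks="tcp0(eth0),o2ib0(ib0)"
 * or
 *	options lnet ip2nets="tcp0(eth0) 192.168.0.*; o2ib0(ib0) 10.0.0.*"
 *
 * When 'ip2nets' is set, lnet_get_networks() returns its expansion for the
 * local interfaces; otherwise the 'networks' string is used as given.
 */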
97 spin_lock_init(&the_lnet.ln_eq_wait_lock);
98 init_waitqueue_head(&the_lnet.ln_eq_waitq);
99 init_waitqueue_head(&the_lnet.ln_rc_waitq);
100 mutex_init(&the_lnet.ln_lnd_mutex);
101 mutex_init(&the_lnet.ln_api_mutex);
105 lnet_fini_locks(void)
110 lnet_create_remote_nets_table(void)
113 struct list_head *hash;
115 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
116 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
117 LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
119 CERROR("Failed to create remote nets hash table\n");
123 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
124 INIT_LIST_HEAD(&hash[i]);
125 the_lnet.ln_remote_nets_hash = hash;
130 lnet_destroy_remote_nets_table(void)
134 if (the_lnet.ln_remote_nets_hash == NULL)
137 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
138 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
140 LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
141 LNET_REMOTE_NETS_HASH_SIZE *
142 sizeof(the_lnet.ln_remote_nets_hash[0]));
143 the_lnet.ln_remote_nets_hash = NULL;
147 lnet_destroy_locks(void)
149 if (the_lnet.ln_res_lock != NULL) {
150 cfs_percpt_lock_free(the_lnet.ln_res_lock);
151 the_lnet.ln_res_lock = NULL;
154 if (the_lnet.ln_net_lock != NULL) {
155 cfs_percpt_lock_free(the_lnet.ln_net_lock);
156 the_lnet.ln_net_lock = NULL;
163 lnet_create_locks(void)
167 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
168 if (the_lnet.ln_res_lock == NULL)
171 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
172 if (the_lnet.ln_net_lock == NULL)
178 lnet_destroy_locks();
182 static void lnet_assert_wire_constants(void)
184 /* Wire protocol assertions generated by 'wirecheck'
185 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
186 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
187 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
190 CLASSERT (LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
191 CLASSERT (LNET_PROTO_TCP_VERSION_MAJOR == 1);
192 CLASSERT (LNET_PROTO_TCP_VERSION_MINOR == 0);
193 CLASSERT (LNET_MSG_ACK == 0);
194 CLASSERT (LNET_MSG_PUT == 1);
195 CLASSERT (LNET_MSG_GET == 2);
196 CLASSERT (LNET_MSG_REPLY == 3);
197 CLASSERT (LNET_MSG_HELLO == 4);
199 /* Checks for struct ptl_handle_wire_t */
200 CLASSERT ((int)sizeof(lnet_handle_wire_t) == 16);
201 CLASSERT ((int)offsetof(lnet_handle_wire_t, wh_interface_cookie) == 0);
202 CLASSERT ((int)sizeof(((lnet_handle_wire_t *)0)->wh_interface_cookie) == 8);
203 CLASSERT ((int)offsetof(lnet_handle_wire_t, wh_object_cookie) == 8);
204 CLASSERT ((int)sizeof(((lnet_handle_wire_t *)0)->wh_object_cookie) == 8);
206 /* Checks for struct lnet_magicversion_t */
207 CLASSERT ((int)sizeof(lnet_magicversion_t) == 8);
208 CLASSERT ((int)offsetof(lnet_magicversion_t, magic) == 0);
209 CLASSERT ((int)sizeof(((lnet_magicversion_t *)0)->magic) == 4);
210 CLASSERT ((int)offsetof(lnet_magicversion_t, version_major) == 4);
211 CLASSERT ((int)sizeof(((lnet_magicversion_t *)0)->version_major) == 2);
212 CLASSERT ((int)offsetof(lnet_magicversion_t, version_minor) == 6);
213 CLASSERT ((int)sizeof(((lnet_magicversion_t *)0)->version_minor) == 2);
215 /* Checks for struct lnet_hdr_t */
216 CLASSERT ((int)sizeof(lnet_hdr_t) == 72);
217 CLASSERT ((int)offsetof(lnet_hdr_t, dest_nid) == 0);
218 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->dest_nid) == 8);
219 CLASSERT ((int)offsetof(lnet_hdr_t, src_nid) == 8);
220 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->src_nid) == 8);
221 CLASSERT ((int)offsetof(lnet_hdr_t, dest_pid) == 16);
222 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->dest_pid) == 4);
223 CLASSERT ((int)offsetof(lnet_hdr_t, src_pid) == 20);
224 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->src_pid) == 4);
225 CLASSERT ((int)offsetof(lnet_hdr_t, type) == 24);
226 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->type) == 4);
227 CLASSERT ((int)offsetof(lnet_hdr_t, payload_length) == 28);
228 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->payload_length) == 4);
229 CLASSERT ((int)offsetof(lnet_hdr_t, msg) == 32);
230 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg) == 40);
233 CLASSERT ((int)offsetof(lnet_hdr_t, msg.ack.dst_wmd) == 32);
234 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.ack.dst_wmd) == 16);
235 CLASSERT ((int)offsetof(lnet_hdr_t, msg.ack.match_bits) == 48);
236 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.ack.match_bits) == 8);
237 CLASSERT ((int)offsetof(lnet_hdr_t, msg.ack.mlength) == 56);
238 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.ack.mlength) == 4);
241 CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.ack_wmd) == 32);
242 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.ack_wmd) == 16);
243 CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.match_bits) == 48);
244 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.match_bits) == 8);
245 CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.hdr_data) == 56);
246 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.hdr_data) == 8);
247 CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.ptl_index) == 64);
248 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.ptl_index) == 4);
249 CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.offset) == 68);
250 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.offset) == 4);
253 CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.return_wmd) == 32);
254 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.return_wmd) == 16);
255 CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.match_bits) == 48);
256 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.match_bits) == 8);
257 CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.ptl_index) == 56);
258 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.ptl_index) == 4);
259 CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.src_offset) == 60);
260 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.src_offset) == 4);
261 CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.sink_length) == 64);
262 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.sink_length) == 4);
265 CLASSERT ((int)offsetof(lnet_hdr_t, msg.reply.dst_wmd) == 32);
266 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.reply.dst_wmd) == 16);
269 CLASSERT ((int)offsetof(lnet_hdr_t, msg.hello.incarnation) == 32);
270 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.hello.incarnation) == 8);
271 CLASSERT ((int)offsetof(lnet_hdr_t, msg.hello.type) == 40);
272 CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.hello.type) == 4);
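/*
 * Note: CLASSERT() is libcfs's compile-time assertion, so any drift in the
 * on-wire structures (a reordered field, a changed type width, or compiler
 * padding) fails the build here rather than silently breaking
 * interoperability; e.g. inserting a field into lnet_hdr_t before 'type'
 * would trip CLASSERT ((int)offsetof(lnet_hdr_t, type) == 24) above.
 */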
275 static lnd_t *lnet_find_lnd_by_type(__u32 type)
278 struct list_head *tmp;
280 /* holding lnd mutex */
281 list_for_each(tmp, &the_lnet.ln_lnds) {
282 lnd = list_entry(tmp, lnd_t, lnd_list);
284 if (lnd->lnd_type == type)
291 lnet_register_lnd (lnd_t *lnd)
293 LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
295 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
296 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
298 list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
299 lnd->lnd_refcount = 0;
301 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
303 LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
305 EXPORT_SYMBOL(lnet_register_lnd);
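/*
 * Usage sketch (illustrative, with a hypothetical "my_lnd" descriptor): an
 * LND module registers itself from its init hook once its lnd_t has been
 * filled in, e.g.
 *
 *	static lnd_t my_lnd = {
 *		.lnd_type     = SOCKLND,
 *		.lnd_startup  = my_startup,
 *		.lnd_shutdown = my_shutdown,
 *	};
 *
 *	static int __init my_lnd_module_init(void)
 *	{
 *		lnet_register_lnd(&my_lnd);
 *		return 0;
 *	}
 *
 * The matching lnet_unregister_lnd(&my_lnd) must only run after every NI
 * using the LND has shut down, i.e. once lnd_refcount is back to zero.
 */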
308 lnet_unregister_lnd (lnd_t *lnd)
310 LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
312 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
313 LASSERT(lnd->lnd_refcount == 0);
315 list_del(&lnd->lnd_list);
316 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
318 LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
320 EXPORT_SYMBOL(lnet_unregister_lnd);
323 lnet_counters_get(lnet_counters_t *counters)
325 lnet_counters_t *ctr;
328 memset(counters, 0, sizeof(*counters));
330 lnet_net_lock(LNET_LOCK_EX);
332 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
333 counters->msgs_max += ctr->msgs_max;
334 counters->msgs_alloc += ctr->msgs_alloc;
335 counters->errors += ctr->errors;
336 counters->send_count += ctr->send_count;
337 counters->recv_count += ctr->recv_count;
338 counters->route_count += ctr->route_count;
339 counters->drop_count += ctr->drop_count;
340 counters->send_length += ctr->send_length;
341 counters->recv_length += ctr->recv_length;
342 counters->route_length += ctr->route_length;
343 counters->drop_length += ctr->drop_length;
346 lnet_net_unlock(LNET_LOCK_EX);
348 EXPORT_SYMBOL(lnet_counters_get);
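/*
 * Usage sketch (illustrative): callers receive one snapshot aggregated over
 * all CPT partitions, e.g.
 *
 *	lnet_counters_t c;
 *
 *	lnet_counters_get(&c);
 *	CDEBUG(D_NET, "%u messages sent, %u dropped\n",
 *	       c.send_count, c.drop_count);
 */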
351 lnet_counters_reset(void)
353 lnet_counters_t *counters;
356 lnet_net_lock(LNET_LOCK_EX);
358 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
359 memset(counters, 0, sizeof(lnet_counters_t));
361 lnet_net_unlock(LNET_LOCK_EX);
364 static __u64 lnet_create_interface_cookie(void)
366 /* NB the interface cookie in wire handles guards against delayed
367 * replies and ACKs appearing valid after reboot. Initialisation time,
368 * even if it's only implemented to millisecond resolution, is probably
369 * easily good enough. */
372 do_gettimeofday(&tv);
375 cookie += tv.tv_usec;
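/*
 * Illustrative note (the scaling itself is elided above): the cookie is
 * roughly tv.tv_sec scaled into microseconds plus tv.tv_usec, so two boots
 * of the same node get different cookies and stale wire handles are
 * rejected.
 */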
380 lnet_res_type2str(int type)
385 case LNET_COOKIE_TYPE_MD:
387 case LNET_COOKIE_TYPE_ME:
389 case LNET_COOKIE_TYPE_EQ:
395 lnet_res_container_cleanup(struct lnet_res_container *rec)
399 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
402 while (!list_empty(&rec->rec_active)) {
403 struct list_head *e = rec->rec_active.next;
406 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
407 lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
409 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
410 lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
412 } else { /* NB: Active MEs should be attached on portals */
419 /* Found alive MD/ME/EQ; the user really should unlink/free
420 * all of them before finalizing LNet, but if someone didn't,
421 * we have to recycle the garbage for them */
422 CERROR("%d active elements on exit of %s container\n",
423 count, lnet_res_type2str(rec->rec_type));
426 if (rec->rec_lh_hash != NULL) {
427 LIBCFS_FREE(rec->rec_lh_hash,
428 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
429 rec->rec_lh_hash = NULL;
432 rec->rec_type = 0; /* mark it as finalized */
436 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
441 LASSERT(rec->rec_type == 0);
443 rec->rec_type = type;
444 INIT_LIST_HEAD(&rec->rec_active);
446 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
448 /* Arbitrary choice of hash table size */
449 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
450 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
451 if (rec->rec_lh_hash == NULL) {
456 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
457 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
462 CERROR("Failed to setup %s resource container\n",
463 lnet_res_type2str(type));
464 lnet_res_container_cleanup(rec);
469 lnet_res_containers_destroy(struct lnet_res_container **recs)
471 struct lnet_res_container *rec;
474 cfs_percpt_for_each(rec, i, recs)
475 lnet_res_container_cleanup(rec);
477 cfs_percpt_free(recs);
480 static struct lnet_res_container **
481 lnet_res_containers_create(int type)
483 struct lnet_res_container **recs;
484 struct lnet_res_container *rec;
488 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
490 CERROR("Failed to allocate %s resource containers\n",
491 lnet_res_type2str(type));
495 cfs_percpt_for_each(rec, i, recs) {
496 rc = lnet_res_container_setup(rec, i, type);
498 lnet_res_containers_destroy(recs);
507 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
509 /* ALWAYS called with lnet_res_lock held */
510 struct list_head *head;
511 lnet_libhandle_t *lh;
514 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
517 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
518 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
520 list_for_each_entry(lh, head, lh_hash_chain) {
521 if (lh->lh_cookie == cookie)
529 lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
531 /* ALWAYS called with lnet_res_lock held */
532 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
535 lh->lh_cookie = rec->rec_lh_cookie;
536 rec->rec_lh_cookie += 1 << ibits;
538 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
540 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
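/*
 * Layout note (constants defined in the LNet headers): the low
 * LNET_COOKIE_TYPE_BITS of a cookie hold the resource type (EQ/MD/ME), the
 * next LNET_CPT_BITS hold the owning CPT, and the remaining high bits are a
 * per-container counter advanced by lnet_res_lh_initialize() above:
 *
 *	| counter ...................... | CPT | type |
 *	                                 ^-- ibits = LNET_COOKIE_TYPE_BITS +
 *	                                             LNET_CPT_BITS
 *
 * lnet_res_lh_lookup() reverses this: it matches the type bits against
 * rec->rec_type and uses the counter bits to select a hash chain.
 */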
543 static int lnet_unprepare(void);
546 lnet_prepare(lnet_pid_t requested_pid)
548 /* Prepare to bring up the network */
549 struct lnet_res_container **recs;
552 if (requested_pid == LNET_PID_ANY) {
553 /* Don't instantiate LNET just for me */
557 LASSERT(the_lnet.ln_refcount == 0);
559 the_lnet.ln_routing = 0;
561 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
562 the_lnet.ln_pid = requested_pid;
564 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
565 INIT_LIST_HEAD(&the_lnet.ln_nis);
566 INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
567 INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
568 INIT_LIST_HEAD(&the_lnet.ln_routers);
569 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
570 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
572 rc = lnet_create_remote_nets_table();
576 the_lnet.ln_interface_cookie = lnet_create_interface_cookie();
578 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
579 sizeof(lnet_counters_t));
580 if (the_lnet.ln_counters == NULL) {
581 CERROR("Failed to allocate counters for LNet\n");
586 rc = lnet_peer_tables_create();
590 rc = lnet_msg_containers_create();
594 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
595 LNET_COOKIE_TYPE_EQ);
599 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
605 the_lnet.ln_me_containers = recs;
607 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
613 the_lnet.ln_md_containers = recs;
615 rc = lnet_portals_create();
617 CERROR("Failed to create portals for LNet: %d\n", rc);
629 lnet_unprepare (void)
631 /* NB no LNET_LOCK since this is the last reference. All LND instances
632 * have shut down already, so it is safe to unlink and free all
633 * descriptors, even those that appear committed to a network op (eg MD
634 * with non-zero pending count) */
636 lnet_fail_nid(LNET_NID_ANY, 0);
638 LASSERT(the_lnet.ln_refcount == 0);
639 LASSERT(list_empty(&the_lnet.ln_test_peers));
640 LASSERT(list_empty(&the_lnet.ln_nis));
641 LASSERT(list_empty(&the_lnet.ln_nis_cpt));
642 LASSERT(list_empty(&the_lnet.ln_nis_zombie));
644 lnet_portals_destroy();
646 if (the_lnet.ln_md_containers != NULL) {
647 lnet_res_containers_destroy(the_lnet.ln_md_containers);
648 the_lnet.ln_md_containers = NULL;
651 if (the_lnet.ln_me_containers != NULL) {
652 lnet_res_containers_destroy(the_lnet.ln_me_containers);
653 the_lnet.ln_me_containers = NULL;
656 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
658 lnet_msg_containers_destroy();
659 lnet_peer_tables_destroy();
660 lnet_rtrpools_free(0);
662 if (the_lnet.ln_counters != NULL) {
663 cfs_percpt_free(the_lnet.ln_counters);
664 the_lnet.ln_counters = NULL;
666 lnet_destroy_remote_nets_table();
672 lnet_net2ni_locked(__u32 net, int cpt)
674 struct list_head *tmp;
677 LASSERT(cpt != LNET_LOCK_EX);
679 list_for_each(tmp, &the_lnet.ln_nis) {
680 ni = list_entry(tmp, lnet_ni_t, ni_list);
682 if (LNET_NIDNET(ni->ni_nid) == net) {
683 lnet_ni_addref_locked(ni, cpt);
692 lnet_net2ni(__u32 net)
697 ni = lnet_net2ni_locked(net, 0);
702 EXPORT_SYMBOL(lnet_net2ni);
705 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
710 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
715 val = hash_long(key, LNET_CPT_BITS);
716 /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
720 return (unsigned int)(key + val + (val >> 1)) % number;
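/*
 * Note: the NID is reduced to a key (elided above), mixed with hash_long(),
 * and folded modulo 'number'. Because LNET_CPT_NUMBER need not be a power of
 * two, a plain bitmask cannot be used; e.g. with number == 3 the result is
 * still an even spread over {0, 1, 2}.
 */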
724 lnet_cpt_of_nid_locked(lnet_nid_t nid)
728 /* must be called while holding lnet_net_lock */
729 if (LNET_CPT_NUMBER == 1)
730 return 0; /* the only one */
732 /* taking lnet_net_lock(any) would be OK */
733 if (!list_empty(&the_lnet.ln_nis_cpt)) {
734 list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
735 if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
738 LASSERT(ni->ni_cpts != NULL);
739 return ni->ni_cpts[lnet_nid_cpt_hash
740 (nid, ni->ni_ncpts)];
744 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
748 lnet_cpt_of_nid(lnet_nid_t nid)
753 if (LNET_CPT_NUMBER == 1)
754 return 0; /* the only one */
756 if (list_empty(&the_lnet.ln_nis_cpt))
757 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
759 cpt = lnet_net_lock_current();
760 cpt2 = lnet_cpt_of_nid_locked(nid);
761 lnet_net_unlock(cpt);
765 EXPORT_SYMBOL(lnet_cpt_of_nid);
768 lnet_islocalnet(__u32 net)
773 cpt = lnet_net_lock_current();
775 ni = lnet_net2ni_locked(net, cpt);
777 lnet_ni_decref_locked(ni, cpt);
779 lnet_net_unlock(cpt);
785 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
788 struct list_head *tmp;
790 LASSERT(cpt != LNET_LOCK_EX);
792 list_for_each(tmp, &the_lnet.ln_nis) {
793 ni = list_entry(tmp, lnet_ni_t, ni_list);
795 if (ni->ni_nid == nid) {
796 lnet_ni_addref_locked(ni, cpt);
805 lnet_islocalnid(lnet_nid_t nid)
810 cpt = lnet_net_lock_current();
811 ni = lnet_nid2ni_locked(nid, cpt);
813 lnet_ni_decref_locked(ni, cpt);
814 lnet_net_unlock(cpt);
820 lnet_count_acceptor_nis (void)
822 /* Return the # of NIs that need the acceptor. */
824 struct list_head *tmp;
828 cpt = lnet_net_lock_current();
829 list_for_each(tmp, &the_lnet.ln_nis) {
830 ni = list_entry(tmp, lnet_ni_t, ni_list);
832 if (ni->ni_lnd->lnd_accept != NULL)
836 lnet_net_unlock(cpt);
841 static lnet_ping_info_t *
842 lnet_ping_info_create(int num_ni)
844 lnet_ping_info_t *ping_info;
847 infosz = offsetof(lnet_ping_info_t, pi_ni[num_ni]);
848 LIBCFS_ALLOC(ping_info, infosz);
849 if (ping_info == NULL) {
850 CERROR("Can't allocate ping info[%d]\n", num_ni);
854 ping_info->pi_nnis = num_ni;
855 ping_info->pi_pid = the_lnet.ln_pid;
856 ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
857 ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
863 lnet_get_ni_count(void)
870 list_for_each_entry(ni, &the_lnet.ln_nis, ni_list)
879 lnet_ping_info_free(lnet_ping_info_t *pinfo)
882 offsetof(lnet_ping_info_t,
883 pi_ni[pinfo->pi_nnis]));
887 lnet_ping_info_destroy(void)
891 lnet_net_lock(LNET_LOCK_EX);
893 list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
895 ni->ni_status = NULL;
899 lnet_ping_info_free(the_lnet.ln_ping_info);
900 the_lnet.ln_ping_info = NULL;
902 lnet_net_unlock(LNET_LOCK_EX);
906 lnet_ping_event_handler(lnet_event_t *event)
908 lnet_ping_info_t *pinfo = event->md.user_ptr;
911 pinfo->pi_features = LNET_PING_FEAT_INVAL;
915 lnet_ping_info_setup(lnet_ping_info_t **ppinfo, lnet_handle_md_t *md_handle,
916 int ni_count, bool set_eq)
918 lnet_handle_me_t me_handle;
919 lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
920 lnet_md_t md = {NULL};
924 rc = LNetEQAlloc(0, lnet_ping_event_handler,
925 &the_lnet.ln_ping_target_eq);
927 CERROR("Can't allocate ping EQ: %d\n", rc);
932 *ppinfo = lnet_ping_info_create(ni_count);
933 if (*ppinfo == NULL) {
938 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
939 LNET_PROTO_PING_MATCHBITS, 0,
940 LNET_UNLINK, LNET_INS_AFTER,
943 CERROR("Can't create ping ME: %d\n", rc);
947 /* initialize md content */
949 md.length = offsetof(lnet_ping_info_t,
950 pi_ni[(*ppinfo)->pi_nnis]);
951 md.threshold = LNET_MD_THRESH_INF;
953 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
954 LNET_MD_MANAGE_REMOTE;
956 md.eq_handle = the_lnet.ln_ping_target_eq;
957 md.user_ptr = *ppinfo;
959 rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
961 CERROR("Can't attach ping MD: %d\n", rc);
968 rc2 = LNetMEUnlink(me_handle);
971 lnet_ping_info_free(*ppinfo);
975 LNetEQFree(the_lnet.ln_ping_target_eq);
980 lnet_ping_md_unlink(lnet_ping_info_t *pinfo, lnet_handle_md_t *md_handle)
982 sigset_t blocked = cfs_block_allsigs();
984 LNetMDUnlink(*md_handle);
985 LNetInvalidateHandle(md_handle);
987 /* NB md could be busy; this just starts the unlink */
988 while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
989 CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
990 set_current_state(TASK_UNINTERRUPTIBLE);
991 schedule_timeout(cfs_time_seconds(1));
994 cfs_restore_sigs(blocked);
998 lnet_ping_info_install_locked(lnet_ping_info_t *ping_info)
1002 lnet_ni_status_t *ns;
1005 list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
1006 LASSERT(i < ping_info->pi_nnis);
1008 ns = &ping_info->pi_ni[i];
1010 ns->ns_nid = ni->ni_nid;
1013 ns->ns_status = (ni->ni_status != NULL) ?
1014 ni->ni_status->ns_status : LNET_NI_STATUS_UP;
1023 lnet_ping_target_update(lnet_ping_info_t *pinfo, lnet_handle_md_t md_handle)
1025 lnet_ping_info_t *old_pinfo = NULL;
1026 lnet_handle_md_t old_md;
1028 /* switch the NIs to point to the new ping info created */
1029 lnet_net_lock(LNET_LOCK_EX);
1031 if (!the_lnet.ln_routing)
1032 pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1033 lnet_ping_info_install_locked(pinfo);
1035 if (the_lnet.ln_ping_info != NULL) {
1036 old_pinfo = the_lnet.ln_ping_info;
1037 old_md = the_lnet.ln_ping_target_md;
1039 the_lnet.ln_ping_target_md = md_handle;
1040 the_lnet.ln_ping_info = pinfo;
1042 lnet_net_unlock(LNET_LOCK_EX);
1044 if (old_pinfo != NULL) {
1045 /* unlink the old ping info */
1046 lnet_ping_md_unlink(old_pinfo, &old_md);
1047 lnet_ping_info_free(old_pinfo);
1052 lnet_ping_target_fini(void)
1056 lnet_ping_md_unlink(the_lnet.ln_ping_info,
1057 &the_lnet.ln_ping_target_md);
1059 rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1062 lnet_ping_info_destroy();
1066 lnet_ni_tq_credits(lnet_ni_t *ni)
1070 LASSERT(ni->ni_ncpts >= 1);
1072 if (ni->ni_ncpts == 1)
1073 return ni->ni_maxtxcredits;
1075 credits = ni->ni_maxtxcredits / ni->ni_ncpts;
1076 credits = max(credits, 8 * ni->ni_peertxcredits);
1077 credits = min(credits, ni->ni_maxtxcredits);
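/*
 * Worked example (illustrative numbers): with ni_maxtxcredits = 256,
 * ni_ncpts = 4 and ni_peertxcredits = 8, each per-CPT TX queue gets
 * min(max(256 / 4, 8 * 8), 256) = 64 credits: the NI-wide budget is split
 * across CPTs but never drops below eight peers' worth per queue.
 */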
1083 lnet_ni_unlink_locked(lnet_ni_t *ni)
1085 if (!list_empty(&ni->ni_cptlist)) {
1086 list_del_init(&ni->ni_cptlist);
1087 lnet_ni_decref_locked(ni, 0);
1090 /* move it to the zombie list where nobody can find it anymore */
1091 LASSERT(!list_empty(&ni->ni_list));
1092 list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
1093 lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
1097 lnet_clear_zombies_nis_locked(void)
1103 /* Now wait for the NIs I just nuked to show up on ln_nis_zombie
1104 * and shut them down in guaranteed thread context */
1106 while (!list_empty(&the_lnet.ln_nis_zombie)) {
1110 ni = list_entry(the_lnet.ln_nis_zombie.next,
1111 lnet_ni_t, ni_list);
1112 list_del_init(&ni->ni_list);
1113 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1116 /* still busy, add it back to zombie list */
1117 list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
1121 if (!list_empty(&ni->ni_list)) {
1122 lnet_net_unlock(LNET_LOCK_EX);
1124 if ((i & (-i)) == i) {
1126 "Waiting for zombie LNI %s\n",
1127 libcfs_nid2str(ni->ni_nid));
1129 set_current_state(TASK_UNINTERRUPTIBLE);
1130 schedule_timeout(cfs_time_seconds(1));
1131 lnet_net_lock(LNET_LOCK_EX);
1135 ni->ni_lnd->lnd_refcount--;
1136 lnet_net_unlock(LNET_LOCK_EX);
1138 islo = ni->ni_lnd->lnd_type == LOLND;
1140 LASSERT(!in_interrupt());
1141 (ni->ni_lnd->lnd_shutdown)(ni);
1143 /* can't deref lnd anymore now; it might have unregistered itself... */
1147 CDEBUG(D_LNI, "Removed LNI %s\n",
1148 libcfs_nid2str(ni->ni_nid));
1152 lnet_net_lock(LNET_LOCK_EX);
1157 lnet_shutdown_lndnis(void)
1162 /* NB called holding the global mutex */
1164 /* All quiet on the API front */
1165 LASSERT(!the_lnet.ln_shutdown);
1166 LASSERT(the_lnet.ln_refcount == 0);
1167 LASSERT(list_empty(&the_lnet.ln_nis_zombie));
1169 lnet_net_lock(LNET_LOCK_EX);
1170 the_lnet.ln_shutdown = 1; /* flag shutdown */
1172 /* Unlink NIs from the global table */
1173 while (!list_empty(&the_lnet.ln_nis)) {
1174 ni = list_entry(the_lnet.ln_nis.next,
1175 lnet_ni_t, ni_list);
1176 lnet_ni_unlink_locked(ni);
1179 /* Drop the cached loopback NI. */
1180 if (the_lnet.ln_loni != NULL) {
1181 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1182 the_lnet.ln_loni = NULL;
1185 lnet_net_unlock(LNET_LOCK_EX);
1187 /* Clear lazy portals and drop delayed messages which hold refs
1188 * on their lnet_msg_t::msg_rxpeer */
1189 for (i = 0; i < the_lnet.ln_nportals; i++)
1190 LNetClearLazyPortal(i);
1192 /* Clear the peer table and wait for all peers to go (they hold refs on their NIs) */
1194 lnet_peer_tables_cleanup(NULL);
1196 lnet_net_lock(LNET_LOCK_EX);
1198 lnet_clear_zombies_nis_locked();
1199 the_lnet.ln_shutdown = 0;
1200 lnet_net_unlock(LNET_LOCK_EX);
1203 /* shut down the NI and release its refcount */
1205 lnet_shutdown_lndni(struct lnet_ni *ni)
1209 lnet_net_lock(LNET_LOCK_EX);
1210 lnet_ni_unlink_locked(ni);
1211 lnet_net_unlock(LNET_LOCK_EX);
1213 /* clear messages for this NI on the lazy portal */
1214 for (i = 0; i < the_lnet.ln_nportals; i++)
1215 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1217 /* Do peer table cleanup for this ni */
1218 lnet_peer_tables_cleanup(ni);
1220 lnet_net_lock(LNET_LOCK_EX);
1221 lnet_clear_zombies_nis_locked();
1222 lnet_net_unlock(LNET_LOCK_EX);
1226 lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
1227 __s32 peer_cr, __s32 peer_buf_cr, __s32 credits)
1232 struct lnet_tx_queue *tq;
1235 lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
1237 LASSERT(libcfs_isknown_lnd(lnd_type));
1239 if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
1240 lnd_type == IIBLND || lnd_type == VIBLND) {
1241 CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
1245 /* Make sure this new NI is unique. */
1246 lnet_net_lock(LNET_LOCK_EX);
1247 rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis);
1248 lnet_net_unlock(LNET_LOCK_EX);
1251 if (lnd_type == LOLND) {
1256 CERROR("Net %s is not unique\n",
1257 libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
1263 LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
1264 lnd = lnet_find_lnd_by_type(lnd_type);
1267 LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
1268 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1269 LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
1271 lnd = lnet_find_lnd_by_type(lnd_type);
1273 LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
1274 CERROR("Can't load LND %s, module %s, rc=%d\n",
1275 libcfs_lnd2str(lnd_type),
1276 libcfs_lnd2modname(lnd_type), rc);
1277 #ifndef HAVE_MODULE_LOADING_SUPPORT
1278 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be compiled with kernel module loading support.");
1287 lnet_net_lock(LNET_LOCK_EX);
1288 lnd->lnd_refcount++;
1289 lnet_net_unlock(LNET_LOCK_EX);
1293 rc = (lnd->lnd_startup)(ni);
1295 LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
1298 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1299 rc, libcfs_lnd2str(lnd->lnd_type));
1300 lnet_net_lock(LNET_LOCK_EX);
1301 lnd->lnd_refcount--;
1302 lnet_net_unlock(LNET_LOCK_EX);
1306 /* If given some LND tunable parameters, parse those now to
1307 * override the values in the NI structure. */
1308 if (peer_buf_cr >= 0)
1309 ni->ni_peerrtrcredits = peer_buf_cr;
1310 if (peer_timeout >= 0)
1311 ni->ni_peertimeout = peer_timeout;
1314 * Note: For now, don't allow the user to change
1315 * peertxcredits as this number is used in the
1316 * IB LND to control queue depth.
1317 * if (peer_cr != -1)
1318 * ni->ni_peertxcredits = peer_cr;
1321 ni->ni_maxtxcredits = credits;
1323 LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
1325 lnet_net_lock(LNET_LOCK_EX);
1326 /* refcount for ln_nis */
1327 lnet_ni_addref_locked(ni, 0);
1328 list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
1329 if (ni->ni_cpts != NULL) {
1330 lnet_ni_addref_locked(ni, 0);
1331 list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt);
1334 lnet_net_unlock(LNET_LOCK_EX);
1336 if (lnd->lnd_type == LOLND) {
1338 LASSERT(the_lnet.ln_loni == NULL);
1339 the_lnet.ln_loni = ni;
1343 if (ni->ni_peertxcredits == 0 || ni->ni_maxtxcredits == 0) {
1344 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1345 libcfs_lnd2str(lnd->lnd_type),
1346 ni->ni_peertxcredits == 0 ?
1348 /* shut down the NI, since if we get here it must already have been started and added to the global list */
1351 lnet_shutdown_lndni(ni);
1355 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1356 tq->tq_credits_min =
1357 tq->tq_credits_max =
1358 tq->tq_credits = lnet_ni_tq_credits(ni);
1361 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1362 libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
1363 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1364 ni->ni_peerrtrcredits, ni->ni_peertimeout);
1373 lnet_startup_lndnis(struct list_head *nilist)
1379 while (!list_empty(nilist)) {
1380 ni = list_entry(nilist->next, lnet_ni_t, ni_list);
1381 list_del(&ni->ni_list);
1382 rc = lnet_startup_lndni(ni, -1, -1, -1, -1);
1392 lnet_shutdown_lndnis();
1398 * Initialize LNet library.
1400 * Automatically called at module loading time. Caller has to call
1401 * lnet_fini() after a call to lnet_init(), if and only if the latter
1402 * returned 0. It must be called exactly once.
1404 * \return 0 on success, and -ve on failures.
1411 lnet_assert_wire_constants();
1413 memset(&the_lnet, 0, sizeof(the_lnet));
1415 /* refer to global cfs_cpt_table for now */
1416 the_lnet.ln_cpt_table = cfs_cpt_table;
1417 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
1419 LASSERT(the_lnet.ln_cpt_number > 0);
1420 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
1421 /* we are at risk of consuming all lh_cookie bits */
1422 CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change the CPT-table setting and retry\n",
1424 the_lnet.ln_cpt_number, LNET_CPT_MAX);
1428 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
1429 the_lnet.ln_cpt_bits++;
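/*
 * e.g. ln_cpt_number == 6 yields ln_cpt_bits == 3, enough to encode any CPT
 * index in the cookie bits reserved for it.
 */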
1431 rc = lnet_create_locks();
1433 CERROR("Can't create LNet global locks: %d\n", rc);
1437 the_lnet.ln_refcount = 0;
1438 LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
1439 INIT_LIST_HEAD(&the_lnet.ln_lnds);
1440 INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
1441 INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
1443 /* The hash table size is the number of bits it takes to express the set
1444 * ln_num_routes, minus 1 (better to underestimate than overestimate so we
1445 * don't waste memory). */
1446 if (rnet_htable_size <= 0)
1447 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
1448 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
1449 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
1450 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
1451 order_base_2(rnet_htable_size) - 1);
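/*
 * e.g. rnet_htable_size == 128 gives order_base_2(128) - 1 == 6 hash bits,
 * i.e. a 64-bucket remote-net table; the max_t() keeps at least one bit even
 * for very small requested sizes.
 */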
1453 /* All LNDs apart from the LOLND are in separate modules. They
1454 * register themselves when their module loads, and unregister
1455 * themselves when their module is unloaded. */
1456 lnet_register_lnd(&the_lolnd);
1461 * Finalize LNet library.
1463 * \pre lnet_init() called with success.
1464 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
1469 LASSERT(the_lnet.ln_refcount == 0);
1471 while (!list_empty(&the_lnet.ln_lnds))
1472 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
1474 lnet_destroy_locks();
1478 * Set LNet PID and start LNet interfaces, routing, and forwarding.
1480 * Users must call this function at least once before any other functions.
1481 * For each successful call there must be a corresponding call to
1482 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is ignored.
1485 * The PID used by LNet may be different from the one requested.
1488 * \param requested_pid PID requested by the caller.
1490 * \return >= 0 on success, and < 0 error code on failures.
1493 LNetNIInit(lnet_pid_t requested_pid)
1495 int im_a_router = 0;
1498 lnet_ping_info_t *pinfo;
1499 lnet_handle_md_t md_handle;
1500 struct list_head net_head;
1502 INIT_LIST_HEAD(&net_head);
1504 LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
1506 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
1508 if (the_lnet.ln_refcount > 0) {
1509 rc = the_lnet.ln_refcount++;
1510 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1514 rc = lnet_prepare(requested_pid);
1516 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1520 /* Add in the loopback network */
1521 if (lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head) == NULL) {
1526 /* If LNet is being initialized via DLC it is possible
1527 * that the user requests not to load module parameters (ones which
1528 * are supported by DLC) on initialization. Therefore, make sure not
1529 * to load networks, routes and forwarding from module parameters
1530 * in this case. On cleanup after a failure, only clean up
1531 * routes if they have been loaded */
1532 if (!the_lnet.ln_nis_from_mod_params) {
1533 rc = lnet_parse_networks(&net_head,
1534 lnet_get_networks());
1539 ni_count = lnet_startup_lndnis(&net_head);
1545 if (!the_lnet.ln_nis_from_mod_params) {
1546 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
1550 rc = lnet_check_routes();
1554 rc = lnet_rtrpools_alloc(im_a_router);
1559 rc = lnet_acceptor_start();
1562 the_lnet.ln_refcount = 1;
1563 /* Now I may use my own API functions... */
1565 rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
1569 lnet_ping_target_update(pinfo, md_handle);
1571 rc = lnet_router_checker_start();
1578 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1583 lnet_ping_target_fini();
1585 the_lnet.ln_refcount = 0;
1586 lnet_acceptor_stop();
1588 if (!the_lnet.ln_nis_from_mod_params)
1589 lnet_destroy_routes();
1591 lnet_shutdown_lndnis();
1595 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1596 while (!list_empty(&net_head)) {
1598 ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1599 list_del_init(&ni->ni_list);
1604 EXPORT_SYMBOL(LNetNIInit);
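/*
 * Usage sketch (illustrative): a kernel user brackets its lifetime with
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	...
 *	LNetNIFini();
 *
 * Subsequent LNetNIInit() calls only bump ln_refcount, so every successful
 * call needs a matching LNetNIFini().
 */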
1607 * Stop LNet interfaces, routing, and forwarding.
1609 * Users must call this function once for each successful call to LNetNIInit().
1610 * Once the LNetNIFini() operation has been started, the results of pending
1611 * API operations are undefined.
1613 * \return always 0 for current implementation.
1618 LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
1620 LASSERT (the_lnet.ln_refcount > 0);
1622 if (the_lnet.ln_refcount != 1) {
1623 the_lnet.ln_refcount--;
1625 LASSERT(!the_lnet.ln_niinit_self);
1630 lnet_router_checker_stop();
1631 lnet_ping_target_fini();
1633 /* Teardown fns that use my own API functions BEFORE here */
1634 the_lnet.ln_refcount = 0;
1636 lnet_acceptor_stop();
1637 lnet_destroy_routes();
1638 lnet_shutdown_lndnis();
1642 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1645 EXPORT_SYMBOL(LNetNIFini);
1648 * Grabs the NI data from the ni structure and fills the output parameters.
1651 * \param[in] ni network interface structure
1652 * \param[out] cpt_count the number of cpts the ni is on
1653 * \param[out] nid Network Interface ID
1654 * \param[out] peer_timeout NI peer timeout
1655 * \param[out] peer_tx_credits NI peer transmit credits
1656 * \param[out] peer_rtr_credits NI peer router credits
1657 * \param[out] max_tx_credits NI max transmit credit
1658 * \param[out] net_config Network configuration
1661 lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
1662 int *peer_timeout, int *peer_tx_credits,
1663 int *peer_rtr_credits, int *max_tx_credits,
1664 struct lnet_ioctl_net_config *net_config)
1671 if (net_config == NULL)
1674 CLASSERT(ARRAY_SIZE(ni->ni_interfaces) ==
1675 ARRAY_SIZE(net_config->ni_interfaces));
1677 if (ni->ni_interfaces[0] != NULL) {
1678 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
1679 if (ni->ni_interfaces[i] != NULL) {
1680 strncpy(net_config->ni_interfaces[i],
1681 ni->ni_interfaces[i],
1682 sizeof(net_config->ni_interfaces[i]));
1688 *peer_timeout = ni->ni_peertimeout;
1689 *peer_tx_credits = ni->ni_peertxcredits;
1690 *peer_rtr_credits = ni->ni_peerrtrcredits;
1691 *max_tx_credits = ni->ni_maxtxcredits;
1693 net_config->ni_status = ni->ni_status->ns_status;
1696 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
1697 i < LNET_MAX_SHOW_NUM_CPT;
1699 net_config->ni_cpts[i] = ni->ni_cpts[i];
1701 *cpt_count = ni->ni_ncpts;
1705 lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
1706 int *peer_tx_credits, int *peer_rtr_credits,
1707 int *max_tx_credits,
1708 struct lnet_ioctl_net_config *net_config)
1711 struct list_head *tmp;
1715 cpt = lnet_net_lock_current();
1717 list_for_each(tmp, &the_lnet.ln_nis) {
1718 ni = list_entry(tmp, lnet_ni_t, ni_list);
1722 lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout,
1723 peer_tx_credits, peer_rtr_credits,
1724 max_tx_credits, net_config);
1730 lnet_net_unlock(cpt);
1735 lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
1736 __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
1739 lnet_ping_info_t *pinfo;
1740 lnet_handle_md_t md_handle;
1742 struct list_head net_head;
1744 lnet_remotenet_t *rnet;
1746 INIT_LIST_HEAD(&net_head);
1748 /* Create a ni structure for the network string */
1749 rc = lnet_parse_networks(&net_head, nets);
1751 return rc == 0 ? -EINVAL : rc;
1753 LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
1756 rc = -EINVAL; /* only add one interface per call */
1760 ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1762 lnet_net_lock(LNET_LOCK_EX);
1763 rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid));
1764 lnet_net_unlock(LNET_LOCK_EX);
1765 /* make sure that the net added doesn't invalidate the current
1766 * configuration LNet is keeping */
1768 CERROR("Adding net %s will invalidate routing configuration\n",
1774 rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(),
1779 list_del_init(&ni->ni_list);
1781 rc = lnet_startup_lndni(ni, peer_timeout, peer_cr,
1782 peer_buf_cr, credits);
1786 if (ni->ni_lnd->lnd_accept != NULL) {
1787 rc = lnet_acceptor_start();
1789 /* shutdown the ni that we just started */
1790 CERROR("Failed to start up acceptor thread\n");
1791 lnet_shutdown_lndni(ni);
1796 lnet_ping_target_update(pinfo, md_handle);
1797 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1802 lnet_ping_md_unlink(pinfo, &md_handle);
1803 lnet_ping_info_free(pinfo);
1805 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1806 while (!list_empty(&net_head)) {
1807 ni = list_entry(net_head.next, struct lnet_ni, ni_list);
1808 list_del_init(&ni->ni_list);
1815 lnet_dyn_del_ni(__u32 net)
1818 lnet_ping_info_t *pinfo;
1819 lnet_handle_md_t md_handle;
1822 /* don't allow userspace to shut down the LOLND */
1823 if (LNET_NETTYP(net) == LOLND)
1826 LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
1827 /* create and link a new ping info, before removing the old one */
1828 rc = lnet_ping_info_setup(&pinfo, &md_handle,
1829 lnet_get_ni_count() - 1, false);
1833 ni = lnet_net2ni(net);
1839 /* decrement the reference counter taken by lnet_net2ni() */
1840 lnet_ni_decref_locked(ni, 0);
1842 lnet_shutdown_lndni(ni);
1844 if (lnet_count_acceptor_nis() == 0)
1845 lnet_acceptor_stop();
1847 lnet_ping_target_update(pinfo, md_handle);
1850 lnet_ping_md_unlink(pinfo, &md_handle);
1851 lnet_ping_info_free(pinfo);
1853 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1859 * This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and
1860 * IOC_LIBCFS_PORTALS_COMPATIBILITY commands to users, by tweaking the LNet
1861 * internal ioctl handler.
1863 * IOC_LIBCFS_PORTALS_COMPATIBILITY is now deprecated, don't use it.
1865 * \param cmd IOC_LIBCFS_DEBUG_PEER to print debugging data about a peer.
1866 * The data will be printed to system console. Don't use it excessively.
1867 * \param arg A pointer to lnet_process_id_t, process ID of the peer.
1869 * \return Always return 0 when called by users directly (i.e., not via ioctl).
1872 LNetCtl(unsigned int cmd, void *arg)
1874 struct libcfs_ioctl_data *data = arg;
1875 struct lnet_ioctl_config_data *config;
1876 lnet_process_id_t id = {0};
1880 CLASSERT(LIBCFS_IOC_DATA_MAX >= sizeof(struct lnet_ioctl_net_config) +
1881 sizeof(struct lnet_ioctl_config_data));
1884 case IOC_LIBCFS_GET_NI:
1885 rc = LNetGetId(data->ioc_count, &id);
1886 data->ioc_nid = id.nid;
1889 case IOC_LIBCFS_FAIL_NID:
1890 return lnet_fail_nid(data->ioc_nid, data->ioc_count);
1892 case IOC_LIBCFS_ADD_ROUTE:
1895 if (config->cfg_hdr.ioc_len < sizeof(*config))
1898 LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
1899 rc = lnet_add_route(config->cfg_net,
1900 config->cfg_config_u.cfg_route.rtr_hop,
1902 config->cfg_config_u.cfg_route.
1905 rc = lnet_check_routes();
1907 lnet_del_route(config->cfg_net,
1910 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1913 case IOC_LIBCFS_DEL_ROUTE:
1916 if (config->cfg_hdr.ioc_len < sizeof(*config))
1919 LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
1920 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
1921 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1924 case IOC_LIBCFS_GET_ROUTE:
1927 if (config->cfg_hdr.ioc_len < sizeof(*config))
1930 return lnet_get_route(config->cfg_count,
1932 &config->cfg_config_u.cfg_route.rtr_hop,
1934 &config->cfg_config_u.cfg_route.rtr_flags,
1935 &config->cfg_config_u.cfg_route.
1938 case IOC_LIBCFS_GET_NET: {
1939 struct lnet_ioctl_net_config *net_config;
1940 size_t total = sizeof(*config) + sizeof(*net_config);
1944 if (config->cfg_hdr.ioc_len < total)
1947 net_config = (struct lnet_ioctl_net_config *)
1949 if (net_config == NULL)
1952 return lnet_get_net_config(config->cfg_count,
1955 &config->cfg_config_u.
1956 cfg_net.net_peer_timeout,
1957 &config->cfg_config_u.cfg_net.
1958 net_peer_tx_credits,
1959 &config->cfg_config_u.cfg_net.
1960 net_peer_rtr_credits,
1961 &config->cfg_config_u.cfg_net.
1966 case IOC_LIBCFS_GET_LNET_STATS:
1968 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
1970 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
1973 lnet_counters_get(&lnet_stats->st_cntrs);
1977 case IOC_LIBCFS_CONFIG_RTR:
1980 if (config->cfg_hdr.ioc_len < sizeof(*config))
1983 LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
1984 if (config->cfg_config_u.cfg_buffers.buf_enable) {
1985 rc = lnet_rtrpools_enable();
1986 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1989 lnet_rtrpools_disable();
1990 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
1993 case IOC_LIBCFS_ADD_BUF:
1996 if (config->cfg_hdr.ioc_len < sizeof(*config))
1999 LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
2000 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
2002 config->cfg_config_u.cfg_buffers.
2004 config->cfg_config_u.cfg_buffers.
2006 LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
2009 case IOC_LIBCFS_GET_BUF: {
2010 struct lnet_ioctl_pool_cfg *pool_cfg;
2011 size_t total = sizeof(*config) + sizeof(*pool_cfg);
2015 if (config->cfg_hdr.ioc_len < total)
2018 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
2019 return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
2022 case IOC_LIBCFS_GET_PEER_INFO: {
2023 struct lnet_ioctl_peer *peer_info = arg;
2025 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
2028 return lnet_get_peer_info(
2029 peer_info->pr_count,
2031 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
2032 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
2033 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
2034 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
2035 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
2036 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
2037 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
2038 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
2041 case IOC_LIBCFS_NOTIFY_ROUTER:
2042 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
2043 cfs_time_current() -
2044 cfs_time_seconds(cfs_time_current_sec() -
2045 (time_t)data->ioc_u64[0]));
2047 case IOC_LIBCFS_PORTALS_COMPATIBILITY:
2048 /* This can be removed once lustre stops calling it */
2051 case IOC_LIBCFS_LNET_DIST:
2052 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
2053 if (rc < 0 && rc != -EHOSTUNREACH)
2056 data->ioc_u32[0] = rc;
2059 case IOC_LIBCFS_TESTPROTOCOMPAT:
2060 lnet_net_lock(LNET_LOCK_EX);
2061 the_lnet.ln_testprotocompat = data->ioc_flags;
2062 lnet_net_unlock(LNET_LOCK_EX);
2065 case IOC_LIBCFS_LNET_FAULT:
2066 return lnet_fault_ctl(data->ioc_flags, data);
2068 case IOC_LIBCFS_PING:
2069 id.nid = data->ioc_nid;
2070 id.pid = data->ioc_u32[0];
2071 rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
2072 (lnet_process_id_t __user *)data->ioc_pbuf1,
2073 data->ioc_plen1/sizeof(lnet_process_id_t));
2076 data->ioc_count = rc;
2079 case IOC_LIBCFS_DEBUG_PEER: {
2080 /* CAVEAT EMPTOR: this one is designed for calling directly, not via an ioctl */
2082 id = *((lnet_process_id_t *) arg);
2084 lnet_debug_peer(id.nid);
2086 ni = lnet_net2ni(LNET_NIDNET(id.nid));
2088 CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id));
2090 if (ni->ni_lnd->lnd_ctl == NULL) {
2091 CDEBUG(D_WARNING, "No ctl for %s\n",
2094 (void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
2103 ni = lnet_net2ni(data->ioc_net);
2107 if (ni->ni_lnd->lnd_ctl == NULL)
2110 rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
2117 EXPORT_SYMBOL(LNetCtl);
2120 * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
2121 * all interfaces share a same PID, as requested by LNetNIInit().
2123 * \param index Index of the interface to look up.
2124 * \param id On successful return, this location will hold the
2125 * lnet_process_id_t ID of the interface.
2127 * \retval 0 If an interface exists at \a index.
2128 * \retval -ENOENT If no interface has been found.
2131 LNetGetId(unsigned int index, lnet_process_id_t *id)
2134 struct list_head *tmp;
2138 LASSERT(the_lnet.ln_refcount > 0);
2140 cpt = lnet_net_lock_current();
2142 list_for_each(tmp, &the_lnet.ln_nis) {
2146 ni = list_entry(tmp, lnet_ni_t, ni_list);
2148 id->nid = ni->ni_nid;
2149 id->pid = the_lnet.ln_pid;
2154 lnet_net_unlock(cpt);
2157 EXPORT_SYMBOL(LNetGetId);
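/*
 * Usage sketch (illustrative): enumerating the local interfaces,
 *
 *	lnet_process_id_t id;
 *	int i;
 *
 *	for (i = 0; LNetGetId(i, &id) == 0; i++)
 *		CDEBUG(D_NET, "interface %d: %s\n", i, libcfs_id2str(id));
 *
 * iteration stops once LNetGetId() returns -ENOENT.
 */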
2160 * Print a string representation of handle \a h into buffer \a str of \a len bytes.
2164 LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
2166 snprintf(str, len, LPX64, h.cookie);
2168 EXPORT_SYMBOL(LNetSnprintHandle);
2171 lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t __user *ids,
2174 lnet_handle_eq_t eqh;
2175 lnet_handle_md_t mdh;
2177 lnet_md_t md = { NULL };
2181 const int a_long_time = 60000; /* ms */
2183 lnet_ping_info_t *info;
2184 lnet_process_id_t tmpid;
2191 infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
2194 id.nid == LNET_NID_ANY ||
2195 timeout_ms > 500000 || /* arbitrary limit! */
2196 n_ids > 20) /* arbitrary limit! */
2199 if (id.pid == LNET_PID_ANY)
2200 id.pid = LNET_PID_LUSTRE;
2202 LIBCFS_ALLOC(info, infosz);
2206 /* NB 2 events max (including any unlink event) */
2207 rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
2209 CERROR("Can't allocate EQ: %d\n", rc);
2213 /* initialize md content */
2216 md.threshold = 2; /*GET/REPLY*/
2218 md.options = LNET_MD_TRUNCATE;
2222 rc = LNetMDBind(md, LNET_UNLINK, &mdh);
2224 CERROR("Can't bind MD: %d\n", rc);
2228 rc = LNetGet(LNET_NID_ANY, mdh, id,
2229 LNET_RESERVED_PORTAL,
2230 LNET_PROTO_PING_MATCHBITS, 0);
2233 /* Don't CERROR; this could be deliberate! */
2235 rc2 = LNetMDUnlink(mdh);
2238 /* NB must wait for the UNLINK event below... */
2240 timeout_ms = a_long_time;
2244 /* MUST block for unlink to complete */
2246 blocked = cfs_block_allsigs();
2248 rc2 = LNetEQPoll(&eqh, 1, timeout_ms, &event, &which);
2251 cfs_restore_sigs(blocked);
2253 CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
2254 (rc2 <= 0) ? -1 : event.type,
2255 (rc2 <= 0) ? -1 : event.status,
2256 (rc2 > 0 && event.unlinked) ? " unlinked" : "");
2258 LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
2260 if (rc2 <= 0 || event.status != 0) {
2261 /* timeout or error */
2262 if (!replied && rc == 0)
2263 rc = (rc2 < 0) ? rc2 :
2264 (rc2 == 0) ? -ETIMEDOUT :
2268 /* Ensure completion in finite time... */
2270 /* No assertion (racing with network) */
2272 timeout_ms = a_long_time;
2273 } else if (rc2 == 0) {
2274 /* timed out waiting for unlink */
2275 CWARN("ping %s: late network completion\n",
2278 } else if (event.type == LNET_EVENT_REPLY) {
2283 } while (rc2 <= 0 || !event.unlinked);
2287 CWARN("%s: Unexpected rc >= 0 but no reply!\n",
2294 LASSERT(nob >= 0 && nob <= infosz);
2296 rc = -EPROTO; /* if I can't parse... */
2299 /* can't check magic/version */
2300 CERROR("%s: ping info too short %d\n",
2301 libcfs_id2str(id), nob);
2305 if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
2306 lnet_swap_pinginfo(info);
2307 } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
2308 CERROR("%s: Unexpected magic %08x\n",
2309 libcfs_id2str(id), info->pi_magic);
2313 if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
2314 CERROR("%s: ping w/o NI status: 0x%x\n",
2315 libcfs_id2str(id), info->pi_features);
2319 if (nob < offsetof(lnet_ping_info_t, pi_ni[0])) {
2320 CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
2321 nob, (int)offsetof(lnet_ping_info_t, pi_ni[0]));
2325 if (info->pi_nnis < n_ids)
2326 n_ids = info->pi_nnis;
2328 if (nob < offsetof(lnet_ping_info_t, pi_ni[n_ids])) {
2329 CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
2330 nob, (int)offsetof(lnet_ping_info_t, pi_ni[n_ids]));
2334 rc = -EFAULT; /* If I SEGV... */
2336 memset(&tmpid, 0, sizeof(tmpid));
2337 for (i = 0; i < n_ids; i++) {
2338 tmpid.pid = info->pi_pid;
2339 tmpid.nid = info->pi_ni[i].ns_nid;
2340 if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
2346 rc2 = LNetEQFree(eqh);
2348 CERROR("rc2 %d\n", rc2);
2352 LIBCFS_FREE(info, infosz);