/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/log2.h>
#include <linux/ktime.h>

#include <lnet/lib-lnet.h>

#define D_LNI D_CONSOLE

lnet_t the_lnet;	/* THE state of the network */
EXPORT_SYMBOL(the_lnet);

static char *ip2nets = "";
module_param(ip2nets, charp, 0444);
MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");

static char *networks = "";
module_param(networks, charp, 0444);
MODULE_PARM_DESC(networks, "local networks");

static char *routes = "";
module_param(routes, charp, 0444);
MODULE_PARM_DESC(routes, "routes to non-local networks");

static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");

static int use_tcp_bonding = false;
module_param(use_tcp_bonding, int, 0444);
MODULE_PARM_DESC(use_tcp_bonding,
		 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");

static int lnet_ping(lnet_process_id_t id, signed long timeout,
		     lnet_process_id_t __user *ids, int n_ids);

static char *
lnet_get_networks(void)
{
	char *nets;
	int   rc;

	if (*networks != 0 && *ip2nets != 0) {
		LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
				   "'ip2nets' but not both at once\n");
		return NULL;
	}

	if (*ip2nets != 0) {
		rc = lnet_parse_ip2nets(&nets, ip2nets);
		return (rc == 0) ? nets : NULL;
	}

	if (*networks != 0)
		return networks;

	return "tcp";
}

static void
lnet_init_locks(void)
{
	spin_lock_init(&the_lnet.ln_eq_wait_lock);
	init_waitqueue_head(&the_lnet.ln_eq_waitq);
	init_waitqueue_head(&the_lnet.ln_rc_waitq);
	mutex_init(&the_lnet.ln_lnd_mutex);
	mutex_init(&the_lnet.ln_api_mutex);
}

static void
lnet_fini_locks(void)
{
}

struct kmem_cache *lnet_mes_cachep;	   /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep;  /* <= LNET_SMALL_MD_SIZE bytes
					    * MDs kmem_cache */

static int
lnet_descriptor_setup(void)
{
	/* create specific kmem_cache for MEs and small MDs (i.e., originally
	 * allocated in <size-xxx> kmem_cache).
	 */
	lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(lnet_me_t),
					    0, 0, NULL);
	if (!lnet_mes_cachep)
		return -ENOMEM;

	lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
						  LNET_SMALL_MD_SIZE, 0, 0,
						  NULL);
	if (!lnet_small_mds_cachep)
		return -ENOMEM;

	return 0;
}

static void
lnet_descriptor_cleanup(void)
{
	if (lnet_small_mds_cachep) {
		kmem_cache_destroy(lnet_small_mds_cachep);
		lnet_small_mds_cachep = NULL;
	}

	if (lnet_mes_cachep) {
		kmem_cache_destroy(lnet_mes_cachep);
		lnet_mes_cachep = NULL;
	}
}

static int
lnet_create_remote_nets_table(void)
{
	int		  i;
	struct list_head *hash;

	LASSERT(the_lnet.ln_remote_nets_hash == NULL);
	LASSERT(the_lnet.ln_remote_nets_hbits > 0);
	LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
	if (hash == NULL) {
		CERROR("Failed to create remote nets hash table\n");
		return -ENOMEM;
	}

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		INIT_LIST_HEAD(&hash[i]);
	the_lnet.ln_remote_nets_hash = hash;
	return 0;
}

static void
lnet_destroy_remote_nets_table(void)
{
	int i;

	if (the_lnet.ln_remote_nets_hash == NULL)
		return;

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
		LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));

	LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
		    LNET_REMOTE_NETS_HASH_SIZE *
		    sizeof(the_lnet.ln_remote_nets_hash[0]));
	the_lnet.ln_remote_nets_hash = NULL;
}

static void
lnet_destroy_locks(void)
{
	if (the_lnet.ln_res_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_res_lock);
		the_lnet.ln_res_lock = NULL;
	}

	if (the_lnet.ln_net_lock != NULL) {
		cfs_percpt_lock_free(the_lnet.ln_net_lock);
		the_lnet.ln_net_lock = NULL;
	}

	lnet_fini_locks();
}

static int
lnet_create_locks(void)
{
	lnet_init_locks();

	the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_res_lock == NULL)
		goto failed;

	the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
	if (the_lnet.ln_net_lock == NULL)
		goto failed;

	return 0;

failed:
	lnet_destroy_locks();
	return -ENOMEM;
}

static void lnet_assert_wire_constants(void)
{
	/* Wire protocol assertions generated by 'wirecheck'
	 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
	 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
	 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */

	/* Constants... */
	CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
	CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
	CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
	CLASSERT(LNET_MSG_ACK == 0);
	CLASSERT(LNET_MSG_PUT == 1);
	CLASSERT(LNET_MSG_GET == 2);
	CLASSERT(LNET_MSG_REPLY == 3);
	CLASSERT(LNET_MSG_HELLO == 4);

	/* Checks for struct lnet_handle_wire */
	CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
	CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
	CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
	CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
	CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);

	/* Checks for struct lnet_magicversion_t */
	CLASSERT((int)sizeof(lnet_magicversion_t) == 8);
	CLASSERT((int)offsetof(lnet_magicversion_t, magic) == 0);
	CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->magic) == 4);
	CLASSERT((int)offsetof(lnet_magicversion_t, version_major) == 4);
	CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_major) == 2);
	CLASSERT((int)offsetof(lnet_magicversion_t, version_minor) == 6);
	CLASSERT((int)sizeof(((lnet_magicversion_t *)0)->version_minor) == 2);

	/* Checks for struct lnet_hdr_t */
	CLASSERT((int)sizeof(lnet_hdr_t) == 72);
	CLASSERT((int)offsetof(lnet_hdr_t, dest_nid) == 0);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_nid) == 8);
	CLASSERT((int)offsetof(lnet_hdr_t, src_nid) == 8);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_nid) == 8);
	CLASSERT((int)offsetof(lnet_hdr_t, dest_pid) == 16);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->dest_pid) == 4);
	CLASSERT((int)offsetof(lnet_hdr_t, src_pid) == 20);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->src_pid) == 4);
	CLASSERT((int)offsetof(lnet_hdr_t, type) == 24);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->type) == 4);
	CLASSERT((int)offsetof(lnet_hdr_t, payload_length) == 28);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->payload_length) == 4);
	CLASSERT((int)offsetof(lnet_hdr_t, msg) == 32);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg) == 40);

	/* Ack */
	CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.dst_wmd) == 32);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.dst_wmd) == 16);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.match_bits) == 48);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.match_bits) == 8);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.ack.mlength) == 56);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.ack.mlength) == 4);

	/* Put */
	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ack_wmd) == 32);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ack_wmd) == 16);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.match_bits) == 48);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.match_bits) == 8);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.hdr_data) == 56);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.hdr_data) == 8);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.ptl_index) == 64);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.ptl_index) == 4);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.put.offset) == 68);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.put.offset) == 4);

	/* Get */
	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.return_wmd) == 32);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.return_wmd) == 16);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.match_bits) == 48);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.match_bits) == 8);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.ptl_index) == 56);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.ptl_index) == 4);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.src_offset) == 60);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.src_offset) == 4);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.get.sink_length) == 64);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.get.sink_length) == 4);

	/* Reply */
	CLASSERT((int)offsetof(lnet_hdr_t, msg.reply.dst_wmd) == 32);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.reply.dst_wmd) == 16);

	/* Hello */
	CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.incarnation) == 32);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.incarnation) == 8);
	CLASSERT((int)offsetof(lnet_hdr_t, msg.hello.type) == 40);
	CLASSERT((int)sizeof(((lnet_hdr_t *)0)->msg.hello.type) == 4);
}
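
/*
 * Note: CLASSERT() is libcfs's compile-time assertion (in the same
 * spirit as the kernel's BUILD_BUG_ON()), so every check above is
 * evaluated by the compiler and costs nothing at runtime.  The fixed
 * sizes and offsets freeze the on-wire layout of lnet_hdr_t: if a
 * change ever shifts a field, the build breaks instead of silently
 * breaking protocol interoperability between nodes.
 */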

static lnd_t *lnet_find_lnd_by_type(__u32 type)
{
	lnd_t		 *lnd;
	struct list_head *tmp;

	/* holding lnd mutex */
	list_for_each(tmp, &the_lnet.ln_lnds) {
		lnd = list_entry(tmp, lnd_t, lnd_list);

		if (lnd->lnd_type == type)
			return lnd;
	}

	return NULL;
}

void
lnet_register_lnd(lnd_t *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);

	list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
	lnd->lnd_refcount = 0;

	CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_register_lnd);

void
lnet_unregister_lnd(lnd_t *lnd)
{
	mutex_lock(&the_lnet.ln_lnd_mutex);

	LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
	LASSERT(lnd->lnd_refcount == 0);

	list_del(&lnd->lnd_list);
	CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));

	mutex_unlock(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);

void
lnet_counters_get(lnet_counters_t *counters)
{
	lnet_counters_t *ctr;
	int		 i;

	memset(counters, 0, sizeof(*counters));

	lnet_net_lock(LNET_LOCK_EX);

	cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
		counters->msgs_max     += ctr->msgs_max;
		counters->msgs_alloc   += ctr->msgs_alloc;
		counters->errors       += ctr->errors;
		counters->send_count   += ctr->send_count;
		counters->recv_count   += ctr->recv_count;
		counters->route_count  += ctr->route_count;
		counters->drop_count   += ctr->drop_count;
		counters->send_length  += ctr->send_length;
		counters->recv_length  += ctr->recv_length;
		counters->route_length += ctr->route_length;
		counters->drop_length  += ctr->drop_length;
	}

	lnet_net_unlock(LNET_LOCK_EX);
}
EXPORT_SYMBOL(lnet_counters_get);

void
lnet_counters_reset(void)
{
	lnet_counters_t *counters;
	int		 i;

	lnet_net_lock(LNET_LOCK_EX);

	cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
		memset(counters, 0, sizeof(lnet_counters_t));

	lnet_net_unlock(LNET_LOCK_EX);
}

static char *
lnet_res_type2str(int type)
{
	switch (type) {
	default:
		LBUG();
	case LNET_COOKIE_TYPE_MD:
		return "MD";
	case LNET_COOKIE_TYPE_ME:
		return "ME";
	case LNET_COOKIE_TYPE_EQ:
		return "EQ";
	}
}

static void
lnet_res_container_cleanup(struct lnet_res_container *rec)
{
	int count = 0;

	if (rec->rec_type == 0) /* not set yet, it's uninitialized */
		return;

	while (!list_empty(&rec->rec_active)) {
		struct list_head *e = rec->rec_active.next;

		list_del_init(e);
		if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
			lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));

		} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
			lnet_md_free(list_entry(e, lnet_libmd_t, md_list));

		} else { /* NB: Active MEs should be attached on portals */
			LBUG();
		}
		count++;
	}

	if (count > 0) {
		/* Found a live MD/ME/EQ. Users really should unlink/free
		 * all of them before finalizing LNet, but if someone didn't,
		 * we have to recycle the garbage for them */
		CERROR("%d active elements on exit of %s container\n",
		       count, lnet_res_type2str(rec->rec_type));
	}

	if (rec->rec_lh_hash != NULL) {
		LIBCFS_FREE(rec->rec_lh_hash,
			    LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
		rec->rec_lh_hash = NULL;
	}

	rec->rec_type = 0; /* mark it as finalized */
}

int
lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
{
	int rc = 0;
	int i;

	LASSERT(rec->rec_type == 0);

	rec->rec_type = type;
	INIT_LIST_HEAD(&rec->rec_active);

	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;

	/* Arbitrary choice of hash table size */
	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
	if (rec->rec_lh_hash == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < LNET_LH_HASH_SIZE; i++)
		INIT_LIST_HEAD(&rec->rec_lh_hash[i]);

	return 0;

out:
	CERROR("Failed to setup %s resource container\n",
	       lnet_res_type2str(type));
	lnet_res_container_cleanup(rec);
	return rc;
}

static void
lnet_res_containers_destroy(struct lnet_res_container **recs)
{
	struct lnet_res_container *rec;
	int			   i;

	cfs_percpt_for_each(rec, i, recs)
		lnet_res_container_cleanup(rec);

	cfs_percpt_free(recs);
}

static struct lnet_res_container **
lnet_res_containers_create(int type)
{
	struct lnet_res_container **recs;
	struct lnet_res_container  *rec;
	int			    rc;
	int			    i;

	recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
	if (recs == NULL) {
		CERROR("Failed to allocate %s resource containers\n",
		       lnet_res_type2str(type));
		return NULL;
	}

	cfs_percpt_for_each(rec, i, recs) {
		rc = lnet_res_container_setup(rec, i, type);
		if (rc != 0) {
			lnet_res_containers_destroy(recs);
			return NULL;
		}
	}

	return recs;
}

lnet_libhandle_t *
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
	/* ALWAYS called with lnet_res_lock held */
	struct list_head *head;
	lnet_libhandle_t *lh;
	unsigned int	  hash;

	if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
		return NULL;

	hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
	head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];

	list_for_each_entry(lh, head, lh_hash_chain) {
		if (lh->lh_cookie == cookie)
			return lh;
	}

	return NULL;
}

void
lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
{
	/* ALWAYS called with lnet_res_lock held */
	unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
	unsigned int hash;

	lh->lh_cookie = rec->rec_lh_cookie;
	rec->rec_lh_cookie += 1 << ibits;

	hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;

	list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
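
/*
 * Sketch of the cookie layout implied by the two routines above (the
 * field widths follow LNET_COOKIE_TYPE_BITS and LNET_CPT_BITS; this
 * diagram is illustrative, not normative):
 *
 *	| <-- per-container counter --> | <- CPT -> | <- cookie type -> |
 *	63                                                              0
 *
 * The low bits identify the resource type (MD/ME/EQ), the middle bits
 * the CPT that owns the container, and the remaining high bits form a
 * counter that lnet_res_lh_initialize() advances by (1 << ibits).  A
 * cookie is therefore unique within its container, and
 * lnet_res_lh_lookup() can both reject cookies of the wrong type and
 * recover the hash chain directly from the counter bits.
 */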

static int lnet_unprepare(void);

static int
lnet_prepare(lnet_pid_t requested_pid)
{
	/* Prepare to bring up the network */
	struct lnet_res_container **recs;
	int			    rc = 0;

	if (requested_pid == LNET_PID_ANY) {
		/* Don't instantiate LNET just for me */
		return -ENETDOWN;
	}

	LASSERT(the_lnet.ln_refcount == 0);

	the_lnet.ln_routing = 0;

	LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
	the_lnet.ln_pid = requested_pid;

	INIT_LIST_HEAD(&the_lnet.ln_test_peers);
	INIT_LIST_HEAD(&the_lnet.ln_nets);
	INIT_LIST_HEAD(&the_lnet.ln_routers);
	INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
	INIT_LIST_HEAD(&the_lnet.ln_delay_rules);

	rc = lnet_descriptor_setup();
	if (rc != 0)
		goto failed;

	rc = lnet_create_remote_nets_table();
	if (rc != 0)
		goto failed;

	/*
	 * NB the interface cookie in wire handles guards against delayed
	 * replies and ACKs appearing valid after reboot.
	 */
	the_lnet.ln_interface_cookie = ktime_get_real_ns();

	the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
						sizeof(lnet_counters_t));
	if (the_lnet.ln_counters == NULL) {
		CERROR("Failed to allocate counters for LNet\n");
		rc = -ENOMEM;
		goto failed;
	}

	rc = lnet_peer_tables_create();
	if (rc != 0)
		goto failed;

	rc = lnet_msg_containers_create();
	if (rc != 0)
		goto failed;

	rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
				      LNET_COOKIE_TYPE_EQ);
	if (rc != 0)
		goto failed;

	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
	if (recs == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	the_lnet.ln_me_containers = recs;

	recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
	if (recs == NULL) {
		rc = -ENOMEM;
		goto failed;
	}

	the_lnet.ln_md_containers = recs;

	rc = lnet_portals_create();
	if (rc != 0) {
		CERROR("Failed to create portals for LNet: %d\n", rc);
		goto failed;
	}

	return 0;

failed:
	lnet_unprepare();
	return rc;
}

static int
lnet_unprepare(void)
{
	/* NB no LNET_LOCK since this is the last reference.  All LND instances
	 * have shut down already, so it is safe to unlink and free all
	 * descriptors, even those that appear committed to a network op (e.g.
	 * an MD with a non-zero pending count) */

	lnet_fail_nid(LNET_NID_ANY, 0);

	LASSERT(the_lnet.ln_refcount == 0);
	LASSERT(list_empty(&the_lnet.ln_test_peers));
	LASSERT(list_empty(&the_lnet.ln_nets));

	lnet_portals_destroy();

	if (the_lnet.ln_md_containers != NULL) {
		lnet_res_containers_destroy(the_lnet.ln_md_containers);
		the_lnet.ln_md_containers = NULL;
	}

	if (the_lnet.ln_me_containers != NULL) {
		lnet_res_containers_destroy(the_lnet.ln_me_containers);
		the_lnet.ln_me_containers = NULL;
	}

	lnet_res_container_cleanup(&the_lnet.ln_eq_container);

	lnet_msg_containers_destroy();
	lnet_peer_tables_destroy();
	lnet_rtrpools_free(0);

	if (the_lnet.ln_counters != NULL) {
		cfs_percpt_free(the_lnet.ln_counters);
		the_lnet.ln_counters = NULL;
	}
	lnet_destroy_remote_nets_table();
	lnet_descriptor_cleanup();

	return 0;
}

struct lnet_ni *
lnet_net2ni_locked(__u32 net_id, int cpt)
{
	struct lnet_ni	*ni;
	struct lnet_net *net;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id) {
			ni = list_entry(net->net_ni_list.next, struct lnet_ni,
					ni_netlist);
			return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_net2ni(__u32 net)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_net2ni_locked(net, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_net2ni);

static struct lnet_net *
lnet_get_net_locked(__u32 net_id)
{
	struct lnet_net *net;

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_id == net_id)
			return net;
	}

	return NULL;
}

unsigned int
lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
{
	__u64	     key = nid;
	unsigned int val;

	LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);

	if (number == 1)
		return 0;

	val = hash_long(key, LNET_CPT_BITS);
	/* NB: LNET_CPT_NUMBER doesn't have to be PO2 */
	if (val < number)
		return val;

	return (unsigned int)(key + val + (val >> 1)) % number;
}
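
/*
 * Note on the hash above: it only needs to spread NIDs evenly over the
 * first 'number' CPTs on this node; it does not need to be stable
 * across nodes or reboots.  For example (purely illustrative), with
 * LNET_CPT_NUMBER == 4 every NID deterministically maps to one of CPTs
 * 0-3, and the same NID always hashes to the same CPT for the lifetime
 * of this node, which keeps per-peer state on a single CPT.
 */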

int
lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
{
	struct lnet_net *net;

	/* must be called while holding lnet_net_lock */
	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	/*
	 * If NI is provided then use the CPT identified in the NI cpt
	 * list if one exists. If one doesn't exist, then that NI is
	 * associated with all CPTs and it follows that the net it belongs
	 * to is implicitly associated with all CPTs, so just hash the nid
	 * and return that.
	 */
	if (ni != NULL) {
		if (ni->ni_cpts != NULL)
			return ni->ni_cpts[lnet_nid_cpt_hash(nid,
							     ni->ni_ncpts)];
		else
			return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
	}

	/* no NI provided so look at the net */
	net = lnet_get_net_locked(LNET_NIDNET(nid));

	if (net != NULL && net->net_cpts != NULL)
		return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];

	return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
}
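
/*
 * Summary of the selection order above: (1) the NI's own CPT list, if
 * the caller supplied an NI bound to specific CPTs; (2) the CPT list of
 * the NID's net, if the net is configured with one; (3) a plain hash
 * over all CPTs as the fallback.
 */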

int
lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
{
	int cpt;
	int cpt2;

	if (LNET_CPT_NUMBER == 1)
		return 0; /* the only one */

	cpt = lnet_net_lock_current();

	cpt2 = lnet_cpt_of_nid_locked(nid, ni);

	lnet_net_unlock(cpt);

	return cpt2;
}
EXPORT_SYMBOL(lnet_cpt_of_nid);

int
lnet_islocalnet(__u32 net_id)
{
	struct lnet_net *net;
	int		 cpt;
	bool		 local;

	cpt = lnet_net_lock_current();

	net = lnet_get_net_locked(net_id);

	local = net != NULL;

	lnet_net_unlock(cpt);

	return local;
}

bool
lnet_is_ni_healthy_locked(struct lnet_ni *ni)
{
	if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
	    ni->ni_state == LNET_NI_STATE_DEGRADED)
		return true;

	return false;
}

struct lnet_ni *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
	struct lnet_net *net;
	struct lnet_ni	*ni;

	LASSERT(cpt != LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (ni->ni_nid == nid)
				return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_nid2ni_addref(lnet_nid_t nid)
{
	struct lnet_ni *ni;

	lnet_net_lock(0);
	ni = lnet_nid2ni_locked(nid, 0);
	if (ni != NULL)
		lnet_ni_addref_locked(ni, 0);
	lnet_net_unlock(0);

	return ni;
}
EXPORT_SYMBOL(lnet_nid2ni_addref);

int
lnet_islocalnid(lnet_nid_t nid)
{
	struct lnet_ni *ni;
	int		cpt;

	cpt = lnet_net_lock_current();
	ni = lnet_nid2ni_locked(nid, cpt);
	lnet_net_unlock(cpt);

	return ni != NULL;
}

int
lnet_count_acceptor_nets(void)
{
	/* Return the # of NIs that need the acceptor. */
	int		 count = 0;
	struct lnet_net *net;
	int		 cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		/* all socklnd type networks should have the acceptor
		 * thread started */
		if (net->net_lnd->lnd_accept != NULL)
			count++;
	}

	lnet_net_unlock(cpt);

	return count;
}

static struct lnet_ping_info *
lnet_ping_info_create(int num_ni)
{
	struct lnet_ping_info *ping_info;
	unsigned int	       infosz;

	infosz = offsetof(struct lnet_ping_info, pi_ni[num_ni]);
	LIBCFS_ALLOC(ping_info, infosz);
	if (ping_info == NULL) {
		CERROR("Can't allocate ping info[%d]\n", num_ni);
		return NULL;
	}

	ping_info->pi_nnis = num_ni;
	ping_info->pi_pid = the_lnet.ln_pid;
	ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
	ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;

	return ping_info;
}
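
/*
 * Allocation-size note: pi_ni[] is a flexible array member, so
 * offsetof(struct lnet_ping_info, pi_ni[n]) yields exactly the number
 * of bytes needed for a ping info carrying n NI status slots.  The
 * same idiom is used by lnet_ping_info_free() and lnet_ping() below,
 * so allocation and free sizes always agree.
 */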

static int
lnet_get_net_ni_count_locked(struct lnet_net *net)
{
	struct lnet_ni *ni;
	int		count = 0;

	list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
		count++;

	return count;
}

static int
lnet_get_ni_count(void)
{
	struct lnet_ni	*ni;
	struct lnet_net *net;
	int		 count = 0;

	lnet_net_lock(0);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
			count++;
	}

	lnet_net_unlock(0);

	return count;
}

static void
lnet_ping_info_free(struct lnet_ping_info *pinfo)
{
	LIBCFS_FREE(pinfo,
		    offsetof(struct lnet_ping_info,
			     pi_ni[pinfo->pi_nnis]));
}

static void
lnet_ping_info_destroy(void)
{
	struct lnet_net *net;
	struct lnet_ni	*ni;

	lnet_net_lock(LNET_LOCK_EX);

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			lnet_ni_lock(ni);
			ni->ni_status = NULL;
			lnet_ni_unlock(ni);
		}
	}

	lnet_ping_info_free(the_lnet.ln_ping_info);
	the_lnet.ln_ping_info = NULL;

	lnet_net_unlock(LNET_LOCK_EX);
}

static void
lnet_ping_event_handler(lnet_event_t *event)
{
	struct lnet_ping_info *pinfo = event->md.user_ptr;

	if (event->unlinked)
		pinfo->pi_features = LNET_PING_FEAT_INVAL;
}

static int
lnet_ping_info_setup(struct lnet_ping_info **ppinfo, lnet_handle_md_t *md_handle,
		     int ni_count, bool set_eq)
{
	lnet_handle_me_t  me_handle;
	lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
	lnet_md_t	  md = {NULL};
	int		  rc;
	int		  rc2;

	if (set_eq) {
		rc = LNetEQAlloc(0, lnet_ping_event_handler,
				 &the_lnet.ln_ping_target_eq);
		if (rc != 0) {
			CERROR("Can't allocate ping EQ: %d\n", rc);
			return rc;
		}
	}

	*ppinfo = lnet_ping_info_create(ni_count);
	if (*ppinfo == NULL) {
		rc = -ENOMEM;
		goto failed_0;
	}

	rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
			  LNET_PROTO_PING_MATCHBITS, 0,
			  LNET_UNLINK, LNET_INS_AFTER,
			  &me_handle);
	if (rc != 0) {
		CERROR("Can't create ping ME: %d\n", rc);
		goto failed_1;
	}

	/* initialize md content */
	md.start     = *ppinfo;
	md.length    = offsetof(struct lnet_ping_info,
				pi_ni[(*ppinfo)->pi_nnis]);
	md.threshold = LNET_MD_THRESH_INF;
	md.max_size  = 0;
	md.options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
		       LNET_MD_MANAGE_REMOTE;
	md.eq_handle = the_lnet.ln_ping_target_eq;
	md.user_ptr  = *ppinfo;

	rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
	if (rc != 0) {
		CERROR("Can't attach ping MD: %d\n", rc);
		goto failed_2;
	}

	return 0;

failed_2:
	rc2 = LNetMEUnlink(me_handle);
	LASSERT(rc2 == 0);
failed_1:
	lnet_ping_info_free(*ppinfo);
	*ppinfo = NULL;
failed_0:
	if (set_eq)
		LNetEQFree(the_lnet.ln_ping_target_eq);
	return rc;
}

static void
lnet_ping_md_unlink(struct lnet_ping_info *pinfo, lnet_handle_md_t *md_handle)
{
	sigset_t blocked = cfs_block_allsigs();

	LNetMDUnlink(*md_handle);
	LNetInvalidateHandle(md_handle);

	/* NB the MD could be busy; this just starts the unlink */
	while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
		CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}

	cfs_restore_sigs(blocked);
}

static void
lnet_ping_info_install_locked(struct lnet_ping_info *ping_info)
{
	struct lnet_ni	      *ni;
	struct lnet_net	      *net;
	struct lnet_ni_status *ns;
	int		       i = 0;

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			LASSERT(i < ping_info->pi_nnis);

			ns = &ping_info->pi_ni[i];

			ns->ns_nid = ni->ni_nid;

			lnet_ni_lock(ni);
			ns->ns_status = (ni->ni_status != NULL) ?
					 ni->ni_status->ns_status :
					 LNET_NI_STATUS_UP;
			ni->ni_status = ns;
			lnet_ni_unlock(ni);

			i++;
		}
	}
}

static void
lnet_ping_target_update(struct lnet_ping_info *pinfo, lnet_handle_md_t md_handle)
{
	struct lnet_ping_info *old_pinfo = NULL;
	lnet_handle_md_t       old_md;

	/* switch the NIs to point to the new ping info created */
	lnet_net_lock(LNET_LOCK_EX);

	if (!the_lnet.ln_routing)
		pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
	lnet_ping_info_install_locked(pinfo);

	if (the_lnet.ln_ping_info != NULL) {
		old_pinfo = the_lnet.ln_ping_info;
		old_md = the_lnet.ln_ping_target_md;
	}
	the_lnet.ln_ping_target_md = md_handle;
	the_lnet.ln_ping_info = pinfo;

	lnet_net_unlock(LNET_LOCK_EX);

	if (old_pinfo != NULL) {
		/* unlink the old ping info */
		lnet_ping_md_unlink(old_pinfo, &old_md);
		lnet_ping_info_free(old_pinfo);
	}
}

static void
lnet_ping_target_fini(void)
{
	int rc;

	lnet_ping_md_unlink(the_lnet.ln_ping_info,
			    &the_lnet.ln_ping_target_md);

	rc = LNetEQFree(the_lnet.ln_ping_target_eq);
	LASSERT(rc == 0);

	lnet_ping_info_destroy();
}

static int
lnet_ni_tq_credits(lnet_ni_t *ni)
{
	int credits;

	LASSERT(ni->ni_ncpts >= 1);

	if (ni->ni_ncpts == 1)
		return ni->ni_net->net_tunables.lct_max_tx_credits;

	credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
	credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
	credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);

	return credits;
}
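
/*
 * Worked example of the arithmetic above (values chosen purely for
 * illustration): with lct_max_tx_credits = 256, lct_peer_tx_credits = 8
 * and ni_ncpts = 4, each per-CPT queue gets 256 / 4 = 64 credits; the
 * max() floor of 8 * 8 = 64 and the min() ceiling of 256 leave that
 * unchanged, so every tx queue starts with 64 credits.
 */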

static void
lnet_ni_unlink_locked(lnet_ni_t *ni)
{
	if (!list_empty(&ni->ni_cptlist)) {
		list_del_init(&ni->ni_cptlist);
		lnet_ni_decref_locked(ni, 0);
	}

	/* move it to the zombie list so nobody can find it anymore */
	LASSERT(!list_empty(&ni->ni_netlist));
	list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
	lnet_ni_decref_locked(ni, 0);
}

static void
lnet_clear_zombies_nis_locked(struct lnet_net *net)
{
	int		  i;
	int		  islo;
	lnet_ni_t	 *ni;
	struct list_head *zombie_list = &net->net_ni_zombie;

	/*
	 * Now wait for the NIs I just nuked to show up on the zombie
	 * list and shut them down in guaranteed thread context
	 */
	i = 2;
	while (!list_empty(zombie_list)) {
		int *ref;
		int  j;

		ni = list_entry(zombie_list->next,
				lnet_ni_t, ni_netlist);
		list_del_init(&ni->ni_netlist);
		/* the ni should be in deleting state. If it's not it's
		 * a bug */
		LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
		cfs_percpt_for_each(ref, j, ni->ni_refs) {
			if (*ref == 0)
				continue;
			/* still busy, add it back to zombie list */
			list_add(&ni->ni_netlist, zombie_list);
			break;
		}

		if (!list_empty(&ni->ni_netlist)) {
			lnet_net_unlock(LNET_LOCK_EX);
			++i;
			if ((i & (-i)) == i) {
				CDEBUG(D_WARNING,
				       "Waiting for zombie LNI %s\n",
				       libcfs_nid2str(ni->ni_nid));
			}
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
			lnet_net_lock(LNET_LOCK_EX);
			continue;
		}

		lnet_net_unlock(LNET_LOCK_EX);

		islo = ni->ni_net->net_lnd->lnd_type == LOLND;

		LASSERT(!in_interrupt());
		(net->net_lnd->lnd_shutdown)(ni);

		if (!islo)
			CDEBUG(D_LNI, "Removed LNI %s\n",
			       libcfs_nid2str(ni->ni_nid));

		lnet_ni_free(ni);
		lnet_net_lock(LNET_LOCK_EX);
	}
}

/* shut down the NI and release its refcount */
static void
lnet_shutdown_lndni(struct lnet_ni *ni)
{
	int		 i;
	struct lnet_net *net = ni->ni_net;

	lnet_net_lock(LNET_LOCK_EX);
	ni->ni_state = LNET_NI_STATE_DELETING;
	lnet_ni_unlink_locked(ni);
	lnet_net_unlock(LNET_LOCK_EX);

	/* clear messages for this NI on the lazy portal */
	for (i = 0; i < the_lnet.ln_nportals; i++)
		lnet_clear_lazy_portal(ni, i, "Shutting down NI");

	/* Do peer table cleanup for this ni */
	lnet_peer_tables_cleanup(ni);

	lnet_net_lock(LNET_LOCK_EX);
	lnet_clear_zombies_nis_locked(net);
	lnet_net_unlock(LNET_LOCK_EX);
}

static void
lnet_shutdown_lndnet(struct lnet_net *net)
{
	struct lnet_ni *ni;

	lnet_net_lock(LNET_LOCK_EX);

	net->net_state = LNET_NET_STATE_DELETING;

	list_del_init(&net->net_list);

	while (!list_empty(&net->net_ni_list)) {
		ni = list_entry(net->net_ni_list.next,
				lnet_ni_t, ni_netlist);
		lnet_net_unlock(LNET_LOCK_EX);
		lnet_shutdown_lndni(ni);
		lnet_net_lock(LNET_LOCK_EX);
	}

	/*
	 * decrement ref count on lnd only when the entire network goes
	 * away
	 */
	net->net_lnd->lnd_refcount--;

	lnet_net_unlock(LNET_LOCK_EX);

	lnet_net_free(net);
}

static void
lnet_shutdown_lndnets(void)
{
	struct lnet_net *net;

	/* NB called holding the global mutex */

	/* All quiet on the API front */
	LASSERT(!the_lnet.ln_shutdown);
	LASSERT(the_lnet.ln_refcount == 0);

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_shutdown = 1;	/* flag shutdown */

	while (!list_empty(&the_lnet.ln_nets)) {
		/*
		 * move the nets to the zombie list to avoid them being
		 * picked up for new work. LONET is also included in the
		 * nets that will be moved to the zombie list
		 */
		net = list_entry(the_lnet.ln_nets.next,
				 struct lnet_net, net_list);
		list_move(&net->net_list, &the_lnet.ln_net_zombie);
	}

	/* Drop the cached loopback Net. */
	if (the_lnet.ln_loni != NULL) {
		lnet_ni_decref_locked(the_lnet.ln_loni, 0);
		the_lnet.ln_loni = NULL;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through the net zombie list and delete each net */
	while (!list_empty(&the_lnet.ln_net_zombie)) {
		net = list_entry(the_lnet.ln_net_zombie.next,
				 struct lnet_net, net_list);
		lnet_shutdown_lndnet(net);
	}

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_shutdown = 0;
	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
{
	int			 rc;
	int			 i;
	struct lnet_tx_queue	*tq;
	struct lnet_net		*net = ni->ni_net;

	mutex_lock(&the_lnet.ln_lnd_mutex);

	if (tun) {
		memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
		ni->ni_lnd_tunables_set = true;
	}

	rc = (net->net_lnd->lnd_startup)(ni);

	mutex_unlock(&the_lnet.ln_lnd_mutex);

	if (rc != 0) {
		LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
				   rc, libcfs_lnd2str(net->net_lnd->lnd_type));
		lnet_net_lock(LNET_LOCK_EX);
		net->net_lnd->lnd_refcount--;
		lnet_net_unlock(LNET_LOCK_EX);
		return rc;
	}

	ni->ni_state = LNET_NI_STATE_ACTIVE;

	/* We keep a reference on the loopback net through the loopback NI */
	if (net->net_lnd->lnd_type == LOLND) {
		lnet_ni_addref(ni);
		LASSERT(the_lnet.ln_loni == NULL);
		the_lnet.ln_loni = ni;
		ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
		ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
		ni->ni_net->net_tunables.lct_max_tx_credits = 0;
		ni->ni_net->net_tunables.lct_peer_timeout = 0;
		return 0;
	}

	if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
	    ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
		LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
				   libcfs_lnd2str(net->net_lnd->lnd_type),
				   ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
					"" : "per-peer ");
		/* shut down the NI since if we get here then it must've already
		 * been started
		 */
		lnet_shutdown_lndni(ni);
		return -EINVAL;
	}

	cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
		tq->tq_credits_min =
		tq->tq_credits_max =
		tq->tq_credits = lnet_ni_tq_credits(ni);
	}

	CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
	       libcfs_nid2str(ni->ni_nid),
	       ni->ni_net->net_tunables.lct_peer_tx_credits,
	       lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
	       ni->ni_net->net_tunables.lct_peer_rtr_credits,
	       ni->ni_net->net_tunables.lct_peer_timeout);

	return 0;
}

static int
lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
{
	struct lnet_ni		*ni;
	struct lnet_net		*net_l = NULL;
	struct list_head	local_ni_list;
	int			rc;
	int			ni_count = 0;
	__u32			lnd_type;
	lnd_t			*lnd;
	int			peer_timeout =
		net->net_tunables.lct_peer_timeout;
	int			maxtxcredits =
		net->net_tunables.lct_max_tx_credits;
	int			peerrtrcredits =
		net->net_tunables.lct_peer_rtr_credits;

	INIT_LIST_HEAD(&local_ni_list);

	/*
	 * make sure that this net is unique. If it isn't then
	 * we are adding interfaces to an already existing network, and
	 * 'net' is just a convenient way to pass in the list.
	 * if it is unique we need to find the LND and load it if
	 * necessary.
	 */
	if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
		lnd_type = LNET_NETTYP(net->net_id);

		LASSERT(libcfs_isknown_lnd(lnd_type));

		if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
		    lnd_type == IIBLND || lnd_type == VIBLND) {
			CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
			rc = -EINVAL;
			goto failed0;
		}

		mutex_lock(&the_lnet.ln_lnd_mutex);
		lnd = lnet_find_lnd_by_type(lnd_type);

		if (lnd == NULL) {
			mutex_unlock(&the_lnet.ln_lnd_mutex);
			rc = request_module("%s", libcfs_lnd2modname(lnd_type));
			mutex_lock(&the_lnet.ln_lnd_mutex);

			lnd = lnet_find_lnd_by_type(lnd_type);
			if (lnd == NULL) {
				mutex_unlock(&the_lnet.ln_lnd_mutex);
				CERROR("Can't load LND %s, module %s, rc=%d\n",
				       libcfs_lnd2str(lnd_type),
				       libcfs_lnd2modname(lnd_type), rc);
#ifndef HAVE_MODULE_LOADING_SUPPORT
				LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
						   "compiled with kernel module "
						   "loading support.");
#endif
				rc = -EINVAL;
				goto failed0;
			}
		}

		lnet_net_lock(LNET_LOCK_EX);
		lnd->lnd_refcount++;
		lnet_net_unlock(LNET_LOCK_EX);

		net->net_lnd = lnd;

		mutex_unlock(&the_lnet.ln_lnd_mutex);

		net_l = net;
	}

	/*
	 * net_l: if the network being added is unique then net_l
	 *	  will point to that network.
	 *	  if the network being added is not unique then
	 *	  net_l points to the existing network.
	 *
	 * When we enter the loop below, we'll pick NIs off the
	 * network being added and start them up, then add them to
	 * a local ni list. Once we've successfully started all
	 * the NIs then we join the local NI list (of started-up
	 * networks) with the net_l->net_ni_list, which should
	 * point to the correct network to add the new ni list to.
	 *
	 * If any of the new NIs fail to start up, then we want to
	 * iterate through the local ni list, which should include
	 * any NIs which were successfully started up, and shut
	 * them down.
	 *
	 * After that we want to delete the network being added,
	 * to avoid a memory leak.
	 */

	/*
	 * When a network uses TCP bonding then all its interfaces
	 * must be specified when the network is first defined: the
	 * TCP bonding code doesn't allow for interfaces to be added
	 * later.
	 */
	if (net_l != net && net_l != NULL && use_tcp_bonding &&
	    LNET_NETTYP(net_l->net_id) == SOCKLND) {
		rc = -EINVAL;
		goto failed0;
	}

	while (!list_empty(&net->net_ni_added)) {
		ni = list_entry(net->net_ni_added.next, struct lnet_ni,
				ni_netlist);
		list_del_init(&ni->ni_netlist);

		/* make sure that the NI we're about to start
		 * up is actually unique. if it's not fail. */
		if (!lnet_ni_unique_net(&net_l->net_ni_list,
					ni->ni_interfaces[0])) {
			rc = -EINVAL;
			goto failed1;
		}

		/* adjust the pointer to the parent network, just in case
		 * the net is a duplicate */
		ni->ni_net = net_l;

		rc = lnet_startup_lndni(ni, tun);

		LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
			ni->ni_net->net_lnd->lnd_query != NULL);

		if (rc < 0)
			goto failed1;

		list_add_tail(&ni->ni_netlist, &local_ni_list);

		ni_count++;
	}

	lnet_net_lock(LNET_LOCK_EX);
	list_splice_tail(&local_ni_list, &net_l->net_ni_list);
	lnet_net_unlock(LNET_LOCK_EX);

	/* if the network is not unique then we don't want to keep
	 * it around after we're done. Free it. Otherwise add that
	 * net to the global the_lnet.ln_nets */
	if (net_l != net && net_l != NULL) {
		/*
		 * TODO - note. currently the tunables can not be updated
		 * once the network has been added
		 */
		lnet_net_free(net);
	} else {
		net->net_state = LNET_NET_STATE_ACTIVE;
		/*
		 * restore tunables after they have been overwritten by the
		 * lnd
		 */
		if (peer_timeout != -1)
			net->net_tunables.lct_peer_timeout = peer_timeout;
		if (maxtxcredits != -1)
			net->net_tunables.lct_max_tx_credits = maxtxcredits;
		if (peerrtrcredits != -1)
			net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;

		lnet_net_lock(LNET_LOCK_EX);
		list_add_tail(&net->net_list, &the_lnet.ln_nets);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	return ni_count;

failed1:
	/*
	 * shutdown the new NIs that are being started up
	 * free the NET being started
	 */
	while (!list_empty(&local_ni_list)) {
		ni = list_entry(local_ni_list.next, struct lnet_ni,
				ni_netlist);

		lnet_shutdown_lndni(ni);
	}

failed0:
	lnet_net_free(net);

	return rc;
}

static int
lnet_startup_lndnets(struct list_head *netlist)
{
	struct lnet_net *net;
	int		 rc;
	int		 ni_count = 0;

	while (!list_empty(netlist)) {
		net = list_entry(netlist->next, struct lnet_net, net_list);
		list_del_init(&net->net_list);

		rc = lnet_startup_lndnet(net, NULL);
		if (rc < 0)
			goto failed;

		ni_count += rc;
	}

	return ni_count;

failed:
	lnet_shutdown_lndnets();

	return rc;
}

/**
 * Initialize LNet library.
 *
 * Automatically called at module loading time. Caller has to call
 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
 * latter returned 0. It must be called exactly once.
 *
 * \retval 0 on success
 * \retval -ve on failures.
 */
int lnet_lib_init(void)
{
	int rc;

	lnet_assert_wire_constants();

	memset(&the_lnet, 0, sizeof(the_lnet));

	/* refer to global cfs_cpt_table for now */
	the_lnet.ln_cpt_table	= cfs_cpt_table;
	the_lnet.ln_cpt_number	= cfs_cpt_number(cfs_cpt_table);

	LASSERT(the_lnet.ln_cpt_number > 0);
	if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
		/* we are under risk of consuming all lh_cookie */
		CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
		       "please change setting of CPT-table and retry\n",
		       the_lnet.ln_cpt_number, LNET_CPT_MAX);
		return -E2BIG;
	}

	while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
		the_lnet.ln_cpt_bits++;

	rc = lnet_create_locks();
	if (rc != 0) {
		CERROR("Can't create LNet global locks: %d\n", rc);
		return rc;
	}

	the_lnet.ln_refcount = 0;
	LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
	INIT_LIST_HEAD(&the_lnet.ln_lnds);
	INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
	INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
	INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);

	/* The hash table size is the number of bits it takes to express the set
	 * ln_num_routes, minus 1 (better to underestimate than overestimate so
	 * we don't waste memory). */
	if (rnet_htable_size <= 0)
		rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
	else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
		rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
	the_lnet.ln_remote_nets_hbits = max_t(int, 1,
					      order_base_2(rnet_htable_size) - 1);
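
	/*
	 * Worked example of the sizing above (values illustrative): a
	 * requested rnet_htable_size of 128 gives order_base_2(128) = 7,
	 * so the table uses max(1, 7 - 1) = 6 bits, i.e. 64 hash buckets.
	 */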

	/* All LNDs apart from the LOLND are in separate modules. They
	 * register themselves when their module loads, and unregister
	 * themselves when their module is unloaded. */
	lnet_register_lnd(&the_lolnd);

	return 0;
}

/**
 * Finalize LNet library.
 *
 * \pre lnet_lib_init() called with success.
 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
 */
void lnet_lib_exit(void)
{
	LASSERT(the_lnet.ln_refcount == 0);

	while (!list_empty(&the_lnet.ln_lnds))
		lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
					       lnd_t, lnd_list));
	lnet_destroy_locks();
}

/**
 * Set LNet PID and start LNet interfaces, routing, and forwarding.
 *
 * Users must call this function at least once before any other functions.
 * For each successful call there must be a corresponding call to
 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
 * ignored.
 *
 * The PID used by LNet may be different from the one requested.
 * See LNetGetId().
 *
 * \param requested_pid PID requested by the caller.
 *
 * \return >= 0 on success, and < 0 error code on failures.
 */
int
LNetNIInit(lnet_pid_t requested_pid)
{
	int			im_a_router = 0;
	int			rc;
	int			ni_count;
	struct lnet_ping_info	*pinfo;
	lnet_handle_md_t	md_handle;
	struct list_head	net_head;
	struct lnet_net		*net;

	INIT_LIST_HEAD(&net_head);

	mutex_lock(&the_lnet.ln_api_mutex);

	CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);

	if (the_lnet.ln_refcount > 0) {
		rc = the_lnet.ln_refcount++;
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	rc = lnet_prepare(requested_pid);
	if (rc != 0) {
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	/* create a network for the Loopback network */
	net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
	if (net == NULL) {
		rc = -ENOMEM;
		goto err_empty_list;
	}

	/* Add in the loopback NI */
	if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
		rc = -ENOMEM;
		goto err_empty_list;
	}

	/* If LNet is being initialized via DLC it is possible
	 * that the user requests not to load module parameters (ones which
	 * are supported by DLC) on initialization. Therefore, make sure not
	 * to load networks, routes and forwarding from module parameters
	 * in this case. On cleanup in case of failure only clean up
	 * routes if they have been loaded */
	if (!the_lnet.ln_nis_from_mod_params) {
		rc = lnet_parse_networks(&net_head, lnet_get_networks(),
					 use_tcp_bonding);
		if (rc < 0)
			goto err_empty_list;
	}

	ni_count = lnet_startup_lndnets(&net_head);
	if (ni_count < 0) {
		rc = ni_count;
		goto err_empty_list;
	}

	if (!the_lnet.ln_nis_from_mod_params) {
		rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
		if (rc != 0)
			goto err_shutdown_lndnis;

		rc = lnet_check_routes();
		if (rc != 0)
			goto err_destroy_routes;

		rc = lnet_rtrpools_alloc(im_a_router);
		if (rc != 0)
			goto err_destroy_routes;
	}

	rc = lnet_acceptor_start();
	if (rc != 0)
		goto err_destroy_routes;

	the_lnet.ln_refcount = 1;
	/* Now I may use my own API functions... */

	rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
	if (rc != 0)
		goto err_acceptor_stop;

	lnet_ping_target_update(pinfo, md_handle);

	rc = lnet_router_checker_start();
	if (rc != 0)
		goto err_stop_ping;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;

err_stop_ping:
	lnet_ping_target_fini();
err_acceptor_stop:
	the_lnet.ln_refcount = 0;
	lnet_acceptor_stop();
err_destroy_routes:
	if (!the_lnet.ln_nis_from_mod_params)
		lnet_destroy_routes();
err_shutdown_lndnis:
	lnet_shutdown_lndnets();
err_empty_list:
	lnet_unprepare();
	LASSERT(rc < 0);
	mutex_unlock(&the_lnet.ln_api_mutex);
	while (!list_empty(&net_head)) {
		struct lnet_net *net;

		net = list_entry(net_head.next, struct lnet_net, net_list);
		list_del_init(&net->net_list);
		lnet_net_free(net);
	}
	return rc;
}
EXPORT_SYMBOL(LNetNIInit);

/**
 * Stop LNet interfaces, routing, and forwarding.
 *
 * Users must call this function once for each successful call to LNetNIInit().
 * Once the LNetNIFini() operation has been started, the results of pending
 * API operations are undefined.
 *
 * \return always 0 for current implementation.
 */
int
LNetNIFini(void)
{
	mutex_lock(&the_lnet.ln_api_mutex);

	LASSERT(the_lnet.ln_refcount > 0);

	if (the_lnet.ln_refcount != 1) {
		the_lnet.ln_refcount--;
	} else {
		LASSERT(!the_lnet.ln_niinit_self);

		lnet_router_checker_stop();
		lnet_ping_target_fini();

		/* Teardown fns that use my own API functions BEFORE here */
		the_lnet.ln_refcount = 0;

		lnet_acceptor_stop();
		lnet_destroy_routes();
		lnet_shutdown_lndnets();
		lnet_unprepare();
	}

	mutex_unlock(&the_lnet.ln_api_mutex);
	return 0;
}
EXPORT_SYMBOL(LNetNIFini);
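
/*
 * Typical caller sequence for the two entry points above (a minimal
 * sketch; error handling and the requested PID are up to the caller,
 * LNET_PID_LUSTRE being the usual kernel-space choice):
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	... use LNet: LNetGetId(), LNetPut(), LNetGet(), ...
 *	LNetNIFini();
 */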

/**
 * Grabs the ni data from the ni structure and fills the out
 * parameters
 *
 * \param[in] ni network interface structure
 * \param[out] config ioctl config structure; receives the NID, the
 *	       peer timeout, peer_tx_credits, peer_rtr_credits,
 *	       max_tx_credits, the CPT count and the network
 *	       configuration, including any LND tunables
 */
static void
lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
{
	struct lnet_ioctl_net_config *net_config;
	struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
	size_t min_size, tunable_size = 0;
	int i;

	if (!ni || !config)
		return;

	net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
	if (!net_config)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
		     ARRAY_SIZE(net_config->ni_interfaces));

	for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
		if (!ni->ni_interfaces[i])
			break;

		strncpy(net_config->ni_interfaces[i],
			ni->ni_interfaces[i],
			sizeof(net_config->ni_interfaces[i]));
	}

	config->cfg_nid = ni->ni_nid;
	config->cfg_config_u.cfg_net.net_peer_timeout =
		ni->ni_net->net_tunables.lct_peer_timeout;
	config->cfg_config_u.cfg_net.net_max_tx_credits =
		ni->ni_net->net_tunables.lct_max_tx_credits;
	config->cfg_config_u.cfg_net.net_peer_tx_credits =
		ni->ni_net->net_tunables.lct_peer_tx_credits;
	config->cfg_config_u.cfg_net.net_peer_rtr_credits =
		ni->ni_net->net_tunables.lct_peer_rtr_credits;

	net_config->ni_status = ni->ni_status->ns_status;

	if (ni->ni_cpts) {
		int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);

		for (i = 0; i < num_cpts; i++)
			net_config->ni_cpts[i] = ni->ni_cpts[i];

		config->cfg_ncpts = num_cpts;
	}

	/*
	 * See if user land tools sent in a newer and larger version
	 * of struct lnet_tunables than what the kernel uses.
	 */
	min_size = sizeof(*config) + sizeof(*net_config);

	if (config->cfg_hdr.ioc_len > min_size)
		tunable_size = config->cfg_hdr.ioc_len - min_size;

	/* Don't copy too much data to user space */
	min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
	lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;

	if (lnd_cfg && min_size) {
		memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
		config->cfg_config_u.cfg_net.net_interface_count = 1;

		/* Tell user land that kernel side has less data */
		if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
			min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
			config->cfg_hdr.ioc_len -= min_size;
		}
	}
}

struct lnet_ni *
lnet_get_ni_idx_locked(int idx)
{
	struct lnet_ni	*ni;
	struct lnet_net *net;

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (idx-- == 0)
				return ni;
		}
	}

	return NULL;
}

struct lnet_ni *
lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
{
	struct lnet_ni	*ni;
	struct lnet_net *net = mynet;

	if (prev == NULL) {
		if (net == NULL)
			net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
					 net_list);
		ni = list_entry(net->net_ni_list.next, struct lnet_ni,
				ni_netlist);

		return ni;
	}

	if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
		/* if you reached the end of the ni list and the net is
		 * specified, then there are no more nis in that net */
		if (net != NULL)
			return NULL;

		/* we reached the end of this net ni list. move to the
		 * next net */
		if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
				 net_list);
		/* get the ni on it */
		ni = list_entry(net->net_ni_list.next, struct lnet_ni,
				ni_netlist);

		return ni;
	}

	/* there are more nis left */
	ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);

	return ni;
}

int
lnet_get_net_config(struct lnet_ioctl_config_data *config)
{
	struct lnet_ni *ni;
	int		rc = -ENOENT;
	int		cpt;
	int		idx = config->cfg_count;

	cpt = lnet_net_lock_current();

	ni = lnet_get_ni_idx_locked(idx);

	if (ni != NULL) {
		rc = 0;
		lnet_ni_lock(ni);
		lnet_fill_ni_info(ni, config);
		lnet_ni_unlock(ni);
	}

	lnet_net_unlock(cpt);
	return rc;
}

int
lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
{
	char			*nets = conf->cfg_config_u.cfg_net.net_intf;
	struct lnet_ping_info	*pinfo;
	lnet_handle_md_t	md_handle;
	struct lnet_net		*net;
	struct list_head	net_head;
	int			rc;
	lnet_remotenet_t	*rnet;
	int			net_ni_count;
	int			num_acceptor_nets;
	__u32			net_type;
	struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;

	INIT_LIST_HEAD(&net_head);

	if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
		lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;

	/* Create net/ni structures for the network string */
	rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
	if (rc <= 0)
		return rc == 0 ? -EINVAL : rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	if (rc > 1) {
		rc = -EINVAL; /* only add one network per call */
		goto failed0;
	}

	net = list_entry(net_head.next, struct lnet_net, net_list);

	lnet_net_lock(LNET_LOCK_EX);
	rnet = lnet_find_rnet_locked(net->net_id);
	lnet_net_unlock(LNET_LOCK_EX);
	/* make sure that the net added doesn't invalidate the current
	 * configuration LNet is keeping */
	if (rnet != NULL) {
		CERROR("Adding net %s will invalidate routing configuration\n",
		       nets);
		rc = -EUSERS;
		goto failed0;
	}

	/*
	 * make sure you calculate the correct number of slots in the ping
	 * info. Since the ping info is a flattened list of all the NIs,
	 * we should allocate enough slots to accommodate the number of NIs
	 * which will be added.
	 *
	 * We can use lnet_get_net_ni_count_locked() since the net is not
	 * on a public list yet, so locking is not a problem
	 */
	net_ni_count = lnet_get_net_ni_count_locked(net);

	rc = lnet_ping_info_setup(&pinfo, &md_handle,
				  net_ni_count + lnet_get_ni_count(),
				  false);
	if (rc != 0)
		goto failed0;

	list_del_init(&net->net_list);

	if (lnd_tunables)
		memcpy(&net->net_tunables,
		       &lnd_tunables->lt_cmn, sizeof(lnd_tunables->lt_cmn));

	/*
	 * before starting this network get a count of the current TCP
	 * networks which require the acceptor thread running. If that
	 * count is == 0 before we start up this network, then we'd want to
	 * start up the acceptor thread after starting up this network
	 */
	num_acceptor_nets = lnet_count_acceptor_nets();

	/*
	 * lnet_startup_lndnet() can deallocate 'net' even if it returns
	 * success, because we ended up adding interfaces to an existing
	 * network. So grab the net_type now
	 */
	net_type = LNET_NETTYP(net->net_id);

	rc = lnet_startup_lndnet(net,
				 (lnd_tunables) ? &lnd_tunables->lt_tun : NULL);
	if (rc < 0)
		goto failed1;

	/*
	 * Start the acceptor thread if this is the first network
	 * being added that requires the thread.
	 */
	if (net_type == SOCKLND && num_acceptor_nets == 0) {
		rc = lnet_acceptor_start();
		if (rc < 0) {
			/* shutdown the net that we just started */
			CERROR("Failed to start up acceptor thread\n");
			/*
			 * Note that if we needed to start the acceptor
			 * thread, then 'net' must have been the first TCP
			 * network, therefore was unique, and therefore
			 * wasn't deallocated by lnet_startup_lndnet()
			 */
			lnet_shutdown_lndnet(net);
			goto failed1;
		}
	}

	lnet_ping_target_update(pinfo, md_handle);
	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;

failed1:
	lnet_ping_md_unlink(pinfo, &md_handle);
	lnet_ping_info_free(pinfo);
failed0:
	mutex_unlock(&the_lnet.ln_api_mutex);
	while (!list_empty(&net_head)) {
		net = list_entry(net_head.next, struct lnet_net, net_list);
		list_del_init(&net->net_list);
		lnet_net_free(net);
	}
	return rc;
}

int
lnet_dyn_del_ni(__u32 net_id)
{
	struct lnet_net	      *net;
	struct lnet_ping_info *pinfo;
	lnet_handle_md_t       md_handle;
	int		       rc;
	int		       net_ni_count;

	/* don't allow userspace to shutdown the LOLND */
	if (LNET_NETTYP(net_id) == LOLND)
		return -EINVAL;

	mutex_lock(&the_lnet.ln_api_mutex);

	lnet_net_lock(0);

	net = lnet_get_net_locked(net_id);
	if (net == NULL) {
		rc = -EINVAL;
		lnet_net_unlock(0);
		goto out;
	}

	net_ni_count = lnet_get_net_ni_count_locked(net);

	lnet_net_unlock(0);

	/* create and link a new ping info, before removing the old one */
	rc = lnet_ping_info_setup(&pinfo, &md_handle,
				  lnet_get_ni_count() - net_ni_count, false);
	if (rc != 0)
		goto out;

	lnet_shutdown_lndnet(net);

	if (lnet_count_acceptor_nets() == 0)
		lnet_acceptor_stop();

	lnet_ping_target_update(pinfo, md_handle);

out:
	mutex_unlock(&the_lnet.ln_api_mutex);

	return rc;
}

/**
 * LNet ioctl handler.
 */
int
LNetCtl(unsigned int cmd, void *arg)
{
	struct libcfs_ioctl_data *data = arg;
	struct lnet_ioctl_config_data *config;
	lnet_process_id_t	  id = {0};
	lnet_ni_t		 *ni;
	int			  rc;

	BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
		     sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);

	switch (cmd) {
	case IOC_LIBCFS_GET_NI:
		rc = LNetGetId(data->ioc_count, &id);
		data->ioc_nid = id.nid;
		return rc;

	case IOC_LIBCFS_FAIL_NID:
		return lnet_fail_nid(data->ioc_nid, data->ioc_count);

	case IOC_LIBCFS_ADD_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_add_route(config->cfg_net,
				    config->cfg_config_u.cfg_route.rtr_hop,
				    config->cfg_nid,
				    config->cfg_config_u.cfg_route.
					rtr_priority);
		if (rc == 0) {
			rc = lnet_check_routes();
			if (rc != 0)
				lnet_del_route(config->cfg_net,
					       config->cfg_nid);
		}
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;

	case IOC_LIBCFS_DEL_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_del_route(config->cfg_net, config->cfg_nid);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;

	case IOC_LIBCFS_GET_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		return lnet_get_route(config->cfg_count,
				      &config->cfg_net,
				      &config->cfg_config_u.cfg_route.rtr_hop,
				      &config->cfg_nid,
				      &config->cfg_config_u.cfg_route.rtr_flags,
				      &config->cfg_config_u.cfg_route.
					rtr_priority);

	case IOC_LIBCFS_GET_NET: {
		size_t total = sizeof(*config) +
			       sizeof(struct lnet_ioctl_net_config);
		config = arg;

		if (config->cfg_hdr.ioc_len < total)
			return -EINVAL;

		return lnet_get_net_config(config);
	}

	case IOC_LIBCFS_GET_LNET_STATS: {
		struct lnet_ioctl_lnet_stats *lnet_stats = arg;

		if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
			return -EINVAL;

		lnet_counters_get(&lnet_stats->st_cntrs);
		return 0;
	}

	case IOC_LIBCFS_CONFIG_RTR:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		if (config->cfg_config_u.cfg_buffers.buf_enable) {
			rc = lnet_rtrpools_enable();
			mutex_unlock(&the_lnet.ln_api_mutex);
			return rc;
		}
		lnet_rtrpools_disable();
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;

	case IOC_LIBCFS_ADD_BUF:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
						buf_tiny,
					  config->cfg_config_u.cfg_buffers.
						buf_small,
					  config->cfg_config_u.cfg_buffers.
						buf_large);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;

	case IOC_LIBCFS_GET_BUF: {
		struct lnet_ioctl_pool_cfg *pool_cfg;
		size_t total = sizeof(*config) + sizeof(*pool_cfg);

		config = arg;

		if (config->cfg_hdr.ioc_len < total)
			return -EINVAL;

		pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
		return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
	}

	case IOC_LIBCFS_GET_PEER_INFO: {
		struct lnet_ioctl_peer *peer_info = arg;

		if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
			return -EINVAL;

		return lnet_get_peer_info(
		   peer_info->pr_count,
		   &peer_info->pr_nid,
		   peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
	}

	case IOC_LIBCFS_NOTIFY_ROUTER: {
		unsigned long jiffies_passed;

		jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
		jiffies_passed = cfs_time_seconds(jiffies_passed);

		return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
				   jiffies - jiffies_passed);
	}

	case IOC_LIBCFS_LNET_DIST:
		rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
		if (rc < 0 && rc != -EHOSTUNREACH)
			return rc;

		data->ioc_u32[0] = rc;
		return 0;

	case IOC_LIBCFS_TESTPROTOCOMPAT:
		lnet_net_lock(LNET_LOCK_EX);
		the_lnet.ln_testprotocompat = data->ioc_flags;
		lnet_net_unlock(LNET_LOCK_EX);
		return 0;

	case IOC_LIBCFS_LNET_FAULT:
		return lnet_fault_ctl(data->ioc_flags, data);

	case IOC_LIBCFS_PING: {
		signed long timeout;

		id.nid = data->ioc_nid;
		id.pid = data->ioc_u32[0];

		/* Don't block longer than 2 minutes */
		if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
			return -EINVAL;

		/* If the timeout is negative then disable it */
		if ((s32)data->ioc_u32[1] < 0)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(data->ioc_u32[1]);

		rc = lnet_ping(id, timeout, data->ioc_pbuf1,
			       data->ioc_plen1 / sizeof(lnet_process_id_t));
		if (rc < 0)
			return rc;

		data->ioc_count = rc;
		return 0;
	}

	default:
		ni = lnet_net2ni(data->ioc_net);
		if (ni == NULL)
			return -EINVAL;

		if (ni->ni_net->net_lnd->lnd_ctl == NULL)
			rc = -EINVAL;
		else
			rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);

		return rc;
	}
	/* not reached */
}
EXPORT_SYMBOL(LNetCtl);

void LNetDebugPeer(lnet_process_id_t id)
{
	lnet_debug_peer(id.nid);
}
EXPORT_SYMBOL(LNetDebugPeer);

/**
 * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
 * all interfaces share the same PID, as requested by LNetNIInit().
 *
 * \param index Index of the interface to look up.
 * \param id On successful return, this location will hold the
 * lnet_process_id_t ID of the interface.
 *
 * \retval 0 If an interface exists at \a index.
 * \retval -ENOENT If no interface has been found.
 */
int
LNetGetId(unsigned int index, lnet_process_id_t *id)
{
	struct lnet_ni	*ni;
	struct lnet_net *net;
	int		 cpt;
	int		 rc = -ENOENT;

	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (index-- != 0)
				continue;

			id->nid = ni->ni_nid;
			id->pid = the_lnet.ln_pid;
			rc = 0;
			goto out;
		}
	}
out:
	lnet_net_unlock(cpt);
	return rc;
}
EXPORT_SYMBOL(LNetGetId);
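
/*
 * Example (a sketch): enumerating all local interfaces by walking the
 * index until LNetGetId() returns -ENOENT:
 *
 *	lnet_process_id_t id;
 *	unsigned int i;
 *
 *	for (i = 0; LNetGetId(i, &id) == 0; i++)
 *		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
 */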

/**
 * Print a string representation of handle \a h into buffer \a str of
 * \a len bytes.
 */
void
LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
{
	snprintf(str, len, "%#llx", h.cookie);
}
EXPORT_SYMBOL(LNetSnprintHandle);

static int lnet_ping(lnet_process_id_t id, signed long timeout,
		     lnet_process_id_t __user *ids, int n_ids)
{
	lnet_handle_eq_t      eqh;
	lnet_handle_md_t      mdh;
	lnet_event_t	      event;
	lnet_md_t	      md = { NULL };
	int		      which;
	int		      unlinked = 0;
	int		      replied = 0;
	const signed long     a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
	int		      infosz;
	struct lnet_ping_info *info;
	lnet_process_id_t     tmpid;
	int		      i;
	int		      nob;
	int		      rc;
	int		      rc2;
	sigset_t	      blocked;

	infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]);

	/* n_ids limit is arbitrary */
	if (n_ids <= 0 || n_ids > 20 || id.nid == LNET_NID_ANY)
		return -EINVAL;

	if (id.pid == LNET_PID_ANY)
		id.pid = LNET_PID_LUSTRE;

	LIBCFS_ALLOC(info, infosz);
	if (info == NULL)
		return -ENOMEM;

	/* NB 2 events max (including any unlink event) */
	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
	if (rc != 0) {
		CERROR("Can't allocate EQ: %d\n", rc);
		goto out_0;
	}

	/* initialize md content */
	md.start     = info;
	md.length    = infosz;
	md.threshold = 2; /* GET/REPLY */
	md.max_size  = 0;
	md.options   = LNET_MD_TRUNCATE;
	md.eq_handle = eqh;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
	if (rc != 0) {
		CERROR("Can't bind MD: %d\n", rc);
		goto out_1;
	}

	rc = LNetGet(LNET_NID_ANY, mdh, id,
		     LNET_RESERVED_PORTAL,
		     LNET_PROTO_PING_MATCHBITS, 0);

	if (rc != 0) {
		/* Don't CERROR; this could be deliberate! */

		rc2 = LNetMDUnlink(mdh);
		LASSERT(rc2 == 0);

		/* NB must wait for the UNLINK event below... */
		unlinked = 1;
		timeout = a_long_time;
	}

	do {
		/* MUST block for unlink to complete */
		if (unlinked)
			blocked = cfs_block_allsigs();

		rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);

		if (unlinked)
			cfs_restore_sigs(blocked);

		CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
		       (rc2 <= 0) ? -1 : event.type,
		       (rc2 <= 0) ? -1 : event.status,
		       (rc2 > 0 && event.unlinked) ? " unlinked" : "");

		LASSERT(rc2 != -EOVERFLOW);	/* can't miss anything */

		if (rc2 <= 0 || event.status != 0) {
			/* timeout or error */
			if (!replied && rc == 0)
				rc = (rc2 < 0) ? rc2 :
				     (rc2 == 0) ? -ETIMEDOUT :
				     event.status;

			if (!unlinked) {
				/* Ensure completion in finite time... */
				LNetMDUnlink(mdh);
				/* No assertion (racing with network) */
				unlinked = 1;
				timeout = a_long_time;
			}
		} else if (rc2 == 0) {
			/* timed out waiting for unlink */
			CWARN("ping %s: late network completion\n",
			      libcfs_id2str(id));
		} else if (event.type == LNET_EVENT_REPLY) {
			replied = 1;
			rc = event.mlength;
		}

	} while (rc2 <= 0 || !event.unlinked);

	if (!replied) {
		if (rc >= 0)
			CWARN("%s: Unexpected rc >= 0 but no reply!\n",
			      libcfs_id2str(id));
		rc = -EIO;
		goto out_1;
	}

	nob = rc;
	LASSERT(nob >= 0 && nob <= infosz);

	rc = -EPROTO;		/* if I can't parse... */

	if (nob < 8) {
		/* can't check magic/version */
		CERROR("%s: ping info too short %d\n",
		       libcfs_id2str(id), nob);
		goto out_1;
	}

	if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
		lnet_swap_pinginfo(info);
	} else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
		CERROR("%s: Unexpected magic %08x\n",
		       libcfs_id2str(id), info->pi_magic);
		goto out_1;
	}

	if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
		CERROR("%s: ping w/o NI status: 0x%x\n",
		       libcfs_id2str(id), info->pi_features);
		goto out_1;
	}

	if (nob < offsetof(struct lnet_ping_info, pi_ni[0])) {
		CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
		       nob, (int)offsetof(struct lnet_ping_info, pi_ni[0]));
		goto out_1;
	}

	if (info->pi_nnis < n_ids)
		n_ids = info->pi_nnis;

	if (nob < offsetof(struct lnet_ping_info, pi_ni[n_ids])) {
		CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
		       nob, (int)offsetof(struct lnet_ping_info, pi_ni[n_ids]));
		goto out_1;
	}

	rc = -EFAULT;		/* if I SEGV... */

	memset(&tmpid, 0, sizeof(tmpid));
	for (i = 0; i < n_ids; i++) {
		tmpid.pid = info->pi_pid;
		tmpid.nid = info->pi_ni[i].ns_nid;
		if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
			goto out_1;
	}
	rc = info->pi_nnis;

out_1:
	rc2 = LNetEQFree(eqh);
	if (rc2 != 0)
		CERROR("rc2 %d\n", rc2);
	LASSERT(rc2 == 0);

out_0:
	LIBCFS_FREE(info, infosz);
	return rc;
}